hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16557fb191c1ea62849d52d444fde47864d855b9 | 43,651 | py | Python | lantz/drivers/sacher/Sacher_EPOS.py | mtsolmn/lantz-drivers | f48caf9000ddd08f2abb837d832e341410af4788 | [
"BSD-3-Clause"
] | 4 | 2019-05-04T00:10:53.000Z | 2020-10-22T18:08:40.000Z | lantz/drivers/sacher/Sacher_EPOS.py | mtsolmn/lantz-drivers | f48caf9000ddd08f2abb837d832e341410af4788 | [
"BSD-3-Clause"
] | 3 | 2019-07-12T13:44:17.000Z | 2020-10-22T19:32:08.000Z | lantz/drivers/sacher/Sacher_EPOS.py | mtsolmn/lantz-drivers | f48caf9000ddd08f2abb837d832e341410af4788 | [
"BSD-3-Clause"
] | 9 | 2019-04-03T17:07:03.000Z | 2021-02-15T21:53:55.000Z | # sacher_epos.py, python wrapper for sacher epos motor
# David Christle <[email protected]>, August 2014
#
"""
Possibly Maxon EPOS now
"""
"""
This is the actual version that works
But only in the lab32 virtual environment
"""
# from instrument import Instrument
# import qt
import ctypes
import ctypes.wintypes
import logging
import time
# from instrument import Instrument
from ctypes.wintypes import DWORD, WORD
import numpy as np
"""
okay so we import a bunch of random stuff
I always forget what ctypes is for but I'll worry about it later
"""
# from subprocess import Popen, PIPE
# from multiprocessing.managers import BaseManager
# import atexit
# import os
# python32_dir = "C:\\Users\\Alex\\Miniconda3\\envs\\lab32"
# assert os.path.isdir(python32_dir)
# os.chdir(python32_dir)
# derp = "C:\\Users\\Alex\\Documents\\wow_such_code"
# assert os.path.isdir(derp)
# os.chdir(derp)
# p = Popen([python32_dir + "\\python.exe", derp + "\\delegate.py"], stdout=PIPE, cwd=derp)
# atexit.register(p.terminate)
# port = int(p.stdout.readline())
# authkey = p.stdout.read()
# print(port, authkey)
# m = BaseManager(address=("localhost", port), authkey=authkey)
# m.connect()
# tell manager to expect an attribute called LibC
# m.register("SacherLasaTeknique")
# access and use libc
# libc = m.SacherLasaTeknique()
# print(libc.vcs())
# eposlib = ctypes.windll.eposcmd
eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll')
DeviceName = b'EPOS'
ProtocolStackName = b'MAXON_RS232'
InterfaceName = b'RS232'
"""
Max on
Max off
but anyway it looks like ctypes is the thing that's talking to the epos dll
"""
HISTCHAN = 65536
TTREADMAX = 131072
RANGES = 8
MODE_HIST = 0
MODE_T2 = 2
MODE_T3 = 3
FLAG_OVERFLOW = 0x0040
FLAG_FIFOFULL = 0x0003
# in mV
ZCMIN = 0
ZCMAX = 20
DISCRMIN = 0
DISCRMAX = 800
# in ps
OFFSETMIN = 0
OFFSETMAX = 1000000000
# in ms
ACQTMIN = 1
ACQTMAX = 10 * 60 * 60 * 1000
# in mV
PHR800LVMIN = -1600
PHR800LVMAX = 2400
"""
wooooooo a bunch a variables and none of them are explained
way to go dc you da real champ
"""
class Sacher_EPOS():
"""
ok before I dive into this giant Sacher class thing let me just list here all the functions that are being defined in this class:
check(self)
before
wreck(self)
ok but actually:
__init__(self, name, address, reset=False)
__del__(self)
get_bit(self, byteval,idx)
_u32todouble(self, uinput)
open(self)
close(self)
get_offset(self)
fine_tuning_steps(self, steps)
set_new_offset(self, new_offset)
get_motor_position(self)
set_target_position(self, target, absolute, immediately)
do_get_wavelength(self)
do_set_wavelength(self, wavelength)
is_open(self)
clear_fault(self)
initialize(self)
The last one is really long
And also damn there are 16 of them
I'll comment about them as I go through them
"""
def __init__(self, name, address, reset=False):
# Instrument.__init__(self, name, tags=['physical'])
# self._port_name = str(address)
self._port_name = address
self._is_open = False
self._HPM = True
# self.add_parameter('wavelength',
# flags = Instrument.FLAG_GETSET,
# type = types.FloatType,
# units = 'nm',
# minval=1070.0,maxval=1180.0)
# self.add_function('open')
# self.add_function('close')
# self.add_function('fine_tuning_steps')
# self.add_function('get_motor_position')
# self.add_function('set_target_position')
# try:
self.open()
self.initialize()
# except:
# logging.error('Error loading Sacher EPOS motor. In use?')
"""
I mean to me this really seems like the initialize function
so I wonder what initialize(self) is doing
At any rate there doesn't seem to be a lot going on here
"""
def __del__(self):
# execute disconnect
self.close()
return
"""
this might be the only self explanatory one
it disconnects
"""
@staticmethod
def get_bit(byteval, idx):
# def get_bit(self, byteval,idx):
return ((byteval & (1 << idx)) != 0)
"""
you get the bits, and then you use them
but honestly I don't really get what this is doing
sudo git a_clue
"""
@staticmethod
def _u32todouble(uinput):
# def _u32todouble(self, uinput):
# this function implements the really weird/non-standard U32 to
# floating point conversion in the sacher VIs
# get sign of number
sign = Sacher_EPOS.get_bit(uinput, 31)
if sign == False:
mantissa_sign = 1
elif sign == True:
mantissa_sign = -1
exp_mask = 0b111111
# print 'uin u is %d' % uinput
# print 'type uin %s' % type(uinput)
# print 'binary input is %s' % bin(long(uinput))
# get sign of exponent
if Sacher_EPOS.get_bit(uinput, 7) == False:
exp_sign = 1
elif Sacher_EPOS.get_bit(uinput, 7) == True:
exp_sign = -1
# print 'exp extract %s' % bin(int(uinput & exp_mask))
# print 'exp conv %s' % (exp_sign*int(uinput & exp_mask))
# print 'sign of exponent %s' % self.get_bit(uinput,7)
# print 'binary constant is %s' % bin(int(0b10000000000000000000000000000000))
mantissa_mask = 0b01111111111111111111111100000000
# mantissa_mask = 0b0111111111111111111111110000000
# print 'mantissa extract is %s' % bin((uinput & mantissa_mask) >> 8)
mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
# print 'mantissa is %.12f' % mantissa
# print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1 if Sacher_EPOS.get_bit(uinput,7) else 0, uinput & exp_mask)
output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))
# print 'output is %s' % output
return output
"""
ok dc gave some slight explanations here
Apparently there's a "really weird/non-standard U32 to floating point conversion in the sacher VIs"
It'd be gr8 if I knew what U32's were
unsigned 32 bit something something?
ah whatever
I'll have to worry about this later
"""
@staticmethod
def _doubletou32(dinput):
mantissa_bit = 0 if int(dinput / abs(dinput)) > 0 else 1
exp_bit = 1 if -1 < dinput < 1 else 0
b = np.ceil(np.log10(abs(dinput)))
a = dinput / 10 ** b
if dinput < 0:
a = -a
# print('a:\t{}\tb:\t{}'.format(a, b))
d = np.log2(10) * b
d_ = np.ceil(d)
c = a * 2 ** (d - d_)
# print('c:\t{}\td_:{}\toriginal:\t{}'.format(c, d_, c * 2 ** d_))
return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))
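# --- Hedged illustration (added; not from the original author) ----------
# Reading the two converters above, the packed 32-bit layout appears to be:
#   bit 31     : mantissa sign (1 = negative)
#   bits 30..8 : abs(mantissa) * 1e6, stored as an integer
#   bit 7      : exponent sign (1 = negative)
#   low bits   : abs(exponent), a power of two (decoded with a 6-bit mask)
# A rough round-trip check, assuming both helpers are correct:
#   packed = Sacher_EPOS._doubletou32(8.49218e-5)
#   Sacher_EPOS._u32todouble(packed)   # ~8.49218e-5, accurate to the 1e-6 mantissa quantisation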
def open(self):
eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
ctypes.POINTER(DWORD)]
eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.HANDLE()
# print 'types are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name), type(buf))
ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf)
self._keyhandle = ret
# print 'keyhandle is %s' % self._keyhandle
# print 'open device ret %s' % buf
# print 'printing'
# print buf.contents.value
# print 'done printer'
if int(buf.contents.value) >= 0:
self._is_open = True
self._keyhandle = ret
return
"""
I have absolutely no idea what the hell this is doing
Considering that close(self) is apparently closing the EPOS motor, maybe this is opening it
"""
def close(self):
print('closing EPOS motor.')
eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.BOOL()
ret = eposlib.VCS_CloseDevice(self._keyhandle, buf)
# print 'close device returned %s' % buf
if int(buf.contents.value) >= 0:
self._is_open = False
else:
logging.error(__name__ + ' did not close Sacher EPOS motor correctly.')
return
"""
Apparently this closes the EPOS motor
I don't know what "opening" and "closing" the motor means though
and yeah also these random variables don't make any sense to me
"""
def get_motor_current(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL
motorCurrent = ctypes.c_uint8(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf))
return motorCurrent.value
"""
Not sure what this is doing yet
"""
def find_home(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf))
print('Homing: {}'.format(ret))
return ret
"""
Not sure what this is doing yet
"""
def restore(self):
nodeID = ctypes.wintypes.WORD(0)
# note: declare the prototype on VCS_Restore, which is what is actually called below
eposlib.VCS_Restore.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_Restore.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))
print('Restore: {}'.format(ret))
return ret
"""
Not sure what this is doing yet
"""
def get_offset(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32))
if ret == 0:
logging.error(__name__ + ' Could not read stored position from Sacher EPOS motor')
return CastedObjectData[0]
"""
Not sure what this is doing yet
"""
def fine_tuning_steps(self, steps):
current_motor_pos = self.get_motor_position()
self._offset = self.get_offset()
self.set_target_position(steps, False, True)
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print 'new offset is %s' % (new_motor_pos-current_motor_pos+self._offset)
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
"""
Not sure what this is doing yet
"""
def set_new_offset(self, new_offset):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
Not sure what this is doing yet
"""
def set_coeffs(self, a, b, c, min_wl, max_wl):
print('')
print("setting coefficients...")
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
d = (min_wl << 16) + max_wl
StoredPositionObject = ctypes.wintypes.WORD(8204)
for subidx, coeff in enumerate([a, b, c]):
print(subidx, coeff)
StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff))
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(d)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
Not sure what this is doing yet
"""
def get_motor_position(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
pPosition = ctypes.pointer(ctypes.c_long())
eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))
# print 'get motor position ret %s' % ret
# print 'get motor position buf %s' % buf.value
# print 'get motor position value %s' % pPosition.contents.value
return pPosition.contents.value
# print('getting motor position...')
# print(ret)
# return print(pPosition.contents.value)
"""
Not sure what this is doing yet
"""
def set_target_position(self, target, absolute, immediately):
# print('check #1')
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# First, set enabled state
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('Enable state ret %s buf %s' % (ret, buf.value))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
pTarget = ctypes.c_long(target)
pAbsolute = ctypes.wintypes.BOOL(absolute)
pImmediately = ctypes.wintypes.BOOL(immediately)
eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
# print('check #2')
# print('About to set motor position')
# print('Current motor position is %d' % (self.get_motor_position()))
ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
steps_per_second = 14494.0 # hardcoded, estimated roughly, unused now
nchecks = 0
# print('check #3')
while nchecks < 1000:
# get the movement state. a movement state of 1 indicates the motor
# is done moving
# print('')
# print('check #4')
# print('Motor current: {}'.format(self.get_motor_current()))
print('Motor position: {}'.format(self.get_motor_position()))
# print('Motor offset: {}'.format(self.get_offset()))
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
# print(pMovementState.contents.value)
eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.BOOL),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
# print('Getting movement state')
ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
# print('Movement state is %s' % pMovementState.contents.value)
if pMovementState.contents.value == 1:
break
nchecks = nchecks + 1
# print('Current motor position is %d' % self.get_motor_position())
# print('check #5')
# print(nchecks)
# print('')
time.sleep(0.01)
# Now set disabled state
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('check #6')
# print('Disable state ret %s buf %s' % (ret, buf.value))
# print('Final motor position is %d' % (self.get_motor_position()))
# print('check #7')
return ret
"""
Not sure what this is doing yet
"""
def fuck_my_life(self, wavelength):
print('goddamn this piece of shit')
print('')
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
print(b2a)
print(np.sqrt(sqrtarg))
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
print('wavelength_to_pos: {}'.format(wavelength_to_pos))
print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
print('self._offset: {}'.format(int(self._offset)))
"""
Not sure what this is doing yet
"""
def do_get_wavelength(self):
self._offset = self.get_offset()
# self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC
self._currentwl = self._doubleA * (
self.get_motor_position()) ** 2.0 + self._doubleB * self.get_motor_position() + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
return self._currentwl
"""
Not sure what this is doing yet
"""
def do_set_wavelength(self, wavelength):
print('setting wavelength...')
print('')
# print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
# x is what the motor position should be
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
# print('Diff wavelength offset %s' % diff_wavelength_offset)
# Step 5: If HPM is activated and the wavelength position is lower, overshoot
# the movement by 10,000 steps
# print('Step 5...')
# print('#4 Motor current: {}'.format(self.get_motor_current()))
if 1 == 2:
print('uh-oh')
# if self._HPM and diff_wavelength_offset < 0:
#
# print('Overshooting by 10000')
#
# self.set_target_position(diff_wavelength_offset - 10000, False, True)
# # Step 6: Set the real target position
#
# """
# HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID THING THAT'S NOT WORKING!
# """
#
# #print('Step 6a... diff wavelength')
#
# self.set_target_position(10000, False, True)
else:
# print('Step 6b... diff wavelength')
# self.set_target_position(diff_wavelength_offset, False, True)
"""WRONG"""
self.set_target_position(wavelength_to_pos, True, True)
"""this is the real shit right here
I need to set the absolute position to true
"""
# self.set_target_position(10000, False, True)
# Step 7: Get the actual motor position
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print('new offset is %s' % (new_motor_pos-current_motor_pos+self._offset))
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
# Step 8, get and print current wavelength
# print('Current wavelength is %.3f' % self.do_get_wavelength())
# print('setting wavelength done')
return
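# --- Hedged note (added; not from the original author) ------------------
# The calibration used above and in do_get_wavelength treats wavelength as a
# quadratic in motor position, wl(pos) = A*pos**2 + B*pos + C, so the code
# inverts it with the quadratic formula
#     pos = -B/(2*A) +/- sqrt(B**2/(4*A**2) - (C - wl)/A)
# and uses the pos0/pos5000 comparison to pick the root on the increasing
# branch.  As a worked example with the coefficients from the commented-out
# set_coeffs() call at the bottom of this file (A=8.34529e-12, B=8.49218e-5,
# C=1081.92), a target of 1151.5 nm maps to roughly 7.6e5 motor steps.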
"""
Not sure what this is doing yet
"""
def is_open(self):
return self._is_open
"""
Not sure what this is doing yet
"""
def clear_fault(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
print('clear fault buf %s, ret %s' % (buf, ret))
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
"""
Not sure what this is doing yet
"""
def initialize(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
BaudRate = DWORD(38400)
Timeout = DWORD(100)
ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf))
# print 'set protocol buf %s ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
# eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
# print 'clear fault buf %s, ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
plsenabled = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf))
# print 'get enable state buf %s ret %s and en %s' % (buf, ret, plsenabled)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
if int(plsenabled.value) != 0:
logging.warning(__name__ + ' EPOS motor enabled, disabling before proceeding.')
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
if int(ret) != 0:
logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding')
else:
logging.error(__name__ + ' EPOS motor was not successfully disabled!')
buf = ctypes.wintypes.DWORD(0)
Counts = WORD(512) # incremental encoder counts in pulses per turn
PositionSensorType = WORD(4)
ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf))
## if ret == int(0):
## print 'errr'
## errbuf = ctypes.create_string_buffer(64)
## print 'sending'
## eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL
## print 'boolerrorinfo'
## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD]
## print 'arg'
##
## ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))
## print 'err'
## raise ValueError(errbuf.value)
# For some reason, it appears normal in the LabVIEW code that this
# function actually returns an error, i.e. the return value is zero
# and the buffer has a non-zero error code in it; the LabVIEW code
# doesn't check it.
# Also, it appears that in the 2005 version of this DLL, the function
# VCS_GetErrorInfo doesn't exist!
# Get operation mode, check if it's 1 -- this is "profile position mode"
buf = ctypes.wintypes.DWORD(0)
pMode = ctypes.pointer(ctypes.c_int8())
eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
# if mode is not 1, make it 1
if pMode.contents.value != 1:
eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
pMode_setting = ctypes.c_int8(1)
ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))
eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value, pProfileDeceleration.contents.value)
if (int(pProfileVelocity.contents.value) > int(11400) or int(pProfileAcceleration.contents.value) > int(
60000) or int(pProfileDeceleration.contents.value) > int(60000)):
# note: this prototype is for VCS_SetPositionProfile, which is the call made below
eposlib.VCS_SetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.wintypes.DWORD(429)
pProfileAcceleration = ctypes.wintypes.DWORD(429)
pProfileDeceleration = ctypes.wintypes.DWORD(429)
logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')
ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
# Now get the motor position (stored position offset)
# from the device's "homposition" object
self._offset = self.get_offset()
# Now read the stored 'calculation parameters'
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# More hardcoded values
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(1)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefA = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# Get coefficient B
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(2)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefB = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(3)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefC = CastedObjectData[0]
# Get coefficient D
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefD = CastedObjectData[0]
# print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC, self._coefD)
self._doubleA = self._u32todouble(self._coefA)
self._doubleB = self._u32todouble(self._coefB)
self._doubleC = self._u32todouble(self._coefC)
firstHalf = np.int16(self._coefD >> 16)
secondHalf = np.int16(self._coefD & 0xffff)
# Set the minimum and maximum wavelengths for the motor
self._minwl = float(firstHalf) / 10.0
self._maxwl = float(secondHalf) / 10.0
# print 'first %s second %s' % (firstHalf, secondHalf)
# This returns '10871' and '11859' for the Sacher, which are the correct
# wavelength ranges in Angstroms
# print 'Now calculate the current wavelength position:'
self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
print('initializing done')
return True
"""
Not sure what this is doing yet
"""
"""
Also we're done with the Sacher_EPOS() class at this point
"""
if __name__ == '__main__':
epos = Sacher_EPOS(None, b'COM3')
# epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860)
# epos.do_get_wavelength()
# print('#1 Motor current: {}'.format(epos.get_motor_current()))
# epos.do_get_wavelength()
# print('motor position is...')
# current_pos = epos.get_motor_position()
# print('current position is {}'.format(current_pos))
# new_pos = current_pos + 10000
# epos.set_target_position(new_pos, True, True)
# print(epos.get_motor_position())
# print('#2 Motor current: {}'.format(epos.get_motor_current()))
# epos.find_home()
# epos.restore()
# time.sleep(7)
epos.do_set_wavelength(1151.5)
# epos.do_get_wavelength()
print('Motor current: {}'.format(epos.get_motor_current()))
print('Motor position: {}'.format(epos.get_motor_position()))
"""
OTHER MISC. NOTES:
increasing wavelength:
causes the square to rotate left
causes base to move to the left when square is stuck in
causes screw to loosen
causes large gold base to tighten
decreasing wavelength:
there's an overshoot when lowering wavelength
causes the square to rotate right
causes base to move to the right when square is stuck in
causes screw to tighten
causes large gold base to loosen, and also unplug the motor
Also you don't need to explicitly run epos.initialize() because there's an __init__ function which contains epos.initialize()
"""
# womp the end
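# --- Hedged usage sketch (added; not part of the original file) --------------
# A minimal end-to-end flow using only the methods defined above.  It assumes
# the hard-coded EposCmd.dll path and the b'COM3' port used in this file are
# valid on the host machine, so it is left as a function that is never called
# automatically.
def _example_usage():
    motor = Sacher_EPOS(None, b'COM3')   # open() and initialize() run in __init__
    try:
        print('Starting wavelength: %.3f nm' % motor.do_get_wavelength())
        motor.do_set_wavelength(1151.5)  # target wavelength in nm
        print('Motor position now: %d' % motor.get_motor_position())
    finally:
        motor.close()
# _example_usage()  # uncomment to run against real hardware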
| 41.532826 | 147 | 0.625644 | 40,030 | 0.917047 | 0 | 0 | 2,365 | 0.05418 | 0 | 0 | 15,854 | 0.363199 |
165616f6329f47d7fc22c8cc1eb0970f40d768d9 | 1,652 | py | Python | tools/generate_lst.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | [
"MIT"
] | null | null | null | tools/generate_lst.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | [
"MIT"
] | null | null | null | tools/generate_lst.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | [
"MIT"
] | null | null | null | import argparse
import os
image_dir = 'image'
label_dir = 'label'
splits = ['train', 'val', 'test']
image_dirs = [
'image/{}',
'image/{}_crop'
]
label_dirs = [
'label/{}/annotations',
'label/{}/annotations_crop',
]
def generate(root):
assert len(image_dirs) == len(label_dirs)
for split in splits:
for image_path, label_path in zip(image_dirs, label_dirs):
image_path = image_path.format(split)
label_path = label_path.format(split)
if split != 'train' and image_path.endswith('_crop'):
label_path = label_path.replace('_crop', '')
if not os.path.exists(os.path.join(root, label_path)):
continue
lines = []
for label in os.listdir(os.path.join(root, label_path)):
image = label.replace('.png', '.jpg')
if os.path.exists(os.path.join(root, image_path, image)):
lines.append('{} {}\n'.format(os.path.join(image_path, image), os.path.join(label_path, label)))
else:
print('not found: {}'.format(os.path.join(root, image_path, image)))
print(image_path, label_path, len(lines))
output_file = '{}.lst'.format(image_path.split('/')[1])
with open(os.path.join(root, output_file), 'w') as f:
f.writelines(lines)
print(f'Save to {os.path.join(root, output_file)}\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('root', type=str, help='path of dataset root')
args = parser.parse_args()
generate(args.root)
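# --- Hedged illustration (added; not from the original repo) -----------------
# Assumed layout under <root>, inferred from the path templates above:
#   <root>/image/train/xxx.jpg       paired with  <root>/label/train/annotations/xxx.png
#   <root>/image/train_crop/xxx.jpg  paired with  <root>/label/train/annotations_crop/xxx.png
# Each emitted list file (e.g. <root>/train.lst or <root>/train_crop.lst) holds
# one "image_path label_path" pair per line, for example:
#   image/train/xxx.jpg label/train/annotations/xxx.png
# Typical invocation:
#   python tools/generate_lst.py /path/to/dataset_root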
| 30.036364 | 116 | 0.579903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.159201 |
1658161ce6f6978b51d0a1fdd4a0ce93c2160124 | 897 | py | Python | examples/example.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | [
"MIT"
] | 21 | 2021-03-04T04:56:20.000Z | 2022-03-31T11:15:28.000Z | examples/example.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | [
"MIT"
] | 12 | 2021-02-16T16:16:23.000Z | 2021-05-28T06:00:41.000Z | examples/example.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | [
"MIT"
] | 1 | 2021-11-04T12:52:19.000Z | 2021-11-04T12:52:19.000Z | """How to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``."""
# imports, make this example deterministic
import torch
import unfoldNd
torch.manual_seed(0)
# random batched RGB 32x32 image-shaped input tensor of batch size 64
inputs = torch.randn((64, 3, 32, 32))
# module hyperparameters
kernel_size = 3
dilation = 1
padding = 1
stride = 2
# both modules accept the same arguments and perform the same operation
torch_module = torch.nn.Unfold(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
lib_module = unfoldNd.UnfoldNd(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
# forward pass
torch_outputs = torch_module(inputs)
lib_outputs = lib_module(inputs)
# check
if torch.allclose(torch_outputs, lib_outputs):
print("✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.")
else:
raise AssertionError("❌ Outputs don't match")
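# --- Hedged note (added; not in the original example) ------------------------
# With these hyperparameters both outputs should have shape (64, 27, 256):
# 27 = 3 * 3 * 3 values per patch (channels * kernel height * kernel width) and
# 256 = 16 * 16 patch locations, since floor((32 + 2*1 - 3) / 2) + 1 = 16 per
# spatial dimension.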
| 24.916667 | 71 | 0.753623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.423973 |
1658fa9a24f0d70843df0f950d0081f1ffadc11b | 797 | py | Python | src/pretix/helpers/escapejson.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-25T00:11:00.000Z | 2020-04-25T00:11:00.000Z | src/pretix/helpers/escapejson.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/helpers/escapejson.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe
_json_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
_json_escapes_attr = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('"'): '"',
ord("'"): ''',
ord("="): '=',
}
@keep_lazy(str, SafeText)
def escapejson(value):
"""Hex encodes characters for use in a application/json type script."""
return mark_safe(force_str(value).translate(_json_escapes))
@keep_lazy(str, SafeText)
def escapejson_attr(value):
"""Hex encodes characters for use in a html attributw script."""
return mark_safe(force_str(value).translate(_json_escapes_attr))
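# --- Hedged usage sketch (added; not part of the original module) ------------
# With Django installed, for example:
#   escapejson('{"a": "<script>"}')   -> '{"a": "\u003Cscript\u003E"}'
#   escapejson_attr('x="1"')          -> the same hex escapes plus HTML entities
#                                        for ", ' and =
# so JSON can be embedded safely inside <script> tags or HTML attributes.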
| 25.709677 | 75 | 0.6399 | 0 | 0 | 0 | 0 | 379 | 0.475533 | 0 | 0 | 237 | 0.297365 |
1659ed45e2efb246708ee177c0a31eb71473cb9b | 1,813 | py | Python | pyxley/charts/plotly/base.py | snowind/pyxley | cff9e50b8d80b9794c6907355e541f166959cd6c | [
"MIT"
] | 2,536 | 2015-06-26T20:12:30.000Z | 2022-03-01T07:26:44.000Z | pyxley/charts/plotly/base.py | zhiaozhou/pyxley | 2dab00022d977d986169cd8a629b3a2f91be893f | [
"MIT"
] | 51 | 2015-07-17T14:16:43.000Z | 2021-07-09T21:34:36.000Z | pyxley/charts/plotly/base.py | zhiaozhou/pyxley | 2dab00022d977d986169cd8a629b3a2f91be893f | [
"MIT"
] | 335 | 2015-07-16T20:22:00.000Z | 2022-02-25T07:18:15.000Z |
from ..charts import Chart
from flask import jsonify, request
_BASE_CONFIG = {
"showLink": False,
"displaylogo": False,
"modeBarButtonsToRemove": ["sendDataToCloud"]
}
class PlotlyAPI(Chart):
""" Base class for Plotly.js API
This class is used to create charts using the plotly.js api
To keep this general, this chart does not have a default
method of transmitting data. Instead the user must supply
a route_func method.
"""
def __init__(self, chart_id, url, route_func, init_params={}):
options = {
"chartid": chart_id,
"url": url,
"params": init_params
}
super(PlotlyAPI, self).__init__("PlotlyAPI", options, route_func)
@staticmethod
def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
""" basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
config (dict): config parameters
"""
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
_data.append(
{
"x": df[x].values.tolist(),
"y": df[y].values.tolist(),
"mode": mode
}
)
return {
"data": _data,
"layout": layout,
"config": config
}
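# --- Hedged usage sketch (added; not part of the original module) ------------
# line_plot is a plain dict builder, so it can be exercised without a Flask app:
#   import pandas as pd
#   df = pd.DataFrame({"x": [0, 1, 2], "y": [0.0, 1.0, 4.0]})
#   payload = PlotlyAPI.line_plot(df, [("x", "y")], mode="lines")
#   # payload["data"] == [{"x": [0, 1, 2], "y": [0.0, 1.0, 4.0], "mode": "lines"}]
#   # payload["config"] carries _BASE_CONFIG, which hides the "send to cloud" button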
| 27.059701 | 73 | 0.492554 | 1,629 | 0.898511 | 0 | 0 | 1,060 | 0.584666 | 0 | 0 | 791 | 0.436293 |
165b5afa3e28ca226423cdaac8f6894170030430 | 576 | py | Python | pyqt/getting_started/close_window.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | [
"MIT"
] | 5 | 2015-12-12T20:16:45.000Z | 2020-02-21T19:50:31.000Z | pyqt/getting_started/close_window.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | [
"MIT"
] | null | null | null | pyqt/getting_started/close_window.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | [
"MIT"
] | 2 | 2020-06-01T06:27:06.000Z | 2022-03-10T13:21:03.000Z | #!/usr/bin/python
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)
qbtn.resize(qbtn.sizeHint())
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Quit Button')
self.show()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 19.2 | 65 | 0.682292 | 365 | 0.633681 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.079861 |
165bd59707bf7d41b2fcb3dbf5d490a2e8660a09 | 732 | py | Python | test/means/test_zero_mean.py | bdecost/gpytorch | a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a | [
"MIT"
] | null | null | null | test/means/test_zero_mean.py | bdecost/gpytorch | a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a | [
"MIT"
] | null | null | null | test/means/test_zero_mean.py | bdecost/gpytorch | a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a | [
"MIT"
] | 1 | 2018-11-15T10:03:40.000Z | 2018-11-15T10:03:40.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from gpytorch.means import ZeroMean
class TestZeroMean(unittest.TestCase):
def setUp(self):
self.mean = ZeroMean()
def test_forward(self):
a = torch.Tensor([[1, 2], [2, 4]])
res = self.mean(a)
self.assertEqual(tuple(res.size()), (2,))
self.assertTrue(res.eq(0).all())
def test_forward_batch(self):
a = torch.Tensor([[[1, 2], [1, 2], [2, 4]], [[2, 3], [2, 3], [1, 3]]])
res = self.mean(a)
self.assertEqual(tuple(res.size()), (2, 3))
self.assertTrue(res.eq(0).all())
| 28.153846 | 78 | 0.629781 | 514 | 0.702186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
165bdb25d95d9e2ecf502312358485ebe1274976 | 1,948 | py | Python | generator/contact.py | rizzak/python_training | 38bbe5d7e38892e8dcc28caeae1481b98cce7356 | [
"Apache-2.0"
] | null | null | null | generator/contact.py | rizzak/python_training | 38bbe5d7e38892e8dcc28caeae1481b98cce7356 | [
"Apache-2.0"
] | null | null | null | generator/contact.py | rizzak/python_training | 38bbe5d7e38892e8dcc28caeae1481b98cce7356 | [
"Apache-2.0"
] | null | null | null | import jsonpickle
import random
import string
from model.contact import Contact
import os.path
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
getopt.usage()
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Contact(first_name="", middle_name="", last_name="", nickname="", title="", company="", address="",
home_tel="", mobile_tel="", work_tel="", fax="", email="", homepage="", birthday="",
anniversary="", secondary_address="", secondary_tel="", notes="")] + [
Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10),
nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10),
address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10),
work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10),
homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10),
secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10))
for i in range(n)  # honour the -n command-line option instead of a hard-coded 5
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file , "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata)) | 40.583333 | 153 | 0.664271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.159651 |
165cb63df5c2c12565813006cb857ecc7266b584 | 9,952 | py | Python | Lib/test/test_runpy.py | arvindm95/unladen-swallow | 8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3 | [
"PSF-2.0"
] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | python/src/Lib/test/test_runpy.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 315 | 2015-05-31T11:55:46.000Z | 2022-01-12T08:36:37.000Z | python/src/Lib/test/test_runpy.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z | # Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.test_support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.failUnless(d["result"] == self.expected_result)
self.failUnless(d["__name__"] is None)
self.failUnless(d["__file__"] is None)
self.failUnless(d["__loader__"] is None)
self.failUnless(d["__package__"] is None)
self.failUnless(d["run_argv0"] is saved_argv0)
self.failUnless("run_name" not in d)
self.failUnless(sys.argv[0] is saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.failUnless("result" not in d1)
self.failUnless(d2["initial"] is initial)
self.failUnless(d2["result"] == self.expected_result)
self.failUnless(d2["nested"]["x"] == 1)
self.failUnless(d2["__name__"] is name)
self.failUnless(d2["run_name_in_sys_modules"])
self.failUnless(d2["module_in_sys_modules"])
self.failUnless(d2["__file__"] is file)
self.failUnless(d2["run_argv0"] is file)
self.failUnless(d2["__loader__"] is loader)
self.failUnless(d2["__package__"] is package)
self.failUnless(sys.argv[0] is saved_argv0)
self.failUnless(name not in sys.modules)
class RunModuleTest(unittest.TestCase):
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package
self.expect_import_error("logging")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth):
pkg_name = "__runpy_pkg__"
test_fname = "runpy_test"+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + "runpy_test"
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.failUnless("x" in d1)
self.failUnless(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.failUnless("x" in d2)
self.failUnless(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.failUnless("__package__" in d1)
self.failUnless(d1["__package__"] == pkg_name)
self.failUnless("sibling" in d1)
self.failUnless("nephew" in d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.failUnless("__package__" in d2)
self.failUnless(d2["__package__"] == pkg_name)
self.failUnless("sibling" in d2)
self.failUnless("nephew" in d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
def test_main():
run_unittest(RunModuleCodeTest)
run_unittest(RunModuleTest)
if __name__ == "__main__":
test_main()
| 39.181102 | 82 | 0.60621 | 9,315 | 0.935993 | 0 | 0 | 0 | 0 | 0 | 0 | 2,586 | 0.259847 |
165d5b352de2106b373e88fa207e7c0361117e91 | 4,795 | py | Python | experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py | RedisAI/benchmarks | 65b8509b81795da73f25f51941c61fbd9765914c | [
"MIT"
] | 6 | 2019-04-18T10:17:52.000Z | 2021-07-02T19:57:08.000Z | experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py | hhsecond/benchmarks | 65b8509b81795da73f25f51941c61fbd9765914c | [
"MIT"
] | 1 | 2021-07-21T12:17:08.000Z | 2021-07-21T12:17:08.000Z | experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py | hhsecond/benchmarks | 65b8509b81795da73f25f51941c61fbd9765914c | [
"MIT"
] | 2 | 2020-03-15T00:37:57.000Z | 2022-02-26T04:36:00.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: imagedata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='imagedata.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0fimagedata.proto\"H\n\tImageData\x12\r\n\x05image\x18\x01 \x01(\x0c\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\r\n\x05\x64type\x18\x04 \x01(\t\"!\n\x0fPredictionClass\x12\x0e\n\x06output\x18\x01 \x03(\x02\x32<\n\tPredictor\x12/\n\rGetPrediction\x12\n.ImageData\x1a\x10.PredictionClass\"\x00\x62\x06proto3')
)
_IMAGEDATA = _descriptor.Descriptor(
name='ImageData',
full_name='ImageData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='ImageData.image', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='ImageData.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='ImageData.width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='ImageData.dtype', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=91,
)
_PREDICTIONCLASS = _descriptor.Descriptor(
name='PredictionClass',
full_name='PredictionClass',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output', full_name='PredictionClass.output', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=126,
)
DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA
DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict(
DESCRIPTOR = _IMAGEDATA,
__module__ = 'imagedata_pb2'
# @@protoc_insertion_point(class_scope:ImageData)
))
_sym_db.RegisterMessage(ImageData)
PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict(
DESCRIPTOR = _PREDICTIONCLASS,
__module__ = 'imagedata_pb2'
# @@protoc_insertion_point(class_scope:PredictionClass)
))
_sym_db.RegisterMessage(PredictionClass)
_PREDICTOR = _descriptor.ServiceDescriptor(
name='Predictor',
full_name='Predictor',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=128,
serialized_end=188,
methods=[
_descriptor.MethodDescriptor(
name='GetPrediction',
full_name='Predictor.GetPrediction',
index=0,
containing_service=None,
input_type=_IMAGEDATA,
output_type=_PREDICTIONCLASS,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PREDICTOR)
DESCRIPTOR.services_by_name['Predictor'] = _PREDICTOR
# @@protoc_insertion_point(module_scope)
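# Usage sketch (not produced by protoc; the payload and dimensions below are
# hypothetical placeholders): build, serialize and decode an ImageData message
# with the standard protobuf message API.
if __name__ == '__main__':
    _msg = ImageData(image=b'\x00' * (8 * 8 * 3), height=8, width=8, dtype='uint8')
    _wire = _msg.SerializeToString()        # bytes ready to send to the Predictor service
    _back = ImageData.FromString(_wire)     # decode the same bytes back into a message
    assert _back.height == 8 and _back.dtype == 'uint8'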
| 30.935484 | 365 | 0.740563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,037 | 0.216267 |
165e5478bb41b24d4a9ab5bce186c085b7367f24 | 4,937 | py | Python | app/api/admin_sales/discounted.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | 3 | 2019-09-05T05:28:49.000Z | 2020-06-10T09:03:37.000Z | app/api/admin_sales/discounted.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | null | null | null | app/api/admin_sales/discounted.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | null | null | null | from sqlalchemy import func
from flask_rest_jsonapi import ResourceList
from marshmallow_jsonapi import fields
from marshmallow_jsonapi.flask import Schema
from app.api.helpers.utilities import dasherize
from app.api.bootstrap import api
from app.models import db
from app.models.discount_code import DiscountCode
from app.models.event import Event
from app.models.order import Order, OrderTicket
from app.models.user import User
def sales_per_marketer_and_discount_by_status(status):
return db.session.query(Event.id.label('event_id'),
DiscountCode.id.label('discount_code_id'),
User.id.label('marketer_id'),
func.sum(Order.amount).label(status + '_sales'),
func.sum(OrderTicket.quantity).label(status + '_tickets')) \
.filter(Event.id == Order.event_id) \
.filter(Order.marketer_id == User.id) \
.filter(Order.discount_code_id == DiscountCode.id) \
.filter(Order.status == status) \
.group_by(Event) \
.group_by(DiscountCode) \
.group_by(User) \
.group_by(Order.status) \
.cte()
class AdminSalesDiscountedSchema(Schema):
"""
Discounted sales by event
Provides
Event name,
discount code,
        marketer email,
count of tickets and total sales for orders grouped by status
"""
class Meta:
type_ = 'admin-sales-discounted'
self_view = 'v1.admin_sales_discounted'
inflect = dasherize
id = fields.String()
code = fields.String()
email = fields.String()
event_name = fields.String()
payment_currency = fields.String()
sales = fields.Method('calc_sales')
@staticmethod
def calc_sales(obj):
"""
Returns sales (dictionary with total sales and ticket count) for
placed, completed and pending orders
"""
res = {'placed': {}, 'completed': {}, 'pending': {}}
res['placed']['sales_total'] = obj.placed_sales or 0
res['placed']['ticket_count'] = obj.placed_tickets or 0
res['completed']['sales_total'] = obj.completed_sales or 0
res['completed']['ticket_count'] = obj.completed_tickets or 0
res['pending']['sales_total'] = obj.pending_sales or 0
res['pending']['ticket_count'] = obj.pending_tickets or 0
return res
class AdminSalesDiscountedList(ResourceList):
"""
Resource for sales by marketer. Joins event marketer and orders and
subsequently accumulates sales by status
"""
def query(self, _):
pending = sales_per_marketer_and_discount_by_status('pending')
completed = sales_per_marketer_and_discount_by_status('completed')
placed = sales_per_marketer_and_discount_by_status('placed')
discounts = self.session.query(Event.id.label('event_id'),
Event.name.label('event_name'),
DiscountCode.id.label('discount_code_id'),
DiscountCode.code.label('code'),
User.id.label('marketer_id'),
User.email.label('email')) \
.filter(Event.id == Order.event_id) \
.filter(Order.marketer_id == User.id) \
.filter(Order.discount_code_id == DiscountCode.id) \
.cte()
return self.session.query(discounts, pending, completed, placed) \
.outerjoin(pending,
(pending.c.event_id == discounts.c.event_id) &
(pending.c.discount_code_id == discounts.c.discount_code_id) &
(pending.c.marketer_id == discounts.c.marketer_id)) \
.outerjoin(completed,
(completed.c.event_id == discounts.c.event_id) &
(completed.c.discount_code_id == discounts.c.discount_code_id) &
(completed.c.marketer_id == discounts.c.marketer_id)) \
.outerjoin(placed,
(placed.c.event_id == discounts.c.event_id) &
(placed.c.discount_code_id == discounts.c.discount_code_id) &
(placed.c.marketer_id == discounts.c.marketer_id))
methods = ['GET']
decorators = (api.has_permission('is_admin'), )
schema = AdminSalesDiscountedSchema
data_layer = {
'model': Event,
'session': db.session,
'methods': {
'query': query
}
}
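# Routing sketch: how this resource list is typically exposed through
# flask-rest-jsonapi. The Api instance and the URL below are assumptions and do
# not come from this module.
#
#     from flask_rest_jsonapi import Api
#     api = Api(app)
#     api.route(AdminSalesDiscountedList, 'admin_sales_discounted',
#               '/v1/admin/sales/discounted')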
| 41.838983 | 102 | 0.552157 | 3,642 | 0.737695 | 0 | 0 | 652 | 0.132064 | 0 | 0 | 880 | 0.178246 |
165e549759c53b8757e058aa4a4e0a0e6b69b060 | 407 | py | Python | spacy/lang/sr/__init__.py | g4brielvs/spaCy | cca8651fc8133172ebaa9d9fc438ed1fbf34fb33 | [
"BSD-3-Clause",
"MIT"
] | 4 | 2021-08-11T05:46:23.000Z | 2021-09-11T05:16:57.000Z | spacy/lang/sr/__init__.py | g4brielvs/spaCy | cca8651fc8133172ebaa9d9fc438ed1fbf34fb33 | [
"BSD-3-Clause",
"MIT"
] | 1 | 2021-03-01T19:01:37.000Z | 2021-03-01T19:01:37.000Z | spacy/lang/sr/__init__.py | g4brielvs/spaCy | cca8651fc8133172ebaa9d9fc438ed1fbf34fb33 | [
"BSD-3-Clause",
"MIT"
] | 2 | 2021-01-26T17:29:02.000Z | 2021-03-13T08:54:53.000Z | from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from ...language import Language
class SerbianDefaults(Language.Defaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Serbian(Language):
lang = "sr"
Defaults = SerbianDefaults
__all__ = ["Serbian"]
| 21.421053 | 54 | 0.781327 | 221 | 0.542998 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.031941 |
165e63725354de429a448d866f665cccca991916 | 656 | py | Python | mmdet/ops/dcn/__init__.py | TJUsym/TJU_Advanced_CV_Homework | 2d85943390e9ba53b80988e0ab8d50aef0cd17da | [
"Apache-2.0"
] | 1,158 | 2019-04-26T01:08:32.000Z | 2022-03-30T06:46:24.000Z | mmdet/ops/dcn/__init__.py | TJUsym/TJU_Advanced_CV_Homework | 2d85943390e9ba53b80988e0ab8d50aef0cd17da | [
"Apache-2.0"
] | 148 | 2021-03-18T09:44:02.000Z | 2022-03-31T06:01:39.000Z | mmdet/ops/dcn/__init__.py | TJUsym/TJU_Advanced_CV_Homework | 2d85943390e9ba53b80988e0ab8d50aef0cd17da | [
"Apache-2.0"
] | 197 | 2020-01-29T09:58:27.000Z | 2022-03-25T12:08:56.000Z | from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
DeformConvPack, ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
ModulatedDeformRoIPoolingPack)
__all__ = [
'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling'
]
| 46.857143 | 76 | 0.739329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.306402 |
165f2a4da2ed50464bfa13f0495fc689063e0199 | 1,189 | py | Python | api/skill/serializer.py | zaubermaerchen/imas_cg_api | 45ebdde8c47ff4fabbf58b75721721f142afb46b | [
"MIT"
] | 2 | 2016-02-01T21:03:53.000Z | 2018-10-20T09:15:12.000Z | api/skill/serializer.py | zaubermaerchen/imas_cg_api | 45ebdde8c47ff4fabbf58b75721721f142afb46b | [
"MIT"
] | 1 | 2020-01-05T12:50:35.000Z | 2020-01-05T12:50:35.000Z | api/skill/serializer.py | zaubermaerchen/imas_cg_api | 45ebdde8c47ff4fabbf58b75721721f142afb46b | [
"MIT"
] | null | null | null | # coding: utf-8
from rest_framework import serializers
from data.models import Skill, SkillValue
class ListSerializer(serializers.ModelSerializer):
skill_value_list = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Skill
fields = [
'skill_id',
'target_unit',
'target_member',
'target_type',
'target_num',
'target_param',
'skill_value_id',
'skill_value_list',
'comment'
]
@staticmethod
def get_skill_value_list(obj):
return SkillValue.get_value_list(obj.skill_value_id)
class Costar(object):
def __init__(self, name, count):
self.name = name
self.count = count
class CostarSerializer(serializers.Serializer):
name = serializers.CharField(max_length=255)
count = serializers.IntegerField()
def create(self, validated_data):
return Costar(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.count = validated_data.get('count', instance.count)
return instance
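# Usage sketch for the serializers above (field values are hypothetical):
#
#     serializer = CostarSerializer(data={'name': 'Haruka', 'count': 3})
#     if serializer.is_valid():
#         costar = serializer.save()   # builds a plain Costar via create()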
| 26.422222 | 72 | 0.64508 | 1,083 | 0.910849 | 0 | 0 | 109 | 0.091674 | 0 | 0 | 148 | 0.124474 |
1660d7a15a18998c6c8ae4f9e573b184061a0341 | 5,061 | py | Python | Codes/Converting_RGB_to_GreyScale.py | sichkar-valentyn/Image_processing_in_Python | 43d7c979bcd742cc202a28c2dea6ea5bc87562a2 | [
"MIT"
] | 3 | 2018-12-02T03:59:51.000Z | 2019-11-20T18:37:41.000Z | Codes/Converting_RGB_to_GreyScale.py | sichkar-valentyn/Image_processing_in_Python | 43d7c979bcd742cc202a28c2dea6ea5bc87562a2 | [
"MIT"
] | null | null | null | Codes/Converting_RGB_to_GreyScale.py | sichkar-valentyn/Image_processing_in_Python | 43d7c979bcd742cc202a28c2dea6ea5bc87562a2 | [
"MIT"
] | 2 | 2018-10-18T07:01:26.000Z | 2022-03-22T08:22:33.000Z | # File: Converting_RGB_to_GreyScale.py
# Description: Opening RGB image as array, converting to GreyScale and saving result into new file
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 Valentyn N Sichkar
# github.com/sichkar-valentyn
#
# Reference to:
# Valentyn N Sichkar. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603
# Opening RGB image as array, converting to GreyScale and saving result into new file
# Importing needed libraries
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage import color
from skimage import io
import scipy.misc
# Creating an array from image data
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
# Checking the type of the array
print(type(image_np)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_np.shape)
# Showing image with every channel separately
channel_R = image_np[:, :, 0]
channel_G = image_np[:, :, 1]
channel_B = image_np[:, :, 2]
# Creating a figure with subplots
f, ax = plt.subplots(nrows=2, ncols=2)
# ax is (2, 2) np array and to make it easier to read we use 'flatten' function
# Or we can call each time ax[0, 0]
ax0, ax1, ax2, ax3 = ax.flatten()
# Adjusting first subplot
ax0.imshow(channel_R, cmap='Reds')
ax0.set_xlabel('')
ax0.set_ylabel('')
ax0.set_title('Red channel')
# Adjusting second subplot
ax1.imshow(channel_G, cmap='Greens')
ax1.set_xlabel('')
ax1.set_ylabel('')
ax1.set_title('Green channel')
# Adjusting third subplot
ax2.imshow(channel_B, cmap='Blues')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax2.set_title('Blue channel')
# Adjusting fourth subplot
ax3.imshow(image_np)
ax3.set_xlabel('')
ax3.set_ylabel('')
ax3.set_title('Original image')
# Function to make distance between figures
plt.tight_layout()
# Giving the name to the window with figure
f.canvas.set_window_title('Eagle image in three channels R, G and B')
# Showing the plots
plt.show()
# Converting RGB image into GrayScale image
# Using formula:
# Y' = 0.299 R + 0.587 G + 0.114 B
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :, 2] * 0.114
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Preparing array for saving - creating three channels with the same data in each
# Firstly, creating array with zero elements
# And by 'image_GreyScale.shape + tuple([3])' we add one more element '3' to the tuple
# Now the shape will be (1080, 1920, 3) - which is tuple type
image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3]))
# Secondly, reshaping GreyScale image from 2D to 3D
x = image_GreyScale.reshape((1080, 1920, 1))
# Finally, writing all data in three channels
image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0]
# Saving image into a file from obtained 3D array
scipy.misc.imsave("images/result_1.jpg", image_GreyScale_with_3_channels)
# Checking that image was written with three channels and they are identical
result_1 = Image.open("images/result_1.jpg")
result_1_np = np.array(result_1)
print(result_1_np.shape)
print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1]))
print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2]))
# Showing saved resulted image
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Here we don't need to specify the map like cmap='Greys'
plt.imshow(result_1_np)
plt.show()
# Another way to convert RGB image into GreyScale image
image_RGB = io.imread("images/eagle.jpg")
image_GreyScale = color.rgb2gray(image_RGB)
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_2.jpg", image_GreyScale)
# One more way for converting
image_RGB_as_GreyScale = io.imread("images/eagle.jpg", as_gray=True)
# Checking the type of the array
print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_RGB_as_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_RGB_as_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_3.jpg", image_RGB_as_GreyScale)
| 33.966443 | 99 | 0.752223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,878 | 0.568662 |
1661f7c0c438355d7d875aa2c983973094881c84 | 3,193 | py | Python | template_renderer.py | hamza-gheggad/gcp-iam-collector | 02b46453b9ec23af07a0d81f7250f1de61e0ee23 | [
"Apache-2.0"
] | null | null | null | template_renderer.py | hamza-gheggad/gcp-iam-collector | 02b46453b9ec23af07a0d81f7250f1de61e0ee23 | [
"Apache-2.0"
] | null | null | null | template_renderer.py | hamza-gheggad/gcp-iam-collector | 02b46453b9ec23af07a0d81f7250f1de61e0ee23 | [
"Apache-2.0"
] | null | null | null | import colorsys
import json
from jinja2 import Environment, PackageLoader
import graph
def create_html(formatted_nodes, formatted_edges, role_color_map, output_name):
env = Environment(loader=PackageLoader('visualisation', '.'))
template = env.get_template('visualisation.template')
default_filters = list(graph.type_properties.keys())
all_roles=list(role_color_map.keys())
print(all_roles)
html = template.render(formatted_nodes=formatted_nodes,
formatted_edges=formatted_edges,
type_properties=graph.type_properties,
default_filters=default_filters,
all_roles=all_roles)
with open(output_name, "w+") as resource_file:
resource_file.write(html)
def get_description(node):
desc = node.get_type_name() + "</br>"
if node.title:
desc = desc + node.title + "</br>"
if node.properties:
for k, v in node.properties.items():
desc = desc + k + ": " + str(v) + "</br>"
return desc
def render(nodes, edges, output_name):
color_map = roles_to_color_map(edges=edges)
formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map)
create_html(formatted_nodes, formatted_edges, color_map, output_name)
def color_for_role(role, all_roles):
hue = float(all_roles.index(role)) / len(all_roles)
    # hsv_to_rgb returns floats in [0, 1]; scale to 0-255 before truncating to int
    return '#%02x%02x%02x' % tuple(int(c * 255) for c in colorsys.hsv_to_rgb(hue, 1, 0.85))
def sanitise_role(role):
return str(role).replace('roles/', '') \
.lower() \
.replace('writer', 'editor') \
.replace('reader', 'viewer')
def roles_to_color_map(edges):
all_roles = list({sanitise_role(e.role) for e in edges if e.role})
role_map = {}
for role in all_roles:
role_map[role] = color_for_role(role, all_roles)
role_map['other'] = '#00c0ff'
return role_map
def format_graph(nodes, edges, role_color_map):
nodes_list = []
node_ids = {}
for counter, node in enumerate(nodes):
node_ids[node.id] = counter
value = {
'id': counter,
'shape': 'icon',
'label': node.name,
'type': node.node_type,
'icon': {
'face': 'Font Awesome 5 Free',
'code': node.get_font_code(),
'size': node.get_size(),
'color': node.get_color(),
'weight': 'bold'
}
}
description = get_description(node)
if description:
value['title'] = description
nodes_list.append(json.dumps(value).replace("\\\\", "\\"))
edges_list = []
for edge in edges:
value = {
'from': node_ids[edge.node_from.id],
'to': node_ids[edge.node_to.id],
'arrows': 'to',
}
if edge.label:
value['label'] = edge.label
if edge.title:
value['title'] = edge.title
value['role'] = sanitise_role(edge.role) if edge.role else 'other'
value['color'] = role_color_map[value['role']]
edges_list.append(json.dumps(value))
return nodes_list, edges_list | 31.303922 | 91 | 0.593173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 319 | 0.099906 |
166293ba707b563d24827825716e3e79a6848c40 | 13,007 | py | Python | powerapi/cli/tools.py | danglotb/powerapi | 67b2508588bfe1e20d90f9fe6bccda34d3455262 | [
"BSD-3-Clause"
] | null | null | null | powerapi/cli/tools.py | danglotb/powerapi | 67b2508588bfe1e20d90f9fe6bccda34d3455262 | [
"BSD-3-Clause"
] | null | null | null | powerapi/cli/tools.py | danglotb/powerapi | 67b2508588bfe1e20d90f9fe6bccda34d3455262 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2018, INRIA
# Copyright (c) 2018, University of Lille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import logging
from functools import reduce
from powerapi.exception import PowerAPIException
from powerapi.cli.parser import MainParser, ComponentSubParser
from powerapi.cli.parser import store_true
from powerapi.cli.parser import BadValueException, MissingValueException
from powerapi.cli.parser import BadTypeException, BadContextException
from powerapi.cli.parser import UnknowArgException
from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel
from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB
from powerapi.puller import PullerActor
from powerapi.pusher import PusherActor
def enable_log(arg, val, args, acc):
acc[arg] = logging.DEBUG
return args, acc
def check_csv_files(files):
return reduce(lambda acc, f: acc and os.access(f, os.R_OK), files.split(','), True)
def extract_file_names(arg, val, args, acc):
acc[arg] = val.split(',')
return args, acc
class CommonCLIParser(MainParser):
def __init__(self):
MainParser.__init__(self)
self.add_argument('v', 'verbose', flag=True, action=enable_log, default=logging.NOTSET,
help='enable verbose mode')
self.add_argument('s', 'stream', flag=True, action=store_true, default=False, help='enable stream mode')
subparser_mongo_input = ComponentSubParser('mongodb')
        subparser_mongo_input.add_argument('u', 'uri', help='specify MongoDB uri')
subparser_mongo_input.add_argument('d', 'db', help='specify MongoDB database name', )
subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection')
subparser_mongo_input.add_argument('n', 'name', help='specify puller name', default='puller_mongodb')
        subparser_mongo_input.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='HWPCReport')
self.add_component_subparser('input', subparser_mongo_input,
                                     help_str='specify a database input : --input database_name ARG1 ARG2 ... ')
subparser_csv_input = ComponentSubParser('csv')
subparser_csv_input.add_argument('f', 'files',
help='specify input csv files with this format : file1,file2,file3',
action=extract_file_names, default=[], check=check_csv_files,
check_msg='one or more csv files couldn\'t be read')
        subparser_csv_input.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='HWPCReport')
subparser_csv_input.add_argument('n', 'name', help='specify puller name', default='puller_csv')
self.add_component_subparser('input', subparser_csv_input,
                                     help_str='specify a database input : --input database_name ARG1 ARG2 ... ')
subparser_mongo_output = ComponentSubParser('mongodb')
        subparser_mongo_output.add_argument('u', 'uri', help='specify MongoDB uri')
subparser_mongo_output.add_argument('d', 'db', help='specify MongoDB database name')
subparser_mongo_output.add_argument('c', 'collection', help='specify MongoDB database collection')
        subparser_mongo_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_mongo_output.add_argument('n', 'name', help='specify pusher name', default='pusher_mongodb')
self.add_component_subparser('output', subparser_mongo_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ...')
subparser_csv_output = ComponentSubParser('csv')
subparser_csv_output.add_argument('d', 'directory',
                                          help='specify directory where output csv files will be written')
        subparser_csv_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_csv_output.add_argument('n', 'name', help='specify pusher name', default='pusher_csv')
self.add_component_subparser('output', subparser_csv_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ... ')
subparser_influx_output = ComponentSubParser('influxdb')
        subparser_influx_output.add_argument('u', 'uri', help='specify InfluxDB uri')
subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name')
subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection port', type=int)
        subparser_influx_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_influx_output.add_argument('n', 'name', help='specify pusher name', default='pusher_influxdb')
self.add_component_subparser('output', subparser_influx_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ... ')
subparser_opentsdb_output = ComponentSubParser('opentsdb')
        subparser_opentsdb_output.add_argument('u', 'uri', help='specify OpenTSDB host')
subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int)
subparser_opentsdb_output.add_argument('metric_name', help='specify metric name')
        subparser_opentsdb_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_opentsdb_output.add_argument('n', 'name', help='specify pusher name', default='pusher_opentsdb')
self.add_component_subparser('output', subparser_opentsdb_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ... ')
def parse_argv(self):
try:
return self.parse(sys.argv[1:])
except BadValueException as exn:
msg = 'CLI error : argument ' + exn.argument_name + ' : ' + exn.msg
print(msg, file=sys.stderr)
except MissingValueException as exn:
msg = 'CLI error : argument ' + exn.argument_name + ' : expect a value'
print(msg, file=sys.stderr)
except BadTypeException as exn:
msg = 'CLI error : argument ' + exn.argument_name + ' : expect '
msg += exn.article + ' ' + exn.type_name
print(msg, file=sys.stderr)
except UnknowArgException as exn:
            msg = 'CLI error : unknown argument ' + exn.argument_name
print(msg, file=sys.stderr)
except BadContextException as exn:
msg = 'CLI error : argument ' + exn.argument_name
msg += ' not used in the correct context\nUse it with the following arguments :'
for main_arg_name, context_name in exn.context_list:
msg += '\n --' + main_arg_name + ' ' + context_name
print(msg, file=sys.stderr)
sys.exit()
class Generator:
def __init__(self, component_group_name):
self.component_group_name = component_group_name
def generate(self, config):
if self.component_group_name not in config:
print('CLI error : no ' + self.component_group_name + ' specified', file=sys.stderr)
sys.exit()
actors = {}
for component_type, components_list in config[self.component_group_name].items():
for component_name, component_config in components_list.items():
try:
actors[component_name] = self._gen_actor(component_type, component_config, config)
except KeyError as exn:
msg = 'CLI error : argument ' + exn.args[0]
msg += ' needed with --output ' + component_type
print(msg, file=sys.stderr)
sys.exit()
return actors
def _gen_actor(self, component_name, component_config, main_config):
raise NotImplementedError()
class ModelNameAlreadyUsed(PowerAPIException):
"""
Exception raised when attempting to add to a DBActorGenerator a model factory with a name already bound to another
model factory in the DBActorGenerator
"""
class DatabaseNameAlreadyUsed(PowerAPIException):
"""
Exception raised when attempting to add to a DBActorGenerator a database factory with a name already bound to another
database factory in the DBActorGenerator
"""
class DBActorGenerator(Generator):
def __init__(self, component_group_name):
Generator.__init__(self, component_group_name)
self.model_factory = {
'HWPCReport': HWPCModel(),
'PowerReport': PowerModel(),
'FormulaReport': FormulaModel(),
'ControlReport': ControlModel(),
}
self.db_factory = {
'mongodb': lambda db_config: MongoDB(db_config['uri'], db_config['db'], db_config['collection']),
'csv': lambda db_config: CsvDB(current_path=os.getcwd() if 'directory' not in db_config else db_config['directory'],
files=[] if 'files' not in db_config else db_config['files']),
'influxdb': lambda db_config: InfluxDB(db_config['uri'], db_config['port'], db_config['db']),
'opentsdb': lambda db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']),
}
def add_model_factory(self, model_name, model_factory):
if model_name in self.model_factory:
raise ModelNameAlreadyUsed()
self.model_factory[model_name] = model_factory
def add_db_factory(self, db_name, db_factory):
        if db_name in self.db_factory:
            raise DatabaseNameAlreadyUsed()
        self.db_factory[db_name] = db_factory
def _generate_db(self, db_name, db_config, main_config):
return self.db_factory[db_name](db_config)
def _gen_actor(self, db_name, db_config, main_config):
db = self._generate_db(db_name, db_config, main_config)
model = self.model_factory[db_config['model']]
name = db_config['name']
return self._actor_factory(name, db, model, main_config['stream'], main_config['verbose'])
def _actor_factory(self, name, db, model, stream_mode, level_logger):
raise NotImplementedError()
class PullerGenerator(DBActorGenerator):
def __init__(self, report_filter):
DBActorGenerator.__init__(self, 'input')
self.report_filter = report_filter
def _actor_factory(self, name, db, model, stream_mode, level_logger):
return PullerActor(name, db, self.report_filter, model, stream_mode, level_logger)
class PusherGenerator(DBActorGenerator):
def __init__(self):
DBActorGenerator.__init__(self, 'output')
def _actor_factory(self, name, db, model, stream_mode, level_logger):
return PusherActor(name, model, db, level_logger)
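# Wiring sketch for these helpers; the report Filter import and its use are
# assumptions, everything else comes from the classes defined above.
#
#     from powerapi.filter import Filter
#
#     config = CommonCLIParser().parse_argv()
#     report_filter = Filter()
#     pullers = PullerGenerator(report_filter).generate(config)   # name -> PullerActor
#     pushers = PusherGenerator().generate(config)                # name -> PusherActor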
| 49.268939 | 128 | 0.667948 | 10,464 | 0.80449 | 0 | 0 | 0 | 0 | 0 | 0 | 4,409 | 0.338971 |
1662a331dbe1e237d08e9e21a3e8d596bcbce6c4 | 2,477 | py | Python | pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 27 | 2018-06-15T15:28:18.000Z | 2022-03-10T12:23:50.000Z | pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 22 | 2018-06-14T08:29:16.000Z | 2021-07-05T13:33:44.000Z | pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 8 | 2019-04-13T13:03:51.000Z | 2021-06-19T09:29:11.000Z | # coding=UTF-8
# ex:ts=4:sw=4:et=on
#
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
from mvc.models.properties import StringProperty
from pyxrd.generic.io.custom_io import storables, Storable
from pyxrd.generic.models.base import DataModel
from pyxrd.refinement.refinables.mixins import RefinementGroup
@storables.register()
class InSituBehaviour(DataModel, RefinementGroup, Storable):
"""
Interface class for coding in-situ behaviour scripts.
Sub-classes should override or implement the methods below.
"""
# MODEL INTEL:
class Meta(DataModel.Meta):
store_id = "InSituBehaviour" # Override this so it is a unique string
concrete = False # Indicates this cannot be instantiated and added in the UI
mixture = property(DataModel.parent.fget, DataModel.parent.fset)
# REFINEMENT GROUP IMPLEMENTATION:
@property
def refine_title(self):
return "In-situ behaviour"
@property
def refine_descriptor_data(self):
return dict(
phase_name=self.phase.refine_title,
component_name="*"
)
#: The name of this Behaviour
name = StringProperty(
default="New Behaviour", text="Name",
visible=True, persistent=True, tabular=True
)
# ------------------------------------------------------------
# Initialization and other internals
# ------------------------------------------------------------
def __init__(self, *args, **kwargs):
my_kwargs = self.pop_kwargs(kwargs,
*[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()]
)
super(InSituBehaviour, self).__init__(*args, **kwargs)
kwargs = my_kwargs
with self.data_changed.hold():
self.name = self.get_kwarg(kwargs, self.name, "name")
pass #end of constructor
# ------------------------------------------------------------
# Methods & Functions
# ------------------------------------------------------------
def apply(self, phase):
assert phase is not None, "Cannot apply on None"
assert self.is_compatible_with(phase), "`%r` is not compatible with phase `%r`" % (self, phase)
def is_compatible_with(self, phase):
return False # sub classes need to override this
pass #end of class | 34.402778 | 103 | 0.583771 | 2,082 | 0.840533 | 0 | 0 | 2,104 | 0.849415 | 0 | 0 | 972 | 0.39241 |
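# Illustrative sub-class sketch (hypothetical, not part of PyXRD): shows which
# members a concrete behaviour is expected to override; the behaviour logic
# itself is left as a placeholder.
@storables.register()
class ExampleBehaviour(InSituBehaviour):
    class Meta(InSituBehaviour.Meta):
        store_id = "ExampleBehaviour"  # must stay unique per concrete sub-class
        concrete = True                # allow it to be instantiated and added in the UI
    def apply(self, phase):
        super(ExampleBehaviour, self).apply(phase)  # runs the compatibility assertions
        # ... adjust the phase for the current in-situ conditions here ...
    def is_compatible_with(self, phase):
        return phase is not None
    pass #end of class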
16635cf724808862aeb33d75c907fed77d96d1fc | 857 | py | Python | 1 plainProgrammingBug/start 1 plainProgrammingBug.py | vishalbelsare/SLAPP3 | da187b771831aaaabaee16a26ad341db2e968104 | [
"CC0-1.0"
] | 8 | 2017-10-18T05:19:17.000Z | 2020-03-24T21:23:52.000Z | 1 plainProgrammingBug/start 1 plainProgrammingBug.py | vishalbelsare/SLAPP3 | da187b771831aaaabaee16a26ad341db2e968104 | [
"CC0-1.0"
] | null | null | null | 1 plainProgrammingBug/start 1 plainProgrammingBug.py | vishalbelsare/SLAPP3 | da187b771831aaaabaee16a26ad341db2e968104 | [
"CC0-1.0"
] | 4 | 2017-10-25T09:07:49.000Z | 2019-08-18T09:17:58.000Z | # start 1 plainProgrammingBug.py
import random
def SimpleBug():
# the environment
worldXSize = 80
worldYSize = 80
# the bug
xPos = 40
yPos = 40
# the action
for i in range(100):
xPos += randomMove()
yPos += randomMove()
xPos = (xPos + worldXSize) % worldXSize
yPos = (yPos + worldYSize) % worldYSize
print ("I moved to X = ", xPos, " Y = ", yPos)
# returns -1, 0, 1 with equal probability
def randomMove():
return random.randint(-1, 1)
SimpleBug()
"""
you can eliminate the randomMove() function substituting
xPos += randomMove()
yPos += randomMove()
with
xPos += random.randint(-1, 1)
yPos += random.randint(-1, 1)
but the use of the function allows us to use here a self-explanatory
name
"""
| 19.930233 | 69 | 0.568261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.500583 |
166407e573ed13b6f495ddb118b6bb572fdf1148 | 423 | py | Python | ba5a-min-coins/money_change.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | [
"MIT"
] | null | null | null | ba5a-min-coins/money_change.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | [
"MIT"
] | null | null | null | ba5a-min-coins/money_change.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | [
"MIT"
] | null | null | null |
money = 8074
#money = 18705
#coin_list = [24,23,21,5,3,1]
coin_list = [24,13,12,7,5,3,1]
#coin_list = map(int, open('dataset_71_8.txt').read().split(','))
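# Bottom-up dynamic programming: d[m] is the minimum number of coins needed for
# amount m, computed as the minimum over coins c <= m of d[m-c] + 1.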
d = {0:0}
for m in range(1,money+1):
min_coin = 1000000
for coin in coin_list:
if m >= coin:
if d[m-coin]+1 < min_coin:
min_coin = d[m-coin]+1
d[m] = min_coin
#print d
print d[money]
| 18.391304 | 66 | 0.51773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.293144 |
1665579643c424a545b6a8b3af94a1a9e0f4f184 | 357 | py | Python | examples/remove_comments.py | igordejanovic/textx-bibtex | b1374a39b96da9c1bc979c367b9ed3feb04f4f01 | [
"MIT"
] | 1 | 2020-06-17T21:51:33.000Z | 2020-06-17T21:51:33.000Z | examples/remove_comments.py | igordejanovic/textx-bibtex | b1374a39b96da9c1bc979c367b9ed3feb04f4f01 | [
"MIT"
] | null | null | null | examples/remove_comments.py | igordejanovic/textx-bibtex | b1374a39b96da9c1bc979c367b9ed3feb04f4f01 | [
"MIT"
] | null | null | null | """
Remove comments from bib file.
"""
from textx import metamodel_for_language
from txbibtex import bibentry_str
BIB_FILE = 'references.bib'
bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE)
# Drop line comments.
print('\n'.join([bibentry_str(e) for e in bibfile.entries
if e.__class__.__name__ != 'BibLineComment']))
| 27.461538 | 68 | 0.739496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.288515 |
1665f41d1c03f32167e2cea236d3cf7a022b6b61 | 3,202 | py | Python | google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | [
"Apache-2.0"
] | 4 | 2020-07-21T12:51:46.000Z | 2022-01-22T10:29:25.000Z | google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | [
"Apache-2.0"
] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create resource policy command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils as compute_api
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags
from googlecloudsdk.command_lib.compute.resource_policies import util
def _CommonArgs(parser, api_version):
"""A helper function to build args based on different API version."""
messages = apis.GetMessagesModule('compute', api_version)
flags.MakeResourcePolicyArg().AddArgument(parser)
flags.AddCommonArgs(parser)
flags.AddGroupPlacementArgs(parser, messages)
parser.display_info.AddCacheUpdater(None)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateGroupPlacement(base.CreateCommand):
"""Create a Google Compute Engine Group Placement Resource Policy."""
@staticmethod
def Args(parser):
_CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
messages = holder.client.messages
resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages)
create_request = messages.ComputeResourcePoliciesInsertRequest(
resourcePolicy=resource_policy,
project=policy_ref.project,
region=policy_ref.region)
service = holder.client.apitools_client.resourcePolicies
return client.MakeRequests([(service, 'Insert', create_request)])[0]
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateGroupPlacementBeta(CreateGroupPlacement):
"""Create a Google Compute Engine Group Placement Resource Policy."""
@staticmethod
def Args(parser):
_CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION)
CreateGroupPlacement.detailed_help = {
'DESCRIPTION':
"""\
Create a Google Compute Engine Group Placement Resource Policy.
""",
'EXAMPLES':
"""\
To create a Google Compute Engine Group Placement Resource policy with 2 VMs and 2 availability domains, run:
$ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2
"""
}
| 37.232558 | 109 | 0.777327 | 1,194 | 0.372892 | 0 | 0 | 1,283 | 0.400687 | 0 | 0 | 1,173 | 0.366334 |
16661518293e1bbad26be3766a9addb9bc564758 | 629 | py | Python | paperoni/io.py | notoraptor/paperoni | acdf2d3d790b98d6a171177ffd9d6342f86bc7ea | [
"MIT"
] | 88 | 2020-08-27T17:58:58.000Z | 2021-12-01T19:29:56.000Z | paperoni/io.py | notoraptor/paperoni | acdf2d3d790b98d6a171177ffd9d6342f86bc7ea | [
"MIT"
] | 8 | 2020-08-27T02:54:11.000Z | 2022-02-01T13:35:41.000Z | paperoni/io.py | notoraptor/paperoni | acdf2d3d790b98d6a171177ffd9d6342f86bc7ea | [
"MIT"
] | 6 | 2020-08-25T16:43:28.000Z | 2021-12-08T16:41:02.000Z | import json
from .papers import Papers
from .researchers import Researchers
def ResearchersFile(filename):
"""Parse a file containing researchers."""
try:
with open(filename, "r") as file:
data = json.load(file)
except FileNotFoundError:
data = {}
return Researchers(data, filename=filename)
def PapersFile(filename, researchers=None):
"""Parse a file containing papers."""
try:
with open(filename, "r") as file:
data = json.load(file)
except FileNotFoundError:
data = {}
return Papers(data, filename=filename, researchers=researchers)
| 25.16 | 67 | 0.655008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.135135 |
16666943ca1f78d9acd45c2909883bd0b65b734d | 934 | py | Python | src/lib/sd2/test_addresses.py | zachkont/sd2 | 92d8c55a8c7ac51c00ba514be01955aa7162e4ef | [
"Apache-2.0"
] | null | null | null | src/lib/sd2/test_addresses.py | zachkont/sd2 | 92d8c55a8c7ac51c00ba514be01955aa7162e4ef | [
"Apache-2.0"
] | null | null | null | src/lib/sd2/test_addresses.py | zachkont/sd2 | 92d8c55a8c7ac51c00ba514be01955aa7162e4ef | [
"Apache-2.0"
] | null | null | null | #############################################################################
# Copyright (c) 2017 SiteWare Corp. All right reserved
#############################################################################
import logging
import pytest
from . import addresses
def test_pytest():
assert True
def test_object_exists():
assert addresses.cidr_db
def test_new_address():
address = addresses.cidr_db.get_address_for_host('test_test_foo')
assert address
assert address >= addresses.cidr_db.first_address()
assert address <= addresses.cidr_db.last_address()
addresses.cidr_db.reload()
assert addresses.cidr_db.get_address_for_host('test_test_foo') == address
assert addresses.cidr_db.has('test_test_foo')
addresses.cidr_db.forget('test_test_foo')
assert not addresses.cidr_db.has('test_test_foo')
addresses.cidr_db.reload()
assert not addresses.cidr_db.has('test_test_foo')
| 30.129032 | 77 | 0.626338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.319058 |
166739b28ed7ffa22c5f71499709f1fd302bd933 | 1,914 | py | Python | config_model.py | Asha-ai/BERT_abstractive_proj | f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9 | [
"Apache-2.0"
] | 17 | 2020-01-11T15:15:21.000Z | 2021-12-08T10:03:36.000Z | config_model.py | Asha-ai/BERT_abstractive_proj | f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9 | [
"Apache-2.0"
] | 6 | 2020-03-01T17:14:58.000Z | 2021-05-21T16:05:03.000Z | config_model.py | Asha-ai/BERT_abstractive_proj | f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9 | [
"Apache-2.0"
] | 8 | 2020-05-11T21:24:51.000Z | 2021-07-23T09:18:46.000Z | import texar.tf as tx
beam_width = 5
hidden_dim = 768
bert = {
'pretrained_model_name': 'bert-base-uncased'
}
# See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams
bert_encoder = {}
# From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45
# with adjustments for BERT
decoder = {
'dim': hidden_dim,
'num_blocks': 6,
'multihead_attention': {
'num_heads': 8,
'output_dim': hidden_dim
},
'initializer': {
'type': 'variance_scaling_initializer',
'kwargs': {
'scale': 1.0,
'mode': 'fan_avg',
'distribution': 'uniform',
},
},
'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim)
}
loss_label_confidence = 0.9
opt = {
'optimizer': {
'type': 'AdamOptimizer',
'kwargs': {
'beta1': 0.9,
'beta2': 0.997,
'epsilon': 1e-9
}
}
}
lr = {
# The 'learning_rate_schedule' can have the following 3 values:
# - 'static' -> A simple static learning rate, specified by 'static_lr'
# - 'aiayn' -> The learning rate used in the "Attention is all you need" paper.
# - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer example
'learning_rate_schedule': 'aiayn',
# The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
'lr_constant': 2 * (hidden_dim ** -0.5),
# The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
'warmup_steps': 4000,
# The static learning rate, when 'static' is used.
'static_lr': 1e-3,
# A multiplier that can be applied to the 'aiayn' learning rate.
'aiayn_multiplier': 0.2
}
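# Reference sketch of the 'aiayn' ("Attention is all you need") schedule named
# above, in its usual Noam form; the use of 'aiayn_multiplier' and the exact
# step handling are assumptions, and the training script may differ in details.
def aiayn_learning_rate(step):
    step = max(step, 1)
    return lr['aiayn_multiplier'] * (hidden_dim ** -0.5) * \
        min(step ** -0.5, step * lr['warmup_steps'] ** -1.5)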
| 31.377049 | 128 | 0.653083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,275 | 0.666144 |
16677a6fe2ff1b1e4b01bda4446f100594d88c8e | 390 | py | Python | wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | e35c59686e5ec81925c22353e269601f286634db | [
"MIT"
] | null | null | null | wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | e35c59686e5ec81925c22353e269601f286634db | [
"MIT"
] | null | null | null | wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | e35c59686e5ec81925c22353e269601f286634db | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-29 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wishes', '0004_auto_20201029_0857'),
]
operations = [
migrations.AlterField(
model_name='gallery',
name='image',
field=models.FilePathField(path='/images'),
),
]
| 20.526316 | 55 | 0.594872 | 297 | 0.761538 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.269231 |
166802c5b61892041a13896dbed6ef514fd83df2 | 7,115 | py | Python | undeployed/legacy/Landsat/DNtoReflectance.py | NASA-DEVELOP/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 65 | 2015-09-10T12:59:56.000Z | 2022-02-27T22:09:03.000Z | undeployed/legacy/Landsat/DNtoReflectance.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 40 | 2015-04-08T19:23:30.000Z | 2015-08-04T15:53:11.000Z | undeployed/legacy/Landsat/DNtoReflectance.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 45 | 2015-08-14T19:09:38.000Z | 2022-02-15T18:53:16.000Z | #-------------------------------------------------------------------------------
# Name: Landsat Digital Numbers to Radiance/Reflectance
# Purpose: To convert landsat 4,5, or 7 pixel values from digital numbers
# to Radiance, Reflectance, or Temperature
# Author: Quinten Geddes [email protected]
# NASA DEVELOP Program
# Created: 19/10/2012
#-------------------------------------------------------------------------------
import arcpy
import math
arcpy.CheckOutExtension("Spatial")
def DNtoReflectance(Lbands,MetaData,OutputType="Reflectance/Temperature",Save=False,OutputFolder=""):
"""This function is used to convert Landsat 4,5, or 7 pixel values from
digital numbers to Radiance, Reflectance, or Temperature (if using Band 6)
-----Inputs------
Lbands: GeoTIFF files containing individual bands of Landsat imagery. These
must have the original names as downloaded and must be from a single scene.
MetaData: The metadata text file that is downloaded with the Landsat Bands themselves.
This may be either the old or new MTL.txt file.
OutputType: Choose whether the output should be:
"Radiance"
"Reflectance/Temperature" - Calculates Reflectance for spectral bands
and Temperature in Kelvin for Thermal bands
    Save: Boolean value that indicates whether the output rasters will be saved permanently
            Each band will be saved as an individual GeoTIFF file and be named
            according to the original filename and the output pixel unit
*if this is true, then the OutputFolder variable must also be set
OutputFolder: Folder in which to save the output rasters
-----Outputs-----
A list of arcpy raster objects in a sequence that mirrors that of the input Lbands
"""
OutList=[]
#These lists will be used to parse the meta data text file and locate relevant information
#metadata format was changed August 29, 2012. This tool can process either the new or old format
newMeta=['LANDSAT_SCENE_ID = "','DATE_ACQUIRED = ',"SUN_ELEVATION = ",
"RADIANCE_MAXIMUM_BAND_{0} = ","RADIANCE_MINIMUM_BAND_{0} = ",
"QUANTIZE_CAL_MAX_BAND_{0} = ","QUANTIZE_CAL_MIN_BAND_{0} = "]
oldMeta=['BAND1_FILE_NAME = "',"ACQUISITION_DATE = ","SUN_ELEVATION = ",
"LMAX_BAND{0} = ","LMIN_BAND{0} = ",
"QCALMAX_BAND{0} = ","QCALMIN_BAND{0} = "]
f=open(MetaData)
MText=f.read()
#the presence of a PRODUCT_CREATION_TIME category is used to identify old metadata
#if this is not present, the meta data is considered new.
#Band6length refers to the length of the Band 6 name string. In the new metadata this string is longer
if "PRODUCT_CREATION_TIME" in MText:
Meta=oldMeta
Band6length=2
else:
Meta=newMeta
Band6length=8
    #The tilename is located using the newMeta/oldMeta indices and the date of capture is recorded
if Meta==newMeta:
TileName=MText.split(Meta[0])[1].split('"')[0]
year=TileName[9:13]
jday=TileName[13:16]
elif Meta==oldMeta:
TileName=MText.split(Meta[0])[1].split('"')[0]
year=TileName[13:17]
jday=TileName[17:20]
date=MText.split(Meta[1])[1].split('\n')[0]
    #the spacecraft from which the imagery was captured is identified
#this info determines the solar exoatmospheric irradiance (ESun) for each band
spacecraft=MText.split('SPACECRAFT_ID = "')[1].split('"')[0]
ThermBands=["6"]
if "7" in spacecraft:
ESun=(1969.0,1840.0,1551.0,1044.0,255.700,0. ,82.07,1368.00)
ThermBands=["B6_VCID_1","B6_VCID_2"]
elif "5" in spacecraft: ESun=(1957.0,1826.0,1554.0,1036.0,215.0 ,0. ,80.67)
elif "4" in spacecraft: ESun=(1957.0,1825.0,1557.0,1033.0,214.9 ,0. ,80.72)
elif "8" in spacecraft:
ESun=(1857.0,1996.0,1812.0,1516.0,983.3 ,251.8,85.24,0.0,389.3,0.,0.)
ThermBands=["10","11"]
else:
arcpy.AddError("This tool only works for Landsat 4, 5, 7 or 8 ")
raise arcpy.ExecuteError()
    #determining if year is leap year and setting the Days in year accordingly
if float(year) % 4 ==0: DIY=366.
else:DIY=365.
    #using the date to determine the distance from the sun
theta =2*math.pi*float(jday)/DIY
dSun2 = (1.00011 + 0.034221*math.cos(theta) + 0.001280*math.sin(theta) +
0.000719*math.cos(2*theta)+ 0.000077*math.sin(2*theta) )
SZA=90.-float(MText.split(Meta[2])[1].split("\n")[0])
#Calculating values for each band
for pathname in Lbands:
try:
BandNum=pathname.split("\\")[-1].split("B")[1][0:2]
try: int(BandNum)
except: BandNum=pathname.split("\\")[-1].split("B")[1][0]
except:
msg="Error reading Band {0}. Bands must have original names as downloaded.".format(str(inputbandnum))
arcpy.AddError(msg)
print msg
raise arcpy.ExecuteError
#changing Band 6 name to match metadata
if BandNum=="6" and spacecraft[8]=="7":
BandNum=pathname.split("\\")[-1].split("B")[1][0:Band6length]
print "Processing Band {0}".format(BandNum)
Oraster=arcpy.Raster(pathname)
        #using the oldMeta/newMeta indices to pull the min/max for radiance/Digital numbers
LMax= float(MText.split(Meta[3].format(BandNum))[1].split("\n")[0])
LMin= float(MText.split(Meta[4].format(BandNum))[1].split("\n")[0])
QCalMax=float(MText.split(Meta[5].format(BandNum))[1].split("\n")[0])
QCalMin=float(MText.split(Meta[6].format(BandNum))[1].split("\n")[0])
Radraster=(((LMax - LMin)/(QCalMax-QCalMin)) * (Oraster - QCalMin)) +LMin
Oraster=0
if OutputType=="Radiance":
Radraster.save("{0}\\{1}_B{2}_Radiance.tif".format(OutputFolder,TileName,BandNum))
Radraster=0
elif OutputType=="Reflectance/Temperature":
#Calculating temperature for band 6 if present
if BandNum in ThermBands:
Refraster=1282.71/(arcpy.sa.Ln((666.09/Radraster)+1.0))
BandPath="{0}\\{1}_B{2}_Temperature.tif".format(OutputFolder,TileName,BandNum)
arcpy.AddMessage("Proceeded through if")
#Otherwise calculate reflectance
else:
Refraster=( math.pi * Radraster * dSun2) / (ESun[int(BandNum[0])-1] * math.cos(SZA*math.pi/180) )
BandPath="{0}\\{1}_B{2}_TOA_Reflectance.tif".format(OutputFolder,TileName,BandNum)
arcpy.AddMessage("Proceeded through else")
if Save==True:
Refraster.save(BandPath)
OutList.append(arcpy.Raster(BandPath))
else:
OutList.append(Refraster)
del Refraster,Radraster
arcpy.AddMessage( "Reflectance Calculated for Band {0}".format(BandNum))
print "Reflectance Calculated for Band {0}".format(BandNum)
f.close()
return OutList
| 42.100592 | 113 | 0.619115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,621 | 0.508925 |
1668b92419e5394d4eb735fba074c84b5eb16b19 | 1,396 | py | Python | .modules/.theHarvester/discovery/twittersearch.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.theHarvester/discovery/twittersearch.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z | .modules/.theHarvester/discovery/twittersearch.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 262 | 2017-09-16T22:15:50.000Z | 2022-03-31T00:38:42.000Z | import string
import requests
import sys
import myparser
import re
class search_twitter:
def __init__(self, word, limit):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = "www.google.com"
self.hostname = "www.google.com"
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7"
self.quantity = "100"
self.limit = int(limit)
self.counter = 0
def do_search(self):
try:
urly="https://"+ self.server + "/search?num=100&start=" + str(self.counter) + "&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20" + self.word
except Exception, e:
print e
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}
try:
r=requests.get(urly,headers=headers)
except Exception,e:
print e
self.results = r.content
self.totalresults += self.results
def get_people(self):
rawres = myparser.parser(self.totalresults, self.word)
return rawres.people_twitter()
def process(self):
while (self.counter < self.limit):
self.do_search()
self.counter += 100
print "\tSearching " + str(self.counter) + " results.."
| 32.465116 | 169 | 0.592407 | 1,326 | 0.949857 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.256447 |
166903b8515452d27e1a1b1b4a84d3d174d4f220 | 708 | py | Python | scrap_instagram.py | genaforvena/nn_scrapper | 897766a52202aa056afd657995ed39b2b91e1fe2 | [
"Apache-2.0"
] | null | null | null | scrap_instagram.py | genaforvena/nn_scrapper | 897766a52202aa056afd657995ed39b2b91e1fe2 | [
"Apache-2.0"
] | null | null | null | scrap_instagram.py | genaforvena/nn_scrapper | 897766a52202aa056afd657995ed39b2b91e1fe2 | [
"Apache-2.0"
] | null | null | null | import urllib.request
import json
access_token = "265791501.a4af066.f45a9f44719a4b2cb2d137118524e32b"
api_url = "https://api.instagram.com/v1"
nn_lat = 56.296504
nn_lng = 43.936059
def request(endpoint, req_params = ""):
req = api_url + endpoint + "?access_token=" + access_token + "&" + req_params
print(req)
raw_response = urllib.request.urlopen(req).read()
return json.loads(raw_response.decode('utf8'))
locations = request("/locations/search", "lat=" + str(nn_lat) + "&lng=" + str(nn_lng))["data"]
print(locations)
for location in locations:
location_id = location["id"]
location_media = request("/locations/" + str(location_id) + "/media/recent")
print(location_media)
| 29.5 | 94 | 0.706215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.252825 |
16693286bda8fc5cb36e02f9aa7765ff20fcfe4e | 7,066 | py | Python | tests/unit/utils/test_validators.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | [
"MIT"
] | null | null | null | tests/unit/utils/test_validators.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | [
"MIT"
] | null | null | null | tests/unit/utils/test_validators.py | kajusK/HiddenPlaces | aa976f611a419bc33f8a65f0314956ec09fe2bfd | [
"MIT"
] | null | null | null | """Unit tests for app.validators. """
from wtforms import ValidationError
import flask
from pytest import raises
from app.utils.validators import password_rules, image_file, allowed_file
class DummyField(object):
"""Dummy field object to emulate wtforms field."""
def __init__(self, data=None, errors=(), raw_data=None):
self.data = data
self.errors = list(errors)
self.raw_data = raw_data
def gettext(self, string):
return string
def ngettext(self, singular, plural, n):
return singular
class DummyForm(dict):
"""Dummy form object to emulate wtforms form."""
pass
class DummyFile(object):
"""Dummy file like class to emulate uploaded file handler."""
def __init__(self, filename):
self.filename = filename
def __repr__(self):
return self.filename
def _run_validator_check(subtests, validator, valid, invalid):
"""Runs tests again validator with valid and invalid inputs.
Args:
subtest: Subtests fixture.
validator: Validator instance to run tests against
valid: List of valid inputs
invalid: List of invalid inputs
"""
field = DummyField()
for item in valid:
field.data = item
with subtests.test(item=item):
validator(DummyForm(), field)
for item in invalid:
field.data = item
with subtests.test(item=item):
with raises(ValidationError):
validator(DummyForm(), field)
def test_allowed_file(subtests, req_context):
validator = allowed_file()
extensions = ['exe', 'html']
valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo']
invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html']
valid = [DummyFile(x) for x in valid]
invalid = [DummyFile(x) for x in invalid]
flask.current_app.config['DISABLED_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_allowed_file_multiple(subtests, req_context):
validator = allowed_file()
extensions = ['exe', 'html']
valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo']
invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html']
valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])],
[DummyFile(valid[0]), DummyFile(valid[1])]]
invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])],
[DummyFile(invalid[0]), DummyFile(invalid[1])]]
flask.current_app.config['DISABLED_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_allowed_file_message(req_context):
validator = allowed_file(message="custom message")
field = DummyField()
field.data = DummyFile("blah.foo")
flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo']
with flask.current_app.test_request_context():
with raises(ValidationError) as e:
validator(DummyForm(), field)
assert str(e.value) == "custom message"
def test_image_file(subtests, req_context):
validator = image_file()
extensions = ['jpg', 'png', 'tiff']
valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg']
invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif']
valid = [DummyFile(x) for x in valid]
invalid = [DummyFile(x) for x in invalid]
flask.current_app.config['IMAGE_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_image_file_multiple(subtests, req_context):
validator = image_file()
extensions = ['jpg', 'png', 'tiff']
valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg']
invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif']
valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])],
[DummyFile(valid[0]), DummyFile(valid[1])]]
invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])],
[DummyFile(invalid[0]), DummyFile(invalid[1])]]
flask.current_app.config['IMAGE_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_image_file_message(req_context):
validator = image_file(message="custom message")
field = DummyField()
field.data = DummyFile("blah")
flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo']
with flask.current_app.test_request_context():
with raises(ValidationError) as e:
validator(DummyForm(), field)
assert str(e.value) == "custom message"
def test_password_rules_length(subtests):
validator = password_rules(length=6, upper=None, lower=None, numeric=None,
special=None)
valid = ["as123.21", "abcdef", "sdadadaswasasa", "1234567", "...,.,..,",
"AAAAAAA", "AbCdEf"]
invalid = ["abc", "123", "....", "aBcDe", "a1.V3"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_upper(subtests):
validator = password_rules(length=6, upper=2, lower=None, numeric=None,
special=None)
valid = ["abcDEf", "HellOO", "ABCDEZ", "A.b#3CZ", "ADSDSA"]
invalid = ["abcdEf", "helloo", "A231sdsd"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_lower(subtests):
validator = password_rules(length=6, upper=None, lower=3, numeric=None,
special=None)
valid = ["abcdefg", "axzBAR", "123abcdsa", "AbCdEfGh", "..as..2ds.."]
invalid = ["foOBAR", "123ABcdSA", "1a2b.C#"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_numeric(subtests):
validator = password_rules(length=6, upper=None, lower=None, numeric=2,
special=None)
valid = ["1bcd4A.d", "123456", "a?9#.0"]
invalid = ["2ds.#<", "abcdef", "ABCDEF", "x2U.'Q"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_special(subtests):
validator = password_rules(length=6, upper=None, lower=None, numeric=None,
special=3)
valid = ["ab.?123!", ".#@dS9", "abcdef123><?"]
invalid = ["abcdef", ".23134", "AbCd123,]"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_all(subtests):
validator = password_rules(length=6, upper=2, lower=1, numeric=1,
special=1)
valid = ["ABc1.2", "abcDEF123#%^", "a2B.C?"]
invalid = ["helloo", "ABCDEF", "Ab1.?c"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_message(subtests):
validator = password_rules(length=100, message="custom message")
field = DummyField()
field.data = "wrong"
with raises(ValidationError) as e:
validator(DummyForm(), field)
assert str(e.value) == "custom message"
| 35.686869 | 78 | 0.644495 | 653 | 0.092414 | 0 | 0 | 0 | 0 | 0 | 0 | 1,501 | 0.212426 |
166add4d1cc09be73d6135b394a15f57ecfca1b9 | 615 | py | Python | ts_eval/utils/nans.py | vshulyak/ts-eval | 2049b1268cf4272f5fa1471851523f8da14dd84c | [
"MIT"
] | 1 | 2021-07-12T08:58:07.000Z | 2021-07-12T08:58:07.000Z | ts_eval/utils/nans.py | vshulyak/ts-eval | 2049b1268cf4272f5fa1471851523f8da14dd84c | [
"MIT"
] | null | null | null | ts_eval/utils/nans.py | vshulyak/ts-eval | 2049b1268cf4272f5fa1471851523f8da14dd84c | [
"MIT"
] | null | null | null | import warnings
import numpy as np
def nans_in_same_positions(*arrays):
"""
Compares all provided arrays to see if they have NaNs in the same positions.
"""
if len(arrays) == 0:
return True
for arr in arrays[1:]:
if not (np.isnan(arrays[0]) == np.isnan(arr)).all():
return False
return True
def nanmeanw(arr, axis=None):
"""
Computes nanmean without raising a warning in case of NaNs in the dataset
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
return np.nanmean(arr, axis=axis)
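# Illustrative usage (array values chosen arbitrarily):
#
#   a = np.array([1.0, np.nan, 3.0])
#   b = np.array([2.0, np.nan, 4.0])
#   nans_in_same_positions(a, b)  # True: NaNs occupy the same index in both
#   nanmeanw(a)                   # 2.0, computed without a RuntimeWarning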
| 24.6 | 80 | 0.642276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.307317 |
166b671e9115e476c69bab6e6077599dd6b6cdea | 5,434 | py | Python | tests/authorization/test_searches.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z | tests/authorization/test_searches.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z | tests/authorization/test_searches.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z | """Unit tests of authorization searches."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def authorization_search_class_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'AUTHORIZATION',
proxy=PROXY,
implementation=request.cls.service_config)
create_form = request.cls.svc_mgr.get_vault_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
def class_tear_down():
request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def authorization_search_test_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.search = request.cls.catalog.get_authorization_search()
@pytest.mark.usefixtures("authorization_search_class_fixture", "authorization_search_test_fixture")
class TestAuthorizationSearch(object):
"""Tests for AuthorizationSearch"""
@pytest.mark.skip('unimplemented test')
def test_search_among_authorizations(self):
"""Tests search_among_authorizations"""
pass
@pytest.mark.skip('unimplemented test')
def test_order_authorization_results(self):
"""Tests order_authorization_results"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_search_record(self):
"""Tests get_authorization_search_record"""
pass
@pytest.mark.usefixtures("authorization_search_results_class_fixture", "authorization_search_results_test_fixture")
class TestAuthorizationSearchResults(object):
"""Tests for AuthorizationSearchResults"""
@pytest.mark.skip('unimplemented test')
def test_get_authorizations(self):
"""Tests get_authorizations"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_query_inspector(self):
"""Tests get_authorization_query_inspector"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_search_results_record(self):
"""Tests get_authorization_search_results_record"""
pass
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def vault_search_class_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'AUTHORIZATION',
proxy=PROXY,
implementation=request.cls.service_config)
create_form = request.cls.svc_mgr.get_vault_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
def class_tear_down():
request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def vault_search_test_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.search = request.cls.catalog.get_vault_search()
@pytest.mark.usefixtures("vault_search_class_fixture", "vault_search_test_fixture")
class TestVaultSearch(object):
"""Tests for VaultSearch"""
@pytest.mark.skip('unimplemented test')
def test_search_among_vaults(self):
"""Tests search_among_vaults"""
pass
@pytest.mark.skip('unimplemented test')
def test_order_vault_results(self):
"""Tests order_vault_results"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_search_record(self):
"""Tests get_vault_search_record"""
pass
@pytest.mark.usefixtures("vault_search_results_class_fixture", "vault_search_results_test_fixture")
class TestVaultSearchResults(object):
"""Tests for VaultSearchResults"""
@pytest.mark.skip('unimplemented test')
def test_get_vaults(self):
"""Tests get_vaults"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_query_inspector(self):
"""Tests get_vault_query_inspector"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_search_results_record(self):
"""Tests get_vault_search_results_record"""
pass
| 36.469799 | 176 | 0.749724 | 2,108 | 0.387928 | 0 | 0 | 4,744 | 0.873022 | 0 | 0 | 1,892 | 0.348178 |
166ccaa355ece2f923c461999fa3eb16171b7163 | 350 | py | Python | mechroutines/models/_flux.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | 1 | 2022-03-22T20:47:04.000Z | 2022-03-22T20:47:04.000Z | mechroutines/models/_flux.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | 1 | 2021-02-12T21:11:16.000Z | 2021-12-07T21:32:14.000Z | mechroutines/models/_flux.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | 8 | 2019-12-18T20:09:46.000Z | 2020-11-14T16:37:28.000Z | """
NEW: Handle flux files
"""
import autofile
def read_flux(ts_save_path, vrc_locs=(0,)):
""" Read the geometry from the filesys
"""
vrc_fs = autofile.fs.vrctst(ts_save_path)
if vrc_fs[-1].file.flux.exists(vrc_locs):
flux_str = vrc_fs[-1].file.flux.read(vrc_locs)
else:
flux_str = None
return flux_str
| 18.421053 | 54 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.222857 |
166ddfdb964d4dc41f4f840af0cda8cfbfe5a687 | 4,990 | py | Python | RandomForest/RandomForest.py | nachiket273/ML_Algo_Implemented | 74ae47fdf620545fdf8c934c5997784faadaebb7 | [
"MIT"
] | 7 | 2020-08-03T13:43:53.000Z | 2022-02-18T20:38:51.000Z | RandomForest/RandomForest.py | nachiket273/ML_Algo_Implemented | 74ae47fdf620545fdf8c934c5997784faadaebb7 | [
"MIT"
] | null | null | null | RandomForest/RandomForest.py | nachiket273/ML_Algo_Implemented | 74ae47fdf620545fdf8c934c5997784faadaebb7 | [
"MIT"
] | 2 | 2020-09-06T21:54:16.000Z | 2022-01-22T19:59:33.000Z | import math
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import sys
import os
sys.path.append(os.path.abspath('../DecisionTree'))
from DecisionTree import DecisionTree
class RandomForest(BaseEstimator):
"""
Simple implementation of Random Forest.
This class has implementation for Random Forest classifier and regressor.
Dataset bagging is done by simple numpy random choice with replacement.
For classification the prediction is by majority vote.
    For regression trees the prediction is the average of all estimator predictions.
Args:
n_estimators Number of base estimators (Decision Trees here)
max_features Maximum features to be used to construct tree.
Default:
- If classifier, default is square root of total
features.
- If regressor, default is total number of features.
        max_depth The maximum depth to which estimators need to be constructed.
Default: np.inf
        min_samples_split Minimum number of samples that need to be present for a split at the
node.
Default: 2
        criterion criterion to be used for the split.
                For classification trees the following criteria are supported:
                - gini
                - entropy
                For regression trees the following criteria are supported:
                - mse (mean squared error)
                - mae (mean absolute error)
                Default: gini
random_seed random seed value for numpy operations.
Default: 0
"""
def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2,
criterion='gini', random_seed=0):
self.n_estimators = n_estimators
self.max_features = max_features
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.criterion = criterion
self.random_seed = random_seed
self.idxs = []
self.trees = []
for i in range(self.n_estimators):
self.trees.append(DecisionTree(max_depth= self.max_depth,
min_samples_split=self.min_samples_split,
max_features = self.max_features,
criterion=self.criterion,
random_seed = self.random_seed))
self.is_classification_forest = False
if self.criterion == 'gini' or self.criterion == 'entropy':
self.is_classification_forest = True
elif self.criterion == 'mse' or self.criterion == 'mae':
self.is_classification_forest = False
else:
raise Exception("Invalid criterion: {}".format(self.criterion))
def get_subsets(self, X, y, num=1):
subsets = []
if len(np.shape(y)) == 1:
y = np.expand_dims(y, axis=1)
Xy = np.concatenate((X, y), axis=1)
num_samples = X.shape[0]
np.random.shuffle(Xy)
rng = np.random.default_rng(seed= self.random_seed)
for _ in range(num):
idx = rng.choice(
range(num_samples),
size = np.shape(range(int(num_samples)), ),
replace=True
)
subsets.append([X[idx], y[idx]])
return subsets
def fit(self, X, y):
np.random.seed(self.random_seed)
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
subsets = self.get_subsets(X, y, self.n_estimators)
if self.max_features == 0:
if self.is_classification_forest:
self.max_features = int(math.sqrt(X.shape[1]))
else:
self.max_features = int(X.shape[1])
# Bagging - choose random features for each estimator
# if max_features is provided, else use square root of
# total number of features.
for i, _ in enumerate(self.trees):
self.trees[i].max_features = self.max_features
X_sub, y_sub = subsets[i]
self.trees[i].fit(X_sub, y_sub)
def predict(self, X):
all_preds = np.empty((X.shape[0], self.n_estimators))
for i, tree in enumerate(self.trees):
preds = tree.predict(X)
all_preds[:, i] = preds
y_preds = []
for preds in all_preds:
if self.is_classification_forest:
y_preds.append(np.bincount(preds.astype('int')).argmax())
else:
y_preds.append(np.average(preds))
            return y_preds
 | 40.901639 | 93 | 0.546293 | 4,787 | 0.959319 | 0 | 0 | 0 | 0 | 0 | 0 | 1,834 | 0.367535 |
166e1671aebcb4e327d8e4f8b8b62dc58ec16062 | 556 | py | Python | tests/basics/generator_pend_throw.py | iotctl/pycopy | eeb841afea61b19800d054b3b289729665fc9aa4 | [
"MIT"
] | 663 | 2018-12-30T00:17:59.000Z | 2022-03-14T05:03:41.000Z | tests/basics/generator_pend_throw.py | iotctl/pycopy | eeb841afea61b19800d054b3b289729665fc9aa4 | [
"MIT"
] | 41 | 2019-06-06T08:31:19.000Z | 2022-02-13T16:53:41.000Z | tests/basics/generator_pend_throw.py | iotctl/pycopy | eeb841afea61b19800d054b3b289729665fc9aa4 | [
"MIT"
] | 60 | 2019-06-01T04:25:00.000Z | 2022-02-25T01:47:31.000Z | def gen():
i = 0
while 1:
yield i
i += 1
g = gen()
try:
g.pend_throw
except AttributeError:
print("SKIP")
raise SystemExit
print(next(g))
print(next(g))
g.pend_throw(ValueError())
v = None
try:
v = next(g)
except Exception as e:
print("raised", repr(e))
print("ret was:", v)
# It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g = gen()
g.pend_throw(ValueError())
try:
next(g)
except ValueError:
print("ValueError from just-started gen")
| 15.444444 | 73 | 0.624101 | 0 | 0 | 64 | 0.115108 | 0 | 0 | 0 | 0 | 167 | 0.30036 |
166e4003ce5bc54874ebae493377303b4c270f29 | 4,511 | py | Python | src/UnitTypes/ProjectileModule.py | USArmyResearchLab/ARL_Battlespace | 2f17a478f62c20a4db387d5d3e4bbeaa3197cd49 | [
"MIT"
] | 1 | 2022-03-31T19:15:04.000Z | 2022-03-31T19:15:04.000Z | src/UnitTypes/ProjectileModule.py | USArmyResearchLab/ARL_Battlespace | 2f17a478f62c20a4db387d5d3e4bbeaa3197cd49 | [
"MIT"
] | null | null | null | src/UnitTypes/ProjectileModule.py | USArmyResearchLab/ARL_Battlespace | 2f17a478f62c20a4db387d5d3e4bbeaa3197cd49 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 09:49:47 2020
@author: james.z.hare
"""
from src.UnitModule import UnitClass, advance
from copy import deepcopy
import math
class ProjectileClass(UnitClass):
"""
The Projectile Class
    This is a subclass of the UnitClass
Virtual Functions
-----------------
- `__copy__()` to make shallow copies
- `__deepcopy__(memo)` to make deep copies
- `possibleActions(State)` to identify legal actions
- `observe(Unit)` to observe units located within VisibleRange
- `overlaps(Unit)` to identify if the unit overlaps with another unit
- `execute(Action, State)` to execute the action
Attributes
----------
ID:
a unique identifier of this unit
Owner:
the player the unit belongs to
Health:
the health of the unit
Extent:
the space occupied by unit
Position:
location of unit
Orientation:
as the name says
VisibleRange:
how far the unit can observe
Actions: dict
        dictionary of actions common across all units
ActionOptions:
list of list of action options.
Attack:
int that defines whether the unit is attacking in an advance action
    RemainingLifetime:
int that defines the total number of turns until the unit is dead
"""
def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf):
UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1))
self.Actions = { "advance": lambda x: advance(self, x) }
self.ActionOptions = ( ( "advance", ), )
self.Attack = None
self.RemainingLifetime = RemainingLifetime
def __copy__(self):
Duplicate = ProjectileClass(self.ID, self.Owner, self.Health)
Duplicate.Position = self.Position
Duplicate.Orientation = self.Orientation
Duplicate.Attack = self.Attack
Duplicate.RemainingLifetime = self.RemainingLifetime
return Duplicate
def __deepcopy__(self, memo):
Default = None
Exists = memo.get(self, Default)
if Exists is not Default:
return Exists
Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo))
Duplicate.Position = deepcopy(self.Position, memo)
Duplicate.Orientation = deepcopy(self.Orientation, memo)
Duplicate.Attack = deepcopy(self.Attack, memo)
Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo)
memo[self] = Duplicate
return Duplicate
def possibleActions(self, State):
"""
Identifies the set of feasible actions given the board size and position of the unit
Parameters
----------
State: StateClass
Returns
-------
TrueActions: list[str]
A list of the feasible actions
"""
return self.ActionOptions
def observe(self, Unit):
if Unit.ID == self.ID:
return Unit
return None
def overlaps(self, Unit):
MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for y in range(self.Extent[1]) ])
#print(Unit)
TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ])
return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0
def execute(self, Actions, State):
"""
Execute `Actions` on `State`.
Parameters
----------
Actions : list[str]
A set of actions to be performed on `State`.
State : StateClass
State on which to inflict actions.
Returns
-------
Changes : list
Resulting state of executed `Actions`.
"""
NewState = deepcopy(State)
Changes = []
for Action in Actions:
ActionResult = self.Actions[Action](NewState)
ActionResult[1].RemainingLifetime -= 1
if isinstance(ActionResult, list):
Changes += ActionResult
else:
Changes.append(ActionResult)
return Changes
# Will be used as the projectile for the missile launcher unit
class MissileClass(ProjectileClass):
    def __init__(self, ID, Owner, Position, Life=1):
        # ProjectileClass has no 'Positon'/'Life' parameters; map them onto its
        # actual signature (Health=1 is an assumed default for a one-hit projectile).
        ProjectileClass.__init__(self, ID, Owner, Health=1, RemainingLifetime=Life)
        self.Position = Position
 | 32.221429 | 158 | 0.62137 | 4,269 | 0.946353 | 0 | 0 | 0 | 0 | 0 | 0 | 1,963 | 0.435159 |
166ed868a00e2876de6024b3dcf661e7d6afc455 | 216 | py | Python | OOP_MiniQuiz/run_car_Level2.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | [
"MIT"
] | null | null | null | OOP_MiniQuiz/run_car_Level2.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | [
"MIT"
] | null | null | null | OOP_MiniQuiz/run_car_Level2.py | HelloYeew/helloyeew-lab-computer-programming-i | 60b05072f32f23bab4a336b506ba7f66e52c045d | [
"MIT"
] | null | null | null | from car import *
def compare(car1,car2):
print(car1)
print(car2)
car1 = Car("Nissan","Tiida",450000)
car2 = Car("Toyota","Vios",400000)
car3 = Car("BMW","X3",3400000)
compare(car3,car1)
compare(car1,car2)
 | 18 | 35 | 0.671296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.175926 |
166f10041a007d09adb3797f8fd4bf54942b5eeb | 1,513 | py | Python | prelude/monads.py | michel-slm/python-prelude | b3ca89ff2bf150f772764f59d2796d2fcce1013d | [
"MIT"
] | 2 | 2015-05-12T16:12:56.000Z | 2020-08-26T20:52:47.000Z | prelude/monads.py | michel-slm/python-prelude | b3ca89ff2bf150f772764f59d2796d2fcce1013d | [
"MIT"
] | null | null | null | prelude/monads.py | michel-slm/python-prelude | b3ca89ff2bf150f772764f59d2796d2fcce1013d | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
from prelude.typeclasses import Monad
from prelude.decorators import monad_eq, singleton
@monad_eq
class Either(Monad):
__metaclass__ = ABCMeta
@classmethod
def mreturn(cls, val):
return Right(val)
@abstractmethod
def __iter__(self):
pass
class Left(Either):
def __init__(self, val):
self.__val = val
def __rshift__(self, f):
return self
def __iter__(self):
return iter([])
def __eq__(self, other):
return type(self) == type(other)
def __repr__(self):
return "Left({})".format(self.__val)
class Right(Either):
def __init__(self, val):
self.__val = val
def __rshift__(self, f):
return f(self.__val)
def __iter__(self):
yield self.__val
def __repr__(self):
return "Right({})".format(self.__val)
class Maybe(Monad):
__metaclass__ = ABCMeta
@classmethod
def mreturn(cls, val):
return Just(val)
@abstractmethod
def __iter__(self):
pass
@monad_eq
class Just(Maybe):
def __init__(self, val):
self.__val = val
def __rshift__(self, f):
return f(self.__val)
def __iter__(self):
yield self.__val
def __repr__(self):
return "Just({})".format(self.__val)
@singleton
class Nothing(Maybe):
def __rshift__(self, f):
return self
def __iter__(self):
return iter([])
def __repr__(self):
return "Nothing()"
| 18.9125 | 50 | 0.613351 | 1,341 | 0.886319 | 88 | 0.058163 | 746 | 0.49306 | 0 | 0 | 42 | 0.027759 |
16715a2b77e2526acf8bf40591ec7bc531389bde | 848 | py | Python | Deep Sort/src/imgconverter.py | JJavier98/TFG-Dron-de-Vigilancia | 7fd68a981854ac480ad2f0c936a0dd58d2a9f38b | [
"MIT"
] | null | null | null | Deep Sort/src/imgconverter.py | JJavier98/TFG-Dron-de-Vigilancia | 7fd68a981854ac480ad2f0c936a0dd58d2a9f38b | [
"MIT"
] | null | null | null | Deep Sort/src/imgconverter.py | JJavier98/TFG-Dron-de-Vigilancia | 7fd68a981854ac480ad2f0c936a0dd58d2a9f38b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('msgs_to_cv2')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/bebop/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
cv2.imshow("hola", cv_image)
cv2.waitKey(3)
def main(args):
  ic = image_converter()
  rospy.init_node('image_converter', anonymous=True)
  try:
    rospy.spin()
  except KeyboardInterrupt:
    print("Shutting down")
  cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| 20.190476 | 77 | 0.741745 | 325 | 0.383255 | 0 | 0 | 0 | 0 | 0 | 0 | 197 | 0.232311 |
16718d7813439bbbc33bc80e98b6e4741d2b5b6c | 261 | py | Python | foodx_devops_tools/azure/__init__.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 3 | 2021-06-23T20:53:43.000Z | 2022-01-26T14:19:43.000Z | foodx_devops_tools/azure/__init__.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 33 | 2021-08-09T15:44:51.000Z | 2022-03-03T18:28:02.000Z | foodx_devops_tools/azure/__init__.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 1 | 2021-06-23T20:53:52.000Z | 2021-06-23T20:53:52.000Z | # Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Azure related utilities."""
| 29 | 73 | 0.731801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.969349 |
16725a52de27142aa18864c727dddea44204b666 | 5,940 | py | Python | beartype/vale/__init__.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | [
"MIT"
] | null | null | null | beartype/vale/__init__.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | [
"MIT"
] | null | null | null | beartype/vale/__init__.py | posita/beartype | e56399686e1f2ffd5128a4030b19314504e32450 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype validators.**
This submodule publishes a PEP-compliant hierarchy of subscriptable (indexable)
classes enabling callers to validate the internal structure of arbitrarily
complex scalars, data structures, and third-party objects. Like annotation
objects defined by the :mod:`typing` module (e.g., :attr:`typing.Union`), these
classes dynamically generate PEP-compliant type hints when subscripted
(indexed) and are thus intended to annotate callables and variables. Unlike
annotation objects defined by the :mod:`typing` module, these classes are *not*
explicitly covered by existing PEPs and thus *not* directly usable as
annotations.
Instead, callers are expected to (in order):
#. Annotate callable parameters and returns to be validated with
:pep:`593`-compliant :attr:`typing.Annotated` type hints.
#. Subscript those hints with (in order):
#. The type of those parameters and returns.
#. One or more subscriptions of classes declared by this submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.vale._is._valeis import _IsFactory
from beartype.vale._is._valeistype import (
_IsInstanceFactory,
_IsSubclassFactory,
)
from beartype.vale._is._valeisobj import _IsAttrFactory
from beartype.vale._is._valeisoper import _IsEqualFactory
# ....................{ SINGLETONS }....................
# Public factory singletons instantiating these private factory classes.
Is = _IsFactory(basename='Is')
IsAttr = _IsAttrFactory(basename='IsAttr')
IsEqual = _IsEqualFactory(basename='IsEqual')
IsInstance = _IsInstanceFactory(basename='IsInstance')
IsSubclass = _IsSubclassFactory(basename='IsSubclass')
# Delete all private factory classes imported above for safety.
del (
_IsFactory,
_IsAttrFactory,
_IsEqualFactory,
_IsInstanceFactory,
_IsSubclassFactory,
)
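# ....................{ USAGE EXAMPLE }....................
# Minimal usage sketch of the factories exported above (illustrative only;
# requires "typing.Annotated", i.e. Python >= 3.9 or typing_extensions):
#
#     from typing import Annotated
#     from beartype import beartype
#     from beartype.vale import Is
#
#     NonEmptyString = Annotated[str, Is[lambda text: bool(text)]]
#
#     @beartype
#     def munge_it(text: NonEmptyString) -> str:
#         return text + '!'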
# ....................{ TODO }....................
#FIXME: As intelligently requested by @Saphyel at #32, add support for
#additional classes support constraints resembling:
#
#* String constraints:
# * Email.
# * Uuid.
# * Choice.
# * Language.
# * Locale.
# * Country.
# * Currency.
#* Comparison constraints
# * IdenticalTo.
# * NotIdenticalTo.
# * LessThan.
# * GreaterThan.
# * Range.
# * DivisibleBy.
#FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same
#signature and docstring as the existing CauseSleuth.get_cause_or_none()
#method. This new BeartypeValidator.get_cause_or_none() method should then be
#called by the "_peperrorannotated" submodule to generate human-readable
#exception messages. Note that this implies that:
#* The BeartypeValidator.__init__() method will need to additionally accept a new
# mandatory "get_cause_or_none: Callable[[], Optional[str]]" parameter, which
# that method should then localize to "self.get_cause_or_none".
#* Each __class_getitem__() dunder method of each "_BeartypeValidatorFactoryABC" subclass will need
# to additionally define and pass that callable when creating and returning
# its "BeartypeValidator" instance.
#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we can
#leverage all of our existing "beartype.is" infrastructure to dynamically
#synthesize PEP-compliant type hints that would then be implicitly supported by
#any runtime type checker. At present, subscriptions of "Is" (e.g.,
#"Annotated[str, Is[lambda text: bool(text)]]") are only supported by beartype
#itself. Of course, does anyone care? I mean, if you're using a runtime type
#checker, you're probably *ONLY* using beartype. Right? That said, this would
#technically improve portability by allowing users to switch between different
#checkers... except not really, since they'd still have to import beartype
#infrastructure to do so. So, this is probably actually useless.
#
#Nonetheless, the idea itself is trivial. We declare a new
#"beartype.is.Portable" singleton accessed in the same way: e.g.,
# from beartype import beartype
# from beartype.is import Portable
# NonEmptyStringTest = Is[lambda text: bool(text)]
# NonEmptyString = Portable[str, NonEmptyStringTest]
# @beartype
# def munge_it(text: NonEmptyString) -> str: ...
#
#So what's the difference between "typing.Annotated" and "beartype.is.Portable"
#then? Simple. The latter dynamically generates one new PEP 3119-compliant
#metaclass and associated class whenever subscripted. Clearly, this gets
#expensive in both space and time consumption fast -- which is why this won't
#be the default approach. For safety, this new class does *NOT* subclass the
#first subscripted class. Instead:
#* This new metaclass of this new class simply defines an __isinstancecheck__()
# dunder method. For the above example, this would be:
# class NonEmptyStringMetaclass(object):
# def __isinstancecheck__(cls, obj) -> bool:
# return isinstance(obj, str) and NonEmptyStringTest(obj)
#* This new class would then be entirely empty. For the above example, this
# would be:
# class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass):
# pass
#
#Well, so much for brilliant. It's slow and big, so it seems doubtful anyone
#would actually do that. Nonetheless, that's food for thought for you.
| 45.343511 | 99 | 0.711616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,295 | 0.891414 |
16730d6f4856a5911d4dfcf4a29a2f5449a0ddb0 | 3,536 | py | Python | tests/test_authentication.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | [
"MIT"
] | null | null | null | tests/test_authentication.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | [
"MIT"
] | 3 | 2015-01-31T14:53:06.000Z | 2015-02-01T19:04:30.000Z | tests/test_authentication.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | [
"MIT"
] | 2 | 2015-01-31T14:54:28.000Z | 2018-03-05T17:33:42.000Z | import unittest
from mock import Mock
import base64
from cellardoor import errors
from cellardoor.authentication import *
from cellardoor.authentication.basic import BasicAuthIdentifier
class FooIdentifier(Identifier):
pass
class BarAuthenticator(Authenticator):
pass
class TestAuthentication(unittest.TestCase):
def test_abstract_identifier(self):
id = Identifier()
with self.assertRaises(NotImplementedError):
id.identify({})
def test_abstract_authenticator(self):
auth = Authenticator()
with self.assertRaises(NotImplementedError):
auth.authenticate({})
def test_bad_identifier(self):
self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())])
def test_bad_authenticator(self):
self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)])
def test_middleware(self):
identifier = FooIdentifier()
identifier.identify = Mock(return_value='foo')
authenticator = BarAuthenticator()
authenticator.authenticate = Mock(return_value='bar')
app = Mock(return_value=[])
middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)])
environ = {'skidoo':23}
middleware(environ, lambda: None)
identifier.identify.assert_called_once_with(environ)
authenticator.authenticate.assert_called_once_with('foo')
self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'})
def test_middleware_skip(self):
id_one = FooIdentifier()
id_one.identify = Mock(return_value=None)
id_two = FooIdentifier()
id_two.identify = Mock(return_value='two')
id_three = FooIdentifier()
id_three.identify = Mock(return_value='three')
auth_one = BarAuthenticator()
auth_one.authenticate = Mock(return_value='one')
auth_two = BarAuthenticator()
auth_two.authenticate = Mock(return_value='two')
auth_three = BarAuthenticator()
auth_three.authenticate = Mock(return_value='three')
app = Mock(return_value=[])
middleware = AuthenticationMiddleware(
app,
pairs=[
(id_one, auth_one),
(id_two, auth_two),
(id_three, auth_three)
]
)
environ = {}
middleware(environ, lambda: None)
self.assertEquals(environ, {'cellardoor.identity':'two'})
class TestBasic(unittest.TestCase):
def test_skip_if_no_auth_header(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({})
self.assertEquals(credentials, None)
def test_skip_if_not_a_pair(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'})
self.assertEquals(credentials, None)
def test_skip_if_not_basic(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'})
self.assertEquals(credentials, None)
def test_error_if_not_base64(self):
identifier = BasicAuthIdentifier()
with self.assertRaises(errors.IdentificationError):
identifier.identify({'HTTP_AUTHORIZATION':'Basic \x000'})
def test_error_if_malformed(self):
identifier = BasicAuthIdentifier()
credentials = base64.standard_b64encode('foobar')
with self.assertRaises(errors.IdentificationError):
identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})
def test_pass(self):
identifier = BasicAuthIdentifier()
credentials = base64.standard_b64encode('foo:bar')
identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})
self.assertEquals(identified_credentials, {'username':'foo', 'password':'bar'})
| 30.747826 | 95 | 0.756505 | 3,332 | 0.942308 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.086538 |
16731efe14cf79a4c56966e84b709e60bb9faf4f | 42 | py | Python | src/styleaug/__init__.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | 69 | 2019-04-09T18:05:33.000Z | 2022-03-11T05:58:59.000Z | src/styleaug/__init__.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | 6 | 2019-04-01T12:04:10.000Z | 2022-01-19T11:49:13.000Z | src/styleaug/__init__.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | 13 | 2019-05-22T19:08:36.000Z | 2021-08-13T01:21:47.000Z | from .styleAugmentor import StyleAugmentor | 42 | 42 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
167422ad1c22d904c1fb3127c28d48e06243100c | 2,698 | py | Python | configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 10 | 2021-12-30T10:22:27.000Z | 2022-03-30T02:31:38.000Z | configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 3 | 2022-01-20T21:02:48.000Z | 2022-03-19T13:49:45.000Z | configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py',
'../../../_base_/default_runtime.py',
]
# model settings
model = dict(
type='MixUpClassification',
pretrained=None,
alpha=0.2,
mix_mode="cutmix",
mix_args=dict(
attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in this repo (use pre-trained)
automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock
fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False),
manifoldmix=dict(layer=(0, 3)),
puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size if CUDA out of memory
mp=None, block_num=4, # block_num<=4 and mp=2/4 for fast training
beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8),
resizemix=dict(scope=(0.1, 0.8), use_alpha=True),
samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock
),
backbone=dict(
type='ConvNeXt',
arch='tiny',
out_indices=(3,),
norm_cfg=dict(type='LN2d', eps=1e-6),
act_cfg=dict(type='GELU'),
drop_path_rate=0.1,
gap_before_final_norm=True,
),
head=dict(
type='ClsMixupHead', # mixup CE + label smooth
loss=dict(type='LabelSmoothLoss',
label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0),
with_avg_pool=False, # gap_before_final_norm is True
in_channels=768, num_classes=1000)
)
# interval for accumulate gradient
update_interval = 2 # total: 8 x bs256 x 2 accumulates = bs4096
# additional hooks
custom_hooks = [
dict(type='EMAHook', # EMA_W = (1 - m) * EMA_W + m * W
momentum=0.9999,
warmup='linear',
warmup_iters=20 * 626, warmup_ratio=0.9, # warmup 20 epochs.
update_interval=update_interval,
),
]
# optimizer
optimizer = dict(
type='AdamW',
lr=4e-3, # lr = 5e-4 * (256 * 4) * 4 accumulate / 1024 = 4e-3 / bs4096
weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999),
paramwise_options={
'(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
'bias': dict(weight_decay=0.),
})
# apex
use_fp16 = True
fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))
optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16)
# lr scheduler
lr_config = dict(
policy='CosineAnnealing',
by_epoch=False, min_lr=1e-5,
warmup='linear',
warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs.
warmup_ratio=1e-6,
)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=300)
| 34.151899 | 111 | 0.640474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 851 | 0.315419 |
16748f009db0117be1d076ddc5a413db7e45e64c | 2,274 | py | Python | mcstasscript/interface/reader.py | PaNOSC-ViNYL/McStasScript | bd94ebc6cac290c3c9662871df40d76edbe4a44e | [
"BSD-3-Clause"
] | 3 | 2019-08-29T14:15:06.000Z | 2021-03-04T12:08:48.000Z | mcstasscript/interface/reader.py | PaNOSC-ViNYL/McStasScript | bd94ebc6cac290c3c9662871df40d76edbe4a44e | [
"BSD-3-Clause"
] | 37 | 2019-03-05T12:28:32.000Z | 2022-03-22T10:11:23.000Z | mcstasscript/interface/reader.py | PaNOSC-ViNYL/McStasScript | bd94ebc6cac290c3c9662871df40d76edbe4a44e | [
"BSD-3-Clause"
] | 6 | 2019-10-21T20:19:10.000Z | 2022-03-09T10:12:16.000Z | import os
from mcstasscript.instr_reader.control import InstrumentReader
from mcstasscript.interface.instr import McStas_instr
class McStas_file:
"""
Reader of McStas files, can add to an existing McStasScript
instrument instance or create a corresponding McStasScript python
file.
Methods
-------
add_to_instr(Instr)
Add information from McStas file to McStasScript Instr instance
write_python_file(filename)
        Write a python file named filename that reproduces the McStas instrument
"""
def __init__(self, filename):
"""
Initialization of McStas_file class, needs McStas instr filename
Parameters
----------
filename (str)
Name of McStas instrument file to be read
"""
# Check filename
if not os.path.isfile(filename):
raise ValueError("Given filename, \"" + filename
+ "\" could not be found.")
self.Reader = InstrumentReader(filename)
def add_to_instr(self, Instr):
"""
Adds information from the McStas file to McStasScript instr
Parameters
----------
Instr (McStasScript McStas_instr instance)
McStas_instr instance to add instrument information to
"""
# Check Instr
if not isinstance(Instr, McStas_instr):
raise TypeError("Given object is not of type McStas_instr!")
self.Reader.add_to_instr(Instr)
def write_python_file(self, filename, **kwargs):
"""
Writes python file that reproduces McStas instrument file
Parameters
----------
filename (str)
Filename of python file to be written
"""
if "force" in kwargs:
force = kwargs["force"]
else:
force = False
# Check product_filename is available
if os.path.isfile(filename):
if force:
os.remove(filename)
else:
raise ValueError("Filename \"" + filename
+ "\" already exists, you can overwrite with "
+ "force=True")
self.Reader.generate_py_version(filename)
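# Minimal usage sketch (file names are placeholders; assumes a McStas_instr is
# constructed from an instrument name, as in McStasScript):
#
#   from mcstasscript.interface.instr import McStas_instr
#   reader = McStas_file("my_instrument.instr")
#   instr = McStas_instr("my_instrument")
#   reader.add_to_instr(instr)
#   reader.write_python_file("my_instrument.py", force=True)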
| 28.425 | 79 | 0.579595 | 2,144 | 0.942832 | 0 | 0 | 0 | 0 | 0 | 0 | 1,280 | 0.562885 |
1676599bdfdd4b081bb8bb20aa32589f69c604ef | 3,701 | py | Python | src/regrtest.py | ucsd-progsys/csolve-bak | 89cfeb5403e617f45ece4bae9f88f8e6cd7ca934 | [
"BSD-3-Clause"
] | null | null | null | src/regrtest.py | ucsd-progsys/csolve-bak | 89cfeb5403e617f45ece4bae9f88f8e6cd7ca934 | [
"BSD-3-Clause"
] | 1 | 2018-04-24T10:43:07.000Z | 2018-04-24T10:43:07.000Z | src/regrtest.py | ucsd-progsys/csolve-bak | 89cfeb5403e617f45ece4bae9f88f8e6cd7ca934 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2009 The Regents of the University of California. All rights reserved.
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that the
# above copyright notice and the following two paragraphs appear in
# all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION
# TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
import time, subprocess, optparse, sys, socket, os
import misc.rtest as rtest
solve = "./csolve -c".split()
null = open("/dev/null", "w")
now = (time.asctime(time.localtime(time.time()))).replace(" ","_")
logfile = "../tests/logs/regrtest_results_%s_%s" % (socket.gethostname (), now)
argcomment = "//! run with "
def logged_sys_call(args, out=None, err=None):
print "exec: " + " ".join(args)
return subprocess.call(args, stdout=out, stderr=err)
def solve_quals(file,bare,time,quiet,flags):
if quiet: out = null
else: out = None
if time: time = ["time"]
else: time = []
hygiene_flags = [("--csolveprefix=%s" % (file)), "-o", "/dev/null"]
out = open(file + ".log", "w")
rv = logged_sys_call(time + solve + flags + hygiene_flags + [file], out)
out.close()
return rv
def run_script(file,quiet):
if quiet: out = null
else: out = None
return logged_sys_call(file, out)
def getfileargs(file):
f = open(file)
l = f.readline()
f.close()
if l.startswith(argcomment):
return l[len(argcomment):].strip().split(" ")
else:
return []
class Config (rtest.TestConfig):
def __init__ (self, dargs, testdirs, logfile, threadcount):
rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount)
self.dargs = dargs
if os.path.exists("../tests/postests/coreutils/"):
logged_sys_call(["../tests/postests/coreutils/makeCoreUtil.sh", "init"], None)
def run_test (self, file):
os.environ['CSOLVEFLAGS'] = self.dargs
if file.endswith(".c"):
fargs = getfileargs(file)
return solve_quals(file, True, False, True, fargs)
elif file.endswith(".sh"):
return run_script(file, True)
def is_test (self, file):
return (file.endswith(".sh") and os.access(file, os.X_OK)) \
or (file.endswith(".c") and not file.endswith(".csolve.save.c") and not file.endswith(".ssa.c"))
#####################################################################################
#testdirs = [("../postests", 0)]
#testdirs = [("../negtests", 1)]
#testdirs = [("../slowtests", 1)]
#DEFAULT
testdirs = [("../tests/postests", 0), ("../tests/negtests", [1, 2])]
#testdirs = [("../tests/microtests", 0)]
parser = optparse.OptionParser()
parser.add_option("-t", "--threads", dest="threadcount", default=1, type=int, help="spawn n threads")
parser.add_option("-o", "--opts", dest="opts", default="", type=str, help="additional arguments to csolve")
parser.disable_interspersed_args()
options, args = parser.parse_args()
runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount))
exit (runner.run ())
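# Example invocation (run from the src/ directory; the thread count is illustrative):
#
#   ./regrtest.py --threads 4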
| 38.154639 | 107 | 0.676574 | 782 | 0.211294 | 0 | 0 | 0 | 0 | 0 | 0 | 1,721 | 0.465009 |
16766ccc57f251df7ba9394a55b7eabdd7d12e46 | 2,925 | py | Python | country_capital_guesser.py | NathanMH/ComputerClub | 197585c1a77f71ee363547740d6e09f945e7526f | [
"MIT"
] | null | null | null | country_capital_guesser.py | NathanMH/ComputerClub | 197585c1a77f71ee363547740d6e09f945e7526f | [
"MIT"
] | null | null | null | country_capital_guesser.py | NathanMH/ComputerClub | 197585c1a77f71ee363547740d6e09f945e7526f | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
#######################
"""####################
Index:
1. Imports and Readme
2. Functions
3. Main
4. Testing
####################"""
#######################
###################################################################
# 1. IMPORTS AND README
###################################################################
import easygui
import country_list_getter
###################################################################
# 2. FUNCTIONS
###################################################################
# Dictionary. It has keys (Canada, France etc...) and Values (Paris, Ottawa)
country_list_getter.main()
COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST
def ask_to_play():
return easygui.ynbox("Do you want to play a game?", "Country Guesser", ("Yes", "No"))
def ask_to_replay(correct_answers, total_questions):
score = round(((correct_answers / total_questions) * 100), 2)
if score >= 50:
return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/happy_puppy.jpg", ["Yes", "No"])
else:
return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/sad_puppy.jpg", ["Yes", "No"])
def main_question_box(country):
return easygui.enterbox("What is the capital of: " + country + "?", "Country Capital Guesser!!")
###################################################################
# 3. MAIN
###################################################################
def funtime():
playing = 1
correct_answers = 0
total_questions = 0
ask_to_play()
while playing:
for key, value in COUNTRIES_CAPITALS.items():
answer = main_question_box(key)
# answer = input("Name the capital of: " + key + "\n").lower()
total_questions += 1 # Short for total_questions = total_questions + 1
if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]:
correct_answers += 1
print("Correct!")
else:
print("Wrong!")
# Should we keep playing?
response = input("Would you like to play again?: \n")
if response.lower() == "yes" or response == "y":
playing = 1
else:
playing = 0
#score_screen(correct_answers, total_questions)
ask_to_replay(correct_answers, total_questions)
#print("You scored " + str(correct_answers)+ "/" + str(total_questions) + " (" + str(correct_percent) + "%)")
###################################################################
# 4. TESTING
###################################################################
# COUNTRIES_CAPITALS = {"Canada": "Ottawa", "United States": "Washington", "France": "Paris"}
def test_1():
pass
# ask_to_play()
# main_question_box("Canada")
funtime()
| 33.62069 | 160 | 0.494017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,666 | 0.569573 |
1676c1cee546273be3e4746fcf8ddcdf0ca583bb | 2,288 | py | Python | data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py | aframires/freesound-loop-annotator | a24e0c23bfc671e41e8627150e7b9fcae5c8cb13 | [
"Apache-2.0"
] | 18 | 2020-01-22T14:58:18.000Z | 2022-02-21T12:07:51.000Z | data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py | aframires/freesound-loop-annotator | a24e0c23bfc671e41e8627150e7b9fcae5c8cb13 | [
"Apache-2.0"
] | 2 | 2020-02-24T13:14:05.000Z | 2020-09-21T13:34:53.000Z | data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py | aframires/freesound-loop-annotator | a24e0c23bfc671e41e8627150e7b9fcae5c8cb13 | [
"Apache-2.0"
] | 1 | 2020-01-22T14:55:36.000Z | 2020-01-22T14:55:36.000Z | # Need this to import from parent directory when running outside pycharm
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from ac_utils.general import save_to_json, load_from_json
import click
import xml.etree.ElementTree
from urllib import unquote
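# NOTE: this is the Python 2 import; on Python 3 the equivalent is
# "from urllib.parse import unquote".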
def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file):
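    # Try to match the sound to an entry of the rekordbox COLLECTION by, in order:
    # (1) the sound id appearing in the entry's filename, (2) the raw .wav filename,
    # and (3) the URL-decoded filename taken from the entry's 'Location' attribute.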
collection = rekordbox_file.find('COLLECTION')
found = False
for document in collection:
if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]:
found = document
break
if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]:
found = document
break
if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]):
found = document
break
return found
@click.command()
@click.argument('dataset_path')
def rekordbox_file_to_analysis_file(dataset_path):
"""
Read information from rekordbox_rhythm.xml present in dataset_path and convert it into
    analysis_rhythm_rekordbox.json to be stored in the same folder and compatible with our evaluation
framework.
"""
rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot()
metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json'))
out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json')
analysis = dict()
with click.progressbar(metadata_file.keys(), label="Converting...") as metadata_keys:
for key in metadata_keys:
entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file)
if entry is not False:
tempo_entry = entry.find('TEMPO')
if tempo_entry is not None:
bpm_raw = float(tempo_entry.attrib['Bpm'])
else:
bpm_raw = 0.0
analysis[key] = {"RekBox": {
"bpm": bpm_raw,
}
}
save_to_json(out_file_path, analysis, verbose=True)
if __name__ == '__main__':
rekordbox_file_to_analysis_file()
| 39.448276 | 119 | 0.660402 | 0 | 0 | 0 | 0 | 1,263 | 0.55201 | 0 | 0 | 518 | 0.226399 |
1676d72870f651008f4e3aca9c90ccf681a85a4a | 5,947 | py | Python | inventree/part.py | SergeoLacruz/inventree-python | 94681428f61de4ca51171e685812ebc436b9be42 | [
"MIT"
] | null | null | null | inventree/part.py | SergeoLacruz/inventree-python | 94681428f61de4ca51171e685812ebc436b9be42 | [
"MIT"
] | null | null | null | inventree/part.py | SergeoLacruz/inventree-python | 94681428f61de4ca51171e685812ebc436b9be42 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import re
import inventree.base
import inventree.stock
import inventree.company
import inventree.build
logger = logging.getLogger('inventree')
class PartCategory(inventree.base.InventreeObject):
""" Class representing the PartCategory database model """
URL = 'part/category'
def getParts(self, **kwargs):
return Part.list(self._api, category=self.pk, **kwargs)
def getParentCategory(self):
if self.parent:
return PartCategory(self._api, self.parent)
else:
return None
def getChildCategories(self, **kwargs):
return PartCategory.list(self._api, parent=self.pk, **kwargs)
def get_category_parameter_templates(self, fetch_parent=True):
"""
fetch_parent: enable to fetch templates for parent categories
"""
parameters_url = f'part/category/{self.pk}/parameters'
return self.list(self._api,
url=parameters_url,
fetch_parent=fetch_parent)
class Part(inventree.base.ImageMixin, inventree.base.InventreeObject):
""" Class representing the Part database model """
URL = 'part'
def getCategory(self):
""" Return the part category associated with this part """
return PartCategory(self._api, self.category)
def getTestTemplates(self):
""" Return all test templates associated with this part """
return PartTestTemplate.list(self._api, part=self.pk)
def getSupplierParts(self):
""" Return the supplier parts associated with this part """
return inventree.company.SupplierPart.list(self._api, part=self.pk)
def getBomItems(self):
""" Return the items required to make this part """
return BomItem.list(self._api, part=self.pk)
def isUsedIn(self):
""" Return a list of all the parts this part is used in """
return BomItem.list(self._api, sub_part=self.pk)
def getBuilds(self, **kwargs):
""" Return the builds associated with this part """
return inventree.build.Build.list(self._api, part=self.pk, **kwargs)
def getStockItems(self):
""" Return the stock items associated with this part """
return inventree.stock.StockItem.list(self._api, part=self.pk)
def getParameters(self):
""" Return parameters associated with this part """
return Parameter.list(self._api, part=self.pk)
def getRelated(self):
""" Return related parts associated with this part """
return PartRelated.list(self._api, part=self.pk)
def getInternalPriceList(self):
"""
Returns the InternalPrice list for this part
"""
return InternalPrice.list(self._api, part=self.pk)
def setInternalPrice(self, quantity: int, price: float):
"""
Set the internal price for this part
"""
return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price)
def getAttachments(self):
return PartAttachment.list(self._api, part=self.pk)
def uploadAttachment(self, attachment, comment=''):
"""
Upload an attachment (file) against this Part.
Args:
attachment: Either a string (filename) or a file object
comment: Attachment comment
"""
return PartAttachment.upload(
self._api,
attachment,
comment=comment,
part=self.pk
)
class PartAttachment(inventree.base.Attachment):
""" Class representing a file attachment for a Part """
URL = 'part/attachment'
REQUIRED_KWARGS = ['part']
class PartTestTemplate(inventree.base.InventreeObject):
""" Class representing a test template for a Part """
URL = 'part/test-template'
@classmethod
def generateTestKey(cls, test_name):
""" Generate a 'key' for this test """
key = test_name.strip().lower()
key = key.replace(' ', '')
# Remove any characters that cannot be used to represent a variable
key = re.sub(r'[^a-zA-Z0-9]', '', key)
return key
def getTestKey(self):
return PartTestTemplate.generateTestKey(self.test_name)
class BomItem(inventree.base.InventreeObject):
""" Class representing the BomItem database model """
URL = 'bom'
class InternalPrice(inventree.base.InventreeObject):
""" Class representing the InternalPrice model """
URL = 'part/internal-price'
@classmethod
def setInternalPrice(cls, api, part, quantity: int, price: float):
"""
Set the internal price for this part
"""
data = {
'part': part,
'quantity': quantity,
'price': price,
}
# Send the data to the server
return api.post(cls.URL, data)
class PartRelated(inventree.base.InventreeObject):
""" Class representing a relationship between parts"""
URL = 'part/related'
@classmethod
def add_related(cls, api, part1, part2):
data = {
'part_1': part1,
'part_2': part2,
}
# Send the data to the server
        if api.post(cls.URL, data):
            logger.info("Related OK")
            ret = True
        else:
            logger.warning("Related failed")
            ret = False
return ret
class Parameter(inventree.base.InventreeObject):
"""class representing the Parameter database model """
URL = 'part/parameter'
def getunits(self):
""" Get the dimension and units for this parameter """
return [element for element
in ParameterTemplate.list(self._api)
if element['pk'] == self._data['template']]
class ParameterTemplate(inventree.base.InventreeObject):
""" class representing the Parameter Template database model"""
URL = 'part/parameter/template'
| 27.920188 | 82 | 0.626534 | 5,722 | 0.962166 | 0 | 0 | 1,052 | 0.176896 | 0 | 0 | 1,996 | 0.335631 |
167719b0cc59eef9b7fff6f4ce109cd0d2fe8bc1 | 12,932 | py | Python | tests/test_web_urldispatcher.py | avstarkov/aiohttp | b0a03cffccf677bf316227522a9b841c15dcb869 | [
"Apache-2.0"
] | null | null | null | tests/test_web_urldispatcher.py | avstarkov/aiohttp | b0a03cffccf677bf316227522a9b841c15dcb869 | [
"Apache-2.0"
] | null | null | null | tests/test_web_urldispatcher.py | avstarkov/aiohttp | b0a03cffccf677bf316227522a9b841c15dcb869 | [
"Apache-2.0"
] | null | null | null | import functools
import os
import shutil
import tempfile
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import abc, web
from aiohttp.web_urldispatcher import SystemRoute
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
@pytest.mark.parametrize(
"show_index,status,prefix,data",
[pytest.param(False, 403, '/', None, id="index_forbidden"),
pytest.param(True, 200, '/',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/my_dir">my_dir/</a></li>\n'
b'<li><a href="/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_root"),
pytest.param(True, 200, '/static',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/static/my_dir">my_dir/</a></li>\n'
b'<li><a href="/static/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_static")])
async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client,
show_index, status, prefix, data):
"""
Tests the operation of static file server.
Try to access the root of static file server, and make
    sure that correct HTTP statuses are returned depending on whether the directory
index should be shown or not.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path, 'my_file')
with open(my_file_path, 'w') as fw:
fw.write('hello')
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
with open(my_file_path, 'w') as fw:
fw.write('world')
app = web.Application()
# Register global static route:
app.router.add_static(prefix, tmp_dir_path, show_index=show_index)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get(prefix)
assert r.status == status
if data:
assert r.headers['Content-Type'] == "text/html; charset=utf-8"
read_ = (await r.read())
assert read_ == data
async def test_follow_symlink(tmp_dir_path, aiohttp_client):
"""
    Tests access to a symlink inside the static folder.
"""
data = 'hello world'
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
with open(my_file_path, 'w') as fw:
fw.write(data)
my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink')
os.symlink(my_dir_path, my_symlink_path)
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, follow_symlinks=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink/my_file_in_dir')
assert r.status == 200
assert (await r.text()) == data
@pytest.mark.parametrize('dir_name,filename,data', [
('', 'test file.txt', 'test text'),
('test dir name', 'test dir file .txt', 'test text file folder')
])
async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client,
dir_name, filename, data):
"""
Checks operation of static files with spaces
"""
my_dir_path = os.path.join(tmp_dir_path, dir_name)
if dir_name:
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, filename)
with open(my_file_path, 'w') as fw:
fw.write(data)
app = web.Application()
url = os.path.join('/', dir_name, filename)
app.router.add_static('/', tmp_dir_path)
client = await aiohttp_client(app)
r = await client.get(url)
assert r.status == 200
assert (await r.text()) == data
async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client):
"""
Tests accessing non-existing resource
    Try to access a non-existing resource and make sure that a 404 HTTP status
    is returned.
"""
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/non_existing_resource')
assert r.status == 404
@pytest.mark.parametrize('registered_path,request_url', [
('/a:b', '/a:b'),
('/a@b', '/a@b'),
('/a:b', '/a%3Ab'),
])
async def test_url_escaping(aiohttp_client, registered_path, request_url):
"""
    Tests accessing a resource whose registered path contains special characters
    (':' and '@'), both literally and percent-encoded.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(registered_path, handler)
client = await aiohttp_client(app)
r = await client.get(request_url)
assert r.status == 200
async def test_handler_metadata_persistence():
"""
Tests accessing metadata of a handler after registering it on the app
router.
"""
app = web.Application()
async def async_handler(request):
"""Doc"""
return web.Response()
def sync_handler(request):
"""Doc"""
return web.Response()
app.router.add_get('/async', async_handler)
with pytest.warns(DeprecationWarning):
app.router.add_get('/sync', sync_handler)
for resource in app.router.resources():
for route in resource:
assert route.handler.__doc__ == 'Doc'
async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client):
"""
Tests the unauthorized access to a folder of static file server.
    Try to list the content of a folder of the static file server when the server does not
have permissions to do so for the folder.
"""
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
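        # Make every constructed Path resolve to a mock directory whose iterdir()
        # raises PermissionError, simulating a folder the server may not list.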
path = MagicMock()
path.joinpath.return_value = path
path.resolve.return_value = path
path.iterdir.return_value.__iter__.side_effect = PermissionError()
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_dir')
assert r.status == 403
async def test_access_symlink_loop(tmp_dir_path, aiohttp_client):
"""
Tests the access to a looped symlink, which could not be resolved.
"""
my_dir_path = os.path.join(tmp_dir_path, 'my_symlink')
os.symlink(my_dir_path, my_dir_path)
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink')
assert r.status == 404
async def test_access_special_resource(tmp_dir_path, aiohttp_client):
"""
Tests the access to a resource that is neither a file nor a directory.
    Checks that if a special resource is accessed (e.g. a named pipe or UNIX
    domain socket) then a 404 HTTP status is returned.
"""
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
special = MagicMock()
special.is_dir.return_value = False
special.is_file.return_value = False
path = MagicMock()
path.joinpath.side_effect = lambda p: (special if p == 'special'
else path)
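        # joinpath('special') returns a resource that is neither a file nor a
        # directory; any other component resolves back to the directory mock.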
path.resolve.return_value = path
special.resolve.return_value = special
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/special')
assert r.status == 404
async def test_partialy_applied_handler(aiohttp_client):
app = web.Application()
async def handler(data, request):
return web.Response(body=data)
with pytest.warns(DeprecationWarning):
app.router.add_route('GET', '/', functools.partial(handler, b'hello'))
client = await aiohttp_client(app)
r = await client.get('/')
data = (await r.read())
assert data == b'hello'
def test_system_route():
route = SystemRoute(web.HTTPCreated(reason='test'))
with pytest.raises(RuntimeError):
route.url_for()
assert route.name is None
assert route.resource is None
assert "<SystemRoute 201: test>" == repr(route)
assert 201 == route.status
assert 'test' == route.reason
async def test_412_is_returned(aiohttp_client):
class MyRouter(abc.AbstractRouter):
async def resolve(self, request):
raise web.HTTPPreconditionFailed()
app = web.Application(router=MyRouter())
client = await aiohttp_client(app)
resp = await client.get('/')
assert resp.status == 412
async def test_allow_head(aiohttp_client):
"""
Test allow_head on routes.
"""
app = web.Application()
async def handler(_):
return web.Response()
app.router.add_get('/a', handler, name='a')
app.router.add_get('/b', handler, allow_head=False, name='b')
client = await aiohttp_client(app)
r = await client.get('/a')
assert r.status == 200
await r.release()
r = await client.head('/a')
assert r.status == 200
await r.release()
r = await client.get('/b')
assert r.status == 200
await r.release()
r = await client.head('/b')
assert r.status == 405
await r.release()
@pytest.mark.parametrize("path", [
'/a',
'/{a}',
])
def test_reuse_last_added_resource(path):
"""
Test that adding a route with the same name and path of the last added
resource doesn't create a new resource.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(path, handler, name="a")
app.router.add_post(path, handler, name="a")
assert len(app.router.resources()) == 1
def test_resource_raw_match():
app = web.Application()
async def handler(request):
return web.Response()
route = app.router.add_get("/a", handler, name="a")
assert route.resource.raw_match("/a")
route = app.router.add_get("/{b}", handler, name="b")
assert route.resource.raw_match("/{b}")
resource = app.router.add_static("/static", ".")
assert not resource.raw_match("/static")
async def test_add_view(aiohttp_client):
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_view("/a", MyView)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_decorate_view(aiohttp_client):
routes = web.RouteTableDef()
@routes.view("/a")
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app = web.Application()
app.router.add_routes(routes)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_web_view(aiohttp_client):
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_routes([
web.view("/a", MyView)
])
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
| 27.514894 | 79 | 0.634009 | 578 | 0.044695 | 0 | 0 | 4,472 | 0.345809 | 9,990 | 0.772502 | 3,223 | 0.249227 |
1678ba6ffacdb3dc2a1730ee864aab5b2813d801 | 13,683 | py | Python | R-GMM-VGAE/model_citeseer.py | nairouz/R-GAE | acc7bfe36153a4c7d6f68e21a557bb4d99dab639 | [
"MIT"
] | 26 | 2021-07-18T01:31:48.000Z | 2022-03-31T03:23:11.000Z | R-GMM-VGAE/model_citeseer.py | Fawzidev/R-GAE | 80988ddf951f1723091a04b617ce4fc6d20ab9ce | [
"MIT"
] | 3 | 2021-10-01T07:24:42.000Z | 2021-11-03T14:25:55.000Z | R-GMM-VGAE/model_citeseer.py | Fawzidev/R-GAE | 80988ddf951f1723091a04b617ce4fc6d20ab9ce | [
"MIT"
] | 7 | 2021-07-18T01:47:01.000Z | 2022-01-24T21:09:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Authors : Nairouz Mrabah ([email protected]) & Mohamed Fawzi Touati ([email protected])
# @Paper : Rethinking Graph Autoencoder Models for Attributed Graph Clustering
# @License : MIT License
import torch
import numpy as np
import torch.nn as nn
import scipy.sparse as sp
import torch.nn.functional as F
from tqdm import tqdm
from torch.optim import Adam
from sklearn.mixture import GaussianMixture
from torch.optim.lr_scheduler import StepLR
from preprocessing import sparse_to_tuple
from sklearn.neighbors import NearestNeighbors
from sklearn import metrics
from munkres import Munkres
def random_uniform_init(input_dim, output_dim):
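    # Glorot/Xavier-style uniform initialization in
    # [-sqrt(6 / (input_dim + output_dim)), +sqrt(6 / (input_dim + output_dim))].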
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = torch.rand(input_dim, output_dim)*2*init_range - init_range
return nn.Parameter(initial)
def q_mat(X, centers, alpha=1.0):
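    # Soft cluster assignments: a Student's t kernel (as in DEC) between the
    # embeddings and the cluster centers, normalized so that each row sums to 1.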
X = X.detach().numpy()
centers = centers.detach().numpy()
if X.size == 0:
q = np.array([])
else:
q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha))
q = q ** ((alpha + 1.0) / 2.0)
q = np.transpose(np.transpose(q) / np.sum(q, axis=1))
return q
def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2):
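    # Split nodes into "unconflicted" (confident) and "conflicted" ones: a node is
    # unconflicted when its best soft assignment exceeds beta1 and the margin to the
    # second-best assignment exceeds beta2.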
unconf_indices = []
conf_indices = []
q = q_mat(emb, centers_emb, alpha=1.0)
confidence1 = q.max(1)
confidence2 = np.zeros((q.shape[0],))
a = np.argsort(q, axis=1)
for i in range(q.shape[0]):
confidence1[i] = q[i,a[i,-1]]
confidence2[i] = q[i,a[i,-2]]
if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) > beta2:
unconf_indices.append(i)
else:
conf_indices.append(i)
unconf_indices = np.asarray(unconf_indices, dtype=int)
conf_indices = np.asarray(conf_indices, dtype=int)
return unconf_indices, conf_indices
class clustering_metrics():
def __init__(self, true_label, predict_label):
self.true_label = true_label
self.pred_label = predict_label
def clusteringAcc(self):
# best mapping between true_label and predict label
l1 = list(set(self.true_label))
numclass1 = len(l1)
l2 = list(set(self.pred_label))
numclass2 = len(l2)
if numclass1 != numclass2:
print('Class Not equal, Error!!!!')
return 0
cost = np.zeros((numclass1, numclass2), dtype=int)
for i, c1 in enumerate(l1):
mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1]
for j, c2 in enumerate(l2):
mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2]
cost[i][j] = len(mps_d)
# match two clustering results by Munkres algorithm
m = Munkres()
cost = cost.__neg__().tolist()
indexes = m.compute(cost)
# get the match results
new_predict = np.zeros(len(self.pred_label))
for i, c in enumerate(l1):
# correponding label in l2:
c2 = l2[indexes[i][1]]
# ai is the index with label==c2 in the pred_label list
ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2]
new_predict[ai] = c
acc = metrics.accuracy_score(self.true_label, new_predict)
f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro')
precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro')
recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro')
f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro')
precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro')
recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro')
return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro
def evaluationClusterModelFromLabel(self):
nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label)
adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label)
acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc()
print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore))
fh = open('recoder.txt', 'a')
fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) )
fh.write('\r\n')
fh.flush()
fh.close()
return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro
class GraphConvSparse(nn.Module):
def __init__(self, input_dim, output_dim, activation = F.relu, **kwargs):
super(GraphConvSparse, self).__init__(**kwargs)
self.weight = random_uniform_init(input_dim, output_dim)
self.activation = activation
def forward(self, inputs, adj):
x = inputs
x = torch.mm(x,self.weight)
x = torch.mm(adj, x)
outputs = self.activation(x)
return outputs
class ReGMM_VGAE(nn.Module):
def __init__(self, **kwargs):
super(ReGMM_VGAE, self).__init__()
self.num_neurons = kwargs['num_neurons']
self.num_features = kwargs['num_features']
self.embedding_size = kwargs['embedding_size']
self.nClusters = kwargs['nClusters']
# VGAE training parameters
self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons)
self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x)
self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x)
# GMM training parameters
self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True)
self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)
self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)
def pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset):
opti = Adam(self.parameters(), lr=lr)
epoch_bar = tqdm(range(epochs))
gmm = GaussianMixture(n_components = self.nClusters , covariance_type = 'diag')
for _ in epoch_bar:
opti.zero_grad()
_,_, z = self.encode(features, adj)
x_ = self.decode(z)
loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor)
loss.backward()
opti.step()
gmm.fit_predict(z.detach().numpy())
self.pi.data = torch.from_numpy(gmm.weights_)
self.mu_c.data = torch.from_numpy(gmm.means_)
self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_))
self.logstd = self.mean
def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1):
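        # ELBO = weighted reconstruction loss on the adjacency matrix plus the KL
        # terms between the encoder posterior and the GMM prior; yita_c holds the
        # posterior cluster responsibilities.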
pi = self.pi
mu_c = self.mu_c
log_sigma2_c = self.log_sigma2_c
det = 1e-2
Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor)
Loss = Loss * features.size(0)
yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det
yita_c = yita_c / (yita_c.sum(1).view(-1,1))
KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+
torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+
(z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1))
Loss1 = KL1
KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1))
Loss1 -= KL2
return Loss, Loss1, Loss+Loss1
def generate_centers(self, emb_unconf):
y_pred = self.predict(emb_unconf)
nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy())
_, indices = nn.kneighbors(self.mu_c.detach().numpy())
return indices[y_pred]
def update_graph(self, adj, labels, emb, unconf_indices, conf_indices):
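        # Graph refinement: link each unconflicted node to the unconflicted node
        # closest to its predicted cluster center, drop its edges towards unconflicted
        # neighbours assigned to a different cluster, then rebuild adj_label and the
        # BCE weight tensor accordingly.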
k = 0
y_pred = self.predict(emb)
emb_unconf = emb[unconf_indices]
adj = adj.tolil()
idx = unconf_indices[self.generate_centers(emb_unconf)]
for i, k in enumerate(unconf_indices):
adj_k = adj[k].tocsr().indices
if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) :
adj[k, idx[i]] = 1
for j in adj_k:
if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]):
adj[k, j] = 0
adj = adj.tocsr()
adj_label = adj + sp.eye(adj.shape[0])
adj_label = sparse_to_tuple(adj_label)
adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T),
torch.FloatTensor(adj_label[1]),
torch.Size(adj_label[2]))
weight_mask = adj_label.to_dense().view(-1) == 1
weight_tensor = torch.ones(weight_mask.size(0))
pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
weight_tensor[weight_mask] = pos_weight_orig
return adj, adj_label, weight_tensor
def train(self, adj_norm, adj, features, y, norm, epochs, lr, beta1, beta2, save_path, dataset):
self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk'))
opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089)
lr_s = StepLR(opti, step_size=10, gamma=0.9)
import os, csv
epoch_bar = tqdm(range(epochs))
previous_unconflicted = []
previous_conflicted = []
epoch_stable = 0
for epoch in epoch_bar:
opti.zero_grad()
z_mu, z_sigma2_log, emb = self.encode(features, adj_norm)
x_ = self.decode(emb)
unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2)
if epoch == 0:
adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)
if len(previous_unconflicted) < len(unconflicted_ind) :
z_mu = z_mu[unconflicted_ind]
z_sigma2_log = z_sigma2_log[unconflicted_ind]
emb_unconf = emb[unconflicted_ind]
emb_conf = emb[conflicted_ind]
previous_conflicted = conflicted_ind
previous_unconflicted = unconflicted_ind
else :
epoch_stable += 1
z_mu = z_mu[previous_unconflicted]
z_sigma2_log = z_sigma2_log[previous_unconflicted]
emb_unconf = emb[previous_unconflicted]
emb_conf = emb[previous_conflicted]
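            # If the unconflicted set has not grown for 15 epochs, relax both
            # confidence thresholds so that more nodes can become unconflicted.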
if epoch_stable >= 15:
epoch_stable = 0
beta1 = beta1 * 0.96
beta2 = beta2 * 0.98
if epoch % 50 == 0 and epoch <= 200 :
adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)
loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf)
epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy()))
y_pred = self.predict(emb)
cm = clustering_metrics(y, y_pred)
acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel()
elbo_loss.backward()
opti.step()
lr_s.step()
def gaussian_pdfs_log(self,x,mus,log_sigma2s):
G=[]
for c in range(self.nClusters):
G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1))
return torch.cat(G,1)
def gaussian_pdf_log(self,x,mu,log_sigma2):
c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1)
return c
def predict(self, z):
pi = self.pi
log_sigma2_c = self.log_sigma2_c
mu_c = self.mu_c
det = 1e-2
yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det
yita = yita_c.detach().numpy()
return np.argmax(yita, axis=1)
def encode(self, x_features, adj):
hidden = self.base_gcn(x_features, adj)
self.mean = self.gcn_mean(hidden, adj)
self.logstd = self.gcn_logstddev(hidden, adj)
gaussian_noise = torch.randn(x_features.size(0), self.embedding_size)
sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean
return self.mean, self.logstd ,sampled_z
@staticmethod
def decode(z):
A_pred = torch.sigmoid(torch.matmul(z,z.t()))
return A_pred | 46.699659 | 259 | 0.625448 | 11,731 | 0.857341 | 0 | 0 | 108 | 0.007893 | 0 | 0 | 1,000 | 0.073083 |
16796b947c516147ed6529d69a08e17bbd4afe73 | 3,005 | py | Python | odoo-13.0/addons/stock_account/models/account_chart_template.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/stock_account/models/account_chart_template.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/stock_account/models/account_chart_template.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
import logging
_logger = logging.getLogger(__name__)
class AccountChartTemplate(models.Model):
_inherit = "account.chart.template"
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}]
return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add)
def generate_properties(self, acc_template_ref, company, property_list=None):
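        # Besides the standard properties, set the company-level defaults for the
        # stock valuation journal and the stock input/output/valuation accounts
        # used on product categories.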
res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company)
PropertyObj = self.env['ir.property'] # Property Stock Journal
value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type', '=', 'general')], limit=1)
if value:
field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1)
vals = {
'name': 'property_stock_journal',
'company_id': company.id,
'fields_id': field.id,
'value': 'account.journal,%s' % value.id,
}
properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)])
if properties:
# the property exist: modify it
properties.write(vals)
else:
# create the property
PropertyObj.create(vals)
todo_list = [ # Property Stock Accounts
'property_stock_account_input_categ_id',
'property_stock_account_output_categ_id',
'property_stock_valuation_account_id',
]
for record in todo_list:
account = getattr(self, record)
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1)
vals = {
'name': record,
'company_id': company.id,
'fields_id': field.id,
'value': value,
}
properties = PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)], limit=1)
if not properties:
# create the property
PropertyObj.create(vals)
elif not properties.value_reference:
# update the property if False
properties.write(vals)
return res
| 47.698413 | 180 | 0.577704 | 2,816 | 0.937105 | 0 | 0 | 369 | 0.122795 | 0 | 0 | 937 | 0.311814 |
167a0dd80799c1a419238ba6164d01472b85e5d4 | 6,094 | py | Python | lib/roi_data/loader.py | BarneyQiao/pcl.pytorch | 4e0280e5e1470f705e620eda26f881d627c5016c | [
"MIT"
] | 233 | 2019-05-10T07:17:42.000Z | 2022-03-30T09:24:16.000Z | lib/roi_data/loader.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | [
"MIT"
] | 78 | 2019-05-10T21:10:47.000Z | 2022-03-29T13:57:32.000Z | lib/roi_data/loader.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | [
"MIT"
] | 57 | 2019-05-10T07:17:37.000Z | 2022-03-24T04:43:24.000Z | import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db, self._num_classes)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
# for key in blobs:
# if key != 'roidb':
# blobs[key] = blobs[key].squeeze(axis=0)
blobs['data'] = blobs['data'].squeeze(axis=0)
return blobs
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES containing SINGLE scale.
Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images base on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
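    # For example, with cfg.TRAIN.IMS_PER_BATCH = 2 and a (sorted) ratio_list of
    # [0.5, 0.8, 1.2, 1.5], the first minibatch gets target ratio 0.5 and the
    # second one gets 1.5.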
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
def __iter__(self):
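        # Shuffle the dataset, then recompute a common target aspect ratio for every
        # group of cfg.TRAIN.IMS_PER_BATCH consecutive samples and yield
        # (dataset index, target ratio) pairs.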
rand_perm = npr.permutation(self.num_data)
ratio_list = self.ratio_list[rand_perm]
ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
def __len__(self):
return self.num_data
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
# Because roidb consists of entries of variable length, it can't be batch into a tensor.
# So we keep roidb in the type of "list of ndarray".
lists = []
for blobs in list_of_blobs:
lists.append({'data' : blobs.pop('data'),
'rois' : blobs.pop('rois'),
'labels' : blobs.pop('labels')})
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
minibatch = default_collate(mini_list)
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
| 38.56962 | 97 | 0.639317 | 3,445 | 0.56531 | 314 | 0.051526 | 0 | 0 | 0 | 0 | 2,098 | 0.344273 |
167a8c5cf5187907cc0dbc578ad93057948ece69 | 28,272 | py | Python | venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py | andywu113/fuhe_predict | 7fd816ae83467aa659d420545cd3e25a5e933d5f | [
"MIT"
] | 3 | 2019-06-05T12:11:20.000Z | 2022-01-17T13:53:06.000Z | venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py | kevinten10/Clothing-Classification | 9aac6e339651137179f4e4da36fe7743cf4bdca4 | [
"MIT"
] | 3 | 2021-06-08T20:58:27.000Z | 2022-03-12T00:16:49.000Z | venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py | kevinten10/Clothing-Classification | 9aac6e339651137179f4e4da36fe7743cf4bdca4 | [
"MIT"
] | 1 | 2019-02-11T22:36:12.000Z | 2019-02-11T22:36:12.000Z | import warnings
from distutils.version import LooseVersion
import numpy as np
import pytest
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC
# TODO: use another dataset that has multiple drops
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
n_samples = y.size
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from io import StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar', verbose=10)
sys.stdout = old_stdout
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert ocur == i + 1
else:
# no more than max_pred variables can go into the active set
assert ocur == X.shape[1]
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
_, _, coef_path_ = linear_model.lars_path(
X, y, Gram=G, method='lar')
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert ocur == i + 1
else:
# no more than max_pred variables can go into the active set
assert ocur == X.shape[1]
def _assert_same_lars_path_result(output1, output2):
assert_equal(len(output1), len(output2))
for o1, o2 in zip(output1, output2):
assert_allclose(o1, o2)
@pytest.mark.parametrize('method', ['lar', 'lasso'])
@pytest.mark.parametrize('return_path', [True, False])
def test_lars_path_gram_equivalent(method, return_path):
_assert_same_lars_path_result(
linear_model.lars_path_gram(
Xy=Xy, Gram=G, n_samples=n_samples, method=method,
return_path=return_path),
linear_model.lars_path(
X, y, Gram=G, method=method,
return_path=return_path))
def test_x_none_gram_none_raises_value_error():
# Test that lars_path with no X and Gram raises exception
Xy = np.dot(X.T, y)
assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None,
Xy=Xy)
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy,
method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
@pytest.mark.filterwarnings('ignore: `rcond` parameter will change')
# numpy deprecation
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * X # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
# Avoid FutureWarning about default value change when numpy >= 1.14
rcond = None if LooseVersion(np.__version__) >= '1.14' else -1
coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
@pytest.mark.filterwarnings('ignore:`rcond` parameter will change')
# numpy deprecation
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
_, _, coef_path_ = linear_model.lars_path(X, y, method='lasso')
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
rng = np.random.RandomState(0)
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert not np.isnan(coef_path_).any()
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = rng.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar')
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar', Gram=G)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', Gram=G, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
@pytest.mark.parametrize(
'classifier',
[linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC])
def test_lars_precompute(classifier):
# Check for different values of precompute
G = np.dot(X.T, X)
clf = classifier(precompute=G)
output_1 = ignore_warnings(clf.fit)(X, y).coef_
for precompute in [True, False, 'auto', None]:
clf = classifier(precompute=precompute)
output_2 = clf.fit(X, y).coef_
assert_array_almost_equal(output_1, output_2, decimal=8)
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
_, _, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in (
[[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]]
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd():
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping():
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
# same test, with normalization
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert np.all(np.diff(lasso.alphas_) < 0)
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert np.all(np.isfinite(clf.coef_))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
Y = np.vstack([y, y ** 2]).T
n_targets = Y.shape[1]
estimators = [
linear_model.LassoLars(),
linear_model.Lars(),
# regression test for gh-1615
linear_model.LassoLars(fit_intercept=False),
linear_model.Lars(fit_intercept=False),
]
for estimator in estimators:
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
assert not hasattr(lars_cv, 'n_nonzero_coefs')
@pytest.mark.filterwarnings('ignore::FutureWarning')
def test_lars_cv_max_iter():
with warnings.catch_warnings(record=True) as w:
rng = np.random.RandomState(42)
x = rng.randn(len(y))
X = diabetes.data
X = np.c_[X, x, x] # add correlated features
lars_cv = linear_model.LassoLarsCV(max_iter=5)
lars_cv.fit(X, y)
assert len(w) == 0
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
# Once deprecation of LAR + positive option is done use these:
# assert_raises(ValueError, linear_model.lars_path, diabetes['data'],
# diabetes['target'], method='lar', positive=True)
with pytest.warns(DeprecationWarning, match='broken'):
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method='lar',
positive=True)
method = 'lasso'
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=False)
assert coefs.min() < 0
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=True)
assert coefs.min() >= 0
    # Now test the positive option for all the estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(X, y)
assert estimator.coef_.min() < 0
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(X, y)
assert min(estimator.coef_) >= 0
def test_lasso_lars_vs_lasso_cd_positive():
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However, for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make some adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_R_implementation():
# Test that sklearn LassoLars implementation agrees with the LassoLars
# implementation available in R (lars library) under the following
# scenarios:
# 1) fit_intercept=False and normalize=False
# 2) fit_intercept=True and normalize=True
# Let's generate the data used in the bug report 7778
y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
-19.42109366])
x = np.array([[0.47299829, 0, 0, 0, 0],
[0.08239882, 0.85784863, 0, 0, 0],
[0.30114139, -0.07501577, 0.80895216, 0, 0],
[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
[-0.69363927, 0.06754067, 0.18064514, -0.0803561,
0.40427291]])
X = x.T
###########################################################################
# Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
# normalize=False
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
# trace=TRUE, normalize=FALSE)
# r = t(model_lasso_lars$beta)
#
r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
-83.777653739190711, -83.784156932888934,
-84.033390591756657],
[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
0.025219751009936],
[0, -3.577397088285891, -4.702795355871871,
-7.016748621359461, -7.614898471899412, -0.336938391359179,
0, 0, 0.001213370600853, 0.048162321585148],
[0, 0, 0, 2.231558436628169, 2.723267514525966,
2.811549786389614, 2.813766976061531, 2.817462468949557,
2.817368178703816, 2.816221090636795],
[0, 0, -1.218422599914637, -3.457726183014808,
-4.021304522060710, -45.827461592423745,
-47.776608869312305,
-47.911561610746404, -47.914845922736234,
-48.039562334265717]])
model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
normalize=False)
model_lasso_lars.fit(X, y)
skl_betas = model_lasso_lars.coef_path_
assert_array_almost_equal(r, skl_betas, decimal=12)
###########################################################################
###########################################################################
# Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
# normalize=True
#
# Note: When normalize is equal to True, R returns the coefficients in
# their original units, that is, they are rescaled back, whereas sklearn
# does not do that, therefore, we need to do this step before comparing
# their results.
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
# trace=TRUE, normalize=TRUE)
# r2 = t(model_lasso_lars2$beta)
r2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 8.371887668009453, 19.463768371044026],
[0, 0, 0, 0, 9.901611055290553],
[0, 7.495923132833733, 9.245133544334507,
17.389369207545062, 26.971656815643499],
[0, 0, -1.569380717440311, -5.924804108067312,
-7.996385265061972]])
model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True,
normalize=True)
model_lasso_lars2.fit(X, y)
skl_betas2 = model_lasso_lars2.coef_path_
# Let's rescale back the coefficients returned by sklearn before comparing
# against the R result (read the note above)
temp = X - np.mean(X, axis=0)
normx = np.sqrt(np.sum(temp ** 2, axis=0))
skl_betas2 /= normx[:, np.newaxis]
assert_array_almost_equal(r2, skl_betas2, decimal=12)
###########################################################################
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_copyX_behaviour(copy_X):
"""
Test that user input regarding copy_X is not being overridden (it was until
at least version 0.21)
"""
lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y)
assert copy_X == np.array_equal(X, X_copy)
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_fit_copyX_behaviour(copy_X):
"""
Test that user input to .fit for copy_X overrides default __init__ value
"""
lasso_lars = LassoLarsIC(precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y, copy_X=copy_X)
assert copy_X == np.array_equal(X, X_copy)
| 38.360923 | 79 | 0.616051 | 0 | 0 | 0 | 0 | 7,165 | 0.253431 | 0 | 0 | 7,536 | 0.266553 |
167b4e3bb5a00625d3f0b289e41e2bc170fabc61 | 3,128 | py | Python | parser.py | FeroxTL/pynginxconfig-new | 71cb78c635930b0a764d3274646d436e8d2f1c4d | [
"MIT"
] | 8 | 2016-03-25T04:22:39.000Z | 2022-02-12T21:46:47.000Z | parser.py | Winnerer/pynginxconfig | 71cb78c635930b0a764d3274646d436e8d2f1c4d | [
"MIT"
] | null | null | null | parser.py | Winnerer/pynginxconfig | 71cb78c635930b0a764d3274646d436e8d2f1c4d | [
"MIT"
] | 3 | 2019-01-26T15:54:54.000Z | 2022-02-12T21:46:47.000Z | #coding: utf8
import copy
import re
from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location
def parse(s, parent_block):
config = copy.copy(s)
pos, brackets_level, param_start = 0, 0, 0
while pos < len(config):
if config[pos] == '#' and brackets_level == 0:
re_sharp_comment = re.search('(?P<offset>[\s\n]*)#(?P<comment>.*)$', config, re.M)
sharp_comment = re_sharp_comment.groupdict()
parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment']))
config = config[re_sharp_comment.end():]
pos, param_start = 0, 0
continue
if config[pos] == ';' and brackets_level == 0:
re_option = re.search('\s*(?P<param_name>\w+)\s*(?P<param_options>.*?);', config[param_start:], re.S)
if not re_option:
raise Exception('Wrong option')
option = re_option.groupdict()
parent_block[option['param_name']] = KeyValueOption(re.sub('[ \n]+', ' ', option['param_options']))
config = config[re_option.end():]
pos, param_start = 0, 0
continue
if config[pos] == '{':
brackets_level += 1
elif config[pos] == '}':
brackets_level -= 1
if brackets_level == 0 and param_start is not None:
re_block = re.search(
'(?P<param_name>\w+)\s*(?P<param_options>.*)\s*{(\n){0,1}(?P<block>(.|\n)*)}',
config[param_start:pos + 1],
)
block = re_block.groupdict()
if block['param_name'].lower() == 'location':
new_block = Location(block['param_options'])
parent_block.add_location(new_block)
else:
new_block = Block()
parent_block[block['param_name']] = new_block
if block['block']:
parse(block['block'], new_block)
config = config[re_block.end():]
pos, param_start = 0, 0
continue
pos += 1
if brackets_level != 0:
raise Exception('Not closed bracket')
qwe = EmptyBlock()
parse("""#{ asd #qweqeqwe{}
servername qweqweqweqweqwe; # comment {lalalal} #1
server {
listen
8080
tls;
root /data/up1;
location / {
l200;
}
location /qwe{
s 500;
}#123
}#qweqwe""", qwe)
print(qwe.render())
qwe = EmptyBlock()
parse(""" servername wqeqweqwe;
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
}#123123
""", qwe)
print(qwe.render())
| 24.825397 | 113 | 0.545716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,141 | 0.36477 |
167b69684843eed85973a69dafe6205fbdff9406 | 845 | py | Python | cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py | triompha/EarthWarrior3D | d68a347902fa1ca1282df198860f5fb95f326797 | [
"MIT"
] | null | null | null | cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py | triompha/EarthWarrior3D | d68a347902fa1ca1282df198860f5fb95f326797 | [
"MIT"
] | null | null | null | cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py | triompha/EarthWarrior3D | d68a347902fa1ca1282df198860f5fb95f326797 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
print 'Build Config:'
print ' Host:win7 x86'
print ' Branch:develop'
print ' Target:win32'
print ' "%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"'
if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False):
node_name = os.environ['NODE_NAME']
source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name
source_dir = source_dir.replace("/", os.sep)
os.system("xcopy " + source_dir + " . /E /Y /H")
os.system('git pull origin develop')
os.system('git submodule update --init --force')
ret = subprocess.call('"%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"', shell=True)
os.system('git clean -xdf -f')
print 'build exit'
print ret
if ret == 0:
exit(0)
else:
exit(1)
| 33.8 | 127 | 0.668639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.539645 |
167cfaccf65c4a217ee921178f5ab5094fc6d8a6 | 241 | py | Python | iris_sdk/models/data/ord/rate_center_search_order.py | NumberAI/python-bandwidth-iris | 0e05f79d68b244812afb97e00fd65b3f46d00aa3 | [
"MIT"
] | 2 | 2020-04-13T13:47:59.000Z | 2022-02-23T20:32:41.000Z | iris_sdk/models/data/ord/rate_center_search_order.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2020-09-18T20:59:24.000Z | 2021-08-25T16:51:42.000Z | iris_sdk/models/data/ord/rate_center_search_order.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2018-12-12T14:39:50.000Z | 2020-11-17T21:42:29.000Z | #!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.ord.rate_center_search_order import \
RateCenterSearchOrderMap
class RateCenterSearchOrder(RateCenterSearchOrderMap, BaseData):
pass | 30.125 | 64 | 0.834025 | 73 | 0.302905 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.087137 |
167df72d7c85276ff20ea4552c3c38a522dba306 | 7,024 | py | Python | optimizer.py | thanusha22/CEC-1 | 02ad9247b006a348cc871a5714cf5abfa4a516af | [
"MIT"
] | null | null | null | optimizer.py | thanusha22/CEC-1 | 02ad9247b006a348cc871a5714cf5abfa4a516af | [
"MIT"
] | null | null | null | optimizer.py | thanusha22/CEC-1 | 02ad9247b006a348cc871a5714cf5abfa4a516af | [
"MIT"
] | null | null | null |
from pathlib import Path
import optimizers.PSO as pso
import optimizers.MVO as mvo
import optimizers.GWO as gwo
import optimizers.MFO as mfo
import optimizers.CS as cs
import optimizers.BAT as bat
import optimizers.WOA as woa
import optimizers.FFA as ffa
import optimizers.SSA as ssa
import optimizers.GA as ga
import optimizers.HHO as hho
import optimizers.SCA as sca
import optimizers.JAYA as jaya
import optimizers.HYBRID as hybrid
import benchmarks
import csv
import numpy
import time
import warnings
import os
import plot_convergence as conv_plot
import plot_boxplot as box_plot
warnings.simplefilter(action="ignore")
def selector(algo, func_details, popSize, Iter):
function_name = func_details[0]
lb = func_details[1]
ub = func_details[2]
dim = func_details[3]
if algo == "SSA":
x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "PSO":
x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "GA":
x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "BAT":
x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "FFA":
x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "GWO":
x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "WOA":
x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "MVO":
x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "MFO":
x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "CS":
x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "HHO":
x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "SCA":
x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "JAYA":
x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "HYBRID":
x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
else:
return None
return x
def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):
"""
It serves as the main interface of the framework for running the experiments.
Parameters
----------
optimizer : list
The list of optimizers names
objectivefunc : list
The list of benchmark functions
NumOfRuns : int
The number of independent runs
params : dict
The dictionary of parameters which are:
1. Size of population (PopulationSize)
2. The number of iterations (Iterations)
export_flags : dict
The dictionary of Boolean flags which are:
1. Export (Exporting the results in a file)
2. Export_details (Exporting the detailed results in files)
3. Export_convergence (Exporting the covergence plots)
4. Export_boxplot (Exporting the box plots)
Returns
-----------
N/A
"""
# Select general parameters for all optimizers (population size, number of iterations) ....
PopulationSize = params["PopulationSize"]
Iterations = params["Iterations"]
# Export results ?
Export = export_flags["Export_avg"]
Export_details = export_flags["Export_details"]
Export_convergence = export_flags["Export_convergence"]
Export_boxplot = export_flags["Export_boxplot"]
Flag = False
Flag_details = False
# CSV header for the convergence data
CnvgHeader = []
results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/"
Path(results_directory).mkdir(parents=True, exist_ok=True)
for l in range(0, Iterations):
CnvgHeader.append("Iter" + str(l + 1))
for i in range(0, len(optimizer)):
for j in range(0, len(objectivefunc)):
convergence = [0] * NumOfRuns
executionTime = [0] * NumOfRuns
for k in range(0, NumOfRuns):
func_details = benchmarks.getFunctionDetails(objectivefunc[j])
x = selector(optimizer[i], func_details, PopulationSize, Iterations)
convergence[k] = x.convergence
optimizerName = x.optimizer
objfname = x.objfname
if Export_details == True:
ExportToFile = results_directory + "experiment_details.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag_details == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag_details = True # at least one experiment
executionTime[k] = x.executionTime
a = numpy.concatenate(
[[x.optimizer, x.objfname, x.executionTime], x.convergence]
)
writer.writerow(a)
out.close()
if Export == True:
ExportToFile = results_directory + "experiment.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag = True
avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns))
avgConvergence = numpy.around(
numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2
).tolist()
a = numpy.concatenate(
[[optimizerName, objfname, avgExecutionTime], avgConvergence]
)
writer.writerow(a)
out.close()
if Export_convergence == True:
conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)
if Export_boxplot == True:
box_plot.run(results_directory, optimizer, objectivefunc, Iterations)
if Flag == False: # Failed to run at least one experiment
print(
"No optimizer or cost function is selected. Check the lists of available optimizers and cost functions"
)
print("Execution completed")
| 38.173913 | 111 | 0.58955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,569 | 0.223377 |
167e133f17b315eee99f736bb553b46a271cd9cc | 1,614 | py | Python | tests/fields/test_primitive_types.py | slawak/dataclasses-avroschema | 04e69a176b3e72bfa0acd3edbd044ecd161b1a68 | [
"MIT"
] | null | null | null | tests/fields/test_primitive_types.py | slawak/dataclasses-avroschema | 04e69a176b3e72bfa0acd3edbd044ecd161b1a68 | [
"MIT"
] | null | null | null | tests/fields/test_primitive_types.py | slawak/dataclasses-avroschema | 04e69a176b3e72bfa0acd3edbd044ecd161b1a68 | [
"MIT"
] | null | null | null | import dataclasses
import pytest
from dataclasses_avroschema import fields
from . import consts
@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types(primitive_type):
name = "a_field"
field = fields.Field(name, primitive_type, dataclasses.MISSING)
avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type]
assert {"name": name, "type": avro_type} == field.to_dict()
@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types_with_default_value_none(primitive_type):
name = "a_field"
field = fields.Field(name, primitive_type, None)
avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]]
assert {"name": name, "type": avro_type, "default": fields.NULL} == field.to_dict()
@pytest.mark.parametrize("primitive_type,default", consts.PRIMITIVE_TYPES_AND_DEFAULTS)
def test_primitive_types_with_default_value(primitive_type, default):
name = "a_field"
field = fields.Field(name, primitive_type, default)
avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL]
assert {"name": name, "type": avro_type, "default": default} == field.to_dict()
@pytest.mark.parametrize(
"primitive_type,invalid_default", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS
)
def test_invalid_default_values(primitive_type, invalid_default):
name = "a_field"
field = fields.Field(name, primitive_type, invalid_default)
msg = f"Invalid default type. Default should be {primitive_type}"
with pytest.raises(AssertionError, match=msg):
field.to_dict()
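# --- Illustration (added; not part of the original test module) ---
# For reference, the mapping exercised above turns a plain ``str`` field with
# no default into a simple Avro entry (assuming PYTHON_TYPE_TO_AVRO maps
# str -> "string", as the parametrized tests rely on):
#
# fields.Field("name", str, dataclasses.MISSING).to_dict()
# -> {"name": "name", "type": "string"}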
| 34.340426 | 87 | 0.76456 | 0 | 0 | 0 | 0 | 1,503 | 0.931227 | 0 | 0 | 237 | 0.14684 |
167f92f56a42d5741ea4dde46075bf065ebbe3cd | 11,512 | py | Python | Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | c109f8cec3a81c732f335cd39752da6ae573b604 | [
"Apache-2.0"
] | 532 | 2015-03-13T18:51:10.000Z | 2022-03-27T08:08:29.000Z | Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | c109f8cec3a81c732f335cd39752da6ae573b604 | [
"Apache-2.0"
] | 2,701 | 2015-01-03T21:33:34.000Z | 2022-03-30T07:13:41.000Z | Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | c109f8cec3a81c732f335cd39752da6ae573b604 | [
"Apache-2.0"
] | 271 | 2015-02-16T23:25:29.000Z | 2022-03-30T20:12:17.000Z | # -------------------------------------------------------------------------- #
# OpenSim Moco: examplePredictAndTrack.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2018 Stanford University and the Authors #
# #
# Author(s): Christopher Dembia #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
import os
import math
import opensim as osim
"""
This file performs the following problems using a
double pendulum model:
1. predict an optimal trajectory (and controls),
2. track the states from the optimal trajectory, and
3. track the marker trajectories from the optimal trajectory.
"""
visualize = True
# The following environment variable is set during automated testing.
if os.getenv('OPENSIM_USE_VISUALIZER') == '0':
visualize = False
# Create a model of a double pendulum.
# ------------------------------------
def createDoublePendulumModel():
model = osim.Model()
model.setName("double_pendulum")
# Create two links, each with a mass of 1 kg, center of mass at the body's
# origin, moments of inertia of 1, and products of inertia of zero.
b0 = osim.Body("b0", 1, osim.Vec3(0), osim.Inertia(1))
model.addBody(b0)
b1 = osim.Body("b1", 1, osim.Vec3(0), osim.Inertia(1))
model.addBody(b1)
# Add markers to body origin locations.
m0 = osim.Marker("m0", b0, osim.Vec3(0))
m1 = osim.Marker("m1", b1, osim.Vec3(0))
model.addMarker(m0)
model.addMarker(m1)
# Connect the bodies with pin joints. Assume each body is 1 m long.
j0 = osim.PinJoint("j0", model.getGround(), osim.Vec3(0), osim.Vec3(0),
b0, osim.Vec3(-1, 0, 0), osim.Vec3(0))
q0 = j0.updCoordinate()
q0.setName("q0")
j1 = osim.PinJoint("j1",
b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0))
q1 = j1.updCoordinate()
q1.setName("q1")
model.addJoint(j0)
model.addJoint(j1)
tau0 = osim.CoordinateActuator()
tau0.setCoordinate(j0.updCoordinate())
tau0.setName("tau0")
tau0.setOptimalForce(1)
model.addComponent(tau0)
tau1 = osim.CoordinateActuator()
tau1.setCoordinate(j1.updCoordinate())
tau1.setName("tau1")
tau1.setOptimalForce(1)
model.addComponent(tau1)
# Add display geometry.
bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1)
transform = osim.Transform(osim.Vec3(-0.5, 0, 0))
b0Center = osim.PhysicalOffsetFrame("b0_center", b0, transform)
b0.addComponent(b0Center)
b0Center.attachGeometry(bodyGeometry.clone())
b1Center = osim.PhysicalOffsetFrame("b1_center", b1, transform)
b1.addComponent(b1Center)
b1Center.attachGeometry(bodyGeometry.clone())
model.finalizeConnections()
model.printToXML("double_pendulum.osim")
return model
def solvePrediction():
# Predict the optimal trajectory for a minimum time swing-up.
# In the diagram below, + represents the origin, and ---o represents a link
# in the double pendulum.
#
# o
# |
# o
# |
# +---o---o +
#
# iniital pose final pose
#
study = osim.MocoStudy()
study.setName("double_pendulum_predict")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
problem.setTimeBounds(0, [0, 5])
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0, 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0, 0)
problem.setControlInfo("/tau0", [-100, 100])
problem.setControlInfo("/tau1", [-100, 100])
# Cost: minimize final time and error from desired
# end effector position.
ftCost = osim.MocoFinalTimeGoal()
ftCost.setWeight(0.001)
problem.addGoal(ftCost)
finalCost = osim.MocoMarkerFinalGoal()
finalCost.setName("final")
finalCost.setWeight(1000.0)
finalCost.setPointName("/markerset/m1")
finalCost.setReferenceLocation(osim.Vec3(0, 2, 0))
problem.addGoal(finalCost)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(100)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
guess = solver.createGuess()
guess.setNumTimes(2)
guess.setTime([0, 1])
guess.setState("/jointset/j0/q0/value", [0, -math.pi])
guess.setState("/jointset/j1/q1/value", [0, 2*math.pi])
guess.setState("/jointset/j0/q0/speed", [0, 0])
guess.setState("/jointset/j1/q1/speed", [0, 0])
guess.setControl("/tau0", [0, 0])
guess.setControl("/tau1", [0, 0])
guess.resampleWithNumTimes(10)
solver.setGuess(guess)
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_predict.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_predict_solution.sto")
if visualize:
study.visualize(solution)
return solution
def computeMarkersReference(predictedSolution):
model = createDoublePendulumModel()
model.initSystem()
states = predictedSolution.exportToStatesTable()
statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states)
markerTrajectories = osim.TimeSeriesTableVec3()
markerTrajectories.setColumnLabels(["/markerset/m0", "/markerset/m1"])
for state in statesTraj:
model.realizePosition(state)
m0 = model.getComponent("markerset/m0")
m1 = model.getComponent("markerset/m1")
markerTrajectories.appendRow(state.getTime(),
osim.RowVectorVec3([m0.getLocationInGround(state),
m1.getLocationInGround(state)]))
# Assign a weight to each marker.
markerWeights = osim.SetMarkerWeights()
markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m0", 1))
markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m1", 5))
return osim.MarkersReference(markerTrajectories, markerWeights)
def solveStateTracking(stateRef):
# Track the states from the predicted minimum time swing-up trajectory.
study = osim.MocoStudy()
study.setName("double_pendulum_track")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime = stateRef.getIndependentColumn()[-1]
problem.setTimeBounds(0, finalTime)
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0)
problem.setControlInfo("/tau0", [-150, 150])
problem.setControlInfo("/tau1", [-150, 150])
# Cost: track provided state data.
stateTracking = osim.MocoStateTrackingGoal()
stateTracking.setReference(osim.TableProcessor(stateRef))
problem.addGoal(stateTracking)
effort = osim.MocoControlGoal()
effort.setName("effort")
effort.setWeight(0.001)
# TODO problem.addGoal(effort)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(50)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
solver.set_optim_jacobian_approximation("exact")
solver.set_optim_hessian_approximation("exact")
solver.set_exact_hessian_block_sparsity_mode("dense")
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_states.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_track_states_solution.sto")
if visualize:
study.visualize(solution)
return solution
def solveMarkerTracking(markersRef, guess):
# Track the marker trajectories from the predicted minimum time swing-up.
study = osim.MocoStudy()
study.setName("double_pendulum_track")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1]
problem.setTimeBounds(0, finalTime)
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0)
problem.setControlInfo("/tau0", [-100, 100])
problem.setControlInfo("/tau1", [-100, 100])
# Cost: track provided marker data.
markerTracking = osim.MocoMarkerTrackingGoal()
markerTracking.setMarkersReference(markersRef)
problem.addGoal(markerTracking)
effort = osim.MocoControlGoal()
effort.setName("effort")
effort.setWeight(0.0001)
# problem.addGoal(effort)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(50)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
solver.set_optim_jacobian_approximation("exact")
solver.set_optim_hessian_approximation("exact")
solver.set_exact_hessian_block_sparsity_mode("dense")
solver.setGuess(guess)
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_markers.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_track_markers_solution.sto")
if visualize:
study.visualize(solution)
return solution
optimalTrajectory = solvePrediction()
markersRef = computeMarkersReference(optimalTrajectory)
trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable())
trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)
| 34.160237 | 79 | 0.633687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,635 | 0.402623 |
1680693e61459262ca19480a0c2453b04b05a5a0 | 992 | py | Python | StorageSystem.py | aaronFritz2302/ZoomAuto | 41af90dc35104bfea970b6b61694e105a625535c | [
"MIT"
] | null | null | null | StorageSystem.py | aaronFritz2302/ZoomAuto | 41af90dc35104bfea970b6b61694e105a625535c | [
"MIT"
] | null | null | null | StorageSystem.py | aaronFritz2302/ZoomAuto | 41af90dc35104bfea970b6b61694e105a625535c | [
"MIT"
] | null | null | null | import sqlite3
from pandas import DataFrame
conn = sqlite3.connect('./data.db',check_same_thread=False)
class DataBase():
cursor = conn.cursor()
def __init__(self):
self.createTable()
def createTable(self):
'''
Creates a table if it doesn't exist
'''
conn.execute("""CREATE TABLE IF NOT EXISTS MeetingData (Name text,ID text,Password text, DateTime text,Audio text,Video Text)""")
def enterData(self,meetingData):
'''
Enters Data From The UI Table To The DataBase
'''
meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index = False)
def readData(self):
'''
Reads Data From The SQL DataBase
'''
self.cursor.execute('''SELECT * FROM MeetingData''')
retVal = DataFrame(self.cursor.fetchall(),columns=['Name','ID','Password','DateTime','Audio','Video'])
return retVal | 32 | 138 | 0.582661 | 865 | 0.871976 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.415323 |
1680b6fe6e7e3043a7d70ac1ab9bfc138b53e7ea | 5,255 | py | Python | pymapd/_parsers.py | mflaxman10/pymapd | 00b72ae399a0ff829507ee0b3a2b7404f3a06c26 | [
"Apache-2.0"
] | null | null | null | pymapd/_parsers.py | mflaxman10/pymapd | 00b72ae399a0ff829507ee0b3a2b7404f3a06c26 | [
"Apache-2.0"
] | null | null | null | pymapd/_parsers.py | mflaxman10/pymapd | 00b72ae399a0ff829507ee0b3a2b7404f3a06c26 | [
"Apache-2.0"
] | null | null | null | """
Utility methods for parsing data returned from MapD
"""
import datetime
from collections import namedtuple
from sqlalchemy import text
import mapd.ttypes as T
from ._utils import seconds_to_time
Description = namedtuple("Description", ["name", "type_code", "display_size",
"internal_size", "precision", "scale",
"null_ok"])
ColumnDetails = namedtuple("ColumnDetails", ["name", "type", "nullable",
"precision", "scale",
"comp_param"])
_typeattr = {
'SMALLINT': 'int',
'INT': 'int',
'BIGINT': 'int',
'TIME': 'int',
'TIMESTAMP': 'int',
'DATE': 'int',
'BOOL': 'int',
'FLOAT': 'real',
'DECIMAL': 'real',
'DOUBLE': 'real',
'STR': 'str',
}
_thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES
_thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES
def _extract_row_val(desc, val):
# type: (T.TColumnType, T.TDatum) -> Any
typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]
if val.is_null:
return None
val = getattr(val.val, _typeattr[typename] + '_val')
base = datetime.datetime(1970, 1, 1)
if typename == 'TIMESTAMP':
val = (base + datetime.timedelta(seconds=val))
elif typename == 'DATE':
val = (base + datetime.timedelta(seconds=val)).date()
elif typename == 'TIME':
val = seconds_to_time(val)
return val
def _extract_col_vals(desc, val):
# type: (T.TColumnType, T.TColumn) -> Any
typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]
nulls = val.nulls
vals = getattr(val.data, _typeattr[typename] + '_col')
vals = [None if null else v
for null, v in zip(nulls, vals)]
base = datetime.datetime(1970, 1, 1)
if typename == 'TIMESTAMP':
vals = [None if v is None else base + datetime.timedelta(seconds=v)
for v in vals]
elif typename == 'DATE':
vals = [None if v is None else (base +
datetime.timedelta(seconds=v)).date()
for v in vals]
elif typename == 'TIME':
vals = [None if v is None else seconds_to_time(v) for v in vals]
return vals
def _extract_description(row_desc):
# type: (List[T.TColumnType]) -> List[Description]
"""
Return a tuple of (name, type_code, display_size, internal_size,
precision, scale, null_ok)
https://www.python.org/dev/peps/pep-0249/#description
"""
return [Description(col.col_name, col.col_type.type,
None, None, None, None,
col.col_type.nullable)
for col in row_desc]
def _extract_column_details(row_desc):
# For Connection.get_table_details
return [
ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type],
x.col_type.nullable, x.col_type.precision,
x.col_type.scale, x.col_type.comp_param)
for x in row_desc
]
def _is_columnar(data):
# type: (T.TQueryResult) -> bool
return data.row_set.is_columnar
def _load_schema(buf):
"""
Load a `pyarrow.Schema` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
Returns
-------
schema : pyarrow.Schema
"""
import pyarrow as pa
reader = pa.RecordBatchStreamReader(buf)
return reader.schema
def _load_data(buf, schema):
"""
Load a `pandas.DataFrame` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
schema : pyarrow.Schema
Returns
-------
df : pandas.DataFrame
"""
import pyarrow as pa
message = pa.read_message(buf)
rb = pa.read_record_batch(message, schema)
return rb.to_pandas()
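# --- Usage sketch (added; not part of the original module) ---
# Hedged illustration of how _load_schema and _load_data are combined: the
# schema and the record batch live in two separate shared-memory buffers
# (obtained elsewhere, e.g. via shm.load_buffer); the buffer names below are
# assumptions.
#
# schema = _load_schema(schema_buf)
# df = _load_data(data_buf, schema) # -> pandas.DataFrame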
def _parse_tdf_gpu(tdf):
"""
Parse the results of a select ipc_gpu into a GpuDataFrame
Parameters
----------
tdf : TDataFrame
Returns
-------
gdf : GpuDataFrame
"""
import numpy as np
from pygdf.gpuarrow import GpuArrowReader
from pygdf.dataframe import DataFrame
from numba import cuda
from numba.cuda.cudadrv import drvapi
from .shm import load_buffer
ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle)
ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size)
ctx = cuda.current_context()
dptr = ipch.open(ctx)
schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size)
# TODO: extra copy.
schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8)
dtype = np.dtype(np.byte)
darr = cuda.devicearray.DeviceNDArray(shape=dptr.size,
strides=dtype.itemsize,
dtype=dtype,
gpu_data=dptr)
reader = GpuArrowReader(schema_buffer, darr)
df = DataFrame()
for k, v in reader.to_dict().items():
df[k] = v
return df
def _bind_parameters(operation, parameters):
return (text(operation)
.bindparams(**parameters)
.compile(compile_kwargs={"literal_binds": True}))
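# --- Usage sketch (added; not part of the original module) ---
# _bind_parameters renders named parameters as SQL literals via SQLAlchemy;
# the query and value below are illustrative only:
#
# str(_bind_parameters("SELECT * FROM t WHERE x = :val", {"val": 10}))
# -> "SELECT * FROM t WHERE x = 10"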
| 27.952128 | 79 | 0.597146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,404 | 0.267174 |
16833799777639519b435db61702159dbc70cb57 | 20,687 | py | Python | featuretools/entityset/entity.py | rohit901/featuretools | 20bee224782acf94909c2bf33239fd5332a8c1de | [
"BSD-3-Clause"
] | 1 | 2021-07-30T16:03:48.000Z | 2021-07-30T16:03:48.000Z | featuretools/entityset/entity.py | rohit901/featuretools | 20bee224782acf94909c2bf33239fd5332a8c1de | [
"BSD-3-Clause"
] | 13 | 2021-03-04T19:29:21.000Z | 2022-01-21T10:49:20.000Z | featuretools/entityset/entity.py | rohit901/featuretools | 20bee224782acf94909c2bf33239fd5332a8c1de | [
"BSD-3-Clause"
] | 2 | 2021-02-09T21:37:48.000Z | 2021-12-22T16:13:27.000Z | import logging
import warnings
import dask.dataframe as dd
import numpy as np
import pandas as pd
from featuretools import variable_types as vtypes
from featuretools.utils.entity_utils import (
col_is_datetime,
convert_all_variable_data,
convert_variable_data,
get_linked_vars,
infer_variable_types
)
from featuretools.utils.gen_utils import import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _dataframes_equal
from featuretools.variable_types import Text, find_variable_types
ks = import_or_none('databricks.koalas')
logger = logging.getLogger('featuretools.entityset')
_numeric_types = vtypes.PandasTypes._pandas_numerics
_categorical_types = [vtypes.PandasTypes._categorical]
_datetime_types = vtypes.PandasTypes._pandas_datetimes
class Entity(object):
"""Represents an entity in a Entityset, and stores relevant metadata and data
An Entity is analogous to a table in a relational database
See Also:
:class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`
"""
def __init__(self, id, df, entityset, variable_types=None,
index=None, time_index=None, secondary_time_index=None,
last_time_index=None, already_sorted=False, make_index=False,
verbose=False):
""" Create Entity
Args:
id (str): Id of Entity.
df (pd.DataFrame): Dataframe providing the data for the
entity.
entityset (EntitySet): Entityset for this Entity.
variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or type_string (str) or (type, kwargs) to pass keyword arguments to the Variable.
index (str): Name of id column in the dataframe.
time_index (str): Name of time column in the dataframe.
secondary_time_index (dict[str -> str]): Dictionary mapping columns
in the dataframe to the time index column they are associated with.
last_time_index (pd.Series): Time index of the last event for each
instance across all child entities.
make_index (bool, optional) : If True, assume index does not exist as a column in
dataframe, and create a new column of that name using the integers 0 to len(dataframe).
Otherwise, assume index exists in dataframe.
"""
_validate_entity_params(id, df, time_index)
created_index, index, df = _create_index(index, make_index, df)
self.id = id
self.entityset = entityset
self.data = {'df': df, 'last_time_index': last_time_index}
self.created_index = created_index
self._verbose = verbose
secondary_time_index = secondary_time_index or {}
self._create_variables(variable_types, index, time_index, secondary_time_index)
self.df = df[[v.id for v in self.variables]]
self.set_index(index)
self.time_index = None
if time_index:
self.set_time_index(time_index, already_sorted=already_sorted)
self.set_secondary_time_index(secondary_time_index)
def __repr__(self):
repr_out = u"Entity: {}\n".format(self.id)
repr_out += u" Variables:"
for v in self.variables:
repr_out += u"\n {} (dtype: {})".format(v.id, v.type_string)
shape = self.shape
repr_out += u"\n Shape:\n (Rows: {}, Columns: {})".format(
shape[0], shape[1])
return repr_out
@property
def shape(self):
'''Shape of the entity's dataframe'''
return self.df.shape
def __eq__(self, other, deep=False):
if self.index != other.index:
return False
if self.time_index != other.time_index:
return False
if self.secondary_time_index != other.secondary_time_index:
return False
if len(self.variables) != len(other.variables):
return False
if set(self.variables) != set(other.variables):
return False
if deep:
if self.last_time_index is None and other.last_time_index is not None:
return False
elif self.last_time_index is not None and other.last_time_index is None:
return False
elif self.last_time_index is not None and other.last_time_index is not None:
if not self.last_time_index.equals(other.last_time_index):
return False
if not _dataframes_equal(self.df, other.df):
return False
variables = {variable: (variable, ) for variable in self.variables}
for variable in other.variables:
variables[variable] += (variable, )
for self_var, other_var in variables.values():
if not self_var.__eq__(other_var, deep=True):
return False
return True
def __sizeof__(self):
return sum([value.__sizeof__() for value in self.data.values()])
@property
def df(self):
'''Dataframe providing the data for the entity.'''
return self.data["df"]
@df.setter
def df(self, _df):
self.data["df"] = _df
@property
def last_time_index(self):
'''
Time index of the last event for each instance across all child entities.
'''
return self.data["last_time_index"]
@last_time_index.setter
def last_time_index(self, lti):
self.data["last_time_index"] = lti
def __hash__(self):
return id(self.id)
def __getitem__(self, variable_id):
return self._get_variable(variable_id)
def _get_variable(self, variable_id):
"""Get variable instance
Args:
variable_id (str) : Id of variable to get.
Returns:
:class:`.Variable` : Instance of variable.
Raises:
RuntimeError : if no variable exist with provided id
"""
for v in self.variables:
if v.id == variable_id:
return v
raise KeyError("Variable: %s not found in entity" % (variable_id))
@property
def variable_types(self):
'''Dictionary mapping variable id's to variable types'''
return {v.id: type(v) for v in self.variables}
def convert_variable_type(self, variable_id, new_type,
convert_data=True,
**kwargs):
"""Convert variable in dataframe to different type
Args:
variable_id (str) : Id of variable to convert.
new_type (subclass of `Variable`) : Type of variable to convert to.
entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity.
convert_data (bool) : If True, convert underlying data in the EntitySet.
Raises:
RuntimeError : Raises if it cannot convert the underlying data
Examples:
>>> from featuretools.tests.testing_utils import make_ecommerce_entityset
>>> es = make_ecommerce_entityset()
>>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical)
"""
if convert_data:
# first, convert the underlying data (or at least try to)
self.df = convert_variable_data(df=self.df,
column_id=variable_id,
new_type=new_type,
**kwargs)
# replace the old variable with the new one, maintaining order
variable = self._get_variable(variable_id)
new_variable = new_type.create_from(variable)
self.variables[self.variables.index(variable)] = new_variable
def _create_variables(self, variable_types, index, time_index, secondary_time_index):
"""Extracts the variables from a dataframe
Args:
variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable.
index (str): Name of index column
time_index (str or None): Name of time_index column
secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
that each map to a list of columns that depend on that secondary time
"""
variables = []
variable_types = variable_types.copy() or {}
string_to_class_map = find_variable_types()
# TODO: Remove once Text has been removed from variable types
string_to_class_map[Text.type_string] = Text
for vid in variable_types.copy():
vtype = variable_types[vid]
if isinstance(vtype, str):
if vtype in string_to_class_map:
variable_types[vid] = string_to_class_map[vtype]
else:
variable_types[vid] = string_to_class_map['unknown']
warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".format(vtype))
if index not in variable_types:
variable_types[index] = vtypes.Index
link_vars = get_linked_vars(self)
inferred_variable_types = infer_variable_types(self.df,
link_vars,
variable_types,
time_index,
secondary_time_index)
inferred_variable_types.update(variable_types)
for v in inferred_variable_types:
# TODO document how vtype can be tuple
vtype = inferred_variable_types[v]
if isinstance(vtype, tuple):
# vtype is (ft.Variable, dict_of_kwargs)
_v = vtype[0](v, self, **vtype[1])
else:
_v = inferred_variable_types[v](v, self)
variables += [_v]
# convert data once we've inferred
self.df = convert_all_variable_data(df=self.df,
variable_types=inferred_variable_types)
# make sure index is at the beginning
index_variable = [v for v in variables
if v.id == index][0]
self.variables = [index_variable] + [v for v in variables
if v.id != index]
def update_data(self, df, already_sorted=False,
recalculate_last_time_indexes=True):
'''Update entity's internal dataframe, optionally making sure data is sorted,
reference indexes to other entities are consistent, and last_time_indexes
are consistent.
'''
if len(df.columns) != len(self.variables):
raise ValueError("Updated dataframe contains {} columns, expecting {}".format(len(df.columns),
len(self.variables)))
for v in self.variables:
if v.id not in df.columns:
raise ValueError("Updated dataframe is missing new {} column".format(v.id))
# Make sure column ordering matches variable ordering
self.df = df[[v.id for v in self.variables]]
self.set_index(self.index)
if self.time_index is not None:
self.set_time_index(self.time_index, already_sorted=already_sorted)
self.set_secondary_time_index(self.secondary_time_index)
if recalculate_last_time_indexes and self.last_time_index is not None:
self.entityset.add_last_time_indexes(updated_entities=[self.id])
self.entityset.reset_data_description()
def add_interesting_values(self, max_values=5, verbose=False):
"""
Find interesting values for categorical variables, to be used to
generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for variable in self.variables:
# some heuristics to find basic 'where'-able variables
if isinstance(variable, vtypes.Discrete):
variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype)
# TODO - consider removing this constraints
# don't add interesting values for entities in relationships
skip = False
for r in self.entityset.relationships:
if variable in [r.child_variable, r.parent_variable]:
skip = True
break
if skip:
continue
counts = self.df[variable.id].value_counts()
# find how many of each unique value there are; sort by count,
# and add interesting values to each variable
total_count = np.sum(counts)
counts[:] = counts.sort_values()[::-1]
for i in range(min(max_values, len(counts.index))):
idx = counts.index[i]
# add the value to interesting_values if it represents more than
# 25% of the values we have not seen so far
if len(counts.index) < 25:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
else:
fraction = counts[idx] / total_count
if fraction > 0.05 and fraction < 0.95:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
# total_count -= counts[idx]
else:
break
self.entityset.reset_data_description()
def delete_variables(self, variable_ids):
"""
Remove variables from entity's dataframe and from
self.variables
Args:
variable_ids (list[str]): Variables to delete
Returns:
None
"""
# check if variable is not a list
if not isinstance(variable_ids, list):
raise TypeError('variable_ids must be a list of variable names')
if len(variable_ids) == 0:
return
self.df = self.df.drop(variable_ids, axis=1)
for v_id in variable_ids:
v = self._get_variable(v_id)
self.variables.remove(v)
def set_time_index(self, variable_id, already_sorted=False):
# check time type
if not isinstance(self.df, pd.DataFrame) or self.df.empty:
time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype]
else:
time_to_check = self.df[variable_id].iloc[0]
time_type = _check_time_type(time_to_check)
if time_type is None:
raise TypeError("%s time index not recognized as numeric or"
" datetime" % (self.id))
if self.entityset.time_type is None:
self.entityset.time_type = time_type
elif self.entityset.time_type != time_type:
raise TypeError("%s time index is %s type which differs from"
" other entityset time indexes" %
(self.id, time_type))
if is_instance(self.df, (dd, ks), 'DataFrame'):
t = time_type # skip checking values
already_sorted = True # skip sorting
else:
t = vtypes.NumericTimeIndex
if col_is_datetime(self.df[variable_id]):
t = vtypes.DatetimeTimeIndex
# use stable sort
if not already_sorted:
# sort by time variable, then by index
self.df = self.df.sort_values([variable_id, self.index])
self.convert_variable_type(variable_id, t, convert_data=False)
self.time_index = variable_id
def set_index(self, variable_id, unique=True):
"""
Args:
variable_id (string) : Name of an existing variable to set as index.
unique (bool) : Whether to assert that the index is unique.
"""
if isinstance(self.df, pd.DataFrame):
self.df = self.df.set_index(self.df[variable_id], drop=False)
self.df.index.name = None
if unique:
assert self.df.index.is_unique, "Index is not unique on dataframe " \
"(Entity {})".format(self.id)
self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)
self.index = variable_id
def set_secondary_time_index(self, secondary_time_index):
for time_index, columns in secondary_time_index.items():
if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty:
time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype]
else:
time_to_check = self.df[time_index].head(1).iloc[0]
time_type = _check_time_type(time_to_check)
if time_type is None:
raise TypeError("%s time index not recognized as numeric or"
" datetime" % (self.id))
if self.entityset.time_type != time_type:
raise TypeError("%s time index is %s type which differs from"
" other entityset time indexes" %
(self.id, time_type))
if time_index not in columns:
columns.append(time_index)
self.secondary_time_index = secondary_time_index
def _create_index(index, make_index, df):
'''Handles index creation logic base on user input'''
created_index = None
if index is None:
# Case 1: user wanted to make index but did not specify column name
assert not make_index, "Must specify an index name if make_index is True"
# Case 2: make_index not specified but no index supplied, use first column
warnings.warn(("Using first column as index. "
"To change this, specify the index parameter"))
index = df.columns[0]
elif make_index and index in df.columns:
# Case 3: user wanted to make index but column already exists
raise RuntimeError("Cannot make index: index variable already present")
elif index not in df.columns:
if not make_index:
# Case 4: user names index, it is not in df. does not specify
# make_index. Make new index column and warn
warnings.warn("index {} not found in dataframe, creating new "
"integer column".format(index))
# Case 5: make_index with no errors or warnings
# (Case 4 also uses this code path)
if isinstance(df, dd.DataFrame):
df[index] = 1
df[index] = df[index].cumsum() - 1
elif is_instance(df, ks, 'DataFrame'):
df = df.koalas.attach_id_column('distributed-sequence', index)
else:
df.insert(0, index, range(len(df)))
created_index = index
# Case 6: user specified index, which is already in df. No action needed.
return created_index, index, df
def _validate_entity_params(id, df, time_index):
'''Validation checks for Entity inputs'''
assert isinstance(id, str), "Entity id must be a string"
assert len(df.columns) == len(set(df.columns)), "Duplicate column names"
for c in df.columns:
if not isinstance(c, str):
raise ValueError("All column names must be strings (Column {} "
"is not a string)".format(c))
if time_index is not None and time_index not in df.columns:
raise LookupError('Time index not found in dataframe')
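# --- Usage sketch (added; not part of the original library) ---
# Hedged illustration of the make_index path handled by _create_index above
# (Case 5), using a plain pandas DataFrame; the column names are assumptions.
#
# import pandas as pd
# df = pd.DataFrame({"value": [10, 20, 30]})
# created, index, df2 = _create_index("row_id", True, df)
# # df2 gains an integer "row_id" column 0..2 and created == "row_id"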
| 42.391393 | 124 | 0.592353 | 17,724 | 0.85677 | 0 | 0 | 736 | 0.035578 | 0 | 0 | 6,999 | 0.338328 |
16848dd03e02c952cce813e4092be02064f38ca9 | 1,470 | py | Python | githubdl/url_helpers.py | wilvk/githubdl | 1dc8c1c0d93a8e4b8155aecf4f5e73e2931ed920 | [
"MIT"
] | 16 | 2018-06-20T00:01:40.000Z | 2022-01-24T08:16:17.000Z | githubdl/url_helpers.py | wilvk/githubdl | 1dc8c1c0d93a8e4b8155aecf4f5e73e2931ed920 | [
"MIT"
] | 12 | 2018-07-18T21:09:37.000Z | 2020-03-28T23:38:13.000Z | githubdl/url_helpers.py | wilvk/githubdl | 1dc8c1c0d93a8e4b8155aecf4f5e73e2931ed920 | [
"MIT"
] | null | null | null | import re
from urllib.parse import urlparse
import logging
def check_url_is_http(repo_url):
predicate = re.compile('^https?://.*$')
match = predicate.search(repo_url)
return False if match is None else True
def check_url_is_ssh(repo_url):
predicate = re.compile(r'^git\@.*\.git$')
match = predicate.search(repo_url)
return False if match is None else True
def get_domain_name_from_http_url(repo_url):
site_object = urlparse(repo_url)
return site_object.netloc
def get_repo_name_from_http_url(repo_url):
site_object = urlparse(repo_url)
parsed_string = re.sub(r'\.git$', '', site_object.path)
if parsed_string[0] == '/':
return parsed_string[1:]
return parsed_string
def get_repo_name_from_ssh_url(repo_url):
predicate = re.compile(r'(?<=\:)(.*)(?=\.)')
match = predicate.search(repo_url)
return match.group()
def get_domain_name_from_ssh_url(repo_url):
predicate = re.compile(r'(?<=\@)(.*)(?=\:)')
match = predicate.search(repo_url)
return match.group()
def validate_protocol_exists(is_ssh, is_http):
if not is_ssh and not is_http:
err_message = "Error: repository url provided is not http(s) or ssh"
logging.critical(err_message)
raise RuntimeError(err_message)
def check_url_protocol(repo_url):
is_ssh = check_url_is_ssh(repo_url)
is_http = check_url_is_http(repo_url)
validate_protocol_exists(is_ssh, is_http)
return (is_ssh, is_http)
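# --- Usage sketch (added; not part of the original module) ---
# check_url_protocol classifies a repository URL and raises for anything that
# is neither http(s) nor ssh; the URLs below are illustrative:
#
# check_url_protocol("https://github.com/wilvk/githubdl")   # -> (False, True)
# check_url_protocol("[email protected]:wilvk/githubdl.git")    # -> (True, False)
# check_url_protocol("ftp://example.com/repo.git")          # raises RuntimeError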
| 31.276596 | 76 | 0.706803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.095238 |
168519bcca14cbc5945efcceae792622fe09d3d9 | 25,777 | py | Python | RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | 1 | 2021-05-08T02:23:00.000Z | 2021-05-08T02:23:00.000Z | import itertools
from ez_lib import ez_flow_tool
from collections import defaultdict
from ez_scheduler import EzScheduler
from ez_lib.ez_ob import CenUpdateInfo, UpdateNext
from misc import constants, logger
from domain.message import *
from collections import deque
from misc import global_vars
import time
import eventlet
mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL)
class CenCtrlScheduler(EzScheduler):
def __init__(self, switches_, log_):
self.switches = switches_
super(CenCtrlScheduler, self).__init__(0, log_)
self.remaining_vol_of_dependency_loop_on_link = {}
self.received_updated_msg = defaultdict()
self.received_removed_msg = defaultdict()
########## Begin three properties are used for parallel processes ##########
self.no_of_pending_msgs = {}
self.notification_queues = {x: deque([]) for x in self.switches}
self.current_notification_time = {x: -1 for x in self.switches}
self.current_processing_time = {x: -1 for x in self.switches}
        ########### End of properties used for parallel processes ###########
self.to_sames = defaultdict(list)
self.encounter_deadlock = False
self.do_segmentation = True
def reset(self):
super(CenCtrlScheduler, self).reset()
self.remaining_vol_of_dependency_loop_on_link = {}
self.received_updated_msg = defaultdict()
self.received_removed_msg = defaultdict()
        ########## The following three properties are used for parallel processes ##########
self.no_of_pending_msgs = {}
self.notification_queues = {x: deque([]) for x in self.switches}
self.current_notification_time = {x: -1 for x in self.switches}
self.current_processing_time = {x: -1 for x in self.switches}
        ########### End of properties used for parallel processes ###########
self.to_sames = defaultdict(list)
self.encounter_deadlock = False
self.do_segmentation = True
def __str__(self):
return "Centralized Controller"
@staticmethod
def init_logger():
return logger.getLogger("Centralized Controller", constants.LOG_LEVEL)
def create_dependency_graph(self, old_flows, new_flows):
time_start_computing = time.time() * 1000
ez_flow_tool.create_dependency_graph(old_flows, new_flows,
self.links_by_endpoints, self.segments_by_seg_path_id,
self.to_sames, do_segmentation=self.do_segmentation)
self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id)
self.log.debug(self.links_by_endpoints)
self.log.debug(self.segments_by_seg_path_id)
mulog.info("links by endpoints %s segs_by_segpath_id %s" % (self.links_by_endpoints,self.segments_by_seg_path_id))
# self.log.info("time to compute dependency graph: %s" % str(time() * 1000 - time_start_computing))
def process_coherent(self):
send_to_sames = set()
for key in self.to_sames.keys():
to_same = self.to_sames[key]
for sw in to_same:
send_to_sames.add(sw)
# for sw in send_to_sames:
# msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0)
# self.send_to_switch(msg, sw)
def compute_required_vol_for_dependency_loop(self, link):
self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0
for add_op in link.to_adds_loop:
self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \
+= self.segments_by_seg_path_id[add_op.seg_path_id].vol
def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id):
# pool = eventlet.GreenPool()
mulog.info("start finding dependency loop and sort updates")
mulog.info(links_by_endpoints)
for sw in self.switches:
# pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw,
# links_by_endpoints, segments_by_seg_path_id)
self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id)
# pool.waitall()
# for link in links_by_endpoints.values():
# ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id)
# global_vars.finish_prioritizing_time = time.clock()
def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id):
for link in links_by_endpoints.values():
if link.src == sw:
ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id)
for link in links_by_endpoints.values():
if link.src == sw:
self.compute_required_vol_for_dependency_loop(link)
current_time = time.clock()
if global_vars.finish_computation_time < current_time:
global_vars.finish_computation_time = time.clock()
def execute_all_remove_only_updates(self, update_infos):
for l_segment in self.segments_by_seg_path_id.values():
old_sws = set(l_segment.old_seg)
old_sws.add(l_segment.init_sw)
seg_path_id = l_segment.seg_path_id
self.received_removed_msg[seg_path_id] = set()
if l_segment.remove_only:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
for sw in old_sws:
update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id,
sw, constants.REMOVE_NEXT)
l_segment.update_status = constants.SENT_REMOVING
def update_message_queues(self, update_infos, process_update_info_func):
increased = set()
related_sws = set([])
for key in update_infos.keys():
update_info = update_infos[key]
# self.logger.info("Process update info %s at %d ms from starting" % (update_info, (time() - self.current_start_time)*1000))
assert update_info, CenUpdateInfo
for sw in update_infos[key].update_nexts.keys():
if sw not in increased:
self.current_notification_time[sw] += 1
increased.add(sw)
self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0
#update_next = update_info.update_nexts[sw]
process_update_info_func(sw, update_info)
self.log.debug("add message in processing update_info: %s" % update_info)
self.log.debug("pending messages: %s" % str(self.no_of_pending_msgs))
related_sws.add(sw) #self.datapaths[sw + 1])
return related_sws
def increase_processing_time(self, sw):
self.current_processing_time[sw] += 1
def enque_msg_to_notification_queue(self, sw, msg):
self.notification_queues[sw].append(msg)
self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1
def deque_msg_from_notification_queue(self, sw):
msg = self.notification_queues[sw].popleft()
self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1
return msg
def has_pending_msg_of_sw(self, sw):
return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0
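    # Bookkeeping sketch (added commentary, not in the original source): each switch
    # sw keeps a per-round counter no_of_pending_msgs[(sw, t)].
    # update_message_queues() bumps current_notification_time[sw] once per round,
    # enque_msg_to_notification_queue() increments the counter for that round, and
    # deque_msg_from_notification_queue() decrements the counter for the round being
    # processed (current_processing_time[sw]); has_pending_msg_of_sw() stays True
    # while the current round still has queued messages.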
# def check_all_capable_for_link(self, link, executable_segments_by_link):
# capable_segments = []
# done_loop = True
# endpoints = (link.src, link.dst)
# total_vol = 0
# for op in link.to_adds_loop:
# l_segment = self.segments_by_seg_path_id[op.seg_path_id]
# if l_segment.update_status == constants.NOTHING:
# done_loop = False
# total_vol += l_segment.vol
#
# def check_and_send_possible_update_by_link(self, update_infos):
# executable_segments_by_link = {}
# executable_link_by_segments = {}
# for link in self.links_by_endpoints.values():
# self.check_all_capable_for_link(link, executable_segments_by_link)
def total_pending_cycle_vol(self, link):
total_vol = 0
for add_op in link.to_adds + link.to_adds_loop + link.to_adds_only:
total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol
return total_vol
    def check_to_split(self, link, l_segment):
        # Stubbed out in the original source; splittable_vol() unpacks the result,
        # so return a (splittable, split_vol) pair instead of None.
        return False, 0
def splittable_vol(self, seg_path_id):
# TODO: Update remaining_vol_of_loop when adding or removing segment
final_split_vol = 0
l_segment = self.segments_by_seg_path_id[seg_path_id]
for endpoints in l_segment.new_link_seg:
link = self.links_by_endpoints[endpoints]
is_add_only = False
for op in link.to_adds_only:
if op.seg_path_id == seg_path_id:
return 0
splittable, split_vol = self.check_to_split(link, l_segment)
if splittable and final_split_vol > split_vol > 0:
final_split_vol = split_vol
self.log.debug("capable %s" % l_segment)
return final_split_vol
def check_and_send_possible_split_updates(self, update_infos):
has_execution = True
while has_execution:
has_execution = False
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status != constants.NOTHING:
continue
seg_path_id = l_segment.seg_path_id
self.log.debug(l_segment)
split_vol = self.splittable_vol(l_segment.seg_path_id)
if split_vol > 0:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
update_info = update_infos[seg_path_id]
update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
l_segment.new_seg[0],
constants.UPDATE_NEXT)
for i in range(len(l_segment.new_seg) - 1):
# self.log.debug("send to sw%s" % str(l_segment.new_seg[i]))
next_sw = l_segment.new_seg[i + 1]
update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.ADD_NEXT)
self.received_updated_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_ADDING
l_segment.is_splitting = True
for pair in l_segment.new_link_seg:
self.log.info("avail_cap of link %s: %f, "
"give %f to segment %s" % (str(pair),
self.links_by_endpoints[pair].avail_cap,
l_segment.vol,
str(l_segment.seg_path_id)))
self.links_by_endpoints[pair].avail_cap -= split_vol
for u_op in self.links_by_endpoints[pair].to_adds_loop:
if u_op.seg_path_id == l_segment.seg_path_id:
self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol
count = 0
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status == constants.NOTHING:
count += 1
self.log.debug("number of flows that is not done anything %d" % count)
def check_possible_update_by_links(self, update_infos):
has_execution = True
while has_execution:
has_execution = False
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status != constants.NOTHING:
continue
seg_path_id = l_segment.seg_path_id
self.log.debug(l_segment)
if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
update_info = update_infos[seg_path_id]
update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
l_segment.new_seg[0],
constants.UPDATE_NEXT)
for i in range(len(l_segment.new_seg) - 1):
next_sw = l_segment.new_seg[i + 1]
update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.ADD_NEXT)
self.received_updated_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_ADDING
for pair in l_segment.new_link_seg:
self.links_by_endpoints[pair].avail_cap -= l_segment.vol
for u_op in self.links_by_endpoints[pair].to_adds_loop:
if u_op.seg_path_id == l_segment.seg_path_id:
self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol
count = 0
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status == constants.NOTHING:
count += 1
self.log.debug("number of flows that is not done anything %d" % count)
def check_and_send_possible_updates(self, update_infos):
has_execution = True
while has_execution:
has_execution = False
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status != constants.NOTHING:
continue
seg_path_id = l_segment.seg_path_id
self.log.debug(l_segment)
mulog.info("chk&send psb_uds for linksegment %s"%l_segment)
if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
update_info = update_infos[seg_path_id]
update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
l_segment.new_seg[0],
constants.UPDATE_NEXT)
for i in range(len(l_segment.new_seg) - 1):
next_sw = l_segment.new_seg[i + 1]
update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.ADD_NEXT)
self.received_updated_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_ADDING
for pair in l_segment.new_link_seg:
self.links_by_endpoints[pair].avail_cap -= l_segment.vol
for u_op in self.links_by_endpoints[pair].to_adds_loop:
if u_op.seg_path_id == l_segment.seg_path_id:
self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol
count = 0
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status == constants.NOTHING:
count += 1
self.log.debug("number of flows that is not done anything %d" % count)
def check_and_do_next_update(self, msg):
update_infos = defaultdict(CenUpdateInfo)
if not self.received_updated_msg.has_key(msg.seg_path_id):
self.received_updated_msg[msg.seg_path_id] = set()
self.received_updated_msg[msg.seg_path_id].add(msg.src_id)
self.log.debug("handle updated msg %s" % msg)
assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True
link_segment = self.segments_by_seg_path_id[msg.seg_path_id]
# self.log.info("receive updated msgs for segment %s, new_seg_length = %d"
# % (str(link_segment.seg_path_id), len(link_segment.new_seg)))
if link_segment.update_status == constants.SENT_ADDING \
and len(self.received_updated_msg[msg.seg_path_id]) == \
len(link_segment.new_seg):
self.finish_adding_new_path(link_segment, update_infos)
return update_infos
def finish_adding_new_path(self, link_segment, update_infos):
self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000
if len(link_segment.old_seg) < 1:
link_segment.update_status = constants.FINISH_ALL
else:
# self.log.info("receive enough updated msgs for segment %s" % str(link_segment.seg_path_id))
link_segment.update_status = constants.FINISH_ADDING
self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment)
def remove_segment_and_check_to_update(self, msg):
assert isinstance(msg, NotificationMessage)
update_infos = defaultdict(CenUpdateInfo)
self.log.debug("handle removed msg %s" % msg)
self.received_removed_msg[msg.seg_path_id].add(msg.src_id)
link_segment = self.segments_by_seg_path_id[msg.seg_path_id]
next_idx = 0
if msg.src_id != link_segment.init_sw:
next_idx = link_segment.old_seg.index(msg.src_id) + 1
if next_idx < len(link_segment.old_seg):
dst = link_segment.old_seg[next_idx]
pair = (msg.src_id, dst)
self.links_by_endpoints[pair].avail_cap += link_segment.vol
# self.log.info("avail_cap of link %d->%d: %f, "
# "get from segment %s" % (msg.src_id, dst,
# self.links_by_endpoints[pair].avail_cap,
# str(link_segment.seg_path_id)))
if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1:
link_segment.update_status = constants.FINISH_ALL
self.log.debug("finish %s" % str(link_segment.seg_path_id))
self.check_and_send_possible_updates(update_infos)
return update_infos
def check_finish_update(self):
count = 0
finished = True
for link_segment in self.segments_by_seg_path_id.values():
if link_segment.update_status != constants.FINISH_ALL:
update_status = ''
if link_segment.update_status == constants.NOTHING:
count += 1
update_status = "NOTHING"
if link_segment.update_status == constants.SENT_ADDING:
self.log.debug("must receive %d more UPDATED msgs" % (len(link_segment.new_seg)-1))
self.log.debug("received from: %s" % self.received_updated_msg[link_segment.seg_path_id])
update_status = "SENT_ADDING"
elif link_segment.update_status == constants.SENT_REMOVING:
self.log.debug("must receive %d more REMOVED msgs" % (len(link_segment.old_seg)-1))
self.log.debug("received from: %s" % self.received_removed_msg[link_segment.seg_path_id])
update_status = "SENT REMOVING"
elif link_segment.update_status == constants.FINISH_ADDING:
update_status = "FINISH_ADDING"
elif link_segment.update_status == constants.FINISH_REMOVING:
update_status = "FINISH_REMOVING"
self.log.debug("segment %s is not finished! update_status %s." % (str(link_segment.seg_path_id), update_status))
# return False
finished = False
break
has_no_pending_barrier = self.has_not_pending_msg()
if not has_no_pending_barrier:
return constants.ON_GOING
elif not finished:
self.log.debug("number of flows that is not done anything %d" % count)
self.scheduling_mode = constants.CONGESTION_MODE
return constants.ENCOUNTER_DEADLOCK
else:
current_mode = self.scheduling_mode
self.scheduling_mode = constants.NORMAL_MODE
if current_mode == constants.CONGESTION_MODE:
return constants.FINISHED_WITH_DEADLOCK
else:
return constants.FINISHED_WITHOUT_DEADLOCK
def has_not_pending_msg(self):
self.log.debug("pending queue: %s" % str(self.no_of_pending_msgs))
for queue_len in self.no_of_pending_msgs.values():
if queue_len > 0:
return False
return True
def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment):
seg_path_id = l_segment.seg_path_id
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src,
l_segment.flow_dst)
pair = (l_segment.init_sw, l_segment.old_seg[0])
self.links_by_endpoints[pair].avail_cap += l_segment.vol
# self.log.info("avail_cap of link %d->%d: %f, "
# "get from segment %s" % (l_segment.init_sw,
# l_segment.old_seg[0],
# self.links_by_endpoints[pair].avail_cap,
# str(l_segment.seg_path_id)))
if len(l_segment.old_seg) > 1:
for i in range(len(l_segment.old_seg) - 1):
# self.log.debug("send to: %s" % l_segment.old_seg[i])
next_sw = l_segment.old_seg[i + 1]
update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.REMOVE_NEXT)
self.received_removed_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_REMOVING
else:
l_segment.update_status = constants.FINISH_ALL
def are_all_moving_in_ops_finished(self, link):
for u_op in link.to_adds + link.to_adds_loop:
current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status
if current_state == constants.NOTHING \
or current_state == constants.SENT_ADDING:
return False
return True
def is_capable(self, seg_path_id):
# TODO: Update remaining_vol_of_loop when adding or removing segment
l_segment = self.segments_by_seg_path_id[seg_path_id]
for endpoints in l_segment.new_link_seg:
link = self.links_by_endpoints[endpoints]
is_dependency_loop_op = False
for op in link.to_adds_loop:
if op.seg_path_id == seg_path_id:
is_dependency_loop_op = True
break
is_add_only = False
for op in link.to_adds_only:
if op.seg_path_id == seg_path_id:
is_add_only = True
break
if (not is_dependency_loop_op and (link.avail_cap - l_segment.vol
< self.remaining_vol_of_dependency_loop_on_link[endpoints])) \
or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\
or (is_add_only and (not self.are_all_moving_in_ops_finished(link)
or link.avail_cap < l_segment.vol)):
return False
self.log.debug("capable %s" % l_segment)
return True
| 54.039832 | 136 | 0.58141 | 25,386 | 0.984831 | 0 | 0 | 115 | 0.004461 | 0 | 0 | 3,672 | 0.142453 |
16863f0872927e8b824cd132c78fbf22829a951a | 892 | py | Python | src/trackbar.py | clovadev/opencv-python | f9c685f8dc658f630a9742f4dd55663bde03fe7d | [
"MIT"
] | null | null | null | src/trackbar.py | clovadev/opencv-python | f9c685f8dc658f630a9742f4dd55663bde03fe7d | [
"MIT"
] | null | null | null | src/trackbar.py | clovadev/opencv-python | f9c685f8dc658f630a9742f4dd55663bde03fe7d | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
def nothing(x):
pass
# Create a black image, a window
img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')
# create trackbars for color change
cv.createTrackbar('R', 'image', 0, 255, nothing)
cv.createTrackbar('G', 'image', 0, 255, nothing)
cv.createTrackbar('B', 'image', 0, 255, nothing)
# create switch for ON/OFF functionality
switch = 'OFF/ON'
cv.createTrackbar(switch, 'image', 0, 1, nothing)
while True:
# get current positions of four trackbars
r = cv.getTrackbarPos('R', 'image')
g = cv.getTrackbarPos('G', 'image')
b = cv.getTrackbarPos('B', 'image')
s = cv.getTrackbarPos(switch, 'image')
    # If the switch is off show black, otherwise show the selected colour
if s == 0:
img[:] = 0
else:
img[:] = [b, g, r]
    # Display the image
cv.imshow('image', img)
if cv.waitKey(10) > 0:
break
cv.destroyAllWindows()
| 22.3 | 49 | 0.618834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.346482 |
1687efc3eb23ad09ae90d5260997fa4ec210ea9f | 1,246 | py | Python | aoc_2015/src/day20.py | ambertests/adventofcode | 140ed1d71ed647d30d1e6572964cab1e89dfd105 | [
"MIT"
] | null | null | null | aoc_2015/src/day20.py | ambertests/adventofcode | 140ed1d71ed647d30d1e6572964cab1e89dfd105 | [
"MIT"
] | null | null | null | aoc_2015/src/day20.py | ambertests/adventofcode | 140ed1d71ed647d30d1e6572964cab1e89dfd105 | [
"MIT"
] | null | null | null | from functools import reduce
# https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
def factors(n):
step = 2 if n%2 else 1
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5)+1, step) if not n % i)))
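# Illustrative check (added, not in the original): factors(12) == {1, 2, 3, 4, 6, 12};
# for odd n the step of 2 skips even candidate divisors entirely.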
def solve(target):
house_count = 0
deliveries = {}
complete = set()
pt1 = 0
pt2 = 0
while pt1 == 0 or pt2 == 0:
house_count += 1
gifts1 = 0
gifts2 = 0
elves = factors(house_count)
if pt1 == 0:
gifts1 = sum(elves)*10
if gifts1 >= target:
pt1 = house_count
if pt2 == 0:
working = elves.difference(complete)
for elf in working:
if elf in deliveries:
deliveries[elf] += 1
if deliveries[elf] == 50:
complete.add(elf)
else:
deliveries[elf] = 1
gifts2 = sum(working)*11
if gifts2 >= target:
pt2 = house_count
return pt1, pt2
# takes around 20s
pt1, pt2 = solve(29000000)
print("Part 1:", pt1)
print("Part 2:", pt2)
| 27.688889 | 125 | 0.50321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.129213 |
1688724e3867c7e8e39adb6579cee704e885e634 | 1,604 | py | Python | setup.py | jean/labels | dcb6f40fb4e222068e302202dd5d7d98b4771e4b | [
"MIT"
] | 1 | 2019-11-06T14:08:40.000Z | 2019-11-06T14:08:40.000Z | setup.py | jean/labels | dcb6f40fb4e222068e302202dd5d7d98b4771e4b | [
"MIT"
] | null | null | null | setup.py | jean/labels | dcb6f40fb4e222068e302202dd5d7d98b4771e4b | [
"MIT"
] | null | null | null | import pathlib
import setuptools
def read(*args: str) -> str:
file_path = pathlib.Path(__file__).parent.joinpath(*args)
return file_path.read_text("utf-8")
setuptools.setup(
name="labels",
version="0.3.0.dev0",
author="Raphael Pierzina",
author_email="[email protected]",
maintainer="Raphael Pierzina",
maintainer_email="[email protected]",
license="MIT",
url="https://github.com/hackebrot/labels",
project_urls={
"Repository": "https://github.com/hackebrot/labels",
"Issues": "https://github.com/hackebrot/labels/issues",
},
description="CLI app for managing GitHub labels for Python 3.6 and newer. 📝",
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
zip_safe=False,
python_requires=">=3.6",
install_requires=["click", "requests", "pytoml", "attrs"],
entry_points={"console_scripts": ["labels = labels.cli:labels"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
],
keywords=["github", "command-line"],
)
| 34.12766 | 81 | 0.640898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 836 | 0.520224 |
168883ce786ac5e2bf642b55446a3bcf835eeaa8 | 275 | py | Python | colab/__init__.py | caseywstark/colab | e05293e45a657eda19d733bf05624a1613a7a9b7 | [
"MIT"
] | 1 | 2015-11-05T11:49:32.000Z | 2015-11-05T11:49:32.000Z | colab/__init__.py | caseywstark/colab | e05293e45a657eda19d733bf05624a1613a7a9b7 | [
"MIT"
] | null | null | null | colab/__init__.py | caseywstark/colab | e05293e45a657eda19d733bf05624a1613a7a9b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__about__ = """
This project demonstrates a social networking site. It provides profiles,
friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps,
locations and user-to-user messaging.
In 0.5 this was called "complete_project".
"""
| 27.5 | 74 | 0.705455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.938182 |
1689397a49d0387c8d71492ecee794b05a45ba83 | 862 | py | Python | src/ralph/ui/forms/util.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/ui/forms/util.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/ui/forms/util.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ralph.business.models import Venture, VentureRole
def all_ventures():
yield '', '---------'
for v in Venture.objects.filter(show_in_ralph=True).order_by('path'):
yield (
v.id,
"%s[%s] %s" % (
'\u00A0' * 4 * v.path.count('/'), # u00A0 == 'no-break space'
v.symbol,
v.name,
)
)
def all_roles():
yield '', '---------'
for r in VentureRole.objects.order_by(
'-venture__is_infrastructure', 'venture__name',
'parent__parent__name', 'parent__name', 'name'
):
yield r.id, '{} / {}'.format(r.venture.name, r.full_name)
| 28.733333 | 78 | 0.558005 | 0 | 0 | 627 | 0.727378 | 0 | 0 | 0 | 0 | 199 | 0.230858 |
1689e31b5f0f44d60b97128a67d87b2730238b68 | 28 | py | Python | tests/syntax/missing_in_with_for.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | [
"MIT"
] | 287 | 2019-04-08T13:18:29.000Z | 2021-03-14T19:10:21.000Z | tests/syntax/missing_in_with_for.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | [
"MIT"
] | 191 | 2019-04-08T14:39:18.000Z | 2021-03-14T22:14:56.000Z | tests/syntax/missing_in_with_for.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | [
"MIT"
] | 9 | 2019-04-08T12:54:08.000Z | 2020-11-20T02:26:27.000Z | for x range(4):
print(x)
| 9.333333 | 15 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
168b7cd601c412154d052fac8164eeb139aec911 | 4,769 | py | Python | services/users/manage.py | eventprotocol/event-protocol-webapp | 38ccdc63bc744576ebb3631b7e17cfd4a09216b6 | [
"MIT"
] | null | null | null | services/users/manage.py | eventprotocol/event-protocol-webapp | 38ccdc63bc744576ebb3631b7e17cfd4a09216b6 | [
"MIT"
] | 11 | 2020-09-05T14:16:23.000Z | 2022-03-03T22:33:14.000Z | services/users/manage.py | eventprotocol/event-protocol-webapp | 38ccdc63bc744576ebb3631b7e17cfd4a09216b6 | [
"MIT"
] | null | null | null | """
manage.py for flask application
"""
import unittest
import coverage
import os
from flask.cli import FlaskGroup
from project import create_app, db
from project.api.models import User
# Code coverage
COV = coverage.Coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/config.py',
]
)
COV.start()
app = create_app()
cli = FlaskGroup(create_app=create_app)
@cli.command()
def cov():
"""
Runs the unit tests with coverage
"""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
return 0
return -1
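# Example invocation (assuming the usual Flask CLI setup for this service):
#   python manage.py cov
# runs the test suite and, on success, writes an HTML coverage report to tmp/coverage/.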
@cli.command()
def recreate_db():
"""
Destroys all db and recreates a new db
"""
db.drop_all()
db.create_all()
db.session.commit()
@cli.command()
def test():
"""
Runs test without code coverage
"""
tests = unittest.TestLoader().discover(
'project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return -1
@cli.command()
def seed_db():
"""
Seeds the database with some initial data
"""
user1 = User(
eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower())
user1.username = "Meeting Room Of The Century"
user1.email = "[email protected]"
user1.city_country = "Singapore, SG"
user1.tags = "Meeting Spaces"
user1.about = '''This is the best meeting space you will ever see'''
user1.seller_detail = '''We sell space'''
user1.buyer_detail = '''We are not buying'''
user2 = User(
eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower())
user2.username = "Makeup Till You Breakup"
user2.email = "[email protected]"
user2.city_country = "Singapore, SG"
user2.tags = "Stylist"
user2.about = '''Reimagine your looks with us'''
user2.seller_detail = '''We are serving looks tonight'''
user2.buyer_detail = '''We are not buying'''
user3 = User(
eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower())
user3.username = "Heart Attack Buffet"
user3.email = "[email protected]"
user3.city_country = "Singapore, SG"
user3.tags = "Buffet"
user3.about = '''Eat till you get a heart attack'''
user3.seller_detail = '''We sell food'''
user3.buyer_detail = '''We are not buying'''
user4 = User(
eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower())
user4.username = "Pleasant Photography"
user4.email = "[email protected]"
user4.city_country = "Singapore, SG"
user4.tags = "Photography"
user4.about = ('We are a group of photographers specialized in wedding'
'photography. '
'We have won numerous awards for our photos. '
'We will capture your '
'memories in ways you cannot imagine.')
user4.seller_detail = '''We sell photos'''
user4.buyer_detail = '''We are not buying'''
user5 = User(
eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower())
user5.username = "Epic Winebar"
user5.email = "[email protected]"
user5.city_country = "Singapore, SG"
user5.tags = "Bar, Restaurant"
user5.about = ('Award winnning winebar with the best selection of alcohol.'
'We serve delicious international cuisine, with fusion'
'dishes inspired from our travels. We are always ready for'
'your craziest events.')
user5.seller_detail = '''We sell wine'''
user5.buyer_detail = '''We are not buying'''
user6 = User(
eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower())
user6.username = "Dancers Who Dance"
user6.email = "[email protected]"
user6.city_country = "Singapore, SG"
user6.tags = "Performer"
user6.about = ('Dancers who dance are people who like to dance alot.'
'Give us music and we will dance for you.')
user6.seller_detail = '''We sell dance'''
user6.buyer_detail = '''We are not buying'''
db.session.add(user1)
db.session.add(user2)
db.session.add(user3)
db.session.add(user4)
db.session.add(user5)
db.session.add(user6)
db.session.commit()
if __name__ == '__main__':
cli()
| 29.621118 | 79 | 0.642063 | 0 | 0 | 0 | 0 | 4,298 | 0.901237 | 0 | 0 | 1,996 | 0.418536 |
168bb7123d253d48e67b56f36bbcad938db24dd7 | 1,750 | py | Python | keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | [
"Apache-2.0"
] | 3 | 2021-02-14T17:10:59.000Z | 2021-02-14T18:09:17.000Z | keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | [
"Apache-2.0"
] | null | null | null | keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | [
"Apache-2.0"
] | null | null | null | import os
from keras.callbacks import ModelCheckpoint
from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC
from keras_transformer.utils.io_utils import save_to_pickle
class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC):
def __init__(self, workspace_path, artifacts, callbacks, **kwargs):
super().__init__(os.path.join(workspace_path, "model-{epoch:01d}.h5"), **kwargs)
self._workspace_path = workspace_path
self._artifacts = artifacts
self._completed_epoch = 0
self._callbacks = callbacks
def on_epoch_end(self, epoch, logs=None):
super().on_epoch_end(epoch, logs)
self._completed_epoch += 1
self.update_artifacts()
should_save = False
if self.epochs_since_last_save == 0:
if self.save_best_only:
current = logs.get(self.monitor)
if current == self.best:
should_save = True
else:
should_save = True
if should_save:
save_to_pickle(self._artifacts, os.path.join(self._workspace_path, "artifacts-" + str(epoch+1) + ".pkl"))
def update_artifacts(self):
for callback in self._callbacks:
self._artifacts["callbacks"][callback.get_name()] = callback.get_artifacts()
self._artifacts["callbacks"][self.get_name()] = self.get_artifacts()
def get_name(self):
return self.__class__.__name__
def get_artifacts(self):
return {"best_score": self.best, "completed_epoch": self._completed_epoch}
def prepare_from_artifacts(self, artifacts):
self.best = artifacts["best_score"]
self._completed_epoch = artifacts["completed_epoch"]
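# Usage sketch (illustrative; names and paths are assumptions, not part of the original):
#   inner_callbacks = [...]  # custom callbacks implementing CustomCallbackABC
#   checkpointer = CustomCheckpointer("workspace/", {"callbacks": {}}, inner_callbacks,
#                                     monitor="val_loss", save_best_only=True)
#   model.fit(x, y, epochs=10, callbacks=inner_callbacks + [checkpointer])
# The artifacts dict needs a "callbacks" key because update_artifacts() writes into it.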
| 35 | 117 | 0.671429 | 1,540 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.068571 |
168c810ecd449bb3eb263646cbc454470f8c28e4 | 527 | py | Python | train_test_val.py | arashk7/Yolo5_Dataset_Generator | aeba58b51201b8521478c777b40c4d31f0c60be9 | [
"Apache-2.0"
] | null | null | null | train_test_val.py | arashk7/Yolo5_Dataset_Generator | aeba58b51201b8521478c777b40c4d31f0c60be9 | [
"Apache-2.0"
] | null | null | null | train_test_val.py | arashk7/Yolo5_Dataset_Generator | aeba58b51201b8521478c777b40c4d31f0c60be9 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
input_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5'
output_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5\ZhitangYolo5'
in_img_dir = os.path.join(input_dir, 'Images')
in_label_dir = os.path.join(input_dir, 'Labels')
out_img_dir = os.path.join(output_dir, 'images')
out_label_dir = os.path.join(output_dir, 'labels')
splits = {'train','test','valid'}
files = os.listdir(in_img_dir)
count = len(files)
for f in files:
print(f)
    src = os.path.join(in_img_dir, f)
    # 'dst' was undefined and 'src' pointed at the dataset root in the original;
    # assuming each image listed from the input images folder should be copied
    # into the output images folder defined above.
    dst = os.path.join(out_img_dir, f)
    shutil.copyfile(src, dst)
| 22.913043 | 68 | 0.736243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.282732 |
168cde4a792e9985c473078c1d3e1678761198e7 | 4,873 | py | Python | homeassistant/components/media_player/pjlink.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 4 | 2019-01-10T14:47:54.000Z | 2021-04-22T02:06:27.000Z | homeassistant/components/media_player/pjlink.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | homeassistant/components/media_player/pjlink.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z | """
Support for controlling projector via the PJLink protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.pjlink/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pypjlink2==1.2.0']
_LOGGER = logging.getLogger(__name__)
CONF_ENCODING = 'encoding'
DEFAULT_PORT = 4352
DEFAULT_ENCODING = 'utf-8'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
})
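# Example configuration.yaml entry for this platform (host value is illustrative):
#
#   media_player:
#     - platform: pjlink
#       host: 192.168.1.10
#       port: 4352
#       name: Projector
#       encoding: utf-8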
SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PJLink platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
encoding = config.get(CONF_ENCODING)
password = config.get(CONF_PASSWORD)
if 'pjlink' not in hass.data:
hass.data['pjlink'] = {}
hass_data = hass.data['pjlink']
device_label = "{}:{}".format(host, port)
if device_label in hass_data:
return
device = PjLinkDevice(host, port, name, encoding, password)
hass_data[device_label] = device
add_entities([device], True)
def format_input_source(input_source_name, input_source_number):
"""Format input source for display in UI."""
return "{} {}".format(input_source_name, input_source_number)
class PjLinkDevice(MediaPlayerDevice):
"""Representation of a PJLink device."""
def __init__(self, host, port, name, encoding, password):
"""Iinitialize the PJLink device."""
self._host = host
self._port = port
self._name = name
self._password = password
self._encoding = encoding
self._muted = False
self._pwstate = STATE_OFF
self._current_source = None
with self.projector() as projector:
if not self._name:
self._name = projector.get_name()
inputs = projector.get_inputs()
self._source_name_mapping = \
{format_input_source(*x): x for x in inputs}
self._source_list = sorted(self._source_name_mapping.keys())
def projector(self):
"""Create PJLink Projector instance."""
from pypjlink import Projector
projector = Projector.from_address(
self._host, self._port, self._encoding)
projector.authenticate(self._password)
return projector
def update(self):
"""Get the latest state from the device."""
with self.projector() as projector:
pwstate = projector.get_power()
if pwstate == 'off':
self._pwstate = STATE_OFF
else:
self._pwstate = STATE_ON
self._muted = projector.get_mute()[1]
self._current_source = \
format_input_source(*projector.get_input())
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def is_volume_muted(self):
"""Return boolean indicating mute status."""
return self._muted
@property
def source(self):
"""Return current input source."""
return self._current_source
@property
def source_list(self):
"""Return all available input sources."""
return self._source_list
@property
def supported_features(self):
"""Return projector supported features."""
return SUPPORT_PJLINK
def turn_off(self):
"""Turn projector off."""
with self.projector() as projector:
projector.set_power('off')
def turn_on(self):
"""Turn projector on."""
with self.projector() as projector:
projector.set_power('on')
def mute_volume(self, mute):
"""Mute (true) of unmute (false) media player."""
with self.projector() as projector:
from pypjlink import MUTE_AUDIO
projector.set_mute(MUTE_AUDIO, mute)
def select_source(self, source):
"""Set the input source."""
source = self._source_name_mapping[source]
with self.projector() as projector:
projector.set_input(*source)
| 31.038217 | 78 | 0.65668 | 2,927 | 0.600657 | 0 | 0 | 678 | 0.139134 | 0 | 0 | 881 | 0.180792 |
168da4e09bd5b50aa5b8cd08e50f215c17b399b2 | 608 | py | Python | leetcode/regex_matching.py | Kaushalya/algo_journal | bcea8afda0dc86b36452378e3bcff9b0f57d6856 | [
"Apache-2.0"
] | null | null | null | leetcode/regex_matching.py | Kaushalya/algo_journal | bcea8afda0dc86b36452378e3bcff9b0f57d6856 | [
"Apache-2.0"
] | null | null | null | leetcode/regex_matching.py | Kaushalya/algo_journal | bcea8afda0dc86b36452378e3bcff9b0f57d6856 | [
"Apache-2.0"
] | null | null | null | # Level: Hard
def isMatch(s: str, p: str) -> bool:
if not p:
return not s
n_s = len(s)
n_p = len(p)
j = 0
i = -1
while i < n_s-1:
i = i+ 1
if j >= n_p:
return False
if p[j] == '*':
while s[i]==s[i-1]:
i += 1
j += 1
if p[j] == '.' or s[i] == p[j]:
j += 1
# continue
elif s[i] != p[j] and j<n_p-1:
j += 2
else:
return False
return True
if __name__ == "__main__":
ss = 'abbbbbc'
p = 'a*'
print(isMatch(ss, p)) | 17.882353 | 39 | 0.361842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.085526 |
168db9c8444379377b3a611c0a5f87f083f3ec4d | 3,217 | py | Python | tests/factories.py | luzik/waliki | b7db696075ceebb5676be61f44e2d806cc472255 | [
"BSD-3-Clause"
] | 324 | 2015-01-02T20:48:33.000Z | 2021-12-11T14:44:34.000Z | tests/factories.py | luzik/waliki | b7db696075ceebb5676be61f44e2d806cc472255 | [
"BSD-3-Clause"
] | 103 | 2015-01-02T03:01:34.000Z | 2020-04-02T19:03:53.000Z | tests/factories.py | luzik/waliki | b7db696075ceebb5676be61f44e2d806cc472255 | [
"BSD-3-Clause"
] | 84 | 2015-01-07T08:53:05.000Z | 2021-01-04T00:26:38.000Z | import factory
from django.contrib.auth.models import User, Group, Permission
from waliki.models import ACLRule, Page, Redirect
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: u'user{0}'.format(n))
password = factory.PostGenerationMethodCall('set_password', 'pass')
email = factory.LazyAttribute(lambda o: '%[email protected]' % o.username)
class Meta:
model = User
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class GroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = Group
name = factory.Sequence(lambda n: "Group #%s" % n)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for user in extracted:
self.user_set.add(user)
class ACLRuleFactory(factory.django.DjangoModelFactory):
class Meta:
model = ACLRule
name = factory.Sequence(lambda n: u'Rule {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def permissions(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for perm in extracted:
if not isinstance(perm, Permission):
perm = Permission.objects.get(content_type__app_label='waliki', codename=perm)
self.permissions.add(perm)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for user in extracted:
self.users.add(user)
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class PageFactory(factory.django.DjangoModelFactory):
title = factory.Sequence(lambda n: u'Page {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def raw(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
self.raw = extracted
class Meta:
model = Page
class RedirectFactory(factory.django.DjangoModelFactory):
old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n))
new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n))
class Meta:
model = Redirect
| 28.723214 | 98 | 0.608642 | 3,073 | 0.955238 | 0 | 0 | 1,920 | 0.596829 | 0 | 0 | 512 | 0.159154 |
168dc722af15d363851566ae2eeabcf9ccc50653 | 68,372 | py | Python | nxt_editor/commands.py | dalteocraft/nxt_editor | 18992da7cfa89769568434ec08d787510e09f1c4 | [
"MIT"
] | 131 | 2020-12-03T08:01:26.000Z | 2022-03-07T03:41:37.000Z | nxt_editor/commands.py | dalteocraft/nxt_editor | 18992da7cfa89769568434ec08d787510e09f1c4 | [
"MIT"
] | 127 | 2020-12-07T21:43:02.000Z | 2022-02-17T22:31:14.000Z | nxt_editor/commands.py | dalteocraft/nxt_editor | 18992da7cfa89769568434ec08d787510e09f1c4 | [
"MIT"
] | 17 | 2020-12-08T08:06:44.000Z | 2021-11-18T05:40:11.000Z | # Built-in
import copy
import logging
import time
# External
from Qt.QtWidgets import QUndoCommand
# Internal
from nxt_editor import colors
from nxt_editor import user_dir
from nxt import nxt_path
from nxt.nxt_layer import LAYERS, SAVE_KEY
from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict,
list_merger)
from nxt import nxt_io
from nxt import GRID_SIZE
import nxt_editor
logger = logging.getLogger(nxt_editor.LOGGER_NAME)
def processing(func):
def wrapper(self):
self.model.processing.emit(True)
func(self)
self.model.processing.emit(False)
return wrapper
class NxtCommand(QUndoCommand):
def __init__(self, model):
super(NxtCommand, self).__init__()
self.model = model
self.model.layer_saved.connect(self.reset_layer_effected)
self._layers_effected_by_me = {}
def _get_effects(self, layer_path):
"""Gets the effected state for a given layer with context to this
command. Since a single command can effect layers in different ways.
:param layer_path: string of layer real path
:return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo)
"""
first_eff_by_undo = False
first_eff_by_redo = False
try:
first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo']
except KeyError:
pass
try:
first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo']
except KeyError:
pass
return first_eff_by_undo, first_eff_by_redo
def reset_layer_effected(self, layer_just_saved):
"""When the model marks a layer as saved we reset the class attr
`_first_effected_by_redo` to False. This makes sure the layer is
properly marked as unsaved even if we undo an action after saving it.
:param layer_just_saved: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved)
where_were_at = self.model.undo_stack.index()
cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1))
if cur_cmd is self:
return
if layer_just_saved in self._layers_effected_by_me:
if eff_by_undo:
# This command has already been marked as undo effects the
# layer, meaning the layer has been saved and the undo queue
# was moved to an index before this command and the same
# layer was saved again.
eff_by_redo = True
eff_by_undo = False
else:
# Now the undo of this command effects the layer not the redo
eff_by_redo = False
eff_by_undo = True
self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo,
'redo': eff_by_redo}
def redo_effected_layer(self, layer_path):
"""Adds layer to the model's set of effected (unsaved) layers. If
this command was the first to effect the layer we mark it as such
by setting the class attr `_first_effected_by_redo` to True.
:param layer_path: string of layer real path
:return: None
"""
layer_unsaved = layer_path in self.model.effected_layers
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
if not eff_by_undo and layer_unsaved:
return
if not eff_by_undo:
self._layers_effected_by_me[layer_path] = {'undo': False,
'redo': True}
self.model.effected_layers.add(layer_path)
else:
# Layer was saved and then undo was called, thus this redo has a
# net zero effect on the layer
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
def undo_effected_layer(self, layer_path):
"""Removes layer from the model's set of effected (unsaved) layers.
If the layer is not marked as effected in the model we mark it as
effected. This case happens when undo is called after a layer is saved.
:param layer_path: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
layer_saved = layer_path not in self.model.effected_layers
if layer_saved:
eff_by_undo = True
# Set redo to False since now its been saved & the undo effects it
eff_by_redo = False
self.model.effected_layers.add(layer_path)
elif eff_by_redo:
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo,
'redo': eff_by_redo}
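    # Added summary of the bookkeeping above (commentary, not original code): the
    # first time a command's redo touches a layer it records
    # {'undo': False, 'redo': True} and marks the layer unsaved; undoing that same
    # command removes the layer from the unsaved set again. When a layer is saved,
    # reset_layer_effected() flips the flags so that a later undo correctly
    # re-marks the saved layer as unsaved.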
class AddNode(NxtCommand):
"""Add a node to the graph"""
def __init__(self, name, data, parent_path, pos, model, layer_path):
super(AddNode, self).__init__(model)
self.name = name
self.data = data
self.parent_path = parent_path
self.layer_path = layer_path
self.stage = model.stage
# command data
self.pos = pos or [0.0, 0.0]
self.prev_selection = self.model.selection
# resulting node
self.node_path = None
self.created_node_paths = []
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
dirty_nodes = []
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
_, dirty = self.stage.delete_node(node, layer,
remove_layer_data=False)
dirty_nodes += dirty
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
comp_layer = self.model.comp_layer
if node is not None:
# delete node
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data)
dirty_nodes += dirty
dirty_nodes += self.created_node_paths
dirty_nodes += [self.node_path]
self.undo_effected_layer(self.layer_path)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = self.prev_selection
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.created_node_paths = []
dirty_nodes = []
nodes, dirty = self.stage.add_node(name=self.name, data=self.data,
parent=self.parent_path,
layer=layer.layer_idx(),
comp_layer=self.model.comp_layer)
dirty_nodes += dirty
self.node_path = layer.get_node_path(nodes[0])
self.model._set_node_pos(node_path=self.node_path, pos=self.pos,
layer=layer)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = [self.node_path]
self.redo_effected_layer(layer.real_path)
self.setText('Added node: {}'.format(self.node_path))
class DeleteNode(NxtCommand):
def __init__(self, node_path, model, layer_path, other_removed_nodes):
"""Delete node from the layer at the layer path and the comp layer.
It is important to note that the other_removed_nodes
list must be shared by other DeleteNode commands in a command macro.
The list will be mutated by the stage as it deletes node, this
behavior is depended upon!
:param node_path: String of node path
:param model: StageModel
:param layer_path: String of layer realpath
:param other_removed_nodes: list of node paths that will be deleted
in this event loop.
"""
super(DeleteNode, self).__init__(model)
self.layer_path = layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
self.prev_starts = []
self.prev_breaks = {}
self.node_path = node_path
self.node_data = {}
self.others = other_removed_nodes
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
parent = self.node_data['parent']
# We don't want to fix names because we know this node should be
# named what it was named when it was deleted
new_nodes, dirty = self.stage.add_node(name=self.node_data['name'],
data=self.node_data['save_dict'],
parent=parent,
layer=layer.layer_idx(),
comp_layer=comp_layer,
fix_names=False)
if self.node_data['break']:
self.model._add_breakpoint(self.node_path, layer)
self.model._add_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._add_start_node(self.node_path, layer)
# restore layer data
pos = self.node_data.get('pos')
if pos:
self.model.top_layer.positions[self.node_path] = pos
# This might be a bug? We don't touch the top layer in redo...
self.undo_effected_layer(self.stage.top_layer.real_path)
attr_display = self.node_data.get('attr_display')
if attr_display is not None:
self.model._set_attr_display_state(self.node_path, attr_display)
user_dir.breakpoints = self.prev_breaks
ancestor_tuple = self.node_data.get('ancestor_child_order')
if ancestor_tuple:
ancestor_path, ancestor_child_order = ancestor_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER,
ancestor_child_order)
self.model.selection = self.prev_selection
# Fixme: Does not account for rebuilding proxy nodes for the dirty nodes
dirty_set = tuple(set(dirty))
self.undo_effected_layer(self.layer_path)
if dirty_set != (self.node_path,):
self.model.update_comp_layer(rebuild=True)
else:
self.model.nodes_changed.emit(dirty_set)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
self.node_data = {}
self.prev_starts = self.model.get_start_nodes(layer)
self.prev_breaks = user_dir.breakpoints
dirty_nodes = []
node = layer.lookup(self.node_path)
# get node info
parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
name = getattr(node, INTERNAL_ATTRS.NAME)
is_break = self.model.get_is_node_breakpoint(self.node_path, layer)
self.node_data = {'parent': parent, 'name': name,
'pos': self.model.get_node_pos(self.node_path),
'break': is_break}
closest_ancestor = layer.ancestors(self.node_path)
if closest_ancestor:
closest_ancestor = closest_ancestor[0]
else:
closest_ancestor = None
closest_ancestor_path = layer.get_node_path(closest_ancestor)
if closest_ancestor_path:
ancestor_child_order = getattr(closest_ancestor,
INTERNAL_ATTRS.CHILD_ORDER)
self.node_data['ancestor_child_order'] = (closest_ancestor_path,
ancestor_child_order[:])
# Attr display data
attr_display = self.model.get_attr_display_state(self.node_path)
if attr_display is not None:
self.node_data['attr_display'] = attr_display
# get layer data
is_start = self.model.get_is_node_start(self.node_path, layer)
self.node_data['start'] = is_start
self.node_data['save_dict'] = get_node_as_dict(node)
if self.node_data['break']:
self.model._remove_breakpoint(self.node_path, layer)
self.model._remove_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._remove_start_node(self.node_path, layer)
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
for p in self.others[:]:
self.others += comp_layer.get_node_dirties(p)
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data,
other_removed_nodes=self.others)
dirty_nodes += dirty + [self.node_path]
if self.node_path in self.model.selection:
fix_selection = self.model.selection[:]
fix_selection.remove(self.node_path)
self.model.selection = fix_selection
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.redo_effected_layer(layer.real_path)
self.setText("Delete node: {}".format(self.node_path))
class SetNodeAttributeData(NxtCommand):
"""Set attribute value"""
def __init__(self, node_path, attr_name, data, model, layer_path):
super(SetNodeAttributeData, self).__init__(model)
self.node_path = node_path
self.nice_attr_name = attr_name
self.attr_name = attr_name
self.data = data
self.stage = model.stage
self.layer_path = layer_path
self.created_node_paths = []
self.remove_attr = False
self.prev_data = {}
self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP
self.return_value = None
self.prev_selection = model.selection
@processing
def undo(self):
start = time.time()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
comp = self.model.comp_layer
dirties = [self.node_path]
# delete any created nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=layer, comp_layer=comp,
remove_layer_data=False)
n = layer.lookup(self.node_path)
if n is not None:
if self.remove_attr:
self.stage.delete_node_attr(n, self.attr_name)
dirties += comp.get_node_dirties(self.node_path)
else:
result = self.stage.node_setattr_data(node=n,
attr=self.attr_name,
layer=layer, create=False,
comp_layer=comp,
**self.prev_data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += result
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
self.model.attrs_changed.emit(changed_attrs)
if not self.recomp:
changed = tuple([self.node_path] + self.created_node_paths)
self.model.nodes_changed.emit(changed)
self.model.selection = self.prev_selection
# undo_debug(self, start)
@processing
def redo(self):
start = time.time()
created_node = False
self.prev_selection = self.model.selection
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
comp = self.model.comp_layer
self.remove_attr = False
self.created_node_paths = []
# get the node
node = layer.lookup(self.node_path)
dirties = [self.node_path]
if node is None:
parent_path = nxt_path.get_parent_path(self.node_path)
name = nxt_path.node_name_from_node_path(self.node_path)
if self.attr_name in INTERNAL_ATTRS.ALL:
self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name)
attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)}
else:
attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}}
self.return_value = self.attr_name
_, dirties = self.stage.add_node(name=name, data=attr_data,
parent=parent_path,
layer=layer.layer_idx(),
comp_layer=comp,
fix_names=False)
# Fixme: Targeted parenting would avoid the need for a recomp
if layer.descendants(self.node_path):
self.recomp = True
created_node = True
self.created_node_paths += [self.node_path]
node = layer.lookup(self.node_path)
self.prev_data = self.stage.get_node_attr_data(node, self.attr_name,
layer, quiet=True)
if self.prev_data:
self.prev_data = copy.deepcopy(self.prev_data)
# set attribute value this also adds the attribute if it does not exist
if not self.stage.node_attr_exists(node, self.attr_name):
self.remove_attr = True
if not created_node:
self.return_value = self.stage.node_setattr_data(node,
self.attr_name,
layer=layer,
create=True,
comp_layer=comp,
**self.data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += self.return_value
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
self.model.attrs_changed.emit(changed_attrs)
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
val = str(self.data.get(META_ATTRS.VALUE))
self.setText("Set {} to {}".format(attr_path, val))
# redo_debug(self, start)
class SetNodeAttributeValue(SetNodeAttributeData):
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data,
model, layer_path)
class RenameNode(SetNodeAttributeValue):
"""Rename node"""
def __init__(self, node_path, name, model, layer_path):
self.old_node_path = node_path
layer = model.lookup_layer(layer_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(name=name, layer=layer,
parent_path=parent_path,
layer_only=True)
super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME,
new_name, model, layer_path)
def undo(self):
self.model.about_to_rename.emit()
self.prev_data['force'] = True
super(RenameNode, self).undo()
self.node_path = self.old_node_path
self.model.selection = [self.node_path]
def redo(self):
self.model.about_to_rename.emit()
super(RenameNode, self).redo()
self.node_path = self.return_value
self.model.selection = [self.node_path]
if self.model.get_is_node_start(self.node_path, self.model.comp_layer):
self.model.starts_changed.emit(self.model.get_start_nodes())
self.setText("{} renamed to {}".format(self.old_node_path,
self.return_value))
class DuplicateNodes(NxtCommand):
"""Duplicate nodes on this graph"""
def __init__(self, node_paths, descendants, model, source_layer_path,
target_layer_path):
# TODO: We should make another base command class that can be used to
# set multiple attr's data. That way duplicate can just be a
# setattr. The way it works now we can only set one attr's data at a
# time and duplicate needs to get local + INTERNAL number of attrs.
super(DuplicateNodes, self).__init__(model)
self.node_paths = node_paths
self.descendants = descendants
self.source_layer_path = source_layer_path
self.target_layer_path = target_layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
# resulting nodes
self.new_node_paths = []
@processing
def undo(self):
target_layer = self.model.lookup_layer(self.target_layer_path)
# delete duplicated nodes
for node_path in self.new_node_paths:
n = target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, target_layer,
remove_layer_data=True)
self.model.selection = self.prev_selection
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(target_layer.real_path)
@processing
def redo(self):
new_selection = []
self.new_node_paths = []
source_layer = self.model.lookup_layer(self.source_layer_path)
target_layer = self.model.lookup_layer(self.target_layer_path)
self.redo_effected_layer(target_layer.real_path)
for node_path in self.node_paths:
node = source_layer.lookup(node_path)
# duplicate node
new, dirty = self.stage.duplicate_node(node=node,
layer=target_layer,
descendants=self.descendants)
new_selection.append(target_layer.get_node_path(new[0]))
# process new nodes
for new_node in new:
# add new node path to the list and emit model signal
new_node_path = target_layer.get_node_path(new_node)
self.new_node_paths += [new_node_path]
# self.model.node_added.emit(new_node_path)
# set position
has_parent = self.model.node_has_parent(new_node_path,
target_layer)
if not has_parent and new_node_path != node_path:
pos = self.model.get_node_pos(node_path)
pos = [pos[0] + 20, pos[1] + 20]
self.model._set_node_pos(new_node_path, pos,
layer=target_layer)
self.model.selection = new_selection
self.model.update_comp_layer(rebuild=True)
if len(self.node_paths) == 1:
nodes_str = self.node_paths[0]
else:
nodes_str = 'nodes'
self.setText('Duplicated {}'.format(nodes_str))
class InstanceNode(SetNodeAttributeValue):
"""Instance nodes on this graph"""
def __init__(self, node_path, model, source_layer_path, target_layer_path):
src_name = nxt_path.node_name_from_node_path(node_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(src_name,
model.comp_layer,
parent_path=parent_path)
new_path = nxt_path.join_node_paths(parent_path, new_name)
self.new_path = new_path
super(InstanceNode, self).__init__(new_path,
INTERNAL_ATTRS.INSTANCE_PATH,
node_path, model, target_layer_path)
def redo(self):
node_path = self.data.get(META_ATTRS.VALUE)
layer = self.model.lookup_layer(self.layer_path)
new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0),
layer)
self.model._set_node_pos(self.new_path, new_pos, layer)
super(InstanceNode, self).redo()
self.return_value = self.new_path
self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE)))
class SetNodesPosition(NxtCommand):
"""Move nodes"""
def __init__(self, node_positions, model, layer_path):
super(SetNodesPosition, self).__init__(model)
self.model = model
self.layer_path = layer_path
self.new_positions = node_positions
self.old_positions = {}
for path in self.new_positions.keys():
self.old_positions[path] = model.get_node_pos(path)
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
for node_path, old_pos in self.old_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=old_pos, layer=layer)
self.undo_effected_layer(self.layer_path)
@processing
def redo(self):
delta_str = None
layer = self.model.lookup_layer(self.layer_path)
for node_path, new_pos in self.new_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=new_pos, layer=layer)
if not delta_str:
pos = new_pos
prev_pos = self.old_positions[node_path]
# Only letting it set text once, relying on consistent delta.
x_delta = pos[0] - prev_pos[0]
y_delta = pos[1] - prev_pos[1]
delta_str = '{}, {}'.format(x_delta, y_delta)
if len(self.new_positions) == 1:
nodes_str = node_path
else:
nodes_str = 'nodes'
self.setText('Move {} {}'.format(nodes_str, delta_str))
self.redo_effected_layer(layer.real_path)
class SetSelection(QUndoCommand):
"""Select Nodes and Connections"""
def __init__(self, paths, model):
super(SetSelection, self).__init__()
self.new_paths = paths
self.model = model
self.prev_paths = self.model.selection
def undo(self):
self.model.selection = self.prev_paths
def redo(self):
self.model.selection = self.new_paths
self.setText('Set selection: {}'.format(str(self.new_paths)))
class AddSelection(SetSelection):
def __init__(self, paths, model):
self.added_paths = paths
curr_selection = model.selection
new_paths = curr_selection + paths
super(AddSelection, self).__init__(new_paths, model)
def redo(self):
super(AddSelection, self).redo()
self.setText('Add {} to selection'.format(self.added_paths))
class RemoveFromSelection(SetSelection):
def __init__(self, paths, model):
self.rem_paths = paths
new_selection = model.selection[:]
for path in paths:
try:
new_selection.remove(path)
except ValueError:
continue
super(RemoveFromSelection, self).__init__(new_selection, model)
def redo(self):
super(RemoveFromSelection, self).redo()
self.setText('Remove {} from selection'.format(self.rem_paths))
class LocalizeNodes(NxtCommand):
"""Localize nodes"""
def __init__(self, node_paths, model):
super(LocalizeNodes, self).__init__(model)
self.node_paths = node_paths
self.model = model
self.stage = model.stage
self.prev_selection = self.model.selection
self.prev_node_data = {}
self.created_node_paths = []
@processing
def undo(self):
for node_path in self.created_node_paths:
n = self.model.target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=self.model.target_layer,
remove_layer_data=False)
layers = [self.model.target_layer]
for node_path, all_data in self.prev_node_data.items():
apply_data = {}
node = self.model.target_layer.lookup(node_path)
if not node:
continue
data = all_data['data']
child_order = all_data['data'].get('child_order', [])
apply_data['child_order'] = child_order
apply_data['attributes'] = data.get('attributes', {})
attrs_to_keep = apply_data['attributes'].keys()
apply_data['enabled'] = data.get('enabled')
if data.get('instance'):
apply_data['instance'] = data['instance']
self.stage.transfer_node_data(node, self.model.target_layer,
apply_data, self.model.comp_layer)
local_attrs = self.stage.get_node_local_attr_names(node_path,
layers)
for attr in local_attrs:
if attr not in attrs_to_keep:
self.stage.delete_node_attr(node=node, attr_name=attr)
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(layers[0].real_path)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.created_node_paths = []
layer = self.model.target_layer
for node_path in self.node_paths:
node_data = {}
display_node = self.model.comp_layer.lookup(node_path)
if not display_node:
continue
# add node if it doesn't exist on the target layer
target_node = self.model.target_layer.lookup(node_path)
if not target_node:
new_nodes, new_paths, dirty = _add_node_hierarchy(node_path,
self.model,
layer)
target_node = new_nodes[-1]
self.created_node_paths += new_paths
# self.model.node_added.emit(node_path)
# preserve original data
node_data['data'] = get_node_as_dict(target_node)
# localize source node
self.stage.transfer_node_data(target_node, self.model.target_layer,
display_node,
self.model.comp_layer)
self.prev_node_data[node_path] = node_data
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.redo_effected_layer(layer.real_path)
self.model.selection = self.prev_selection
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText('Localize {}'.format(str(path_str)))
class LocalizeUserAttr(SetNodeAttributeData):
"""Localize nodes"""
def __init__(self, node_path, attr_name, model, layer_path):
node = model.comp_layer.lookup(node_path)
data = model.stage.get_node_attr_data(node, attr_name,
model.comp_layer)
if META_ATTRS.SOURCE in data:
data.pop(META_ATTRS.SOURCE)
super(LocalizeUserAttr, self).__init__(node_path, attr_name, data,
model, layer_path)
class LocalizeCompute(SetNodeAttributeValue):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path):
comp_layer = model.comp_layer
display_node = comp_layer.lookup(node_path)
code_lines = model.stage.get_node_code_lines(display_node, comp_layer)
super(LocalizeCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(LocalizeCompute, self).redo()
self.setText("Localize compute on {}".format(self.node_path))
class LocalizeInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
inst_path = model.get_node_instance_path(node_path, model.comp_layer,
expand=False)
super(LocalizeInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
inst_path, model, layer_path)
def redo(self):
super(LocalizeInstancePath, self).redo()
self.setText("Localize instance path to {}".format(self.node_path))
class RevertInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
None, model, layer_path)
def redo(self):
super(RevertInstancePath, self).redo()
self.setText("Revert instance path on {}".format(self.node_path))
class LocalizeExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
exec_path = model.get_node_exec_in(node_path)
super(LocalizeExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_path, model, layer_path)
def redo(self):
super(LocalizeExecPath, self).redo()
self.setText("Localize exec input on {}".format(self.node_path))
class RevertExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN, None,
model, layer_path)
def redo(self):
self.setText("Revert exec input on {}".format(self.node_path))
class RevertNode(DeleteNode):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path, others):
super(RevertNode, self).__init__(node_path, model, layer_path, others)
self.rebuild = False # Tells the delete command not to re-comp
self.created_node_paths = []
self.node_path = node_path
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
# Remove our created empty nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer, remove_layer_data=False)
super(RevertNode, self).undo()
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
def redo(self):
self.created_node_paths = []
super(RevertNode, self).redo()
layer = self.model.lookup_layer(self.layer_path)
# Re-create the node as an empty node
new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path,
self.model, layer)
self.created_node_paths += new_paths
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.model.selection = self.prev_selection
self.setText('Revert {}'.format(self.node_path))
class ParentNodes(NxtCommand):
"""Parent Nodes"""
def __init__(self, node_paths, parent_node_path, model):
super(ParentNodes, self).__init__(model)
self.parent_node_path = parent_node_path
self.parent_node = None
self.model = model
self.stage = model.stage
self.node_paths = node_paths
# resulting nodes
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
# get node selection for undo
self.prev_selection = self.model.selection
# get previous node data for all child nodes for undo
self.prev_node_data = {}
@processing
def undo(self):
layer = self.model.target_layer
self.undo_effected_layer(layer.real_path)
# undo parent
common_parent_nodes = {}
for old_path, node_data in self.prev_node_data.items():
prev_parent_path = node_data['parent']
prev_parent_node = layer.lookup(prev_parent_path)
new_path = self.node_path_data[old_path]
node = layer.lookup(new_path)
if prev_parent_path not in list(common_parent_nodes.keys()):
common_parent_nodes[prev_parent_path] = {node: old_path}
else:
common_parent_nodes[prev_parent_path][node] = old_path
child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER)
if child_order_tuple:
ancestor_path, child_order = child_order_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
self.stage.set_node_child_order(ancestor, child_order,
layer)
if new_path in list(self.model.top_layer.positions.keys()):
source_layer = self.stage.get_node_source_layer(node)
source_layer.positions.pop(new_path)
for parent_path, nodes_dict in common_parent_nodes.items():
self.stage.parent_nodes(nodes=list(nodes_dict.keys()),
parent_path=parent_path,
layer=layer)
for parent_path, nodes_dict in common_parent_nodes.items():
for node, old_path in nodes_dict.items():
node_data = self.prev_node_data[old_path]
# restore name
prev_name = node_data['name']
name = getattr(node, INTERNAL_ATTRS.NAME)
if name != prev_name:
self.stage.set_node_name(node, name=prev_name,
layer=layer, force=True)
# restore position
if self.parent_node_path != nxt_path.WORLD:
prev_pos = node_data['pos']
source_layer = self.stage.get_node_source_layer(node)
self.model._set_node_pos(old_path, prev_pos,
layer=source_layer)
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
self.stage.delete_node(node, layer)
idx = 0
for old_node_path in self.node_paths:
new_node_path = self.new_node_paths[idx]
attr_state = self.model.remove_attr_display_state(new_node_path)
if attr_state is not None:
self.model._set_attr_display_state(old_node_path, attr_state)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
nodes = []
layer = self.model.target_layer
self.redo_effected_layer(layer.real_path)
for node_path in self.node_paths:
node = layer.lookup(node_path)
name = getattr(node, INTERNAL_ATTRS.NAME)
parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
self.stage.get_node_data(node, layer)
node_data = self.stage.get_node_data(node, layer)
node_data['pos'] = self.model.get_node_pos(node_path)
node_data['name'] = name
node_data['parent'] = parent_path
parent_node = layer.lookup(parent_path)
ancestor_path = parent_path
child_order = []
if parent_node:
child_order = getattr(parent_node,
INTERNAL_ATTRS.CHILD_ORDER)
else:
ancestors = layer.ancestors(node_path)
if ancestors:
ancestor = ancestors[0]
ancestor_path = layer.get_node_path(ancestor)
child_order = self.stage.get_node_child_order(ancestor)
node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path,
child_order]
self.prev_node_data[node_path] = node_data
nodes += [node]
# get current node hierarchy information for each node. each node
# path is placed in a list of descendants for each top node so when
        # they are un-parented each node can be placed visually beside its
# original top node.
node_hierarchy_data = {}
if self.parent_node_path is nxt_path.WORLD:
for node_path in self.node_paths:
node = layer.lookup(node_path)
top_node = self.stage.get_top_node(node,
self.model.target_layer)
if top_node is None:
top_node = node
top_node_path = layer.get_node_path(top_node)
                top_node_descendant_list = node_hierarchy_data.get(top_node_path, [])
top_node_descendant_list += [node]
node_hierarchy_data[top_node_path] = top_node_descendant_list
if not node_hierarchy_data:
return
# parent
self.node_path_data = self.stage.parent_nodes(nodes,
self.parent_node_path,
layer)
self.new_node_paths = list(self.node_path_data.values())
idx = 0
for new_node_path in self.new_node_paths:
old_node_path = self.node_paths[idx]
attr_state = self.model.remove_attr_display_state(old_node_path)
if attr_state is not None:
self.model._set_attr_display_state(new_node_path, attr_state)
# set position for un-parent
if self.parent_node_path == nxt_path.WORLD:
old_root = nxt_path.get_root_path(old_node_path)
new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14,
GRID_SIZE),
self.model.top_layer)
self.model._set_node_pos(new_node_path, new_pos, layer)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = list(self.node_path_data.values())
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText("Parent {} to {}".format(path_str, self.parent_node_path))
class AddAttribute(SetNodeAttributeData):
"""Add an attribute to a node."""
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(AddAttribute, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(AddAttribute, self).redo()
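        # Always flag removal so undo strips the attribute again; the base
        # class only sets remove_attr when the attribute did not already exist.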
self.remove_attr = True
self.setText("Add {} attr to {}".format(self.attr_name,
self.node_path))
class DeleteAttribute(AddAttribute):
"""Delete attribute on a node"""
def __init__(self, node_path, attr_name, model, layer_path):
super(DeleteAttribute, self).__init__(node_path, attr_name, None,
model, layer_path)
# Get the data to be set if undo is called
layer = self.model.lookup_layer(self.layer_path)
node = layer.lookup(self.node_path)
self.data = self.stage.get_node_attr_data(node, self.attr_name, layer)
def undo(self):
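        # Intentionally inverted: undoing a delete re-applies the captured
        # attribute data via the base class's redo.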
super(DeleteAttribute, self).redo()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
def redo(self):
        # Overload remove_attr here to ensure the attr is deleted
self.remove_attr = True
super(DeleteAttribute, self).undo()
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.setText("Remove {} attr from {}".format(self.attr_name,
self.node_path))
class RevertCompute(SetNodeAttributeValue):
"""Revert compute"""
def __init__(self, node_path, model, layer_path):
super(RevertCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE, [], model,
layer_path)
def redo(self):
super(RevertCompute, self).redo()
self.setText("Revert compute on {}".format(self.node_path))
class RenameAttribute(NxtCommand):
"""Rename attribute"""
def __init__(self, node_path, attr_name, new_attr_name, model, layer_path):
super(RenameAttribute, self).__init__(model)
self.node_path = node_path
self.attr_name = attr_name
self.new_attr_name = new_attr_name
self.model = model
self.stage = model.stage
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.new_attr_name, self.attr_name)
self.undo_effected_layer(layer.real_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.attr_name, self.new_attr_name)
self.redo_effected_layer(layer.real_path)
def rename_attribute(self, layer, attr_name, new_attr_name):
node = layer.lookup(self.node_path)
self.stage.rename_node_attr(node, attr_name, new_attr_name, layer)
self.model.update_comp_layer()
old_name = nxt_path.make_attr_path(self.node_path, attr_name)
new_name = nxt_path.make_attr_path(self.node_path, new_attr_name)
self.setText("Rename {} to {}".format(old_name, new_name))
class SetAttributeComment(SetNodeAttributeData):
"""Set attribute comment"""
def __init__(self, node_path, attr_name, comment, model, layer_path):
data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment}
super(SetAttributeComment, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(SetAttributeComment, self).redo()
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
self.setText("Changed comment on {}".format(attr_path))
class SetCompute(SetNodeAttributeValue):
"""Set node code value"""
def __init__(self, node_path, code_lines, model, layer_path):
super(SetCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(SetCompute, self).redo()
self.setText("Changed compute on {}".format(self.node_path))
class SetNodeComment(SetNodeAttributeValue):
"""Set node comment"""
def __init__(self, node_path, comment, model, layer_path):
super(SetNodeComment, self).__init__(node_path,
INTERNAL_ATTRS.COMMENT,
comment, model, layer_path)
def redo(self):
super(SetNodeComment, self).redo()
self.setText("Changed comment on {}".format(self.node_path))
class SetNodeInstance(SetNodeAttributeValue):
"""Set node instance"""
def __init__(self, node_path, instance_path, model, layer_path):
super(SetNodeInstance, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
instance_path, model, layer_path)
def redo(self):
super(SetNodeInstance, self).redo()
txt = ("Set inst path on "
"{} to {}".format(self.node_path,
self.data.get(META_ATTRS.VALUE)))
self.setText(txt)
class SetNodeEnabledState(SetNodeAttributeValue):
"""Set node enabled state"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeEnabledState, self).__init__(node_path,
INTERNAL_ATTRS.ENABLED,
value, model, layer_path)
def redo(self):
super(SetNodeEnabledState, self).redo()
if self.data.get(META_ATTRS.VALUE):
self.setText("Enabled {}".format(self.node_path))
else:
self.setText("Disabled {}".format(self.node_path))
class SetNodeCollapse(NxtCommand):
"""Set the node collapse state"""
def __init__(self, node_paths, value,
model, layer_path):
super(SetNodeCollapse, self).__init__(model)
self.node_paths = node_paths
self.value = value
self.model = model
self.stage = model.stage
self.layer_path = layer_path
self.prev_values = {}
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
for node_path, prev_value in self.prev_values.items():
layer.collapse[node_path] = prev_value
self.model.comp_layer.collapse[node_path] = prev_value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.prev_values = {}
for np in self.node_paths:
self.prev_values[np] = self.model.get_node_collapse(np, layer)
for node_path in self.node_paths:
layer.collapse[node_path] = self.value
self.model.comp_layer.collapse[node_path] = self.value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Collapsed {}".format(path_str))
else:
self.setText("Expanded {}".format(path_str))
class SetNodeExecuteSources(SetNodeAttributeValue):
"""Set node execute sources"""
def __init__(self, node_path, exec_source, model, layer_path):
super(SetNodeExecuteSources, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_source, model,
layer_path)
def redo(self):
super(SetNodeExecuteSources, self).redo()
val = self.data.get(META_ATTRS.VALUE)
if val is None:
self.setText("Removed exec input for {}".format(self.node_path))
return
self.setText("Set {} exec input to {}".format(self.node_path, val))
class SetNodeBreakPoint(QUndoCommand):
"""Set node as a break point"""
def __init__(self, node_paths, value, model, layer_path):
super(SetNodeBreakPoint, self).__init__()
self.node_paths = node_paths
self.value = value
self.model = model
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if not self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Add breakpoint to {}".format(path_str))
else:
self.setText("Remove breakpoint from {}".format(path_str))
class ClearBreakpoints(QUndoCommand):
"""Clear all the breakpoints for a given layer"""
def __init__(self, model, layer_path):
super(ClearBreakpoints, self).__init__()
self.model = model
self.layer_path = layer_path
self.prev_breaks = []
@processing
def undo(self):
user_dir.breakpoints[self.layer_path] = self.prev_breaks
self.model.nodes_changed.emit(tuple(self.prev_breaks))
@processing
def redo(self):
self.prev_breaks = user_dir.breakpoints.get(self.layer_path, [])
if self.layer_path in list(user_dir.breakpoints.keys()):
user_dir.breakpoints.pop(self.layer_path)
self.model.nodes_changed.emit(tuple(self.prev_breaks))
self.setText("Clear all breakpoints")
class SetNodeStartPoint(SetNodeAttributeValue):
"""Set this node as the execution start point"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeStartPoint, self).__init__(node_path,
INTERNAL_ATTRS.START_POINT,
value, model, layer_path)
class SetNodeChildOrder(SetNodeAttributeValue):
"""Set node child order"""
def __init__(self, node_path, child_order, model, layer_path):
super(SetNodeChildOrder, self).__init__(node_path,
INTERNAL_ATTRS.CHILD_ORDER,
child_order, model, layer_path)
def redo(self):
super(SetNodeChildOrder, self).redo()
self.setText("Change child order on {}".format(self.node_path))
class SetLayerAlias(NxtCommand):
"""Set Layer Alias"""
def __init__(self, alias, layer_path, model):
super(SetLayerAlias, self).__init__(model)
self.layer_path = layer_path
self.alias = alias
self.old_alias = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.set_alias(self.old_alias)
else:
layer.set_alias_over(self.old_alias)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_alias = layer.get_alias(local=True)
layer.set_alias(self.alias)
else:
self.old_alias = layer.get_alias(fallback_to_local=False)
layer.set_alias_over(self.alias)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
self.setText("Set {} alias to {}".format(layer.filepath, self.alias))
class NewLayer(NxtCommand):
"""Add new layer"""
def __init__(self, file_path, file_name, idx, model, chdir):
super(NewLayer, self).__init__(model)
self.new_layer_path = None
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.file_name = file_name
self.chdir = chdir
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.new_layer_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(LAYERS.TOP)
self.undo_effected_layer(self.new_layer_path)
self.model.layer_removed.emit(self.new_layer_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS]
open_layer_colors = []
for layer in self.stage._sub_layers:
color = layer.color
if color:
color = color.lower()
open_layer_colors += [color]
layer_color = layer_color_index[0]
for c in layer_color_index:
if c not in open_layer_colors:
layer_color = c
break
real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir)
layer_data = {"parent_layer": parent_layer,
SAVE_KEY.FILEPATH: self.file_path,
SAVE_KEY.REAL_PATH: real_path,
SAVE_KEY.COLOR: layer_color,
SAVE_KEY.ALIAS: self.file_name
}
new_layer = self.stage.new_sublayer(layer_data=layer_data,
idx=self.insert_idx)
self.new_layer_path = new_layer.real_path
self.redo_effected_layer(new_layer.real_path)
# Fixme: The next 2 lines each build once
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.new_layer_path)
self.model.layer_added.emit(self.new_layer_path)
self.setText("New layer {}".format(self.new_layer_path))
class ReferenceLayer(NxtCommand):
"""Refernce existing layer"""
def __init__(self, file_path, idx, model, chdir):
super(ReferenceLayer, self).__init__(model)
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.real_path = nxt_path.full_file_expand(self.file_path, chdir)
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.real_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.set_target_layer(LAYERS.TOP)
self.model.update_comp_layer(rebuild=True)
self.model.layer_removed.emit(self.real_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_data = nxt_io.load_file_data(self.real_path)
extra_data = {"parent_layer": parent_layer,
"filepath": self.file_path,
"real_path": self.real_path,
"alias": layer_data['name']
}
layer_data.update(extra_data)
self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx)
# Fixme: The next 2 lines each build once
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.real_path)
self.model.layer_added.emit(self.real_path)
self.setText("Added reference to {}".format(self.real_path))
class RemoveLayer(ReferenceLayer):
"""Remove existing layer"""
def __init__(self, layer_path, model):
idx = model.lookup_layer(layer_path).layer_idx()
super(RemoveLayer, self).__init__(layer_path, idx, model, None)
self.text = "Removed reference to {}".format(layer_path)
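    # Note: undo/redo are deliberately swapped relative to ReferenceLayer,
    # since removing a layer is the inverse of referencing one; this command
    # reuses the parent class's operations in reverse.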
@processing
def undo(self):
super(RemoveLayer, self).redo()
self.setText(self.text)
@processing
def redo(self):
super(RemoveLayer, self).undo()
self.setText(self.text)
class MuteToggleLayer(NxtCommand):
"""Toggles muting an existing layer"""
def __init__(self, layer_path, model):
super(MuteToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
state = not layer.get_muted(local=True)
layer.set_muted(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_muted(local=False)
self.model.top_layer.set_mute_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_mute_changed.emit((self.layer_path,))
self.setText("Toggle {} muted.".format(layer.get_alias()))
class SoloToggleLayer(NxtCommand):
"""Toggles soloing an existing layer"""
def __init__(self, layer_path, model):
super(SoloToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
state = not layer.get_soloed(local=True)
layer.set_soloed(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_soloed(local=False)
self.model.top_layer.set_solo_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_solo_changed.emit((self.layer_path,))
self.setText("Toggle {} soloed.".format(layer.get_alias()))
class SetLayerColor(NxtCommand):
def __init__(self, color, layer_path, model):
"""Sets the color for a given layer, if the layer is not a top layer
the top layer store an overrides.
:param color: string of new layer alias (name)
:param layer_path: real path of layer
:param model: StageModel
"""
super(SetLayerColor, self).__init__(model)
self.layer_path = layer_path
self.color = color
self.old_color = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.color = self.old_color
else:
layer.set_color_over(self.old_color)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_color = layer.get_color(local=True)
layer.color = self.color
else:
self.old_color = layer.get_color(fallback_to_local=False)
layer.set_color_over(self.color)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
self.setText("Set {} color to {}".format(layer.filepath, self.color))
def _add_node_hierarchy(base_node_path, model, layer):
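    """Build the node hierarchy for base_node_path on the given layer via the
    stage, merging each new node's child order with its comp-layer display
    node when one exists. Returns (new_nodes, new_node_paths, dirty)."""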
stage = model.stage
comp_layer = model.comp_layer
new_node_paths = []
new_nodes = []
node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path)
new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy,
parent=None, layer=layer,
comp_layer=comp_layer)
for nn_p, n in new_node_table:
display_node = comp_layer.lookup(nn_p)
if display_node is not None:
display_child_order = getattr(display_node,
INTERNAL_ATTRS.CHILD_ORDER)
old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER)
new_child_order = list_merger(display_child_order,
old_child_order)
setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order)
new_node_paths += [nn_p]
new_nodes += [n]
return new_nodes, new_node_paths, dirty
def undo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug("Undo " + cmd.text() + " | " + update_time + "ms")
def redo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug(cmd.text() + " | " + update_time + "ms")
# --- mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py (libracore/mietrechtspraxis, MIT) ---
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import datetime
from PyPDF2 import PdfFileWriter
from frappe.utils.file_manager import save_file
class ArbitrationAuthority(Document):
pass
def _get_sb(**kwargs):
    '''
    Call via [IP]/api/method/mietrechtspraxis.api.get_sb
    Mandatory parameters:
        - token
        - plz_city
    '''
# check that token is present
try:
token = kwargs['token']
except:
# 400 Bad Request (Missing Token)
return raise_4xx(400, 'Bad Request', 'Token Required')
# check that token is correct
if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'):
# 401 Unauthorized (Invalid Token)
return raise_4xx(401, 'Unauthorized', 'Invalid Token')
# check that plz_city is present
try:
plz_city = kwargs['plz_city']
except:
# 400 Bad Request (Missing PLZ/City)
return raise_4xx(400, 'Bad Request', 'PLZ/City Required')
answer = []
# lookup for plz
city_results = frappe.db.sql("""
SELECT
                            `pincode` AS `plz`,
                            `city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `pincode` = '{plz_city}'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) < 1:
# lookup for city
city_results = frappe.db.sql("""
SELECT
                            `pincode` AS `plz`,
                            `city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `city` LIKE '%{plz_city}%'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) > 0:
for city in city_results:
data = {}
data['plz'] = city.plz
data['ort'] = city.city
data['gemeinde'] = city.municipality
data['bezirk'] = city.district
data['kanton'] = city.canton
data['allgemein'] = get_informations(city.canton)
data['schlichtungsbehoerde'] = frappe.db.sql("""
SELECT
`schlichtungsbehoerde`.`titel` AS `Titel`,
`schlichtungsbehoerde`.`telefon` AS `Telefon`,
`schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`,
`schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`,
`schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`,
`schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`,
`schlichtungsbehoerde`.`homepage` AS `Homepage`
FROM `tabArbitration Authority` AS `schlichtungsbehoerde`
LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent`
WHERE `geminendentbl`.`municipality` = '{municipality}'
""".format(municipality=city.municipality), as_dict=True)
answer.append(data)
if len(answer) > 0:
return raise_200(answer)
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
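# Illustrative client-side sketch (not part of the original module; the host,
# token value and example PLZ below are hypothetical placeholders inferred
# from the docstring above):
#
#   import requests
#   resp = requests.get(
#       "https://<host>/api/method/mietrechtspraxis.api.get_sb",
#       params={"token": "<api-token>", "plz_city": "8001"})
#
# On success the handler returns ['200 OK', [...]] where each entry carries
# 'plz', 'ort', 'gemeinde', 'bezirk', 'kanton', 'allgemein' and
# 'schlichtungsbehoerde' as assembled in _get_sb() above.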
def get_informations(kanton):
search = frappe.db.sql("""
SELECT
`informationen`,
`homepage`,
`gesetzessammlung`,
`formulare`
FROM `tabKantonsinformationen`
WHERE `kanton` = '{kanton}'
""".format(kanton=kanton), as_dict=True)
if len(search) > 0:
result = search[0]
else:
result = {}
return result
def raise_4xx(code, title, message):
# 4xx Bad Request / Unauthorized / Not Found
return ['{code} {title}'.format(code=code, title=title), {
"error": {
"code": code,
"message": "{message}".format(message=message)
}
}]
def raise_200(answer):
return ['200 OK', answer]
@frappe.whitelist()
def get_sammel_pdf(no_letterhead=1):
frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead})
return
def _get_sammel_pdf(no_letterhead=1):
output = PdfFileWriter()
schlichtungsbehoerden = frappe.db.sql("""SELECT `name` FROM `tabArbitration Authority`""", as_dict=True)
for schlichtungsbehoerde in schlichtungsbehoerden:
output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output = output, no_letterhead = no_letterhead)
output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True, output = output, no_letterhead = no_letterhead)
pdf = frappe.utils.pdf.get_file_data_from_writer(output)
now = datetime.now()
ts = "{0:04d}-{1:02d}-{2:02d}".format(now.year, now.month, now.day)
file_name = "{0}_{1}.pdf".format('SB_Sammel-PDF', ts)
save_file(file_name, pdf, '', '', is_private=1)
return
# --- easysockets/client_socket.py (Matthias1590/EasySockets, MIT) ---
from .connection import Connection
import socket
class ClientSocket:
def __init__(self) -> None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect(self, host: str, port: int) -> Connection:
self.__socket.connect((host, port))
return Connection(self.__socket)
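# Minimal usage sketch (illustrative only; the host/port values are
# placeholders and the Connection API beyond construction is defined in
# .connection, which is not shown here):
#
#   client = ClientSocket()
#   connection = client.connect("127.0.0.1", 8080)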
# --- pxr/usd/usdGeom/testenv/testUsdGeomSchemata.py (yurivict/USD, Apache-2.0) ---
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
# pylint: disable=map-builtin-not-iterating
import sys, unittest
from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf
class TestUsdGeomSchemata(unittest.TestCase):
def test_Basic(self):
l = Sdf.Layer.CreateAnonymous()
stage = Usd.Stage.Open(l.identifier)
p = stage.DefinePrim("/Mesh", "Mesh")
self.assertTrue(p)
mesh = UsdGeom.Mesh(p)
self.assertTrue(mesh)
self.assertTrue(mesh.GetPrim())
self.assertTrue(not mesh.GetPointsAttr().Get(1))
self.assertEqual(p.GetTypeName(),
Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType()))
#
# Make sure uniform access behaves as expected.
#
ori = p.GetAttribute("orientation")
# The generic orientation attribute should be automatically defined because
# it is a registered attribute of a well known schema. However, it's not
# yet authored at the current edit target.
self.assertTrue(ori.IsDefined())
self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))
# Author a value, and check that it's still defined, and now is in fact
# authored at the current edit target.
ori.Set(UsdGeom.Tokens.leftHanded)
self.assertTrue(ori.IsDefined())
self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))
mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10)
# "leftHanded" should have been authored at Usd.TimeCode.Default, so reading the
# attribute at Default should return lh, not rh.
self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded)
# The value "rightHanded" was set at t=10, so reading *any* time should
# return "rightHanded"
self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded)
#
# Attribute name sanity check. We expect the names returned by the schema
# to match the names returned via the generic API.
#
self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0)
self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False))
for n in mesh.GetSchemaAttributeNames():
# apiName overrides
if n == "primvars:displayColor":
n = "displayColor"
elif n == "primvars:displayOpacity":
n = "displayOpacity"
name = n[0].upper() + n[1:]
self.assertTrue(("Get" + name + "Attr") in dir(mesh),
("Get" + name + "Attr() not found in: " + str(dir(mesh))))
def test_IsA(self):
# Author Scene and Compose Stage
l = Sdf.Layer.CreateAnonymous()
stage = Usd.Stage.Open(l.identifier)
# For every prim schema type in this module, validate that:
# 1. We can define a prim of its type
# 2. Its type and inheritance matches our expectations
# 3. At least one of its builtin properties is available and defined
# BasisCurves Tests
schema = UsdGeom.BasisCurves.Define(stage, "/BasisCurves")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder
self.assertTrue(schema.GetBasisAttr())
# Camera Tests
schema = UsdGeom.Camera.Define(stage, "/Camera")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder
self.assertTrue(schema.GetFocalLengthAttr())
# Capsule Tests
schema = UsdGeom.Capsule.Define(stage, "/Capsule")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Cone Tests
schema = UsdGeom.Cone.Define(stage, "/Cone")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Cube Tests
schema = UsdGeom.Cube.Define(stage, "/Cube")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder
self.assertTrue(schema.GetSizeAttr())
# Cylinder Tests
schema = UsdGeom.Cylinder.Define(stage, "/Cylinder")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable
self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Mesh Tests
schema = UsdGeom.Mesh.Define(stage, "/Mesh")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder
self.assertTrue(schema.GetFaceVertexCountsAttr())
# NurbsCurves Tests
schema = UsdGeom.NurbsCurves.Define(stage, "/NurbsCurves")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder
self.assertTrue(schema.GetKnotsAttr())
# NurbsPatch Tests
schema = UsdGeom.NurbsPatch.Define(stage, "/NurbsPatch")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder
self.assertTrue(schema.GetUKnotsAttr())
# Points Tests
schema = UsdGeom.Points.Define(stage, "/Points")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder
self.assertTrue(schema.GetWidthsAttr())
# Scope Tests
schema = UsdGeom.Scope.Define(stage, "/Scope")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh
self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder
# Scope has no builtins!
# Sphere Tests
schema = UsdGeom.Sphere.Define(stage, "/Sphere")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder
self.assertTrue(schema.GetRadiusAttr())
# Xform Tests
schema = UsdGeom.Xform.Define(stage, "/Xform")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder
self.assertTrue(schema.GetXformOpOrderAttr())
def test_Fallbacks(self):
# Author Scene and Compose Stage
stage = Usd.Stage.CreateInMemory()
# Xformable Tests
identity = Gf.Matrix4d(1)
origin = Gf.Vec3f(0, 0, 0)
xform = UsdGeom.Xform.Define(stage, "/Xform") # direct subclass
xformOpOrder = xform.GetXformOpOrderAttr()
self.assertFalse(xformOpOrder.HasAuthoredValue())
# xformOpOrder has no fallback value
self.assertEqual(xformOpOrder.Get(), None)
self.assertFalse(xformOpOrder.HasFallbackValue())
# Try authoring and reverting...
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder)
self.assertTrue(xformOpOrderAttr)
self.assertEqual(xformOpOrderAttr.Get(), None)
opOrderVal = ["xformOp:transform"]
self.assertTrue(xformOpOrderAttr.Set(opOrderVal))
self.assertTrue(xformOpOrderAttr.HasAuthoredValue())
self.assertNotEqual(xformOpOrderAttr.Get(), None)
self.assertTrue(xformOpOrderAttr.Clear())
self.assertFalse(xformOpOrderAttr.HasAuthoredValue())
self.assertEqual(xformOpOrderAttr.Get(), None)
self.assertFalse(xformOpOrder.HasFallbackValue())
mesh = UsdGeom.Mesh.Define(stage, "/Mesh") # multiple ancestor hops
# PointBased and Curves
curves = UsdGeom.BasisCurves.Define(stage, "/Curves")
self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex)
self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex)
# Before we go, test that CreateXXXAttr performs as we expect in various
# scenarios
# Number 1: Sparse and non-sparse authoring on def'd prim
mesh.CreateDoubleSidedAttr(False, True)
self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue())
mesh.CreateDoubleSidedAttr(False, False)
self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue())
# Number 2: Sparse authoring demotes to dense for non-defed prim
overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh'))
overMesh.CreateDoubleSidedAttr(False, True)
self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue())
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False)
overMesh.CreateDoubleSidedAttr(True, True)
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)
# make it a defined mesh, and sanity check it still evals the same
mesh2 = UsdGeom.Mesh.Define(stage, "/overMesh")
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)
# Check querying of fallback values.
sphere = UsdGeom.Sphere.Define(stage, "/Sphere")
radius = sphere.GetRadiusAttr()
self.assertTrue(radius.HasFallbackValue())
radiusQuery = Usd.AttributeQuery(radius)
self.assertTrue(radiusQuery.HasFallbackValue())
def test_DefineSchema(self):
s = Usd.Stage.CreateInMemory()
parent = s.OverridePrim('/parent')
self.assertTrue(parent)
# Make a subscope.
scope = UsdGeom.Scope.Define(s, '/parent/subscope')
self.assertTrue(scope)
# Assert that a simple find or create gives us the scope back.
self.assertTrue(s.OverridePrim('/parent/subscope'))
self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim())
# Try to make a mesh at subscope's path. This transforms the scope into a
# mesh, since Define() always authors typeName.
mesh = UsdGeom.Mesh.Define(s, '/parent/subscope')
self.assertTrue(mesh)
self.assertTrue(not scope)
# Make a mesh at a different path, should work.
mesh = UsdGeom.Mesh.Define(s, '/parent/mesh')
self.assertTrue(mesh)
def test_BasicMetadataCases(self):
s = Usd.Stage.CreateInMemory()
spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim()
radius = spherePrim.GetAttribute('radius')
self.assertTrue(radius.HasMetadata('custom'))
self.assertTrue(radius.HasMetadata('typeName'))
self.assertTrue(radius.HasMetadata('variability'))
self.assertTrue(radius.IsDefined())
self.assertTrue(not radius.IsCustom())
self.assertEqual(radius.GetTypeName(), 'double')
allMetadata = radius.GetAllMetadata()
self.assertEqual(allMetadata['typeName'], 'double')
self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)
self.assertEqual(allMetadata['custom'], False)
# Author a custom property spec.
layer = s.GetRootLayer()
sphereSpec = layer.GetPrimAtPath('/sphere')
radiusSpec = Sdf.AttributeSpec(
sphereSpec, 'radius', Sdf.ValueTypeNames.Double,
variability=Sdf.VariabilityUniform, declaresCustom=True)
self.assertTrue(radiusSpec.custom)
self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform)
# Definition should win.
self.assertTrue(not radius.IsCustom())
self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying)
allMetadata = radius.GetAllMetadata()
self.assertEqual(allMetadata['typeName'], 'double')
self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)
self.assertEqual(allMetadata['custom'], False)
# List fields on 'visibility' attribute -- should include 'allowedTokens',
# provided by the property definition.
visibility = spherePrim.GetAttribute('visibility')
self.assertTrue(visibility.IsDefined())
self.assertTrue('allowedTokens' in visibility.GetAllMetadata())
# Assert that attribute fallback values are returned for builtin attributes.
do = spherePrim.GetAttribute('primvars:displayOpacity')
self.assertTrue(do.IsDefined())
self.assertTrue(do.Get() is None)
def test_Camera(self):
from pxr import Gf
stage = Usd.Stage.CreateInMemory()
camera = UsdGeom.Camera.Define(stage, "/Camera")
self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable
self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective')
camera.GetProjectionAttr().Set('orthographic')
self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic')
self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(),
0.825 * 25.4, 1e-5))
camera.GetHorizontalApertureAttr().Set(3.0)
self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0)
self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(),
0.602 * 25.4, 1e-5))
camera.GetVerticalApertureAttr().Set(2.0)
self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0)
self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0)
camera.GetFocalLengthAttr().Set(35.0)
self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5))
self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000))
camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10))
self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(),
Gf.Vec2f(5, 10), 1e-5))
self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray())
cp = Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6, 5)])
camera.GetClippingPlanesAttr().Set(cp)
self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)
cp = Vt.Vec4fArray()
camera.GetClippingPlanesAttr().Set(cp)
self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)
self.assertEqual(camera.GetFStopAttr().Get(), 0.0)
camera.GetFStopAttr().Set(2.8)
self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5))
self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0)
camera.GetFocusDistanceAttr().Set(10.0)
self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0)
def test_Points(self):
stage = Usd.Stage.CreateInMemory()
# Points Tests
schema = UsdGeom.Points.Define(stage, "/Points")
self.assertTrue(schema)
        # Test that ids round-trip properly for big numbers and negative numbers
ids = [8589934592, 1099511627776, 0, -42]
schema.CreateIdsAttr(ids)
resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray to list
self.assertEqual(ids, resolvedIds)
def test_Revert_Bug111239(self):
        # This used to test a change for Bug111239, but now tests that this
        # fix has been reverted. We no longer allow the C++ typename to be used
        # as a prim's typename.
s = Usd.Stage.CreateInMemory()
sphere = s.DefinePrim('/sphere', typeName='Sphere')
tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName
self.assertEqual(tfTypeName, 'UsdGeomSphere')
        usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName=tfTypeName)
self.assertTrue(UsdGeom.Sphere(sphere))
self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()])
self.assertFalse(UsdGeom.Sphere(usdGeomSphere))
self.assertFalse('radius' in [a.GetName() for a in usdGeomSphere.GetAttributes()])
def test_ComputeExtent(self):
from pxr import Gf
# Create some simple test cases
allPoints = [
[(1, 1, 0)], # Zero-Volume Extent Test
[(0, 0, 0)], # Simple Width Test
[(-1, -1, -1), (1, 1, 1)], # Multiple Width Test
[(-1, -1, -1), (1, 1, 1)], # Erroneous Widths/Points Test
# Complex Test, Many Points/Widths
[(3, -1, 5), (-1.5, 0, 3), (1, 3, -2), (2, 2, -4)],
]
allWidths = [
[0], # Zero-Volume Extent Test
[2], # Simple Width Test
[2, 4], # Multiple Width Test
[2, 4, 5], # Erroneous Widths/Points Test
[1, 2, 2, 1] # Complex Test, Many Points/Widths
]
pointBasedSolutions = [
[(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent Test
[(0, 0, 0), (0, 0, 0)], # Simple Width Test
[(-1, -1, -1), (1, 1, 1)], # Multiple Width Test
# Erroneous Widths/Points Test -> Ok For Point-Based
[(-1, -1, -1), (1, 1, 1)],
[(-1.5, -1, -4), (3, 3, 5)] # Complex Test, Many Points/Widths
]
pointsSolutions = [
[(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent Test
[(-1, -1, -1), (1, 1, 1)], # Simple Width Test
[(-2, -2, -2), (3, 3, 3)], # Multiple Width Test
# Erroneous Widths/Points Test -> Returns None
None,
[(-2.5, -1.5, -4.5), (3.5, 4, 5.5)] # Complex Test, Many Points/Widths
]
# Perform the correctness tests for PointBased and Points
# Test for empty points prims
emptyPoints = []
extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints)
        # We need to map the contents of extremeExtentArr from numpy.float32
        # values to Python floats due to the way Gf.Vec3f is wrapped out
# XXX: This is awful, it'd be nice to not do it
extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])),
Gf.Vec3f(*map(float, extremeExtentArr[1])))
self.assertTrue(extremeExtentRange.IsEmpty())
# PointBased Test
numDataSets = len(allPoints)
for i in range(numDataSets):
pointsData = allPoints[i]
expectedExtent = pointBasedSolutions[i]
actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Points Test
for i in range(numDataSets):
pointsData = allPoints[i]
widthsData = allWidths[i]
expectedExtent = pointsSolutions[i]
actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData)
if actualExtent is not None and expectedExtent is not None:
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Compute extent via generic UsdGeom.Boundable API
s = Usd.Stage.CreateInMemory()
pointsPrim = UsdGeom.Points.Define(s, "/Points")
pointsPrim.CreatePointsAttr(pointsData)
pointsPrim.CreateWidthsAttr(widthsData)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
pointsPrim, Usd.TimeCode.Default())
if actualExtent is not None and expectedExtent is not None:
for a, b in zip(expectedExtent, list(actualExtent)):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Mesh Test
for i in range(numDataSets):
pointsData = allPoints[i]
expectedExtent = pointBasedSolutions[i]
# Compute extent via generic UsdGeom.Boundable API.
# UsdGeom.Mesh does not have its own compute extent function, so
# it should fall back to the extent for PointBased prims.
s = Usd.Stage.CreateInMemory()
meshPrim = UsdGeom.Mesh.Define(s, "/Mesh")
meshPrim.CreatePointsAttr(pointsData)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
meshPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Test UsdGeomCurves
curvesPoints = [
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 1 width
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 2 widths
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test Curve with no width
]
curvesWidths = [
[1], # Test Curve with 1 width
[.5, .1], # Test Curve with 2 widths
[] # Test Curve with no width
]
curvesSolutions = [
[(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with 1 width
[(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with 2 widths (MAX)
[(0,0,0), (3,1,1)], # Test Curve with no width
]
# Perform the actual v. expected comparison
numDataSets = len(curvesPoints)
for i in range(numDataSets):
pointsData = curvesPoints[i]
widths = curvesWidths[i]
expectedExtent = curvesSolutions[i]
actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Compute extent via generic UsdGeom.Boundable API
s = Usd.Stage.CreateInMemory()
nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, "/NurbsCurves")
nurbsCurvesPrim.CreatePointsAttr(pointsData)
nurbsCurvesPrim.CreateWidthsAttr(widths)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
nurbsCurvesPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
basisCurvesPrim = UsdGeom.BasisCurves.Define(s, "/BasisCurves")
basisCurvesPrim.CreatePointsAttr(pointsData)
basisCurvesPrim.CreateWidthsAttr(widths)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
basisCurvesPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
    def test_TypeUsage(self):
        from pxr import Gf
        # Perform Type-Ness Checking for ComputeExtent
pointsAsList = [(0, 0, 0), (1, 1, 1), (2, 2, 2)]
pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList)
comp = UsdGeom.PointBased.ComputeExtent
expectedExtent = comp(pointsAsVec3fArr)
actualExtent = comp(pointsAsList)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
def test_Bug116593(self):
from pxr import Gf
s = Usd.Stage.CreateInMemory()
prim = s.DefinePrim('/sphere', typeName='Sphere')
# set with list of tuples
vec = [(1,2,2),(12,3,3)]
self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3))
# set with Gf vecs
vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)]
self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1))
def test_Typed(self):
from pxr import Tf
xform = Tf.Type.FindByName("UsdGeomXform")
imageable = Tf.Type.FindByName("UsdGeomImageable")
geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")
self.assertTrue(Usd.SchemaRegistry.IsTyped(xform))
self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable))
self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI))
def test_Concrete(self):
from pxr import Tf
xform = Tf.Type.FindByName("UsdGeomXform")
imageable = Tf.Type.FindByName("UsdGeomImageable")
geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")
self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform))
self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable))
self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI))
def test_Apply(self):
s = Usd.Stage.CreateInMemory('AppliedSchemas.usd')
root = s.DefinePrim('/hello')
self.assertEqual([], root.GetAppliedSchemas())
# Check duplicates
UsdGeom.MotionAPI.Apply(root)
self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())
UsdGeom.MotionAPI.Apply(root)
self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())
# Ensure duplicates aren't picked up
UsdGeom.ModelAPI.Apply(root)
self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas())
# Verify that we get exceptions but don't crash when applying to the
# null prim.
with self.assertRaises(Tf.ErrorException):
self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim()))
with self.assertRaises(Tf.ErrorException):
self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim()))
def test_IsATypeless(self):
from pxr import Usd, Tf
s = Usd.Stage.CreateInMemory()
spherePrim = s.DefinePrim('/sphere', typeName='Sphere')
typelessPrim = s.DefinePrim('/regular')
types = [Tf.Type.FindByName('UsdGeomSphere'),
Tf.Type.FindByName('UsdGeomGprim'),
Tf.Type.FindByName('UsdGeomBoundable'),
Tf.Type.FindByName('UsdGeomXformable'),
Tf.Type.FindByName('UsdGeomImageable'),
Tf.Type.FindByName('UsdTyped')]
# Our sphere prim should return true on IsA queries for Sphere
# and everything it inherits from. Our plain prim should return false
# for all of them.
for t in types:
self.assertTrue(spherePrim.IsA(t))
self.assertFalse(typelessPrim.IsA(t))
def test_HasAPI(self):
from pxr import Usd, Tf
s = Usd.Stage.CreateInMemory()
prim = s.DefinePrim('/prim')
types = [Tf.Type.FindByName('UsdGeomMotionAPI'),
Tf.Type.FindByName('UsdGeomModelAPI')]
# Check that no APIs have yet been applied
for t in types:
self.assertFalse(prim.HasAPI(t))
# Apply our schemas to this prim
UsdGeom.ModelAPI.Apply(prim)
UsdGeom.MotionAPI.Apply(prim)
# Check that all our applied schemas show up
for t in types:
self.assertTrue(prim.HasAPI(t))
# Check that we get an exception for unknown and non-API types
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.Unknown)
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.FindByName('UsdGeomXform'))
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable'))
with self.assertRaises(Tf.ErrorException):
# Test with a non-applied API schema.
prim.HasAPI(Tf.Type.FindByName('UsdModelAPI'))
if __name__ == "__main__":
unittest.main()
| 42.69326 | 100 | 0.63055 | 29,800 | 0.960113 | 0 | 0 | 0 | 0 | 0 | 0 | 7,756 | 0.249887 |
168fdf67ec71ebdf125bbe9b6f5c14dad854391f | 1,310 | py | Python | round_robin_generator/matchup_times.py | avadavat/round_robin_generator | 242d522386f6af26db029232fcffb51004ff4c59 | [
"MIT"
] | null | null | null | round_robin_generator/matchup_times.py | avadavat/round_robin_generator | 242d522386f6af26db029232fcffb51004ff4c59 | [
"MIT"
] | 5 | 2020-04-26T19:44:41.000Z | 2020-05-01T16:26:06.000Z | round_robin_generator/matchup_times.py | avadavat/round_robin_generator | 242d522386f6af26db029232fcffb51004ff4c59 | [
"MIT"
] | null | null | null | import pandas as pd
from datetime import timedelta
def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger):
time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns)
if game_stagger == 0:
for round_num in range(time_df.shape[0]):
round_key = 'Round ' + str(round_num + 1)
match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num))
time_df.loc[round_key, :] = match_time.strftime('%I:%M%p')
return time_df
else:
"""
# Given the algorithm, at worst every player can play every (game duration + stagger time)
# This is b/c your opponent begins play one stagger count after you at the latest.
"""
for round_num in range(time_df.shape[0]):
round_key = 'Round ' + str(round_num + 1)
default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger) for game_num in
range(time_df.shape[1])]
match_times = [
(def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for
def_time in default_spread]
time_df.loc[round_key, :] = match_times
return time_df
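# --- Illustrative usage (editor's sketch; not part of the original module). ---
# Assumes matchup_df is indexed by 'Round 1'..'Round N' with one column per
# simultaneous game, and that game_duration / game_stagger are given in minutes.
if __name__ == "__main__":
    from datetime import datetime
    example_matchups = pd.DataFrame(
        [["A vs B", "C vs D"], ["A vs C", "B vs D"]],
        index=["Round 1", "Round 2"],
        columns=["Game 1", "Game 2"],
    )
    # With a 10-minute stagger, Round 1 starts at 09:00AM/09:10AM and Round 2
    # starts one (duration + stagger) block later.
    print(generate_times(example_matchups, datetime(2020, 5, 1, 9, 0),
                         game_duration=30, game_stagger=10))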
| 48.518519 | 116 | 0.636641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.182443 |
16904816a9bda6205128c0d91b67e3ab2be3d489 | 3,943 | py | Python | src/commands/locate_item.py | seisatsu/DennisMUD-ESP32 | b63d4b914c5e8d0f9714042997c64919b20be842 | [
"MIT"
] | 19 | 2018-10-02T03:58:46.000Z | 2021-04-09T13:09:23.000Z | commands/locate_item.py | seisatsu/Dennis | 8f1892f21beba6b21b4f7b9ba3062296bb1dc4b9 | [
"MIT"
] | 100 | 2018-09-22T22:54:35.000Z | 2021-04-16T17:46:34.000Z | src/commands/locate_item.py | seisatsu/DennisMUD-ESP32 | b63d4b914c5e8d0f9714042997c64919b20be842 | [
"MIT"
] | 1 | 2022-01-03T02:21:56.000Z | 2022-01-03T02:21:56.000Z | #######################
# Dennis MUD #
# locate_item.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
NAME = "locate item"
CATEGORIES = ["items"]
ALIASES = ["find item"]
USAGE = "locate item <item_id>"
DESCRIPTION = """Find out what room the item <item_id> is in, or who is holding it.
You can only locate an item that you own.
Wizards can locate any item.
Ex. `locate item 4`"""
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argc=1):
return False
# Perform argument type checks and casts.
itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)
if itemid is None:
return False
# Check if the item exists.
thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False)
if not thisitem:
return False
# Keep track of whether we found anything in case the item is duplified and we can't return right away.
found_something = False
# Check if we are holding the item.
if itemid in console.user["inventory"]:
console.msg("{0}: {1} ({2}) is in your inventory.".format(NAME, thisitem["name"], thisitem["id"]))
# If the item is duplified we need to keep looking for other copies.
if not thisitem["duplified"]:
return True
found_something = True
# Check if someone else is holding the item.
for targetuser in console.database.users.all():
if targetuser["name"] == console.user["name"]:
continue
if itemid in targetuser["inventory"]:
console.msg("{0}: {1} ({2}) is in the inventory of: {3}.".format(NAME, thisitem["name"], thisitem["id"],
targetuser["name"]))
# If the item is duplified we need to keep looking for other copies.
if not thisitem["duplified"]:
return True
found_something = True
# Check if the item is in a room.
for targetroom in console.database.rooms.all():
if itemid in targetroom["items"]:
console.msg("{0}: {1} ({2}) is in room: {3} ({4})".format(NAME, thisitem["name"], thisitem["id"],
targetroom["name"], targetroom["id"]))
# If the item is duplified we need to keep looking for other copies.
if not thisitem["duplified"]:
return True
found_something = True
# Couldn't find the item.
if not found_something:
console.log.error("Item exists but has no location: {item}", item=itemid)
console.msg("{0}: ERROR: Item exists but has no location. Use `requisition` to fix this.".format(NAME))
return False
# Finished.
return True
| 41.072917 | 116 | 0.633274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,326 | 0.589906 |
16904f40b9743948ab5dc6a0d2f55015295bc2fd | 2,787 | py | Python | modelling/scsb/models/monthly-comparisons.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | [
"Apache-2.0"
] | null | null | null | modelling/scsb/models/monthly-comparisons.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | [
"Apache-2.0"
] | null | null | null | modelling/scsb/models/monthly-comparisons.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | [
"Apache-2.0"
] | null | null | null | import json
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as MSE, r2_score
import math
# with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f:
# data = json.load(f)
all_zones_df = pd.read_csv("../data/scsb_all_zones.csv")
zone_25_df = pd.read_csv("../data/scsb_zone_25.csv")
zone_26_df = pd.read_csv("../data/scsb_zone_26.csv")
zone_27_df = pd.read_csv("../data/scsb_zone_27.csv")
month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist']
month_labels = [x[0:3] for x in month_dependant_variables]
data = zone_26_df
xgb_results = []
rfr_results = []
dtr_results = []
# calculate monthly estimations for 3 models
for dependant_month in month_dependant_variables:
features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]]
X = features_df.drop([dependant_month], axis=1)
y = features_df.get(dependant_month)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
xgb = XGBRegressor(random_state=42)
xgb.fit(X_train, y_train)
xgb_results.append(xgb.predict(X))
rfr = RandomForestRegressor(random_state=42)
rfr.fit(X_train, y_train)
rfr_results.append(rfr.predict(X))
dtr = DecisionTreeRegressor(random_state=42)
dtr.fit(X_train, y_train)
dtr_results.append(dtr.predict(X))
# compare the outputs of scsb against the 3 models
for row_target_index in range(20):
xgb_row = []
rfr_row = []
dtr_row = []
for month in range(12):
xgb_row.append(xgb_results[month][row_target_index])
rfr_row.append(rfr_results[month][row_target_index])
dtr_row.append(dtr_results[month][row_target_index])
plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5)
plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5)
plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5)
plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5)
plt.legend(loc='best')
plt.xticks(month_dependant_variables, month_labels)
plt.xlabel('Month')
plt.ylabel('Monthly Distribution')
name = data['name'].iloc[row_target_index]
plt.title(name)
plt.savefig('../plots/{}.png'.format(name))
plt.show()
| 38.708333 | 161 | 0.734482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.242196 |
1690da2be65319bb6696ac8f2ce11540524171c2 | 14,922 | py | Python | src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py | xzhnshng/databricks-zero-to-mlops | f1691c6f6137ad8b938e64cea4700c7011efb800 | [
"CC0-1.0"
] | null | null | null | src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py | xzhnshng/databricks-zero-to-mlops | f1691c6f6137ad8b938e64cea4700c7011efb800 | [
"CC0-1.0"
] | null | null | null | src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py | xzhnshng/databricks-zero-to-mlops | f1691c6f6137ad8b938e64cea4700c7011efb800 | [
"CC0-1.0"
] | null | null | null | # Databricks notebook source
# MAGIC %md
# MAGIC # XGBoost training
# MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it.
# MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.)
# MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar.
# MAGIC
# MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_
# COMMAND ----------
import mlflow
import databricks.automl_runtime
# Use MLflow to track experiments
mlflow.set_experiment("/Users/[email protected]/databricks_automl/label_news_articles_csv-2022_03_12-15_38")
target_col = "label"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load Data
# COMMAND ----------
from mlflow.tracking import MlflowClient
import os
import uuid
import shutil
import pandas as pd
# Create temp directory to download input data from MLflow
input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8])
os.makedirs(input_temp_dir)
# Download the artifact and read it into a pandas DataFrame
input_client = MlflowClient()
input_data_path = input_client.download_artifacts("c2dfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir)
df_loaded = pd.read_parquet(os.path.join(input_data_path, "training_data"))
# Delete the temp data
shutil.rmtree(input_temp_dir)
# Preview data
df_loaded.head(5)
# COMMAND ----------
df_loaded.head(1).to_dict()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Select supported columns
# MAGIC Select only the columns that are supported. This allows us to train a model that can predict on a dataset that has extra columns that are not used in training.
# MAGIC `[]` are dropped in the pipelines. See the Alerts tab of the AutoML Experiment page for details on why these columns are dropped.
# COMMAND ----------
from databricks.automl_runtime.sklearn.column_selector import ColumnSelector
supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"]
col_selector = ColumnSelector(supported_cols)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Preprocessors
# COMMAND ----------
transformers = []
# COMMAND ----------
# MAGIC %md
# MAGIC ### Categorical columns
# COMMAND ----------
# MAGIC %md
# MAGIC #### Low-cardinality categoricals
# MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding.
# MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of unique values in the input column.
# COMMAND ----------
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
one_hot_encoder = OneHotEncoder(handle_unknown="ignore")
transformers.append(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"]))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Medium-cardinality categoricals
# MAGIC Convert each medium-cardinality categorical column into a numerical representation.
# MAGIC Each string column is hashed to 1024 float columns.
# MAGIC Each numeric column is imputed with zeros.
# COMMAND ----------
from sklearn.feature_extraction import FeatureHasher
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
for feature in ["text", "main_img_url"]:
hash_transformer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
(f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))])
transformers.append((f"{feature}_hasher", hash_transformer, [feature]))
# COMMAND ----------
# MAGIC %md
# MAGIC ### Text features
# MAGIC Convert each feature to a fixed-length vector using TF-IDF vectorization. The length of the output
# MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams
# MAGIC where n is in the range [1, 2].
# COMMAND ----------
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
for col in {'type', 'author'}:
vectorizer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
# Reshape to 1D since SimpleImputer changes the shape of the input to 2D
("reshape", FunctionTransformer(np.reshape, kw_args={"newshape":-1})),
("tfidf", TfidfVectorizer(decode_error="ignore", ngram_range = (1, 2), max_features=1024))])
transformers.append((f"text_{col}", vectorizer, [col]))
# COMMAND ----------
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Feature standardization
# MAGIC Scale all feature columns to be centered around zero with unit variance.
# COMMAND ----------
from sklearn.preprocessing import StandardScaler
standardizer = StandardScaler()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train - Validation - Test Split
# MAGIC Split the input data into 3 sets:
# MAGIC - Train (60% of the dataset used to train the model)
# MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model)
# MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset)
# COMMAND ----------
df_loaded.columns
# COMMAND ----------
from sklearn.model_selection import train_test_split
split_X = df_loaded.drop([target_col], axis=1)
split_y = df_loaded[target_col]
# Split out train data
X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)
# Split remaining data equally for validation and test
X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train classification model
# MAGIC - Log relevant metrics to MLflow to track runs
# MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment
# MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below
# COMMAND ----------
from xgboost import XGBClassifier
help(XGBClassifier)
# COMMAND ----------
import mlflow
import sklearn
from sklearn import set_config
from sklearn.pipeline import Pipeline
set_config(display="diagram")
xgbc_classifier = XGBClassifier(
colsample_bytree=0.7324555878929649,
learning_rate=0.007636627530856404,
max_depth=7,
min_child_weight=6,
n_estimators=106,
n_jobs=100,
subsample=0.6972187716458148,
verbosity=0,
random_state=799811440,
)
model = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
("classifier", xgbc_classifier),
])
# Create a separate pipeline to transform the validation dataset. This is used for early stopping.
pipeline = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
])
mlflow.sklearn.autolog(disable=True)
X_val_processed = pipeline.fit_transform(X_val, y_val)
model
# COMMAND ----------
# Enable automatic logging of input samples, metrics, parameters, and models
mlflow.sklearn.autolog(log_input_examples=True, silent=True)
with mlflow.start_run(run_name="xgboost") as mlflow_run:
model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False)
# Training metrics are logged by MLflow autologging
# Log metrics for the validation set
xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_")
# Log metrics for the test set
xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_")
# Display the logged metrics
xgbc_val_metrics = {k.replace("val_", ""): v for k, v in xgbc_val_metrics.items()}
xgbc_test_metrics = {k.replace("test_", ""): v for k, v in xgbc_test_metrics.items()}
display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"]))
# COMMAND ----------
# Patch requisite packages to the model environment YAML for model serving
import os
import shutil
import uuid
import yaml
None
import xgboost
from mlflow.tracking import MlflowClient
xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8])
os.makedirs(xgbc_temp_dir)
xgbc_client = MlflowClient()
xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir)
xgbc_model_env_str = open(xgbc_model_env_path)
xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader)
xgbc_parsed_model_env_str["dependencies"][-1]["pip"].append(f"xgboost=={xgboost.__version__}")
with open(xgbc_model_env_path, "w") as f:
f.write(yaml.dump(xgbc_parsed_model_env_str))
xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model")
shutil.rmtree(xgbc_temp_dir)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Feature importance
# MAGIC
# MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a summary plot
# MAGIC of the relationship between features and model output. Features are ranked in descending order of
# MAGIC importance, and impact/color describe the correlation between the feature and the target variable.
# MAGIC - Generating SHAP feature importance is a very memory intensive operation, so to ensure that AutoML can run trials without
# MAGIC running out of memory, we disable SHAP by default.<br />
# MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots.
# MAGIC - To reduce the computational overhead of each trial, a single example is sampled from the validation set to explain.<br />
# MAGIC For more thorough results, increase the sample size of explanations, or provide your own examples to explain.
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has any, both the background data and
# MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed
# MAGIC SHAP values, as the imputed samples may not match the actual data distribution.
# MAGIC
# MAGIC For more information on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).
# COMMAND ----------
# Set this flag to True and re-run the notebook to see the SHAP plots
shap_enabled = True
# COMMAND ----------
if shap_enabled:
from shap import KernelExplainer, summary_plot
# SHAP cannot explain models using data with nulls.
# To enable SHAP to succeed, both the background data and examples to explain are imputed with the mode (most frequent values).
mode = X_train.mode().iloc[0]
# Sample background data for SHAP Explainer. Increase the sample size to reduce variance.
train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode)
# Sample a single example from the validation set to explain. Increase the sample size and rerun for more thorough results.
example = X_val.sample(n=1).fillna(mode)
# Use Kernel SHAP to explain feature importance on the example from the validation set.
predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns))
explainer = KernelExplainer(predict, train_sample, link="logit")
shap_values = explainer.shap_values(example, l1_reg=False)
summary_plot(shap_values, example, class_names=model.classes_)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Inference
# MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference.
# MAGIC
# MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below
# MAGIC
# MAGIC ### Register to Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC registered_model_version = mlflow.register_model(model_uri, model_name)
# MAGIC ```
# MAGIC
# MAGIC ### Load from Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC model_version = registered_model_version.version
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}")
# MAGIC model.predict(input_X)
# MAGIC ```
# MAGIC
# MAGIC ### Load model without registering
# MAGIC ```
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri)
# MAGIC model.predict(input_X)
# MAGIC ```
# COMMAND ----------
# model_uri for the generated model
print(f"runs:/{ mlflow_run.info.run_id }/model")
# COMMAND ----------
# MAGIC %md
# MAGIC ### Loading model to make prediction
# COMMAND ----------
model_uri = f"runs:/51c0348482e042ea8e4b7983ab6bff99/model"
model = mlflow.pyfunc.load_model(model_uri)
#model.predict(input_X)
# COMMAND ----------
import pandas as pd
data = {'author': {0: 'bigjim.com'},
'published': {0: '2016-10-27T18:05:26.351+03:00'},
'title': {0: 'aliens are coming to invade earth'},
'text': {0: 'aliens are coming to invade earth'},
'language': {0: 'english'},
'site_url': {0: 'cnn.com'},
'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'},
'type': {0: 'bs'},
'title_without_stopwords': {0: 'aliens are coming to invade earth'},
'text_without_stopwords': {0: 'aliens are coming to invade earth'},
'hasImage': {0: 1.0}}
df = pd.DataFrame(data=data)
df.head()
# COMMAND ----------
model.predict(df)
# COMMAND ----------
| 36.753695 | 461 | 0.743399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,278 | 0.621767 |
1694a3aec6658351c14a81b2e91e92955b6cb8a7 | 341 | py | Python | lucky_guess/__init__.py | mfinzi/lucky-guess-chemist | 01898b733dc7d026f70d0cb6337309cb600502fb | [
"MIT"
] | null | null | null | lucky_guess/__init__.py | mfinzi/lucky-guess-chemist | 01898b733dc7d026f70d0cb6337309cb600502fb | [
"MIT"
] | null | null | null | lucky_guess/__init__.py | mfinzi/lucky-guess-chemist | 01898b733dc7d026f70d0cb6337309cb600502fb | [
"MIT"
] | null | null | null |
import importlib
import pkgutil
__all__ = []
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
module = importlib.import_module('.'+module_name,package=__name__)
try:
globals().update({k: getattr(module, k) for k in module.__all__})
__all__ += module.__all__
except AttributeError: continue | 34.1 | 73 | 0.71261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.008798 |
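# Editor's note: the loop above re-exports every name listed in each
# submodule's __all__, so (assuming a submodule defines, e.g., __all__ = ["foo"])
# callers can write `from lucky_guess import foo` without naming the submodule.
# Submodules that do not define __all__ are skipped by the AttributeError guard.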
1695439f6b89942d55b135dae20f140a0772199c | 3,727 | py | Python | shuffling_algorithm.py | BaptisteLafoux/aztec_tiling | 413acd8751b8178942e91fbee32987f02bc5c695 | [
"MIT"
] | null | null | null | shuffling_algorithm.py | BaptisteLafoux/aztec_tiling | 413acd8751b8178942e91fbee32987f02bc5c695 | [
"MIT"
] | null | null | null | shuffling_algorithm.py | BaptisteLafoux/aztec_tiling | 413acd8751b8178942e91fbee32987f02bc5c695 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:04:48 2020
@author: baptistelafoux
"""
import domino
import numpy as np
import numpy.lib.arraysetops as aso
def spawn_block(x, y):
if np.random.rand() > 0.5:
d1 = domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0,-1]))
d2 = domino.domino(np.array([x, y + 1]), np.array([x + 1, y + 1]), np.array([0, 1]))
else:
d1 = domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1,0]))
d2 = domino.domino(np.array([x + 1, y]), np.array([x + 1, y + 1]), np.array([ 1,0]))
return [d1, d2]
def aztec_grid(order, only_new_blocks = True):
grid_X, grid_Y = np.meshgrid(np.arange(2 * order) - (2 * order - 1)/2 , np.arange(2 * order) - (2 * order - 1)/2)
center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T
center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))]
X = center_pts[:,0]
Y = center_pts[:,1]
if only_new_blocks: idx = (np.abs(X) + np.abs(Y) <= order) & (np.abs(X) + np.abs(Y) > order - 1)
else: idx = np.abs(X) + np.abs(Y) <= order
return X[idx], Y[idx]
def add_to_grid(tiles, grid):
for tile in tiles:
grid[tile.pt1[0], tile.pt1[1]] = tile
grid[tile.pt2[0], tile.pt2[1]] = tile
return grid
def generate_good_block(grid):
center_pts = np.array([*grid])
center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))]
X = center_pts[:, 0]
Y = center_pts[:, 1]
for (x,y) in zip(X,Y):
try:
if ~grid[x, y]:
idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)]
try:
should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool)
if should_create_a_block:
grid = add_to_grid(spawn_block(x, y), grid)
except: pass
except: pass
return grid
def enlarge_grid_deprec(grid, order):
center_pts = [*grid]
X_aztec, Y_aztec = aztec_grid(order)
center_pts_aztec = [tuple([x,y]) for (x,y) in zip(X_aztec, Y_aztec)]
diff_array = set(center_pts_aztec) - set(center_pts)
if order > 1:
for x, y in list(diff_array):
grid[x, y] = False
else:
for (x,y) in zip(X_aztec, Y_aztec):
grid[x, y] = False
return grid
def enlarge_grid(grid, order):
X_aztec, Y_aztec = aztec_grid(order, True)
for (x,y) in zip(X_aztec, Y_aztec):
grid[x, y] = False
return grid
def move_tiles(grid, curr_order):
temp_grid = {}
for coord in grid:
if grid[coord] != False:
x1, y1 = grid[coord].pt1
x2, y2 = grid[coord].pt2
grid[coord].move()
temp_grid = add_to_grid([grid[coord]], temp_grid)
grid[x1, y1] = False
grid[x2, y2] = False
for coord in temp_grid:
grid[coord] = temp_grid[coord]
return grid
def destroy_bad_blocks(grid):
center_pts = np.array([*grid])
X = center_pts[:, 0]
Y = center_pts[:, 1]
for (x,y) in zip(X,Y):
try:
next_x, next_y = np.array([x, y]) + grid[x, y].v
if (grid[next_x, next_y] != False):
if all(grid[next_x, next_y].v == - grid[x, y].v):
grid[x, y ] = False
grid[next_x, next_y] = False
except: pass
return grid
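# --- Illustrative driver loop (editor's sketch; the orchestration below is an
# assumption and is not taken from this module). The classic domino-shuffling
# construction of an Aztec diamond grows the region one order at a time: add
# the new empty ring, delete pairs of tiles that would collide, slide every
# remaining tile along its velocity vector, then fill each empty 2x2 block
# with a freshly spawned pair.
if __name__ == "__main__":
    grid = {}
    for order in range(1, 5):
        grid = enlarge_grid(grid, order)
        grid = destroy_bad_blocks(grid)
        grid = move_tiles(grid, order)
        grid = generate_good_block(grid)
    n_dominoes = sum(1 for tile in grid.values() if tile is not False) // 2
    print(n_dominoes, "dominoes placed")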
| 26.81295 | 117 | 0.499866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.030319 |
1696d983057a2d937827a5a96f9b5500cb3c490c | 478 | py | Python | scripts/matrix_operations.py | h3ct0r/gas_mapping_example | 57bd8333b4832281fbb89019df440374e2b50b9b | [
"Unlicense"
] | 1 | 2022-02-28T21:55:23.000Z | 2022-02-28T21:55:23.000Z | scripts/matrix_operations.py | ArghyaChatterjee/gas_mapping_kerneldm | 57bd8333b4832281fbb89019df440374e2b50b9b | [
"Unlicense"
] | null | null | null | scripts/matrix_operations.py | ArghyaChatterjee/gas_mapping_kerneldm | 57bd8333b4832281fbb89019df440374e2b50b9b | [
"Unlicense"
] | 2 | 2021-12-14T05:15:18.000Z | 2022-02-28T21:55:10.000Z | import numpy as np
def get_position_of_minimum(matrix):
return np.unravel_index(np.nanargmin(matrix), matrix.shape)
def get_position_of_maximum(matrix):
return np.unravel_index(np.nanargmax(matrix), matrix.shape)
def get_distance_matrix(cell_grid_x, cell_grid_y, x, y):
return np.sqrt((x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2)
def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y):
return (x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2
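# --- Illustrative usage (editor's sketch; not part of the original module) ---
if __name__ == "__main__":
    # Locate the grid cell whose centre is closest to the point (1.2, 3.4).
    cell_x, cell_y = np.meshgrid(np.linspace(0, 4, 5), np.linspace(0, 4, 5))
    distances = get_distance_matrix(cell_x, cell_y, 1.2, 3.4)
    print(get_position_of_minimum(distances))  # (row, col) of the nearest cell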
| 26.555556 | 67 | 0.725941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
16988b5f9f77ebb40b1eb37bef67f48bd826786d | 121 | py | Python | ShanghaiPower/build_up.py | biljiang/pyprojects | 10095c6b8f2f32831e8a36e122d1799f135dc5df | [
"MIT"
] | null | null | null | ShanghaiPower/build_up.py | biljiang/pyprojects | 10095c6b8f2f32831e8a36e122d1799f135dc5df | [
"MIT"
] | null | null | null | ShanghaiPower/build_up.py | biljiang/pyprojects | 10095c6b8f2f32831e8a36e122d1799f135dc5df | [
"MIT"
] | null | null | null | from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize(["license_chk.py"]))
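# Editor's note (assumption about intended use): a cythonize/setup script like
# this is normally run as `python build_up.py build_ext --inplace`, which
# compiles license_chk.py into an extension module next to the source file.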
| 20.166667 | 50 | 0.793388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.132231 |
169a6a92aa8a5f8b13f2ca7a2bc5a3d4390e96a9 | 6,363 | py | Python | quantum/plugins/nicira/extensions/nvp_qos.py | yamt/neutron | f94126739a48993efaf1d1439dcd3dadb0c69742 | [
"Apache-2.0"
] | null | null | null | quantum/plugins/nicira/extensions/nvp_qos.py | yamt/neutron | f94126739a48993efaf1d1439dcd3dadb0c69742 | [
"Apache-2.0"
] | null | null | null | quantum/plugins/nicira/extensions/nvp_qos.py | yamt/neutron | f94126739a48993efaf1d1439dcd3dadb0c69742 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
from abc import abstractmethod
from quantum.api import extensions
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import exceptions as qexception
from quantum import manager
# For policy.json/Auth
qos_queue_create = "create_qos_queue"
qos_queue_delete = "delete_qos_queue"
qos_queue_get = "get_qos_queue"
qos_queue_list = "get_qos_queues"
class DefaultQueueCreateNotAdmin(qexception.InUse):
message = _("Need to be admin in order to create queue called default")
class DefaultQueueAlreadyExists(qexception.InUse):
message = _("Default queue already exists.")
class QueueInvalidDscp(qexception.InvalidInput):
message = _("Invalid value for dscp %(data)s must be integer.")
class QueueMinGreaterMax(qexception.InvalidInput):
message = _("Invalid bandwidth rate, min greater than max.")
class QueueInvalidBandwidth(qexception.InvalidInput):
message = _("Invalid bandwidth rate, %(data)s must be a non negative"
" integer.")
class MissingDSCPForTrusted(qexception.InvalidInput):
message = _("No DSCP field needed when QoS workload marked trusted")
class QueueNotFound(qexception.NotFound):
message = _("Queue %(id)s does not exist")
class QueueInUseByPort(qexception.InUse):
message = _("Unable to delete queue attached to port.")
class QueuePortBindingNotFound(qexception.NotFound):
message = _("Port is not associated with lqueue")
def convert_to_unsigned_int_or_none(val):
if val is None:
return
try:
val = int(val)
if val < 0:
raise ValueError
except (ValueError, TypeError):
msg = _("'%s' must be a non negative integer.") % val
raise qexception.InvalidInput(error_message=msg)
return val
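# Editor's note (illustrative): convert_to_unsigned_int_or_none("3") returns 3,
# convert_to_unsigned_int_or_none(None) returns None, and negative or
# non-numeric input raises InvalidInput.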
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'qos_queues': {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'default': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_boolean,
'is_visible': True, 'default': False},
'name': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'min': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'max': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'convert_to': convert_to_unsigned_int_or_none},
'qos_marking': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['untrusted', 'trusted']},
'default': 'untrusted', 'is_visible': True},
'dscp': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
},
}
QUEUE = 'queue_id'
RXTX_FACTOR = 'rxtx_factor'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
RXTX_FACTOR: {'allow_post': True,
'allow_put': False,
'is_visible': False,
'default': 1,
'convert_to': convert_to_unsigned_int_or_none},
QUEUE: {'allow_post': False,
'allow_put': False,
'is_visible': True,
'default': False}},
'networks': {QUEUE: {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': False}}
}
class Nvp_qos(object):
"""Port Queue extension."""
@classmethod
def get_name(cls):
return "nvp-qos"
@classmethod
def get_alias(cls):
return "nvp-qos"
@classmethod
def get_description(cls):
return "NVP QoS extension."
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/nvp-qos/api/v2.0"
@classmethod
def get_updated(cls):
return "2012-10-05T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = manager.QuantumManager.get_plugin()
resource_name = 'qos_queue'
collection_name = resource_name.replace('_', '-') + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
controller = base.create_resource(collection_name,
resource_name,
plugin, params, allow_bulk=False)
ex = extensions.ResourceExtension(collection_name,
controller)
exts.append(ex)
return exts
def get_extended_resources(self, version):
if version == "2.0":
return dict(EXTENDED_ATTRIBUTES_2_0.items() +
RESOURCE_ATTRIBUTE_MAP.items())
else:
return {}
class QueuePluginBase(object):
@abstractmethod
def create_qos_queue(self, context, queue):
pass
@abstractmethod
def delete_qos_queue(self, context, id):
pass
@abstractmethod
def get_qos_queue(self, context, id, fields=None):
pass
@abstractmethod
def get_qos_queues(self, context, filters=None, fields=None):
pass
| 31.191176 | 78 | 0.610718 | 2,786 | 0.437844 | 0 | 0 | 1,382 | 0.217193 | 0 | 0 | 2,220 | 0.348892 |
169b6898f6bda824a9456c155bd29a6f84fdb9e8 | 251 | py | Python | easyneuron/math/__init__.py | TrendingTechnology/easyneuron | b99822c7206a144a0ab61b3b6b5cddeaca1a3c6a | [
"Apache-2.0"
] | 1 | 2021-12-14T19:21:44.000Z | 2021-12-14T19:21:44.000Z | easyneuron/math/__init__.py | TrendingTechnology/easyneuron | b99822c7206a144a0ab61b3b6b5cddeaca1a3c6a | [
"Apache-2.0"
] | null | null | null | easyneuron/math/__init__.py | TrendingTechnology/easyneuron | b99822c7206a144a0ab61b3b6b5cddeaca1a3c6a | [
"Apache-2.0"
] | null | null | null | """easyneuron.math contains all of the maths tools that you'd ever need for your AI projects, when used alongside Numpy.
To suggest more to be added, please add an issue on the GitHub repo.
"""
from easyneuron.math.distance import euclidean_distance | 41.833333 | 120 | 0.788845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.772908 |
169c6caecdf841a261ae5cbf1ce633a03edb8b3a | 2,532 | py | Python | tests/unit/concurrently/test_TaskPackageDropbox_put.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/concurrently/test_TaskPackageDropbox_put.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/concurrently/test_TaskPackageDropbox_put.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | null | null | null | # Tai Sakuma <[email protected]>
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.concurrently import TaskPackageDropbox
##__________________________________________________________________||
@pytest.fixture()
def workingarea():
return mock.MagicMock()
@pytest.fixture()
def dispatcher():
return mock.MagicMock()
@pytest.fixture()
def obj(workingarea, dispatcher):
ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
ret.open()
yield ret
ret.close()
##__________________________________________________________________||
def test_repr(obj):
repr(obj)
def test_open_terminate_close(workingarea, dispatcher):
obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
assert 0 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 0 == dispatcher.terminate.call_count
obj.open()
assert 1 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 0 == dispatcher.terminate.call_count
obj.terminate()
assert 1 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 1 == dispatcher.terminate.call_count
obj.close()
assert 1 == workingarea.open.call_count
assert 1 == workingarea.close.call_count
assert 1 == dispatcher.terminate.call_count
def test_put(obj, workingarea, dispatcher):
workingarea.put_package.side_effect = [0, 1] # pkgidx
dispatcher.run.side_effect = [1001, 1002] # runid
package0 = mock.MagicMock(name='package0')
package1 = mock.MagicMock(name='package1')
assert 0 == obj.put(package0)
assert 1 == obj.put(package1)
assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list
def test_put_multiple(obj, workingarea, dispatcher):
workingarea.put_package.side_effect = [0, 1] # pkgidx
dispatcher.run_multiple.return_value = [1001, 1002] # runid
package0 = mock.MagicMock(name='package0')
package1 = mock.MagicMock(name='package1')
assert [0, 1] == obj.put_multiple([package0, package1])
assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list
##__________________________________________________________________||
| 30.878049 | 98 | 0.742496 | 0 | 0 | 167 | 0.065956 | 312 | 0.123223 | 0 | 0 | 315 | 0.124408 |
169dfe6f123a1bb92dcedefda60fdcdf0dde5b42 | 3,497 | py | Python | networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py | gokarslan/networking-odl2 | 6a6967832b2c02dfcff6a9f0ab6e36472b849ce8 | [
"Apache-2.0"
] | null | null | null | networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py | gokarslan/networking-odl2 | 6a6967832b2c02dfcff6a9f0ab6e36472b849ce8 | [
"Apache-2.0"
] | null | null | null | networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py | gokarslan/networking-odl2 | 6a6967832b2c02dfcff6a9f0ab6e36472b849ce8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base
from oslo_config import cfg
load_tests = testscenarios.load_tests_apply_scenarios
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase):
def setUp(self):
super(OdlDhcpDriverTestCase, self).setUp()
cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl')
self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
self.mech.initialize()
def test_dhcp_flag_test(self):
self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service)
def test_dhcp_driver_load(self):
self.assertTrue(isinstance(self.mech.dhcp_driver,
odl_dhcp_driver.OdlDhcpDriver))
def test_dhcp_port_create_on_subnet_event(self):
data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
True)
subnet_context = data['subnet_context']
mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
self.mech.journal.sync_pending_entries()
port = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNotNone(port)
def test_dhcp_delete_on_port_update_event(self):
data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
True)
subnet_context = data['subnet_context']
plugin = data['plugin']
self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context)
port_id = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNotNone(port_id)
port = plugin.get_port(data['context'], port_id)
port['fixed_ips'] = []
ports = {'port': port}
plugin.update_port(data['context'], port_id, ports)
mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)
self.mech.journal.sync_pending_entries()
port_id = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNone(port_id)
| 40.662791 | 78 | 0.637975 | 2,463 | 0.704318 | 0 | 0 | 0 | 0 | 0 | 0 | 917 | 0.262225 |
169ed0cf36c52beabffce88a57318686603b6c41 | 443 | py | Python | users/migrations/0002_auto_20191113_1352.py | Dragonite/djangohat | 68890703b1fc647785cf120ada281d6f3fcc4121 | [
"MIT"
] | 2 | 2019-11-15T05:07:24.000Z | 2019-11-15T10:27:48.000Z | users/migrations/0002_auto_20191113_1352.py | Dragonite/djangohat | 68890703b1fc647785cf120ada281d6f3fcc4121 | [
"MIT"
] | null | null | null | users/migrations/0002_auto_20191113_1352.py | Dragonite/djangohat | 68890703b1fc647785cf120ada281d6f3fcc4121 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-11-13 13:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='users',
name='site_key',
field=models.CharField(blank=True, default='b7265a9e874f4068b0b48d45ef97595a', max_length=32, unique=True),
),
]
| 23.315789 | 119 | 0.625282 | 350 | 0.790068 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.268623 |
16a0f5c79d486ed958f66a4f801398499c8d9ff1 | 3,389 | py | Python | premium/backend/src/baserow_premium/api/admin/dashboard/views.py | cjh0613/baserow | 62871f5bf53c9d25446976031aacb706c0abe584 | ["MIT"] | 839 | 2020-07-20T13:29:34.000Z | 2022-03-31T21:09:16.000Z | premium/backend/src/baserow_premium/api/admin/dashboard/views.py | cjh0613/baserow | 62871f5bf53c9d25446976031aacb706c0abe584 | ["MIT"] | 28 | 2020-08-07T09:23:58.000Z | 2022-03-01T22:32:40.000Z | premium/backend/src/baserow_premium/api/admin/dashboard/views.py | cjh0613/baserow | 62871f5bf53c9d25446976031aacb706c0abe584 | ["MIT"] | 79 | 2020-08-04T01:48:01.000Z | 2022-03-27T13:30:54.000Z |
from datetime import timedelta
from django.contrib.auth import get_user_model
from drf_spectacular.utils import extend_schema
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from rest_framework.views import APIView
from baserow.api.decorators import accept_timezone
from baserow.core.models import Group, Application
from baserow_premium.admin.dashboard.handler import AdminDashboardHandler
from .serializers import AdminDashboardSerializer
User = get_user_model()
class AdminDashboardView(APIView):
permission_classes = (IsAdminUser,)
@extend_schema(
tags=["Admin"],
operation_id="admin_dashboard",
description="Returns the new and active users for the last 24 hours, 7 days and"
" 30 days. The `previous_` values are the values of the period before, so for "
"example `previous_new_users_last_24_hours` are the new users that signed up "
"from 48 to 24 hours ago. It can be used to calculate an increase or decrease "
"in the amount of signups. A list of the new and active users for every day "
"for the last 30 days is also included.\n\nThis is a **premium** feature.",
responses={
200: AdminDashboardSerializer,
401: None,
},
)
@accept_timezone()
def get(self, request, now):
"""
Returns the new and active users for the last 24 hours, 7 days and 30 days.
The `previous_` values are the values of the period before, so for example
`previous_new_users_last_24_hours` are the new users that signed up from 48
to 24 hours ago. It can be used to calculate an increase or decrease in the
amount of signups. A list of the new and active users for every day for the
last 30 days is also included.
"""
handler = AdminDashboardHandler()
total_users = User.objects.filter(is_active=True).count()
total_groups = Group.objects.all().count()
total_applications = Application.objects.all().count()
new_users = handler.get_new_user_counts(
{
"new_users_last_24_hours": timedelta(hours=24),
"new_users_last_7_days": timedelta(days=7),
"new_users_last_30_days": timedelta(days=30),
},
include_previous=True,
)
active_users = handler.get_active_user_count(
{
"active_users_last_24_hours": timedelta(hours=24),
"active_users_last_7_days": timedelta(days=7),
"active_users_last_30_days": timedelta(days=30),
},
include_previous=True,
)
new_users_per_day = handler.get_new_user_count_per_day(
timedelta(days=30), now=now
)
active_users_per_day = handler.get_active_user_count_per_day(
timedelta(days=30), now=now
)
serializer = AdminDashboardSerializer(
{
"total_users": total_users,
"total_groups": total_groups,
"total_applications": total_applications,
"new_users_per_day": new_users_per_day,
"active_users_per_day": active_users_per_day,
**new_users,
**active_users,
}
)
return Response(serializer.data)
| 38.078652 | 88 | 0.649159 | 2,865 | 0.845382 | 0 | 0 | 2,785 | 0.821776 | 0 | 0 | 1,193 | 0.352021 |
16a20512bd62fea83ee40c49a4b7cc5fa386ce48 | 969 | py | Python | src/clientOld.py | dan3612812/socketChatRoom | b0d548477687de2d9fd521826db9ea75e528de5c | [
"MIT"
] | null | null | null | src/clientOld.py | dan3612812/socketChatRoom | b0d548477687de2d9fd521826db9ea75e528de5c | [
"MIT"
] | null | null | null | src/clientOld.py | dan3612812/socketChatRoom | b0d548477687de2d9fd521826db9ea75e528de5c | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import sys
import socket
import time
import threading
import select
HOST = '192.168.11.98'
PORT = int(sys.argv[1])
queue = []
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
queue.append(s)
print("add client to queue")
def socketRecv():
while True:
data = s.recv(1024).decode("utf-8")
print(data)
time.sleep(0.1)
def inputJob():
while True:
data = input()
s.send(bytes(data, "utf-8"))
time.sleep(0.1)
socketThread = threading.Thread(target=socketRecv)
socketThread.start()
# inputThread = Thread(target=inputJob)
# inputThread.start()
try:
while True:
data = input()
s.send(bytes(data, "utf-8"))
time.sleep(0.1)
except (KeyboardInterrupt, EOFError):
    print("in except")
    # s.close()  # close the connection
socketThread.do_run = False
# socketThread.join()
# inputThread.join()
print("close thread")
sys.exit(0)
| 19.38 | 53 | 0.627451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.238485 |
16a205ccc4af00539940fcbe977b97f31972c365 | 6,296 | py | Python | plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Import observable(s) into Anomali ThreatStream with approval"
class Input:
FILE = "file"
OBSERVABLE_SETTINGS = "observable_settings"
class Output:
RESULTS = "results"
class ImportObservableInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"file": {
"$ref": "#/definitions/file",
"title": "File",
"description": "File of data to be imported into Anomali ThreatStream",
"order": 1
},
"observable_settings": {
"$ref": "#/definitions/observable_settings",
"title": "Observable Settings",
"description": "Settings needed for importing an observable that needs approval",
"order": 2
}
},
"required": [
"file"
],
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"observable_settings": {
"type": "object",
"title": "observable_settings",
"properties": {
"classification": {
"type": "string",
"title": "Classification",
"description": "Classification of the observable",
"default": "private",
"enum": [
"public",
"private"
],
"order": 4
},
"confidence": {
"type": "integer",
"title": "Confidence",
"description": "Confidence value assigned to the observable. Confidence score can range from 0-100, in increasing order of confidence",
"order": 1
},
"domain_mapping": {
"type": "string",
"title": "Domain Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 8
},
"email_mapping": {
"type": "string",
"title": "Email Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 10
},
"expiration_ts": {
"type": "string",
"title": "Expiration Time Stamp",
"displayType": "date",
"description": "Time stamp of when intelligence will expire on ThreatStream",
"format": "date-time",
"order": 5
},
"ip_mapping": {
"type": "string",
"title": "IP Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 7
},
"md5_mapping": {
"type": "string",
"title": "MD5 Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 11
},
"notes": {
"type": "array",
"title": "Notes",
"description": "Additional details for the observable. This information is displayed in the Tags column of the ThreatStream UI e.g ['note1', 'note2', 'note3']",
"items": {
"type": "string"
},
"order": 6
},
"severity": {
"type": "string",
"title": "Severity",
"description": "Severity you want to assign to the observable when it is imported",
"default": "",
"enum": [
"low",
"medium",
"high",
"very-high",
""
],
"order": 3
},
"source_confidence_weight": {
"type": "integer",
"title": "Source Confidence Weight",
"description": "Specifies the ratio between the amount of the source confidence of each observable and the ThreatStream confidence",
"order": 2
},
"threat_type": {
"type": "string",
"title": "Threat Type",
"description": "Type of threat associated with the imported observables",
"order": 13
},
"trustedcircles": {
"type": "array",
"title": "Trusted Circles",
"description": "ID of the trusted circle to which this threat data should be imported. If you want to import the threat data to multiple trusted circles, enter the list of comma-separated IDs e.g [1,2,3]",
"items": {
"type": "integer"
},
"order": 12
},
"url_mapping": {
"type": "string",
"title": "URL Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 9
}
},
"required": [
"classification"
]
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ImportObservableOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"results": {
"$ref": "#/definitions/import_observable_response",
"title": "Results",
"description": "Results from importing observable(s)",
"order": 1
}
},
"definitions": {
"import_observable_response": {
"type": "object",
"title": "import_observable_response",
"properties": {
"import_session_id": {
"type": "string",
"title": "Import Session ID",
"description": "ID for import session",
"order": 3
},
"job_id": {
"type": "string",
"title": "Job ID",
"description": "Job ID",
"order": 1
},
"success": {
"type": "boolean",
"title": "Success",
"description": "If import was successful",
"order": 2
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 28.488688 | 215 | 0.513977 | 6,207 | 0.985864 | 0 | 0 | 0 | 0 | 0 | 0 | 5,828 | 0.925667 |
16a25b6b94677a9d90afcb9439df38171a1429af | 25,083 | py | Python | trove/tests/unittests/quota/test_quota.py | citrix-openstack-build/trove | 52506396dd7bd095d1623d40cf2e67f2b478dc1d | ["Apache-2.0"] | null | null | null | trove/tests/unittests/quota/test_quota.py | citrix-openstack-build/trove | 52506396dd7bd095d1623d40cf2e67f2b478dc1d | ["Apache-2.0"] | null | null | null | trove/tests/unittests/quota/test_quota.py | citrix-openstack-build/trove | 52506396dd7bd095d1623d40cf2e67f2b478dc1d | ["Apache-2.0"] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from mockito import mock, when, unstub, any, verify, never, times
from mock import Mock
from trove.quota.quota import DbQuotaDriver
from trove.quota.models import Resource
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.db.models import DatabaseModelBase
from trove.extensions.mgmt.quota.service import QuotaController
from trove.common import exception
from trove.common import cfg
from trove.quota.quota import run_with_quotas
from trove.quota.quota import QUOTAS
"""
Unit tests for the classes and functions in DbQuotaDriver.py.
"""
CONF = cfg.CONF
resources = {
Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'),
Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'),
}
FAKE_TENANT1 = "123456"
FAKE_TENANT2 = "654321"
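# Reservation life cycle exercised by the tests below (as read from the
# assertions): reserve() checks the requested deltas against each tenant's
# hard limits and records Reservation rows against the QuotaUsage entries;
# commit() folds each reservation's delta into in_use and releases it from
# reserved; rollback() simply releases the reserved amount again.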
class Run_with_quotasTest(testtools.TestCase):
def setUp(self):
super(Run_with_quotasTest, self).setUp()
self.quota_reserve_orig = QUOTAS.reserve
self.quota_rollback_orig = QUOTAS.rollback
self.quota_commit_orig = QUOTAS.commit
QUOTAS.reserve = Mock()
QUOTAS.rollback = Mock()
QUOTAS.commit = Mock()
def tearDown(self):
super(Run_with_quotasTest, self).tearDown()
QUOTAS.reserve = self.quota_reserve_orig
QUOTAS.rollback = self.quota_rollback_orig
QUOTAS.commit = self.quota_commit_orig
def test_run_with_quotas(self):
f = Mock()
run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.commit.called)
self.assertFalse(QUOTAS.rollback.called)
self.assertTrue(f.called)
def test_run_with_quotas_error(self):
f = Mock(side_effect=Exception())
self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1,
{'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.rollback.called)
self.assertFalse(QUOTAS.commit.called)
self.assertTrue(f.called)
class QuotaControllerTest(testtools.TestCase):
def setUp(self):
super(QuotaControllerTest, self).setUp()
context = mock()
context.is_admin = True
req = mock()
req.environ = mock()
when(req.environ).get(any()).thenReturn(context)
self.req = req
self.controller = QuotaController()
def tearDown(self):
super(QuotaControllerTest, self).tearDown()
unstub()
def test_update_unknown_resource(self):
body = {'quotas': {'unknown_resource': 5}}
self.assertRaises(exception.QuotaResourceUnknown,
self.controller.update, self.req, body,
FAKE_TENANT1, FAKE_TENANT2)
def test_update_resource_no_value(self):
quota = mock(Quota)
when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(quota)
body = {'quotas': {'instances': None}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(quota, never).save()
self.assertEquals(200, result.status)
def test_update_resource_instance(self):
instance_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(instance_quota)
body = {'quotas': {'instances': 2}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(instance_quota, times=1).save()
self.assertTrue('instances' in result._data['quotas'])
self.assertEquals(200, result.status)
self.assertEquals(2, result._data['quotas']['instances'])
@testtools.skipIf(not CONF.trove_volume_support,
'Volume support is not enabled')
def test_update_resource_volume(self):
instance_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(instance_quota)
volume_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='volumes').thenReturn(volume_quota)
body = {'quotas': {'instances': None, 'volumes': 10}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(instance_quota, never).save()
self.assertFalse('instances' in result._data['quotas'])
verify(volume_quota, times=1).save()
self.assertEquals(200, result.status)
self.assertEquals(10, result._data['quotas']['volumes'])
class DbQuotaDriverTest(testtools.TestCase):
def setUp(self):
super(DbQuotaDriverTest, self).setUp()
self.driver = DbQuotaDriver(resources)
self.orig_Quota_find_all = Quota.find_all
self.orig_QuotaUsage_find_all = QuotaUsage.find_all
self.orig_QuotaUsage_find_by = QuotaUsage.find_by
self.orig_Reservation_create = Reservation.create
self.orig_QuotaUsage_create = QuotaUsage.create
self.orig_QuotaUsage_save = QuotaUsage.save
self.orig_Reservation_save = Reservation.save
self.mock_quota_result = Mock()
self.mock_usage_result = Mock()
Quota.find_all = Mock(return_value=self.mock_quota_result)
QuotaUsage.find_all = Mock(return_value=self.mock_usage_result)
def tearDown(self):
super(DbQuotaDriverTest, self).tearDown()
Quota.find_all = self.orig_Quota_find_all
QuotaUsage.find_all = self.orig_QuotaUsage_find_all
QuotaUsage.find_by = self.orig_QuotaUsage_find_by
Reservation.create = self.orig_Reservation_create
QuotaUsage.create = self.orig_QuotaUsage_create
QuotaUsage.save = self.orig_QuotaUsage_save
Reservation.save = self.orig_Reservation_save
def test_get_defaults(self):
defaults = self.driver.get_defaults(resources)
self.assertEqual(CONF.max_instances_per_user,
defaults[Resource.INSTANCES])
self.assertEqual(CONF.max_volumes_per_user,
defaults[Resource.VOLUMES])
def test_get_quota_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=12)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, quota.tenant_id)
self.assertEquals(Resource.INSTANCES, quota.resource)
self.assertEquals(12, quota.hard_limit)
def test_get_quota_by_tenant_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, quota.tenant_id)
self.assertEquals(Resource.VOLUMES, quota.resource)
self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit)
def test_get_all_quotas_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22),
Quota(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
hard_limit=15)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_all_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(CONF.max_instances_per_user,
quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(CONF.max_volumes_per_user,
quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_one_default(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(CONF.max_volumes_per_user,
quotas[Resource.VOLUMES].hard_limit)
def test_get_quota_usage_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=3,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, usage.tenant_id)
self.assertEquals(Resource.VOLUMES, usage.resource)
self.assertEquals(3, usage.in_use)
self.assertEquals(1, usage.reserved)
def test_get_quota_usage_by_tenant_default(self):
FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(return_value=FAKE_QUOTA)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, usage.tenant_id)
self.assertEquals(Resource.VOLUMES, usage.resource)
self.assertEquals(0, usage.in_use)
self.assertEquals(0, usage.reserved)
def test_get_all_quota_usages_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=2,
reserved=1),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(2, usages[Resource.INSTANCES].in_use)
self.assertEquals(1, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(1, usages[Resource.VOLUMES].in_use)
self.assertEquals(1, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_all_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(0, usages[Resource.INSTANCES].in_use)
self.assertEquals(0, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(0, usages[Resource.VOLUMES].in_use)
self.assertEquals(0, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_one_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0)]
NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(0, usages[Resource.INSTANCES].in_use)
self.assertEquals(0, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(0, usages[Resource.VOLUMES].in_use)
self.assertEquals(0, usages[Resource.VOLUMES].reserved)
def test_reserve(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
delta = {'instances': 2, 'volumes': 3}
self.driver.reserve(FAKE_TENANT1, resources, delta)
_, kw = Reservation.create.call_args_list[0]
self.assertEquals(1, kw['usage_id'])
self.assertEquals(2, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
_, kw = Reservation.create.call_args_list[1]
self.assertEquals(2, kw['usage_id'])
self.assertEquals(3, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
def test_reserve_resource_unknown(self):
delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123}
self.assertRaises(exception.QuotaResourceUnknown,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_usage(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 5, 'volumes': 3}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_reserved(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 4, 'volumes': 2}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_but_can_apply_negative_deltas(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=10,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=50,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
delta = {'instances': -1, 'volumes': -3}
self.driver.reserve(FAKE_TENANT1, resources, delta)
_, kw = Reservation.create.call_args_list[0]
self.assertEquals(1, kw['usage_id'])
self.assertEquals(-1, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
_, kw = Reservation.create.call_args_list[1]
self.assertEquals(2, kw['usage_id'])
self.assertEquals(-3, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
def test_commit(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.commit(FAKE_RESERVATIONS)
self.assertEqual(6, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[0].status)
self.assertEqual(3, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[1].status)
def test_rollback(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.rollback(FAKE_RESERVATIONS)
self.assertEqual(5, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[0].status)
self.assertEqual(1, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[1].status)
| 42.513559 | 79 | 0.574812 | 23,585 | 0.940278 | 0 | 0 | 941 | 0.037515 | 0 | 0 | 1,247 | 0.049715 |
16a2ce4183cf617439f69c8fd39f2dded2cf7d88 | 180 | py | Python | analisador_sintatico/blueprints/api/parsers.py | viniciusandd/uri-analisador-sintatico | b347f4293e4c60bd3b2c838c8cef0d75db2c0bec | ["MIT"] | null | null | null | analisador_sintatico/blueprints/api/parsers.py | viniciusandd/uri-analisador-sintatico | b347f4293e4c60bd3b2c838c8cef0d75db2c0bec | ["MIT"] | null | null | null | analisador_sintatico/blueprints/api/parsers.py | viniciusandd/uri-analisador-sintatico | b347f4293e4c60bd3b2c838c8cef0d75db2c0bec | ["MIT"] | null | null | null |
from flask_restful import reqparse
def retornar_parser():
parser = reqparse.RequestParser()
parser.add_argument('sentenca', type=str, required=True)
return parser
| 25.714286 | 64 | 0.738889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.055556 |
16a3072f25578896e1189f9fac5976e0586e6b47 | 6,369 | py | Python | demo_large_image.py | gunlyungyou/AerialDetection | a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26 | ["Apache-2.0"] | 9 | 2020-10-08T19:51:17.000Z | 2022-02-16T12:58:01.000Z | demo_large_image.py | gunlyungyou/AerialDetection | a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26 | ["Apache-2.0"] | null | null | null | demo_large_image.py | gunlyungyou/AerialDetection | a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26 | ["Apache-2.0"] | 8 | 2020-09-25T14:47:55.000Z | 2022-02-16T12:31:13.000Z |
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb
CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭', '기차', '크레인',
'다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로')
CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military airplane', 'small car', 'bus', 'truck', 'train',
'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout')
CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)}
def py_cpu_nms_poly_fast_np(dets, thresh):
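    # Greedy NMS for oriented boxes: dets is (N, 9) -- 8 polygon coordinates
    # plus a score. Axis-aligned bounding boxes of the polygons serve as a
    # cheap overlap pre-filter; only pairs whose HBBs overlap get the exact
    # polygon IoU from DOTA_devkit's polyiou, and detections whose IoU with a
    # kept detection exceeds `thresh` are suppressed.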
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
try:
if math.isnan(ovr[0]):
pdb.set_trace()
except:
pass
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
class DetectorModel():
def __init__(self,
config_file,
checkpoint_file):
# init RoITransformer
self.config_file = config_file
self.checkpoint_file = checkpoint_file
self.cfg = Config.fromfile(self.config_file)
self.data_test = self.cfg.data['test']
self.dataset = get_dataset(self.data_test)
self.classnames = self.dataset.CLASSES
self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
def inference_single(self, imagname, slide_size, chip_size):
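        # Large-image inference by tiling: the image is covered with
        # chip_size windows stepped by slide_size, each chip is run through
        # the detector, chip-local detections are shifted back into full-image
        # coordinates, and the per-class results are merged with polygon NMS.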
img = mmcv.imread(imagname)
height, width, channel = img.shape
slide_h, slide_w = slide_size
hn, wn = chip_size
# TODO: check the corner case
# import pdb; pdb.set_trace()
total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]
for i in tqdm(range(int(width / slide_w + 1))):
for j in range(int(height / slide_h) + 1):
subimg = np.zeros((hn, wn, channel))
# print('i: ', i, 'j: ', j)
chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3]
subimg[:chip.shape[0], :chip.shape[1], :] = chip
chip_detections = inference_detector(self.model, subimg)
# print('result: ', result)
for cls_id, name in enumerate(self.classnames):
chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w
chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h
# import pdb;pdb.set_trace()
try:
total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id]))
except:
import pdb; pdb.set_trace()
# nms
for i in range(len(self.classnames)):
keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1)
total_detections[i] = total_detections[i][keep]
return total_detections
def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size):
detections = self.inference_single(srcpath, slide_size, chip_size)
classnames = [cls if cls not in CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames]
img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3)
cv2.imwrite(dstpath, img)
if __name__ == '__main__':
#roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py',
# r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth')
#roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py',
# r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth')
roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py',
r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth')
from glob import glob
roksis = glob('data/roksi2020/val/images/*.png')
#target = roksis[1]
#out = target.split('/')[-1][:-4]+'_out.jpg'
#roitransformer.inference_single_vis(target,
# os.path.join('demo', out),
# (512, 512),
# (1024, 1024))
for target in roksis[:100]:
out = target.split('/')[-1][:-4]+'_out.jpg'
print(os.path.join('demo/fasterrcnn', out))
roitransformer.inference_single_vis(target,
os.path.join('demo/fasterrcnn', out),
(512, 512),
(1024, 1024))
#roitransformer.inference_single_vis(r'demo/P0009.jpg',
# r'demo/P0009_out.jpg',
# (512, 512),
# (1024, 1024))
| 43.326531 | 122 | 0.551892 | 2,515 | 0.388417 | 0 | 0 | 0 | 0 | 0 | 0 | 1,557 | 0.240463 |