hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cd5e1e26e39c56d3ae62b8fd2032ab324293acc8 | 526 | py | Python | lib/redis_set_get.py | InformaticsResearchCenter/ITeung | 2e3f76294c3affca07934293cdeb46d6d618180a | ["MIT"] | null | null | null | lib/redis_set_get.py | InformaticsResearchCenter/ITeung | 2e3f76294c3affca07934293cdeb46d6d618180a | ["MIT"] | 37 | 2020-03-22T23:21:14.000Z | 2020-09-16T15:07:06.000Z | lib/redis_set_get.py | InformaticsResearchCenter/ITeung | 2e3f76294c3affca07934293cdeb46d6d618180a | ["MIT"] | 1 | 2020-09-08T11:31:30.000Z | 2020-09-08T11:31:30.000Z
import redis
def set(key, value, expired):
    # use None if you don't want an expiry time
try:
r = redis.Redis()
r.set(name=key, value=value, ex=expired)
return True, None
except Exception as e:
return False, f'{e}'
def get(key):
    # return the decoded value stored at key, or None if it is missing
r = redis.Redis()
result=r.get(key)
if result:
return result.decode('utf-8')
else:
return None
def cekRedisToken(key):
if get(key) is not None:
return True
else:
        return False
| 21.04 | 48 | 0.579848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.138783 |
cd5e82adedde50cba3e364b3ccb25d0a6e80401a | 18,185 | py | Python | FTDISPI.py | g-i-wilson/spi-tools | 1c961a97572a366235f9f3b0517d8201fa8be371 | ["MIT"] | 1 | 2022-03-22T20:44:01.000Z | 2022-03-22T20:44:01.000Z | FTDISPI.py | g-i-wilson/spi-tools | 1c961a97572a366235f9f3b0517d8201fa8be371 | ["MIT"] | null | null | null | FTDISPI.py | g-i-wilson/spi-tools | 1c961a97572a366235f9f3b0517d8201fa8be371 | ["MIT"] | null | null | null
from pyftdi.spi import SpiController
from pyftdi.gpio import GpioSyncController
import serial
import time
import sys
import JSONFile
dbg = False
def createByteList(addrList, dataList):
newBytes = []
for byte in addrList:
newBytes.append(byte)
for byte in dataList:
newBytes.append(byte)
return newBytes
def printByteList(byteList):
str = ""
for byte in byteList:
str += hex(byte)+" "
return str
def readModifyWrite(old=[], mask=[], new=[]):
for i in range(len(old)):
new[i] = (old[i] & ~mask[i]) | new[i]
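# Worked example: readModifyWrite(old=[0xFF], mask=[0x0F], new=[0x0A]) keeps the
# unmasked high nibble of old (0xF0) and merges the new bits into the masked low
# nibble, so new becomes [0xFA].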
class bcolors:
WHITE = '\033[37m'
BLUE = '\033[94m'
GREEN = '\033[92m'
RED = '\033[31m'
RESET = '\033[0m'
def printByte(a, b):
for bit in range(8):
a_bit = (a >> (7-bit)) & 0x01
b_bit = (b >> (7-bit)) & 0x01
if a_bit and b_bit:
print("1", end='')
elif (not a_bit) and b_bit:
print(bcolors.GREEN, end='')
print("1", end='')
elif a_bit and (not b_bit):
print(bcolors.RED, end='')
print("0", end='')
else:
print("0", end='')
print(bcolors.RESET, end='')
print(end=' ')
def printReg(addrName, addr=[], data=[], old_data=None, note="", nameColWidth=18):
print( addrName+(" "*(nameColWidth-len(addrName))), end=' ')
for a in addr:
if old_data:
print(bcolors.GREEN, end='')
print("0x{:02x}".format(a), end=' ')
print(bcolors.RESET, end='')
else:
print("0x{:02x}".format(a), end=' ')
print(' | ', end='')
for i in range(len(data)):
print("0x{:02x}".format(data[i]), end=' ')
if old_data:
printByte(old_data[i], data[i])
else:
printByte(data[i], data[i])
print(' | '+note)
def printStruct(struct):
for name in struct:
if 'old_data' in struct[name]:
printReg(name, addr=struct[name]['addr_w'], data=struct[name]['data'], old_data=struct[name]['old_data'], note=struct[name]['info'])
else:
printReg(name, addr=struct[name]['addr_w'], data=struct[name]['data'], note=struct[name]['info'])
def bitMaskToBytes(bitStrArray):
data = []
mask = []
for bitStr in bitStrArray:
bit = 0x80
bitMask = 0x00
bitData = 0x00
for aChar in bitStr:
if aChar == '1' or aChar == '0':
bitMask += bit
if aChar == '1':
bitData += bit
bit = bit >> 1
data.append(bitData)
mask.append(bitMask)
return {"data":data, "mask":mask}
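# Worked example: bitMaskToBytes(["XXXX1010"]) returns {"data": [0x0A], "mask": [0x0F]}.
# '1' and '0' set the corresponding mask bit (and the data bit for '1'); any other
# character is a don't-care that readModifyWrite() later leaves untouched.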
class GPIO:
def __init__(self, gpio, SCLK=0x10, MOSI=0x20, MISO=0x40, CS=0x80):
self.gpio = gpio
self.SCLK = SCLK
self.MOSI = MOSI
self.MISO = MISO
self.CS = CS
self.txList = []
self.readFlag = []
self.rxList = []
def transaction(self, byteList, readSize=0): # readSize 0 is simply a write
self.txList = [self.CS] # CS high, others low
self.readFlag = []
self.insertDelay(4)
self.clockLow()
self.csLow()
for aByte in byteList:
self.writeByte(aByte)
self.readFlag.append(False)
for i in range(readSize):
self.writeByte(0x00)
self.readFlag.append(True)
self.csHigh()
self.clockLow()
self.insertDelay(4)
self.transmit()
def insertDelay(self, d):
for i in range(d):
self.txList.append(self.txList[-1])
def clockLow(self):
self.txList.append( self.txList[-1] & ~self.SCLK )
def clockLowdataHigh(self):
self.txList.append( (self.txList[-1] & ~self.SCLK) | self.MOSI )
def clockLowdataLow(self):
self.txList.append( (self.txList[-1] & ~self.SCLK) & ~self.MOSI )
def clockHigh(self):
self.txList.append( self.txList[-1] | self.SCLK )
def csLow(self):
self.txList.append( self.txList[-1] & ~self.CS )
def csHigh(self):
self.txList.append( self.txList[-1] | self.CS )
def writeByte(self, aByte):
for i in range(8):
shiftPlaces = 7-i # MSB first "big endian"
# clock falling edge and data transition
if ((aByte >> shiftPlaces) & 0x01):
self.clockLowdataHigh()
else:
self.clockLowdataLow()
# clock rising edge
self.clockHigh()
def readByte(self):
        self.writeByte(0x00)  # clock out a dummy byte so MISO can be sampled in readBitBang()
def getTxList(self):
return self.txList
def transmit(self):
rxBytes = self.gpio.exchange( self.txList );
self.rxList = []
for byte in rxBytes:
self.rxList.append(byte)
def getRxList(self):
return self.rxList
def getReadFlag(self):
return self.readFlag
def read(self, byteList, readSize):
self.transaction(byteList, readSize)
rxByteList = self.readBitBang()
rxByteListOut = []
for i in range(len(self.readFlag)):
if self.readFlag[i]:
rxByteListOut.append(rxByteList[1][i])
return rxByteListOut
def write(self, byteList):
self.transaction(byteList)
def readBitBang(self):
mosiArray = []
misoArray = []
bitPlace = 7
mosiByte = 0x00
misoByte = 0x00
for a in range(len(self.rxList)):
if (not (self.rxList[a-1] & self.SCLK) and (self.rxList[a] & self.SCLK)): # rising edge
if (self.rxList[a] & self.MOSI):
mosiByte += (1 << bitPlace)
if (self.rxList[a] & self.MISO): # data=1
misoByte += (1 << bitPlace)
bitPlace -= 1
if bitPlace < 0:
mosiArray.append(mosiByte)
misoArray.append(misoByte)
mosiByte = 0x00
misoByte = 0x00
bitPlace = 7
if dbg:
print("MOSI: ")
print(mosiArray)
print("MISO: ")
print(misoArray)
return [mosiArray, misoArray]
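# Minimal usage sketch (not part of the original file; the register addresses are
# made up): one bit-banged write and one read over a pyftdi GpioSyncController
# that has already been configured for the default pin masks.
#   spi = GPIO(gpio_sync)       # gpio_sync: a configured GpioSyncController
#   spi.write([0x01, 0xAB])     # pull CS low, clock out address 0x01 + data 0xAB
#   rx = spi.read([0x81], 2)    # clock out address 0x81, then sample 2 MISO bytes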
class MPSSE:
def __init__(self, slave):
self.slave = slave
def write(self, byteList):
self.slave.exchange( \
out=byteList, \
readlen=0, \
start=True, \
stop=True, \
duplex=False, \
droptail=0 \
)
def read(self, byteList, readSize):
byteArray = self.slave.exchange( \
out=byteList, \
readlen=readSize, \
start=True, \
stop=True, \
duplex=False, \
droptail=0 \
)
byteList = []
for byte in byteArray:
byteList.append(byte)
return byteList
class UARTSPIBridge:
def __init__(self, port="/dev/ttyUSB0", baudrate="9600"):
self.serial = serial.Serial(port=port,baudrate=baudrate)
if self.serial.is_open:
self.serial.close()
self.serial.open()
def read(self, byteList, readLen):
# send write-length byte, read-length byte, and first data byte
self.serial.write( [ len(byteList) ] )
self.serial.write( [ readLen ] );
#print([ len(byteList) ])
#print([ readLen ])
if len(byteList) > 0:
self.serial.write( [ byteList[0] ] )
#print([ byteList[0] ])
#self.serial.write( [ 0x0A ] ) # newline char
#print([ 0x0A ])
self.serial.flush()
# time.sleep(0.1)
# read acknowledgement bytes
#print("hi0")
lenBytes = []
#print(self.serial.inWaiting())
while(not self.serial.inWaiting()):
pass
lenBytes.append( self.serial.read(1) )
#print("hi1")
#print(lenBytes)
# time.sleep(0.1)
while(not self.serial.inWaiting()):
pass
lenBytes.append( self.serial.read(1) )
#print("hi2")
#print(lenBytes)
if lenBytes[0] != len(byteList).to_bytes(1,'big') or lenBytes[1] != readLen.to_bytes(1,'big'):
print("Error communicating with UARTSPIBridge")
print("W length received: "+str(lenBytes[0])+" != "+str(byteList[0].to_bytes(1,'big')))
print("R length received: "+str(lenBytes[1])+" != "+str(byteList[1].to_bytes(1,'big')))
return []
# time.sleep(0.1)
while(not self.serial.inWaiting()):
pass
#print("hi3")
lastByte = self.serial.read(1) # ack byte from first data-write byte
#print(lastByte)
# write the remainder of byteList
for i in range(1,len(byteList)):
            self.serial.write( [ byteList[i] ] )  # send each remaining payload byte
#self.serial.write( [ 0x0A ] ) # newline char
self.serial.flush()
while(not self.serial.inWaiting()):
pass
lastByte = self.serial.read(1) # ack byte for each data-write byte
#print(lastByte)
inList = []
for i in range(readLen):
while(not self.serial.inWaiting()):
pass
inList.append( self.serial.read(1) )
#print(inList)
return inList
def write(self, byteList):
return self.read(byteList, 0);
class Interface:
def __init__(self, rwObject, defaultMap, currentState, previousState):
self.rwObject = rwObject
# default register map
defaultMapFile = JSONFile.load(defaultMap)
if not defaultMapFile.fileExists():
print("Unable to load "+defaultMap)
exit()
self.defaultMap = defaultMapFile.read()
# states for comparison
self.previousState = JSONFile.load(previousState)
if not self.previousState.fileExists():
print("Unable to load "+currentState)
exit()
self.currentState = JSONFile.load(currentState)
if not self.currentState.fileExists():
print("Unable to load "+currentState)
exit()
def writeRaw(self, byteList):
self.rwObject.write(byteList)
def fillDefaults(self, struct={}):
for name in struct:
if name in self.defaultMap.keys():
for key in self.defaultMap[name].keys():
if not key in struct[name].keys():
struct[name][key] = self.defaultMap[name][key]
def writeStruct(self, struct, display=False):
self.fillDefaults(struct)
for name in struct:
if 'mask' in struct[name]:
old = {name : {}}
self.readStruct(old)
struct[name]['old_data'] = old[name]['data']
readModifyWrite(old=struct[name]['old_data'], mask=struct[name]['mask'], new=struct[name]['data'])
if 'pre_w' in struct[name]:
for step in struct[name]['pre_w']: # ...is a list
for pre_name in step: # step is a dictionary with one key
if dbg:
print("Write: "+pre_name+", "+printByteList( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) ))
self.rwObject.write( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) )
if dbg:
print("Write: "+name+", "+printByteList(createByteList(struct[name]['addr_w'], struct[name]['data'])))
self.rwObject.write( createByteList(struct[name]['addr_w'], struct[name]['data']) )
if display:
printStruct(struct)
return struct
def readStruct(self, struct, display=False):
self.fillDefaults(struct)
for name in struct:
if 'pre_r' in struct[name]:
for step in struct[name]['pre_r']: # ...is a list
for pre_name in step: # step is a dictionary with one key
if dbg:
print("Write: "+pre_name+", "+printByteList( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) ))
self.rwObject.write( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) )
struct[name]['data'] = self.rwObject.read( struct[name]['addr_r'], len(struct[name]['data']) )
if dbg:
print("Read: "+name+", "+printByteList(createByteList(struct[name]['addr_r'], struct[name]['data'])))
if display:
printStruct(struct)
return struct
def readState(self, display=True):
self.previousState.write(self.currentState.read())
struct = {}
for name in self.defaultMap:
struct[name] = {}
self.currentState.write(self.readStruct(struct, display))
return self.currentState.read()
def compare(self, display=True, pre_display="", post_display=""):
self.readState(display=False)
comparison = {}
if len(self.previousState.read().keys()) == 0:
self.readState(display=False)
for name in self.currentState.read():
# aliases
prevData = self.previousState.read()[name]['data']
currData = self.currentState.read()[name]['data']
same = True
for i in range(len(currData)):
if currData[i] != prevData[i]:
same = False
if not same:
comparison[name] = {}
comparison[name]['data'] = currData
comparison[name]['old_data'] = prevData
self.fillDefaults(comparison)
if display and len(comparison.keys()) > 0:
if pre_display:
print(pre_display)
printStruct(comparison)
if post_display:
print(post_display)
return comparison
def trigger(self, display=True, pre_display="", delay=.25):
while(1):
comp = self.compare(display=False)
if len(comp.keys()) > 0:
if display:
print(pre_display)
printStruct(comp)
return comp
time.sleep(delay)
def writeDefault(self, display=True):
struct = self.writeStruct(self.defaultMap)
return self.readState(display)
def writeBits(self, name, bitStrings=[], display=True, compare=True):
if compare:
self.compare(display=display, pre_display="Changes before write:")
if display:
print("Writing...")
struct = self.writeStruct( { name : bitMaskToBytes(bitStrings) }, display )
if compare:
self.currentState.merge(struct) # also merges everything into struct
self.compare(display=display, pre_display="Changes after write:")
return {name: struct[name]} # return only this name
def writeBitsList(self, bitsList):
for bits in bitsList:
self.writeBits(name=bits[0], bitStrings=bits[1])
def writeCSV(self, csvFilePath):
print("Writing raw bytes from CSV file...")
csvFile = open(csvFilePath, "r")
print("Opened file: "+csvFilePath)
for line in csvFile.readlines():
byteList = []
for aByte in line.rstrip().split(','):
byteList.append( int(aByte,16) )
print(byteList)
self.writeRaw( byteList )
def ui_hex(str):
return int(str,16)
def uiLoopHelp():
print()
print("Command set:")
print()
print("write <REG_NAME> XXXX1010 1XXXXXX0 | Write bits (any char not 0 or 1 is a don't-care)")
print("writeRaw 0xXX 0xXX 0xXX | Write a raw sequence of bytes")
print("read <REG_NAME> | Read register")
print("all | Read all registers")
print("save <fileName> | Save registers to JSON file")
print("load <fileName> | Load and write registers from JSON file")
print("loadCSV <fileName> | Write bytes from CSV file (each line is one transaction)")
print("loadDefault | Load datasheet default JSON configuration")
print("help | Print this command set")
print("exit | Exit the program")
def uiLoop(spiObject, printHelp=True):
if printHelp:
uiLoopHelp()
jsonObject = None
ui = [""]
while (ui[0] != "exit"):
print("\n> ", end='')
ui = sys.stdin.readline().rstrip().split(' ')
if (ui[0] == "read"):
spiObject.readStruct({ ui[1] : {} }, display=True)
if (ui[0] == "write"):
dataRegs = []
for i in range(2,len(ui)):
dataRegs.append( ui[i] )
spiObject.writeBits( ui[1], dataRegs )
if (ui[0] == "all"):
spiObject.readState()
if (ui[0] == "compare"):
spiObject.compare()
if (ui[0] == "trigger"):
while(1):
spiObject.trigger(pre_display=chr(27)+"[2J")
time.sleep(1)
if (ui[0] == "save"):
if jsonObject is None:
if len(ui) > 1:
jsonObject = JSONFile.new(ui[1])
else:
jsonObject = JSONFile.new(input("\nSave as: "))
jsonObject.write( spiObject.readState() )
if (ui[0] == "load"):
if jsonObject is None:
jsonObject = JSONFile.load(ui[1])
spiObject.writeStruct(jsonObject.read())
spiObject.readState()
if (ui[0] == "loadCSV"):
spiObject.writeCSV(ui[1])
print("Comparing changes...")
spiObject.compare()
if (ui[0] == "writeRaw"):
print("Writing raw bytes...")
byteList = []
for i in range(1,len(ui)):
byteList.append( int(ui[i],16) )
print(byteList)
spiObject.writeRaw( byteList )
if (ui[0] == "loadDefault"):
spiObject.writeDefault()
if (ui[0] == "help"):
uiLoopHelp()
| 36.081349 | 144 | 0.534451 | 12,851 | 0.706681 | 0 | 0 | 0 | 0 | 0 | 0 | 2,685 | 0.147649 |
cd63c34fbdfbd183f707a4b54997655b51643809 | 3,417 | py | Python | src/onegov/gazette/views/groups.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | src/onegov/gazette/views/groups.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | src/onegov/gazette/views/groups.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null
from morepath import redirect
from onegov.core.security import Private
from onegov.gazette import _
from onegov.gazette import GazetteApp
from onegov.gazette.forms import EmptyForm
from onegov.gazette.layout import Layout
from onegov.user import UserGroup
from onegov.user import UserGroupCollection
from onegov.user.forms import UserGroupForm
@GazetteApp.html(
model=UserGroupCollection,
template='groups.pt',
permission=Private
)
def view_groups(self, request):
""" View all the user groups.
This view is only visible by an admin.
"""
layout = Layout(self, request)
return {
'layout': layout,
'groups': self.query().all(),
'title': _('Groups'),
'new_group': request.link(self, name='new-group')
}
@GazetteApp.form(
model=UserGroupCollection,
name='new-group',
template='form.pt',
permission=Private,
form=UserGroupForm
)
def create_group(self, request, form):
""" Create a new user group.
This view is only visible by an admin.
"""
layout = Layout(self, request)
if form.submitted(request):
self.add(name=form.name.data)
request.message(_("Group added."), 'success')
return redirect(layout.manage_groups_link)
return {
'layout': layout,
'form': form,
'title': _("New Group"),
'cancel': layout.manage_groups_link
}
@GazetteApp.form(
model=UserGroup,
name='edit',
template='form.pt',
permission=Private,
form=UserGroupForm
)
def edit_group(self, request, form):
""" Edit a user group.
This view is only visible by an admin.
"""
layout = Layout(self, request)
if form.submitted(request):
form.update_model(self)
request.message(_("Group modified."), 'success')
return redirect(layout.manage_groups_link)
if not form.errors:
form.apply_model(self)
return {
'layout': layout,
'form': form,
'title': self.name,
'subtitle': _("Edit Group"),
'cancel': layout.manage_groups_link
}
@GazetteApp.form(
model=UserGroup,
name='delete',
template='form.pt',
permission=Private,
form=EmptyForm
)
def delete_group(self, request, form):
""" Delete a user group.
This view is only visible by an admin.
"""
layout = Layout(self, request)
if self.official_notices:
request.message(
_("There are official notices linked to this group!"),
'warning'
)
if self.users.count():
request.message(
_('Only groups without users may be deleted.'),
'alert'
)
return {
'layout': layout,
'title': self.name,
'subtitle': _("Delete Group"),
'show_form': False
}
if form.submitted(request):
UserGroupCollection(request.session).delete(self)
request.message(_("Group deleted."), 'success')
return redirect(layout.manage_groups_link)
return {
'message': _(
'Do you really want to delete "${item}"?',
mapping={'item': self.name}
),
'layout': layout,
'form': form,
'title': self.name,
'subtitle': _("Delete Group"),
'button_text': _("Delete Group"),
'button_class': 'alert',
'cancel': layout.manage_groups_link
}
| 23.244898 | 66 | 0.605502 | 0 | 0 | 0 | 0 | 3,061 | 0.895815 | 0 | 0 | 912 | 0.266901 |
cd6412162ab8b14b43aaaa41358897ddfe50fe48 | 136 | py | Python | dlpt/__init__.py | damogranlabs/dlpt | e4cdbaf4b5496ed985eb255b17294aa7cf3d35e4 | ["MIT"] | 5 | 2021-08-09T19:39:06.000Z | 2022-03-22T11:21:29.000Z | dlpt/__init__.py | damogranlabs/dlpt | e4cdbaf4b5496ed985eb255b17294aa7cf3d35e4 | ["MIT"] | null | null | null | dlpt/__init__.py | damogranlabs/dlpt | e4cdbaf4b5496ed985eb255b17294aa7cf3d35e4 | ["MIT"] | null | null | null
from . import utils
from . import pth
from . import proc
from . import log
from . import json
from . import time
from . import importer
| 17 | 22 | 0.742647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd64ffc5e28a3c1d060e7cdf2e73c1f3c1f202dd | 1,466 | py | Python | personal_utilities/fourier_filters.py | dbstein/personal_utilities | 3a4c7d2416b13a87f88fc0e400b299d648e1e541 | ["Apache-2.0"] | null | null | null | personal_utilities/fourier_filters.py | dbstein/personal_utilities | 3a4c7d2416b13a87f88fc0e400b299d648e1e541 | ["Apache-2.0"] | null | null | null | personal_utilities/fourier_filters.py | dbstein/personal_utilities | 3a4c7d2416b13a87f88fc0e400b299d648e1e541 | ["Apache-2.0"] | null | null | null
import numpy as np
class SimpleFourierFilter(object):
"""
    Class to apply simple Fourier filtering to a vector
Filter types:
'fraction' (requires kwarg: 'fraction' to be set)
'rule 36' (can set kwarg: 'power' but not necessary)
"""
def __init__(self, modes, filter_type, **kwargs):
self.n = modes.shape[0]
self.modes = modes
self.filter_type = filter_type
self._get_filter(**kwargs)
def __call__(self, fin, input_type='space', output_type='space'):
input_is_real = fin.dtype == float and input_type == 'space'
if input_type=='space':
fin = np.fft.fft(fin)
fout = fin*self.filter
if output_type == 'space':
fout = np.fft.ifft(fout)
if input_is_real:
fout = fout.real
return fout
def _get_filter(self, **kwargs):
if self.filter_type == 'fraction':
max_k = np.abs(self.modes).max()
self.filter = np.ones(self.n, dtype=float)
self.filter[np.abs(self.modes) > max_k*kwargs['fraction']] = 0.0
elif self.filter_type == 'rule 36':
max_k = np.abs(self.modes).max()
if 'power' in kwargs:
power36 = kwargs['power']
else:
power36 = 36
self.filter = np.exp(-power36*(np.abs(self.modes)/max_k)**power36)
else:
raise Exception('Filter type not defined.')
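# Usage sketch (not part of the original file): smooth a noisy periodic signal
# with the 'fraction' filter, keeping the lowest third of the spectrum.
#   n = 256
#   x = np.linspace(0, 2*np.pi, n, endpoint=False)
#   f = np.sin(3*x) + 0.1*np.random.randn(n)
#   modes = np.fft.fftfreq(n, d=1.0/n)   # symmetric mode numbers -n/2..n/2-1
#   filt = SimpleFourierFilter(modes, 'fraction', fraction=1.0/3.0)
#   f_smooth = filt(f)                   # space in, space out; stays real-valued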
| 34.093023 | 78 | 0.563438 | 1,442 | 0.983629 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.212142 |
cd67c54b1e46edcb715070c6ab83abb9ea55fa6d | 1,178 | py | Python | sloth/simple.py | codacy-badger/sloth | a4f2118b2f19e55271613d43c785aaf4ab030b5e | ["MIT"] | 1 | 2021-02-11T12:14:23.000Z | 2021-02-11T12:14:23.000Z | src/sloth/simple.py | Legorooj/sloth | 47f6358349f8545fc475efab19edd6efda3ffbcd | ["MIT"] | null | null | null | src/sloth/simple.py | Legorooj/sloth | 47f6358349f8545fc475efab19edd6efda3ffbcd | ["MIT"] | null | null | null
# ----------------------------------------------------------------------------
# Copyright (c) 2020 Legorooj <[email protected]>
# Copyright (c) 2020 FluffyKoalas <github.com/fluffykoalas>
# This file and all others in this project are licensed under the MIT license.
# Please see the LICENSE file in the root of this repository for more details.
# ----------------------------------------------------------------------------
from .timers import Timer
from .raw import tests, runners
__all__ = [
'call_after', 'time_callable', 'time_eval', 'time_exec'
]
def call_after(seconds, func, args=None, kwargs=None):
Timer(seconds, func, args, kwargs).start()
def time_callable(func, n=2, *args, **kwargs):
test = tests.TestCallableWithArgs(func, *args, **kwargs)
runner = runners.AverageTest(test, n)
return runner.run()
def time_eval(snippet, n=2, gbls=None, lcls=None):
test = tests.TestEval(snippet, gbls, lcls)
runner = runners.AverageTest(test, n)
return runner.run()
def time_exec(snippet, n=2, gbls=None, lcls=None):
test = tests.TestExec(snippet, gbls, lcls)
runner = runners.AverageTest(test, n)
return runner.run()
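# Usage sketch (assuming the sloth.raw test runners behave as their names
# suggest): average the runtime of a callable over 10 runs.
#   avg = time_callable(sorted, 10, [3, 1, 2])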
| 32.722222 | 78 | 0.616299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 475 | 0.403226 |
cd682359aededb5fca5a5b75e857cce2e964a4f3 | 1,385 | py | Python | Final/P2Pchat.py | cainanBlack/csc321 | 9cebf9c3b61befda932732316b7406f1462c0bee | ["MIT"] | null | null | null | Final/P2Pchat.py | cainanBlack/csc321 | 9cebf9c3b61befda932732316b7406f1462c0bee | ["MIT"] | null | null | null | Final/P2Pchat.py | cainanBlack/csc321 | 9cebf9c3b61befda932732316b7406f1462c0bee | ["MIT"] | null | null | null
import netifaces
import argparse
import os
import zmq
import threading
def recieve(message):
ctx = zmq.Context.instance()
reciever = ctx.socket(zmq.SUB)
for last in range(1, 255):
reciever.connect("tcp://{0}.{1}:9000".format(message, last))
reciever.setsockopt(zmq.SUBSCRIBE, b'')
while True:
try:
print(reciever.recv_string())
except (KeyboardInterrupt, zmq.ContextTerminated):
break
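# Peer discovery above is brute force: the SUB socket dials every host in the
# local /24 on port 9000, so any peer whose PUB socket binds that port is picked
# up automatically, without a broker or registry.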
def main():
parser = argparse.ArgumentParser()
parser.add_argument("interface", type=str, help="the network interface", choices=interfaces())
parser.add_argument("user", type=str, default=os.environ['USER'], nargs='?', help="Your username")
args = parser.parse_args()
    inet = netifaces.ifaddresses(args.interface)[netifaces.AF_INET]
addr = inet[0]['addr']
message = addr.rsplit('.', 1)[0]
ctx = zmq.Context.instance()
    recieve_thread = threading.Thread(target=recieve, args=(message,))
recieve_thread.start()
serve = ctx.socket(zmq.PUB)
serve.bind("tcp://%s:9000" % args.interface)
print("starting chat on %s:9000 (%s.*)" % (args.interface, message))
while True:
try:
            msg = input()
serve.send_string("%s: %s" % (args.user, msg))
except KeyboardInterrupt:
break
serve.close(linger=0)
ctx.term()
if __name__ == '__main__':
main()
| 26.634615 | 102 | 0.626715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.116968 |
cd68c200c93d96ecc3b7ad0ac3280311cd7d42ce | 1,822 | py | Python | src/playerprofile.py | MarinVlaic/AIBG | cf4960586bdb3c32f8e566c10f9f1e59e6f9ac2d | ["MIT"] | null | null | null | src/playerprofile.py | MarinVlaic/AIBG | cf4960586bdb3c32f8e566c10f9f1e59e6f9ac2d | ["MIT"] | null | null | null | src/playerprofile.py | MarinVlaic/AIBG | cf4960586bdb3c32f8e566c10f9f1e59e6f9ac2d | ["MIT"] | null | null | null
class PlayerProfile:
def __init__(self, id):
self.cities = []
self.resources = {
"SHEEP": 0,
"WOOD": 0,
"WHEAT": 0,
"CLAY": 0,
"IRON": 0
}
self.current_builder_intersection_position_id = None
self.id = id
self.owned_roads = set()
def get_score(self) -> int:
return sum(map(lambda x: x.level, self.cities))
def has_enough_resources(self, resource_dict):
for resource in resource_dict:
if resource_dict[resource] > self.resources[resource]:
return False
return True
def add_road(self, destination_intersection_id):
if self.current_builder_intersection_position_id > destination_intersection_id:
self.owned_roads.add((destination_intersection_id, self.current_builder_intersection_position_id))
else:
self.owned_roads.add((self.current_builder_intersection_position_id, destination_intersection_id))
def __eq__(self, other):
if other.id != self.id:
return False
else:
retval = set(self.cities) == set(other.cities)
if not retval:
return False
for resource in self.resources:
retval = retval and self.resources[resource] == other.resources[resource]
return retval
def check_road(self, id1, id2):
if id1 > id2:
return (id2, id1) in self.owned_roads
else:
return (id1, id2) in self.owned_roads
def update_resources(self):
for city in self.cities:
for tile in city.intersection.neighbouring_tiles:
if tile.type not in ("WATER", "DUST"):
self.resources[tile.type] += tile.weight * city.level
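# Roads are normalized to (low_id, high_id) tuples in both add_road() and
# check_road(), so the pairs (3, 7) and (7, 3) refer to the same road.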
| 35.038462 | 110 | 0.593853 | 1,821 | 0.999451 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.024698 |
cd68c50658ac006c458874597809bf7939658dff | 83 | py | Python | examples/object/reference/change_list.py | zqmillet/kinopico_python_book | 0db4b0a904a1ba1b7e90cf971871e134941aeb65 | ["MIT"] | null | null | null | examples/object/reference/change_list.py | zqmillet/kinopico_python_book | 0db4b0a904a1ba1b7e90cf971871e134941aeb65 | ["MIT"] | null | null | null | examples/object/reference/change_list.py | zqmillet/kinopico_python_book | 0db4b0a904a1ba1b7e90cf971871e134941aeb65 | ["MIT"] | null | null | null
a = [1, 2, 3]
b = a
a[0] = 'gouliguojiashengsiyi'
print('a =', a)
print('b =', b)
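# Both names are bound to the same list object, so the mutation through `a` is
# visible through `b` as well: both prints show ['gouliguojiashengsiyi', 2, 3].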
| 11.857143 | 29 | 0.506024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.385542 |
cd6940cd949b8d012c79a302492e17dd59770ba1 | 2,267 | py | Python | source/CTRW.py | tangxiangong/ClassTop | fdafdafd165672ae464210fb8c66c70256d50956 | ["MIT"] | null | null | null | source/CTRW.py | tangxiangong/ClassTop | fdafdafd165672ae464210fb8c66c70256d50956 | ["MIT"] | null | null | null | source/CTRW.py | tangxiangong/ClassTop | fdafdafd165672ae464210fb8c66c70256d50956 | ["MIT"] | null | null | null
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Time : 2021/12/1 13:27
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
from trajectory import Trajectory
from rnd import stable_rnd, skewed_stable_rnd
class CTRW(Trajectory):
def __init__(self, t_len, ind_waiting, ind_jump, init_position=0):
        super(CTRW, self).__init__()
self._T = t_len
self._alpha = ind_jump
self._beta = ind_waiting
self._x0 = init_position
self._simulate()
def _simulate(self):
if self._beta == 1:
waiting_time = random.exponential
else:
waiting_time = skewed_stable_rnd
self._t = np.zeros(1)
self._x = np.array([self._x0])
total_time = 0
current_position = self._x0
n = 1
while True:
n += 1
tau = waiting_time(self._beta)
if total_time + tau > self._T:
self._t = np.append(self._t, self._T)
self._x = np.append(self._x, current_position)
self._n = n
break
else:
xi = stable_rnd(self._alpha)
total_time += tau
current_position += xi
self._t = np.append(self._t, total_time)
self._x = np.append(self._x, current_position)
def plot(self):
plt.figure()
plt.step(self._t, self._x, where="post")
plt.show()
if __name__ == "__main__":
m1 = CTRW(100, 1, 2)
t1, x1 = m1.get()
fig1 = plt.figure(1)
plt.step(t1, x1, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig1.savefig("../figures/ctrw1.png")
m2 = CTRW(100, 0.7, 2)
t2, x2 = m2.get()
fig2 = plt.figure(2)
plt.step(t2, x2, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig2.savefig("../figures/ctrw2.png")
m3 = CTRW(100, 1, 1.5)
t3, x3 = m3.get()
fig3 = plt.figure(3)
plt.step(t3, x3, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig3.savefig("../figures/ctrw3.png")
m4 = CTRW(100, 0.7, 1.5)
t4, x4 = m4.get()
fig4 = plt.figure(4)
plt.step(t4, x4, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig4.savefig("../figures/ctrw4.png")
| 27.313253 | 70 | 0.549625 | 1,242 | 0.547861 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.098368 |
cd6b149fb6473adbe7fd7149968a3e8e9f36d8bd | 901 | py | Python | src/conversion_spec_file_reader.py | Eldar1205/exchanger-python-demo-app | 6733ff6044555f8c4639dc6e25baf4ef51401fd9 | ["MIT"] | 7 | 2021-08-31T09:03:39.000Z | 2021-09-27T13:45:58.000Z | src/conversion_spec_file_reader.py | Eldar1205/exchanger-python-demo-app | 6733ff6044555f8c4639dc6e25baf4ef51401fd9 | ["MIT"] | null | null | null | src/conversion_spec_file_reader.py | Eldar1205/exchanger-python-demo-app | 6733ff6044555f8c4639dc6e25baf4ef51401fd9 | ["MIT"] | null | null | null
import aiofiles
import asyncstdlib
from pydantic.types import PositiveFloat
from conversion_parameters import ConversionParameters
from conversion_spec import ConversionSpec, Currency
from conversion_spec_provider import ConversionSpecProvider
class ConversionSpecFileReader(ConversionSpecProvider):
async def provide_conversion_spec(self, conversion_parameters: ConversionParameters) -> ConversionSpec:
source_currency: Currency
target_currency: Currency
source_amounts: tuple[PositiveFloat]
async with aiofiles.open(conversion_parameters.file_path) as conversion_spec_file:
source_currency, target_currency, *source_amounts = await asyncstdlib.tuple(conversion_spec_file)
return ConversionSpec(
source_currency=source_currency,
target_currency=target_currency,
source_amounts=source_amounts,
)
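# Expected file layout, inferred from the unpacking above (an assumption, not
# documented here): line 1 is the source currency, line 2 the target currency,
# and each remaining line one positive amount; values are taken verbatim from
# the lines, trailing newlines included.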
| 37.541667 | 109 | 0.783574 | 653 | 0.72475 | 0 | 0 | 0 | 0 | 593 | 0.658158 | 0 | 0 |
cd6b1d33d27551aa6e7a920f48a0b7633b6280b3 | 3,931 | py | Python | Paris_G_1-2-3_v2.py | Gaspe-R/Rendez-vous-prefecture-Paris | e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a | ["MIT"] | null | null | null | Paris_G_1-2-3_v2.py | Gaspe-R/Rendez-vous-prefecture-Paris | e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a | ["MIT"] | null | null | null | Paris_G_1-2-3_v2.py | Gaspe-R/Rendez-vous-prefecture-Paris | e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a | ["MIT"] | null | null | null
from sqlite3 import Date
from twilio.rest import Client
from datetime import datetime
from playsound import playsound
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import csv
import time
################################ "PREFECTURE DE PARIS" #####################################
######################## "Remise d'un titre de séjour étranger" ###########################
while True:
# New instance for Chrome
browser = webdriver.Chrome(ChromeDriverManager().install())
# Open the webpage
try:
browser.get('https://pprdv.interieur.gouv.fr/booking/create/989')
time.sleep(3)
        # Save the window opener (the current window; not to be confused with a tab)
main_window = browser.current_window_handle
        # Accept the cookies:
browser.find_element_by_xpath("//a[@onclick='javascript:accepter()']").click()
time.sleep(2)
# Click in checkbox "Veuillez cocher la case pour..." :
browser.find_element_by_xpath("//input[@name='condition']").click()
time.sleep(3)
# Click in the submit button :
browser.find_element_by_xpath("//input[@name='nextButton']").click()
time.sleep(3)
# Click in the radio button "Guichets 1-2 &3" :
browser.find_element_by_xpath("//input[@id='planning990']").click()
time.sleep(3)
# Click in the submit button 1 :
browser.find_element_by_xpath("//input[@type='submit']").click()
time.sleep(4)
##################################################
# Variables :
textNo = "Il n'existe plus de plage horaire libre pour votre demande de rendez-vous"
textOui = "Choix d'une plage horaire"
son = "./alert.wav" # ajouter le chemin de votre fichier audio pour l'alerte
url = browser.current_url
now = datetime.now()
Heure = now.strftime("%H:%M:%S")
Date = datetime.now().strftime("%d/%m/%Y")
#account Twilio :
        account_sid = 'SID' # add your Twilio SID
        auth_token = 'token' # add your Twilio token
client = Client(account_sid, auth_token)
#log CSV:
header = ['Date', 'Heure', 'Préfecture', 'Disponibilité']
DataNo = [Date, Heure,'Paris G 1-2 et 3', 'Pas de Rendez-vous']
DataOui = [Date, Heure, 'Paris G 1-2 et 3', 'Rendez-vous Disponible']
##################################################
#Conditions :
if (textOui in browser.page_source):
browser.find_element_by_xpath("//input[@type='submit']").click()
print("")
print("RDV Disponible")
print("")
            with open('./log.csv', 'a', newline='') as f: # set the path to your log file
writer = csv.writer(f)
writer.writerow(DataOui)
"""
# Send SMS Alert :
message = client.messages.create(
                from_='your Twilio number',
body = 'Rendez-vous prefecture disponible, https://pprdv.interieur.gouv.fr/booking/create/989',
                to ='your personal number'
)
print(message.sid)
"""
#alert sound :
playsound(son)
time.sleep(900)
break
elif (textNo in browser.page_source):
playsound(son)
print("")
print("Pas de RDV")
print("")
            with open('./log.csv', 'a', newline='') as f: # set the path to your log file
writer = csv.writer(f)
writer.writerow(DataNo)
time.sleep(30)
browser.quit()
except:
browser.quit()
time.sleep(60)
| 36.738318 | 126 | 0.522768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,849 | 0.469886 |
cd6d9b2b982fd93ff60ad7ad2c61547c26a40708 | 3,022 | py | Python | movo_common/si_utils/src/si_utils/my_tf_listener.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | ["BSD-3-Clause"] | 1 | 2021-03-26T06:33:28.000Z | 2021-03-26T06:33:28.000Z | movo_common/si_utils/src/si_utils/my_tf_listener.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | ["BSD-3-Clause"] | null | null | null | movo_common/si_utils/src/si_utils/my_tf_listener.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | ["BSD-3-Clause"] | null | null | null
#!/usr/bin/env python
import rospy
import math
import tf
import geometry_msgs.msg
from geometry_msgs.msg import PoseStamped
from si_utils.lx_transformerROS import my_transformer
if __name__ == '__main__':
rospy.init_node('my_tf_listener')
listener = tf.TransformListener()
# my_trans = my_transformer()
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
try:
# look1 = listener.lookupTransform('/left_ee_link', '/link1', rospy.Time(0))
# look2 = listener.lookupTransform('/base_link', '/left_ee_link', rospy.Time(0))
# look3 = listener.lookupTransform('/base_link', '/link1', rospy.Time(0))
# rospy.loginfo(look3)
# rospy.loginfo(look2)
pose = PoseStamped()
pose.header.frame_id = '/link1'
pose2 = listener.transformPose('/base_link', pose)
rospy.loginfo(pose2)
# (trans,rot) = listener.lookupTransform('/base_link', '/ar_marker_1', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/base_link', '/left_ee_link', rospy.Time(0))
# (trans1,rot1) = listener.lookupTransform('/movo_camera_color_optical_frame', '/ar_marker_17', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/base_link', '/movo_camera_color_optical_frame', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/movo_camera_color_optical_frame', '/base_link', rospy.Time(0))
# (trans,rot) = listener.lookupTransform('/base_link', '/ar_marker_1', rospy.Time(0))
# pose = PoseStamped()
# pose.header.frame_id = 'ar_marker_1'
# rospy.loginfo("========== First trans ===========")
# pose1 = listener.transformPose('/movo_camera_color_optical_frame', pose)
# rospy.loginfo(pose1)
# rospy.loginfo("========== Second trans ===========")
# rospy.loginfo(listener.transformPose('/base_link', pose1))
# print(trans)
# print(rot)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            print('TF lookup failed, retrying...')
# rate.sleep()
'''
pose = PoseStamped()
pose.header.frame_id = '/ar_marker_17'
rospy.loginfo("========== First trans ===========")
listener.waitForTransform("/ar_marker_17", "/movo_camera_color_optical_frame", rospy.Time(), rospy.Duration(4.0))
pose1 = listener.transformPose('/movo_camera_color_optical_frame', pose)
rospy.loginfo(pose1)
rospy.loginfo("========== Second trans ===========")
rospy.loginfo(listener.transformPose('/base_link', pose1))
pose_nutStart_nut = PoseStamped()
pose_nutStart_nut.header.frame_id = '/nutStart'
pose_nutStart_ar = my_trans.tf.transformPose('/ar_marker_17', pose_nutStart_nut)
rospy.loginfo(pose_nutStart_ar)
pose_nutStart_ca = listener.transformPose('/movo_camera_color_optical_frame', pose_nutStart_ar)
rospy.loginfo(pose_nutStart_ca)
'''
| 35.139535 | 122 | 0.634348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,136 | 0.706817 |
cd6e8efff351684ee42b6f8c78aec9644cacd755 | 8,661 | py | Python | acme_tiny.py | dennydai/docker-letsencrypt | 898fa70665d321e527c7fcc463a57a66dbbdab26 | ["MIT"] | 22 | 2015-12-06T06:19:43.000Z | 2016-03-10T06:44:34.000Z | acme_tiny.py | dennydai/docker-letsencrypt | 898fa70665d321e527c7fcc463a57a66dbbdab26 | ["MIT"] | 1 | 2016-09-11T07:38:45.000Z | 2016-09-11T10:50:26.000Z | acme_tiny.py | dennydai/docker-letsencrypt | 898fa70665d321e527c7fcc463a57a66dbbdab26 | ["MIT"] | 4 | 2015-12-22T01:25:16.000Z | 2016-01-14T13:24:27.000Z
#!/usr/bin/env python
import argparse, subprocess, json, os, os.path, urllib2, sys, base64, binascii, time, \
hashlib, re, copy, textwrap
#CA = "https://acme-staging.api.letsencrypt.org"
CA = "https://acme-v01.api.letsencrypt.org"
def get_crt(account_key, csr, acme_dir):
# helper function base64 encode for jose spec
def _b64(b):
return base64.urlsafe_b64encode(b).replace("=", "")
# parse account key to get public key
sys.stderr.write("Parsing account key...")
proc = subprocess.Popen(["openssl", "rsa", "-in", account_key, "-noout", "-text"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
out, re.MULTILINE|re.DOTALL).groups()
pub_mod = binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex))
pub_mod64 = _b64(pub_mod)
pub_exp = "{0:x}".format(int(pub_exp))
pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
pub_exp64 = _b64(binascii.unhexlify(pub_exp))
header = {
"alg": "RS256",
"jwk": {
"e": pub_exp64,
"kty": "RSA",
"n": pub_mod64,
},
}
accountkey_json = json.dumps(header['jwk'], sort_keys=True, separators=(',', ':'))
thumbprint = _b64(hashlib.sha256(accountkey_json).digest())
sys.stderr.write("parsed!\n")
# helper function make signed requests
def _send_signed_request(url, payload):
nonce = urllib2.urlopen(CA + "/directory").headers['Replay-Nonce']
payload64 = _b64(json.dumps(payload))
protected = copy.deepcopy(header)
protected.update({"nonce": nonce})
protected64 = _b64(json.dumps(protected))
proc = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", account_key],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate("{0}.{1}".format(protected64, payload64))
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
data = json.dumps({
"header": header,
"protected": protected64,
"payload": payload64,
"signature": _b64(out),
})
try:
resp = urllib2.urlopen(url, data)
return resp.getcode(), resp.read()
except urllib2.HTTPError as e:
return e.code, e.read()
# find domains
sys.stderr.write("Parsing CSR...")
proc = subprocess.Popen(["openssl", "req", "-in", csr, "-noout", "-text"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("Error loading {0}: {1}".format(csr, err))
domains = set([])
common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out)
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out, re.MULTILINE|re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
sys.stderr.write("parsed!\n")
# get the certificate domains and expiration
sys.stderr.write("Registering account...")
code, result = _send_signed_request(CA + "/acme/new-reg", {
"resource": "new-reg",
"agreement": "https://letsencrypt.org/documents/LE-SA-v1.0.1-July-27-2015.pdf",
})
if code == 201:
sys.stderr.write("registered!\n")
elif code == 409:
sys.stderr.write("already registered!\n")
else:
raise ValueError("Error registering: {0} {1}".format(code, result))
# verify each domain
for domain in domains:
sys.stderr.write("Verifying {0}...".format(domain))
# get new challenge
code, result = _send_signed_request(CA + "/acme/new-authz", {
"resource": "new-authz",
"identifier": {
"type": "dns",
"value": domain,
},
})
if code != 201:
raise ValueError("Error registering: {0} {1}".format(code, result))
# make the challenge file
challenge = [c for c in json.loads(result)['challenges'] if c['type'] == "http-01"][0]
challenge['token'] = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = "{0}.{1}".format(challenge['token'], thumbprint)
wellknown_path = os.path.join(acme_dir, challenge['token'])
wellknown_file = open(wellknown_path, "w")
wellknown_file.write(keyauthorization)
wellknown_file.close()
# check that the file is in place
wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(
domain, challenge['token'])
try:
resp = urllib2.urlopen(wellknown_url)
assert resp.read().strip() == keyauthorization
except (urllib2.HTTPError, urllib2.URLError, AssertionError):
os.remove(wellknown_path)
raise ValueError("Wrote file to {0}, but couldn't download {1}".format(
wellknown_path, wellknown_url))
# notify challenge are met
code, result = _send_signed_request(challenge['uri'], {
"resource": "challenge",
"keyAuthorization": keyauthorization,
})
if code != 202:
raise ValueError("Error triggering challenge: {0} {1}".format(code, result))
# wait for challenge to be verified
while True:
try:
resp = urllib2.urlopen(challenge['uri'])
challenge_status = json.loads(resp.read())
except urllib2.HTTPError as e:
raise ValueError("Error checking challenge: {0} {1}".format(
e.code, json.loads(e.read())))
if challenge_status['status'] == "pending":
time.sleep(2)
elif challenge_status['status'] == "valid":
sys.stderr.write("verified!\n")
os.remove(wellknown_path)
break
else:
raise ValueError("{0} challenge did not pass: {1}".format(
domain, challenge_status))
# get the new certificate
sys.stderr.write("Signing certificate...")
proc = subprocess.Popen(["openssl", "req", "-in", csr, "-outform", "DER"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
csr_der, err = proc.communicate()
code, result = _send_signed_request(CA + "/acme/new-cert", {
"resource": "new-cert",
"csr": _b64(csr_der),
})
if code != 201:
raise ValueError("Error signing certificate: {0} {1}".format(code, result))
# return signed certificate!
sys.stderr.write("signed!\n")
return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
"\n".join(textwrap.wrap(base64.b64encode(result), 64)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script automates the process of getting a signed TLS certificate from
Let's Encrypt using the ACME protocol. It will need to be run on your server
and have access to your private account key, so PLEASE READ THROUGH IT! It's
only ~200 lines, so it won't take long.
===Example Usage===
python acme_tiny.py --account-key ./account.key --csr ./domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > signed.crt
===================
===Example Crontab Renewal (once per month)===
0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed.crt 2>> /var/log/acme_tiny.log
==============================================
""")
)
parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key")
parser.add_argument("--csr", required=True, help="path to your certificate signing request")
parser.add_argument("--acme-dir", required=True, help="path to the .well-known/acme-challenge/ directory")
args = parser.parse_args()
signed_crt = get_crt(args.account_key, args.csr, args.acme_dir)
sys.stdout.write(signed_crt)
| 43.522613 | 221 | 0.592772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,020 | 0.34869 |
cd6f2ad698e4fc98b32ec95e10035f7d48a91c97 | 3,667 | py | Python | esb/Tags.py | sgbalogh/esb.py | 06e8f86b94d5dadc628a0fbd396212649328864d | ["MIT"] | null | null | null | esb/Tags.py | sgbalogh/esb.py | 06e8f86b94d5dadc628a0fbd396212649328864d | ["MIT"] | null | null | null | esb/Tags.py | sgbalogh/esb.py | 06e8f86b94d5dadc628a0fbd396212649328864d | ["MIT"] | null | null | null
class Tags:
class Thematic:
ACC_NOTE = "account:note"
DELIMITER = "delimiter:thematic"
FAM_SIBLINGS = "fam:siblings"
FAM_CHILDREN = "fam:children"
FAM_PARENTS = "fam:parents"
FAM_SPOUSE = "fam:spouse"
META_NO_REMARK = "meta:no-remarks"
META_PARENTHETICAL = "meta:parenthetical"
META_RECORD = "meta:record-reference"
SUBJ_AGE = "subj:age"
SUBJ_BIO = "subj:biographical-note"
SUBJ_EMIGRATION = "subj:emigration-event"
SUBJ_MARTIAL = "subj:marital-status"
SUBJ_NAME = "subj:name"
SUBJ_NATIVEOF = "subj:native-of"
SUBJ_OCCUPATION = "subj:occupation"
SUBJ_RESIDENCE = "subj:residence-info"
class Token:
END = "END"
START = "START"
DELIMITER = "t:DELIMITER"
EMIGRATION_ARRIVED = "t:emigration:ARRIVED"
EMIGRATION_VESSEL = "t:emigration:VESSEL"
EMIGRATION_VESSEL_HAS_ORIGIN = "t:emigration:VESSEL_HAS_ORIGIN"
EMIGRATION_VIA = "t:emigration:VIA"
LOCATION_DISTANCE = "t:location:DISTANCE"
LOCATION_DISTANCE_UNIT = "t:location:DISTANCE_UNIT"
LOCATION_FROM = "t:location:FROM"
LOCATION_NAME = "t:location:NAME"
META_ACCOUNT_CLOSED = "t:meta:ACCOUNT_CLOSED"
META_ACCOUNT_NUMBER = "t:meta:ACCOUNT_NUMBER"
META_IS_SAME_AS = "t:meta:IS_SAME_AS"
META_NO_REMARKS = "t:meta:NO_REMARKS"
META_SEE = "t:meta:SEE"
META_PARENTHETICAL = "meta:parenthetical"
PERSON_AGE = "t:person:AGE"
PERSON_AGE_YEAR = "t:person:AGE_YEAR"
PERSON_BROTHERS = "t:person:BROTHERS"
PERSON_CHILDREN = "t:person:CHILDREN"
PERSON_FATHER = "t:person:FATHER"
PERSON_HAS_NATIONALITY = "t:person:HAS_NATIONALITY"
PERSON_IS_DEAD = "t:person:IS_DEAD"
PERSON_IS_LIVING = "t:person:IS_LIVING"
PERSON_IS_SINGLE = "t:person:IS_SINGLE"
PERSON_IS_WIDOWED = "t:person:IS_WIDOWED"
PERSON_LOCATED_IN = "t:person:LOCATED_IN"
PERSON_MOTHER = "t:person:MOTHER"
PERSON_NAME = "t:person:NAME"
PERSON_NUMBER = "t:person:NUMBER"
PERSON_PARENTS = "t:person:PARENTS"
PERSON_SISTERS = "t:person:SISTERS"
PERSON_SON = "t:person:SON"
PERSON_HUSBAND = "t:person:HUSBAND"
PERSON_WIFE = "t:person:WIFE"
PERSON_STEP_SIBLING = "t:person:STEP_SIBLING"
PERSON_IS_MINOR = "t:person:IS_MINOR"
REL_HAS = "t:rel:HAS"
REL_HAS_HUSBAND = "t:rel:HAS_HUSBAND"
REL_HAS_WIFE = "t:rel:HAS_WIFE"
REL_HAS_SPOUSE = "t:rel:HAS_SPOUSE"
REL_IS_NATIVE_OF = "t:rel:IS_NATIVE_OF"
REL_IS_WIDOW_OF = "t:rel:IS_WIDOW_OF"
REL_IS_DAUGHTER_OF = "t:rel:IS_DAUGHTER_OF"
REL_TO = "t:rel:TO"
RESIDENTIAL_RESIDENCE = "t:residential:RESIDENCE"
RESIDENTIAL_CURRENTLY_LIVING_AT = "t:residential:CURRENTLY_LIVING_AT"
RESIDENTIAL_FORMERLY_LOCATED_AT = "t:residential:FORMERLY_LOCATED_AT"
RESIDENTIAL_LIVED_WITH = "t:residential:LIVED_WITH"
RESIDENTIAL_LIVES_WITH = "t:residential:LIVES_WITH"
SUBJ_IS_MAN = "t:subj:IS_MAN"
SUBJ_IS_WOMAN = "t:subj:IS_WOMAN"
TIME_DATE = "t:time:DATE"
TIME_MONTH = "t:time:MONTH"
TIME_YEAR = "t:time:YEAR"
TIME_DURATION_VALUE = "t:time:DURATION_VALUE"
TIME_DURATION_YEAR = "t:time:DURATION_YEAR"
TIME_ABOUT = "t:time:ABOUT"
UNKNOWN = "t:UNKNOWN"
WORK_EMPLOYER_NAME = "t:work:EMPLOYER_NAME"
WORK_OCCUPATION = "t:work:OCCUPATION"
WORK_WORKS_FOR = "t:work:WORKS_FOR"
BLANK = "t:BLANK"
| 35.95098 | 77 | 0.642214 | 3,665 | 0.999455 | 0 | 0 | 0 | 0 | 0 | 0 | 1,485 | 0.404963 |
cd6f36cb0dc0dd674280cb84b43ef766b0e9c395 | 14,691 | py | Python | Draft/08_compare_original_GBS_Exome_biallelic_variation_alle_count.py | peipeiwang6/Genomic_prediction_in_Switchgrass | 1fba3508c0d81d16e0629e3cf94ff4d174a85b13 | ["MIT"] | null | null | null | Draft/08_compare_original_GBS_Exome_biallelic_variation_alle_count.py | peipeiwang6/Genomic_prediction_in_Switchgrass | 1fba3508c0d81d16e0629e3cf94ff4d174a85b13 | ["MIT"] | null | null | null | Draft/08_compare_original_GBS_Exome_biallelic_variation_alle_count.py | peipeiwang6/Genomic_prediction_in_Switchgrass | 1fba3508c0d81d16e0629e3cf94ff4d174a85b13 | ["MIT"] | null | null | null
'''
input1: exome capture, biallelic indel matrix
input2: exome capture, biallelic SNP matrix
input3: GBS, biallelic indel matrix
input4: GBS, biallelic SNP matrix
input5: allele count file for exome homozygous or heterozygous genotype
input6: allele count file for GBS homozygous or heterozygous genotype
input7: tetraploid or octaploid
'''
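# Example invocation (a sketch; the file names are hypothetical):
#   python 08_compare_original_GBS_Exome_biallelic_variation_alle_count.py \
#       exome_indel.txt exome_snp.txt gbs_indel.txt gbs_snp.txt \
#       exome_allele_count.txt gbs_allele_count.txt octaploid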
import sys,os
import numpy as np
exome_indel = open(sys.argv[1],'r').readlines()
exome_snp = open(sys.argv[2],'r').readlines()
gbs_indel = open(sys.argv[3],'r').readlines()
gbs_snp = open(sys.argv[4],'r').readlines()
EP = {} #EP[pos] = 1
for inl in exome_indel[1:]:
tem = inl.split('\t')
EP[tem[0] + '_' + tem[1]] = 1
for inl in exome_snp[1:]:
tem = inl.split('\t')
EP[tem[0] + '_' + tem[1]] = 1
S = {} #shared position, S[pos] = 1
for inl in gbs_indel[1:]:
tem = inl.split('\t')
if tem[0] + '_' + tem[1] in EP:
S[tem[0] + '_' + tem[1]] = 1
for inl in gbs_snp[1:]:
tem = inl.split('\t')
if tem[0] + '_' + tem[1] in EP:
S[tem[0] + '_' + tem[1]] = 1
E = {} # E[pos][ind] = A/T
G = {} # G[pos][ind] = A/T
EN = {} # EN[i] = ind
GN = {} # GN[i] = ind
IND = {} # IND[ind] = 1
tem = exome_indel[0].strip().split('\t')
for i in range(4,len(tem)):
EN[i] = tem[i]
IND[tem[i]] = 1
tem = gbs_indel[0].strip().split('\t')
for i in range(4,len(tem)):
GN[i] = tem[i]
for inl in exome_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
E[pos] = {}
E[pos]['ref'] = tem[2]
E[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
E[pos][EN[i]] = tem[i]
for inl in exome_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
E[pos] = {}
E[pos]['ref'] = tem[2]
E[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
E[pos][EN[i]] = tem[i]
for inl in gbs_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
G[pos] = {}
G[pos]['ref'] = tem[2]
G[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
G[pos][GN[i]] = tem[i]
for inl in gbs_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
G[pos] = {}
G[pos]['ref'] = tem[2]
G[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
G[pos][GN[i]] = tem[i]
out = open('Biallelic_variation_%s_Exome_VS_GBS.txt'%sys.argv[7],'w')
Ind = sorted(IND.keys())
title = 'Chr\tPos\tRef\tAlt'
for ind in Ind:
title = title + '\t' + ind
out.write(title + '\n')
for pos in S:
res = pos.split('_')[0] + '\t' + pos.split('_')[1]
if E[pos]['ref'] == G[pos]['ref']:
res = res + '\t' + E[pos]['ref']
else:
res = res + '\t' + E[pos]['ref'] + '|' + G[pos]['ref']
if E[pos]['alt'] == G[pos]['alt']:
res = res + '\t' + E[pos]['alt']
else:
res = res + '\t' + E[pos]['alt'] + '|' + G[pos]['alt']
for ind in Ind:
if E[pos][ind] == G[pos][ind] or (E[pos][ind].split('/')[0] == G[pos][ind].split('/')[1] and E[pos][ind].split('/')[1] == G[pos][ind].split('/')[0]):
res = res + '\t' + E[pos][ind]
else:
res = res + '\t' + E[pos][ind] + '|' + G[pos][ind]
out.write(res + '\n')
out.close()
ori_exome_indel = open(sys.argv[1],'r').readlines()
ori_exome_snp = open(sys.argv[2],'r').readlines()
ori_gbs_indel = open(sys.argv[3],'r').readlines()
ori_gbs_snp = open(sys.argv[4],'r').readlines()
ori_out = open('Shared_Biallelic_variation_%s_original_Exome_VS_GBS.txt'%sys.argv[7],'w')
out = open('Distribution_of_discrepancy_Biallelic_variation_%s_between_exome_and_GBS.txt'%sys.argv[7],'w')
ori_out.write(title + '\n')
O_exome = {}
O_gbs = {}
EN = {} # EN[i] = ind
GN = {} # GN[i] = ind
tem = ori_exome_indel[0].strip().split('\t')
for i in range(4,len(tem)):
EN[i] = tem[i]
IND[tem[i]] = 1
tem = ori_gbs_indel[0].strip().split('\t')
for i in range(4,len(tem)):
GN[i] = tem[i]
for inl in ori_exome_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_exome[pos] = {}
O_exome[pos]['ref'] = tem[2]
O_exome[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_exome[pos][EN[i]] = tem[i]
for inl in ori_exome_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_exome[pos] = {}
O_exome[pos]['ref'] = tem[2]
O_exome[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_exome[pos][EN[i]] = tem[i]
for inl in ori_gbs_indel[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_gbs[pos] = {}
O_gbs[pos]['ref'] = tem[2]
O_gbs[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_gbs[pos][GN[i]] = tem[i]
for inl in ori_gbs_snp[1:]:
tem = inl.strip().split('\t')
if tem[0] + '_' + tem[1] in S:
pos = tem[0] + '_' + tem[1]
O_gbs[pos] = {}
O_gbs[pos]['ref'] = tem[2]
O_gbs[pos]['alt'] = tem[3]
for i in range(4,len(tem)):
O_gbs[pos][GN[i]] = tem[i]
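# Genotypes in the octaploid branch carry four '/'-separated alleles (e.g.
# 'A/A/T/T'); './././.' marks a missing call. The sorted() comparisons below
# treat permutations such as 'A/A/T/T' and 'T/T/A/A' as identical.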
if sys.argv[7] == 'octaploid':
N1 = 0 ### Exome has variation, GBS is ./.
N2 = 0 ### have same variation
	N3 = 0 ### Exome has hetero(AATT), GBS has homo
	N3_02 = 0 ### Exome has hetero(ATTT or AAAT), GBS has homo
	N3_03 = 0 ### Exome has hetero(ATTT or AAAT), GBS has hetero(AATT)
	N4 = 0 ### Exome is ./., GBS has variation
	N5 = 0 ### Exome has homo, GBS has hetero(AATT)
	N5_02 = 0 ### Exome has homo, GBS has hetero(ATTT or AAAT)
	N5_03 = 0 ### Exome has hetero(AATT), GBS has hetero(ATTT or AAAT)
	N5_04 = 0 ### Exome has hetero(ATTT), GBS has hetero(TTTA)
N6 = 0 ### both are ./.
N7 = 0 ### both homo but different variation
out.write('Chr\tpos\tID\tExome_SNP\tGBS_SNP\tType\n')
for pos in S:
res = pos.split('_')[0] + '\t' + pos.split('_')[1]
if O_exome[pos]['ref'] == O_gbs[pos]['ref']:
res = res + '\t' + O_exome[pos]['ref']
else:
res = res + '\t' + O_exome[pos]['ref'] + '|' + O_gbs[pos]['ref']
print(pos)
if O_exome[pos]['alt'] == O_gbs[pos]['alt']:
res = res + '\t' + O_exome[pos]['alt']
else:
res = res + '\t' + O_exome[pos]['alt'] + '|' + O_gbs[pos]['alt']
print(pos)
for ind in Ind:
if O_exome[pos][ind] == O_gbs[pos][ind] or sorted(O_exome[pos][ind].split('/')) == sorted(O_gbs[pos][ind].split('/')):
res = res + '\t' + O_exome[pos][ind]
else:
res = res + '\t' + O_exome[pos][ind] + '|' + O_gbs[pos][ind]
### have same SNPs, AATT == TTAA, ATTT == TTTA
if (O_exome[pos][ind] == O_gbs[pos][ind] or sorted(O_exome[pos][ind].split('/')) == sorted(O_gbs[pos][ind].split('/'))) and O_exome[pos][ind]!= './././.':
N2 += 1
### both are ./.
elif O_exome[pos][ind] == O_gbs[pos][ind] and O_exome[pos][ind]== './././.':
N6 += 1
### Exome has SNPs, GBS is ./.
elif O_exome[pos][ind] != './././.' and O_gbs[pos][ind] == './././.':
N1 += 1
### Exome is ./., GBS has SNPs
elif O_exome[pos][ind] == './././.' and O_gbs[pos][ind] != './././.':
N4 += 1
### Exome has homo, GBS has hetero(AATT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 1 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_exome[pos][ind] != './././.' and O_gbs[pos][ind].split('/')[1] != O_gbs[pos][ind].split('/')[2]:
N5 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_homo_GBS_hetero_AATT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has homo, GBS has hetero(ATTT or AAAT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 1 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_exome[pos][ind] != './././.' and O_gbs[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[2]:
N5_02 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_homo_GBS_hetero_ATTT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has AATT, GBS has hetero(ATTT or AAAT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_gbs[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[2] and O_exome[pos][ind].split('/')[1] != O_exome[pos][ind].split('/')[2]:
N5_03 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_AATT_GBS_hetero_ATTT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(ATTT), GBS has hetero(TTTA)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_gbs[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[2] and O_exome[pos][ind].split('/')[1] == O_exome[pos][ind].split('/')[2] and sorted(O_exome[pos][ind].split('/')) != sorted(O_gbs[pos][ind].split('/')):
N5_04 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_ATTT_GBS_hetero_AAAT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(AATT), GBS has homo
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 1 and O_exome[pos][ind].split('/')[1] != O_exome[pos][ind].split('/')[2] and O_gbs[pos][ind] != './././.':
N3 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_AATT_GBS_homo\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(ATTT or AAAT), GBS has homo
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 1 and O_exome[pos][ind].split('/')[1] == O_exome[pos][ind].split('/')[2] and O_gbs[pos][ind] != './././.':
N3_02 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_ATTT_GBS_homo\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero(ATTT or AAAT), GBS has hetero(AATT)
elif len(np.unique(O_exome[pos][ind].split('/'))) == 2 and len(np.unique(O_gbs[pos][ind].split('/'))) == 2 and O_exome[pos][ind].split('/')[1] == O_exome[pos][ind].split('/')[2] and O_gbs[pos][ind].split('/')[1] != O_gbs[pos][ind].split('/')[2] :
N3_03 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_ATTT_GBS_hetero_AATT\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### both homo, but diff
elif len(np.unique(O_exome[pos][ind].split('/'))) == 1 and len(np.unique(O_gbs[pos][ind].split('/'))) == 1 and O_exome[pos][ind]!=O_gbs[pos][ind] and O_exome[pos][ind] != './././.' and O_gbs[pos][ind]!= './././.':
N7 += 1
print([O_exome[pos][ind],O_gbs[pos][ind]])
out.write('%s\t%s\t%s\t%s\t%s\tBoth_homo_differ\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
ori_out.write(res + '\n')
ori_out.close()
out.close()
print([N1,N2,N3,N3_02,N3_03,N4,N5,N5_02,N5_03,N5_04,N6,N7])
if sys.argv[7] == 'tetraploid':
N1 = 0 ### Exome has SNPs, GBS is ./.
N2 = 0 ### have same SNPs
N3 = 0 ### Exome has hetero, GBS has homo
N4 = 0 ### Exome is ./., GBS has SNPs
N5 = 0 ### Exome has homo, GBS has hetero
N6 = 0 ### both are ./.
N7 = 0 ### both homo but different SNPs
out.write('Chr\tpos\tID\tExome_SNP\tGBS_SNP\tType\n')
for pos in S:
res = pos.split('_')[0] + '\t' + pos.split('_')[1]
if O_exome[pos]['ref'] == O_gbs[pos]['ref']:
res = res + '\t' + O_exome[pos]['ref']
else:
res = res + '\t' + O_exome[pos]['ref'] + '|' + O_gbs[pos]['ref']
if O_exome[pos]['alt'] == O_gbs[pos]['alt']:
res = res + '\t' + O_exome[pos]['alt']
else:
res = res + '\t' + O_exome[pos]['alt'] + '|' + O_gbs[pos]['alt']
for ind in Ind:
if O_exome[pos][ind] == O_gbs[pos][ind] or (O_exome[pos][ind].split('/')[0] == O_gbs[pos][ind].split('/')[1] and O_exome[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[0]):
res = res + '\t' + O_exome[pos][ind]
else:
res = res + '\t' + O_exome[pos][ind] + '|' + O_gbs[pos][ind]
### have same SNPs
if (O_exome[pos][ind] == O_gbs[pos][ind] or (O_exome[pos][ind].split('/')[0] == O_gbs[pos][ind].split('/')[1] and O_exome[pos][ind].split('/')[1] == O_gbs[pos][ind].split('/')[0])) and O_exome[pos][ind]!= './.':
N2 += 1
### both are ./.
elif O_exome[pos][ind] == O_gbs[pos][ind] and O_exome[pos][ind]== './.':
N6 += 1
### Exome has SNPs, GBS is ./.
elif O_exome[pos][ind] != './.' and O_gbs[pos][ind] == './.':
N1 += 1
### Exome is ./., GBS has SNPs
elif O_exome[pos][ind] == './.' and O_gbs[pos][ind] != './.':
N4 += 1
### Exome has homo, GBS has hetero
elif O_exome[pos][ind].split('/')[0] == O_exome[pos][ind].split('/')[1] and O_exome[pos][ind]!= './.' and O_gbs[pos][ind].split('/')[0] != O_gbs[pos][ind].split('/')[1]:
N5 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_homo_GBS_hetero\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
### Exome has hetero, GBS has homo
elif O_exome[pos][ind].split('/')[0] != O_exome[pos][ind].split('/')[1] and O_gbs[pos][ind].split('/')[0] == O_gbs[pos][ind].split('/')[1] and O_gbs[pos][ind] != './.':
N3 += 1
out.write('%s\t%s\t%s\t%s\t%s\tExome_hetero_GBS_homo\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
else:
N7 += 1
print([O_exome[pos][ind],O_gbs[pos][ind]])
out.write('%s\t%s\t%s\t%s\t%s\tBoth_homo_differ\n'%(pos.split('_')[0],pos.split('_')[1],ind,O_exome[pos][ind],O_gbs[pos][ind]))
ori_out.write(res + '\n')
ori_out.close()
out.close()
print([N1,N2,N3,N4,N5,N6,N7])
inp = open('Distribution_of_discrepancy_Biallelic_variation_%s_between_exome_and_GBS.txt'%sys.argv[7],'r').readlines()
out = open('Distribution_of_discrepancy_Biallelic_variation_%s_between_exome_and_GBS_alle_count.txt'%sys.argv[7],'w')
P = {}
for inl in inp[1:]:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr not in P:
P[chr] = {}
if pos not in P[chr]:
P[chr][pos] = {}
if ind not in P[chr][pos]:
P[chr][pos][ind] = [0,0,0,0]
Exome = open(sys.argv[5],'r')
inl = Exome.readline()
inl = Exome.readline()
while inl:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr in P:
if pos in P[chr]:
if ind in P[chr][pos]:
P[chr][pos][ind][0] = int(tem[6])
P[chr][pos][ind][1] = int(tem[7])
inl = Exome.readline()
GBS = open(sys.argv[6],'r')
inl = GBS.readline()
inl = GBS.readline()
while inl:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr in P:
if pos in P[chr]:
if ind in P[chr][pos]:
P[chr][pos][ind][2] = int(tem[6])
P[chr][pos][ind][3] = int(tem[7])
inl = GBS.readline()
out.write('Chr\tPos\tInd\tExome_SNP\tGBS_SNP\tType\tExome_alle_count\tExome_read_count\tGBS_alle_count\tGBS_read_count\n')
for inl in inp[1:]:
tem = inl.split('\t')
chr = tem[0]
pos = tem[1]
ind = tem[2]
if chr not in P:
P[chr] = {}
if pos not in P[chr]:
P[chr][pos] = {}
if ind not in P[chr][pos]:
P[chr][pos][ind] = [0,0,0,0]
out.write('%s\t%s\t%s\t%s\t%s\n'%(inl.strip(),P[chr][pos][ind][0],P[chr][pos][ind][1],P[chr][pos][ind][2],P[chr][pos][ind][3]))
out.close()
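
# Invocation sketch (illustrative; the script name is hypothetical, the argument
# order is taken from the sys.argv accesses above):
#   python compare_exome_gbs.py <exome_indel> <exome_snp> <gbs_indel> <gbs_snp> \
#       <exome_allele_counts> <gbs_allele_counts> <tetraploid|octaploid>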
| 39.176 | 327 | 0.571166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,881 | 0.264175 |
cd7225d8ec41e4d30a72fb83efb498273f5b3bbc | 132 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_frame.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | ["Zlib"] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_frame.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | ["Zlib"] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/calculators/calc_frame.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | ["Zlib"] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z |
from pyradioconfig.parts.bobcat.calculators.calc_frame import Calc_Frame_Bobcat

class calc_frame_viper(Calc_Frame_Bobcat):
    pass
| 33 | 79 | 0.863636 | 51 | 0.386364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd7529c73cff8550931b72e595537b4c1b291bee | 1,940 | py | Python | scripts/stats_wrapper.py | gpertea/regtools | a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c | ["MIT"] | 70 | 2015-08-05T21:32:51.000Z | 2021-11-26T13:26:33.000Z | scripts/stats_wrapper.py | gpertea/regtools | a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c | ["MIT"] | 145 | 2015-08-05T22:27:58.000Z | 2022-03-14T21:50:17.000Z | scripts/stats_wrapper.py | gpertea/regtools | a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c | ["MIT"] | 29 | 2015-08-01T02:19:40.000Z | 2021-12-16T20:02:40.000Z |
import glob
import subprocess
import os
import argparse
import shutil
input_parser = argparse.ArgumentParser(
description="Run RegTools stats script",
)
input_parser.add_argument(
'tag',
help="Variant tag parameter used to run RegTools.",
)
args = input_parser.parse_args()
tag = args.tag
cwd = os.getcwd()
lines_per_file = 25000
smallfile = None
with open(f'all_splicing_variants_{tag}.bed', 'r') as bigfile:
header = bigfile.readline()
for lineno, line in enumerate(bigfile):
if lineno % lines_per_file == 0:
if smallfile:
smallfile.close()
small_filename = 'small_file_{}.txt'.format(lineno + lines_per_file)
smallfile = open(small_filename, "w")
smallfile.write(header)
smallfile.write(line)
if smallfile:
smallfile.close()
# get chunks
files = glob.glob('small_file_*')
files.sort()
number_of_in_files = len(files)
for file in files:
subprocess.run(f'Rscript --vanilla compare_junctions_hist_v2.R {tag} {file}', shell=True, check=True)
output_files = glob.glob("*_out.tsv")
output_files.sort()  # glob lacks reliable ordering, so impose your own if output order matters
number_of_out_files = len(output_files)
if number_of_in_files == number_of_out_files:
with open(f'compare_junctions/hist/junction_pvalues_{tag}.tsv', 'wb') as outfile:
for i, fname in enumerate(output_files):
with open(fname, 'rb') as infile:
if i != 0:
infile.readline() # Throw away header on all but first file
# Block copy rest of file from input to output without parsing
shutil.copyfileobj(infile, outfile)
print(fname + " has been imported.")
else:
print("Number of output files doesn't match the number of input files that should have been processed")
files = glob.glob('small_file_*')
for file in files:
os.remove(file)
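
# Invocation sketch (illustrative): run from the directory that holds
# all_splicing_variants_<tag>.bed and compare_junctions_hist_v2.R, e.g.
#   python stats_wrapper.py <variant-tag>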
| 33.448276 | 107 | 0.676804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 601 | 0.309794 |
cd7892510c7f345ccc184879db2d6bb6e417c44a | 451 | py | Python | lib/model/utils/plt_loss.py | PhoneSix/Domain-Contrast | 5c674b581bce9beacf5bc0dd13113f33c4050495 | ["MIT"] | 4 | 2021-07-31T01:04:15.000Z | 2022-03-09T07:23:10.000Z | lib/model/utils/plt_loss.py | PhoneSix/Domain-Contrast | 5c674b581bce9beacf5bc0dd13113f33c4050495 | ["MIT"] | null | null | null | lib/model/utils/plt_loss.py | PhoneSix/Domain-Contrast | 5c674b581bce9beacf5bc0dd13113f33c4050495 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import os
def plt_loss(epoch, dir_, name, value):
if not os.path.exists(dir_):
os.makedirs(dir_)
axis = np.linspace(1,epoch,epoch)
label = '{}'.format(name)
fig = plt.figure()
plt.title(label)
plt.plot(axis, value)
# plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.grid(True)
plt.savefig('{}/{}.pdf'.format(dir_, name))
plt.close(fig)
| 25.055556 | 47 | 0.62306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.104213 |
cd78d6e1151155e18754cebc1cc2d5b9e9efa63f | 3,267 | py | Python | ocellaris/utils/alarm.py | TormodLandet/Ocellaris | 6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58 | ["Apache-2.0"] | 1 | 2017-11-07T12:19:44.000Z | 2017-11-07T12:19:44.000Z | ocellaris/utils/alarm.py | TormodLandet/Ocellaris | 6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58 | ["Apache-2.0"] | null | null | null | ocellaris/utils/alarm.py | TormodLandet/Ocellaris | 6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58 | ["Apache-2.0"] | 2 | 2018-05-02T17:17:01.000Z | 2019-03-11T13:09:40.000Z |
# Copyright (C) 2018-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
"""
A timeout context manager based on SIGALRM. It permits multiple
SIGALRM events to be queued.
Uses a `heapq` to store the objects to be called when an alarm signal is
raised, so that the next alarm is always at the top of the heap.
Note: SIGALRM does not work on Windows!
Code from ActiveState Python recipes
http://code.activestate.com/recipes/577600-queue-for-managing-multiple-sigalrm-alarms-concurr/
modified by stackoverflow user "James":
https://stackoverflow.com/a/34999808
"""
import heapq
import signal
from time import time
alarmlist = []
def __new_alarm(t, f, a, k):
return (t + time(), f, a, k)
def __next_alarm():
return int(round(alarmlist[0][0] - time())) if alarmlist else None
def __set_alarm():
return signal.alarm(max(__next_alarm(), 1))
class AlarmTimeoutError(Exception):
def __init__(self, message, name):
self.message = message
self.name = name
class AlarmTimeout:
"""
Context manager for timeouts
"""
def __init__(self, name, seconds=1, error_message='Timeout'):
self.name = name
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self):
raise AlarmTimeoutError(self.error_message, self.name)
def __enter__(self):
self.this_alarm = alarm(self.seconds, self.handle_timeout)
def __exit__(self, type, value, traceback):
try:
cancel(self.this_alarm)
except ValueError:
pass
def __clear_alarm():
"""
Clear an existing alarm.
If the alarm signal was set to a callable other than our own, queue the
previous alarm settings.
"""
oldsec = signal.alarm(0)
oldfunc = signal.signal(signal.SIGALRM, __alarm_handler)
if oldsec > 0 and oldfunc != __alarm_handler:
heapq.heappush(alarmlist, (__new_alarm(oldsec, oldfunc, [], {})))
def __alarm_handler(*_args):
"""
Handle an alarm by calling any due heap entries and resetting the alarm.
Note that multiple heap entries might get called, especially if calling an
entry takes a lot of time.
"""
try:
nextt = __next_alarm()
while nextt is not None and nextt <= 0:
(_tm, func, args, keys) = heapq.heappop(alarmlist)
func(*args, **keys)
nextt = __next_alarm()
finally:
if alarmlist:
__set_alarm()
def alarm(sec, func, *args, **keys):
"""
Set an alarm.
When the alarm is raised in `sec` seconds, the handler will call `func`,
passing `args` and `keys`. Return the heap entry (which is just a big
tuple), so that it can be cancelled by calling `cancel()`.
"""
__clear_alarm()
try:
newalarm = __new_alarm(sec, func, args, keys)
heapq.heappush(alarmlist, newalarm)
return newalarm
finally:
__set_alarm()
def cancel(alarm):
"""
Cancel an alarm by passing the heap entry returned by `alarm()`.
It is an error to try to cancel an alarm which has already occurred.
"""
__clear_alarm()
try:
alarmlist.remove(alarm)
heapq.heapify(alarmlist)
finally:
if alarmlist:
__set_alarm()
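
# Usage sketch (an illustrative addition, not part of the original recipe):
# guard a slow call with AlarmTimeout; requires a POSIX system where SIGALRM
# is available.
if __name__ == '__main__':
    import time as _time

    try:
        with AlarmTimeout(name='demo', seconds=1):
            _time.sleep(2)  # sleeps past the 1 s timeout, so the alarm fires
    except AlarmTimeoutError as err:
        print('timed out:', err.message)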
| 25.724409 | 94 | 0.653811 | 692 | 0.211815 | 0 | 0 | 0 | 0 | 0 | 0 | 1,359 | 0.415978 |
cd79597c4dc624f2537254fe68c7bb39e5b6003c | 2,549 | py | Python | apps/insar.py | giswqs/streamlit-insar | e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455 | ["MIT"] | 5 | 2021-12-14T23:28:36.000Z | 2022-02-27T14:35:29.000Z | apps/insar.py | giswqs/streamlit-insar | e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455 | ["MIT"] | null | null | null | apps/insar.py | giswqs/streamlit-insar | e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455 | ["MIT"] | null | null | null |
import folium
import altair as alt
import leafmap.foliumap as leafmap
import pandas as pd
import streamlit as st
def app():
st.title("InSAR")
option = st.radio("Choose an option", ("Marker Cluster", "Circle Marker"))
m = leafmap.Map(
center=[29.7029, -95.3335], latlon_control=False, zoom=16, height=600
)
data = "data/insar_data.csv"
if option == "Circle Marker":
df = pd.read_csv(data, skiprows=0).head(100)
df.columns = [col.replace(" ", "_").replace(".", "_") for col in df.columns]
columns = df.columns.values.tolist()
tooltip_cols = [
"ID",
"LAT",
"LON",
"HEIGHT",
"HEIGHT_WRT_DEM",
"SIGMA_HEIGHT",
"COHER",
]
ts_cols = columns[16:82]
ts_df = df[ts_cols]
min_width = 100
max_width = 200
x = ("LON",)
y = ("LAT",)
radius = 5
i = 0
for row in df.itertuples():
html = ""
for p in tooltip_cols:
html = (
html
+ "<b>"
+ p
+ "</b>"
+ ": "
+ str(eval(str("row." + p)))
+ "<br>"
)
i_df = ts_df.iloc[[i]].transpose()
i_df.columns = ["value"]
i_df["date"] = i_df.index
i_df = i_df.reset_index()
graph = line = (
alt.Chart(i_df)
.mark_line(interpolate="basis")
.encode(
x="date",
y="value",
)
)
popup_html = folium.Popup(html, min_width=min_width, max_width=max_width)
tooltip_str = folium.Tooltip(html)
popup = folium.Popup().add_child(
folium.features.VegaLite(graph, width="50%")
)
folium.CircleMarker(
location=[row.LAT, row.LON],
radius=radius,
popup=popup,
tooltip=tooltip_str,
).add_to(m)
i += 1
elif option == "Marker Cluster":
df = pd.read_csv(data)
columns = [
"ID",
"LAT",
"LON",
"HEIGHT",
"HEIGHT_WRT_DEM",
"SIGMA_HEIGHT",
"COHER",
]
df = df[columns]
m.add_points_from_xy(df, x="LON", y="LAT", radius=5)
m.to_streamlit(height=600)
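
# Run sketch (illustrative): this module is written as a page of a multi-page
# streamlit app, e.g. `from apps import insar; insar.app()` from the app entry point.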
| 25.747475 | 85 | 0.433503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.127501 |
cd7a330fb695d24e5d3e2270fbbe2e1e0d11d2dc | 2,105 | py | Python | solve_net.py | a1exwang/theano-cnn-intro | 5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62 | ["MIT"] | null | null | null | solve_net.py | a1exwang/theano-cnn-intro | 5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62 | ["MIT"] | null | null | null | solve_net.py | a1exwang/theano-cnn-intro | 5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62 | ["MIT"] | null | null | null |
from utils import LOG_INFO
import numpy as np
def data_iterator(x, y, batch_size, shuffle=True):
    # Build an index permutation so shuffling actually reorders the data
    # (range() cannot be shuffled in place, and the old indices were never used).
    indx = np.arange(len(x))
    if shuffle:
        np.random.shuffle(indx)
    for start_idx in range(0, len(x), batch_size):
        end_idx = min(start_idx + batch_size, len(x))
        excerpt = indx[start_idx: end_idx]
        yield x[excerpt], y[excerpt]
def solve_net(model, train_x, train_y, test_x, test_y,
batch_size, max_epoch, disp_freq, test_freq):
iter_counter = 0
loss_list = []
accuracy_list = []
test_acc = []
test_loss = []
for k in range(max_epoch):
for x, y in data_iterator(train_x, train_y, batch_size):
iter_counter += 1
loss, accuracy = model.train(x, y)
loss_list.append(loss)
accuracy_list.append(accuracy)
if iter_counter % disp_freq == 0:
msg = 'Training iter %d, mean loss %.5f (batch loss %.5f), mean acc %.5f' % (iter_counter,
np.mean(loss_list),
loss_list[-1],
np.mean(accuracy_list))
LOG_INFO(msg)
loss_list = []
accuracy_list = []
if iter_counter % test_freq == 0:
LOG_INFO(' Testing...')
for tx, ty in data_iterator(test_x, test_y, batch_size, shuffle=False):
t_accuracy, t_loss = model.test(tx, ty)
test_acc.append(t_accuracy)
test_loss.append(t_loss)
msg = ' Testing iter %d, mean loss %.5f, mean acc %.5f' % (iter_counter,
np.mean(test_loss),
np.mean(test_acc))
LOG_INFO(msg)
test_acc = []
test_loss = []
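
# Usage sketch (illustrative, not from the original repo): `model` is assumed to
# expose train(x, y) -> (loss, accuracy) and test(x, y) -> (accuracy, loss), as
# used above; the data arguments are NumPy arrays with matching first dimensions.
# solve_net(model, train_x, train_y, test_x, test_y,
#           batch_size=100, max_epoch=10, disp_freq=50, test_freq=200)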
| 39.716981 | 116 | 0.443705 | 0 | 0 | 288 | 0.136817 | 0 | 0 | 0 | 0 | 135 | 0.064133 |
cd7b0a77a1f93e1e0546019ec5051874f1e448ee | 1,199 | py | Python | playground/test1.py | mathee92/unirentalz | 803c58628ebda002e2c127db11fbaddf181ef394 | ["MIT"] | null | null | null | playground/test1.py | mathee92/unirentalz | 803c58628ebda002e2c127db11fbaddf181ef394 | ["MIT"] | null | null | null | playground/test1.py | mathee92/unirentalz | 803c58628ebda002e2c127db11fbaddf181ef394 | ["MIT"] | null | null | null |
# -----------
# User Instructions
#
# Modify the valid_month() function to verify
# whether the data a user enters is a valid
# month. If the passed in parameter 'month'
# is not a valid month, return None.
# If 'month' is a valid month, then return
# the name of the month with the first letter
# capitalized.
#
import string
import cgi
months = ['January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December']
def valid_month(month):
month = month.lower()
month = month.title()
if month in months:
return month
else:
return None
def valid_day(day):
if day and day.isdigit():
day = int(day)
if day > 0 and day <= 31:
return day
def valid_year(year):
if year and year.isdigit():
year = int(year)
if year > 1900 and year < 2020:
return year
def escape_html1(s):
for (i,o) in (("&", "&amp;"),
(">", "&gt;"),
("<", "&lt;"),
('"', "&quot;")):
s = s.replace(i,o)
return s
def escape_html2(s):
return cgi.escape(s, quote=True)
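
# Example checks (illustrative, not part of the original exercise file):
# valid_month("december") -> 'December'; valid_month("Nonexistent") -> None
# valid_day("31") -> 31; valid_day("40") -> None
# escape_html1('<b>"Hi" & bye</b>') -> '&lt;b&gt;&quot;Hi&quot; &amp; bye&lt;/b&gt;'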
| 18.734375 | 46 | 0.539616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 447 | 0.372811 |
cd7da929a4d4176f292520c09ac6f877772c0b49 | 2,274 | py | Python | hookio/logs.py | Marak/hook.io-sdk-python | 722b04eb0832ef712d5dcd491899996088e1aa8b | ["Unlicense"] | 1 | 2021-06-15T11:52:44.000Z | 2021-06-15T11:52:44.000Z | hookio/logs.py | Marak/hook.io-sdk-python | 722b04eb0832ef712d5dcd491899996088e1aa8b | ["Unlicense"] | null | null | null | hookio/logs.py | Marak/hook.io-sdk-python | 722b04eb0832ef712d5dcd491899996088e1aa8b | ["Unlicense"] | null | null | null |
import sys
import weakref
import json
import logging
from .utils import opt_json, Response2JSONLinesIterator
from six import StringIO
log = logging.getLogger(__name__)
class Logs:
def __init__(self, client):
self.client = weakref.proxy(client)
def read(self, url, raw=False, raw_data=True, **opts):
r = self.client.request('GET', url + '/logs', {}, **opts)
res = opt_json(r, raw)
if not raw and not raw_data and type(res) == list:
res = [json.loads(line) for line in res]
for row in res:
if 'data' in row:
row['data'] = json.loads(row['data'])
return res
def stream(self, url, raw=True, raw_data=True, streaming=True, **opts):
opts['streaming'] = streaming
if streaming:
opts.setdefault('stream_in', StringIO())
if not raw and callable(streaming):
def wrapper(line):
row = json.loads(line)
if not raw_data and 'data' in row:
row['data'] = json.loads(row['data'])
return streaming(row)
assert self.client.line_streaming, "Inconsistent API call"
opts['streaming'] = wrapper
log.debug("Will stream via wrapper")
r = self.client.request('GET', url + '/logs', {}, **opts)
if not raw and streaming and not callable(streaming):
log.debug("Will return iter_objects generator")
chunk_size = opts.get('chunk_size', self.client.chunk_size)
if raw_data:
func = None
else:
func = data_converted
return Response2JSONLinesIterator(r, converter=func, chunk_size=chunk_size)
return r
def flush(self, url, raw=False, **opts):
r = self.client.request('GET', url + '/logs?flush=true', {}, **opts)
return opt_json(r, raw)
def write(self, msg):
assert hasattr(sys.modules['__main__'], 'Hook'), \
"Writing logs supported only inside hook processing"
msg = {'type': 'log', 'payload': {'entry': msg}}
sys.stderr.write(json.dumps(msg) + '\n')
def data_converted(obj):
if 'data' in obj:
obj['data'] = json.loads(obj['data'])
return obj
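
# Usage sketch (illustrative; `client` is assumed to be a hook.io SDK client
# exposing request(), as used by the Logs class above -- it is not defined here):
# logs = Logs(client)
# entries = logs.read('marak/echo', raw=False, raw_data=False)
# logs.flush('marak/echo')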
| 35.53125 | 87 | 0.575638 | 1,992 | 0.875989 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.144679 |
cd7e21c2d43aa6b5ca80b05a26cc762c012f19a7 | 228 | py | Python | data/__init__.py | opconty/keras_std | 26cbe25c525128a067a97157bca0b060f40e5ec8 | ["Apache-2.0"] | 18 | 2019-07-16T10:54:29.000Z | 2021-03-16T00:34:23.000Z | data/__init__.py | opconty/keras_std | 26cbe25c525128a067a97157bca0b060f40e5ec8 | ["Apache-2.0"] | 2 | 2019-08-26T11:40:12.000Z | 2019-12-07T13:18:48.000Z | data/__init__.py | opconty/keras_std | 26cbe25c525128a067a97157bca0b060f40e5ec8 | ["Apache-2.0"] | 8 | 2019-07-17T08:26:10.000Z | 2021-03-16T00:34:17.000Z |
#-*- coding:utf-8 -*-
#'''
# Created on 19-7-16 at 2:14 PM
#
# @Author: Greg Gao(laygin)
#'''
from .synth_text import SynthTextConfig, SynthTextDataset
from .icdar13 import IcdarConfig, IcdarDataset
from .img_aug import resize_image
| 25.333333 | 57 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.37931 |
cd7f21d270d7885499684e88d3eb5ad2fac11de9 | 6,376 | py | Python | alberto/annotation/train.py | lettomobile/DeepPoseKit | a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503 | ["Apache-2.0"] | 1 | 2021-11-01T02:08:00.000Z | 2021-11-01T02:08:00.000Z | alberto/annotation/train.py | albertoursino/DeepPoseKit | a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503 | ["Apache-2.0"] | null | null | null | alberto/annotation/train.py | albertoursino/DeepPoseKit | a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503 | ["Apache-2.0"] | null | null | null |
from alberto.annotation import annotation_set
import numpy as np  # the pandas `np` re-export was removed in pandas 1.0; import numpy directly
from deepposekit.io import TrainingGenerator, DataGenerator
from deepposekit.augment import FlipAxis
import imgaug.augmenters as iaa
import imgaug as ia
from deepposekit.models import StackedHourglass
from deepposekit.models import load_model
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from deepposekit.callbacks import Logger, ModelCheckpoint
import time
from os.path import expanduser
HOME = annotation_set.HOME
IMAGE_SIZE = annotation_set.IMAGE_SIZE
TYPE = annotation_set.TYPE
data_generator = DataGenerator(
datapath=HOME + '/deepposekit-data/datasets/{}/annotation_set_{}_{}.h5'.format(TYPE, IMAGE_SIZE[0], IMAGE_SIZE[1]))
image, keypoints = data_generator[0]
plt.figure(figsize=(5, 5))
image = image[0] if image.shape[-1] == 3 else image[0, ..., 0]
cmap = None if image.shape[-1] == 3 else 'gray'
plt.imshow(image, cmap=cmap, interpolation='none')
for idx, jdx in enumerate(data_generator.graph):
if jdx > -1:
x1 = keypoints[0, idx, 0]
x2 = keypoints[0, jdx, 0]
if (0 <= x1 <= IMAGE_SIZE[0]) and (0 <= x2 <= IMAGE_SIZE[0]):
plt.plot(
[keypoints[0, idx, 0], keypoints[0, jdx, 0]],
[keypoints[0, idx, 1], keypoints[0, jdx, 1]],
'r-'
)
plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3)
plt.show()
# Augmentation
augmenter = []
augmenter.append(FlipAxis(data_generator, axis=0)) # flip image up-down
augmenter.append(FlipAxis(data_generator, axis=1)) # flip image left-right
sometimes = []
sometimes.append(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)},
translate_percent={'x': (-0.05, 0.05), 'y': (-0.05, 0.05)},
shear=(-8, 8),
order=ia.ALL,
cval=ia.ALL,
mode=ia.ALL)
)
sometimes.append(iaa.Affine(scale=(0.8, 1.2),
mode=ia.ALL,
order=ia.ALL,
cval=ia.ALL)
)
augmenter.append(iaa.Sometimes(0.75, sometimes))
augmenter.append(iaa.Affine(rotate=(-180, 180),
mode=ia.ALL,
order=ia.ALL,
cval=ia.ALL)
)
augmenter = iaa.Sequential(augmenter)
# image, keypoints = data_generator[0]
# image, keypoints = augmenter(images=image, keypoints=keypoints)
# plt.figure(figsize=(5, 5))
# image = image[0] if image.shape[-1] is 3 else image[0, ..., 0]
# cmap = None if image.shape[-1] is 3 else 'gray'
# plt.imshow(image, cmap=cmap, interpolation='none')
# for idx, jdx in enumerate(data_generator.graph):
# if jdx > -1:
# x1 = keypoints[0, idx, 0]
# x2 = keypoints[0, jdx, 0]
# if (0 <= x1 <= IMAGE_SIZE[0]) and (0 <= x2 <= IMAGE_SIZE[0]):
# plt.plot(
# [keypoints[0, idx, 0], keypoints[0, jdx, 0]],
# [keypoints[0, idx, 1], keypoints[0, jdx, 1]],
# 'r-'
# )
# plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50,
#             cmap=plt.cm.hsv, zorder=3)
# plt.show()
train_generator = TrainingGenerator(generator=data_generator,
downsample_factor=3,
augmenter=augmenter,
sigma=5,
validation_split=0,
use_graph=False,
random_seed=1,
graph_scale=1)
train_generator.get_config()
# n_keypoints = data_generator.keypoints_shape[0]
# batch = train_generator(batch_size=1, validation=False)[0]
# inputs = batch[0]
# outputs = batch[1]
# fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10, 10))
# ax1.set_title('image')
# ax1.imshow(inputs[0, ..., 0], vmin=0, vmax=255)
#
# ax2.set_title('posture graph')
# ax2.imshow(outputs[0, ..., n_keypoints:-1].max(-1))
#
# ax3.set_title('keypoints confidence')
# ax3.imshow(outputs[0, ..., :n_keypoints].max(-1))
#
# ax4.set_title('posture graph and keypoints confidence')
# ax4.imshow(outputs[0, ..., -1], vmin=0)
# plt.show()
train_generator.on_epoch_end()
# Define a model
model = StackedHourglass(train_generator)
model.get_config()
# data_size = (10,) + data_generator.image_shape
# x = np.random.randint(0, 255, data_size, dtype="uint8")
# y = model.predict(x[:100], batch_size=100) # make sure the model is in GPU memory
# t0 = time.time()
# y = model.predict(x, batch_size=100, verbose=1)
# t1 = time.time()
# print(x.shape[0] / (t1 - t0))
# logger = Logger(validation_batch_size=10,
# # filepath saves the logger data to a .h5 file
# filepath=HOME + "/deepposekit-data/datasets/{}/log_densenet.h5".format(TYPE)
# )
# Remember, if you set validation_split=0 for your TrainingGenerator,
# which will just use the training set for model fitting,
# make sure to set monitor="loss" instead of monitor="val_loss".
reduce_lr = ReduceLROnPlateau(monitor="loss", factor=0.2, verbose=1, patience=20)
model_checkpoint = ModelCheckpoint(
HOME + "/deepposekit-data/datasets/{}/model_densenet.h5".format(TYPE),
monitor="loss",
# monitor="loss" # use if validation_split=0
verbose=1,
save_best_only=True,
)
early_stop = EarlyStopping(
monitor="loss",
# monitor="loss" # use if validation_split=0
min_delta=0.001,
patience=100,
verbose=1
)
callbacks = [early_stop, reduce_lr, model_checkpoint]
model.fit(
batch_size=5,
validation_batch_size=10,
callbacks=callbacks,
# epochs=1000, # Increase the number of epochs to train the model longer
epochs=50,
n_workers=8,
steps_per_epoch=None,
)
# model = load_model(
# HOME + "/deepposekit-data/datasets/{}/model_densenet.h5".format(TYPE),
# augmenter=augmenter,
# generator=data_generator,
# )
#
# model.fit(
# batch_size=2,
# validation_batch_size=10,
# callbacks=callbacks,
# epochs=50,
# n_workers=8,
# steps_per_epoch=None,
# )
| 32.697436 | 132 | 0.606336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,693 | 0.422365 |
cd8237accaa927ddf6513747162736a47cc442f6 | 763 | py | Python | northpole/settings/local_staging.py | mhotwagner/northpole | 7d904d919aeb6a36549750ee0700578246896691 | ["MIT"] | null | null | null | northpole/settings/local_staging.py | mhotwagner/northpole | 7d904d919aeb6a36549750ee0700578246896691 | ["MIT"] | null | null | null | northpole/settings/local_staging.py | mhotwagner/northpole | 7d904d919aeb6a36549750ee0700578246896691 | ["MIT"] | null | null | null |
from .base import *
from dotenv import load_dotenv
load_dotenv(dotenv_path='northpole/.staging.env', verbose=True)
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('POSTGRES_DB', 'northpole-staging'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': os.getenv('POSTGRES_PORT', '5432'),
}
}
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, '..', 'static_source'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
MEDIA_URL = '/media/'
| 25.433333 | 64 | 0.651376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 319 | 0.418087 |
cd836f4eaf2d0f0894b304e6d9d109cacae91338 | 12,587 | py | Python | bc4py/bip32/bip32.py | namuyan/bc4py | 6484d356096261d0d57e9e1f5ffeae1f9a9865f3 | ["MIT"] | 12 | 2018-09-19T14:02:09.000Z | 2020-01-27T16:20:14.000Z | bc4py/bip32/bip32.py | kumacoinproject/bc4py | 6484d356096261d0d57e9e1f5ffeae1f9a9865f3 | ["MIT"] | 1 | 2020-03-19T16:57:30.000Z | 2020-03-19T16:57:30.000Z | bc4py/bip32/bip32.py | namuyan/bc4py | 6484d356096261d0d57e9e1f5ffeae1f9a9865f3 | ["MIT"] | 6 | 2018-11-13T17:20:14.000Z | 2020-02-15T11:46:52.000Z |
#!/usr/bin/env python
#
# Copyright 2014 Corgan Labs
# See LICENSE.txt for distribution terms
#
from bc4py.bip32.base58 import check_decode, check_encode
from bc4py_extension import PyAddress
from ecdsa.curves import SECP256k1
from ecdsa.keys import SigningKey, VerifyingKey, square_root_mod_prime as mod_sqrt
from ecdsa.ecdsa import generator_secp256k1, int_to_string
from ecdsa.ellipticcurve import Point, INFINITY
from os import urandom
import hmac
import hashlib
import codecs
import struct
CURVE_GEN = generator_secp256k1 # Point class
CURVE_ORDER = CURVE_GEN.order() # int
FIELD_ORDER = SECP256k1.curve.p() # int
MIN_ENTROPY_LEN = 128 # bits
BIP32_HARDEN = 0x80000000 # choose from hardened set of child keys
EX_MAIN_PRIVATE = [codecs.decode('0488ade4', 'hex')] # Version strings for mainnet extended private keys
EX_MAIN_PUBLIC = [codecs.decode('0488b21e', 'hex'),
codecs.decode('049d7cb2', 'hex')] # Version strings for mainnet extended public keys
EX_TEST_PRIVATE = [codecs.decode('04358394', 'hex')] # Version strings for testnet extended private keys
EX_TEST_PUBLIC = [codecs.decode('043587CF', 'hex')] # Version strings for testnet extended public keys
WALLET_VERSION = b'\x80'
class Bip32(object):
__slots__ = ("secret", "public", "chain", "depth", "index", "parent_fpr", "path")
def __init__(self, secret, public, chain, depth, index, fpr, path):
self.secret: SigningKey = secret
self.public: VerifyingKey = public
self.chain: bytes = chain
self.depth: int = depth
self.index: int = index
self.parent_fpr: bytes = fpr
self.path: str = path
def __repr__(self):
key_type = "PUB" if self.secret is None else "SEC"
return "<BIP32-{} depth={} path={}>".format(key_type, self.depth, self.path)
@classmethod
def from_entropy(cls, entropy, is_public=False):
"""Create a BIP32Key using supplied entropy >= MIN_ENTROPY_LEN"""
if entropy is None:
entropy = urandom(MIN_ENTROPY_LEN // 8) # Python doesn't have os.random()
if not len(entropy) >= MIN_ENTROPY_LEN // 8:
raise ValueError("Initial entropy %i must be at least %i bits" % (len(entropy), MIN_ENTROPY_LEN))
i64 = hmac.new(b"Bitcoin seed", entropy, hashlib.sha512).digest()
il, ir = i64[:32], i64[32:]
# FIXME test Il for 0 or less than SECP256k1 prime field order
secret = SigningKey.from_string(il, SECP256k1)
public = secret.verifying_key
if is_public:
return cls(secret=None, public=public, chain=ir, depth=0, index=0, fpr=b'\0\0\0\0', path='m')
else:
return cls(secret=secret, public=public, chain=ir, depth=0, index=0, fpr=b'\0\0\0\0', path='m')
@classmethod
def from_extended_key(cls, key, is_public=False):
"""
Create a BIP32Key by importing from an extended private or public key string.
If is_public is True, return a public-only key regardless of input type.
"""
# Sanity checks
if isinstance(key, str):
raw = check_decode(key)
else:
raw = b'\x00\x00\x00\x00' + key
if len(raw) != 78:
raise ValueError("extended key format wrong length")
# Verify address version/type
version = raw[:4]
if version == b'\x00\x00\x00\x00':
is_testnet = None
is_pubkey = None
elif version in EX_MAIN_PRIVATE:
is_testnet = False
is_pubkey = False
elif version in EX_TEST_PRIVATE:
is_testnet = True
is_pubkey = False
elif version in EX_MAIN_PUBLIC:
is_testnet = False
is_pubkey = True
elif version in EX_TEST_PUBLIC:
is_testnet = True
is_pubkey = True
else:
raise ValueError("unknown extended key version")
# Extract remaining fields
depth = raw[4]
fpr = raw[5:9]
child = struct.unpack(">L", raw[9:13])[0]
chain = raw[13:45]
data = raw[45:78]
# check prefix of key
is_pubkey = (data[0] == 2 or data[0] == 3)
# Extract private key or public key point
if not is_pubkey:
secret = SigningKey.from_string(data[1:], SECP256k1)
public = secret.verifying_key
else:
# Recover public curve point from compressed key
# Python3 FIX
lsb = data[0] & 1 if type(data[0]) == int else ord(data[0]) & 1
x = int.from_bytes(data[1:], 'big')
ys = (x**3 + 7) % FIELD_ORDER # y^2 = x^3 + 7 mod p
y = mod_sqrt(ys, FIELD_ORDER)
if y & 1 != lsb:
y = FIELD_ORDER - y
secret = None
point = Point(SECP256k1.curve, x, y)
public = VerifyingKey.from_public_point(point, SECP256k1)
if not is_pubkey and is_public:
return cls(secret=None, public=public, chain=chain, depth=depth, index=child, fpr=fpr, path='m')
else:
return cls(secret=secret, public=public, chain=chain, depth=depth, index=child, fpr=fpr, path='m')
# Internal methods not intended to be called externally
def _hmac(self, data):
"""
Calculate the HMAC-SHA512 of input data using the chain code as key.
Returns a tuple of the left and right halves of the HMAC
"""
i64 = hmac.new(self.chain, data, hashlib.sha512).digest()
return i64[:32], i64[32:]
def CKDpriv(self, i):
"""
Create a child key of index 'i'.
If the most significant bit of 'i' is set, then select from the
hardened key set, otherwise, select a regular child key.
Returns a BIP32Key constructed with the child key parameters,
or None if i index would result in an invalid key.
"""
# Index as bytes, BE
i_str = struct.pack(">L", i)
# Data to HMAC
if i & BIP32_HARDEN:
data = b'\0' + self.get_private_key() + i_str
path = self.path + '/' + str(i % BIP32_HARDEN) + '\''
else:
data = self.get_public_key() + i_str
path = self.path + '/' + str(i)
# Get HMAC of data
(Il, Ir) = self._hmac(data)
# Construct new key material from Il and current private key
Il_int = int.from_bytes(Il, 'big')
if Il_int > CURVE_ORDER:
return None
sec_int = int.from_bytes(self.secret.to_string(), 'big')
k_int = (Il_int + sec_int) % CURVE_ORDER
if k_int == 0:
return None
# Construct and return a new BIP32Key
secret = SigningKey.from_string(int_to_string(k_int), SECP256k1)
public = secret.verifying_key
return Bip32(secret=secret, public=public, chain=Ir, depth=self.depth + 1, index=i, fpr=self.fingerprint(), path=path)
def CKDpub(self, i):
"""
Create a publicly derived child key of index 'i'.
If the most significant bit of 'i' is set, this is
an error.
Returns a BIP32Key constructed with the child key parameters,
or None if index would result in invalid key.
"""
if i & BIP32_HARDEN:
raise Exception("Cannot create a hardened child key using public child derivation")
# Data to HMAC. Same as CKDpriv() for public child key.
data = self.get_public_key() + struct.pack(">L", i)
# Get HMAC of data
(Il, Ir) = self._hmac(data)
# Construct curve point Il*G+K
Il_int = int.from_bytes(Il, 'big')
if Il_int >= CURVE_ORDER:
return None
point = Il_int*CURVE_GEN + self.public.pubkey.point
if point == INFINITY:
return None
public = VerifyingKey.from_public_point(point, SECP256k1)
# Construct and return a new BIP32Key
path = self.path + '/' + str(i)
return Bip32(
secret=None, public=public, chain=Ir, depth=self.depth + 1, index=i, fpr=self.fingerprint(), path=path)
def child_key(self, i):
"""
Create and return a child key of this one at index 'i'.
The index 'i' should be summed with BIP32_HARDEN to indicate
to use the private derivation algorithm.
"""
if self.secret is None:
return self.CKDpub(i)
else:
return self.CKDpriv(i)
def get_private_key(self) -> bytes:
if self.secret is None:
raise Exception("Publicly derived deterministic keys have no private half")
else:
return self.secret.to_string()
def get_public_key(self):
point: Point = self.public.pubkey.point
if point.y() & 1:
return b'\3' + int_to_string(point.x())
else:
return b'\2' + int_to_string(point.x())
def get_address(self, hrp, ver) -> PyAddress:
"""Return bech32 compressed address"""
return PyAddress.from_param(hrp, ver, self.identifier())
def identifier(self):
"""Return key identifier as string"""
pk = self.get_public_key()
return hashlib.new('ripemd160', hashlib.sha256(pk).digest()).digest()
def fingerprint(self):
"""Return key fingerprint as string"""
return self.identifier()[:4]
def extended_key(self, is_private=True, encoded=True, is_testnet=False):
"""Return extended private or public key as string, optionally base58 encoded"""
if self.secret is None and is_private is True:
raise Exception("Cannot export an extended private key from a public-only deterministic key")
if is_testnet:
version = EX_TEST_PRIVATE[0] if is_private else EX_TEST_PUBLIC[0]
else:
version = EX_MAIN_PRIVATE[0] if is_private else EX_MAIN_PUBLIC[0]
depth = self.depth.to_bytes(1, 'big')
fpr = self.parent_fpr
child = struct.pack('>L', self.index)
chain = self.chain
if self.secret is None or is_private is False:
# startswith b'\x02' or b'\x03'
data = self.get_public_key()
else:
# startswith b'\x00'
data = b'\x00' + self.get_private_key()
if encoded:
return check_encode(version + depth + fpr + child + chain + data)
else:
return depth + fpr + child + chain + data
def wallet_import_format(self, prefix=WALLET_VERSION):
"""Returns private key encoded for wallet import"""
if self.secret is None:
raise Exception("Publicly derived deterministic keys have no private half")
raw = prefix + self.get_private_key() + b'\x01' # Always compressed
return check_encode(raw)
def dump(self):
"""Dump key fields mimicking the BIP0032 test vector format"""
print(" * Identifier")
print(" * (hex): ", self.identifier().hex())
print(" * (fpr): ", self.fingerprint().hex())
print(" * (main addr):", self.get_address('bc', 0))
print(" * (path): ", self.path)
if self.secret:
print(" * Secret key")
print(" * (hex): ", self.get_private_key().hex())
print(" * (wif): ", self.wallet_import_format())
print(" * Public key")
print(" * (hex): ", self.get_public_key().hex())
print(" * Chain code")
print(" * (hex): ", self.chain.hex())
print(" * Serialized")
print(" * (pub hex): ", self.extended_key(is_private=False, encoded=False).hex())
print(" * (pub b58): ", self.extended_key(is_private=False, encoded=True))
if self.secret:
print(" * (prv hex): ", self.extended_key(is_private=True, encoded=False).hex())
print(" * (prv b58): ", self.extended_key(is_private=True, encoded=True))
def parse_bip32_path(path):
"""parse BIP32 format"""
r = list()
for s in path.split('/'):
if s == 'm':
continue
elif s.endswith("'") or s.endswith('h'):
r.append(int(s[:-1]) + BIP32_HARDEN)
else:
r.append(int(s))
return r
def struct_bip32_path(path):
"""struct BIP32 string path"""
s = 'm'
for p in path:
if p & BIP32_HARDEN:
s += "/{}'".format(p % BIP32_HARDEN)
else:
s += "/{}".format(p)
return s
__all__ = [
"BIP32_HARDEN",
"Bip32",
"parse_bip32_path",
"struct_bip32_path",
]
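
# Usage sketch (illustrative; the derivation path and indexes are examples only):
# derive m/44'/0'/0' from fresh entropy and export the child key as base58.
# root = Bip32.from_entropy(None)  # None draws MIN_ENTROPY_LEN bits from urandom
# child = root
# for index in parse_bip32_path("m/44'/0'/0'"):
#     child = child.child_key(index)
# print(child.extended_key(is_private=True, encoded=True))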
| 37.573134 | 126 | 0.593072 | 10,731 | 0.852546 | 0 | 0 | 3,346 | 0.26583 | 0 | 0 | 3,790 | 0.301104 |
cd83dd3751ba2089366bb8592c6a8484b3986736 | 1,167 | py | Python | lib/utils/useragent.py | cckuailong/pocsploit | fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a | ["MIT"] | 106 | 2022-03-18T06:51:09.000Z | 2022-03-31T19:11:41.000Z | lib/utils/useragent.py | cckuailong/pocsploit | fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a | ["MIT"] | 5 | 2022-03-27T07:37:32.000Z | 2022-03-31T13:56:11.000Z | lib/utils/useragent.py | cckuailong/pocsploit | fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a | ["MIT"] | 30 | 2022-03-21T01:27:08.000Z | 2022-03-31T12:28:01.000Z |
import random
from loguru import logger
from lib.vars.vars import conf, th, paths
from lib.vars.ua import UA_LIST
def get_random_agent():
return random.sample(UA_LIST, 1)[0]
def firefox():
return 'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0'
def ie():
return 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'
def chrome():
return 'Mozilla/5.0 (Windows NT 5.2) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30'
def opera():
return 'Opera/9.80 (Windows NT 5.1; U; zh-cn) Presto/2.9.168 Version/11.50'
def iphone():
return 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16'
def google_bot():
return 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
def msn_bot():
return 'msnbot/1.1 (+http://search.msn.com/msnbot.htm)'
def yahoo_bot():
return 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)'
| 28.463415 | 184 | 0.685518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 759 | 0.650386 |
cd865fa7395cf48130baac47f65fb9a0acdb8fa6 | 1,378 | py | Python | etapa 2/gaussJacobi.py | jlucartc/MetodosNumericos20182 | d5610b95945ed6ec9b9bae6cd96672f4d616c1b9 | [
"MIT"
] | null | null | null | etapa 2/gaussJacobi.py | jlucartc/MetodosNumericos20182 | d5610b95945ed6ec9b9bae6cd96672f4d616c1b9 | [
"MIT"
] | null | null | null | etapa 2/gaussJacobi.py | jlucartc/MetodosNumericos20182 | d5610b95945ed6ec9b9bae6cd96672f4d616c1b9 | [
"MIT"
] | null | null | null | import numpy as np
from sympy import *
from math import *
from timeit import default_timer as timer
start = None
end = None
def maxXi(Xn,X):
n = None
d = None
for i in range(Xn.shape[0]):
if(np.copy(Xn[i,0]) != 0):
nk = abs(np.copy(Xn[i,0]) - np.copy(X[i,0]))/abs(np.copy(Xn[i,0]))
dk = abs(np.copy(Xn[i,0]))
if n == None or nk > n:
n = nk
if d == None or dk > d:
d = dk
return n/d
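
# Method note (added explanation, not from the original file): the script below
# builds the Jacobi iteration x_{k+1} = C @ x_k + g, where C[i][j] = -A[i][j]/A[i][i]
# for i != j, C[i][i] = 0, and g[i] = b[i]/A[i][i]. Iteration stops once the
# normalized componentwise change computed by maxXi() falls below the precision e.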
A = np.matrix(eval(input("Enter the augmented matrix: ")))
A = A.astype(float)
X = np.matrix(eval(input("Enter the initial guess X: ")))
e = float(input("Enter the precision: "))
B = np.copy(A[:,A.shape[1]-1])
A = np.delete(np.copy(A),A.shape[1]-1,1)
C = np.asmatrix(np.zeros([A.shape[0],A.shape[1]]))
C = C.astype(float)
G = np.copy(B)
for i in range(C.shape[0]):
for j in range(C.shape[1]):
if i != j:
C[i,j] = (np.copy(A[i,j])/np.copy(A[i,i]))*(-1)
G[i,0] = (np.copy(G[i,0]))/(np.copy(A[i,i]))
C[i,i] = 0
Xn = None
z = True
print("Matriz C:\n",C)
print("Matriz G:\n",G)
start = timer()
while(z):
Xn = (np.copy(C) @ np.copy(X)) + np.copy(G)
d = maxXi(np.copy(Xn),np.copy(X))
if(d < e):
z = False
else:
X = np.copy(Xn)
end = timer()
print("Resposta de Gauss-Jacobi: ")
print(Xn)
print("Tempo de execucao total: %e segundos" % (end - start))
| 18.621622 | 78 | 0.523948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.108049 |
cd8770a9a9b49ceb88698ef2075f53487bd2aca7 | 8,139 | py | Python | custom_libs/Project2/plotter.py | drkostas/COSC522 | 5731576301daf99ca7c3d382fe3ea8b1398008ff | ["MIT"] | 1 | 2021-12-22T14:29:42.000Z | 2021-12-22T14:29:42.000Z | custom_libs/Project2/plotter.py | drkostas/COSC522 | 5731576301daf99ca7c3d382fe3ea8b1398008ff | ["MIT"] | 3 | 2021-10-13T02:14:30.000Z | 2021-11-24T05:28:32.000Z | custom_libs/Project2/plotter.py | drkostas/COSC522 | 5731576301daf99ca7c3d382fe3ea8b1398008ff | ["MIT"] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
class Plotter:
synth_tr: np.ndarray
synth_te: np.ndarray
pima_tr: np.ndarray
pima_te: np.ndarray
def __init__(self, synth_tr: np.ndarray, synth_te: np.ndarray, pima_tr: np.ndarray,
pima_te: np.ndarray):
self.synth_tr = synth_tr
self.synth_te = synth_te
self.pima_tr = pima_tr
self.pima_te = pima_te
def plot_dataset(self):
fig, ax = plt.subplots(1, 3, figsize=(11, 4))
plot_color = 'dodgerblue'
# synth_tr f1-f2 Scatter Plot
ax[0].scatter(self.synth_tr[:, 0][self.synth_tr[:, -1] == 0],
self.synth_tr[:, 1][self.synth_tr[:, -1] == 0],
color='royalblue', s=12, marker='o', label="Class 0")
ax[0].scatter(self.synth_tr[:, 0][self.synth_tr[:, -1] == 1],
self.synth_tr[:, 1][self.synth_tr[:, -1] == 1],
color='red', s=12, marker='o', label="Class 1")
ax[0].margins(0.1) # 1% padding in all directions
ax[0].set_title("Synth Dataset Scatter Plot")
ax[0].set_xlabel("Feature 1")
ax[0].set_ylabel("Feature 2")
ax[0].legend()
ax[0].grid(True)
# f1 Hist
hist, bins, patches = ax[1].hist(self.synth_tr[:, 0], density=True, bins=20, color=plot_color,
edgecolor='black',
linewidth=0.5) # density=False would make counts
ax[1].set_title("Synth Dataset Density Histogram")
ax[1].set_xlabel("Feature 1")
ax[1].set_ylabel("Density")
ax[1].margins(0.1) # 1% padding in all directions
# f2 Hist
hist, bins, patches = ax[2].hist(self.synth_tr[:, 1], density=True, bins=20, color=plot_color,
edgecolor='black',
linewidth=0.5) # density=False would make counts
ax[2].set_title("Synth Dataset Density Histogram")
ax[2].set_xlabel("Feature 2")
ax[2].set_ylabel("Density")
ax[2].margins(0.1) # 1% padding in all directions
fig.tight_layout()
fig.show()
@staticmethod
def plot_knn_overall_accuracies(synth_k_range, synth_accuracies, pima_k_range, pima_accuracies):
fig, ax = plt.subplots(2, 1, figsize=(9, 9))
# Synth Dataset
ax[0].plot(synth_k_range, synth_accuracies, label='Synthetic Dataset', color='deepskyblue')
ax[0].set_title('Overall Classification accuracy vs k for the Synthetic Dataset')
ax[0].set_xlabel('k')
ax[0].set_ylabel('Overall Classification Accuracy')
_ = ax[0].set_xticks(synth_k_range)
ax[0].legend()
# Pima Dataset
ax[1].plot(pima_k_range, pima_accuracies, label='Pima Dataset', color='orange')
ax[1].set_title('Overall Classification accuracy vs k for the Pima Dataset')
ax[1].set_xlabel('k')
ax[1].set_ylabel('Overall Classification Accuracy')
_ = ax[1].set_xticks(pima_k_range)
ax[1].legend()
# Show plot
fig.tight_layout()
fig.show()
@staticmethod
def plot_decision_boundaries(knn, h: float = 0.2):
# Init values statically from Project 1
a_eucl = -0.8326229483927666
b_eucl = 0.44378197841356054
a_maha = -0.13486408662390306
b_maha = 0.49454949088419903
A = -2.9353736949690252
B = -7.122064910873636
C = -9.131232270572491
D = -4.023021305932989
E = 29.777685196099192
F = -14.251862334038359
means = np.array([[-0.22147024, 0.32575494], [0.07595431, 0.68296891]])
means_center = np.array([-0.07275796159999995, 0.5043619269200001])
a_m = 1.2010238270880302
b_m = 0.591745972411956
# Plot the Decision Boundaries
fig, ax = plt.subplots(1, 1, figsize=(11, 9))
eucl_x_range = np.linspace(-0.8, 0.9, 50)
maha_x_range = np.linspace(-1, 1, 50)
quadr_x_range = np.linspace(-1.1, 1.1, 50)
quadr_y_range = np.linspace(-0.2, 1.1, 50)
# KNN Decision Boundaries
cmap_light = ListedColormap(['lightblue', 'moccasin'])
# KNN Decision Boundaries
x, y = knn.train_x, knn.train_y
x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
x_target = np.c_[xx.ravel(), yy.ravel()]
Z = knn.predict(x_target, only_x=True)
Z = Z.reshape(xx.shape)
knn_contour_plot = ax.contourf(xx, yy, Z, cmap=cmap_light)
# Class 0 Scatter plot
ax.scatter(x[:, 0][y == 0], x[:, 1][y == 0],
color='royalblue', s=10, label='Class 0')
# Class 1 Scatter plot
ax.scatter(x[:, 0][y == 1], x[:, 1][y == 1],
color='red', s=10, label='Class 1')
# Decision Boundaries
# Euclidean
ax.plot(eucl_x_range, a_eucl * eucl_x_range + b_eucl, color='orange',
label=f'Euclidean Decision Boundary')
# Mahalanobis
ax.plot(maha_x_range, a_maha * maha_x_range + b_maha, color='deepskyblue',
label=f'Mahalanobis Decision Boundary')
# Quadratic
x_quad, y_quad = np.meshgrid(quadr_x_range, quadr_y_range)
quadr_equation = A * x_quad ** 2 + B * y_quad ** 2 + C * x_quad * y_quad + D * x_quad + E * y_quad + F
quad_contour_plt = ax.contour(x_quad, y_quad, quadr_equation, [0],
colors='limegreen')
ax.clabel(quad_contour_plt, inline=1, fontsize=10)
quad_contour_plt.collections[0].set_label('Quadratic Decision Boundary')
# Line that links the means of the two classes
mline_x_range = np.linspace(means[0][0], means[1][0], 5)
ax.plot(mline_x_range, a_m * mline_x_range + b_m,
color='m', linestyle='dashed', label='Line linking the two means')
# Class 0 Mean value
ax.plot(means[0][0], means[0][1],
'bo', markersize=11, markeredgecolor='w', label='Class 0 Mean value')
# Class 1 Mean value
ax.plot(means[1][0], means[1][1],
'ro', markersize=11, markeredgecolor='w', label='Class 1 Mean value')
# Center of the linking line
ax.plot(means_center[0], means_center[1],
'mo', markersize=11, markeredgecolor='w',
label=f'Center of the linking line')
# Show figure
ax.set_title(
"The three Decision Boundaries plotted against the scatter plot of the two features")
# ax.axis('equal')
ax.set_xlim(-1.35, 1.3)
ax.set_ylim(-0.35, 1.15)
ax.set_xlabel("Feature 1")
ax.set_ylabel("Feature 2")
ax.legend(loc='upper left')
# ax.margins(0.1)
fig.show()
@staticmethod
def plot_membership_changes(kmeans_membership_changes, wta_membership_changes, epsilon):
fig, ax = plt.subplots(2, 1, figsize=(9, 9))
# Pima, Kmeans
kmeans_range = range(2, len(kmeans_membership_changes)+2)
ax[0].plot(kmeans_range, kmeans_membership_changes,
label=f'Kmeans', color='deepskyblue')
ax[0].set_title('Membership Changes per epoch for Kmeans on Pima Dataset')
ax[0].set_xlabel('Epoch')
ax[0].set_ylabel('Membership Changes')
_ = ax[0].set_xticks(kmeans_range)
ax[0].legend()
# Pima, WTA
wta_range = range(2, len(wta_membership_changes) + 2)
ax[1].plot(wta_range, wta_membership_changes,
label=f'WTA: epsilon={epsilon}', color='orange')
ax[1].set_title('Membership Changes per epoch for WTA on Pima Dataset')
ax[1].set_xlabel('Epoch')
ax[1].set_ylabel('Membership Changes')
_ = ax[1].set_xticks(wta_range)
ax[1].legend()
# Show plot
fig.tight_layout()
fig.show()
| 44.966851 | 110 | 0.578327 | 8,041 | 0.987959 | 0 | 0 | 5,863 | 0.720359 | 0 | 0 | 1,776 | 0.218209 |
cd887102450875f1d2f5fd98ea87c44fd4dd0888 | 303 | py | Python | Python/8/SquareSum/square_sum.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | ["MIT"] | null | null | null | Python/8/SquareSum/square_sum.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | ["MIT"] | 6 | 2020-02-21T17:01:59.000Z | 2021-05-04T07:04:41.000Z | Python/8/SquareSum/square_sum.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | ["MIT"] | null | null | null |
import sys
def square_sum(numbers):
return sum([n ** 2 for n in numbers])
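# Example (illustrative): square_sum([1, 2, 2]) -> 9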
if __name__ == "__main__":
if len(sys.argv) == 1:
nums = [int(e) for e in input('>>> Enter the numbers, comma-separated: ').split(',')]
print(square_sum(numbers=nums))
else:
sys.exit(1)
| 23.307692 | 97 | 0.590759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.194719 |
cd88a71325bc436cee432caaafbbae9750fb46b0 | 5,710 | py | Python | spatial_ops/lazy_loader.py | LucaMarconato/spatial_ops | 86e5b8557db9efa2ca263098ed47c16de05fab00 | ["MIT"] | null | null | null | spatial_ops/lazy_loader.py | LucaMarconato/spatial_ops | 86e5b8557db9efa2ca263098ed47c16de05fab00 | ["MIT"] | null | null | null | spatial_ops/lazy_loader.py | LucaMarconato/spatial_ops | 86e5b8557db9efa2ca263098ed47c16de05fab00 | ["MIT"] | null | null | null |
import os
import pickle
from abc import ABC, abstractmethod
import h5py
import numpy as np
from .folders import get_pickle_lazy_loader_data_path, hdf5_lazy_loader_data_path
from .unpickler import CustomUnpickler
class LazyLoaderAssociatedInstance:
# When implementing this method in a derived class, add a unique suffix (depending on the derived class and not
# on a specific instance of the derived class) to ensure uniqueness among the different classes deriving from
# this class. For example, if we have a Dog named 'Bobby' and a Cat named 'Bobby', good unique identifiers are
# 'Cat_Bobby' and 'Dog_Bobby', not just 'Bobby'
def get_lazy_loader_unique_identifier(self) -> str:
raise NotImplementedError
class LazyLoader(ABC):
def __init__(self, associated_instance: LazyLoaderAssociatedInstance):
self.associated_instance = associated_instance
@abstractmethod
def get_resource_unique_identifier(self) -> str:
pass
@abstractmethod
def precompute(self):
pass
@abstractmethod
def delete_precomputation(self):
pass
@abstractmethod
def has_data_already_been_precomputed(self):
pass
def precompute_if_needed(self):
if not self.has_data_already_been_precomputed():
self.precompute()
@abstractmethod
def _load_precomputed_data(self):
pass
def load_data(self, store_precomputation_on_disk=True):
if self.associated_instance is None:
raise ValueError(f'self.associated_instance = {self.associated_instance}')
if not self.has_data_already_been_precomputed():
# print('precomputing')
data = self.precompute()
if data is None:
raise ValueError(f'data = {data}')
if store_precomputation_on_disk:
self._save_data(data)
return data
else:
# print('loading')
return self._load_precomputed_data()
@abstractmethod
def _save_data(self, data):
pass
class PickleLazyLoader(LazyLoader, ABC):
def get_pickle_path(self):
path = os.path.join(get_pickle_lazy_loader_data_path(),
self.associated_instance.get_lazy_loader_unique_identifier())
os.makedirs(path, exist_ok=True)
path = os.path.join(path, self.get_resource_unique_identifier() + '.pickle')
return path
def _load_precomputed_data(self):
pickle_path = self.get_pickle_path()
try:
data = CustomUnpickler(open(pickle_path, 'rb')).load()
# if the pickle is corrupted because a previous execution of the program was terminated while pickling a file
# then we want to delete the pickle file and recompute it
except EOFError:
self.delete_precomputation()
data = self.load_data()
return data
def has_data_already_been_precomputed(self):
pickle_path = self.get_pickle_path()
return os.path.isfile(pickle_path)
def delete_precomputation(self):
os.remove(self.get_pickle_path())
def _save_data(self, data):
pickle.dump(data, open(self.get_pickle_path(), 'wb'))
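# create the shared HDF5 store on first import so the 'r+' opens below always succeed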
if not os.path.isfile(hdf5_lazy_loader_data_path):
f = h5py.File(hdf5_lazy_loader_data_path, 'w')
f.close()
class HDF5LazyLoader(LazyLoader, ABC):
# just for convenience, to have the path ready when deriving the subclass
def get_hdf5_file_path(self) -> str:
return hdf5_lazy_loader_data_path
def get_hdf5_resource_internal_path(self):
return self.associated_instance.get_lazy_loader_unique_identifier() \
+ '/' + self.get_resource_unique_identifier()
# using a singleton and opening the file only once in r+ mode would lead to better performance, but I will wait
    # to see if the current performance is bad before implementing it
def _load_precomputed_data(self):
with h5py.File(self.get_hdf5_file_path(), 'r') as f:
data = np.array(f[self.get_hdf5_resource_internal_path()][...])
return data
def has_data_already_been_precomputed(self):
with h5py.File(self.get_hdf5_file_path(), 'r') as f:
return self.get_hdf5_resource_internal_path() in f
def delete_precomputation(self):
with h5py.File(self.get_hdf5_file_path(), 'r+') as f:
del f[self.get_hdf5_resource_internal_path()]
def _save_data(self, data):
with h5py.File(self.get_hdf5_file_path(), 'r+') as f:
f[self.get_hdf5_resource_internal_path()] = data
if __name__ == '__main__':
from spatial_ops.data import JacksonFischerDataset as jfd
from spatial_ops.data import Patient
patient = jfd.patients[15]
class NumberOfPlatesLoader0(PickleLazyLoader):
def get_resource_unique_identifier(self) -> str:
return 'example_quantity_pickle'
def precompute(self):
# just to enable the autocompletion within the ide
p: Patient = self.associated_instance
data = f'len = {len(p.plates)}'
return data
derived_quantity = NumberOfPlatesLoader0(patient)
print(derived_quantity.load_data())
class NumberOfPlatesLoader1(HDF5LazyLoader):
def get_resource_unique_identifier(self) -> str:
return 'example_quantity_hdf5'
def precompute(self):
p: Patient = self.associated_instance
data = f'len = {len(p.plates)}'
# data = np.zeros((2, 3))
return data
derived_quantity = NumberOfPlatesLoader1(patient)
# derived_quantity.delete_precomputation()
print(derived_quantity.load_data())
| 33.588235 | 117 | 0.680911 | 4,949 | 0.866725 | 0 | 0 | 403 | 0.070578 | 0 | 0 | 1,167 | 0.204378 |
cd89017afbf663624d11e9b8f48f90440b465747 | 27,270 | py | Python | connector/binance/websockets.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
] | null | null | null | connector/binance/websockets.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
] | null | null | null | connector/binance/websockets.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
] | null | null | null | # @date 2020-01-31
# @author Frederic Scherma, All rights reserved without prejudices.
# @license Copyright (c) 2020 Dream Overflow
# Binance Websocket connector.
import json
import threading
import traceback
from autobahn.twisted.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
from twisted.internet import ssl, reactor # , reactor
from twisted.internet.protocol import ReconnectingClientFactory
from connector.binance.client import Client
from monitor.service import MonitorService
import logging
logger = logging.getLogger('siis.connector.binance.ws')
error_logger = logging.getLogger('siis.error.connector.binance.ws')
traceback_logger = logging.getLogger('siis.traceback.connector.binance.ws')
class BinanceClientProtocol(WebSocketClientProtocol):
def __init__(self, factory):
super().__init__()
self.factory = factory
def onOpen(self):
self.factory.protocol_instance = self
def onConnect(self, response):
subscriptions = self.factory.subscriptions
if subscriptions:
params = []
rid = 1
for subscription, pair in subscriptions.items():
if pair:
params += ["%s@%s" % (p.lower(), subscription) for p in pair]
# else:
# params.append(subscription)
data = {
"method": "SUBSCRIBE",
"params": params,
"id": rid
}
if params:
logger.debug("onConnect %s" % data)
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.sendMessage(payload, isBinary=False)
else:
logger.debug("onConnect %s" % '/'.join(subscriptions.keys()))
# reset the delay after reconnecting
self.factory.resetDelay()
def onMessage(self, payload, isBinary):
if not isBinary:
try:
payload_obj = json.loads(payload.decode('utf8'))
except ValueError:
pass
else:
try:
self.factory.callback(payload_obj)
except Exception as e:
error_logger.error(repr(e))
traceback_logger.error(traceback.format_exc())
# def connectionLost(self, reason):
# WebSocketClientProtocol.connectionLost(self, reason)
# subs = '/'.join(self.factory.subscriptions.keys())
# error_logger.error("Binance WS public connection lost for %s: Reason is %s" % (subs, reason))
class BinanceReconnectingClientFactory(ReconnectingClientFactory):
# set initial delay to a short time
initialDelay = 0.1
maxDelay = 10
maxRetries = 30
class BinanceClientFactory(WebSocketClientFactory, BinanceReconnectingClientFactory):
protocol = BinanceClientProtocol
_reconnect_error_payload = {
'e': 'error',
'm': 'Max reconnect retries reached'
}
def __init__(self, *args, subscription=None, pair=None, **kwargs):
WebSocketClientFactory.__init__(self, *args, **kwargs)
self.protocol_instance = None
self.base_client = None
# active pairs
self.subscriptions = {}
if subscription:
self.subscriptions[subscription] = set(pair or [])
def clientConnectionFailed(self, connector, reason):
if not self.reconnect:
return
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def clientConnectionLost(self, connector, reason):
if not self.reconnect:
return
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def buildProtocol(self, addr):
return BinanceClientProtocol(self)
class BinanceSocketManager(threading.Thread):
"""
Binance spot and futures WS socket and subscription manager.
    @todo Reuse the same connection for multiplex to avoid multiple sockets (have to do it like in the kraken WS).
Also have to be sure to stay connected after 24h.
"""
STREAM_URL = 'wss://stream.binance.com:9443/'
FUTURES_STREAM_URL = 'wss://fstream.binance.com/'
# FUTURES_STREAM_URL = 'wss://fstream3.binance.com'
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 minutes
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT, futures=False):
"""Initialise the BinanceSocketManager
:param client: Binance API client
:type client: binance.Client
:param user_timeout: Custom websocket timeout
:type user_timeout: int
"""
threading.Thread.__init__(self, name="binance-ws")
self._next_id = 2 # 1 is for connect
self.factories = {}
self._conns = {}
self._user_timer = None
self._user_listen_key = None
self._user_callback = None
self._client = client
self._user_timeout = user_timeout
self._future = futures
self._url = BinanceSocketManager.FUTURES_STREAM_URL if futures else BinanceSocketManager.STREAM_URL
def _start_socket(self, id_, path, callback, prefix='ws/', subscription=None, pair=None):
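        # one factory and one reactor connection per stream id; reconnects are
        # handled by BinanceReconnectingClientFactory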
try:
if id_ in self._conns: # path in self._conns:
return False
factory_url = self._url + prefix + path
factory = BinanceClientFactory(factory_url, subscription=subscription, pair=pair)
factory.base_client = self
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
self.factories[id_] = factory
context_factory = ssl.ClientContextFactory()
# self._conns[path] = reactor.connectSSL(factory_url, 443 if self._future else 9443, factory,
# context_factory, 5.0)
# self._conns[path] = connectWS(factory, context_factory)
self._conns[id_] = connectWS(factory, context_factory)
except Exception as e:
logger.error(repr(e))
return path
def start_depth_socket(self, symbol, callback, depth=None):
"""Start a websocket for symbol market depth returning either a diff or a partial book
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param depth: optional Number of depth entries to return, default None. If passed returns a partial book instead of a diff
:type depth: str
:returns: connection key string if successful, False otherwise
Partial Message Format
.. code-block:: python
{
"lastUpdateId": 160, # Last update ID
"bids": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"asks": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
Diff Message Format
.. code-block:: python
{
"e": "depthUpdate", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"U": 157, # First update ID in event
"u": 160, # Final update ID in event
"b": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"a": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
"""
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
return self._start_socket(socket_name, socket_name, callback, subscription='depth', pair=symbol.lower())
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
"""Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
}
"""
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
return self._start_socket(socket_name, socket_name, callback, subscription='kline', pair=symbol.lower())
def start_miniticker_socket(self, callback, update_time=1000):
"""Start a miniticker websocket for all trades
This is not in the official Binance api docs, but this is what
feeds the right column on a ticker page on Binance.
:param callback: callback function to handle messages
:type callback: function
:param update_time: time between callbacks in milliseconds, must be 1000 or greater
:type update_time: int
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'e': '24hrMiniTicker', # Event type
'E': 1515906156273, # Event time
's': 'QTUMETH', # Symbol
'c': '0.03836900', # close
'o': '0.03953500', # open
'h': '0.04400000', # high
'l': '0.03756000', # low
'v': '147435.80000000', # volume
'q': '5903.84338533' # quote volume
}
]
"""
return self._start_socket('!miniTicker', '!miniTicker@arr@{}ms'.format(update_time), callback,
subscription='!miniTicker')
def start_trade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "trade", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"t": 12345, # Trade ID
"p": "0.001", # Price
"q": "100", # Quantity
"b": 88, # Buyer order Id
"a": 50, # Seller order Id
"T": 123456785, # Trade time
"m": true, # Is the buyer the market maker?
"M": true # Ignore.
}
"""
return self._start_socket(symbol.lower() + '@trade', symbol.lower() + '@trade', callback,
subscription='trade', pair=symbol.lower())
def start_aggtrade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#aggregate-trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "aggTrade", # event type
"E": 1499405254326, # event time
"s": "ETHBTC", # symbol
"a": 70232, # aggregated tradeid
"p": "0.10281118", # price
"q": "8.15632997", # quantity
"f": 77489, # first breakdown trade id
"l": 77489, # last breakdown trade id
"T": 1499405254324, # trade time
"m": false, # whether buyer is a maker
"M": true # can be ignored
}
"""
return self._start_socket(symbol.lower() + '@aggTrade', symbol.lower() + '@aggTrade', callback,
subscription='aggTrade', pair=symbol.lower())
def start_symbol_ticker_socket(self, symbol, callback):
"""Start a websocket for a symbol's ticker data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#individual-symbol-ticker-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "24hrTicker", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"p": "0.0015", # Price change
"P": "250.00", # Price change percent
"w": "0.0018", # Weighted average price
"x": "0.0009", # Previous day's close price
"c": "0.0025", # Current day's close price
"Q": "10", # Close trade's quantity
"b": "0.0024", # Best bid price
"B": "10", # Bid bid quantity
"a": "0.0026", # Best ask price
"A": "100", # Best ask quantity
"o": "0.0010", # Open price
"h": "0.0025", # High price
"l": "0.0010", # Low price
"v": "10000", # Total traded base asset volume
"q": "18", # Total traded quote asset volume
"O": 0, # Statistics open time
"C": 86400000, # Statistics close time
"F": 0, # First trade ID
"L": 18150, # Last trade Id
"n": 18151 # Total number of trades
}
"""
return self._start_socket(symbol.lower() + '@ticker', symbol.lower() + '@ticker', callback,
subscription='ticker', pair=symbol.lower())
def start_ticker_socket(self, callback):
"""Start a websocket for all ticker data
By default all markets are included in an array.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#all-market-tickers-stream
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'F': 278610,
'o': '0.07393000',
's': 'BCCBTC',
'C': 1509622420916,
'b': '0.07800800',
'l': '0.07160300',
'h': '0.08199900',
'L': 287722,
'P': '6.694',
'Q': '0.10000000',
'q': '1202.67106335',
'p': '0.00494900',
'O': 1509536020916,
'a': '0.07887800',
'n': 9113,
'B': '1.00000000',
'c': '0.07887900',
'x': '0.07399600',
'w': '0.07639068',
'A': '2.41900000',
'v': '15743.68900000'
}
]
"""
return self._start_socket('!ticker@arr', '!ticker@arr', callback, subscription='!ticker@arr')
def start_book_ticker_socket(self, callback):
"""Start a websocket for all book ticker data
By default all markets are included in an array.
https://binance-docs.github.io/apidocs/futures/en/#all-market-tickers-streams
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
"u":400900217, // order book updateId
"s":"BNBUSDT", // symbol
"b":"25.35190000", // best bid price
"B":"31.21000000", // best bid qty
"a":"25.36520000", // best ask price
"A":"40.66000000" // best ask qty
}
]
"""
return self._start_socket('!bookTicker', '!bookTicker', callback, prefix="stream?streams=",
subscription='!bookTicker')
# def start_multiplex_socket(self, streams, callback):
# """Start a multiplexed socket using a list of socket names.
# User stream sockets can not be included.
#
# Symbols in socket name must be lowercase i.e bnbbtc@aggTrade, neobtc@ticker
#
# Combined stream events are wrapped as follows: {"stream":"<streamName>","data":<rawPayload>}
#
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
#
# :param streams: list of stream names in lower case
# :type streams: list
# :param callback: callback function to handle messages
# :type callback: function
#
# :returns: connection key string if successful, False otherwise
#
# Message Format - see Binance API docs for all types
#
# """
# stream_path = 'streams={}'.format('/'.join(streams))
# return self._start_socket('multiplex', stream_path, callback, subscription='stream?')
def send_subscribe(self, id_, subscription, pair):
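        # live-subscribe additional pairs on an already-open combined stream
        # using the SUBSCRIBE method of the Binance websocket API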
try:
factory = self.factories.get(id_)
if subscription and pair and factory:
if subscription not in factory.subscriptions:
factory.subscriptions[subscription] = set()
factory.subscriptions[subscription].update(pair)
# logger.info("send_subscribe %s / %s" % (id_, factory.protocol_instance))
if factory.protocol_instance:
rid = self._next_id
self._next_id += 1
# logger.info("2 send_subscribe %s" % id_)
data = {
"method": "SUBSCRIBE",
"params": ["%s@%s" % (p.lower(), subscription) for p in pair],
"id": rid
}
# logger.info("send_subscribe %s" % data)
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
factory.protocol_instance.sendMessage(payload, isBinary=False)
except Exception as e:
error_logger.error("%s : %s" % (subscription, repr(e)))
traceback_logger.error(traceback.format_exc())
def send_unsubscribe(self, id_, subscription, pair):
try:
factory = self.factories.get(id_)
if subscription and pair and factory:
if subscription not in factory.subscriptions:
factory.subscriptions[subscription] = set()
factory.subscriptions[subscription] = factory.subscriptions[subscription].difference(pair)
if factory.protocol_instance:
rid = self._next_id
self._next_id += 1
data = {
"method": "UNSUBSCRIBE",
"params": ["%s@%s" % (p.lower(), subscription) for p in pair],
"id": rid
}
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
factory.protocol_instance.sendMessage(payload, isBinary=False)
except Exception as e:
error_logger.error("%s : %s" % (subscription, repr(e)))
traceback_logger.error(traceback.format_exc())
def subscribe_public(self, subscription, pair, callback):
id_ = "_".join([subscription])
if id_ not in self._conns:
# stream_path = 'streams={}'.format('/'.join(subscription))
stream_path = 'streams={}'.format(subscription)
return self._start_socket(subscription, stream_path, callback, subscription=subscription, pair=pair)
else:
reactor.callFromThread(self.send_subscribe, id_, subscription, pair)
def unsubscribe_public(self, subscription, pair):
id_ = "_".join([subscription])
if id_ in self._conns:
reactor.callFromThread(self.send_unsubscribe, id_, subscription, pair)
def start_user_socket(self, callback):
"""Start a websocket for user data
https://www.binance.com/restapipub.html#user-wss-endpoint
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
# Get the user listen key
user_listen_key = self._client.future_stream_get_listen_key() if self._future else self._client.stream_get_listen_key()
# and start the socket with this specific key
conn_key = self._start_user_socket(user_listen_key, callback)
return conn_key
def _start_user_socket(self, user_listen_key, callback):
# With this function we can start a user socket with a specific key
if self._user_listen_key:
# cleanup any sockets with this key
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == self._user_listen_key:
self.stop_socket(conn_key)
break
self._user_listen_key = user_listen_key
self._user_callback = callback
conn_key = self._start_socket('user', self._user_listen_key, callback)
if conn_key:
# start timer to keep socket alive
self._start_user_timer()
return conn_key
def _start_user_timer(self):
self._user_timer = threading.Timer(self._user_timeout, self._keepalive_user_socket)
self._user_timer.setDaemon(True)
self._user_timer.start()
def _keepalive_user_socket(self):
try:
user_listen_key = self._client.future_stream_get_listen_key() if self._future else self._client.stream_get_listen_key()
except Exception as e:
# very rare exception ConnectTimeout
error_logger.error(repr(e))
# assume unchanged
user_listen_key = self._user_listen_key
# check if they key changed and
if user_listen_key != self._user_listen_key:
# Start a new socket with the key received
# `_start_user_socket` automatically cleanup open sockets
# and starts timer to keep socket alive
self._start_user_socket(user_listen_key, self._user_callback)
else:
# Restart timer only if the user listen key is not changed
self._start_user_timer()
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self._url + 'tmp_path')
self._conns[conn_key].disconnect()
del self._conns[conn_key]
# check if we have a user stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._user_listen_key:
self._stop_user_socket()
def _stop_user_socket(self):
if not self._user_listen_key:
return
# stop the timer
self._user_timer.cancel()
self._user_timer = None
self._user_listen_key = None
def run(self):
MonitorService.use_reactor(installSignalHandlers=False)
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {}
| 37.510316 | 136 | 0.541584 | 26,526 | 0.972717 | 0 | 0 | 0 | 0 | 0 | 0 | 15,647 | 0.573781 |
cd8a35bcbfb312cda1686fb97584510659ede9ae | 669 | py | Python | Basic Data Structures/array/ListSlicing.py | rush2catch/algorithms-leetcode | 38a5e6aa33d48fa14fe09c50c28a2eaabd736e55 | [
"MIT"
] | null | null | null | Basic Data Structures/array/ListSlicing.py | rush2catch/algorithms-leetcode | 38a5e6aa33d48fa14fe09c50c28a2eaabd736e55 | [
"MIT"
] | null | null | null | Basic Data Structures/array/ListSlicing.py | rush2catch/algorithms-leetcode | 38a5e6aa33d48fa14fe09c50c28a2eaabd736e55 | [
"MIT"
] | null | null | null | def list_slicing(nums, row, col):
new_matrix = []
minimal = row * col
    if not nums:
        return None
    elif (len(nums) % minimal == 0) and (len(nums) >= minimal):
for r in range(row):
new_matrix.append(nums[r * col : (r + 1) * col])
return new_matrix
else:
return nums
list_0 = [1, 2, 3, 6]
print(list_slicing(list_0, 1, 4))
print(list_slicing(list_0, 2, 4))
list_1 = [1, 2, 4, 5, 6, 9, 4, 6, 5, 8, 1, 4]
print(list_slicing(list_1, 3, 4))
print(list_slicing(list_1, 4, 3))
print(list_slicing(list_1, 2, 6))
print(list_slicing(list_1, 6, 2))
print(list_slicing(list_1, 5, 3))
print(list_slicing(list_1, 2, 5))
| 27.875 | 63 | 0.605381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd8abd04dc151085122e27f4484b76ecb7ff52ac | 225 | py | Python | OnePy/sys_module/base_riskmanager.py | Chandlercjy/OnePyfx | 9bd43b721d3f7352495b6ccab76bd533a3d2e8f2 | [
"MIT"
] | 321 | 2017-07-09T09:25:45.000Z | 2022-03-29T16:51:35.000Z | OnePy/sys_module/base_riskmanager.py | sunzhouhong/OnePy | 4e225945de297ba1211035a7b95b5094cdddc2a7 | [
"MIT"
] | 7 | 2017-08-23T12:10:29.000Z | 2020-03-26T12:56:09.000Z | OnePy/sys_module/base_riskmanager.py | sunzhouhong/OnePy | 4e225945de297ba1211035a7b95b5094cdddc2a7 | [
"MIT"
] | 134 | 2017-07-26T22:29:18.000Z | 2022-03-23T09:22:10.000Z | from OnePy.sys_module.metabase_env import OnePyEnvBase
class RiskManagerBase(OnePyEnvBase):
def __init__(self):
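        # register this risk manager in the shared OnePy environment, keyed by class name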
self.env.risk_managers.update({self.__class__.__name__: self})
def run(self):
pass
| 17.307692 | 70 | 0.715556 | 165 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd8aca443ef9f431942f1f2a5e259a12ad32107f | 3,057 | py | Python | tests/geographic/duplicates/test_find_grid_duplicates.py | PEM-Humboldt/regi0 | 0d64587d5d87f57cddfc7a67bb8baf74cd70adf2 | [
"MIT"
] | null | null | null | tests/geographic/duplicates/test_find_grid_duplicates.py | PEM-Humboldt/regi0 | 0d64587d5d87f57cddfc7a67bb8baf74cd70adf2 | [
"MIT"
] | 15 | 2022-02-03T11:38:37.000Z | 2022-03-09T23:23:04.000Z | tests/geographic/duplicates/test_find_grid_duplicates.py | PEM-Humboldt/regi0 | 0d64587d5d87f57cddfc7a67bb8baf74cd70adf2 | [
"MIT"
] | null | null | null | """
Test cases for the regi0.geographic.duplicates.find_grid_duplicates function.
"""
import numpy as np
import pandas as pd
from regi0.geographic.duplicates import find_grid_duplicates
def test_records_bounds_high_res(records):
result = find_grid_duplicates(
records,
"scientificName",
resolution=0.008333333767967150002,
)
expected = pd.Series(
[
True,
True,
True,
True,
True,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
]
)
pd.testing.assert_series_equal(result, expected, check_dtype=False)
def test_other_bounds_low_res(records):
result = find_grid_duplicates(
records,
"scientificName",
resolution=0.1333333402874744,
bounds=(-78.9909352282, -4.29818694419, -66.8763258531, 12.4373031682),
)
expected = pd.Series(
[
True,
True,
True,
True,
True,
True,
True,
False,
False,
True,
True,
True,
False,
False,
True,
True,
True,
False,
np.nan,
False,
False,
np.nan,
]
)
pd.testing.assert_series_equal(result, expected, check_dtype=False)
def test_keep_first(records):
result = find_grid_duplicates(
records, "scientificName", resolution=0.008333333767967150002, keep="first"
)
expected = pd.Series(
[
False,
True,
True,
True,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
]
)
pd.testing.assert_series_equal(result, expected, check_dtype=False)
def test_keep_last(records):
result = find_grid_duplicates(
records, "scientificName", resolution=0.008333333767967150002, keep="last"
)
expected = pd.Series(
[
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
]
)
pd.testing.assert_series_equal(result, expected, check_dtype=False)
| 21.082759 | 83 | 0.446189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.052993 |
cd8b45d655ef0b191b537030a3d9f0b1784aa23f | 772 | py | Python | kolibri/core/public/utils.py | FollonSaxBass/kolibri | 4cf820b14386aecc228fecff64c847bad407cbb1 | [
"MIT"
] | 2 | 2021-05-13T10:20:46.000Z | 2021-11-15T12:31:03.000Z | kolibri/core/public/utils.py | camellia26/kolibri | 7f1cb794c93f37e039be22f56a5ac1989ed22bde | [
"MIT"
] | 8 | 2021-05-21T15:31:24.000Z | 2022-02-24T15:02:14.000Z | kolibri/core/public/utils.py | camellia26/kolibri | 7f1cb794c93f37e039be22f56a5ac1989ed22bde | [
"MIT"
] | 1 | 2019-10-05T11:14:40.000Z | 2019-10-05T11:14:40.000Z | import platform
from django.core.exceptions import ObjectDoesNotExist
from morango.models import InstanceIDModel
import kolibri
def get_device_info():
"""Returns metadata information about the device"""
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
try:
device_name = kolibri.core.device.models.DeviceSettings.objects.get().name
        # When Kolibri starts for the first time, the device settings haven't been created yet
except ObjectDoesNotExist:
device_name = instance_model.hostname
info = {
"application": "kolibri",
"kolibri_version": kolibri.__version__,
"instance_id": instance_model.id,
"device_name": device_name,
"operating_system": platform.system(),
}
return info
| 28.592593 | 82 | 0.715026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.264249 |
cd8c005ad2ae492334e75e29d8ea3fae95bca95b | 1,372 | py | Python | mcpipy/cellcraft/config.py | cellcraft/cellcraft | 1cb2b152bb6433250cec43e2586f1b5d093ec6e5 | [
"MIT"
] | 2 | 2016-01-21T12:05:36.000Z | 2016-04-18T09:50:03.000Z | mcpipy/cellcraft/config.py | cellcraft/cellcraft | 1cb2b152bb6433250cec43e2586f1b5d093ec6e5 | [
"MIT"
] | 1 | 2016-05-13T13:08:28.000Z | 2016-05-13T13:08:28.000Z | mcpipy/cellcraft/config.py | cellcraft/cellcraft | 1cb2b152bb6433250cec43e2586f1b5d093ec6e5 | [
"MIT"
] | 3 | 2015-12-14T19:28:42.000Z | 2020-11-29T12:53:12.000Z | import os
import json
import logging
# cellcraft node
CELLCRAFT_NODE_URL="http://192.168.178.29:4534"
# path to cache where pickle files will be stored
PATH_RESOURCES='cellcraft/resources'
PATH_CACHE='cellcraft/resources/cache/'
PATH_TEST_CACHE='test/fixtures/cache/'
# path to fixtures
PATH_TO_FIXTURES="test/fixtures"
# path to cellpack structures after processing them
PATH_CELLPACK = 'cellcraft/resources/cellpack/'
# cellpack parameters
envelop_id = 22
# database name to store biological information and coordinates of structures
DB='cellcraft'
TEST_DB='test'
# fix maximum amount of structures saved on cache
MAXIMUM_NUM_STRUCTURES_CACHE = 8
# load block appearance json
def load_block_appearance():
with open(os.path.join(PATH_RESOURCES, "block_appearance.json")) as appearance_json:
block_appearance = json.load(appearance_json)
return block_appearance
current_env = os.environ.get('app_env')
root_logger = logging.getLogger()
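# NOTE: this hardcoded value overrides the 'app_env' read above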
current_env = 'test'
if current_env == 'cellcraft':
DB_HOST = '127.0.0.1'
DB_PORT = 27017
root_logger.setLevel(logging.INFO)
elif current_env == 'test':
DB_HOST = '127.0.0.1'
DB_PORT = 27017
root_logger.setLevel(logging.DEBUG)
else:
    logging.warning('Please configure an environment; using the default dev environment for config now')
root_logger.setLevel(logging.DEBUG)
| 24.070175 | 98 | 0.764577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 632 | 0.460641 |
cd8c4a556bdf6a751d59f1d67ef4d0688f0e6844 | 9,123 | py | Python | ftpsync/pyftpsync.py | wengzy/pyftpsync | db6decb02bf3535fe87d90b45a6cc974dd356b04 | [
"MIT"
] | 86 | 2015-03-02T17:40:03.000Z | 2022-03-14T03:41:40.000Z | ftpsync/pyftpsync.py | wengzy/pyftpsync | db6decb02bf3535fe87d90b45a6cc974dd356b04 | [
"MIT"
] | 63 | 2015-04-12T19:01:52.000Z | 2022-01-19T00:57:51.000Z | ftpsync/pyftpsync.py | wengzy/pyftpsync | db6decb02bf3535fe87d90b45a6cc974dd356b04 | [
"MIT"
] | 25 | 2015-04-12T18:07:25.000Z | 2021-04-25T15:20:24.000Z | # -*- coding: utf-8 -*-
"""
Simple folder synchronization using FTP.
(c) 2012-2021 Martin Wendt; see https://github.com/mar10/pyftpsync
Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php
Usage examples:
> pyftpsync.py --help
> pyftpsync.py upload . ftps://example.com/myfolder
"""
import argparse
import platform
import sys
from pprint import pprint
from ftpsync import __version__
from ftpsync.cli_common import (
common_parser,
creds_parser,
matcher_parser,
verbose_parser,
)
from ftpsync.run_command import add_run_parser, handle_run_command
from ftpsync.scan_command import add_scan_parser
from ftpsync.synchronizers import (
BiDirSynchronizer,
DownloadSynchronizer,
UploadSynchronizer,
)
from ftpsync.targets import FsTarget, make_target
from ftpsync.tree_command import add_tree_parser
from ftpsync.util import (
DEBUG_FLAGS,
PYTHON_VERSION,
check_cli_verbose,
namespace_to_dict,
set_pyftpsync_logger,
)
# ===============================================================================
# run
# ===============================================================================
def run():
"""CLI main entry point."""
# Use print() instead of logging when running in CLI mode:
set_pyftpsync_logger(None)
parser = argparse.ArgumentParser(
description="Synchronize folders over FTP.",
epilog="See also https://github.com/mar10/pyftpsync",
parents=[verbose_parser],
)
# Note: we want to allow --version to be combined with --verbose. However
# on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
if check_cli_verbose(3) > 3:
version_info = "pyftpsync/{} Python/{} {}".format(
__version__, PYTHON_VERSION, platform.platform()
)
else:
version_info = "{}".format(__version__)
parser.add_argument("-V", "--version", action="version", version=version_info)
subparsers = parser.add_subparsers(help="sub-command help")
# --- Create the parser for the "upload" command ---------------------------
sp = subparsers.add_parser(
"upload",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files to remote folder",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite remote files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["local", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove remote files if they don't exist locally",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove remote files if they don't exist locally "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="upload")
# --- Create the parser for the "download" command -------------------------
sp = subparsers.add_parser(
"download",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files from remote folder to local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite local files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove local files if they don't exist on remote target",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove local files if they don't exist on remote target "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="download")
# --- Create the parser for the "sync" command -----------------------------
sp = subparsers.add_parser(
"sync",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="synchronize new and modified files between remote folder and local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--resolve",
default="ask",
choices=["old", "new", "local", "remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.set_defaults(command="sync")
# --- Create the parser for the "run" command -----------------------------
add_run_parser(subparsers)
# --- Create the parser for the "scan" command -----------------------------
add_scan_parser(subparsers)
# --- Create the parser for the "tree" command -----------------------------
add_tree_parser(subparsers)
# --- Parse command line ---------------------------------------------------
args = parser.parse_args()
args.verbose -= args.quiet
del args.quiet
# print("verbose", args.verbose)
ftp_debug = 0
if args.verbose >= 6:
ftp_debug = 1
if args.debug:
if args.verbose < 4:
parser.error("'--debug' requires verbose level >= 4")
DEBUG_FLAGS.update(args.debug)
# Modify the `args` from the `pyftpsync.yaml` config:
if getattr(args, "command", None) == "run":
handle_run_command(parser, args)
if callable(getattr(args, "command", None)):
# scan_handler
try:
return args.command(parser, args)
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
elif not hasattr(args, "command"):
parser.error(
"missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
)
# Post-process and check arguments
if hasattr(args, "delete_unmatched") and args.delete_unmatched:
args.delete = True
args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})
if args.remote == ".":
parser.error("'.' is expected to be the local target (not remote)")
args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
if not isinstance(args.local_target, FsTarget) and isinstance(
args.remote_target, FsTarget
):
parser.error("a file system target is expected to be local")
# Let the command handler do its thing
opts = namespace_to_dict(args)
if args.command == "upload":
s = UploadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "download":
s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "sync":
s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
else:
parser.error("unknown command '{}'".format(args.command))
s.is_script = True
try:
s.run()
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
finally:
# Prevent sporadic exceptions in ftplib, when closing in __del__
s.local.close()
s.remote.close()
stats = s.get_stats()
if args.verbose >= 5:
pprint(stats)
elif args.verbose >= 1:
if args.dry_run:
print("(DRY-RUN) ", end="")
print(
"Wrote {}/{} files in {} directories, skipped: {}.".format(
stats["files_written"],
stats["local_files"],
stats["local_dirs"],
stats["conflict_files_skipped"],
),
end="",
)
if stats["interactive_ask"]:
print()
else:
print(" Elap: {}.".format(stats["elap_str"]))
return
# Script entry point
if __name__ == "__main__":
# Just in case...
from multiprocessing import freeze_support
freeze_support()
run()
| 31.350515 | 90 | 0.574044 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,743 | 0.410282 |
cd8d8365ca2301a760424dae1ee2e706688adc1f | 9,678 | py | Python | main/views.py | QingShuiXiFan/Style-Transfer | f79951323cdfd0c72f2157623209d9067376306b | [
"Apache-2.0"
] | null | null | null | main/views.py | QingShuiXiFan/Style-Transfer | f79951323cdfd0c72f2157623209d9067376306b | [
"Apache-2.0"
] | null | null | null | main/views.py | QingShuiXiFan/Style-Transfer | f79951323cdfd0c72f2157623209d9067376306b | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, FileResponse
from django.urls import reverse
import os
from django.contrib.auth import authenticate, login, logout  # default helpers from Django's user authentication and management apps
from django.contrib import auth
from django.template import RequestContext
from .forms import LoginForm, RegistrationForm
from django.contrib.auth.models import User
import hashlib  # Python's built-in hashing library
from django.contrib.auth.hashers import make_password, check_password  # Django's built-in password hashers
from django.core.mail import send_mail
import imghdr  # used to check whether a file is an image
import time, datetime
from django.conf import settings
from .models import Pictures
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_DIR = "common_static"
GPU_ISACTIVATED = True
# Create your views here.
def index(request):
return render(request, "main/index.html")
def blog(request):
return render(request, 'main/blog.html')
def blogArticle(request):
return render(request, 'main/blogArticle.html')
def faq(request):
return render(request, 'main/faq.html')
def about(request):
return render(request, 'main/about.html')
def support(request):
return render(request, 'main/support.html')
# get the visitor's IP address
def get_request_ip(request):
try:
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]  # so this is the real client IP
else:
            ip = request.META.get('REMOTE_ADDR')  # this gets the proxy IP
except:
ip = None
return ip
# get the file size in MB
def get_FileSize(filePath):
fsize = os.path.getsize(filePath)
fsize = fsize / float(1024 * 1024)
return round(fsize, 2)
def ajaxUpload(request):
if request.method == 'GET':
return render(request, 'main/ajaxUpload.html')
if request.method == 'POST':
        # get the requesting user's IP
ip = get_request_ip(request)
        # ======= upload the content image ==========
        file_obj = request.FILES.get('file_obj', None)  # get the file object; defaults to None if no file was sent
        # no image was uploaded
if not file_obj:
result = {"status": "no_file"}
return JsonResponse(result)
        # use the model class to store the image's target path in the database
        t = time.time()  # add a timestamp to the file name to uniquely tag each file
timeStamp = str(int(t))
p = Pictures()
p.pic = "tmpImages/" + timeStamp + '_' + file_obj.name # 文件路径字段
p.uploaded_timeStamp = timeStamp # 上传时间戳字段
p.ip = ip # 用户ip字段
p.save()
        # write the file to disk
        picPath = settings.MEDIA_ROOT + "/tmpImages/" + timeStamp + '_' + file_obj.name
        destination = open(picPath, 'wb+')  # open the target file for binary writing
        for chunk in file_obj.chunks():  # write the file chunk by chunk
destination.write(chunk)
destination.close()
        # store the uploaded path and id in the session
request.session['uploaded_pic_path'] = str(p.pic)
request.session['uploaded_pic_id'] = str(p.id)
        request.session.set_expiry(0)  # the session is cleared when the browser closes
picName = timeStamp + '_' + file_obj.name
data = {"status": "success", "picName": picName} # 返回data给前端,显示上传的图片
return JsonResponse(data)
# stylization (style transfer)
def transfer(request):
if request.method == "GET":
        request.session.flush()  # clear any existing session
return render(request, 'main/transfer.html')
if request.method == "POST": # 请求方法为POST时,进行处理
# 获取访问用户的ip
ip = get_request_ip(request)
style_name = str(request.POST.get('style_name')) # 获取select的value值,如scream,与文件名对应,如scream.ckpt
if style_name in ['la_muse','rain_princess','the_scream','the_shipwreck_of_the_minotaur','udnie','wave']:
ckpt_path = style_name + ".ckpt" # ckpt文件名
else:
ckpt_path = style_name
        content_name = str(request.POST.get('picName'))  # get the content image name
        generated_image_path = BASE_DIR + "/" + STATIC_DIR + "/media/download/tmpImages/" + content_name  # path of the generated image
        # if the stylized image already exists, delete it
if (os.path.exists(generated_image_path)):
os.remove(generated_image_path)
        # run the evaluate.py program
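        # NOTE: style_name and content_name come straight from the POST body and
        # are interpolated into a shell command; they should be validated first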
cmd = settings.PYTHON_VERSION + " evaluate.py --checkpoint examples/checkpoint/" + ckpt_path + \
" --in-path " + BASE_DIR + "/" + STATIC_DIR + "/media/upload/tmpImages/" + content_name + \
" --out-path " + BASE_DIR + "/" + STATIC_DIR + "/media/download/tmpImages/"
if (GPU_ISACTIVATED == True):
activate_gpu = 'activate tensorflow-gpu'
os.popen(activate_gpu + " && cd " + BASE_DIR + "/fast-style-transfer-master && " + cmd)
else:
os.popen("cd " + BASE_DIR + "/fast-style-transfer-master && " + cmd)
start_time = time.time()
while (os.path.exists(generated_image_path) == False):
time_used = time.time() - start_time
if time_used >= 60:
data = {"status": "time_out"}
return JsonResponse(data)
else:
time.sleep(1)
data = {"status": "success"} # 返回data给前端,显示上传的图片
return JsonResponse(data)
# download the image
def file_down(request):
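    # NOTE: the file path is left empty in the original source; fill in the
    # actual file to serve before using this view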
file = open('', 'rb')
response = FileResponse(file)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="example.tar.gz"'
return response
def showImg(request):
return render(request, 'main/showImage.html')
def style2paint(request):
return render(request, 'main/style2paint.html')
def user_login(request):
if request.method == "GET":
login_form = LoginForm()
return render(request, 'main/login.html', {"form": login_form})
if request.method == "POST": # GET多用于数据查询,POST多用于数据写入或者更新等
login_form = LoginForm(request.POST) # request.POST是提交的表单数据所返回的类字典数据
if login_form.is_valid():
cd = login_form.cleaned_data
# user = authenticate(email=cd['email'],
            # password=cd['password'])  # authenticate() returns a User instance if the credentials match, otherwise None
input_email = cd['email']
input_password = cd['password']
try:
user = User.objects.get(email=input_email)
                if check_password(input_password, user.password):  # compare against the stored hash
                    login(request, user)  # log the user in with the User instance fetched above
return redirect('main:index')
else:
message = "抱歉,您的密码填写错误"
return render(request, 'main/login.html', {"message": message, "form": login_form})
except:
message = "用户不存在!"
return render(request, 'main/login.html', {"message": message, "form": login_form})
else:
message = "验证码输入错误"
return render(request, 'main/login.html', {"message": message, "form": login_form})
def user_logout(request):
    logout(request)  # log the user out
return redirect("/main/")
def register(request):
if request.user.is_authenticated:
        # registration is not allowed while logged in; feel free to change this rule!
return redirect("/main")
if request.method == "POST":
user_form = RegistrationForm(request.POST)
        if user_form.is_valid():  # extract the data
            # <== extra validation logic can go here ==>
cd = user_form.cleaned_data
input_username = cd['username']
input_email = cd['email']
input_password = cd['password']
input_password2 = cd['password2']
            if input_password != input_password2:  # check that the two passwords match
                message = "The two passwords you entered do not match!"
return render(request, 'main/register.html', {"message": message, "form": user_form})
else:
same_name_user = User.objects.filter(username=input_username)
                if same_name_user:  # username must be unique
                    message = 'This username is already taken, please choose a different one!'
return render(request, 'main/register.html', {"message": message, "form": user_form})
same_email_user = User.objects.filter(email=input_email)
                if same_email_user:  # email address must be unique
                    message = 'This email address is already registered, please use a different one!'
return render(request, 'main/register.html', {"message": message, "form": user_form})
                # the email is available and the form is filled in correctly
new_user = user_form.save(commit=False)
                new_user.password = make_password(user_form.cleaned_data['password'])  # hash with Django's built-in algorithm
new_user.save()
# send_mail('Subject here', 'Here is the message.', '[email protected]',['[email protected]'], fail_silently=False)
                send_email_content = input_username + ',\n' + '\tYou have successfully registered a Style Transfer account. Here are your login details, please keep them safe:\n' + 'Email: ' + input_email + '\n' + 'Password: ' + input_password + '\n\n' + 'www.styletransfer.cn'
send_mail('[Style Transfer] Registered Successfully!', send_email_content, '[email protected]',
[input_email],
fail_silently=False)
                message = input_username + ", registered successfully!"
return redirect('main:tip')
else:
message = "用户名已被使用"
return render(request, "main/register.html", {"message": message, "form": user_form})
user_form = RegistrationForm()
return render(request, "main/register.html", {"form": user_form})
def playground(request):
return render(request, 'main/playground.html')
def tip(request):
return render(request, 'main/tip.html')
def hash_code(s, salt='styletransfer'):  # salted hash
    h = hashlib.sha256()
    s += salt
    h.update(s.encode())  # update() only accepts bytes
return h.hexdigest()
| 36.247191 | 198 | 0.615726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,938 | 0.361152 |
cd8e00f631a120690eef589a528899913c4b3443 | 781 | py | Python | edj/Spot_square.py | CircuitLaunch/Spot_Bootcamp | 47735ce474a59c5478099f6095b68c46b77d3da6 | [
"BSD-3-Clause"
] | null | null | null | edj/Spot_square.py | CircuitLaunch/Spot_Bootcamp | 47735ce474a59c5478099f6095b68c46b77d3da6 | [
"BSD-3-Clause"
] | null | null | null | edj/Spot_square.py | CircuitLaunch/Spot_Bootcamp | 47735ce474a59c5478099f6095b68c46b77d3da6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from Spot import *
import time
from bosdyn.client import math_helpers
if __name__ == '__main__':
spot = Spot()
try:
# It's ALIVE!
spot.power_on()
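        # walk a 1 m square: forward, left, back, right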
spot.move_to(1.0, 0.0, 0.0, math_helpers.Quat(), duration=5.0)
time.sleep(5.0)
spot.move_to(0.0, 1.0, 0.0, math_helpers.Quat(), duration=5.0)
time.sleep(5.0)
spot.move_to(-1.0, 0.0, 0.0, math_helpers.Quat(), duration=5.0)
time.sleep(5.0)
spot.move_to(0.0, -1.0, 0.0, math_helpers.Quat(), duration=5.0)
time.sleep(5.0)
# Power down
spot.estop(graceful=True)
    except Exception:
print('Exception')
print('Trying to make Python GC the Spot object')
spot = None
time.sleep(5.0)
exit(0)
| 21.694444 | 71 | 0.577465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.140845 |
cd8f015fd57b190763452236dbe2f747d3309b7f | 837 | py | Python | tests/test_set.py | maxslarsson/tennis-probability | f26021b305e2b8abd87acad846454f7ce02e9199 | [
"MIT"
] | null | null | null | tests/test_set.py | maxslarsson/tennis-probability | f26021b305e2b8abd87acad846454f7ce02e9199 | [
"MIT"
] | null | null | null | tests/test_set.py | maxslarsson/tennis-probability | f26021b305e2b8abd87acad846454f7ce02e9199 | [
"MIT"
] | null | null | null | import pytest
from tennis_probability import set, InvalidInput, InvalidProbability, NegativeNumber
def test_set():
assert set(0, 0, 0) == 0
assert set(0, 0, 0.50) == 0.5
assert set(0, 0, 1) == 1
# Test valid inputs
assert set(5, 3, 0.13) == 0.008146509339015371
assert set(2, 2, 0.37) == 0.024086243446167555
assert set(4, 1, 0.91) == 0.9999999999999992
# Test invalid inputs
with pytest.raises(InvalidInput):
set(10, 3, 0.2)
with pytest.raises(InvalidInput):
set(2, 812, 0.5)
with pytest.raises(InvalidInput):
set(5, 5, 0.51)
with pytest.raises(NegativeNumber):
set(-1, 0, 0.9)
# Test invalid probabilities
with pytest.raises(InvalidProbability):
set(2, 3, 1.0001)
with pytest.raises(InvalidProbability):
set(1, 0, -1.001)
| 27.9 | 84 | 0.628435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.081243 |
cd90fb8f4961d4f54d2eb80fcec8b04e412e1af3 | 626 | py | Python | sources/classic/messaging_kombu/handlers.py | variasov/classic_messaging_kombu | c4191f3d1f788a39f50dc137eca1b67f3ee2af20 | [
"MIT"
] | 1 | 2021-11-12T08:19:53.000Z | 2021-11-12T08:19:53.000Z | sources/classic/messaging_kombu/handlers.py | variasov/classic_messaging_kombu | c4191f3d1f788a39f50dc137eca1b67f3ee2af20 | [
"MIT"
] | null | null | null | sources/classic/messaging_kombu/handlers.py | variasov/classic_messaging_kombu | c4191f3d1f788a39f50dc137eca1b67f3ee2af20 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Dict, Any, Callable
from kombu import Message
from classic.components import component
MessageBody = Dict[str, Any]
@component
class MessageHandler(ABC):
@abstractmethod
def handle(self, message: Message, body: MessageBody):
pass
@component
class SimpleMessageHandler(MessageHandler):
function: Callable[[Any], Any]
late_ack: bool = True
def handle(self, message: Message, body: MessageBody):
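        # ack immediately unless late_ack is set, in which case ack only after processing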
if not self.late_ack:
message.ack()
self.function(**body)
if self.late_ack:
message.ack()
| 18.969697 | 58 | 0.680511 | 423 | 0.675719 | 0 | 0 | 445 | 0.710863 | 0 | 0 | 0 | 0 |
cd937e31435e325df9a3ac8d8fa5487807539935 | 1,440 | py | Python | byceps/services/shop/order/event_service.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | 33 | 2018-01-16T02:04:51.000Z | 2022-03-22T22:57:29.000Z | byceps/services/shop/order/event_service.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | 7 | 2019-06-16T22:02:03.000Z | 2021-10-02T13:45:31.000Z | byceps/services/shop/order/event_service.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | 14 | 2019-06-01T21:39:24.000Z | 2022-03-14T17:56:43.000Z | """
byceps.services.shop.order.event_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from datetime import datetime
from typing import Sequence
from ....database import db
from .dbmodels.order_event import OrderEvent as DbOrderEvent, OrderEventData
from .transfer.models import OrderID
def create_event(
event_type: str, order_id: OrderID, data: OrderEventData
) -> None:
"""Create an order event."""
event = build_event(event_type, order_id, data)
db.session.add(event)
db.session.commit()
def create_events(
event_type: str, order_id: OrderID, datas: Sequence[OrderEventData]
) -> None:
"""Create a sequence of order events."""
events = [build_event(event_type, order_id, data) for data in datas]
db.session.add_all(events)
db.session.commit()
def build_event(
event_type: str, order_id: OrderID, data: OrderEventData
) -> DbOrderEvent:
"""Assemble, but not persist, an order event."""
now = datetime.utcnow()
return DbOrderEvent(now, event_type, order_id, data)
def get_events_for_order(order_id: OrderID) -> list[DbOrderEvent]:
"""Return the events for that order."""
return db.session \
.query(DbOrderEvent) \
.filter_by(order_id=order_id) \
.order_by(DbOrderEvent.occurred_at) \
.all()
| 26.181818 | 76 | 0.690972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.238194 |
cd95b58b744f084920dc507989ebf490290a8ec2 | 637 | py | Python | app/models/columns/suit.py | abcnever/euchre-game | 5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669 | [
"MIT"
] | 1 | 2018-12-31T05:38:56.000Z | 2018-12-31T05:38:56.000Z | app/models/columns/suit.py | abcnever/euchre-game | 5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669 | [
"MIT"
] | 4 | 2018-11-03T15:51:13.000Z | 2019-01-12T21:09:23.000Z | app/models/columns/suit.py | abcnever/euchre-game | 5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669 | [
"MIT"
] | null | null | null | from attr import attrs, attrib
import enum
from .enum import EnumColumn
class Suit(EnumColumn):
class Enum(enum.Enum):
@attrs(frozen=True)
class _Suit():
suit_name = attrib()
ascii_icon = attrib()
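        # red suits use ANSI escapes: \033[91m = bright red, \033[0m = reset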
spades = _Suit(
suit_name="Spades",
ascii_icon="♠"
)
clubs = _Suit(
suit_name="Clubs",
ascii_icon="♣"
)
diamonds = _Suit(
suit_name="Diamonds",
ascii_icon="\033[91m♦\0330m"
)
        hearts = _Suit(
            suit_name="Hearts",
            ascii_icon="\033[91m♥\033[0m"
        )
| 21.233333 | 40 | 0.486656 | 570 | 0.883721 | 0 | 0 | 109 | 0.168992 | 0 | 0 | 81 | 0.125581 |
cd977d3ad4e8e4d9141853e4e08a51d0ffa0f771 | 1,881 | py | Python | dataset.py | sreza1/Diabetic-Retinopathy-Detection | 75f10423ef161d3040756253a8ba0b9012e391b7 | [
"MIT"
] | null | null | null | dataset.py | sreza1/Diabetic-Retinopathy-Detection | 75f10423ef161d3040756253a8ba0b9012e391b7 | [
"MIT"
] | null | null | null | dataset.py | sreza1/Diabetic-Retinopathy-Detection | 75f10423ef161d3040756253a8ba0b9012e391b7 | [
"MIT"
] | null | null | null | import config
import os
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from tqdm import tqdm
class DRDataset(Dataset):
def __init__(self, images_folder, path_to_csv, train=True, transform=None):
super().__init__()
self.data = pd.read_csv(path_to_csv)
self.images_folder = images_folder
self.image_files = os.listdir(images_folder)
self.transform = transform
self.train = train
def __len__(self):
return self.data.shape[0] if self.train else len(self.image_files)
def __getitem__(self, index):
if self.train:
image_file, label = self.data.iloc[index]
else:
# if test simply return -1 for label, I do this in order to
# re-use same dataset class for test set submission later on
image_file, label = self.image_files[index], -1
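        # strip a stray ".jpeg" so the extension is not doubled when re-appended below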
image_file = image_file.replace(".jpeg", "")
# if image_file[0]=="_":
# image_file=image_file[1:]
# elif image_file[:2] =="._":
# image_file=image_file[2:]
path = os.path.join(self.images_folder + "/", image_file+".jpeg")
image = np.array(Image.open(path))
if self.transform:
image= self.transform(image=image)["image"]
return image, label, image_file
if __name__ == "__main__":
"""
Test if everything works ok
"""
dataset = DRDataset(
images_folder="/data/images_resized_650",
path_to_csv="/data/trainLabels.csv",
transform = config.val_transforms
)
loader = DataLoader(
dataset=dataset, batch_size=32, num_workers=6, shuffle=True, pin_memory=True
)
for x, label, file in tqdm(loader):
print(x.shape)
print(label.shape)
import sys
        sys.exit() | 29.857143 | 84 | 0.617757 | 1,231 | 0.654439 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.192451 |
cd988eff24525966178311b4c694188e2f3b5038 | 507 | py | Python | server/server.py | Filipos27/Celebrity_classification | 802474516b9ecaee70c4019600572bbbbd8b582a | [
"MIT"
] | null | null | null | server/server.py | Filipos27/Celebrity_classification | 802474516b9ecaee70c4019600572bbbbd8b582a | [
"MIT"
] | null | null | null | server/server.py | Filipos27/Celebrity_classification | 802474516b9ecaee70c4019600572bbbbd8b582a | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify
import util
app= Flask(__name__)
@app.route("/classify_image",methods=["GET","POST"])
def classify_image():
image_data=request.form["image_data"]
response=jsonify(util.classify_image(image_data))
response.headers.add("Access-Control-Allow-Origin","*")
return response
if __name__ == "__main__":
print("Starting Python Flask Server For Celebrity Image Classification")
util.load_saved_artifacts()
app.run(port=5000)
| 28.166667 | 77 | 0.710059 | 0 | 0 | 0 | 0 | 256 | 0.504931 | 0 | 0 | 147 | 0.289941 |
cd99a356df7305e9c0faf645726124d17a3abcde | 41 | py | Python | app.py | blogsley/blogsley-flask-site | 40df6641cce8336d790549b7edac4f83d8b8fb8e | [
"MIT"
] | 1 | 2020-12-18T03:52:25.000Z | 2020-12-18T03:52:25.000Z | app.py | blogsley/blogsley-flask-site | 40df6641cce8336d790549b7edac4f83d8b8fb8e | [
"MIT"
] | 3 | 2020-05-04T07:46:54.000Z | 2022-02-10T19:39:19.000Z | app.py | blogsley/blogsley-flask-site | 40df6641cce8336d790549b7edac4f83d8b8fb8e | [
"MIT"
] | null | null | null | from blogsley_site.app import create_app
| 20.5 | 40 | 0.878049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd9a0d84b5dfdc848833f7aaab7d4b2009ed8946 | 421 | py | Python | awards/migrations/0003_project_project_photo.py | kimutaiamos/Gold-Awwards | 1bf12cf0cdbf250251664f067c5397160fa5ed41 | [
"MIT"
] | null | null | null | awards/migrations/0003_project_project_photo.py | kimutaiamos/Gold-Awwards | 1bf12cf0cdbf250251664f067c5397160fa5ed41 | [
"MIT"
] | null | null | null | awards/migrations/0003_project_project_photo.py | kimutaiamos/Gold-Awwards | 1bf12cf0cdbf250251664f067c5397160fa5ed41 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-12-12 18:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awards', '0002_profile_profile_photo'),
]
operations = [
migrations.AddField(
model_name='project',
name='project_photo',
field=models.ImageField(default='Image', upload_to='projectpics/'),
),
]
| 22.157895 | 79 | 0.615202 | 330 | 0.783848 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.299287 |
cd9a1323c7a15a9388bdc8532ce60de3beb414fa | 7,827 | py | Python | tests/e2e/performance/csi_tests/test_pvc_bulk_clone_performance.py | annagitel/ocs-ci | 284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5 | [
"MIT"
] | 1 | 2021-03-12T09:01:36.000Z | 2021-03-12T09:01:36.000Z | tests/e2e/performance/csi_tests/test_pvc_bulk_clone_performance.py | annagitel/ocs-ci | 284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5 | [
"MIT"
] | 1 | 2021-08-30T20:06:00.000Z | 2021-09-30T20:05:46.000Z | tests/e2e/performance/csi_tests/test_pvc_bulk_clone_performance.py | annagitel/ocs-ci | 284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5 | [
"MIT"
] | null | null | null | """
Test to measure pvc scale creation time. Total pvc count would be 50, 1 clone per PVC
Total number of clones in bulk will be 50
"""
import logging
import pytest
from ocs_ci.utility import utils
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.framework.testlib import performance
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import constants, scale_lib
from ocs_ci.ocs.resources import pvc, pod
from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile
log = logging.getLogger(__name__)
@performance
class TestBulkCloneCreation(PASTest):
"""
Base class for bulk creation of PVC clones
"""
@pytest.fixture()
def namespace(self, project_factory, interface_iterate):
"""
Create a new project
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
self.interface = interface_iterate
@pytest.mark.usefixtures(namespace.__name__)
@pytest.mark.polarion_id("OCS-2621")
def test_bulk_clone_performance(self, namespace, tmp_path):
"""
Creates number of PVCs in a bulk using kube job
Write 60% of PVC capacity to each one of the created PVCs
Creates 1 clone per each PVC altogether in a bulk
Measuring time for bulk of clones creation
"""
pvc_count = 50
vol_size = "5Gi"
job_pod_file, job_pvc_file, job_clone_file = [None, None, None]
log.info(f"Start creating {self.interface} {pvc_count} PVC")
if self.interface == constants.CEPHBLOCKPOOL:
sc_name = constants.DEFAULT_STORAGECLASS_RBD
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
elif self.interface == constants.CEPHFILESYSTEM:
sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
try:
pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
no_of_pvc=pvc_count,
access_mode=constants.ACCESS_MODE_RWO,
sc_name=sc_name,
pvc_size=vol_size,
)
job_pvc_file = ObjectConfFile(
name="job_profile_pvc",
obj_dict_list=pvc_dict_list,
project=self.namespace,
tmp_path=tmp_path,
)
# Create kube_job
job_pvc_file.create(namespace=self.namespace)
# Check all the PVC reached Bound state
pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
kube_job_obj=job_pvc_file,
namespace=self.namespace,
no_of_pvc=pvc_count,
)
logging.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")
# Kube_job to Create pod
pod_dict_list = scale_lib.attach_multiple_pvc_to_pod_dict(
pvc_list=pvc_bound_list,
namespace=self.namespace,
pvcs_per_pod=1,
start_io=False,
pod_yaml=constants.NGINX_POD_YAML,
)
job_pod_file = ObjectConfFile(
name="job_profile_pod",
obj_dict_list=pod_dict_list,
project=self.namespace,
tmp_path=tmp_path,
)
job_pod_file.create(namespace=self.namespace)
# Check all PODs in Running state
scale_lib.check_all_pod_reached_running_state_in_kube_job(
kube_job_obj=job_pod_file,
namespace=self.namespace,
no_of_pod=len(pod_dict_list),
timeout=90,
)
logging.info(f"Number of PODs in Running state {len(pod_dict_list)}")
total_files_size = self.run_fio_on_pvcs(vol_size)
clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
pvc_dict_list, clone_yaml, sc_name
)
logging.info("Created clone dict list")
job_clone_file = ObjectConfFile(
name="job_profile_clone",
obj_dict_list=clone_dict_list,
project=self.namespace,
tmp_path=tmp_path,
)
# Create kube_job that creates clones
job_clone_file.create(namespace=self.namespace)
logging.info("Going to check bound status for clones")
# Check all the clones reached Bound state
clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
kube_job_obj=job_clone_file,
namespace=self.namespace,
no_of_pvc=pvc_count,
timeout=180,
)
logging.info(f"Number of clones in Bound state {len(clone_bound_list)}")
clone_objs = []
all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
for clone_yaml in clone_dict_list:
name = clone_yaml["metadata"]["name"]
size = clone_yaml["spec"]["resources"]["requests"]["storage"]
logging.info(f"Clone {name} of size {size} created")
for pvc_obj in all_pvc_objs:
if pvc_obj.name == name:
clone_objs.append(pvc_obj)
assert len(clone_bound_list) == len(
clone_objs
), "Not all clones reached BOUND state, cannot measure time"
start_time = helpers.get_provision_time(
self.interface, clone_objs, status="start"
)
end_time = helpers.get_provision_time(
self.interface, clone_objs, status="end"
)
total_time = (end_time - start_time).total_seconds()
speed = round(total_files_size / total_time, 2)
logging.info(
f"Total creation time = {total_time} secs, data size = {total_files_size} MB, speed = {speed} MB/sec "
f"for {self.interface} clone in bulk of {pvc_count} clones."
)
# Finally is used to clean-up the resources created
# Irrespective of try block pass/fail finally will be executed.
finally:
# Cleanup activities
logging.info("Cleanup of all the resources created during test execution")
if job_pod_file:
job_pod_file.delete(namespace=self.namespace)
job_pod_file.wait_for_delete(
resource_name=job_pod_file.name, namespace=self.namespace
)
if job_clone_file:
job_clone_file.delete(namespace=self.namespace)
job_clone_file.wait_for_delete(
resource_name=job_clone_file.name, namespace=self.namespace
)
if job_pvc_file:
job_pvc_file.delete(namespace=self.namespace)
job_pvc_file.wait_for_delete(
resource_name=job_pvc_file.name, namespace=self.namespace
)
# Check ceph health status
utils.ceph_health_check(tries=20)
def run_fio_on_pvcs(self, pvc_size):
searched_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
pod_objs = pod.get_all_pods(namespace=self.namespace)
logging.info(f"Found {len(searched_pvc_objs)} PVCs")
pvc_size_int = int(pvc_size[:-2]) # without "Gi"
file_size_mb = int(pvc_size_int * 0.6) * constants.GB2MB
total_files_size = file_size_mb * len(searched_pvc_objs)
file_size_mb_str = str(file_size_mb) + "M"
logging.info(f"Writing file of size {file_size_mb_str} in each PVC")
for objs in pod_objs:
performance_lib.write_fio_on_pod(objs, file_size_mb_str)
return total_files_size
| 38.747525 | 118 | 0.614028 | 7,284 | 0.930625 | 0 | 0 | 7,297 | 0.932286 | 0 | 0 | 1,676 | 0.214131 |
269506ce70f1d96fec0d9578b84d16a8d1ec4d2d | 72 | py | Python | back/app/models/__init__.py | davidroeca/simple_graphql | a6b2b20b6458b6b2fa9363a542015ab42761bd98 | [
"MIT"
] | null | null | null | back/app/models/__init__.py | davidroeca/simple_graphql | a6b2b20b6458b6b2fa9363a542015ab42761bd98 | [
"MIT"
] | null | null | null | back/app/models/__init__.py | davidroeca/simple_graphql | a6b2b20b6458b6b2fa9363a542015ab42761bd98 | [
"MIT"
] | null | null | null | from .database import db
from .user import User
from .post import Post
| 14.4 | 24 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
269573fa22001d4ea70efb720d1a7ce5724057f7 | 3,520 | py | Python | tools/noise_reduction.py | 8igfive/MyASR | 565267d9df4b56cfad5107632146aab8150a962d | [
"MIT"
] | null | null | null | tools/noise_reduction.py | 8igfive/MyASR | 565267d9df4b56cfad5107632146aab8150a962d | [
"MIT"
] | null | null | null | tools/noise_reduction.py | 8igfive/MyASR | 565267d9df4b56cfad5107632146aab8150a962d | [
"MIT"
] | null | null | null | import argparse
from genericpath import exists
import os
import time
import re
from tqdm import tqdm
import numpy as np
from scipy.io import wavfile
from wiener_scalart import wienerScalart
TIME = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
WORKPLACE_DIR = os.path.dirname(CURRENT_DIR)
DUMP_DIR = os.path.join(WORKPLACE_DIR, os.path.join('dump', TIME))
DUMP_FEAT = 'feat_{}.scp'.format(TIME)
DUMP_TEXT = 'text_{}'.format(TIME)
FEAT_FORMAT = r'\s?(.+?)\s+(.+?\.wav)'
intMap = {np.dtype('int8') : (0x7f, -0x80),
np.dtype('int16') : (0x7fff, -0x8000),
np.dtype('int32') : (0x7fffffff, -0x8000000),
np.dtype('int64') : (0x7fffffffffffffff, -0x8000000000000000)}
def noise_reduct(args, filePath, dumpPath):
sampleRate, musicData = wavfile.read(filePath)
dataType = np.dtype('int16')
    musicData.dtype = dataType # FIXME: the dtype of the array returned by wavfile.read may be wrong
if args.debug:
print(min(musicData), max(musicData), intMap[dataType][0] + 1)
if dataType in intMap:
musicData = musicData / (intMap[dataType][0] + 1)
if args.debug:
print(min(musicData), max(musicData))
newData = wienerScalart(musicData, sampleRate)
if dataType in intMap:
if args.debug:
print(min(newData), max(newData))
newData = newData * (intMap[dataType][0])
newData = newData.astype(dataType)
if args.debug:
print(max(newData), min(newData))
wavfile.write(dumpPath, sampleRate, newData)
def main(args):
if args.feat is None or args.text is None:
print('lack of feat file or text file')
return
if os.path.abspath(args.dumpFeat) != args.dumpFeat:
args.dumpFeat = os.path.join(DUMP_DIR, args.dumpFeat)
if os.path.abspath(args.dumpText) != args.dumpText:
args.dumpText = os.path.join(DUMP_DIR, args.dumpText)
if not os.path.exists(DUMP_DIR):
os.makedirs(DUMP_DIR)
with open(args.feat, 'r', encoding='utf8') as f:
dataPairs = re.findall(FEAT_FORMAT, f.read())
with open(args.dumpFeat, 'w', encoding='utf8') as f:
for i in tqdm(range(len(dataPairs))):
dataPair = dataPairs[i]
pathList = os.path.split(dataPair[1])
dumpPath = os.path.join(args.dumpDir, pathList[-1])
f.write('{} {}\n'.format(dataPair[0], dumpPath))
noise_reduct(args, dataPair[1], dumpPath)
with open(args.text, 'r', encoding='utf8') as fin:
with open(args.dumpText, 'w', encoding='utf8') as fout:
fout.write(fin.read())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--feat', type=str, default=None, help='feat file path')
parser.add_argument('-t', '--text', type=str, default=None, help='text file path')
parser.add_argument('-dd', '--dumpDir', type=str, default=DUMP_DIR, help='the directory where holds new .wav files')
parser.add_argument('-df', '--dumpFeat', type=str, default=os.path.join(DUMP_DIR, DUMP_FEAT), help='dump feat file path')
parser.add_argument('-dt', '--dumpText', type=str, default=os.path.join(DUMP_DIR, DUMP_TEXT), help='dump text file path')
    parser.add_argument('-n', '--noiseLength', type=float, default=0.25, help='the noise time length at the beginning of the audio')
parser.add_argument('-db', '--debug', action='store_true', help='print debug message')
args = parser.parse_args()
main(args) | 40.930233 | 132 | 0.65142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 560 | 0.157835 |
2696d944b45b7b26bd7dbbe253779f41871a415a | 7,779 | py | Python | islandGen.py | Popcorn05/IslandGen | a06821c1db8f33befb1fb3db32fd2e18d323a23a | [
"MIT"
] | null | null | null | islandGen.py | Popcorn05/IslandGen | a06821c1db8f33befb1fb3db32fd2e18d323a23a | [
"MIT"
] | null | null | null | islandGen.py | Popcorn05/IslandGen | a06821c1db8f33befb1fb3db32fd2e18d323a23a | [
"MIT"
] | null | null | null | #Import libraries
import random
import os
import noise
import numpy
import math
import sys
from chunks import Chunks as chk
from PIL import Image
import subprocess
from scipy.misc import toimage
import threading
random.seed(os.urandom(6))
#Delete old chunks
filelist = [ f for f in os.listdir("world/") if f.endswith(".chunk") ] #Delete previous world files
for f in filelist:
os.remove(os.path.join("world/", f))
#Functions
def percentChance(chance):
n = random.randrange(101)
if (100 - n) < chance:
return(True)
else:
return(False)
def mapVal(inp, inpMin, inpMax, outMin, outMax):
return (inp - inpMin) * (outMax - outMin) / (inpMax - inpMin) + outMin
def createCircleGrad(gridSize): #Obsolete
#Create circular gradient (Obsolete)
center_x, center_y = gridSize // 2, gridSize // 2 #Define centre
circle_grad = numpy.zeros((gridSize,gridSize)) #Create array
for y in range(gridSize): #Loop array
for x in range(gridSize):
distx = abs(x - center_x) #Get distance from centre on x and y
disty = abs(y - center_y)
dist = math.sqrt(distx*distx + disty*disty) #Get the actual distance from centre (pythag)
circle_grad[y][x] = dist
max_grad = numpy.max(circle_grad)
circle_grad = circle_grad / max_grad #This is some weird math that I don't quite understand but it works
circle_grad -= 0.5
circle_grad *= 2.0
circle_grad = -circle_grad
for y in range(gridSize): #More weird math, I think its just amplifying anything that is above 0
for x in range(gridSize):
if circle_grad[y][x] > 0:
circle_grad[y][x] *= 20
max_grad = numpy.max(circle_grad)
circle_grad = circle_grad / max_grad #For some reason it's lowered again
return(circle_grad)
#Colours
dwaterCol = [54, 137, 245]
waterCol = [67, 146, 245]
dsandCol = [224, 214, 164]
sandCol = [247, 232, 176]
rockCol = [209, 209, 209]
grassCol = [37, 170, 77]
dgrassCol = [34, 161, 63]
treeCol = [10, 122, 42]
mountCol = [74, 62, 36]
mountRockCol = [56, 48, 30]
snowCol = [245, 254, 255]
#Control Variables
a = sys.argv
if len(a) > 1:
gridSize = int(a[1])
scale = float(a[2])
octaves = int(a[3])
persistance = float(a[4])
lacunarity = float(a[5])
thres = float(a[6])
else:
gridSize = 1024 #Side length
scale = 250.0
octaves = 6
persistance = 0.5
lacunarity = 2.0
thres = 0.08
#Generate base noise, Apply gradient
im = Image.open("gradient/circle_grad.png")
circle_grad = im.convert("L")
main = numpy.zeros((gridSize,gridSize)) #Init arrays
mainNoise = numpy.zeros_like(main)
seed = random.randint(0,200) #Gen seed
for y in range(gridSize):
for x in range(gridSize):
main[y][x] = noise.pnoise2(y/scale,x/scale,octaves=octaves,persistence=persistance,lacunarity=lacunarity,repeatx=gridSize,repeaty=gridSize,base=seed) #Set noise
mainNoise[y][x] = (main[y][x] * mapVal(circle_grad.getpixel((round((1024/gridSize)*x),round((1024/gridSize)*y))), 0, 255, -0.05, 1)) #Apply gradient to noise
if mainNoise[y][x] > 0:
mainNoise[y][x] *= 20 #Amplify
max_grad = numpy.max(mainNoise)
mainNoise = mainNoise / max_grad #Weird even out math thing
#Lay base
display = numpy.zeros((gridSize//16,gridSize//16)+(16,16)+(3,))
processed = numpy.zeros((gridSize//16,gridSize//16), dtype=bool)
passOver = numpy.zeros((gridSize//16,gridSize//16), dtype=bool)
import time
start = time.time()
for cy in range(gridSize//16):
for cx in range(gridSize//16):
print(str(cy) + " " + str(cx))
if processed[cy][cx] == False:
processed[cy][cx] = True
for y in range(16):
for x in range(16):
m = mainNoise[y + (16*cy)][x + (16*cx)] #Set iterator to value of main array and check if meets certain thresholds to set colours
if m < thres + 0.015:
m = dwaterCol
elif m < thres + 0.11:
m = waterCol
elif m < thres + 0.12:
m = dsandCol
passOver[cy][cx] = True
elif m < thres + 0.15:
m = sandCol
passOver[cy][cx] = True
elif m < thres + 0.28:
m = grassCol
passOver[cy][cx] = True
elif m < thres + 0.46:
m = dgrassCol
passOver[cy][cx] = True
elif m < thres + 0.78:
m = mountCol
passOver[cy][cx] = True
elif m < thres + 1.0:
m = snowCol
passOver[cy][cx] = True
display[cy][cx][y][x] = m
#Second pass (Natural features)
featSeed = random.randint(0,100) #Generate seed
for cy in range(gridSize//16):
for cx in range(gridSize//16):
if passOver[cy][cx] == True:
for y in range(16):
for x in range(16):
m = display[cy][cx][y][x]
p = noise.pnoise2((y + (cy * 16))/(scale/2.5),(x + (cx * 16))/(scale/2.5),octaves=10,persistence=0.55,lacunarity=1.55,repeatx=gridSize,repeaty=gridSize,base=featSeed) #Get pond noise
if all(m == grassCol) or all(m == dsandCol) or all(m == sandCol): #If light grass or beach generate pond
if p > 0.17:
if p < 0.25:
m = sandCol
elif p < 1.0:
m = waterCol
display[cy][cx][y][x] = m
#Third pass (Structures)
def addTree(arr,cx,cy,x,y,inpScale):
arr[cy][cx][y][x] = treeCol
n = y
while n < y+inpScale: #Loop through tree size (Only creates plus sign)
arr[cy][cx][min(n+1,15)][x] = treeCol
n += 1
n = y
while n > y-inpScale:
arr[cy][cx][max(n-1,0)][x] = treeCol
n -= 1
n = x
while n < x+inpScale:
arr[cy][cx][y][min(n+1,15)] = treeCol
n += 1
n = x
while n > x-inpScale:
arr[cy][cx][y][max(n-1,0)] = treeCol
n -= 1
def addRock(arr,cx,cy,x,y,inpScale,c):
arr[cy][cx][y][x] = c
arr[cy][cx][min(y+random.randint(0,1),15)][x] = c #Random whether one is placed, if 0 is gen the origin is painted over
arr[cy][cx][max(y-random.randint(0,1),0)][x] = c
arr[cy][cx][y][min(x+random.randint(0,1),15)] = c
arr[cy][cx][y][max(x-random.randint(0,1),0)] = c
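
# Note on the two helpers above (added for illustration): both draw directly into
# the chunked display array, e.g. addTree(display, cx, cy, x, y, structScale)
# paints a plus-shaped tree whose arms are clamped to the 16x16 chunk bounds by
# the min()/max() guards, while addRock scatters up to four pixels around (x, y).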
structScale = int(scale // 200)
for cy in range(gridSize//16):
for cx in range(gridSize//16):
if passOver[cy][cx] == True:
for y in range(16):
                for x in range(16): #Place rocks and trees on beach, grass and mountain
m = display[cy][cx][y][x]
if all(m == sandCol):
if percentChance(2) == True:
addRock(display,cx,cy,x,y,structScale,rockCol)
elif all(m == grassCol):
if percentChance(5) == True:
addTree(display,cx,cy,x,y,structScale)
elif all(m == dgrassCol):
if percentChance(20) == True:
addTree(display,cx,cy,x,y,structScale)
elif all(m == mountCol):
if percentChance(0.01) == True:
addRock(display,cx,cy,x,y,structScale,mountRockCol)
#Save
for cy in range(gridSize//16):
for cx in range(gridSize//16):
chk.writeChunk(cx,cy,display)
#Display
toimage(chk.readChunkArray(gridSize,display)).show() | 33.530172 | 202 | 0.549556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,013 | 0.130222 |
26978b08939270913183c7dd0c609cfa2e52874f | 4,363 | py | Python | reagent/gym/tests/test_gym_replay_buffer.py | dmitryvinn/ReAgent | f98825b9d021ec353a1f9087840a05fea259bf42 | [
"BSD-3-Clause"
] | 1,156 | 2019-10-02T12:15:31.000Z | 2022-03-31T16:01:27.000Z | reagent/gym/tests/test_gym_replay_buffer.py | dmitryvinn/ReAgent | f98825b9d021ec353a1f9087840a05fea259bf42 | [
"BSD-3-Clause"
] | 448 | 2019-10-03T13:40:52.000Z | 2022-03-28T07:49:15.000Z | reagent/gym/tests/test_gym_replay_buffer.py | dmitryvinn/ReAgent | f98825b9d021ec353a1f9087840a05fea259bf42 | [
"BSD-3-Clause"
] | 214 | 2019-10-13T13:28:33.000Z | 2022-03-24T04:11:52.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import numpy.testing as npt
from reagent.core.parameters import ProblemDomain
from reagent.gym.envs import Gym
from reagent.gym.envs.wrappers.simple_minigrid import SimpleObsWrapper
from reagent.gym.utils import create_df_from_replay_buffer
from reagent.preprocessing.sparse_to_dense import PythonSparseToDenseProcessor
from reagent.test.base.horizon_test_base import HorizonTestBase
logger = logging.getLogger(__name__)
class TestEnv(SimpleObsWrapper):
"""
Wrap Gym environment in TestEnv to save the MiniGrid's
observation, action, reward and terminal in a list so that
we can check if replay buffer is working correctly
"""
def __init__(self, env):
self.env = env
self.action_space = self.env.action_space
# mdp_id, sequence_number, state, action, reward, terminal
self.sart = []
self.mdp_id = -1
self.sequence_number = 0
def seed(self, *args, **kwargs):
return self.env.seed(*args, **kwargs)
def reset(self, **kwargs):
self.mdp_id += 1
self.sequence_number = 0
res = self.env.reset(**kwargs)
self.sart.append([self.mdp_id, self.sequence_number, res, None, None, None])
return res
def step(self, action):
res = self.env.step(action)
(
_,
_,
last_state,
last_action,
last_reward,
last_terminal,
) = self.sart[-1]
assert (
last_state is not None
and last_action is None
and last_reward is None
and last_terminal is None
)
next_state, reward, terminal, _ = res
self.sart[-1][3] = action
self.sart[-1][4] = reward
self.sart[-1][5] = terminal
self.sequence_number += 1
self.sart.append(
[self.mdp_id, self.sequence_number, next_state, None, None, None]
)
return res
class TestGymReplayBuffer(HorizonTestBase):
def test_create_df_from_replay_buffer(self):
env_name = "MiniGrid-Empty-5x5-v0"
env = Gym(env_name=env_name)
state_dim = env.observation_space.shape[0]
# Wrap env in TestEnv
env = TestEnv(env)
problem_domain = ProblemDomain.DISCRETE_ACTION
DATASET_SIZE = 1000
multi_steps = None
DS = "2021-09-16"
# Generate data
df = create_df_from_replay_buffer(
env=env,
problem_domain=problem_domain,
desired_size=DATASET_SIZE,
multi_steps=multi_steps,
ds=DS,
shuffle_df=False,
)
self.assertEqual(len(df), DATASET_SIZE)
# Check data
preprocessor = PythonSparseToDenseProcessor(list(range(state_dim)))
for idx, row in df.iterrows():
df_mdp_id = row["mdp_id"]
env_mdp_id = str(env.sart[idx][0])
self.assertEqual(df_mdp_id, env_mdp_id)
df_seq_num = row["sequence_number"]
env_seq_num = env.sart[idx][1]
self.assertEqual(df_seq_num, env_seq_num)
df_state = preprocessor.process([row["state_features"]])[0][0].numpy()
env_state = env.sart[idx][2]
npt.assert_array_equal(df_state, env_state)
df_action = row["action"]
env_action = str(env.sart[idx][3])
self.assertEqual(df_action, env_action)
df_terminal = row["next_action"] == ""
env_terminal = env.sart[idx][5]
self.assertEqual(df_terminal, env_terminal)
if not df_terminal:
df_reward = float(row["reward"])
env_reward = float(env.sart[idx][4])
npt.assert_allclose(df_reward, env_reward)
df_next_state = preprocessor.process([row["next_state_features"]])[0][
0
].numpy()
env_next_state = env.sart[idx + 1][2]
npt.assert_array_equal(df_next_state, env_next_state)
df_next_action = row["next_action"]
env_next_action = str(env.sart[idx + 1][3])
self.assertEqual(df_next_action, env_next_action)
else:
del env.sart[idx + 1]
| 33.821705 | 86 | 0.60165 | 3,824 | 0.876461 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.121018 |
26982c5744b84289c1df298a112687956cf70fd9 | 515 | py | Python | Dataset/Leetcode/train/46/372.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/46/372.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/46/372.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution(object):
def XXX(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def dfs(res, state, n):
if n == 0:
res.append(list(state))
return
for a in nums:
if a not in state:
state.append(a)
dfs(res, state, n-1)
state.pop()
res = []
dfs(res, [], len(nums))
return res
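
# Illustrative example: for nums = [1, 2, 3] the method returns all six
# permutations, [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]],
# in the order produced by the depth-first traversal above.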
| 23.409091 | 40 | 0.365049 | 513 | 0.996117 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.149515 |
269836dc4bbab3746b4c28419c5eabb4308cb54c | 37 | py | Python | parser/__init__.py | jbudis/dante | 90177c33825d5f9ce3fba5463092fbcf20b72fe2 | [
"Apache-2.0"
] | 4 | 2018-09-28T14:50:47.000Z | 2021-08-09T12:46:12.000Z | parser/__init__.py | jbudis/dante | 90177c33825d5f9ce3fba5463092fbcf20b72fe2 | [
"Apache-2.0"
] | 6 | 2019-01-02T13:08:31.000Z | 2021-03-25T21:45:40.000Z | parser/__init__.py | jbudis/dante | 90177c33825d5f9ce3fba5463092fbcf20b72fe2 | [
"Apache-2.0"
] | 1 | 2017-12-12T10:38:26.000Z | 2017-12-12T10:38:26.000Z | from parser.readfile import ReadFile
| 18.5 | 36 | 0.864865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2698d48d436f6968997ba06d73587a502b5f3874 | 535 | py | Python | main.py | Cynicism-lab/hhu-daily-health-common | 1959beee61f2895166ac0be92b5817cbe278ef51 | [
"MIT"
] | 14 | 2021-02-03T14:38:15.000Z | 2022-02-05T08:48:41.000Z | main.py | zhang-zimin/nanwenguidaka | e426ecee8758d70b20cf2a77dc87a6f949196927 | [
"MIT"
] | 4 | 2021-04-15T15:14:29.000Z | 2022-03-15T02:29:04.000Z | main.py | zhang-zimin/nanwenguidaka | e426ecee8758d70b20cf2a77dc87a6f949196927 | [
"MIT"
] | 52 | 2021-02-03T14:38:17.000Z | 2022-03-29T09:19:12.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
from datetime import timezone
from datetime import timedelta
from datetime import datetime
import hhu
import os
# In[2]:
utc_time = datetime.utcnow().replace(tzinfo=timezone.utc)
sh_tz = timezone(timedelta(hours=8),name='Asia/Shanghai')
beijing_now = utc_time.astimezone(sh_tz)
datestr = datetime.strftime(beijing_now,'%F')
timestr = datetime.strftime(beijing_now,'%H:%M:%S')
year = datestr[0:4]
month = datestr[5:7]
day = datestr[8:10]
time = timestr
hhu.hhu()
| 17.258065 | 57 | 0.736449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.151402 |
269a18ede77adaabe0e01c16057d910f3519fa89 | 30,573 | py | Python | depparse.py | viadee/eric | 680508cc5bf2d322638c6cf2c466a06c3c1f33d4 | [
"BSD-3-Clause-Clear",
"Apache-2.0",
"CC0-1.0",
"BSD-3-Clause"
] | 4 | 2020-04-07T07:05:02.000Z | 2020-09-23T14:23:16.000Z | depparse.py | viadee/eric | 680508cc5bf2d322638c6cf2c466a06c3c1f33d4 | [
"BSD-3-Clause-Clear",
"Apache-2.0",
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | depparse.py | viadee/eric | 680508cc5bf2d322638c6cf2c466a06c3c1f33d4 | [
"BSD-3-Clause-Clear",
"Apache-2.0",
"CC0-1.0",
"BSD-3-Clause"
] | 1 | 2021-12-27T03:00:44.000Z | 2021-12-27T03:00:44.000Z | import pickle
import stanza
import test_stuff
from datetime import datetime
from dictionary import cd, dictionary, nlp_dictionary, ph_outcome, ph_key, ph_value, ph_dvalue, ph_subject
import eric_nlp
#does not do preprocessing
def depparse(sentences, pipeline):
output = ["OUTPUT:\n"]
roots = dict()
for sentence in sentences:
print(f"parsing sentence: '{sentence}'")
doc = pipeline(sentence)
#get max_width for pretty printing
max_width_word = 0
for word in sentence.split():
width = len(word)
if width > max_width_word:
max_width_word = width
append_data = []
for sent in doc.sentences:
sentence_words = ""
root = ""
max_width_deprel = 0
for word in sent.words:
if len(word.deprel) > max_width_deprel:
max_width_deprel = len(word.deprel)
for word in sent.words:
if word.head == 0:
root = word.text
append_data.append(f'id: {word.id}\tword: {word.text.ljust(max_width_word)}\tlemma: {word.lemma.ljust(max_width_word)}\tupos: {word.upos}\txpos: {word.xpos.ljust(3)}\thead id: {word.head}\thead: {sent.words[word.head-1].text.ljust(max_width_word) if word.head > 0 else "root".ljust(max_width_word)}\tdeprel: {word.deprel.ljust(max_width_deprel)}\tfeats: {word.feats}')
sentence_words += f"{word.text} "
#console and/or txt-file output
append_data.append("="*47 + "\n")
output.append(sentence_words)
output.append(f"Root: {root}")
output.extend(append_data)
if root.lower() in roots.keys():
roots[root.lower()] += 1
else:
roots[root.lower()] = 1
roots = {key: val for key, val in sorted(roots.items(), key=lambda item: item[1], reverse=True)}
print(output)
return output, roots
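
# Usage sketch (illustrative; the sentence text is an assumption):
#   sp = init_stanza("en")
#   out_lines, root_counts = depparse(["what if age was 20"], sp)
# root_counts maps each lowercased sentence root to how often it occurred.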
def init_stanza(lang):
print(f"loading stanza pipeline for language '{lang}'")
stanza.download(lang)
stanza_pipeline = stanza.Pipeline(lang=lang, processors="tokenize,mwt,pos,lemma,depparse")
print("successfully loaded stanza pipeline")
return stanza_pipeline
def init_stanza_from_pickle(filename):
with open(filename, "rb") as f:
stanza_pipeline = pickle.load(f)
return stanza_pipeline
'''
creates a matrix with:
rows: roots
columns: function ids (plus "none")
cells: how often that root occurs for that function
'''
def create_roots_matrix(roots, file_name, csv_sep = ";", empty_cell = "0"):
file_lines = []
first_line = f"{empty_cell}"
first = True
for root, functions in roots.items():
line = f"{root}"
tmp = [x["id"] for x in nlp_dictionary]
tmp.append("none")
for fct_id in tmp:
if first:
first_line += f"{csv_sep}{fct_id}"
if fct_id in functions.keys():
count = functions[fct_id]
else:
count = empty_cell
line += f"{csv_sep}{count}"
if first:
file_lines.append(first_line)
first = False
file_lines.append(line)
test_stuff.list_to_file(file_lines, file_name)
#all_roots is a dict from words to another dict from function ids to ints
#roots is expected to be a dict from words to ints
def extend_roots(all_roots, roots, fct_id):
for k, v in roots.items():
if k in all_roots.keys():
if fct_id in all_roots[k].keys():
print(f"DUPLICATE FUNCTION IN ROOTS: {fct_id} ; {k} ; {v}")
else:
all_roots[k][fct_id] = v
else:
print(f"adding new word: {k} from {fct_id} ;; {v}")
all_roots[k] = {fct_id: v}
return all_roots
#attempt 1: how many nodes do they share, regardless of node depth
def tree_compare_bad(tree1, tree2):
if len(tree1.words) < len(tree2.words):
small = tree1
big = tree2
else:
small = tree2
big = tree1
in_common = 0
used_ids = []
for leaf_s in small.words:
found_leaf_id = ""
for leaf_b in big.words:
if leaf_s.deprel == leaf_b.deprel and leaf_b.id not in used_ids:
found_leaf_id = leaf_b.id
break
if found_leaf_id:
in_common += 1
used_ids.append(found_leaf_id)
percentage = in_common * 100.0 / len(small.words)
return in_common, percentage
def tree_compare_bad_again(tree1, tree2):
bad_id = "0"
if len(tree1.words) < len(tree2.words):
small = tree1
big = tree2
else:
small = tree2
big = tree1
similar_counter = 0
used_ids = []
for word_b in big.words:
found_id = bad_id
for word_s in small.words:
if word_b.lemma == word_s.lemma and word_b.deprel == word_s.deprel and word_b.head == word_s.head and word_s.id not in used_ids:
found_id = word_s.id
if found_id != bad_id:
similar_counter += 1
used_ids.append(found_id)
percentage = similar_counter * 100.0 / len(small.words)
return similar_counter, percentage
#a tree is a list of dictionarys. every dictionary represents a word of the sentence. key-value-pairs are the attributes of that word.
def tree_compare(t1, t2):
return tree_compare_bad_again(t1, t2)
def get_word(wanted_id, words):
if wanted_id == "0":
return "root"
for word in words:
if word.id == wanted_id:
return word
return ""
'''
takes a tuple as in "deprel" in dictionary.nlp_dictionary.
returns list of tuples. if master_tuple was a simple tuple, the list only contains that tuple
if master_tuple has lists as elements, these get split so that every tuple in the returned list has only strings as elements
Example:
in: (["predict", "divinate"], "obl", ["data", "person"])
out: [
("predict", "obl", "data"),
("predict", "obl", "person"),
("divinate", "obl", "data"),
("divinate", "obl", "person")
]
note: the returned list has x elements, with x being the product of all three lengths (here 2*1*2 = 4)
'''
def generate_sub_tuples(master_tuple):
ret_val = []
element_0 = master_tuple[0] if isinstance(master_tuple[0], list) else [master_tuple[0]]
element_1 = master_tuple[1] if isinstance(master_tuple[1], list) else [master_tuple[1]]
element_2 = master_tuple[2] if isinstance(master_tuple[2], list) else [master_tuple[2]]
for e_0 in element_0:
for e_1 in element_1:
for e_2 in element_2:
tpl = (e_0, e_1, e_2)
ret_val.append(tpl)
return ret_val
'''
takes a word object (a depparse word) and a string element from a tuple (not a list element; use generate_sub_tuples() first)
checks if dictionary.cd (by default "#") is in tuple_element. If so, it extracts which attribute (i.e. in front of "#") is wanted.
then returns the corresponding attribute value of word_object and the part right of "#" in tuple_element
if "#" was not in tuple_element, it returns tuple_element as it is and the default attribute of word_object
also needs an eric, to invoke replacement of placeholders
'''
def get_comparison_attributes(word_object, tuple_element, eric, default="text"):
#if word_object is a root_word, it will be a dictionary, as root words don't exist and are constructed synthetically in the function get_mother()
if isinstance(word_object, dict):
if cd in tuple_element:
splitted = tuple_element.split(cd)
ret_word_attribute = word_object[splitted[0]]
ret_tuple_attribute = splitted[1]
else:
ret_word_attribute = word_object[default]
ret_tuple_attribute = tuple_element
else:
if cd in tuple_element:
splitted = tuple_element.split(cd)
ret_word_attribute = getattr(word_object, splitted[0])
ret_tuple_attribute = splitted[1]
else:
ret_word_attribute = getattr(word_object, default)
ret_tuple_attribute = tuple_element
ret1, ret2 = replace_depparse_placeholders(ret_word_attribute, ret_tuple_attribute, eric)
return ret1, ret2
'''
word_attribute should be from the user input, tuple_attribute one element of a tuple from the depparse templates in dictionary.nlp_dictionary
it's called attribute, not element because it should only be called at the end of get_comparison_attributes() which extracts attributes from word objects (e.g. the lemma, upos or deprel, etc.)
word_attribute needs to be included even though it will not have any placeholders. In the case that "<outcome>" is in tuple_attribute, word_attribute needs to be checked
if it is a different form of the possible outcomes. This gets checked via the eric.model_columns["class"]["phrasings"] dict which has all possible outcomes as keys (here "survived" and "died")
and stores different forms of those as the values of that dict as list. Here ["survive", "survives"] and ["die", "dies"].
'''
def replace_depparse_placeholders(word_attribute, tuple_attribute, eric):
ret_word_attribute, ret_tuple_attribute = word_attribute, tuple_attribute
if ret_tuple_attribute == ph_outcome:
if eric.placeholders[ph_outcome]:
ret_tuple_attribute = eric.placeholders[ph_outcome]
elif ret_tuple_attribute == ph_key:
is_in_placeholders = False
for k in eric.placeholders[ph_key].keys():
if k.lower() == ret_word_attribute.lower():
is_in_placeholders = True
break
if is_in_placeholders:
ret_tuple_attribute = ret_word_attribute
elif ret_tuple_attribute == ph_value:
is_in_placeholders = False
for v in eric.placeholders[ph_key].values():
if v and v.lower() == ret_word_attribute.lower():
is_in_placeholders = True
break
if is_in_placeholders:
ret_tuple_attribute = ret_word_attribute
return ret_word_attribute, ret_tuple_attribute
#looks for the head/mother node of word in tree and returns it (or a representing dictionary if head is root).
#returns dict since root is not really represented in the word objects of depparse
def get_mother(word, tree):
if word.head == 0:
return {
"id": "0",
"text": "root",
"lemma": "root",
"upos": None,
"xpos": None,
"head": None,
"deprel": None
}
else:
return tree.words[word.head-1]
#takes a depparse tree t and goes through the depparse tree templates in dictionary.nlp_dictionary
#returns a list of tuples (fct_id, tree template) with a tuple for every found match.
def get_matching_dictionary_trees(tree, eric):
mother_index = 0
deprel_index = 1
child_index = 2
all_matches = []
# test_stuff.logger(f"{tab*1}DADICT: {nlp_dictionary}")
for d in nlp_dictionary:
#test_stuff.logger(f"/////: {d['id'].upper()} ://///")
for depparse_template in d["depparse"]:
#test_stuff.logger(f"{tab*1}template: {depparse_template}")
used_words = [] #already matched words. saved to not use them twice
            template_match = True #stays true unless at least one tuple in the template does not match
match_sub_tuples = [] #stores the sub_tuples that matched in this template. So when a total match is achieved, the used subtuples can be viewed
#if a depparse template is an empty list, it would always match, so skip it. this should never happen, if dictionary was created properly, but just to be safe
if len(depparse_template) == 0:
continue
for template_tuple in depparse_template:
#test_stuff.logger(f"{tab*2}tuple: {template_tuple}")
tuple_correct = False
sub_tuples = generate_sub_tuples(template_tuple)
for sub_tuple in sub_tuples:
#test_stuff.logger(f"{tab*3}sub_tuple: {sub_tuple[mother_index]}, {sub_tuple[deprel_index]}, {sub_tuple[child_index]}")
sub_tuple_correct = False
for word in tree.words:
if word.id in used_words:
#test_stuff.logger(f"{tab*4}{word.text.upper()}: >>>skipped<<<")
continue
#test_stuff.logger(f"{tab*4}{word.text.upper()}: id: {word.id} :: text: {word.text} :: lemma: {word.lemma} :: upos: {word.upos} :: xpos: {word.xpos} :: feats: {word.feats} :: head: {word.head} :: deprel: {word.deprel} :: misc: {word.misc}")
#the following get generated over function to use different attributes of the words (see function for more info)
child_val, tuple_child_val = get_comparison_attributes(word, sub_tuple[child_index], eric)
deprel_val, tuple_deprel_val = get_comparison_attributes(word, sub_tuple[deprel_index], eric, default="deprel")
#test_stuff.logger(f"{tab*5}vals: {child_val},{tuple_child_val}, {deprel_val}, {tuple_deprel_val}")
child_matched = True if child_val.lower() == tuple_child_val.lower() else False
deprel_matched = True if deprel_val.lower() == tuple_deprel_val.lower() else False
#just to not look up the mother if the match already failed
if child_matched and deprel_matched:
mother = get_mother(word, tree)
mother_val, tuple_mother_val = get_comparison_attributes(mother, sub_tuple[mother_index], eric)
mother_matched = True if mother_val.lower() == tuple_mother_val.lower() else False
else:
mother_matched = False
#if all three categories are a match, the subtuple is a match
if child_matched and deprel_matched and mother_matched:
used_words.append(word.id)
sub_tuple_correct = True
break #no need to match the other words. match next tuple instead
#if one of the sub_tuples is correct it's a match for the whole tuple, so no need to match the others
if sub_tuple_correct:
match_sub_tuples.append(sub_tuple)
tuple_correct = True
break
#if one tuple in a template does not match, the whole template does not match, so no need to go on
if not tuple_correct:
template_match = False
break
#collect all template matches
if template_match:
tmp = (d["id"], match_sub_tuples)
all_matches.append(tmp)
#returns a list of tuples with two elements each: 1st fct_id, 2nd the tree template that matched, i.e. a list of tuples
#largest template tree will be element 0
if eric.prioritise_negation:
ret_val = prioritise_negation(all_matches)
else:
ret_val = sorted(all_matches, key=lambda item: len(item[1]), reverse=True)
return ret_val
#expects a list of tuples with two elements each: 1st fct_id, 2nd the tree template that matched, i.e. a list of tuples
#that list should represend a ranking from most likely (lowest index) to least likey (highest index)
#it then goes through all templates and sorts them into templates that contain a lemma:not and and those that do not
#then creates a ranking again for both, separately
#then, both lists get concatenated with the negated tuples at the lower indices. So a short but negated template will have priority over a longer, non-negated one
#returns that list
def prioritise_negation(templates_list):
negated_tuples = []
non_negated_tuples = []
for template in templates_list:
negated = False
for tpl in template[1]:
head = tpl[0]
child = tpl[2]
if isinstance(head, list):
if f"lemma{cd}not" in head or "not" in head:
negated = True
break
else:
if f"lemma{cd}not" == head or "not" == head:
negated = True
break
if isinstance(child, list):
if f"lemma{cd}not" in child or "not" in child:
negated = True
break
else:
if f"lemma{cd}not" == child or "not" == child:
negated = True
break
if negated:
negated_tuples.append(template)
else:
non_negated_tuples.append(template)
negated_tuples = sorted(negated_tuples, key=lambda item: len(item[1]), reverse=True)
non_negated_tuples = sorted(non_negated_tuples, key=lambda item: len(item[1]), reverse=True)
ranked_list = negated_tuples + non_negated_tuples
return ranked_list
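
# Illustrative consequence of this ordering: a two-tuple template containing
# "lemma#not" is ranked above a longer three-tuple template without negation,
# because all negated templates are concatenated first regardless of length.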
#t is a tree like in tree_compare(t1, t2)
def dictionary_templates_test(tree):
#indices of tuples in templates
tmother = 0 #mother node
tdeprel = 1 #dependency relation
tchild = 2 #child node
root = ""
for x in tree.words:
if x.head == 0:
root = x
break
if not root:
test_stuff.logger("no root found:")
test_stuff.logger(tree.words)
#test_stuff.logger("Testing Tree:")
for d in nlp_dictionary:
test_stuff.logger(f"MATCHING TO {d['id']}")
if "depparse" not in d.keys():
continue
for dep_template in d["depparse"]:
correct_tupel_counter = 0 #if correct match, correct_tupel_counter should be equal to the number of elements in dep_template
#test_stuff.logger(f"\t\t template {template_counter}")
for tup in dep_template:
found_mother = False
found_child = False
found_deprel = False
#test_stuff.logger(f"\t\t\t{tup}")
child_is_list = True if isinstance(tup[tchild], list) else False
deprel_is_list = True if isinstance(tup[tdeprel], list) else False
if tup[tmother] == "root":
root_correct = False
if child_is_list:
if root.text in tup[tchild]:
root_correct = True
elif root.text == tup[tchild]:
root_correct = True
#else:
#test_stuff.logger(f"\t\t\t\t {root.text} != {tup[tmother]}")
if root_correct:
found_mother = True
found_child = True
found_deprel = True
else:
#see if you find current tuple in t
for word in tree.words:
#check if word is a child node
if child_is_list:
if word.text in tup[tchild]:
found_child = True
else:
if word.text == tup[tchild]:
found_child = True
#check if mother and deprel match
#mother is a dictionary, just like a word
mother = get_word(f"{word.head}", tree.words)
if isinstance(mother, str):
mother_text = mother
else:
mother_text = mother.text
found_mother = True
if mother_text == tup[tmother]:
#check if deprel matches
if deprel_is_list:
if word.deprel in tup[tdeprel]:
found_deprel = True
else:
if word.deprel == tup[tdeprel]:
found_deprel = True
if found_mother and found_deprel and found_child:
break
if found_mother and found_deprel and found_child:
#test_stuff.logger("\t\t\t\t\t Tupel correct!")
correct_tupel_counter += 1
if correct_tupel_counter == len(dep_template):
#test_stuff.logger(f"///Found match ({d['id']}): {dep_template}\n")
return f"///Found match: {dep_template}\n"
            else:
                pass  # test_stuff.logger(f"NO MATCH. mother: {found_mother}, deprel: {found_deprel}, child: {found_child}")
'''
("root", "root", "predicted"),
("predicted", "nsubj:pass", f"upos{category_tag}NOUN")
'''
def sentence_similarity(sent1, sent2, pipeline):
t1 = pipeline(sent1).sentences[0]
t2 = pipeline(sent2).sentences[0]
total, percent = tree_compare(t1, t2)
return total, percent
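
# Usage sketch (illustrative sentences; requires a loaded stanza pipeline):
#   sp = init_stanza("en")
#   shared, pct = sentence_similarity("why did you predict this",
#                                     "why was this predicted", sp)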
def print_depparsed_sentences(sentences, language="en", pipeline=""):
if not pipeline:
pipeline = init_stanza(language)
if isinstance(sentences, str):
sentences = [sentences]
output, _ = depparse(sentences, pipeline)
for i, o in enumerate(output):
print(f"{i}: {o}")
def debug_depparsed_sentences_to_console():
pipeline = init_stanza("de")
eric = eric_nlp.Eric_nlp()
sentence_list = ["Used sentences:"]
print("Please provide input:")
while True:
# for usr_in in whiletrue:
usr_in = input()
if not usr_in:
print("no input given")
continue
elif usr_in.lower() in ["exit", "exit()", "quit", "quit()", "end", "end()"]:
break
sentence_list.append(usr_in)
preprocessed = eric.preprocessing(usr_in, "usr_input")
print(f"preprocessed: {preprocessed}")
out, _ = depparse([preprocessed], pipeline)
root = ""
for o in out:
if "id: 0" in o:
finder = "word: "
ender = "lemma: "
index = o.find(finder) + len(finder)
index_end = o.find(ender)
root = o[index:index_end].strip()
if not root:
root = "root not found"
print(f"Root: {root}")
for o in out[3:]:
print(o)
print("Goodbye")
for sent in sentence_list:
print(sent)
def main():
    debug_depparsed_sentences_to_console()
quit()
input_language = "en"
stanza_pipeline = init_stanza(input_language)
eric = eric_nlp.Eric_nlp()
input_path = "data\\"
input_files = [f"{input_path}umfrage_input_{x}_cleaned.txt" for x in range(1,5)]
input_files.append(f"{input_path}manually_added.txt")
output_path = "output\\depparse\\data_analysis\\"
roots_out_file = f"{output_path}roots.csv"
input_accumulated = test_stuff.merge_input_files(input_files)#{x["id"]: x["key_sentences"] for x in nlp_dictionary}
input_accumulated = list(set(input_accumulated))
input_accumulated_as_dict = {}
for x in input_accumulated:
if x[0] in input_accumulated_as_dict.keys():
input_accumulated_as_dict[x[0]].append(x[1])
else:
input_accumulated_as_dict[x[0]] = [x[1]]
all_roots = dict() #keys are root words and the values are dicts where the keys are the function_id
for fct_id, unpreprocessed_sentences in input_accumulated_as_dict.items():
preprocessed_sentences = [eric.preprocessing(x, "usr_input") for x in unpreprocessed_sentences]
dep_output, roots = depparse(preprocessed_sentences, stanza_pipeline)
preface = [f"{v}: {k}" for k, v in roots.items()]
#extend all_roots
all_roots = extend_roots(all_roots, roots, fct_id)
all_output = ["Used Input:"] + input_files + ["\n"] + preface + dep_output
for o in all_output:
print(o)
create_roots_matrix(all_roots, roots_out_file, empty_cell="")
print(all_roots)
#for infi in input_files:
# input_data =
# test_input = [x[1] for x in test_stuff.read_input_from_file(f[0])]
# test_output = depparse("en", test_input)
# test_stuff.list_to_file(test_output, f[1])
def read_sentences_from_output(output_file):
stop_words = ["OUTPUT:", "Root:", "id:"]
file_lines = test_stuff.get_file_lines(output_file)
sentences = list()
for line in file_lines:
if line != "" and not line[0].isdigit() and line[0] != "=":
splitted = line.split()
if splitted[0] not in stop_words:
sentences.append(line)
return list(set(sentences))
'''
if you thought of new sentences while analysing the output, depparsed them over the debug console and included them in the output_file,
this function will help. It reads your originally used input again, then the output file, compares sentences and stores all new ones, i.e. the manually analysed sentences, in a new input_file.
Also, it will then overwrite the output file to update the root counts.
'''
def update_depparse_output(input_files, output_file_overwrite, passed_fct_id, output_file_new_sentences="data\\manually_added.txt", sp=""):
#input_accumulated.extend([("why", "Why did you predict this outcome?"), ("why", "Why did you predict the outcome?")])
#1 get all three as dictionaries {passed_fct_id: list of sentences}
#1.1 originally used input
lines = test_stuff.merge_input_files(input_files)
lines = list(set(lines))
input_accumulated = convert_input_tuples_to_dict(lines)
#1.2 modified output
lines = read_sentences_from_output(output_file_overwrite)
output_accumulated = {passed_fct_id: lines}
#1.3 existing manually added sentences
lines = test_stuff.merge_input_files([output_file_new_sentences])
lines = list(set(lines))
manual_accumulated = convert_input_tuples_to_dict(lines)
#2 look for sentences in output_accumulated, that do not exist in input_accumulated and append these to manual_accumulated if they not already exist there
eric = eric_nlp.Eric_nlp()
for fct_id, sentences in output_accumulated.items():
if fct_id in input_accumulated.keys():
preprocessed_inputs = [eric.preprocessing(x, "usr_input") for x in input_accumulated[fct_id]]
for sent in sentences:
sentence = eric.preprocessing(sent, "usr_input")
if sentence not in preprocessed_inputs:
if fct_id in manual_accumulated.keys():
if sentence not in manual_accumulated[fct_id]:
manual_accumulated[fct_id].append(sentence)
else:
manual_accumulated[fct_id] = [sentence]
else:
#all are new sentences
if fct_id in manual_accumulated.keys():
if sentence not in manual_accumulated[fct_id]:
manual_accumulated[fct_id].append(sentence)
else:
manual_accumulated[fct_id] = [sentence]
#4 write manual_accumulated to data\\manually_added.txt (or sth else, if argument was given)
out= []
for fct_id, sentences in manual_accumulated.items():
out.append(f"[{fct_id}]")
out.extend(sentences)
out.append("")
test_stuff.list_to_file(out, output_file_new_sentences)
#5 update the output file
#5.1 get all sentences for fct_id from manually_added.txt and the input files
if not sp:
sp = init_stanza("en")
all_sentences = []
if passed_fct_id in manual_accumulated.keys():
all_sentences.extend(manual_accumulated[passed_fct_id])
if passed_fct_id in input_accumulated.keys():
all_sentences.extend(input_accumulated[passed_fct_id])
all_sentences = [eric.preprocessing(x, "usr_input") for x in all_sentences]
out, roots = depparse(all_sentences, sp)
preface = [f"{v}: {k}" for k, v in roots.items()]
all_out = preface + out
test_stuff.list_to_file(all_out, output_file_overwrite)
def convert_input_tuples_to_dict(input_tuples):
ret_val = dict()
for fct_id, sentence in input_tuples:
if fct_id in ret_val.keys():
if sentence not in ret_val[fct_id]:
ret_val[fct_id].append(sentence)
else:
ret_val[fct_id] = [sentence]
return ret_val
def test_some_sentences():
sp = init_stanza("en")
sentences = []
words = ["more", "less", "lower", "higher", "greater"]
more = [f"what if fare was {x} than 300 instead" for x in words]
sentences.extend(more)
more = [f"what if age was {x} than 44 instead" for x in words]
sentences.extend(more)
more = [f"what if age was {x} 44" for x in ["over", "under"]]
sentences.extend(more)
more = [f"what if age was {x}" for x in words]
sentences.extend(more)
out, _ = depparse(sentences, sp)
for o in out:
print(o)
if __name__ == "__main__":
#main()
debug_depparsed_sentences_to_console()
quit()
lines = test_stuff.read_input_from_file("data\\wrongly_accused.txt")
sentences = [x[1] for x in lines]
for s in sentences:
print(s)
print("//////////")
sp = init_stanza("en")
out, root = depparse(sentences, sp)
test_stuff.list_to_file(out, "output\\depparse\\wrongly_accused_out.txt")
quit()
#test_some_sentences()
for d in nlp_dictionary:
print(d["id"])
try:
x = d['depparse'][0]
print("\t---")
except Exception as e:
print("\tNOTHING")
sp = init_stanza("en")
input_files = [f"data\\umfrage_input_{x}_cleaned.txt" for x in range(1,5)]
fct = "whatif-gl"
update_depparse_output(input_files, f"output\\depparse\\{fct}.txt", fct, "data\\manually_added.txt", sp=sp)
| 40.387054 | 384 | 0.603735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,768 | 0.319498 |
269ad28a75a19ae401ecbe624997f530c5904d6d | 706 | py | Python | ch03/simple_cbow_pytorch.py | tomy-0000/deep-learning-from-scratch-2 | 3d3d7fd614b8021499ffc103199be5e32622717e | [
"MIT"
] | null | null | null | ch03/simple_cbow_pytorch.py | tomy-0000/deep-learning-from-scratch-2 | 3d3d7fd614b8021499ffc103199be5e32622717e | [
"MIT"
] | null | null | null | ch03/simple_cbow_pytorch.py | tomy-0000/deep-learning-from-scratch-2 | 3d3d7fd614b8021499ffc103199be5e32622717e | [
"MIT"
] | null | null | null | # coding: utf-8
import torch.nn as nn
class SimpleCBOW(nn.Module):
def __init__(self, vocab_size, hidden_size):
super(SimpleCBOW, self).__init__()
V, H = vocab_size, hidden_size
self.in_layer = nn.Linear(V, H, bias=False)
self.out_layer = nn.Linear(H, V, bias=False)
self.loss_layer = nn.CrossEntropyLoss()
def forward(self, contexts, target):
h0 = self.in_layer(contexts[:, 0])
h1 = self.in_layer(contexts[:, 1])
h = (h0 + h1) * 0.5
score = self.out_layer(h)
loss = self.loss_layer(score, target)
return loss
@property
def word_vecs(self):
return self.in_layer.weight.detach().numpy().T
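
# Minimal usage sketch (added for illustration; vocab_size=7 and the one-hot
# context shapes are assumptions derived from how forward() indexes
# contexts[:, 0] and contexts[:, 1]):
if __name__ == "__main__":
    import torch
    model = SimpleCBOW(vocab_size=7, hidden_size=5)
    # two examples, each with two one-hot context vectors of length 7
    contexts = torch.eye(7)[torch.tensor([[0, 2], [1, 3]])]
    target = torch.tensor([1, 2])
    print(model(contexts, target))  # scalar cross-entropy loss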
| 27.153846 | 54 | 0.609065 | 665 | 0.941926 | 0 | 0 | 89 | 0.126062 | 0 | 0 | 15 | 0.021246 |
269c16b6afd598ff0e05a59d38e14e46ebde748b | 7,814 | py | Python | modules/input_output.py | nicolasying/WordNet-Embeddings | a6a5782dca97376e487df41fb83542729f284197 | [
"MIT"
] | null | null | null | modules/input_output.py | nicolasying/WordNet-Embeddings | a6a5782dca97376e487df41fb83542729f284197 | [
"MIT"
] | null | null | null | modules/input_output.py | nicolasying/WordNet-Embeddings | a6a5782dca97376e487df41fb83542729f284197 | [
"MIT"
] | null | null | null | # coding=utf-8
#! /usr/bin/env python3.4
"""
MIT License
Copyright (c) 2018 NLX-Group
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code reads wordnet data and index files
data_file_reader(file_name, lang):
    extracts data from wordnet data files saved in the "data/input" directory
output is
1- a dictionary with
key = synsetoffsets
data = (synsetWrds, synsetConnections, synsetRelationTypes, connectedSynsetPos, gloss)
2- and offset_list
[email protected]
"""
import os, sys
import numpy as np
from progressbar import ProgressBar, Percentage, Bar
def data_file_reader(file_name, lang):
print(" Working on " + file_name)
if lang == "Dutch":
path = os.getcwd() + '/data/input/Dutch_wnet/'
elif lang == "Portuguese":
path = os.getcwd() + '/data/input/Portuguese_wnet/'
else:
path = os.getcwd() + '/data/input/English_wnet/'
fl = open(path + file_name)
src = fl.readlines()
fl.close()
file_data = {}
offset_list = []
all_word = set()
amb_word = set()
for lineNum in range(len(src)):
dataLine = src[lineNum]
if dataLine[0:2] == " ": #or " 000 " in dataLine: # comments or synset with no relations
continue
else:
synsetWrds = []
synsetConnections = []
synsetRelationTypes = []
connectedSynsetPos = []
dataLineParts = dataLine.split(" ")
wrdCnt = int(dataLineParts[3], 16)
indx = 4
for i in range(wrdCnt):
synsetWrds.append(dataLineParts[indx])
"""
if dataLineParts[indx] not in all_word:
all_word.add(dataLineParts[indx])
else:
amb_word.add(dataLineParts[indx])
"""
indx += 2
connCnt = int(dataLineParts[indx])
indx += 1
for i in range(connCnt):
synsetRelationTypes.append(dataLineParts[indx])
indx += 1
synsetConnections.append(dataLineParts[indx])
indx += 1
connectedSynsetPos.append(dataLineParts[indx])
indx += 1
# the next field is 0000 or 000
indx += 1
gloss = dataLine.split("|")[1]
gloss = gloss.replace("\n","")
gloss = gloss.replace("'","''")
data = (synsetWrds, synsetConnections, synsetRelationTypes, connectedSynsetPos, gloss)
file_data.update({dataLineParts[0]:data})
offset_list.append(dataLineParts[0])
#if dataLineParts[0] in synsetConnections:
# print(" self loop", dataLineParts[0])
#print("number of extracted words: ", len(all_word), ", ", len(amb_word), "of which are ambiguous")
return file_data, offset_list
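
# Usage sketch (illustrative; the file and language names are assumptions):
#   file_data, offsets = data_file_reader("data.noun", "English")
#   words, connections, rel_types, conn_pos, gloss = file_data[offsets[0]]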
def emb_writer(emb_matrix, word_list, dim, iter, feature_name, for_WSD, main_path):
try:
if emb_matrix == []:
print("no changes was made to the previously saved file")
else:
out_file = open(main_path + "embeddings_" + iter + ".txt", "w")
out_file.write("%d %d\n" % (len(word_list), dim))
if "pyspark" not in str(type(emb_matrix)):
if dim > len(emb_matrix[0]):
dim = len(emb_matrix[0])
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(word_list))
for i in pbar(range(len(word_list))):
if for_WSD:
wrd = word_list[i].split("\t")[0]
else:
wrd = word_list[i]
emb = ""
for j in range(dim):
emb += str(emb_matrix[i][j]) + " "
emb += "\n"
emb = emb.replace(" \n", "\n")
out_file.write(wrd + " " + emb)
else:
i = 0
for row in emb_matrix.collect():
wrd = word_list[i].split("\t")[0]
i += 1
emb = row.asDict()
out_file.write(wrd + " " + str(emb[feature_name]).replace("[","").replace("]","").replace(","," ") + "\n")
out_file.close()
print("\n-------------------------------------------------------------")
print("Vector Embeddings are created and saved in \data\output folder")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("Unexpected error:", exc_value)
def array_writer(matrix, fname, type, main_path):
try:
print (" Saving %s data into a file"%(fname))
path = main_path + fname
if type == "txt":
np.savetxt(path, matrix)
else:
np.save(path, matrix)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("Unexpected error:", exc_value)
print(" COULDN'T SAVE THE %s FILE"%(fname))
def array_loader(fname, main_path):
path = main_path + fname + ".npy"
mat_data = np.load(path)
return(mat_data)
def info_writer(dim,wrd_cnt, non_zero, for_WSD, main_path):
path = main_path + 'last_run_info'
info = open(path,"w")
info.write("dim: %d\n" % (dim[0]))
info.write("for_WSD: %s\n" % (str(for_WSD)))
info.write("wrd_cnt: %d\n" % (wrd_cnt))
info.write("non_zero: %d\n" % (non_zero))
info.close()
def info_reader(main_path):
path = main_path+'last_run_info'
info = open(path)
data = info.readlines()
info.close()
dim = data[0].split(" ")[1].replace("\n","")
for_WSD = data[1].split(" ")[1].replace("\n","")
if for_WSD == "True":
for_WSD = True
else:
for_WSD = False
wrd_cnt = data[2].split(" ")[1].replace("\n","")
non_zero = data[3].split(" ")[1].replace("\n","")
return dim, for_WSD, wrd_cnt,non_zero
def log_writer(log, description, only_one_word, only_once, equal_weight, for_WSD, accepted_rel, iter, vec_dim):
try:
log.write("Only one word from each synset: %s \n" %(only_one_word))
log.write("Only one sense of each word: %s\n" %(only_once))
log.write("Equal weight for different relation types: %s\n" %(str(equal_weight)))
log.write("Different vectors for each sense of ambiguous words: %s \n" %(str(for_WSD)))
log.write("Accepted relations: %s \n" %(str(accepted_rel)))
log.write("Random walk method (infinite or itterative): %s \n" %(iter))
log.write("Vector dimension: %d\n" % (vec_dim))
if description != "":
log.write("Description: %s\n" % (description))
log.write("\n-----------------------------\n")
except:
print(" COULDN'T UPDATE THE LOG FILE")
# ---- file: tests/test_formats.py | repo: antmicro/raviewer | license: Apache-2.0 ----
"""Module for testing formats on resources entities"""
from raviewer.src.core import (get_displayable, load_image, parse_image)
from terminaltables import AsciiTable
from raviewer.image.color_format import AVAILABLE_FORMATS
import os
import pkg_resources
import time
import pytest
@pytest.fixture
def formats():
return AVAILABLE_FORMATS
def test_all(formats):
"""Test all formats"""
print("Testing all formats, It may take a while...")
table_data = [["Format", "Passed", "Performance"]]
start_range = 800
end_range = 810
for color_format in formats.keys():
file_path = pkg_resources.resource_filename('resources',
color_format + "_1000_750")
passed_results = 0
format_performance = 0
start = time.time()
for width in range(start_range, end_range):
try:
if not os.path.exists(file_path):
break
img = load_image(file_path)
img = parse_image(img.data_buffer, color_format, width)
get_displayable(img)
passed_results += 1
            except Exception:
continue
end = time.time()
#Stats
format_performance = "{:.3f}".format(round(end - start, 3))
table_data.append([
color_format, "{}/{}".format(passed_results,
end_range - start_range),
format_performance
])
table = AsciiTable(table_data)
table.title = 'Test all formats'
print(table.table)
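# Illustrative invocation (assumption: run from the repository root so the
# pkg_resources lookups resolve):
#   pytest tests/test_formats.py -s
# The -s flag disables output capture so the AsciiTable summary is printed.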
# ---- file: kqueen_ui/utils/wrappers.py | repo: Richard-Barrett/kqueen-ui | license: MIT ----
from flask import request
from flask import redirect
from flask import session
from flask import url_for
from functools import wraps
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not session.get('user', {}).get('id', None):
return redirect(url_for('ui.login', next=request.url))
return f(*args, **kwargs)
return decorated_function
def superadmin_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not session.get('user', {}).get('role', 'member') == 'superadmin':
return redirect(url_for('ui.login', next=request.url))
return f(*args, **kwargs)
return decorated_function
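# Illustrative usage on a hypothetical Flask view (the names below are
# assumptions, not part of the original module):
#   @app.route('/admin')
#   @superadmin_required
#   def admin_dashboard():
#       return render_template('admin.html')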
# ---- file: dqc/utils/safeops.py | repo: Jaikinator/dqc | license: Apache-2.0 ----
import math
import torch
from typing import Union, Optional, Tuple
from dqc.utils.datastruct import ZType
eps = 1e-12
########################## safe operations ##########################
def safepow(a: torch.Tensor, p: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
if torch.any(a < 0):
raise RuntimeError("safepow only works for positive base")
base = torch.sqrt(a * a + eps * eps) # soft clip
return base ** p
def safenorm(a: torch.Tensor, dim: int, eps: float = 1e-15) -> torch.Tensor:
# calculate the 2-norm safely
return torch.sqrt(torch.sum(a * a + eps * eps, dim=dim))
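# Quick sanity sketch (illustrative): at a == 0 a plain a ** 0.5 has an
# infinite derivative, while safepow stays finite because its base is
# soft-clipped to at least ~eps:
#   a = torch.zeros(3, dtype=torch.double, requires_grad=True)
#   safepow(a, torch.tensor(0.5)).sum().backward()  # a.grad is finite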
########################## occupation number gradients ##########################
def occnumber(a: ZType,
n: Optional[int] = None,
dtype: torch.dtype = torch.double,
device: torch.device = torch.device('cpu')) -> torch.Tensor:
# returns the occupation number (maxed at 1) where the total sum of the
# output equals to a with length of the output is n
def _get_floor_and_ceil(aa: Union[int, float]) -> Tuple[int, int]:
# get the ceiling and flooring of aa
if isinstance(aa, int):
ceil_a: int = aa
floor_a: int = aa
else: # floor
ceil_a = int(math.ceil(aa))
floor_a = int(math.floor(aa))
return floor_a, ceil_a
if isinstance(a, torch.Tensor):
assert a.numel() == 1
floor_a, ceil_a = _get_floor_and_ceil(a.item())
else: # int or float
floor_a, ceil_a = _get_floor_and_ceil(a)
# get the length of the tensor output
if n is None:
nlength = ceil_a
else:
nlength = n
assert nlength >= ceil_a, "The length of occupation number must be at least %d" % ceil_a
if isinstance(a, torch.Tensor):
res = _OccNumber.apply(a, floor_a, ceil_a, nlength, dtype, device)
else:
res = _construct_occ_number(a, floor_a, ceil_a, nlength, dtype=dtype, device=device)
return res
def _construct_occ_number(a: float, floor_a: int, ceil_a: int, nlength: int,
dtype: torch.dtype, device: torch.device) -> torch.Tensor:
res = torch.zeros(nlength, dtype=dtype, device=device)
res[:floor_a] = 1
if ceil_a > floor_a:
res[ceil_a - 1] = a - floor_a
return res
class _OccNumber(torch.autograd.Function):
@staticmethod
def forward(ctx, a: torch.Tensor, # type: ignore
floor_a: int, ceil_a: int, nlength: int,
dtype: torch.dtype, device: torch.device) -> torch.Tensor:
res = _construct_occ_number(float(a.item()), floor_a, ceil_a, nlength, dtype=dtype, device=device)
ctx.ceil_a = ceil_a
return res
@staticmethod
def backward(ctx, grad_res: torch.Tensor): # type: ignore
grad_a = grad_res[ctx.ceil_a - 1]
return (grad_a,) + (None,) * 5
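# Worked example (illustrative): occnumber(2.5, n=4) returns
# tensor([1.0, 1.0, 0.5, 0.0]); the custom backward routes the gradient
# only through the fractional slot (index ceil_a - 1).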
########################## other tensor ops ##########################
def safe_cdist(a: torch.Tensor, b: torch.Tensor, add_diag_eps: bool = False,
diag_inf: bool = False):
# returns the L2 pairwise distance of a and b
# a: (*BA, na, ndim)
# b: (*BB, nb, ndim)
# returns: (*BAB, na, nb)
square_mat = a.shape[-2] == b.shape[-2]
dtype = a.dtype
device = a.device
ab = a.unsqueeze(-2) - b.unsqueeze(-3) # (*BAB, na, nb, ndim)
# add the diagonal with a small eps to safeguard from nan
if add_diag_eps:
if not square_mat:
raise ValueError("Enabling add_diag_eps for non-square result matrix is invalid")
ab = ab + torch.eye(ab.shape[-2], dtype=dtype, device=device).unsqueeze(-1) * eps
ab = ab.norm(dim=-1) # (*BAB, na, nb)
# replace the diagonal with infinite (usually used for coulomb matrix)
if diag_inf:
if not square_mat:
raise ValueError("Enabling diag_inf for non-square result matrix is invalid")
infdiag = torch.eye(ab.shape[-1], dtype=dtype, device=device)
idiag = infdiag.diagonal()
idiag[:] = float("inf")
ab = ab + infdiag
return ab
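# Illustrative usage: pairwise distances of a point set with itself and an
# inf diagonal, as used for Coulomb-style matrices (values are assumptions):
#   pos = torch.randn(5, 3, dtype=torch.double)
#   dmat = safe_cdist(pos, pos, add_diag_eps=True, diag_inf=True)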
# ---- file: extra_envs/extra_envs/envs/point.py | repo: Fanshaoliu/safe_rl | license: MIT ----
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class PointEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 30}
def __init__(self, mass=1., target_dist=5., xlim=2.5, cost_smoothing=0.):
self.mass = mass
self.dt = 0.1
self.target_dist = target_dist
self.world_width = 1.5*2*target_dist
self.max_speed = 2.
self.lim = np.array([xlim, self.world_width])
high_state = np.array([self.world_width, self.world_width, 1., 1.],
dtype=np.float32)
self.action_space = spaces.Box(low=-1., high=1., shape=(2,), dtype=np.float32)
self.observation_space = spaces.Box(low=-high_state, high=high_state,
dtype=np.float32)
self.reward_range = (-1., 1.)
self.cost_smoothing = cost_smoothing
self.seed()
self.state = None
self.viewer = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
posn = self.np_random.uniform(low=-0.1, high=0.1, size=2)
self.state = np.concatenate([posn, [0., 0.]]).astype(np.float32)
return np.array(self.state)
def get_state(self):
return np.array(self.state)
def step(self, a):
a = np.squeeze(a)
a = np.clip(a, self.action_space.low[0], self.action_space.high[0])
pos, vel = self.state[:2], self.state[2:]
rew = self.state[-2:].dot([-self.state[1], self.state[0]])
rew /= (1. + np.abs(np.linalg.norm(self.state[:2]) - self.target_dist))
# Normalizing to range [-1, 1]
rew /= self.max_speed*self.target_dist
# State update
pos += vel*self.dt + a*self.dt**2 / (2*self.mass)
vel += a*self.dt/self.mass
        # Snap near-zero velocity components to exactly zero
vel[np.isclose(vel, 0)] = 0.
# Clip speed, if necessary
speed = np.linalg.norm(self.state[-2:])
if speed > self.max_speed:
self.state[-2:] *= self.max_speed / speed
done = (np.abs(pos) > self.lim).any() # constraint violation
distance = self.dist_to_unsafe()
cost = (float(distance == 0.)
if self.cost_smoothing == 0.
else max(0, 1 - distance/self.cost_smoothing))
info = dict(cost=cost, distance=distance)
return np.array(self.state), rew, done, info
def dist_to_unsafe(self):
return max(0, self.signed_dist_to_unsafe())
def signed_dist_to_unsafe(self):
return min(self.lim[0] - self.state[0], self.lim[0] + self.state[0],
self.lim[1] - self.state[1], self.lim[1] + self.state[1])
def render(self, mode='human'):
viewer_size = 500
center, scale = viewer_size // 2, viewer_size / self.world_width
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(viewer_size, viewer_size)
self.ring_trans = rendering.Transform((viewer_size/2, viewer_size/2))
self.ring = rendering.make_circle(self.target_dist*scale, res=100, filled=False)
self.ring.set_color(0., 0.8, 0.)
self.ring.add_attr(self.ring_trans)
self.viewer.add_geom(self.ring)
self.left_boundary = rendering.Line(start=(center - scale*self.lim[0], 0),
end=(center - scale*self.lim[0],
viewer_size))
self.left_boundary.set_color(0.8, 0., 0.)
self.viewer.add_geom(self.left_boundary)
self.right_boundary = rendering.Line(start=(center + scale*self.lim[0], 0),
end=(center + scale*self.lim[0],
viewer_size))
self.right_boundary.set_color(0.8, 0., 0.)
self.viewer.add_geom(self.right_boundary)
self.agent = rendering.make_circle(scale*0.1, res=100)
self.agent_trans = rendering.Transform((viewer_size/2, viewer_size/2))
self.agent.add_attr(self.agent_trans)
self.viewer.add_geom(self.agent)
if self.state is None:
return None
posn = self.state[:2]
self.agent_trans.set_translation(center + scale*posn[0], center + scale*posn[1])
return self.viewer.render(return_rgb_array=(mode == 'rgb_array'))
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
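# Minimal rollout sketch (illustrative, not part of the original file):
#   env = PointEnv()
#   obs = env.reset()
#   for _ in range(100):
#       obs, rew, done, info = env.step(env.action_space.sample())
#       if done:
#           break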
# ---- file: src/model/model_basic.py | repo: trungnt13/digisami_journal | license: Apache-2.0 ----
from __future__ import print_function, division, absolute_import
from odin import nnet as N, backend as K
import tensorflow as tf
@N.ModelDescriptor
def fnn(X, gender, topic, y, nb_classes):
f = N.Sequence([
N.Flatten(outdim=2),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=512, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=nb_classes),
], debug=True)
y_logits = f(X)
y_prob = tf.nn.softmax(y_logits)
return {'prob': y_prob, 'logit': y_logits}
@N.ModelDescriptor
def fnngen(X, gender, topic, y, nb_classes):
f = N.Sequence([
N.Flatten(outdim=2),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=512, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=nb_classes),
], debug=True)
X = tf.concat((X, gender), axis=-1)
y_logits = f(X)
y_prob = tf.nn.softmax(y_logits)
return {'prob': y_prob, 'logit': y_logits}
@N.ModelDescriptor
def fnntpc(X, gender, topic, y, nb_classes):
f = N.Sequence([
N.Flatten(outdim=2),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=512, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=nb_classes),
], debug=True)
X = tf.concat((X, topic), axis=-1)
y_logits = f(X)
y_prob = tf.nn.softmax(y_logits)
return {'prob': y_prob, 'logit': y_logits}
@N.ModelDescriptor
def fnnall(X, gender, topic, y, nb_classes):
f = N.Sequence([
N.Flatten(outdim=2),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=1024, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=512, b_init=None),
N.BatchNorm(activation=tf.nn.relu),
N.Dense(num_units=nb_classes),
], debug=True)
X = tf.concat((X, gender, topic), axis=-1)
y_logits = f(X)
y_prob = tf.nn.softmax(y_logits)
return {'prob': y_prob, 'logit': y_logits}
@N.ModelDescriptor
def fnn0(X, gender, topic, y, nb_classes):
f = N.Sequence([
N.Flatten(outdim=2),
N.Dense(num_units=1024, b_init=None),
N.Dense(num_units=1024, b_init=None),
N.Dense(num_units=512, b_init=None),
N.Dense(num_units=nb_classes),
], debug=True)
y_logits = f(X)
y_prob = tf.nn.softmax(y_logits)
return {'prob': y_prob, 'logit': y_logits}
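# Note (added for clarity): the five descriptors above share one architecture
# and differ only in the conditioning features concatenated to X -- none (fnn),
# gender (fnngen), topic (fnntpc), or both (fnnall) -- while fnn0 drops the
# batch-norm/ReLU layers and keeps only the Dense stack.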
# ---- file: Python/CrashCourse/mongodb-quickstart-course/src/starter_code_snake_bnb/src/infrastructure/state.py | repo: Mujtaba-Biyabani/Research | license: Unlicense ----
active_account = None
def reload_account():
global active_account
if not active_account:
return
# TODO: pull owner account from the database.
pass
# ---- file: generators/activerecord/templates/models/abstract_model.py | repo: afeiship/gx | license: MIT ----
from orator import Model
import pendulum
class AbstractModel(Model):
__guarded__ = []
@classmethod
def find_or_new_by(cls, options):
entity = cls.find_by(options)
if not entity:
entity = cls()
for k in options:
v = options[k]
setattr(entity, k, v)
return entity
@classmethod
def find_by(cls, options):
return cls.where(options).first()
# normalize timezone
def fresh_timestamp(self):
return pendulum.now('Asia/Shanghai')
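# Illustrative usage on a hypothetical concrete model (class name and column
# are assumptions, not part of the original file):
#   class User(AbstractModel):
#       pass
#   user = User.find_or_new_by({'email': 'user@example.com'})
#   user.save()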
# ---- file: blank/build.py | repo: davidbelliott/whitgl | license: MIT ----
import os
import sys
joinp = os.path.join
sys.path.insert(0, 'whitgl')
sys.path.insert(0, joinp('whitgl', 'input'))
import build
sys.path.insert(0, 'input')
import ninja_syntax
build.do_game('Game', '', ['png','ogg'])
# ---- file: factom_core/blocks/entry_credit_block.py | repo: sourcery-ai-bot/factom-core | license: MIT ----
import hashlib
import struct
from dataclasses import dataclass, field
from typing import Dict, List, Union
from factom_core.block_elements.balance_increase import BalanceIncrease
from factom_core.block_elements.chain_commit import ChainCommit
from factom_core.block_elements.entry_commit import EntryCommit
from factom_core.utils import varint
from .directory_block import DirectoryBlock
ECIDTypes = Union[ChainCommit, EntryCommit, int]
@dataclass
class EntryCreditBlockHeader:
CHAIN_ID = bytes.fromhex("000000000000000000000000000000000000000000000000000000000000000c")
body_hash: bytes
prev_header_hash: bytes
prev_full_hash: bytes
height: int
expansion_area: bytes
object_count: int
body_size: int
def __post_init__(self):
# TODO: value assertions
pass
def marshal(self) -> bytes:
buf = bytearray()
buf.extend(EntryCreditBlockHeader.CHAIN_ID)
buf.extend(self.body_hash)
buf.extend(self.prev_header_hash)
buf.extend(self.prev_full_hash)
buf.extend(struct.pack(">I", self.height))
buf.extend(varint.encode(len(self.expansion_area)))
buf.extend(self.expansion_area)
buf.extend(struct.pack(">Q", self.object_count))
buf.extend(struct.pack(">Q", self.body_size))
return bytes(buf)
@classmethod
def unmarshal(cls, raw: bytes):
h, data = EntryCreditBlockHeader.unmarshal_with_remainder(raw)
assert len(data) == 0, "Extra bytes remaining!"
return h
@classmethod
def unmarshal_with_remainder(cls, raw: bytes):
chain_id, data = raw[:32], raw[32:]
assert chain_id == EntryCreditBlockHeader.CHAIN_ID
body_hash, data = data[:32], data[32:]
prev_header_hash, data = data[:32], data[32:]
prev_full_hash, data = data[:32], data[32:]
height, data = struct.unpack(">I", data[:4])[0], data[4:]
header_expansion_size, data = varint.decode(data)
header_expansion_area, data = (
data[:header_expansion_size],
data[header_expansion_size:],
)
object_count, data = struct.unpack(">Q", data[:8])[0], data[8:]
body_size, data = struct.unpack(">Q", data[:8])[0], data[8:]
return (
EntryCreditBlockHeader(
body_hash=body_hash,
prev_header_hash=prev_header_hash,
prev_full_hash=prev_full_hash,
height=height,
expansion_area=header_expansion_area,
object_count=object_count,
body_size=body_size,
),
data,
)
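# Round-trip sketch (illustrative): marshalling a header and unmarshalling
# the bytes should reproduce the same byte string.
#   raw = header.marshal()
#   assert EntryCreditBlockHeader.unmarshal(raw).marshal() == raw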
@dataclass
class EntryCreditBlockBody:
objects: Dict[int, List[ECIDTypes]] = field(default_factory=dict)
def __post_init__(self):
# TODO: value assertions
pass
def marshal(self):
buf = bytearray()
for minute, objects in self.objects.items():
for o in objects:
if isinstance(o, int):
buf.append(0x00)
buf.append(o)
elif isinstance(o, ChainCommit):
buf.append(ChainCommit.ECID)
buf.extend(o.marshal())
elif isinstance(o, EntryCommit):
buf.append(EntryCommit.ECID)
buf.extend(o.marshal())
elif isinstance(o, BalanceIncrease):
buf.append(BalanceIncrease.ECID)
buf.extend(o.marshal())
else:
raise ValueError("Invalid ECID type!")
buf.append(0x01)
buf.append(minute)
return bytes(buf)
@classmethod
def unmarshal(cls, raw: bytes, object_count: int):
body, data = cls.unmarshal_with_remainder(raw, object_count)
assert len(data) == 0, "Extra bytes remaining!"
return body
@classmethod
def unmarshal_with_remainder(cls, raw: bytes, object_count: int):
data = raw
objects = {} # map of minute --> objects array
current_minute_objects = []
for _ in range(object_count):
ecid, data = data[0], data[1:]
if ecid == 0x00:
server_index, data = data[0], data[1:]
current_minute_objects.append(server_index)
elif ecid == 0x01:
minute, data = data[0], data[1:]
objects[minute] = current_minute_objects
current_minute_objects = []
elif ecid == ChainCommit.ECID:
chain_commit, data = (
data[: ChainCommit.BITLENGTH],
data[ChainCommit.BITLENGTH :],
)
chain_commit = ChainCommit.unmarshal(chain_commit)
current_minute_objects.append(chain_commit)
elif ecid == EntryCommit.ECID:
entry_commit, data = (
data[: EntryCommit.BITLENGTH],
data[EntryCommit.BITLENGTH :],
)
entry_commit = EntryCommit.unmarshal(entry_commit)
current_minute_objects.append(entry_commit)
elif ecid == BalanceIncrease.ECID:
balance_increase, data = BalanceIncrease.unmarshal_with_remainder(data)
current_minute_objects.append(balance_increase)
else:
raise ValueError
return EntryCreditBlockBody(objects=objects), data
def construct_header(self, prev_header_hash: bytes, prev_full_hash: bytes, height: int) -> EntryCreditBlockHeader:
object_count = 0
for object_list in self.objects.values():
object_count += len(object_list) + 1
marshalled_body = self.marshal()
return EntryCreditBlockHeader(
body_hash=hashlib.sha256(marshalled_body).digest(),
prev_header_hash=prev_header_hash,
prev_full_hash=prev_full_hash,
height=height,
expansion_area=b"",
object_count=object_count,
body_size=len(marshalled_body),
)
@dataclass
class EntryCreditBlock:
header: EntryCreditBlockHeader
body: EntryCreditBlockBody
_cached_header_hash: bytes = None
def __post_init__(self):
# TODO: value assertions
pass
@property
def header_hash(self):
if self._cached_header_hash is not None:
return self._cached_header_hash
self._cached_header_hash = hashlib.sha256(self.header.marshal()).digest()
return self._cached_header_hash
@property
def full_hash(self):
return hashlib.sha256(self.marshal()).digest()
def marshal(self):
"""Marshals the directory block according to the byte-level representation shown at
https://github.com/FactomProject/FactomDocs/blob/master/factomDataStructureDetails.md#entry-credit-block
Data returned does not include contextual metadata, such as timestamp or the pointer to the
next entry-credit block.
"""
buf = bytearray()
buf.extend(self.header.marshal())
buf.extend(self.body.marshal())
return bytes(buf)
@classmethod
def unmarshal(cls, raw: bytes):
"""Returns a new EntryCreditBlock object, unmarshalling given bytes according to:
https://github.com/FactomProject/FactomDocs/blob/master/factomDataStructureDetails.md#entry-credit-block
Useful for working with a single ecblock out of context, pulled directly from a factomd database for instance.
EntryCreditBlock created will not include contextual metadata, such as timestamp or the pointer to the
next entry-credit block.
"""
block, data = cls.unmarshal_with_remainder(raw)
assert len(data) == 0, "Extra bytes remaining!"
return block
@classmethod
def unmarshal_with_remainder(cls, raw: bytes):
header, data = EntryCreditBlockHeader.unmarshal_with_remainder(raw)
body, data = EntryCreditBlockBody.unmarshal_with_remainder(data, header.object_count)
return EntryCreditBlock(header=header, body=body), data
def add_context(self, directory_block: DirectoryBlock):
pass
def to_dict(self):
return {
"header_hash": self.header_hash.hex(),
"body_hash": self.header.body_hash.hex(),
"prev_header_hash": self.header.prev_header_hash.hex(),
"prev_full_hash": self.header.prev_full_hash.hex(),
"height": self.header.height,
"expansion_area": self.header.expansion_area.hex(),
"object_count": self.header.object_count,
"body_size": self.header.body_size,
"objects": {
minute: [o if type(o) is int else o.to_dict() for o in objects]
for minute, objects in self.body.objects.items()
},
}
def __str__(self):
return "{}(height={})".format(self.__class__.__name__, self.header.height)
# ---- file: project1/tests/q0.py | repo: ucsb-int5/int5-f19-notebooks | license: MIT ----
test = {   'name': 'q0',
'points': 0,
          'suites': []}
# ---- file: tests/core/inst_utils.py | repo: cornell-brg/lizard | license: BSD-3-Clause ----
#=========================================================================
# inst_utils
#=========================================================================
# Includes helper functions to simplify creating assembly tests.
from pymtl import *
from tests.context import lizard
#-------------------------------------------------------------------------
# print_asm
#-------------------------------------------------------------------------
# Pretty print a generated assembly syntax
def print_asm(asm_code):
# If asm_code is a single string, then put it in a list to simplify the
# rest of the logic.
asm_code_list = asm_code
if isinstance(asm_code, str):
asm_code_list = [asm_code]
# Create a single list of lines
asm_list = []
for asm_seq in asm_code_list:
asm_list.extend(asm_seq.splitlines())
# Print the assembly. Remove duplicate blank lines.
prev_blank_line = False
for asm in asm_list:
if asm.strip() == "":
if not prev_blank_line:
        print(asm)
prev_blank_line = True
else:
prev_blank_line = False
      print(asm)
#-------------------------------------------------------------------------
# gen_nops
#-------------------------------------------------------------------------
def gen_nops(num_nops):
if num_nops > 0:
return "nop\n" + (" nop\n" * (num_nops - 1))
else:
return ""
#-------------------------------------------------------------------------
# gen_word_data
#-------------------------------------------------------------------------
def gen_word_data(data_list):
data_str = ".data\n"
for data in data_list:
data_str += ".word {}\n".format(data)
return data_str
#-------------------------------------------------------------------------
# gen_hword_data
#-------------------------------------------------------------------------
def gen_hword_data(data_list):
data_str = ".data\n"
for data in data_list:
data_str += ".hword {}\n".format(data)
return data_str
#-------------------------------------------------------------------------
# gen_byte_data
#-------------------------------------------------------------------------
def gen_byte_data(data_list):
data_str = ".data\n"
for data in data_list:
data_str += ".byte {}\n".format(data)
return data_str
#-------------------------------------------------------------------------
# gen_rr_src01_template
#-------------------------------------------------------------------------
# Template for register-register instructions. We first write src0
# register and then write the src1 register before executing the
# instruction under test. We parameterize the number of nops after
# writing both src registers and the instruction under test to enable
# using this template for testing various bypass paths. We also
# parameterize the register specifiers to enable using this template to
# test situations where the src registers are equal and/or equal the
# destination register.
def gen_rr_src01_template(num_nops_src0, num_nops_src1, num_nops_dest, reg_src0,
reg_src1, inst, src0, src1, result):
return """
# Move src0 value into register
csrr {reg_src0}, mngr2proc < {src0}
{nops_src0}
# Move src1 value into register
csrr {reg_src1}, mngr2proc < {src1}
{nops_src1}
# Instruction under test
{inst} x3, {reg_src0}, {reg_src1}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_src0=gen_nops(num_nops_src0),
nops_src1=gen_nops(num_nops_src1),
nops_dest=gen_nops(num_nops_dest),
**locals())
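# Illustrative expansion (argument values are assumptions):
# gen_rr_src01_template(0, 0, 0, "x1", "x2", "add", 1, 2, 3) yields assembly
# that loads 1 and 2 into x1/x2, runs "add x3, x1, x2", and checks that x3
# reads back as 3.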
#-------------------------------------------------------------------------
# gen_rr_src10_template
#-------------------------------------------------------------------------
# Similar to the above template, except that we reverse the order in
# which we write the two src registers.
def gen_rr_src10_template(num_nops_src0, num_nops_src1, num_nops_dest, reg_src0,
reg_src1, inst, src0, src1, result):
return """
# Move src1 value into register
csrr {reg_src1}, mngr2proc < {src1}
{nops_src1}
# Move src0 value into register
csrr {reg_src0}, mngr2proc < {src0}
{nops_src0}
# Instruction under test
{inst} x3, {reg_src0}, {reg_src1}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_src0=gen_nops(num_nops_src0),
nops_src1=gen_nops(num_nops_src1),
nops_dest=gen_nops(num_nops_dest),
**locals())
#-------------------------------------------------------------------------
# gen_rr_dest_dep_test
#-------------------------------------------------------------------------
# Test the destination bypass path by varying how many nops are
# inserted between the instruction under test and reading the destination
# register with a csrr instruction.
def gen_rr_dest_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src01_template(0, 8, num_nops, "x1", "x2", inst, src0, src1,
result)
#-------------------------------------------------------------------------
# gen_rr_src1_dep_test
#-------------------------------------------------------------------------
# Test the source 1 bypass paths by varying how many nops are inserted
# between writing the src1 register and reading this register in the
# instruction under test.
def gen_rr_src1_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src01_template(8 - num_nops, num_nops, 0, "x1", "x2", inst,
src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_src0_dep_test
#-------------------------------------------------------------------------
# Test the source 0 bypass paths by varying how many nops are inserted
# between writing the src0 register and reading this register in the
# instruction under test.
def gen_rr_src0_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src10_template(num_nops, 8 - num_nops, 0, "x1", "x2", inst,
src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_srcs_dep_test
#-------------------------------------------------------------------------
# Test both source bypass paths at the same time by varying how many nops
# are inserted between writing both src registers and reading both
# registers in the instruction under test.
def gen_rr_srcs_dep_test(num_nops, inst, src0, src1, result):
return gen_rr_src01_template(0, num_nops, 0, "x1", "x2", inst, src0, src1,
result)
#-------------------------------------------------------------------------
# gen_rr_src0_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where the src0 register specifier is the same as the
# destination register specifier.
def gen_rr_src0_eq_dest_test(inst, src0, src1, result):
return gen_rr_src01_template(0, 0, 0, "x3", "x2", inst, src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_src1_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where the src1 register specifier is the same as the
# destination register specifier.
def gen_rr_src1_eq_dest_test(inst, src0, src1, result):
return gen_rr_src01_template(0, 0, 0, "x1", "x3", inst, src0, src1, result)
#-------------------------------------------------------------------------
# gen_rr_src0_eq_src1_test
#-------------------------------------------------------------------------
# Test situation where the src register specifiers are the same.
def gen_rr_src0_eq_src1_test(inst, src, result):
return gen_rr_src01_template(0, 0, 0, "x1", "x1", inst, src, src, result)
#-------------------------------------------------------------------------
# gen_rr_srcs_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where all three register specifiers are the same.
def gen_rr_srcs_eq_dest_test(inst, src, result):
return gen_rr_src01_template(0, 0, 0, "x3", "x3", inst, src, src, result)
#-------------------------------------------------------------------------
# gen_rr_value_test
#-------------------------------------------------------------------------
# Test the actual operation of a register-register instruction under
# test. We assume that bypassing has already been tested.
def gen_rr_value_test(inst, src0, src1, result):
return gen_rr_src01_template(0, 0, 0, "x1", "x2", inst, src0, src1, result)
#-------------------------------------------------------------------------
# gen_rimm_template
#-------------------------------------------------------------------------
# Template for register-immediate instructions. We first write the src
# register before executing the instruction under test. We parameterize
# the number of nops after writing the src register and the instruction
# under test to enable using this template for testing various bypass
# paths. We also parameterize the register specifiers to enable using
# this template to test situations where the src registers are equal
# and/or equal the destination register.
def gen_rimm_template(num_nops_src, num_nops_dest, reg_src, inst, src, imm,
result):
return """
# Move src value into register
csrr {reg_src}, mngr2proc < {src}
{nops_src}
# Instruction under test
{inst} x3, {reg_src}, {imm}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_src=gen_nops(num_nops_src),
nops_dest=gen_nops(num_nops_dest),
**locals())
#-------------------------------------------------------------------------
# gen_rimm_dest_dep_test
#-------------------------------------------------------------------------
# Test the destination bypass path by varying how many nops are
# inserted between the instruction under test and reading the destination
# register with a csrr instruction.
def gen_rimm_dest_dep_test(num_nops, inst, src, imm, result):
return gen_rimm_template(8, num_nops, "x1", inst, src, imm, result)
#-------------------------------------------------------------------------
# gen_rimm_src_dep_test
#-------------------------------------------------------------------------
# Test the source bypass paths by varying how many nops are inserted
# between writing the src register and reading this register in the
# instruction under test.
def gen_rimm_src_dep_test(num_nops, inst, src, imm, result):
return gen_rimm_template(num_nops, 0, "x1", inst, src, imm, result)
#-------------------------------------------------------------------------
# gen_rimm_src_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where the src register specifier is the same as the
# destination register specifier.
def gen_rimm_src_eq_dest_test(inst, src, imm, result):
return gen_rimm_template(0, 0, "x3", inst, src, imm, result)
#-------------------------------------------------------------------------
# gen_rimm_value_test
#-------------------------------------------------------------------------
# Test the actual operation of a register-immediate instruction under
# test. We assume that bypassing has already been tested.
def gen_rimm_value_test(inst, src, imm, result):
return gen_rimm_template(0, 0, "x1", inst, src, imm, result)
#-------------------------------------------------------------------------
# gen_imm_template
#-------------------------------------------------------------------------
# Template for immediate instructions. We parameterize the number of nops
# after the instruction under test to enable using this template for
# testing various bypass paths.
def gen_imm_template(num_nops_dest, inst, imm, result):
return """
# Instruction under test
{inst} x3, {imm}
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_dest=gen_nops(num_nops_dest), **locals())
#-------------------------------------------------------------------------
# gen_imm_dest_dep_test
#-------------------------------------------------------------------------
# Test the destination bypass path by varying how many nops are
# inserted between the instruction under test and reading the destination
# register with a csrr instruction.
def gen_imm_dest_dep_test(num_nops, inst, imm, result):
return gen_imm_template(num_nops, inst, imm, result)
#-------------------------------------------------------------------------
# gen_imm_value_test
#-------------------------------------------------------------------------
# Test the actual operation of an immediate instruction under test. We
# assume that bypassing has already been tested.
def gen_imm_value_test(inst, imm, result):
return gen_imm_template(0, inst, imm, result)
#-------------------------------------------------------------------------
# gen_br2_template
#-------------------------------------------------------------------------
# Template for branch instructions with two sources. We test two forward
# branches and one backwards branch. The way we actually do the test is
# we update a register to reflect the control flow; certain bits in this
# register are set at different points in the program. Then we can check
# the control flow bits at the end to see if only the bits we expect are
# set (i.e., the program only executed those points that we expect). Note
# that the test also makes sure that the instruction in the branch delay slot
# is _not_ executed.
# We currently need the id to create labels unique to this test. We might
# eventually allow local labels (e.g., 1f, 1b) as in gas.
gen_br2_template_id = 0
def gen_br2_template(num_nops_src0, num_nops_src1, reg_src0, reg_src1, inst,
src0, src1, taken):
# Determine the expected control flow pattern
if taken:
control_flow_pattern = 0b101010
else:
control_flow_pattern = 0b111111
# Create unique labels
global gen_br2_template_id
id_a = "label_{}".format(gen_br2_template_id + 1)
id_b = "label_{}".format(gen_br2_template_id + 2)
id_c = "label_{}".format(gen_br2_template_id + 3)
gen_br2_template_id += 3
return """
# x3 will track the control flow pattern
addi x3, x0, 0
# Move src0 value into register
csrr {reg_src0}, mngr2proc < {src0}
{nops_src0}
# Move src1 value into register
csrr {reg_src1}, mngr2proc < {src1}
{nops_src1}
{inst} {reg_src0}, {reg_src1}, {id_a} # br -.
addi x3, x3, 0b000001 # |
# |
{id_b}: # <---+-.
addi x3, x3, 0b000010 # | |
# | |
{inst} {reg_src0}, {reg_src1}, {id_c} # br -+-+-.
addi x3, x3, 0b000100 # | | |
# | | |
{id_a}: # <---' | |
addi x3, x3, 0b001000 # | |
# | |
{inst} {reg_src0}, {reg_src1}, {id_b} # br ---' |
addi x3, x3, 0b010000 # |
# |
{id_c}: # <-------'
addi x3, x3, 0b100000 #
# Check the control flow pattern
csrw proc2mngr, x3 > {control_flow_pattern}
""".format(
nops_src0=gen_nops(num_nops_src0),
nops_src1=gen_nops(num_nops_src1),
**locals())
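# Consistency check (illustrative): when the branch is taken, only the addi's
# at labels b, a and c execute (0b000010 + 0b001000 + 0b100000 = 0b101010),
# matching control_flow_pattern above; the fall-through path executes all six
# addi's (0b111111).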
#-------------------------------------------------------------------------
# gen_br2_src1_dep_test
#-------------------------------------------------------------------------
# Test the source 1 bypass paths by varying how many nops are inserted
# between writing the src1 register and reading this register in the
# instruction under test.
def gen_br2_src1_dep_test(num_nops, inst, src0, src1, taken):
return gen_br2_template(8 - num_nops, num_nops, "x1", "x2", inst, src0, src1,
taken)
#-------------------------------------------------------------------------
# gen_br2_src0_dep_test
#-------------------------------------------------------------------------
# Test the source 0 bypass paths by varying how many nops are inserted
# between writing the src0 register and reading this register in the
# instruction under test.
def gen_br2_src0_dep_test(num_nops, inst, src0, src1, result):
return gen_br2_template(num_nops, 0, "x1", "x2", inst, src0, src1, result)
#-------------------------------------------------------------------------
# gen_br2_srcs_dep_test
#-------------------------------------------------------------------------
# Test both source bypass paths at the same time by varying how many nops
# are inserted between writing both src registers and reading both
# registers in the instruction under test.
def gen_br2_srcs_dep_test(num_nops, inst, src0, src1, result):
return gen_br2_template(0, num_nops, "x1", "x2", inst, src0, src1, result)
#-------------------------------------------------------------------------
# gen_br2_src0_eq_src1_test
#-------------------------------------------------------------------------
# Test situation where the src register specifiers are the same.
def gen_br2_src0_eq_src1_test(inst, src, result):
return gen_br2_template(0, 0, "x1", "x1", inst, src, src, result)
#-------------------------------------------------------------------------
# gen_br2_value_test
#-------------------------------------------------------------------------
# Test the correct branch resolution based on various source values.
def gen_br2_value_test(inst, src0, src1, taken):
return gen_br2_template(0, 0, "x1", "x2", inst, src0, src1, taken)
# ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
gen_jal_template_id = 0
def gen_jal_dest_dep_test(num_nops_dest):
global gen_jal_template_id
id_a = "label_{}".format(gen_jal_template_id + 1)
gen_jal_template_id += 1
return """
# Use r3 to track the control flow pattern
addi x3, x0, 0 # 0x0200
jal x1, {id_a} # 0x0204
addi x3, x3, 0b01 # 0x0208
{nops_dest}
{id_a}:
# Check the link address
csrw proc2mngr, x1 > 0x0208
addi x3, x3, 0b10
# Only the second bit should be set if jump was taken
csrw proc2mngr, x3 > 0b10
""".format(
nops_dest=gen_nops(num_nops_dest), **locals())
#-------------------------------------------------------------------------
# gen_ld_template
#-------------------------------------------------------------------------
# Template for load instructions. We first write the base register before
# executing the instruction under test. We parameterize the number of
# nops after writing the base register and the instruction under test to
# enable using this template for testing various bypass paths. We also
# parameterize the register specifiers to enable using this template to
# test situations where the base register is equal to the destination
# register.
def gen_ld_template(num_nops_base, num_nops_dest, reg_base, inst, offset, base,
result):
return """
# Move base value into register
csrr {reg_base}, mngr2proc < {base}
{nops_base}
# Instruction under test
{inst} x3, {offset}({reg_base})
{nops_dest}
# Check the result
csrw proc2mngr, x3 > {result}
""".format(
nops_base=gen_nops(num_nops_base),
nops_dest=gen_nops(num_nops_dest),
**locals())
#-------------------------------------------------------------------------
# gen_ld_dest_dep_test
#-------------------------------------------------------------------------
# Test the destination bypass path by varying how many nops are
# inserted between the instruction under test and reading the destination
# register with a csrr instruction.
def gen_ld_dest_dep_test(num_nops, inst, base, result):
return gen_ld_template(8, num_nops, "x1", inst, 0, base, result)
#-------------------------------------------------------------------------
# gen_ld_base_dep_test
#-------------------------------------------------------------------------
# Test the base register bypass paths by varying how many nops are
# inserted between writing the base register and reading this register in
# the instruction under test.
def gen_ld_base_dep_test(num_nops, inst, base, result):
return gen_ld_template(num_nops, 0, "x1", inst, 0, base, result)
#-------------------------------------------------------------------------
# gen_ld_base_eq_dest_test
#-------------------------------------------------------------------------
# Test situation where the base register specifier is the same as the
# destination register specifier.
def gen_ld_base_eq_dest_test(inst, base, result):
return gen_ld_template(0, 0, "x3", inst, 0, base, result)
#-------------------------------------------------------------------------
# gen_ld_value_test
#-------------------------------------------------------------------------
# Test the actual operation of a register-register instruction under
# test. We assume that bypassing has already been tested.
def gen_ld_value_test(inst, offset, base, result):
return gen_ld_template(0, 0, "x1", inst, offset, base, result)
# ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def sum_i(i):
if i <= 0:
return 0
ret = 0
for j in range(0, i + 1):
ret += j
return ret
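# Note (added for clarity): sum_i(i) is the arithmetic series 0 + 1 + ... + i,
# i.e. i * (i + 1) // 2; for example, sum_i(4) == 10.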
#-------------------------------------------------------------------------
# gen_st_template
#-------------------------------------------------------------------------
# Template for load instructions. We first write the base register before
# executing the instruction under test. We parameterize the number of
# nops after writing the base register and the instruction under test to
# enable using this template for testing various bypass paths. We also
# parameterize the register specifiers to enable using this template to
# test situations where the base register is equal to the destination
# register.
def gen_st_base_data_template(num_nops_data, num_nops_dest, reg_base, reg_data,
inst, offset, base, data):
return """
# Move base value into register
csrr {reg_base}, mngr2proc < {base}
csrr {reg_data}, mngr2proc < {data}
{nops_data}
# Instruction under test
{inst} {reg_data}, {offset}({reg_base})
{nops_dest}
# Check the result
lwu x3, {offset}({reg_base})
csrw proc2mngr, x3 > {data}
""".format(
nops_data=gen_nops(num_nops_data),
nops_dest=gen_nops(num_nops_dest),
**locals())
def gen_st_data_base_template(num_nops_base, num_nops_dest, reg_base, reg_data,
inst, offset, base, data):
return """
# Move base value into register
csrr {reg_data}, mngr2proc < {data}
csrr {reg_base}, mngr2proc < {base}
{nops_base}
# Instruction under test
{inst} {reg_data}, {offset}({reg_base})
{nops_dest}
# Check the result
lwu x3, {offset}({reg_base})
csrw proc2mngr, x3 > {data}
""".format(
nops_base=gen_nops(num_nops_base),
nops_dest=gen_nops(num_nops_dest),
**locals())
# test dependency in load of same address as store
def gen_st_dest_dep_test(num_nops, inst, base, data):
return gen_st_base_data_template(8, num_nops, "x1", "x2", inst, 0, base, data)
def gen_st_base_dep_test(num_nops, inst, base, data):
return gen_st_data_base_template(num_nops, 8, "x1", "x2", inst, 0, base, data)
def gen_st_data_dep_test(num_nops, inst, base, data):
return gen_st_base_data_template(num_nops, 8, "x1", "x2", inst, 0, base, data)
def gen_st_base_eq_data_test(inst, base, data):
return gen_st_base_data_template(0, 0, "x1", "x1", inst, 0, base, data)
def gen_st_value_test(inst, offset, base, data):
return gen_st_base_data_template(0, 0, "x1", "x2", inst, offset, base, data)
# ---- file: python/concepts/compile-runtime.py | repo: shanavas786/coding-fu | license: CC0-1.0 ----
def func(arg1, arg2=dict()):
print('entering func')
    # arg3's default (arg2) is evaluated when the `def inner` statement runs,
    # so inner captures arg3's default as {}
def inner(arg3=arg2):
        # arg1 is evaluated when inner is called,
        # so it uses the value of arg1 at that time,
        # which is None
print("arg1", arg1, "arg3", arg3)
arg1 = arg2 = None
return inner
inn = func(1)
inn()
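# Expected output (illustrative):
#   entering func
#   arg1 None arg3 {}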
# ---- file: alpinelib/aws/aws_lambda.py | repo: nbcnews/alpinelib | license: MIT ----
import boto3
from .. import logging
logger = logging.getFormattedLogger()
lambda_client = boto3.client('lambda', region_name='us-west-2')
def invoke(function_name, message):
try:
response = lambda_client.invoke(
FunctionName=function_name,
InvocationType='Event',
Payload=message
)
return response
except Exception as e:
logger.exception("Failed to invoke lambda {}.".format(function_name))
raise e
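# Illustrative call (function name and payload are assumptions; the payload
# must be bytes or a JSON string):
#   invoke('my-function', json.dumps({'key': 'value'}))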
# ---- file: test/test_lazy.py | repo: sixty-north/python-transducers | license: MIT ----
import unittest
from transducer.functional import compose
from transducer.lazy import transduce
from transducer.transducers import (mapping, filtering, taking, dropping_while, distinct)
class TestComposedTransducers(unittest.TestCase):
def test_chained_transducers(self):
result = transduce(transducer=compose(
mapping(lambda x: x*x),
filtering(lambda x: x % 5 != 0),
taking(6),
dropping_while(lambda x: x < 15),
distinct()),
iterable=range(20))
expected = [16, 36, 49]
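        # Trace (added for clarity): squares of 0..19 with multiples of 5
        # filtered out gives 1, 4, 9, 16, 36, 49, ...; taking(6) keeps the
        # first six, dropping_while(< 15) removes 1, 4 and 9, and distinct()
        # is a no-op here, leaving [16, 36, 49].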
for r, e in zip(result, expected):
self.assertEqual(r, e)
if __name__ == '__main__':
unittest.main()
# ---- file: cmake_pc_hooks/cppcheck.py | repo: Takishima/cmake-pre-commit-hooks | license: Apache-2.0 ----
# -*- coding: utf-8 -*-
# Copyright 2021 Damien Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper script for cppcheck."""
import sys
from pathlib import Path
from ._utils import Command
class CppcheckCmd(Command):
"""Class for the cppcheck command."""
command = "cppcheck"
lookbehind = "Cppcheck "
def __init__(self, args):
"""Initialize a CppcheckCmd object."""
super().__init__(self.command, self.lookbehind, args)
self.parse_args(args)
# quiet for stdout purposes
self.add_if_missing(["-q"])
# make cppcheck behave as expected for pre-commit
self.add_if_missing(["--error-exitcode=1"])
# Enable all of the checks
self.add_if_missing(["--enable=all"])
# Force location of compile database
self.add_if_missing([f'--project={Path(self.build_dir, "compile_commands.json")}'])
def _parse_output(self, result):
"""
Parse output and check whether some errors occurred.
Args:
result (namedtuple): Result from calling a command
Returns:
False if no errors were detected, True in all other cases.
"""
# Useless error see https://stackoverflow.com/questions/6986033
useless_error_part = "Cppcheck cannot find all the include files"
result.stderr = [line for line in result.stderr.splitlines(keepends=True) if useless_error_part not in line]
return result.returncode != 0
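# Illustrative invocation (argument parsing lives in ._utils.Command, so treat
# this as a sketch; argv mirrors what main() passes through):
#   CppcheckCmd(['cppcheck-hook', 'src/main.cpp']).run()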
def main(argv=None):
"""
Run command.
Args:
argv (:obj:`list` of :obj:`str`): list of arguments
"""
if argv is None:
argv = sys.argv
cmd = CppcheckCmd(argv)
cmd.run()
if __name__ == "__main__":
main()
# ---- file: django-todo-list/tasks/models.py | repo: HimadriP/Orderly | license: MIT ----
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.utils import timezone
from django.forms.util import to_current_timezone
from model_utils import Choices
class TimeStampedModel(models.Model):
"""
An abstract model for the common fields 'created' and 'last_modified'.
"""
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Task(TimeStampedModel):
"""
    Model that represents a task.
"""
PRIORITY_CHOICES = Choices((1, 'low', 'Low'),
(2, 'medium', 'Medium'),
(3, 'high', 'High'))
TYPE_CHOICES = Choices((1, 'bug', 'Bug'),
(2, 'enhancement', 'Enhancement'),
(3, 'task', 'Task'),
(4, 'proposal', 'Proposal'))
STATUS_CHOICES = Choices((1, 'incomplete', 'Incomplete'),
(2, 'ready_for_review', 'Ready for Review'),
(3, 'complete', 'Complete'))
created_by = models.ForeignKey(settings.AUTH_USER_MODEL,
null=True,
blank=True,
editable=False,
related_name='tasks')
title = models.CharField(max_length=255)
description = models.TextField(blank=True)
due_date = models.DateField(null=True)
module = models.CharField(max_length=100, blank=True)
priority = models.PositiveIntegerField(choices=PRIORITY_CHOICES,
default=PRIORITY_CHOICES.low)
assigned_user = models.ForeignKey(settings.AUTH_USER_MODEL,
null=True,
blank=True,
verbose_name="Assigned To",
related_name='assigned_tasks')
type = models.PositiveIntegerField(choices=TYPE_CHOICES,
default=TYPE_CHOICES.task)
status = models.PositiveIntegerField(choices=STATUS_CHOICES,
default=STATUS_CHOICES.incomplete,
editable=False)
# Time at which user submitted it for review
completed_at = models.DateTimeField(null=True,
blank=True,
editable=False)
reviewed_by = models.ForeignKey(settings.AUTH_USER_MODEL,
null=True,
blank=True,
editable=False,
related_name='reviewed_tasks')
class Meta:
ordering = ['-created']
def is_due(self):
"""
        Return True if this task is past its due date, otherwise False.
"""
        # Convert to the current timezone, otherwise we would be comparing with UTC;
        # the date is entered relative to our current timezone
date_now = to_current_timezone(timezone.now()).date()
        return not self.is_complete() and self.due_date < date_now
def is_due_today(self):
"""
Check if the task due date is today
"""
date_now = to_current_timezone(timezone.now()).date()
        return self.due_date == date_now
def is_complete(self):
"""
Returns True if the task is marked as completed.
"""
        return self.status == self.STATUS_CHOICES.complete
def is_ready_for_review(self):
"""
Returns True if the task is marked as ready for review.
"""
        return self.status == self.STATUS_CHOICES.ready_for_review
def is_incomplete(self):
"""
Returns True if the task is marked as not completed.
"""
        return self.status == self.STATUS_CHOICES.incomplete
def get_absolute_url(self):
return reverse_lazy('task_detail', kwargs={'pk': self.pk})
def __str__(self):
return self.title
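# Usage sketch (illustrative; assumes a configured Django project):
#     from datetime import date
#     task = Task.objects.create(title='Fix login bug', due_date=date.today(),
#                                priority=Task.PRIORITY_CHOICES.high)
#     task.is_due_today()   # -> True
#     task.is_incomplete()  # -> True (status defaults to incomplete)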
| 36.622951 | 78 | 0.530215 | 4,234 | 0.947628 | 0 | 0 | 0 | 0 | 0 | 0 | 921 | 0.206132 |
26af8dafdbc00b0bb2091823b9a4a72611dc7cfc | 521 | py | Python | apps/boards/apps.py | julianwachholz/thefarland | c7259311fafb60beba167422eefd0d0c5d362514 | [
"WTFPL"
] | null | null | null | apps/boards/apps.py | julianwachholz/thefarland | c7259311fafb60beba167422eefd0d0c5d362514 | [
"WTFPL"
] | null | null | null | apps/boards/apps.py | julianwachholz/thefarland | c7259311fafb60beba167422eefd0d0c5d362514 | [
"WTFPL"
] | null | null | null | from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete
from . import signals
class BoardsAppConfig(AppConfig):
name = 'apps.boards'
def ready(self):
        Board = self.get_model('Board')  # looked up but currently unused
Thread = self.get_model('Thread')
Post = self.get_model('Post')
post_save.connect(signals.thread_post_save, sender=Thread)
post_save.connect(signals.post_post_save, sender=Post)
post_delete.connect(signals.thread_post_delete, sender=Thread)
| 30.647059 | 70 | 0.71785 | 402 | 0.771593 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.065259 |
26af8f12a06f8edb90f5fc54b553edce179f388f | 2,445 | py | Python | danmu.py | wjhtime/douyu_danmu_python | 432198f86bc9f6facd7ef531f301e8c7c8a9285f | [
"MIT"
] | 4 | 2018-12-15T10:35:20.000Z | 2019-06-04T20:20:32.000Z | danmu.py | wjhtime/douyu_danmu_python | 432198f86bc9f6facd7ef531f301e8c7c8a9285f | [
"MIT"
] | null | null | null | danmu.py | wjhtime/douyu_danmu_python | 432198f86bc9f6facd7ef531f301e8c7c8a9285f | [
"MIT"
] | 2 | 2019-04-29T08:20:08.000Z | 2020-05-19T09:51:19.000Z | '''
Use the Douyu danmaku (bullet-comment) API
to grab the danmaku of a specified douyutv room
'''
import multiprocessing
import socket
import time
import re
import signal
# Create a socket connection to the Douyu API server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostbyname("openbarrage.douyutv.com")
port = 8601
client.connect((host, port))
# Regular expressions for extracting danmaku senders and contents
danmu_re = re.compile(b'txt@=(.+?)/cid@')
username_re = re.compile(b'nn@=(.+?)/txt@')
def send_req_msg(msgstr):
    '''Build and send a request that conforms to the Douyu API'''
msg = msgstr.encode('utf-8')
data_length = len(msg) + 8
code = 689
    # Build the protocol header
msgHead = int.to_bytes(data_length, 4, 'little') \
+ int.to_bytes(data_length, 4, 'little') + \
int.to_bytes(code, 4, 'little')
client.send(msgHead)
sent = 0
while sent < len(msg):
tn = client.send(msg[sent:])
sent = sent + tn
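# Wire-format sketch (derived from send_req_msg above): every packet is a
# 12-byte header of three little-endian uint32s -- payload length + 8 (twice),
# then the message code 689 -- followed by the UTF-8 payload, e.g.
# 'type@=loginreq/roomid@=288016/\0'.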
def DM_start(roomid):
    # Build the login/authorization request
msg = 'type@=loginreq/roomid@={}/\0'.format(roomid)
send_req_msg(msg)
    # Build the join-group request so the server pushes danmaku messages
msg_more = 'type@=joingroup/rid@={}/gid@=-9999/\0'.format(roomid)
send_req_msg(msg_more)
while True:
        # Data returned by the server
data = client.recv(1024)
        # Use the re module to extract the sender's username and the message text
danmu_username = username_re.findall(data)
danmu_content = danmu_re.findall(data)
if not data:
break
else:
for i in range(0, len(danmu_content)):
try:
                    # Print the message
                    print('[{}]:{}'.format(danmu_username[i].decode(
                        'utf8'), danmu_content[i].decode(encoding='utf8')))
except:
continue
def keeplive():
'''
    Keep the connection alive: send a heartbeat request every 15 seconds
'''
while True:
msg = 'type@=keeplive/tick@=' + str(int(time.time())) + '/\0'
send_req_msg(msg)
        print('sent heartbeat packet')
time.sleep(15)
def logout():
'''
    Disconnect from the Douyu server
    and stop the worker processes
'''
msg = 'type@=logout/'
send_req_msg(msg)
    print('logged out of the server')
def signal_handler(signal, frame):
'''
    Catch the Ctrl+C signal, i.e. signal.SIGINT.
    The handler:
    logs out of the Douyu server
    and terminates the worker processes
'''
p1.terminate()
p2.terminate()
logout()
print('Bye')
if __name__ == '__main__':
    # room_id = input('Enter room ID: ')
# lpl
room_id = 288016
    # Register the Ctrl+C signal handler
signal.signal(signal.SIGINT, signal_handler)
    # Start the danmaku and heartbeat processes
p1 = multiprocessing.Process(target=DM_start, args=(room_id,))
p2 = multiprocessing.Process(target=keeplive)
p1.start()
p2.start()
| 21.447368 | 75 | 0.578323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 977 | 0.347811 |
26afa6ab00539bb702ecf9ce1071e801dd9694da | 3,828 | py | Python | 03_spider_douyin/spider_douyin.py | theThreeKingdom/python-exercises | fc08a7bbb9d6b53d5761b9e1017f293bff4e26db | [
"Apache-2.0"
] | null | null | null | 03_spider_douyin/spider_douyin.py | theThreeKingdom/python-exercises | fc08a7bbb9d6b53d5761b9e1017f293bff4e26db | [
"Apache-2.0"
] | null | null | null | 03_spider_douyin/spider_douyin.py | theThreeKingdom/python-exercises | fc08a7bbb9d6b53d5761b9e1017f293bff4e26db | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/4/1 0:48
# @Author : Nixin
# @Email : [email protected]
# @File : spider_douyin.py
# @Software: PyCharm
import requests, re, sys, os, time, random, socket
import http.client
from bs4 import BeautifulSoup
def get_html(url, data=None):
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
timeout = random.choice(range(80, 100))
while True:
try:
response = requests.get(url, headers=header, timeout=timeout)
response.encoding = 'utf-8'
break
except socket.timeout as e:
print(e)
time.sleep(random.choice(range(20, 60)))
except socket.error as e:
print(e)
time.sleep(random.choice(range(20, 60)))
except http.client.BadStatusLine as e:
print(e)
time.sleep(random.choice(range(30, 60)))
except http.client.IncompleteRead as e:
print(e)
time.sleep(random.choice(range(20, 60)))
# print(response.text)
return response.text
def download_douyin(num, url):
rsp = get_html(url)
patt = 'playAddr: "(.*?)",'
play = re.compile(patt).findall(rsp)[0].replace("playwm", "play")
if not play.startswith('http'):
return 0
print(type(play))
print("url="+play)
header = {
'Accept': '*/*',
'Accept-Encoding': 'identity;q=1, *;q=0',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
}
res = requests.get(play, stream=True, headers=header)
path = 'E:/nixin/douyin/video/20200419/'
if not os.path.exists(path):
os.makedirs(path)
    pathinfo = 'E:/nixin/douyin/video/20200419/%d.mp4' % num  # %d formats an integer, %s a string
total_size = int(res.headers['Content-Length'])
    print('Total size of the video:', total_size)
temp_size = 0
if res.status_code == 200:
with open(pathinfo, 'wb') as file:
# file.write(res.content)
            # print(pathinfo + ' finished downloading!')
            # For streaming downloads this is the recommended way to fetch the body:
            # iter_content() yields the file contents chunk by chunk; chunk_size=1024
            # can be tuned, and each chunk is written to disk as it arrives
for chunk in res.iter_content(chunk_size=1024):
if chunk:
temp_size += len(chunk)
file.write(chunk)
                    file.flush()  # flush the write buffer
                    # progress bar: start
                    done = int(50 * temp_size / total_size)
                    # print('percent:', done)
                    # Write to stdout and flush so the command line redraws in place;
                    # the \r carriage return rewrites the same line each update
                    sys.stdout.write("\r[%s%s] %d%%" % (
                        '█' * done, ' ' * (50 - done), 100 * temp_size / total_size) + " file: " + pathinfo + " downloaded")
                    sys.stdout.flush()  # flush the output buffer
                    # progress bar: end
    print('\n')  # move to a new line after each finished file
return 1
pass
def batch_download_douyin(start, pathtxt):
with open(pathtxt) as f:
        f_url_list = f.readlines()  # readlines() returns a list
for a in f_url_list:
print(a.strip())
if download_douyin(start, a.strip()) > 0:
start += 1
time.sleep(random.choice(range(3, 6)))
pass
if __name__ == '__main__':
# download_douyin(56, "https://v.douyin.com/3wV6PQ")
batch_download_douyin(80, "E:/nixin/douyin/video/20200419/1.txt")
pass
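# Note (illustrative): the list file passed to batch_download_douyin is expected
# to contain one Douyin share URL per line, e.g. https://v.douyin.com/3wV6PQ as
# in the commented single-download call above.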
| 33.876106 | 163 | 0.562696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,702 | 0.405818 |
26b0dec6991fd98013cf8dc45f05ed08b2f7cb49 | 479 | py | Python | hooks/relations/juju-info/requires.py | xenefix/RoutingPolicyRule | fb0c815c3fc049f63491a3ca56416c8ba0fe692c | [
"Apache-2.0"
] | null | null | null | hooks/relations/juju-info/requires.py | xenefix/RoutingPolicyRule | fb0c815c3fc049f63491a3ca56416c8ba0fe692c | [
"Apache-2.0"
] | null | null | null | hooks/relations/juju-info/requires.py | xenefix/RoutingPolicyRule | fb0c815c3fc049f63491a3ca56416c8ba0fe692c | [
"Apache-2.0"
] | null | null | null | from charms.reactive import RelationBase
from charms.reactive import scopes
from charms.reactive import hook
from charms.reactive import when
class TestRelation(RelationBase):
scope = scopes.GLOBAL
@hook('{requires:test}-relation-joined')
def joined(self):
self.set_state('{relation_name}.ready')
@when('juju-info.ready')
def test(self):
self.set_local('call_count', self.get_local('call_count', 0) + 1)
self.set_state('relation')
| 26.611111 | 73 | 0.707724 | 334 | 0.697286 | 0 | 0 | 263 | 0.549061 | 0 | 0 | 107 | 0.223382 |
26b107fd23e87c597f676bc069f5e3b5b448d1e3 | 593 | py | Python | suricata-4.1.4/python/suricata/ctl/test_filestore.py | runtest007/dpdk_surcata_4.1.1 | 5abf91f483b418b5d9c2dd410b5c850d6ed95c5f | [
"MIT"
] | 77 | 2019-06-17T07:05:07.000Z | 2022-03-07T03:26:27.000Z | suricata-4.1.4/python/suricata/ctl/test_filestore.py | clockdad/DPDK_SURICATA-4_1_1 | 974cc9eb54b0b1ab90eff12a95617e3e293b77d3 | [
"MIT"
] | 22 | 2019-07-18T02:32:10.000Z | 2022-03-24T03:39:11.000Z | suricata-4.1.4/python/suricata/ctl/test_filestore.py | clockdad/DPDK_SURICATA-4_1_1 | 974cc9eb54b0b1ab90eff12a95617e3e293b77d3 | [
"MIT"
] | 49 | 2019-06-18T03:31:56.000Z | 2022-03-13T05:23:10.000Z | from __future__ import print_function
import unittest
import filestore
class PruneTestCase(unittest.TestCase):
def test_parse_age(self):
self.assertEqual(filestore.parse_age("1s"), 1)
self.assertEqual(filestore.parse_age("1m"), 60)
self.assertEqual(filestore.parse_age("1h"), 3600)
self.assertEqual(filestore.parse_age("1d"), 86400)
        with self.assertRaises(filestore.InvalidAgeFormatError):
filestore.parse_age("1")
        with self.assertRaises(filestore.InvalidAgeFormatError):
filestore.parse_age("1y")
| 31.210526 | 71 | 0.706577 | 518 | 0.873524 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.038786 |
26b279e2de1f9a9dd6eeef9e0736a69c9c2cc2f9 | 1,027 | py | Python | chmap/examples/development/example109_store-creds.py | predsci/CHD | 35f29d1b62861f4ffed57b38d18689b282664bcf | [
"Apache-2.0"
] | 3 | 2021-06-29T00:23:47.000Z | 2021-09-17T18:29:05.000Z | chmap/examples/development/example109_store-creds.py | predsci/CHD | 35f29d1b62861f4ffed57b38d18689b282664bcf | [
"Apache-2.0"
] | null | null | null | chmap/examples/development/example109_store-creds.py | predsci/CHD | 35f29d1b62861f4ffed57b38d18689b282664bcf | [
"Apache-2.0"
] | 1 | 2021-12-08T06:26:18.000Z | 2021-12-08T06:26:18.000Z | # This is a little bit clunky, but is a better solution than writing passwords into source files
import os
from cryptography.fernet import Fernet
# cred_dir = os.path.join(os.path.dirname(os.getcwd()), "settings")
cred_dir = '/Users/cdowns/work/imac_local/CoronalHoles/mysql_credentials'
key_file = os.path.join(cred_dir, "e_key.bin")
# Generate a new local encryption key if needed
if not os.path.exists(key_file):
key = Fernet.generate_key()
# print(key)
with open(key_file, 'wb') as file_object:
file_object.write(key)
else:
with open(key_file, 'rb') as file_object:
for line in file_object:
key = line
# User inputs password interactively so it is never saved
passw = input("Enter a password to encrypt and save: ")
cipher_suite = Fernet(key)
ciphered_text = cipher_suite.encrypt(passw.encode()) # required to be bytes
creds_file = os.path.join(cred_dir, "e_cred.bin")
print("Writing credential file")
with open(creds_file, 'wb') as file_object:
file_object.write(ciphered_text)
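# Read-back sketch (illustrative): the password can later be recovered with the
# same key, e.g.
#     with open(creds_file, 'rb') as file_object:
#         password = Fernet(key).decrypt(file_object.read()).decode()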
| 31.121212 | 83 | 0.728335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.438169 |
26b4086be5d9ece68d83031748858af8aef6a984 | 663 | py | Python | indjections/packages/django-allauth.py | pandichef/indjections | e8f0e62cf648607ed50330ac69dd12e9fc0cf710 | [
"BSD-2-Clause"
] | 7 | 2020-07-20T00:33:15.000Z | 2020-07-24T04:14:53.000Z | indjections/packages/django-allauth.py | pandichef/indjections | e8f0e62cf648607ed50330ac69dd12e9fc0cf710 | [
"BSD-2-Clause"
] | 17 | 2020-07-20T03:17:06.000Z | 2020-07-27T07:36:25.000Z | indjections/packages/django-allauth.py | pandichef/indjections | e8f0e62cf648607ed50330ac69dd12e9fc0cf710 | [
"BSD-2-Clause"
] | null | null | null | settings = """
try:
AUTHENTICATION_BACKENDS += [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
except NameError:
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
INSTALLED_APPS += [
"django.contrib.sites", # not installed by default
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
]
SITE_ID = 1
"""
urls = """
from django.urls import include, path
urlpatterns += [path("accounts/", include("allauth.urls"))]
"""
| 24.555556 | 62 | 0.680241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 642 | 0.968326 |
26b4665a5f013ded26bc910df476a322704eda91 | 475 | py | Python | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/logcat/pagefactory/logcat_template_path.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 6 | 2018-11-26T08:42:52.000Z | 2020-06-01T08:33:48.000Z | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/logcat/pagefactory/logcat_template_path.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | null | null | null | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/logcat/pagefactory/logcat_template_path.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 1 | 2019-01-22T06:45:36.000Z | 2019-01-22T06:45:36.000Z | #coding=utf-8
'''
Created on 2015-10-10
@author: Devuser
'''
class LogcatPagePath(object):
    left_nav_template_path = "home/home_left_nav.html"
    logger_page_path = "logcat/logcat_index.html"
    logger_list_page = "logcat/logcat_list_page.html"
    logger_list_controll = "logcat/logcat_loger_list_controll.html"
    logger_content_container = "logcat/logcat_logger_content.html"
class LogcatCommonPath(object):
    logger_log_js = "common/logcat_log.js"
| 19 | 65 | 0.751579 | 385 | 0.810526 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.501053 |
26b56046672f411c1c88bcbb0a2ebddb8ba65691 | 176 | py | Python | competitive-programming/kattis/heimavinna.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | competitive-programming/kattis/heimavinna.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | competitive-programming/kattis/heimavinna.py | sanchopanca/coding-for-pleasure | fed1910e8a5a4241bd55aed333afd79b4405a71d | [
"MIT"
] | null | null | null | s = 0
problems = input().strip().split(';')
for p in problems:
if '-' in p:
a, b = map(int, p.split('-'))
s += b - a + 1
else:
s += 1
print(s)
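# Example (hypothetical input): '1-5;7' yields 5 problems from the range plus
# one single problem, so the program prints 6.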
| 16 | 37 | 0.420455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.051136 |
26b76d047c1414efdb3d56d1cf6e2c55efd68449 | 745 | py | Python | icepll.py | carlosedp/fusesoc-generators | 4ee343ce0013952bd89d6986bfb5ed861b2cf6b2 | [
"MIT"
] | null | null | null | icepll.py | carlosedp/fusesoc-generators | 4ee343ce0013952bd89d6986bfb5ed861b2cf6b2 | [
"MIT"
] | null | null | null | icepll.py | carlosedp/fusesoc-generators | 4ee343ce0013952bd89d6986bfb5ed861b2cf6b2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from fusesoc.capi2.generator import Generator
import subprocess
class IcepllGenerator(Generator):
def run(self):
fin = self.config.get('freq_in', 12)
fout = self.config.get('freq_out', 60)
module = self.config.get('module', False)
filename = self.config.get('filename', 'pll.v' if module else 'pll.vh')
args = ['icepll', '-f', filename, '-i', str(fin), '-o', str(fout)]
if module:
args.append('-m')
rc = subprocess.call(args)
if rc:
exit(1)
self.add_files([{filename : {'file_type' : 'verilogSource',
'is_include_file' : not module}}])
g = IcepllGenerator()
g.run()
g.write()
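# Configuration sketch (illustrative): in a FuseSoC core file this generator
# would be parameterized roughly like
#     generate:
#       pll:
#         generator: icepll
#         parameters: {freq_in: 12, freq_out: 60, module: true, filename: pll.v}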
| 31.041667 | 79 | 0.555705 | 620 | 0.832215 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.18255 |
26b7cad7376280941d68826426ca4c361383c494 | 351 | py | Python | templates/checkcel/BrasExplor_Pictures_template.py | mboudet/braskoload | 8f39f2d68638a683e0d8e5065b33a218afe370cb | [
"MIT"
] | null | null | null | templates/checkcel/BrasExplor_Pictures_template.py | mboudet/braskoload | 8f39f2d68638a683e0d8e5065b33a218afe370cb | [
"MIT"
] | null | null | null | templates/checkcel/BrasExplor_Pictures_template.py | mboudet/braskoload | 8f39f2d68638a683e0d8e5065b33a218afe370cb | [
"MIT"
] | null | null | null | from checkcel import Checkplate
from checkcel.validators import SetValidator, NoValidator
from collections import OrderedDict
class MyTemplate(Checkplate):
validators = OrderedDict([
("name@Population", NoValidator()),
("Picture", NoValidator()),
("Type", SetValidator(valid_values=["whole plant", "population"]))
])
| 29.25 | 74 | 0.706553 | 222 | 0.632479 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.162393 |
26bc7d495f1e995a46390a4be5f8a0a4a460b0ab | 2,823 | py | Python | src/shark/poker/player.py | twoodruff01/shark | 6c183de1993492b614eff332548f5c0f57facda2 | [
"Apache-2.0"
] | null | null | null | src/shark/poker/player.py | twoodruff01/shark | 6c183de1993492b614eff332548f5c0f57facda2 | [
"Apache-2.0"
] | null | null | null | src/shark/poker/player.py | twoodruff01/shark | 6c183de1993492b614eff332548f5c0f57facda2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Thomas Woodruff
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import cli
class Player():
def __init__(self, index, agent):
self.index = index
self.hand = list()
self.agent = agent()
self.chips = 500 # TODO: Chips could be a class of their own or enums (discretisation).
self.is_little_blind = False
self.is_big_blind = False
self.folded = False
def receive_card(self, card):
self.hand.append(card)
def get_action(self, game_state):
'''
TODO: Add the timeout in here.
TODO: Can use some fancy multiprocessing here if you want, to implement strict timeouts.
TODO: Pass in modified copy of self (to avoid changing chips).
'''
return self.agent.get_action(game_state, self)
def small_blind_bet(self):
'''
TODO: Do something if they've run out of money or don't have enough.
TODO: Allow changing the blind sizes somehow.
'''
self.is_little_blind = True
if self.chips < cli.BUY_IN // 2:
raise NotImplementedError("small blind doesn't have enough chips")
elif self.chips == cli.BUY_IN // 2:
raise NotImplementedError("small blind has exactly sufficient chips")
else:
self.chips -= cli.BUY_IN // 2
return cli.BUY_IN // 2
def big_blind_bet(self):
'''
TODO: Do something if they've run out of money or don't have enough.
TODO: Allow changing the blind sizes somehow.
'''
self.is_big_blind = True
if self.chips < cli.BUY_IN:
raise NotImplementedError("big blind doesn't have enough chips")
        elif self.chips == cli.BUY_IN:
raise NotImplementedError("big blind has exactly sufficient chips")
else:
self.chips -= cli.BUY_IN
return cli.BUY_IN
def has_funds(self, amount):
return amount <= self.chips
def take_bet(self, amount):
if self.chips - amount < 0:
raise Exception('tried to decrement player chips below 0')
self.chips -= amount
def __str__(self):
return f'{[str(c) for c in self.hand]}'
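# Usage sketch (illustrative; `SomeAgent` is a hypothetical agent class whose
# instances implement get_action(game_state, player)):
#     player = Player(index=0, agent=SomeAgent)
#     player.receive_card(card)
#     action = player.get_action(game_state)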
| 37.144737 | 106 | 0.616366 | 2,195 | 0.777542 | 0 | 0 | 0 | 0 | 0 | 0 | 1,408 | 0.49876 |
26bd34791b254cf4bcb5957b49692dda6546cfa1 | 1,059 | py | Python | BUNKURO/BUNKURO.py | kantoku-code/Fusion360_BUNKURO | 0c83f2ab57f03c83fcad98b85b59792360f7a804 | [
"MIT"
] | 1 | 2022-03-18T13:06:57.000Z | 2022-03-18T13:06:57.000Z | BUNKURO/BUNKURO.py | kantoku-code/Fusion360_BUNKURO | 0c83f2ab57f03c83fcad98b85b59792360f7a804 | [
"MIT"
] | null | null | null | BUNKURO/BUNKURO.py | kantoku-code/Fusion360_BUNKURO | 0c83f2ab57f03c83fcad98b85b59792360f7a804 | [
"MIT"
] | null | null | null | # Author-kantoku
# Description - splits the model per component and creates clones!
# Fusion360API Python
import adsk.core
import traceback
try:
from . import config
from .apper import apper
from .commands.BUNKUROCore import BUNKUROCore
# Create our addin definition object
my_addin = apper.FusionApp(config.app_name, config.company_name, False)
my_addin.root_path = config.app_path
my_addin.add_command(
'ぶんくろ',
BUNKUROCore,
{
            'cmd_description': 'Splits the model per component and creates clones!',
'cmd_id': 'bunkuro',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'UtilityPanel',
'cmd_resources': 'BUNKURO',
'command_visible': True,
'command_promoted': False,
'create_feature': False,
}
)
except:
app = adsk.core.Application.get()
ui = app.userInterface
if ui:
ui.messageBox('Initialization: {}'.format(traceback.format_exc()))
def run(context):
my_addin.run_app()
def stop(context):
my_addin.stop_app()
| 23.021739 | 75 | 0.634561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.367044 |
26bd97f61aa8677a52c1050fed16514074e239fd | 2,004 | py | Python | hp_steam_data/src/main.py | wangzhefeng/data-analysis | e502ac49ae1bc8287243e1faf51c467cc4d4187c | [
"MIT"
] | null | null | null | hp_steam_data/src/main.py | wangzhefeng/data-analysis | e502ac49ae1bc8287243e1faf51c467cc4d4187c | [
"MIT"
] | null | null | null | hp_steam_data/src/main.py | wangzhefeng/data-analysis | e502ac49ae1bc8287243e1faf51c467cc4d4187c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
PROJECT_PATH = "/mnt/e/dev/test/hp_steam_data/"
DATA_PATH = os.path.join(PROJECT_PATH, "data")
RESULT_PATH = os.path.join(PROJECT_PATH, "result")
def get_origin_data():
"""
    Load the raw 1-minute plant metrics and aggregate them into one DataFrame
"""
# raw data
eturb_m1_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/eturb_m1_1min_metrics-0817.csv", header = 0, index_col = None)
eturb_m2_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/eturb_m2_1min_metrics-0817.csv", header = 0, index_col = None)
boiler_m1_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/boiler_m1_1min_outlet_steam_flow.csv", header = 0, index_col = None)
boiler_m3_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/boiler_m3_1min_outlet_steam_flow.csv", header = 0, index_col = None)
steampipeline_p1_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/steampipeline_p1_1min_hp_steam_pressure.csv", header = 0, index_col = None)
# data aggregate
df = pd.DataFrame()
# eturb_m1
df["eturb_m1_steam_flow_in"] = eturb_m1_data["ExtCondensTurbineOP.steam_flow_in"]
df["eturb_m2_steam_flow_in"] = eturb_m2_data["ExtCondensTurbineOP.steam_flow_in"]
df["boiler_m1_outlet_steam_flow"] = boiler_m1_data["CFBoilerOP.outlet_steam_flow"]
df["boiler_m3_outlet_steam_flow"] = boiler_m3_data["CFBoilerOP.outlet_steam_flow"]
df["steampipeline_p1_hp_steam_pressure"] = steampipeline_p1_data["SteamPipelineOP.hp_steam_pressure"]
df["boiler_steam_flow"] = df["boiler_m1_outlet_steam_flow"] + df["boiler_m3_outlet_steam_flow"]
df["turbine_steam_flow"] = df["eturb_m1_steam_flow_in"] + df["eturb_m2_steam_flow_in"]
df = df.reset_index(drop = True)
return df
def main():
# print(os.listdir(DATA_PATH))
df = get_origin_data()
print(df.head())
# df.to_csv("/mnt/e/dev/test/hp_steam_data/result/steam_pressure_data.csv", index = None)
if __name__ == "__main__":
main()
| 36.436364 | 151 | 0.733034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,095 | 0.546407 |
26be0d11d5b6b76d30765326da6c34a562d5f111 | 621 | py | Python | stock_experiment_sbb_with_vectors_of_ones.py | dvirg/auctions | da706f3d11b9582c7f811de9f50b96b43ac8cbd0 | [
"MIT"
] | 1 | 2021-11-20T19:27:45.000Z | 2021-11-20T19:27:45.000Z | stock_experiment_sbb_with_vectors_of_ones.py | dvirg/auctions | da706f3d11b9582c7f811de9f50b96b43ac8cbd0 | [
"MIT"
] | null | null | null | stock_experiment_sbb_with_vectors_of_ones.py | dvirg/auctions | da706f3d11b9582c7f811de9f50b96b43ac8cbd0 | [
"MIT"
] | null | null | null | #!python3
"""
Simulation experiment for our AAAI 2020 paper, with recipes that are vectors of ones.
Comparing McAfee's double auction to our SBB auctions.
Author: Dvir Gilor
Since: 2020-08
"""
from experiment_stock import experiment
from mcafee_protocol import mcafee_trade_reduction
from trade_reduction_protocol import budget_balanced_trade_reduction
from ascending_auction_protocol import budget_balanced_ascending_auction
import sys
results_file = "stock/results/experiment_sbb_with_vectors_of_ones_stock.csv"
experiment(results_file, budget_balanced_ascending_auction, "SBB Ascending Prices", recipe=4*(1,))
| 27 | 97 | 0.84219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.446055 |
26be6c749fedecdd29350e7dc06917fe50136ca1 | 556 | py | Python | Modulo-02/ex058/ex058.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | [
"MIT"
] | null | null | null | Modulo-02/ex058/ex058.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | [
"MIT"
] | null | null | null | Modulo-02/ex058/ex058.py | Matheus-Henrique-Burey/Curso-de-Python | 448aebaab96527affa1e45897a662bb0407c11c6 | [
"MIT"
] | null | null | null | from random import randint
print('=-' * 15)
print('GUESS WHICH NUMBER I AM THINKING OF')
print('=-' * 15)
pc = randint(0, 10)
num = 11
cont = 0
while pc != num:
    num = int(input('Can you guess the number I picked, between 0 and 10: '))
    if num == pc:
        print('CONGRATULATIONS!!! YOU GOT IT')
    else:
        if num < pc:
            print('Higher...', end=' ')
        else:
            print('Lower...', end=' ')
        print('Try again')
        print('-' * 20)
    cont += 1
print(f'It took you {cont} tries to get it right')
| 23.166667 | 89 | 0.546763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.40395 |
26c07cd4c709d13692e520d5fa627ce985733c5a | 3,172 | py | Python | sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py | MachineLP/SFC_models | d438a4e3e88534a206c761cda7a3f6a58ac3a0ac | [
"Apache-2.0"
] | 21 | 2016-11-03T12:30:50.000Z | 2022-03-24T06:54:14.000Z | sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py | MachineLP/SFC_models | d438a4e3e88534a206c761cda7a3f6a58ac3a0ac | [
"Apache-2.0"
] | 1 | 2019-04-02T02:01:27.000Z | 2019-04-07T21:07:10.000Z | sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py | MachineLP/SFC_models | d438a4e3e88534a206c761cda7a3f6a58ac3a0ac | [
"Apache-2.0"
] | 12 | 2016-11-03T12:30:57.000Z | 2021-09-14T23:08:23.000Z | """
ex20170108_model_PC.py
Create Model PC (Godley & Lavoie Chapter 4).
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sfc_models.examples.Quick2DPlot import Quick2DPlot
from sfc_models.models import *
from sfc_models.sector import Market
from sfc_models.sector_definitions import Household, Treasury, CentralBank, TaxFlow, FixedMarginBusiness, DepositMarket, \
MoneyMarket
def main():
# Create model, which holds all entities
mod = Model()
# Create first country - Canada. (This model only has one country.)
can = Country(mod, 'CA', 'Canada')
# Create sectors
tre = Treasury(can, 'TRE', 'Treasury')
cb = CentralBank(can, 'CB', 'Central Bank')
hh = Household(can, 'HH', 'Household')
# A literally non-profit business sector
bus = FixedMarginBusiness(can, 'BUS', 'Business Sector')
# Create the linkages between sectors - tax flow, markets - labour ('LAB'), goods ('GOOD')
tax = TaxFlow(can, 'TF', 'TaxFlow', .2)
labour = Market(can, 'LAB', 'Labour market')
goods = Market(can, 'GOOD', 'Goods market')
# Add the financial markets
# GOV -> issuing sector
mm = MoneyMarket(can)
dep = DepositMarket(can)
# --------------------------------------------
# Financial asset demand equations
    # Need to call this before we set the demand functions for the financial assets
mod._GenerateFullSectorCodes()
# Need the full variable name for 'F' in household
hh_F = hh.GetVariableName('F')
hh.AddVariable('DEM_MON', 'Demand for Money', '0.5 * ' + hh_F)
hh.AddVariable('DEM_DEP', 'Demand for deposits', '0.5 * ' + hh_F)
# -----------------------------------------------------------------
# Need to set the exogenous variables
# Government demand for Goods ("G" in economist symbology)
mod.AddExogenous('TRE', 'DEM_GOOD', '[20.,] * 105')
mod.AddExogenous('DEP', 'r', '[0.0,] * 5 + [0.04]*100')
mod.AddInitialCondition('HH', 'F', 80.)
# Build the model
# Output is put into two files, based on the file name passed into main() ['out_SIM_Machine_Model']
# (1) [out_YYY]_log.txt: Log file
# (2) [out_YYY].py: File that solves the system of equations
mod.MaxTime = 100
eqns = mod._main_deprecated('out_ex20170108_model_PC')
# Only import after the file is created (which is unusual).
import out_ex20170108_model_PC as SFCmod
obj = SFCmod.SFCModel()
obj.main()
obj.WriteCSV('out_ex20170103_model_PC.csv')
Quick2DPlot(obj.t[1:], obj.GOOD_SUP_GOOD[1:], 'Goods supplied (national production Y)')
Quick2DPlot(obj.t[1:], obj.HH_F[1:], 'Household Financial Assets (F)')
if __name__ == '__main__':
main()
| 39.65 | 122 | 0.669294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,973 | 0.622005 |
26c4a3b8183960d6aad7bfb1532a7a0f533eda4e | 4,087 | py | Python | flask_simpleview/__init__.py | jackwardell/Flask-SimpleView | 813d5dbe6353f77016c893caff526abae8487492 | [
"Apache-2.0"
] | null | null | null | flask_simpleview/__init__.py | jackwardell/Flask-SimpleView | 813d5dbe6353f77016c893caff526abae8487492 | [
"Apache-2.0"
] | null | null | null | flask_simpleview/__init__.py | jackwardell/Flask-SimpleView | 813d5dbe6353f77016c893caff526abae8487492 | [
"Apache-2.0"
] | null | null | null | import re
import flask
import flask.views
from functools import wraps
def camel_case_to_snake_case(word):
"""very simple mechanism for turning CamelCase words into snake_case"""
return re.sub(r"(?<!^)(?=[A-Z])", "_", word).lower()
def camel_case_to_slug_case(word):
"""very simple mechanism for turning CamelCase words into slug-case"""
return re.sub(r"(?<!^)(?=[A-Z])", "-", word).lower()
class NoTemplate(Exception):
"""exception for when no template is passed either as a param or in the class"""
pass
class SkeletonMixin:
"""Mixin for the flask Skeleton (shared methods between Flask and Blueprint)"""
def add_url_rule(self, *args, **kwargs):
raise NotImplementedError()
def add_view(self, view):
# make all the elements for the adding of a url_rule
# make rule
rule = view.make_rule()
# make endpoint
endpoint = view.make_endpoint()
# make view_func
view_func = view.as_view(endpoint)
# make a list of all the view functions to add
view_funcs = []
# iterate through all the registered methods
for method in view.methods:
# get the function itself
func = getattr(view, method.lower())
# if the function has been decorated with a __rule_extension__
# we can grab it and make the extended rule
if hasattr(func, "__rule_extension__"):
# make the extended rule
extended_rule = rule + func.__rule_extension__
# save the new rule and view func as params for add_url_rule
params = {"rule": extended_rule, "view_func": view_func}
else:
# else we do it without the rule extension
params = {"rule": rule, "view_func": view_func}
# append the method to the list of view funcs
view_funcs.append(params)
# finally, iterate through the view_funcs and add the url_rule
for params in view_funcs:
self.add_url_rule(**params)
def add_api(self, api):
return self.add_view(api)
class Flask(flask.Flask, SkeletonMixin):
"""The flask.Flask application"""
pass
class Blueprint(flask.Blueprint, SkeletonMixin):
"""The flask.Blueprint blueprint"""
pass
class ViewConstructor:
"""mechanism for construction of endpoint and rule"""
methods = []
@classmethod
def get_name(cls):
return getattr(cls, "name", cls.__name__)
@classmethod
def make_endpoint(cls):
return getattr(cls, "endpoint", camel_case_to_snake_case(cls.get_name()))
@classmethod
def make_rule(cls):
return getattr(cls, "rule", "/" + camel_case_to_slug_case(cls.get_name()))
@classmethod
def iter_methods(cls):
for method in cls.methods:
            func = getattr(cls, method.lower())
            yield method, func  # yield each registered HTTP method with its handler
def extends_rule(rule):
def extend_rule(func):
# assert False
# cls, method = func.__qualname__.split('.')
func.__rule_extension__ = rule
return func
# @wraps(func)
# def decorator(*args, **kwargs):
# return func(*args, **kwargs)
#
# return decorator
return extend_rule
class SimpleView(flask.views.MethodView, ViewConstructor):
def render_template(self, *optional_template_name_or_list, **context):
if not hasattr(self, "template") and not optional_template_name_or_list:
raise NoTemplate("No template passed or found on the view")
template_name_or_list = (
optional_template_name_or_list[0]
if optional_template_name_or_list
else self.template
)
return flask.render_template(template_name_or_list, **context)
def __getattr__(self, attr):
return getattr(flask, attr)
def __repr__(self):
rv = '<{}(rule="{}", endpoint="{}", methods={})>'.format(
self.__class__.__name__, self.rule, self.endpoint, self.methods
)
return rv
API = View = SimpleView
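# Usage sketch (illustrative, based on the classes above):
#     app = Flask(__name__)
#
#     class UserProfile(SimpleView):
#         template = "profile.html"
#
#         @extends_rule("/<int:user_id>")
#         def get(self, user_id):
#             return self.render_template(user_id=user_id)
#
#     app.add_view(UserProfile)  # registers GET /user-profile/<int:user_id>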
| 28.381944 | 84 | 0.62711 | 3,274 | 0.801077 | 0 | 0 | 448 | 0.109616 | 0 | 0 | 1,342 | 0.328358 |
26c5a0a8bb014c980c7a75f56eb95838d11757a4 | 2,287 | py | Python | qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py | knktc/qingcloud-cli | 2be8bba43e08bd7a76e1326ece871386cc9b5b55 | [
"Apache-2.0"
] | 11 | 2015-05-27T19:52:36.000Z | 2021-04-15T09:07:39.000Z | qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py | knktc/qingcloud-cli | 2be8bba43e08bd7a76e1326ece871386cc9b5b55 | [
"Apache-2.0"
] | 7 | 2017-07-19T05:05:03.000Z | 2019-04-25T07:18:04.000Z | qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py | knktc/qingcloud-cli | 2be8bba43e08bd7a76e1326ece871386cc9b5b55 | [
"Apache-2.0"
] | 19 | 2016-03-15T07:31:47.000Z | 2021-07-26T09:31:33.000Z | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.iaas import constants as const
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DeployAppVersionAction(BaseAction):
action = const.ACTION_DEPLOY_APP_VERSION
command = 'deploy-app-version'
usage = '%(prog)s -v <version_id> -c <conf> [-d <debug>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-v', '--version_id', dest='version_id',
action='store', type=str, default=None,
                            help='the ID of the application version you want to deploy.')
parser.add_argument('-c', '--conf', dest='conf',
action="store", type=str, default=None,
help='the json format string of config to create the cluster')
parser.add_argument('-d', '--debug', dest='debug',
action="store", type=int, default=0,
help='whether to open debug mode [0 or 1]')
@classmethod
def build_directive(cls, options):
if options.version_id is None:
print('error: version_id should be specified.')
return None
if options.conf is None:
print('error: conf should be specified.')
return None
directive = {
"version_id": options.version_id,
"conf": options.conf,
"debug": options.debug}
return directive
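# Example invocation (illustrative; placeholders, not real IDs):
#     qingcloud iaas deploy-app-version -v <version_id> -c '<json conf>' -d 1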
| 40.122807 | 91 | 0.567118 | 1,343 | 0.587232 | 0 | 0 | 1,147 | 0.50153 | 0 | 0 | 1,224 | 0.535199 |
26c5a8a0093950f0c391d5d30211cf53ae6f042f | 5,022 | py | Python | padevents/events.py | muffin-rice/pad-cogs | 820ecf08f9569a3d7cf3264d0eb9567264b42edf | [
"MIT"
] | 2 | 2020-09-25T01:57:21.000Z | 2020-10-02T13:46:48.000Z | padevents/events.py | muffin-rice/pad-cogs | 820ecf08f9569a3d7cf3264d0eb9567264b42edf | [
"MIT"
] | 43 | 2020-08-29T06:16:39.000Z | 2020-10-29T12:00:15.000Z | padevents/events.py | muffin-rice/pad-cogs | 820ecf08f9569a3d7cf3264d0eb9567264b42edf | [
"MIT"
] | 6 | 2020-08-31T04:37:55.000Z | 2020-10-19T05:09:17.000Z | import datetime
from datetime import timedelta
from typing import Callable, Collection, TYPE_CHECKING
import pytz
from tsutils.formatting import normalize_server_name
from tsutils.time import JP_TIMEZONE, KR_TIMEZONE, NA_TIMEZONE
from padevents.enums import DungeonType, EventLength
if TYPE_CHECKING:
from dbcog.models.scheduled_event_model import ScheduledEventModel
SUPPORTED_SERVERS = ["JP", "NA", "KR"]
SERVER_TIMEZONES = {
"JP": JP_TIMEZONE,
"NA": NA_TIMEZONE,
"KR": KR_TIMEZONE,
}
class Event:
def __init__(self, scheduled_event: "ScheduledEventModel"):
self.key = scheduled_event.event_id
self.server = SUPPORTED_SERVERS[scheduled_event.server_id]
self.open_datetime: datetime = scheduled_event.open_datetime
self.close_datetime: datetime = scheduled_event.close_datetime
self.group = scheduled_event.group_name
self.dungeon = scheduled_event.dungeon
self.dungeon_name = self.dungeon.name_en if self.dungeon else 'unknown_dungeon'
self.clean_dungeon_name = self.dungeon.clean_name_en if self.dungeon else 'unknown_dungeon'
self.dungeon_type = DungeonType(self.dungeon.dungeon_type) if self.dungeon else DungeonType.Unknown
@property
def event_length(self) -> EventLength:
# This is a little off. I don't know how exact these things are.
length = self.close_datetime - self.open_datetime
if length > timedelta(days=8):
return EventLength.special
if length > timedelta(days=2):
return EventLength.weekly
if length > timedelta(hours=20):
return EventLength.daily
return EventLength.limited
def start_from_now_sec(self) -> float:
now = datetime.datetime.now(pytz.utc)
return (self.open_datetime - now).total_seconds()
def end_from_now_sec(self) -> float:
now = datetime.datetime.now(pytz.utc)
return (self.close_datetime - now).total_seconds()
def is_started(self):
"""True if past the open time for the event."""
return self.start_from_now_sec() <= 0
def is_finished(self):
"""True if past the close time for the event."""
return self.end_from_now_sec() <= 0
def start_from_now_discord(self) -> str:
return f"<t:{int(self.open_datetime.timestamp())}:R>"
def end_from_now_discord(self) -> str:
return f"<t:{int(self.close_datetime.timestamp())}:R>"
def end_from_now_full_min(self) -> str:
days, sec = divmod(self.end_from_now_sec(), 86400)
hours, sec = divmod(sec, 3600)
minutes, sec = divmod(sec, 60)
if days > 0:
return '{:2}d {:2}h'.format(int(days), int(hours))
elif hours > 0:
return '{:2}h {:2}m'.format(int(hours), int(minutes))
else:
return '{:2}m'.format(int(minutes))
def group_long_name(self):
return self.group.upper() if self.group is not None else "ALL"
def to_partial_event(self, pe):
group = self.group_long_name()[0] if self.group is not None else " "
if self.is_started():
return "`" + group + " " + self.clean_dungeon_name + " " * (
max(24 - len(self.clean_dungeon_name), 0)) + "-`" + self.end_from_now_discord()
else:
return "`" + group + " " + self.clean_dungeon_name + " " * (
max(24 - len(self.clean_dungeon_name), 0)) + "-`" + self.start_from_now_discord()
def __repr__(self):
return f"Event<{self.clean_dungeon_name} ({self.group} {self.server})>"
class EventList:
def __init__(self, event_list: Collection[Event]):
self.event_list = event_list
def with_func(self, func: Callable[[Event], bool]) -> "EventList":
return EventList(list(filter(func, self.event_list)))
def with_server(self, *servers):
servers = {normalize_server_name(s) for s in servers}
return self.with_func(lambda e: e.server in servers)
def with_type(self, *event_types):
return self.with_func(lambda e: e.event_type in event_types)
def with_length(self, *event_lengths):
return self.with_func(lambda e: e.event_length in event_lengths)
def with_dungeon_type(self, *dungeon_types):
return self.with_func(lambda e: e.dungeon_type in dungeon_types)
def is_grouped(self):
return self.with_func(lambda e: e.group is not None)
def pending_only(self):
return self.with_func(lambda e: not e.is_started())
def active_only(self):
return self.with_func(lambda e: e.is_started() and not e.is_finished())
def today_only(self, server: str):
server_timezone = SERVER_TIMEZONES[normalize_server_name(server)]
today = datetime.datetime.now(server_timezone).date()
return self.with_func(lambda e: e.open_datetime.astimezone(server_timezone).date() == today)
def __iter__(self):
return iter(self.event_list)
def __bool__(self):
return bool(self.event_list)
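# Usage sketch (illustrative):
#     active_na = EventList(events).with_server("NA").active_only()
#     for event in active_na:
#         print(event)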
| 36.656934 | 107 | 0.666866 | 4,509 | 0.897849 | 0 | 0 | 452 | 0.090004 | 0 | 0 | 474 | 0.094385 |
26c624cf265c7c07d22297b36c515787f4a26b33 | 92 | py | Python | proton_decay_study/config/__init__.py | HEP-DL/proton_decay_study | e7b087a123b893254401f897100f656588bf0c19 | [
"MIT"
] | 2 | 2018-01-02T02:58:50.000Z | 2021-11-17T07:26:25.000Z | proton_decay_study/config/__init__.py | HEP-DL/proton_decay_study | e7b087a123b893254401f897100f656588bf0c19 | [
"MIT"
] | 415 | 2017-04-27T21:16:01.000Z | 2022-03-27T23:01:21.000Z | proton_decay_study/config/__init__.py | HEP-DL/proton_decay_study | e7b087a123b893254401f897100f656588bf0c19 | [
"MIT"
] | 2 | 2018-01-03T18:38:45.000Z | 2019-10-03T19:27:29.000Z |
class Config(object):
"""
Represents configuration of network training
"""
pass
| 11.5 | 48 | 0.663043 | 89 | 0.967391 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.630435 |
26c6baf54f78e9c92b1e52fb48aafcc91b720d02 | 1,409 | py | Python | server/getSert.py | sviridovt/WIE | 9af6d3dff7e774f5e332e6c77eadde815d4c375d | [
"MIT"
] | 1 | 2021-09-03T11:36:02.000Z | 2021-09-03T11:36:02.000Z | server/getSert.py | sviridovt/WIE | 9af6d3dff7e774f5e332e6c77eadde815d4c375d | [
"MIT"
] | null | null | null | server/getSert.py | sviridovt/WIE | 9af6d3dff7e774f5e332e6c77eadde815d4c375d | [
"MIT"
] | 1 | 2021-09-03T11:36:04.000Z | 2021-09-03T11:36:04.000Z | # allows importing the RSA lib from a different dir
import sys
# inserts path to access RSA encryption lib
# sys.path.insert(0, '../RSAEncryption')
import socket
import json
from libs.communication import sendEncrypted, recvEncrypted, sendData, readData
from libs.RSAKeys import readPrivateKey
from libs.EncryptedSocket import EncryptedSocket
from libs.settings import *
HOST = '127.0.0.1'
PORT = 4444
printDebug = True
SSID = "SecureCanes"
# NOTE: this local helper shadows the readData imported from libs.communication
def readData(conn):
    packetFile = open("packetText.txt", mode='w+')  # 'w+' so stale data is not re-read
    recvd = 0
    while True:
        mess = conn.recv(512).decode('utf-8')
        recvd += len(mess)
        packetFile.write(mess)
        if len(mess) < 512:
            break
    packetFile.seek(0)  # rewind: the position is at the end after writing
    serverData = packetFile.read(recvd)
    packetFile.close()
    return serverData
# sending data
# NOTE: this local helper shadows the sendData imported from libs.communication
def sendData(conn, data):
    dataFile = open("sendData.txt", mode='w+')  # 'w+' so the temp file starts empty
    dataFile.write(data)
    dataFile.seek(0)  # rewind so the data just written can be read back
    sent = 0  # must be initialized before the loop
    while True:
        packet = dataFile.read(512)
        conn.send(packet.encode('utf-8'))
        sent += len(packet)
        if len(packet) < 512:
            dataFile.close()
            break
    return sent
def renewCert(pubKey, SSID):
# Encrypted Sockets
s = EncryptedSocket(HOST, PORT)
# send SSID
s.send(SSID)
# receive certificate
cert = s.read()
fl = open(CERT_FILE, 'w+')
fl.write(cert)
s.close()
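# Usage sketch (illustrative): refresh the local certificate for this SSID.
#     renewCert(None, SSID)  # note: the pubKey argument is currently unused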
| 21.029851 | 79 | 0.675656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.246984 |
26c71f804645b9d738d4394f797c6533de859d14 | 7,085 | py | Python | code/billiard_game_multi_ball.py | ifsheldon/billiard_game | 1ce13d39158734efd76e617bba2bb319d5498c3f | [
"BSD-2-Clause"
] | null | null | null | code/billiard_game_multi_ball.py | ifsheldon/billiard_game | 1ce13d39158734efd76e617bba2bb319d5498c3f | [
"BSD-2-Clause"
] | null | null | null | code/billiard_game_multi_ball.py | ifsheldon/billiard_game | 1ce13d39158734efd76e617bba2bb319d5498c3f | [
"BSD-2-Clause"
] | null | null | null | import taichi as ti
import numpy as np
from functools import partial
from itertools import combinations
from billiard_game_dual_ball import normalize_vector, two_ball_collides, calc_next_pos_and_velocity, \
calc_after_collision_velocity, rectify_positions_in_collision, rectify_positions_and_velocities
# Constants
WHITE = 0xFFFFFF
RED = 0xFF0000
GREEN = 0x00FF00
BLUE = 0x0000FF
# wc for world space x[0.0, ratio], y[0.0, 1.0]
# sc for screen space [0.0, 1.0]^2
# Constant parameters
RESOLUTION = (1230, 750)
RATIO = RESOLUTION[0] / RESOLUTION[1] # x/y
FPS = 60
CUE_BALL_IDX = 0
STICK_LENGTH_SC = 0.1
DRAG_COEFFICIENT = 0.03
G = 9.8
CUE_BALL_MAX_SPEED_WC = 1.0
BALL_PIXEL_RADIUS = 10
HOLE_PIXEL_RADIUS = 15
num_balls = 1
# Derived parameters
ball_radius_wc = BALL_PIXEL_RADIUS / RESOLUTION[1]
hole_radius_wc = HOLE_PIXEL_RADIUS / RESOLUTION[1]
x_begin_wc = 0.0
x_end_wc = RATIO
y_begin_wc = 0.0
y_end_wc = 1.0
def score(hole_center_positions, ball_position):
# Don't care now
diff = hole_center_positions - ball_position.reshape(1, 2)
square_dist = (diff ** 2).sum(axis=-1)
radii_square_sum = (0.8 * ball_radius_wc + hole_radius_wc) ** 2
return np.any(square_dist <= radii_square_sum)
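# (score() pockets a ball when its center lies within 0.8*ball_radius_wc +
#  hole_radius_wc of a hole center -- with the defaults above, 23/750 in wc units.)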
def place_balls_wc(span_wc, offset_wc):
# No need now
ball_pos_wc = np.zeros((num_balls, 2))
for i in range(num_balls):
ball_i_pos_wc = np.random.rand(2) * span_wc + offset_wc
if i != CUE_BALL_IDX:
while two_ball_collides(ball_pos_wc[CUE_BALL_IDX], ball_i_pos_wc, ball_radius_wc):
ball_i_pos_wc = np.random.rand(2) * span_wc + offset_wc
ball_pos_wc[i] = ball_i_pos_wc
return ball_pos_wc
if __name__ == "__main__":
ti.init(ti.cpu)
print("Press A to kick the cue ball")
wc_to_sc_multiplier = np.array([1 / RATIO, 1]) # transform to [0,1]^ screen space
sc_to_wc_multiplier = np.array([RATIO, 1])
virtual_bound_x = np.array([ball_radius_wc, x_end_wc - ball_radius_wc])
virtual_bound_y = np.array([ball_radius_wc, y_end_wc - ball_radius_wc])
dx_wc = x_end_wc / 2.
dy_wc = y_end_wc / 2.
hole_pos_x = np.arange(3) * dx_wc
hole_pos_y = np.arange(3) * dy_wc
hole_pos_x, hole_pos_y = np.meshgrid(hole_pos_x, hole_pos_y)
hole_center_positions_wc = np.stack([hole_pos_x, hole_pos_y], axis=-1).reshape(-1, 2) # (3, 3, 2) -> (9, 2)
hole_center_positions_wc = np.delete(hole_center_positions_wc, 4, axis=0)
hole_center_positions_sc = hole_center_positions_wc * wc_to_sc_multiplier.reshape(1, 2)
ball_velocities_wc = np.zeros((num_balls, 2))
ball_visible = np.ones(num_balls, dtype=bool)
span_wc = np.array([virtual_bound_x[1] - virtual_bound_x[0], virtual_bound_y[1] - virtual_bound_y[0]])
offset_wc = np.array([virtual_bound_x[0], virtual_bound_y[0]])
ball_pos_wc = place_balls_wc(span_wc, offset_wc)
gui = ti.GUI("billiard_game_multi_ball", RESOLUTION)
gui.fps_limit = FPS
delta_t = 1.0 / FPS
boundary_begin_wc = np.array([
[x_begin_wc, y_begin_wc],
[x_begin_wc, y_begin_wc],
[x_end_wc, y_end_wc],
[x_end_wc, y_end_wc]
])
boundary_end_wc = np.array([
[x_end_wc, y_begin_wc],
[x_begin_wc, y_end_wc],
[x_end_wc, y_begin_wc],
[x_begin_wc, y_end_wc]
])
# a convenient partial function of rectify_positions_and_velocities
rectify_pv = partial(rectify_positions_and_velocities,
virtual_bound_x[0], virtual_bound_x[1],
virtual_bound_y[0], virtual_bound_y[1])
ball_pairs = list(combinations(range(num_balls), 2))
ball_color_indices = np.ones(num_balls)
ball_color_indices[CUE_BALL_IDX] = 0
ball_colors = [WHITE, RED]
while gui.running:
gui.clear(GREEN)
hit_ball = gui.get_event(ti.GUI.PRESS) and gui.is_pressed("a")
cue_ball_pos_sc = ball_pos_wc[CUE_BALL_IDX] * wc_to_sc_multiplier
        # with the current setting, the mouse is available only when all balls are stationary
if np.allclose((ball_velocities_wc ** 2).sum(-1), 0., rtol=0.001, atol=0.001) and ball_visible[CUE_BALL_IDX]:
rod_dir_sc, length = normalize_vector(gui.get_cursor_pos() - cue_ball_pos_sc)
rod_line = rod_dir_sc * min(STICK_LENGTH_SC, length)
gui.line(cue_ball_pos_sc, cue_ball_pos_sc + rod_line, radius=2)
if hit_ball:
ball_velocities_wc[CUE_BALL_IDX] = (rod_dir_sc * sc_to_wc_multiplier) \
* CUE_BALL_MAX_SPEED_WC * (min(STICK_LENGTH_SC,
length) / STICK_LENGTH_SC) # modify the speed with a multiplier dependent on the distance between mouse and the cue ball
# for i in range(num_balls): # for each ball, if score() returns True, set this ball invisible
# # Not care now
# if score(hole_center_positions_wc, ball_pos_wc[i]):
# ball_visible[i] = False
# ball_velocities_wc[i] = 0.
# No need to care about this in verilog
gui.lines(begin=boundary_begin_wc, end=boundary_end_wc, radius=2)
gui.circles(ball_pos_wc[ball_visible] * wc_to_sc_multiplier.reshape(1, 2),
radius=BALL_PIXEL_RADIUS,
palette=ball_colors,
palette_indices=ball_color_indices[ball_visible])
gui.circles(hole_center_positions_sc, radius=HOLE_PIXEL_RADIUS, color=0)
gui.show()
for i in range(num_balls): # unroll this loop for the two ball case
if not ball_visible[i]:
continue
next_pos_wc, next_velocity_wc = calc_next_pos_and_velocity(ball_pos_wc[i], ball_velocities_wc[i],
delta_t, DRAG_COEFFICIENT, G)
next_pos_wc, next_velocity_wc = rectify_pv(next_pos_wc, next_velocity_wc)
ball_pos_wc[i] = next_pos_wc
ball_velocities_wc[i] = next_velocity_wc
for ball_i, ball_j in ball_pairs: # only one iteration for the two ball case, since we have only one pair
if not ball_visible[ball_i] or not ball_visible[ball_j]:
continue
ball_i_pos_wc = ball_pos_wc[ball_i]
ball_j_pos_wc = ball_pos_wc[ball_j]
if two_ball_collides(ball_i_pos_wc, ball_j_pos_wc, ball_radius_wc):
ball_i_pos_wc, ball_j_pos_wc = rectify_positions_in_collision(ball_i_pos_wc, ball_j_pos_wc,
ball_radius_wc)
ball_i_v_wc = ball_velocities_wc[ball_i]
ball_j_v_wc = ball_velocities_wc[ball_j]
ball_i_v_wc, ball_j_v_wc = calc_after_collision_velocity(ball_i_pos_wc, ball_j_pos_wc,
ball_i_v_wc, ball_j_v_wc)
ball_velocities_wc[ball_i] = ball_i_v_wc
ball_velocities_wc[ball_j] = ball_j_v_wc
| 44.006211 | 203 | 0.651941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 926 | 0.130699 |