max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
somaticseq/utilities/dockered_pipelines/bamSimulator/bamSurgeon/convert_nonStandardBasesInVcfs.py | bioinform/somaticseq | 159 | 12698491 | #!/usr/bin/env python3
import sys, re
for line_i in sys.stdin:
if line_i.startswith('#'):
print(line_i, end='')
else:
item = line_i.rstrip().split('\t')
item[3] = re.sub(r'[^gctanGCTAN,0-9]', 'N', item[3])
item[4] = re.sub(r'[^gctanGCTAN,0-9]', 'N', item[4])
line_out = '\t'.join(item)
print(line_out)
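# Illustrative invocation (file names are placeholders): the script reads a VCF on
# stdin and writes the sanitized VCF, with non-standard bases replaced by 'N', to stdout.
#   cat input.vcf | python3 convert_nonStandardBasesInVcfs.py > cleaned.vcf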
|
src/amuse/community/ph4/binary_hist.py | rknop/amuse | 131 | 12698524 | from pylab import *
import sys
import re
if __name__ == '__main__':
if len(sys.argv) != 3:
print("usage: python binary_hist.py <input filename> <time of histogram>")
sys.exit(1)
else:
fname = sys.argv[1]
time = float(sys.argv[2])
f = open(fname, "r")
inblock = False
EkTs = []
for line in f:
if re.search("%%% time= (\d+\.\d*)", line):
if float(re.search("%%% time= (\d+\.\d*)", line).group(1)) == time:
inblock = True
if inblock and re.search("%%% .*E/kT=(-?\d+\.\d+)", line):
EkTs.append(float(re.search("%%%.*E/kT=(-?\d+\.\d+)",
line).group(1)))
if inblock and re.search("%%% Emul/E", line):
inblock = False
f.close()
if len(EkTs) > 0:
hist(EkTs)
show()
else:
print("No binaries found at time = %f." % time)
|
xmodaler/modeling/layers/positionwise_feedforward.py | cclauss/xmodaler | 830 | 12698527 | """
From original at https://github.com/aimagelab/meshed-memory-transformer/blob/master/models/transformer/utils.py
Original copyright of AImageLab code below, modifications by <NAME>, Copyright 2021.
"""
# Copyright (c) 2019, AImageLab
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ["PositionWiseFeedForward"]
class PositionWiseFeedForward(nn.Module):
'''
Position-wise feed forward layer
'''
def __init__(
self,
*,
d_model: int,
d_ff: int,
dropout: float
):
super(PositionWiseFeedForward, self).__init__()
#self.identity_map_reordering = identity_map_reordering
self.fc1 = nn.Linear(d_model, d_ff)
self.fc2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(p=dropout) if dropout > 0. else None
self.dropout_2 = nn.Dropout(p=dropout) if dropout > 0. else None
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inputs):
# if self.identity_map_reordering:
# out = self.layer_norm(input)
# out = self.fc2(self.dropout_2(F.relu(self.fc1(out))))
# out = input + self.dropout(torch.relu(out))
#else:
out = F.relu(self.fc1(inputs))
if self.dropout_2:
out = self.dropout_2(out)
out = self.fc2(out)
if self.dropout:
out = self.dropout(out)
out = self.layer_norm(inputs + out)
return out
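# Minimal usage sketch (shapes and hyperparameters below are illustrative, not from the original):
#   ffn = PositionWiseFeedForward(d_model=512, d_ff=2048, dropout=0.1)
#   x = torch.rand(8, 20, 512)   # (batch, sequence, d_model)
#   y = ffn(x)                   # same shape as x; residual connection plus layer norm applied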
|
ulid/api/microsecond.py | xkortex/ulid | 303 | 12698535 |
"""
ulid/api/microsecond
~~~~~~~~~~~~~~~~~~~~
Contains the public API of the `ulid` package using the microsecond provider.
"""
from .. import consts, providers, ulid
from . import api
API = api.Api(providers.MICROSECOND)
create = API.create
from_bytes = API.from_bytes
from_int = API.from_int
from_randomness = API.from_randomness
from_str = API.from_str
from_timestamp = API.from_timestamp
from_uuid = API.from_uuid
new = API.new
parse = API.parse
MIN_TIMESTAMP = consts.MIN_TIMESTAMP
MAX_TIMESTAMP = consts.MAX_TIMESTAMP
MIN_RANDOMNESS = consts.MIN_RANDOMNESS
MAX_RANDOMNESS = consts.MAX_RANDOMNESS
MIN_ULID = consts.MIN_ULID
MAX_ULID = consts.MAX_ULID
Timestamp = ulid.Timestamp
Randomness = ulid.Randomness
ULID = ulid.ULID
__all__ = api.ALL
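# Illustrative usage (the import path is assumed from the package layout above):
#   from ulid.api import microsecond
#   u = microsecond.new()                 # ULID built with the microsecond timestamp provider
#   u2 = microsecond.from_str(str(u))     # round-trip through the canonical string form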
|
supervised-oie-benchmark/oie_readers/propsReader.py | acoli-repo/OpenIE_Stanovsky_Dagan | 117 | 12698550 | from oie_readers.oieReader import OieReader
from oie_readers.extraction import Extraction
class PropSReader(OieReader):
def __init__(self):
self.name = 'PropS'
def read(self, fn):
d = {}
with open(fn) as fin:
for line in fin:
if not line.strip():
continue
data = line.strip().split('\t')
confidence, text, rel = data[:3]
curExtraction = Extraction(pred = rel, sent = text, confidence = float(confidence))
for arg in data[4::2]:
curExtraction.addArg(arg)
d[text] = d.get(text, []) + [curExtraction]
self.oie = d
self.normalizeConfidence()
def normalizeConfidence(self):
''' Normalize confidence to resemble probabilities '''
EPSILON = 1e-3
self.confidences = [extraction.confidence for sent in self.oie for extraction in self.oie[sent]]
maxConfidence = max(self.confidences)
minConfidence = min(self.confidences)
denom = maxConfidence - minConfidence + (2*EPSILON)
for sent, extractions in self.oie.items():
for extraction in extractions:
extraction.confidence = ( (extraction.confidence - minConfidence) + EPSILON) / denom
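# Illustrative usage (the file path is a placeholder for a tab-separated PropS output file):
#   reader = PropSReader()
#   reader.read('props_output.txt')
#   extractions_by_sentence = reader.oie  # dict mapping sentence text to Extraction lists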
|
scripts/external_libs/scapy-2.4.3/scapy/contrib/automotive/uds.py | timgates42/trex-core | 956 | 12698577 | #! /usr/bin/env python
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
# scapy.contrib.description = Unified Diagnostic Service (UDS)
# scapy.contrib.status = loads
import struct
from scapy.fields import ByteEnumField, StrField, ConditionalField, \
BitEnumField, BitField, XByteField, FieldListField, \
XShortField, X3BytesField, XIntField, ByteField, \
ShortField, ObservableDict, XShortEnumField, XByteEnumField
from scapy.packet import Packet, bind_layers
from scapy.config import conf
from scapy.error import log_loading
from scapy.utils import PeriodicSenderThread
"""
UDS
"""
try:
if conf.contribs['UDS']['treat-response-pending-as-answer']:
pass
except KeyError:
log_loading.info("Specify \"conf.contribs['UDS'] = "
"{'treat-response-pending-as-answer': True}\" to treat "
"a negative response 'requestCorrectlyReceived-"
"ResponsePending' as answer of a request. \n"
"The default value is False.")
conf.contribs['UDS'] = {'treat-response-pending-as-answer': False}
class UDS(Packet):
services = ObservableDict(
{0x10: 'DiagnosticSessionControl',
0x11: 'ECUReset',
0x14: 'ClearDiagnosticInformation',
0x19: 'ReadDTCInformation',
0x22: 'ReadDataByIdentifier',
0x23: 'ReadMemoryByAddress',
0x24: 'ReadScalingDataByIdentifier',
0x27: 'SecurityAccess',
0x28: 'CommunicationControl',
0x2A: 'ReadDataPeriodicIdentifier',
0x2C: 'DynamicallyDefineDataIdentifier',
0x2E: 'WriteDataByIdentifier',
0x2F: 'InputOutputControlByIdentifier',
0x31: 'RoutineControl',
0x34: 'RequestDownload',
0x35: 'RequestUpload',
0x36: 'TransferData',
0x37: 'RequestTransferExit',
0x3D: 'WriteMemoryByAddress',
0x3E: 'TesterPresent',
0x50: 'DiagnosticSessionControlPositiveResponse',
0x51: 'ECUResetPositiveResponse',
0x54: 'ClearDiagnosticInformationPositiveResponse',
0x59: 'ReadDTCInformationPositiveResponse',
0x62: 'ReadDataByIdentifierPositiveResponse',
0x63: 'ReadMemoryByAddressPositiveResponse',
0x64: 'ReadScalingDataByIdentifierPositiveResponse',
0x67: 'SecurityAccessPositiveResponse',
0x68: 'CommunicationControlPositiveResponse',
0x6A: 'ReadDataPeriodicIdentifierPositiveResponse',
0x6C: 'DynamicallyDefineDataIdentifierPositiveResponse',
0x6E: 'WriteDataByIdentifierPositiveResponse',
0x6F: 'InputOutputControlByIdentifierPositiveResponse',
0x71: 'RoutineControlPositiveResponse',
0x74: 'RequestDownloadPositiveResponse',
0x75: 'RequestUploadPositiveResponse',
0x76: 'TransferDataPositiveResponse',
0x77: 'RequestTransferExitPositiveResponse',
0x7D: 'WriteMemoryByAddressPositiveResponse',
0x7E: 'TesterPresentPositiveResponse',
0x83: 'AccessTimingParameter',
0x84: 'SecuredDataTransmission',
0x85: 'ControlDTCSetting',
0x86: 'ResponseOnEvent',
0x87: 'LinkControl',
0xC3: 'AccessTimingParameterPositiveResponse',
0xC4: 'SecuredDataTransmissionPositiveResponse',
0xC5: 'ControlDTCSettingPositiveResponse',
0xC6: 'ResponseOnEventPositiveResponse',
0xC7: 'LinkControlPositiveResponse',
0x7f: 'NegativeResponse'})
name = 'UDS'
fields_desc = [
XByteEnumField('service', 0, services)
]
def answers(self, other):
"""DEV: true if self is an answer from other"""
if other.__class__ == self.__class__:
return (other.service + 0x40) == self.service or \
(self.service == 0x7f and
self.requestServiceId == other.service and
(self.negativeResponseCode != 0x78 or
conf.contribs['UDS']['treat-response-pending-as-answer']))
return 0
def hashret(self):
if self.service == 0x7f:
return struct.pack('B', self.requestServiceId)
return struct.pack('B', self.service & ~0x40)
# ########################DSC###################################
class UDS_DSC(Packet):
diagnosticSessionTypes = {
0x00: 'ISOSAEReserved',
0x01: 'defaultSession',
0x02: 'programmingSession',
0x03: 'extendedDiagnosticSession',
0x04: 'safetySystemDiagnosticSession',
0x7F: 'ISOSAEReserved'}
name = 'DiagnosticSessionControl'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0, diagnosticSessionTypes)
]
bind_layers(UDS, UDS_DSC, service=0x10)
class UDS_DSCPR(Packet):
name = 'DiagnosticSessionControlPositiveResponse'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0,
UDS_DSC.diagnosticSessionTypes),
StrField('sessionParameterRecord', B"")
]
bind_layers(UDS, UDS_DSCPR, service=0x50)
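# Illustrative round trip for the bindings above (values follow the UDS_DSC enums):
#   req = UDS(service=0x10) / UDS_DSC(diagnosticSessionType=0x03)
#   resp = UDS(service=0x50) / UDS_DSCPR(diagnosticSessionType=0x03)
#   resp.answers(req)   # True, since 0x10 + 0x40 == 0x50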
# #########################ER###################################
class UDS_ER(Packet):
resetTypes = {
0x00: 'ISOSAEReserved',
0x01: 'hardReset',
0x02: 'keyOffOnReset',
0x03: 'softReset',
0x04: 'enableRapidPowerShutDown',
0x05: 'disableRapidPowerShutDown',
0x7F: 'ISOSAEReserved'}
name = 'ECUReset'
fields_desc = [
ByteEnumField('resetType', 0, resetTypes)
]
bind_layers(UDS, UDS_ER, service=0x11)
class UDS_ERPR(Packet):
name = 'ECUResetPositiveResponse'
fields_desc = [
ByteEnumField('resetType', 0, UDS_ER.resetTypes),
ConditionalField(ByteField('powerDownTime', 0),
lambda pkt: pkt.resetType == 0x04)
]
bind_layers(UDS, UDS_ERPR, service=0x51)
# #########################SA###################################
class UDS_SA(Packet):
name = 'SecurityAccess'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securityAccessDataRecord', B""),
lambda pkt: pkt.securityAccessType % 2 == 1),
ConditionalField(StrField('securityKey', B""),
lambda pkt: pkt.securityAccessType % 2 == 0)
]
bind_layers(UDS, UDS_SA, service=0x27)
class UDS_SAPR(Packet):
name = 'SecurityAccessPositiveResponse'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securitySeed', B""),
lambda pkt: pkt.securityAccessType % 2 == 1),
]
bind_layers(UDS, UDS_SAPR, service=0x67)
# #########################CC###################################
class UDS_CC(Packet):
controlTypes = {
0x00: 'enableRxAndTx',
0x01: 'enableRxAndDisableTx',
0x02: 'disableRxAndEnableTx',
0x03: 'disableRxAndTx'
}
name = 'CommunicationControl'
fields_desc = [
ByteEnumField('controlType', 0, controlTypes),
BitEnumField('communicationType0', 0, 2,
{0: 'ISOSAEReserved',
1: 'normalCommunicationMessages',
2: 'networkManagmentCommunicationMessages',
3: 'networkManagmentCommunicationMessages and '
'normalCommunicationMessages'}),
BitField('communicationType1', 0, 2),
BitEnumField('communicationType2', 0, 4,
{0: 'Disable/Enable specified communication Type',
1: 'Disable/Enable specific subnet',
2: 'Disable/Enable specific subnet',
3: 'Disable/Enable specific subnet',
4: 'Disable/Enable specific subnet',
5: 'Disable/Enable specific subnet',
6: 'Disable/Enable specific subnet',
7: 'Disable/Enable specific subnet',
8: 'Disable/Enable specific subnet',
9: 'Disable/Enable specific subnet',
10: 'Disable/Enable specific subnet',
11: 'Disable/Enable specific subnet',
12: 'Disable/Enable specific subnet',
13: 'Disable/Enable specific subnet',
14: 'Disable/Enable specific subnet',
15: 'Disable/Enable network'})
]
bind_layers(UDS, UDS_CC, service=0x28)
class UDS_CCPR(Packet):
name = 'CommunicationControlPositiveResponse'
fields_desc = [
ByteEnumField('controlType', 0, UDS_CC.controlTypes)
]
bind_layers(UDS, UDS_CCPR, service=0x68)
# #########################TP###################################
class UDS_TP(Packet):
name = 'TesterPresent'
fields_desc = [
ByteField('subFunction', 0)
]
bind_layers(UDS, UDS_TP, service=0x3E)
class UDS_TPPR(Packet):
name = 'TesterPresentPositiveResponse'
fields_desc = [
ByteField('zeroSubFunction', 0)
]
bind_layers(UDS, UDS_TPPR, service=0x7E)
# #########################ATP###################################
class UDS_ATP(Packet):
timingParameterAccessTypes = {
0: 'ISOSAEReserved',
1: 'readExtendedTimingParameterSet',
2: 'setTimingParametersToDefaultValues',
3: 'readCurrentlyActiveTimingParameters',
4: 'setTimingParametersToGivenValues'
}
name = 'AccessTimingParameter'
fields_desc = [
ByteEnumField('timingParameterAccessType', 0,
timingParameterAccessTypes),
ConditionalField(StrField('timingParameterRequestRecord', B""),
lambda pkt: pkt.timingParameterAccessType == 0x4)
]
bind_layers(UDS, UDS_ATP, service=0x83)
class UDS_ATPPR(Packet):
name = 'AccessTimingParameterPositiveResponse'
fields_desc = [
ByteEnumField('timingParameterAccessType', 0,
UDS_ATP.timingParameterAccessTypes),
ConditionalField(StrField('timingParameterResponseRecord', B""),
lambda pkt: pkt.timingParameterAccessType == 0x3)
]
bind_layers(UDS, UDS_ATPPR, service=0xC3)
# #########################SDT###################################
class UDS_SDT(Packet):
name = 'SecuredDataTransmission'
fields_desc = [
StrField('securityDataRequestRecord', B"")
]
bind_layers(UDS, UDS_SDT, service=0x84)
class UDS_SDTPR(Packet):
name = 'SecuredDataTransmissionPositiveResponse'
fields_desc = [
StrField('securityDataResponseRecord', B"")
]
bind_layers(UDS, UDS_SDTPR, service=0xC4)
# #########################CDTCS###################################
class UDS_CDTCS(Packet):
DTCSettingTypes = {
0: 'ISOSAEReserved',
1: 'on',
2: 'off'
}
name = 'ControlDTCSetting'
fields_desc = [
ByteEnumField('DTCSettingType', 0, DTCSettingTypes),
StrField('DTCSettingControlOptionRecord', B"")
]
bind_layers(UDS, UDS_CDTCS, service=0x85)
class UDS_CDTCSPR(Packet):
name = 'ControlDTCSettingPositiveResponse'
fields_desc = [
ByteEnumField('DTCSettingType', 0, UDS_CDTCS.DTCSettingTypes)
]
bind_layers(UDS, UDS_CDTCSPR, service=0xC5)
# #########################ROE###################################
# TODO: improve this protocol implementation
class UDS_ROE(Packet):
eventTypes = {
0: 'doNotStoreEvent',
1: 'storeEvent'
}
name = 'ResponseOnEvent'
fields_desc = [
ByteEnumField('eventType', 0, eventTypes),
ByteField('eventWindowTime', 0),
StrField('eventTypeRecord', B"")
]
bind_layers(UDS, UDS_ROE, service=0x86)
class UDS_ROEPR(Packet):
name = 'ResponseOnEventPositiveResponse'
fields_desc = [
ByteEnumField('eventType', 0, UDS_ROE.eventTypes),
ByteField('numberOfIdentifiedEvents', 0),
ByteField('eventWindowTime', 0),
StrField('eventTypeRecord', B"")
]
bind_layers(UDS, UDS_ROEPR, service=0xC6)
# #########################LC###################################
class UDS_LC(Packet):
linkControlTypes = {
0: 'ISOSAEReserved',
1: 'verifyBaudrateTransitionWithFixedBaudrate',
2: 'verifyBaudrateTransitionWithSpecificBaudrate',
3: 'transitionBaudrate'
}
name = 'LinkControl'
fields_desc = [
ByteEnumField('linkControlType', 0, linkControlTypes),
ConditionalField(ByteField('baudrateIdentifier', 0),
lambda pkt: pkt.linkControlType == 0x1),
ConditionalField(ByteField('baudrateHighByte', 0),
lambda pkt: pkt.linkControlType == 0x2),
ConditionalField(ByteField('baudrateMiddleByte', 0),
lambda pkt: pkt.linkControlType == 0x2),
ConditionalField(ByteField('baudrateLowByte', 0),
lambda pkt: pkt.linkControlType == 0x2)
]
bind_layers(UDS, UDS_LC, service=0x87)
class UDS_LCPR(Packet):
name = 'LinkControlPositiveResponse'
fields_desc = [
ByteEnumField('linkControlType', 0, UDS_LC.linkControlTypes)
]
bind_layers(UDS, UDS_LCPR, service=0xC7)
# #########################RDBI###################################
class UDS_RDBI(Packet):
dataIdentifiers = ObservableDict()
name = 'ReadDataByIdentifier'
fields_desc = [
FieldListField("identifiers", [],
XShortEnumField('dataIdentifier', 0,
dataIdentifiers))
]
bind_layers(UDS, UDS_RDBI, service=0x22)
class UDS_RDBIPR(Packet):
name = 'ReadDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers),
]
bind_layers(UDS, UDS_RDBIPR, service=0x62)
# #########################RMBA###################################
class UDS_RMBA(Packet):
name = 'ReadMemoryByAddress'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4),
]
bind_layers(UDS, UDS_RMBA, service=0x23)
class UDS_RMBAPR(Packet):
name = 'ReadMemoryByAddressPositiveResponse'
fields_desc = [
StrField('dataRecord', None, fmt="B")
]
bind_layers(UDS, UDS_RMBAPR, service=0x63)
# #########################RSDBI###################################
class UDS_RSDBI(Packet):
name = 'ReadScalingDataByIdentifier'
fields_desc = [
XShortField('dataIdentifier', 0)
]
bind_layers(UDS, UDS_RSDBI, service=0x24)
# TODO: Implement correct scaling here, instead of using just the dataRecord
class UDS_RSDBIPR(Packet):
name = 'ReadScalingDataByIdentifierPositiveResponse'
fields_desc = [
XShortField('dataIdentifier', 0),
ByteField('scalingByte', 0),
StrField('dataRecord', None, fmt="B")
]
bind_layers(UDS, UDS_RSDBIPR, service=0x64)
# #########################RDBPI###################################
class UDS_RDBPI(Packet):
transmissionModes = {
0: 'ISOSAEReserved',
1: 'sendAtSlowRate',
2: 'sendAtMediumRate',
3: 'sendAtFastRate',
4: 'stopSending'
}
name = 'ReadDataByPeriodicIdentifier'
fields_desc = [
ByteEnumField('transmissionMode', 0, transmissionModes),
ByteField('periodicDataIdentifier', 0),
StrField('furtherPeriodicDataIdentifier', 0, fmt="B")
]
bind_layers(UDS, UDS_RDBPI, service=0x2A)
# TODO: Implement correct scaling here, instead of using just the dataRecord
class UDS_RDBPIPR(Packet):
name = 'ReadDataByPeriodicIdentifierPositiveResponse'
fields_desc = [
ByteField('periodicDataIdentifier', 0),
StrField('dataRecord', None, fmt="B")
]
bind_layers(UDS, UDS_RDBPIPR, service=0x6A)
# #########################DDDI###################################
# TODO: Implement correct interpretation here,
# instead of using just the dataRecord
class UDS_DDDI(Packet):
name = 'DynamicallyDefineDataIdentifier'
fields_desc = [
ByteField('definitionMode', 0),
StrField('dataRecord', 0, fmt="B")
]
bind_layers(UDS, UDS_DDDI, service=0x2C)
class UDS_DDDIPR(Packet):
name = 'DynamicallyDefineDataIdentifierPositiveResponse'
fields_desc = [
ByteField('definitionMode', 0),
XShortField('dynamicallyDefinedDataIdentifier', 0)
]
bind_layers(UDS, UDS_DDDIPR, service=0x6C)
# #########################WDBI###################################
class UDS_WDBI(Packet):
name = 'WriteDataByIdentifier'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers)
]
bind_layers(UDS, UDS_WDBI, service=0x2E)
class UDS_WDBIPR(Packet):
name = 'WriteDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers),
]
bind_layers(UDS, UDS_WDBIPR, service=0x6E)
# #########################WMBA###################################
class UDS_WMBA(Packet):
name = 'WriteMemoryByAddress'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4),
StrField('dataRecord', b'\x00', fmt="B"),
]
bind_layers(UDS, UDS_WMBA, service=0x3D)
class UDS_WMBAPR(Packet):
name = 'WriteMemoryByAddressPositiveResponse'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
bind_layers(UDS, UDS_WMBAPR, service=0x7D)
# #########################CDTCI###################################
class UDS_CDTCI(Packet):
name = 'ClearDiagnosticInformation'
fields_desc = [
ByteField('groupOfDTCHighByte', 0),
ByteField('groupOfDTCMiddleByte', 0),
ByteField('groupOfDTCLowByte', 0),
]
bind_layers(UDS, UDS_CDTCI, service=0x14)
# #########################RDTCI###################################
class UDS_RDTCI(Packet):
reportTypes = {
0: 'ISOSAEReserved',
1: 'reportNumberOfDTCByStatusMask',
2: 'reportDTCByStatusMask',
3: 'reportDTCSnapshotIdentification',
4: 'reportDTCSnapshotRecordByDTCNumber',
5: 'reportDTCSnapshotRecordByRecordNumber',
6: 'reportDTCExtendedDataRecordByDTCNumber',
7: 'reportNumberOfDTCBySeverityMaskRecord',
8: 'reportDTCBySeverityMaskRecord',
9: 'reportSeverityInformationOfDTC',
10: 'reportSupportedDTC',
11: 'reportFirstTestFailedDTC',
12: 'reportFirstConfirmedDTC',
13: 'reportMostRecentTestFailedDTC',
14: 'reportMostRecentConfirmedDTC',
15: 'reportMirrorMemoryDTCByStatusMask',
16: 'reportMirrorMemoryDTCExtendedDataRecordByDTCNumber',
17: 'reportNumberOfMirrorMemoryDTCByStatusMask',
18: 'reportNumberOfEmissionsRelatedOBDDTCByStatusMask',
19: 'reportEmissionsRelatedOBDDTCByStatusMask',
20: 'reportDTCFaultDetectionCounter',
21: 'reportDTCWithPermanentStatus'
}
name = 'ReadDTCInformation'
fields_desc = [
ByteEnumField('reportType', 0, reportTypes),
ConditionalField(XByteField('DTCStatusMask', 0),
lambda pkt: pkt.reportType in [0x01, 0x02, 0x0f,
0x11, 0x12, 0x13]),
ConditionalField(ByteField('DTCHighByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCMiddleByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCLowByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCSnapshotRecordNumber', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x5]),
ConditionalField(ByteField('DTCExtendedDataRecordNumber', 0),
lambda pkt: pkt.reportType in [0x6, 0x10]),
ConditionalField(ByteField('DTCSeverityMask', 0),
lambda pkt: pkt.reportType in [0x07, 0x08]),
ConditionalField(ByteField('DTCStatusMask', 0),
lambda pkt: pkt.reportType in [0x07, 0x08]),
]
bind_layers(UDS, UDS_RDTCI, service=0x19)
class UDS_RDTCIPR(Packet):
name = 'ReadDTCInformationPositiveResponse'
fields_desc = [
ByteEnumField('reportType', 0, UDS_RDTCI.reportTypes),
ConditionalField(XByteField('DTCStatusAvailabilityMask', 0),
lambda pkt: pkt.reportType in [0x01, 0x07, 0x11,
0x12, 0x02, 0x0A,
0x0B, 0x0C, 0x0D,
0x0E, 0x0F, 0x13,
0x15]),
ConditionalField(ByteEnumField('DTCFormatIdentifier', 0,
{0: 'ISO15031-6DTCFormat',
1: 'UDS-1DTCFormat',
2: 'SAEJ1939-73DTCFormat',
3: 'ISO11992-4DTCFormat'}),
lambda pkt: pkt.reportType in [0x01, 0x07,
0x11, 0x12]),
ConditionalField(ShortField('DTCCount', 0),
lambda pkt: pkt.reportType in [0x01, 0x07,
0x11, 0x12]),
ConditionalField(StrField('DTCAndStatusRecord', 0),
lambda pkt: pkt.reportType in [0x02, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E,
0x0F, 0x13, 0x15]),
ConditionalField(StrField('dataRecord', 0),
lambda pkt: pkt.reportType in [0x03, 0x04, 0x05,
0x06, 0x08, 0x09,
0x10, 0x14])
]
bind_layers(UDS, UDS_RDTCIPR, service=0x59)
# #########################RC###################################
class UDS_RC(Packet):
routineControlTypes = {
0: 'ISOSAEReserved',
1: 'startRoutine',
2: 'stopRoutine',
3: 'requestRoutineResults'
}
name = 'RoutineControl'
fields_desc = [
ByteEnumField('routineControlType', 0, routineControlTypes),
XShortField('routineIdentifier', 0),
StrField('routineControlOptionRecord', 0, fmt="B"),
]
bind_layers(UDS, UDS_RC, service=0x31)
class UDS_RCPR(Packet):
name = 'RoutineControlPositiveResponse'
fields_desc = [
ByteEnumField('routineControlType', 0,
UDS_RC.routineControlTypes),
XShortField('routineIdentifier', 0),
StrField('routineStatusRecord', 0, fmt="B"),
]
bind_layers(UDS, UDS_RCPR, service=0x71)
# #########################RD###################################
class UDS_RD(Packet):
dataFormatIdentifiers = {
0: 'noCompressionNoEncryption'
}
name = 'RequestDownload'
fields_desc = [
ByteEnumField('dataFormatIdentifier', 0, dataFormatIdentifiers),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
bind_layers(UDS, UDS_RD, service=0x34)
class UDS_RDPR(Packet):
name = 'RequestDownloadPositiveResponse'
fields_desc = [
ByteEnumField('routineControlType', 0,
UDS_RC.routineControlTypes),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
StrField('maxNumberOfBlockLength', 0, fmt="B"),
]
bind_layers(UDS, UDS_RDPR, service=0x74)
# #########################RU###################################
class UDS_RU(Packet):
name = 'RequestUpload'
fields_desc = [
ByteEnumField('dataFormatIdentifier', 0,
UDS_RD.dataFormatIdentifiers),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
bind_layers(UDS, UDS_RU, service=0x35)
class UDS_RUPR(Packet):
name = 'RequestUploadPositiveResponse'
fields_desc = [
ByteEnumField('routineControlType', 0,
UDS_RC.routineControlTypes),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
StrField('maxNumberOfBlockLength', 0, fmt="B"),
]
bind_layers(UDS, UDS_RUPR, service=0x75)
# #########################TD###################################
class UDS_TD(Packet):
name = 'TransferData'
fields_desc = [
ByteField('blockSequenceCounter', 0),
StrField('transferRequestParameterRecord', 0, fmt="B")
]
bind_layers(UDS, UDS_TD, service=0x36)
class UDS_TDPR(Packet):
name = 'TransferDataPositiveResponse'
fields_desc = [
ByteField('blockSequenceCounter', 0),
StrField('transferResponseParameterRecord', 0, fmt="B")
]
bind_layers(UDS, UDS_TDPR, service=0x76)
# #########################RTE###################################
class UDS_RTE(Packet):
name = 'RequestTransferExit'
fields_desc = [
StrField('transferRequestParameterRecord', 0, fmt="B")
]
bind_layers(UDS, UDS_RTE, service=0x37)
class UDS_RTEPR(Packet):
name = 'RequestTransferExitPositiveResponse'
fields_desc = [
StrField('transferResponseParameterRecord', 0, fmt="B")
]
bind_layers(UDS, UDS_RTEPR, service=0x77)
# #########################IOCBI###################################
class UDS_IOCBI(Packet):
name = 'InputOutputControlByIdentifier'
fields_desc = [
XShortField('dataIdentifier', 0),
ByteField('controlOptionRecord', 0),
StrField('controlEnableMaskRecord', 0, fmt="B")
]
bind_layers(UDS, UDS_IOCBI, service=0x2F)
class UDS_IOCBIPR(Packet):
name = 'InputOutputControlByIdentifierPositiveResponse'
fields_desc = [
XShortField('dataIdentifier', 0),
StrField('controlStatusRecord', 0, fmt="B")
]
bind_layers(UDS, UDS_IOCBIPR, service=0x6F)
# #########################NRC###################################
class UDS_NRC(Packet):
negativeResponseCodes = {
0x00: 'positiveResponse',
0x10: 'generalReject',
0x11: 'serviceNotSupported',
0x12: 'subFunctionNotSupported',
0x13: 'incorrectMessageLengthOrInvalidFormat',
0x14: 'responseTooLong',
0x20: 'ISOSAEReserved',
0x21: 'busyRepeatRequest',
0x22: 'conditionsNotCorrect',
0x23: 'ISOSAEReserved',
0x24: 'requestSequenceError',
0x25: 'noResponseFromSubnetComponent',
0x26: 'failurePreventsExecutionOfRequestedAction',
0x31: 'requestOutOfRange',
0x33: 'securityAccessDenied',
0x35: 'invalidKey',
0x36: 'exceedNumberOfAttempts',
0x37: 'requiredTimeDelayNotExpired',
0x70: 'uploadDownloadNotAccepted',
0x71: 'transferDataSuspended',
0x72: 'generalProgrammingFailure',
0x73: 'wrongBlockSequenceCounter',
0x78: 'requestCorrectlyReceived-ResponsePending',
0x7E: 'subFunctionNotSupportedInActiveSession',
0x7F: 'serviceNotSupportedInActiveSession',
0x80: 'ISOSAEReserved',
0x81: 'rpmTooHigh',
0x82: 'rpmTooLow',
0x83: 'engineIsRunning',
0x84: 'engineIsNotRunning',
0x85: 'engineRunTimeTooLow',
0x86: 'temperatureTooHigh',
0x87: 'temperatureTooLow',
0x88: 'vehicleSpeedTooHigh',
0x89: 'vehicleSpeedTooLow',
0x8a: 'throttle/PedalTooHigh',
0x8b: 'throttle/PedalTooLow',
0x8c: 'transmissionRangeNotInNeutral',
0x8d: 'transmissionRangeNotInGear',
0x8e: 'ISOSAEReserved',
0x8f: 'brakeSwitch(es)NotClosed',
0x90: 'shifterLeverNotInPark',
0x91: 'torqueConverterClutchLocked',
0x92: 'voltageTooHigh',
0x93: 'voltageTooLow',
}
name = 'NegativeResponseCode'
fields_desc = [
XByteEnumField('requestServiceId', 0, UDS.services),
ByteEnumField('negativeResponseCode', 0, negativeResponseCodes)
]
bind_layers(UDS, UDS_NRC, service=0x7f)
# ##################################################################
# ######################## UTILS ###################################
# ##################################################################
class UDS_TesterPresentSender(PeriodicSenderThread):
def __init__(self, sock, pkt=UDS() / UDS_TP(), interval=2):
""" Thread to send TesterPresent messages packets periodically
Args:
sock: socket where packet is sent periodically
pkt: packet to send
interval: interval between two packets
"""
PeriodicSenderThread.__init__(self, sock, pkt, interval)
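# Illustrative usage (the socket object and the start/stop interface of
# PeriodicSenderThread are assumptions, e.g. an ISOTP/CAN socket):
#   sender = UDS_TesterPresentSender(sock, interval=2)
#   sender.start()   # periodically sends UDS()/UDS_TP()
#   sender.stop()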
|
venv/Lib/site-packages/statsmodels/tsa/interp/tests/test_denton.py | EkremBayar/bayar | 6,931 | 12698598 |
import numpy as np
from statsmodels.tsa.interp import dentonm
def test_denton_quarterly():
# Data and results taken from IMF paper
indicator = np.array([98.2, 100.8, 102.2, 100.8, 99.0, 101.6,
102.7, 101.5, 100.5, 103.0, 103.5, 101.5])
benchmark = np.array([4000.,4161.4])
x_imf = dentonm(indicator, benchmark, freq="aq")
imf_stata = np.array([969.8, 998.4, 1018.3, 1013.4, 1007.2, 1042.9,
1060.3, 1051.0, 1040.6, 1066.5, 1071.7, 1051.0])
np.testing.assert_almost_equal(imf_stata, x_imf, 1)
def test_denton_quarterly2():
# Test denton vs stata. Higher precision than other test.
zQ = np.array([50,100,150,100] * 5)
Y = np.array([500,400,300,400,500])
x_denton = dentonm(zQ, Y, freq="aq")
x_stata = np.array([64.334796,127.80616,187.82379,120.03526,56.563894,
105.97568,147.50144,89.958987,40.547201,74.445963,
108.34473,76.66211,42.763347,94.14664,153.41596,
109.67405,58.290761,122.62556,190.41409,128.66959])
np.testing.assert_almost_equal(x_denton, x_stata, 5)
if __name__ == "__main__":
import pytest
pytest.main([__file__, '-vvs', '-x', '--pdb'])
|
unit_testing_course/lesson2/task2/comparison_assertions.py | behzod/pycharm-courses | 213 | 12698605 |
import random
import unittest
from tested_code import random_not_42, find_foo, \
random_float_between_inclusive, random_float_between_noninclusive
class TestRandomNot42(unittest.TestCase):
def test_many_values(self):
"""call the function 100 times and make sure the result isn't 42"""
for n_attempt in range(100):
value = random_not_42()
self.assertNotEqual(value, 42)
class TestFindFoo(unittest.TestCase):
"""tests for the find_foo() function
find_foo(s) returns an object if "foo" is a sub-string of s,
and None otherwise.
"""
# valid_names = [
# 'foo',
# 'Bar',
# 'foorBar',
# 'foo_bar',
# '_fooBar',
# 'foo1',
# 'foo_',
# ]
#
# invalid_names = [
# '1foo',
# 'foo-bar',
# '$foo',
# 'foo bar',
# 'foo+bar4ever',
# ]
strings_with_foo = [
'foo',
'aaa foo bbb',
'aaa foo',
'foo bbb',
        'no foo for you, come back one year!'
]
strings_without_foo = [
'boo',
'aaa bbb',
'four',
]
def test_identical(self):
"""check that find_foo finds 'foo' in 'foo'"""
self.assertIsNotNone(find_foo('foo'))
def test_strings_with_foo(self):
"""check that find_foo finds 'foo' in all of the strings with 'foo'"""
for s in self.strings_with_foo:
self.assertIsNotNone(find_foo(s))
def test_strings_without_foo(self):
"""check that find_foo finds 'foo' in all of the strings with 'foo'"""
for s in self.strings_without_foo:
self.assertIsNone(find_foo(s))
class TestRandomFloatBetweenInclusive(unittest.TestCase):
def test_random_values(self):
for i in range(100):
start = random.random()
end = random.random()
if start > end:
start, end = end, start
value = random_float_between_inclusive(start, end)
self.assertGreaterEqual(value, start)
self.assertLessEqual(value, end)
class TestRandomFloatBetweenNoninclusive(unittest.TestCase):
def test_random_values(self):
for i in range(100):
start = random.random()
end = random.random()
if start > end:
start, end = end, start
value = random_float_between_noninclusive(start, end)
self.assertGreater(value, start)
self.assertLess(value, end)
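# These suites run with the standard unittest runner; assuming the module name from
# the file path above: python -m unittest comparison_assertions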
|
tests/test_reactjs.py | Dhandarah/dukpy | 363 | 12698636 | # -*- coding: utf-8 -*-
import unittest
import dukpy
class TestReactJS(unittest.TestCase):
def test_hello_world(self):
jsx = dukpy.jsx_compile('var react_hello = <h1>Hello, world!</h1>;')
jsi = dukpy.JSInterpreter()
result = jsi.evaljs([
'''
var React = require('react/react'),
ReactDOM = require('react/react-dom-server');
''',
jsx,
'ReactDOM.renderToStaticMarkup(react_hello, null);'
])
        assert result == '<h1>Hello, world!</h1>', result
def test_jsx_mixed(self):
code = '''
var React = require('react/react'),
ReactDOM = require('react/react-dom-server');
ReactDOM.renderToStaticMarkup(<h1>Hello, world!</h1>, null);
'''
jsx = dukpy.jsx_compile(code)
res = dukpy.evaljs(jsx)
assert res == '<h1>Hello, world!</h1>', res
def test_react_binding(self):
code = '''
var React = require('react/react'),
ReactDOM = require('react/react-dom-server');
var HelloWorld = React.createClass({
render: function() {
return (
<div className="helloworld">
Hello {this.props.data.name}
</div>
);
}
});
ReactDOM.renderToStaticMarkup(<HelloWorld data={dukpy.data}/>, null);
'''
jsx = dukpy.jsx_compile(code)
res = dukpy.evaljs(jsx, data={'id': 1, 'name': "Alessandro"})
assert res == '<div class="helloworld">Hello Alessandro</div>', res
def test_jsx6(self):
code = '''
import React from 'react/react';
var ReactDOM = require('react/react-dom-server');
class HelloWorld extends React.Component {
render() {
return (
<div className="helloworld">
Hello {this.props.data.name}
</div>
);
}
}
ReactDOM.renderToStaticMarkup(<HelloWorld data={dukpy.data}/>, null);
'''
jsx = dukpy.jsx_compile(code)
res = dukpy.evaljs(jsx, data={'id': 1, 'name': "Alessandro"})
assert res == '<div class="helloworld">Hello Alessandro</div>', res
|
src/lib/error_logger.py | chrismayemba/covid-19-open-data | 430 | 12698639 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import os
from functools import lru_cache
from typing import Callable
from pandas import Series
from pandas._libs.missing import NAType
# Based on recipe for structured logging
# https://docs.python.org/3/howto/logging-cookbook.html#implementing-structured-logging
class LogEncoder(json.JSONEncoder):
# pylint: disable=method-hidden
def default(self, o):
if isinstance(o, set):
return tuple(o)
elif isinstance(o, str):
return o.encode("unicode_escape").decode("ascii")
elif isinstance(o, Series):
return o.to_dict()
elif isinstance(o, NAType):
return None
elif isinstance(o, Exception):
return f"{o.__class__.__name__}: {str(o)}"
return super(LogEncoder, self).default(o)
class StructuredMessage:
def __init__(self, message, **kwargs):
self._kwargs = kwargs
self._kwargs["message"] = message
@lru_cache()
def __str__(self):
return LogEncoder().encode(self._kwargs)
class ErrorLogger:
"""
Simple class to be inherited by other classes to add error logging functions.
"""
name: str
""" Name of the logger, defaults to the class name. """
logger: logging.Logger
""" Instance of logger which will be used. Each ErrorLogger instance has its own Logger. """
def __init__(self, name: str = None):
# Default to the classname
self.name = name or self.__class__.__name__
# Create an instance of logger
self.logger = logging.getLogger(self.name)
# Read logging level from env variable, default to INFO
level_name = os.getenv("LOG_LEVEL") or "INFO"
self.logger.setLevel(getattr(logging, level_name, logging.INFO))
# Only add a handler if it does not already have one
if not self.logger.hasHandlers():
# Configure the handler to use our preferred logging format
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(message)s"))
self.logger.addHandler(handler)
self.log_debug(f"Initialized logger {self.name} with level {level_name}")
def timestamp(self) -> str:
return datetime.datetime.now().isoformat()[:24]
def _log_msg(self, log_func: Callable, msg: str, **kwargs) -> None:
log_func(
StructuredMessage(
msg,
logname=self.name,
timestamp=self.timestamp(),
# TODO: consider whether we should keep classname or if logname is sufficient
classname=self.__class__.__name__,
loglevel=log_func.__name__,
**kwargs,
)
)
def log_error(self, msg: str, **kwargs) -> None:
self._log_msg(self.logger.error, msg, **kwargs)
def log_warning(self, msg: str, **kwargs) -> None:
self._log_msg(self.logger.warning, msg, **kwargs)
def log_info(self, msg: str, **kwargs) -> None:
self._log_msg(self.logger.info, msg, **kwargs)
def log_debug(self, msg: str, **kwargs) -> None:
self._log_msg(self.logger.debug, msg, **kwargs)
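# Illustrative subclass (class, method and field names are assumptions, not part of the original):
#   class CachedDataSource(ErrorLogger):
#       def download(self, url: str) -> None:
#           self.log_info("downloading", url=url)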
|
tests/unit/systems/nfs/server_test.py | gamechanger/dusty | 421 | 12698719 |
import os
import tempfile
from mock import Mock, patch
from dusty.systems.nfs import server
from dusty import constants
from ....testcases import DustyTestCase
class TestNFSServer(DustyTestCase):
def setUp(self):
super(TestNFSServer, self).setUp()
def tearDown(self):
super(TestNFSServer, self).tearDown()
@patch('dusty.systems.config_file.get_dusty_config_section')
def test_get_current_exports(self, fake_get_dusty_config_section):
fake_get_dusty_config_section.return_value = 'export numba 1\n/private/etc/some/repo 192.168.59.103 -alldirs -maproot=0:0\n'
expected_current_exports = set(['export numba 1\n', '/private/etc/some/repo 192.168.59.103 -alldirs -maproot=0:0\n'])
self.assertEqual(expected_current_exports, server._get_current_exports())
def test_maproot_for_repo(self):
fake_repo = Mock()
fake_repo.local_path = tempfile.mkdtemp()
expected_maproot = '{}:{}'.format(os.stat(fake_repo.local_path).st_uid, os.stat(fake_repo.local_path).st_gid)
self.assertEqual(expected_maproot, server._maproot_for_repo(fake_repo))
def test_write_exports_config(self):
exports_set = set(['export1\n', 'export2\n'])
constants.EXPORTS_PATH = tempfile.mkstemp()[1]
|
finding cycle in linked list/cycle.py | love-ode/cs-algorithms | 239 | 12698739 | class Node:
# Constructor to initialize the node object
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
# Function to initialize head
def __init__(self):
self.head = None
# Function to insert a new node at the beginning
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
    # Utility function to print the linked list
def printList(self):
temp = self.head
while(temp):
            print(temp.data)
temp = temp.next
def detectLoop(self):
slow_p = self.head
fast_p = self.head
while(slow_p and fast_p and fast_p.next):
slow_p = slow_p.next
fast_p = fast_p.next.next
if slow_p == fast_p:
print ("Found Loop")
return
print ("Not Found Loop")
# Driver program for testing
llist = LinkedList()
llist.push(20)
llist.push(4)
llist.push(15)
llist.push(10)
# Create a loop for testing
llist.head.next.next.next.next = llist.head
llist.detectLoop()
|
gui/rqt_quad_gui/src/rqt_quad_gui/quad_widget_common.py | danielemorra98/rpg_quadrotor_control | 419 | 12698777 | #!/usr/bin/env python
from python_qt_binding import loadUi
qt_version_below_5 = False
try:
# Starting from Qt 5 QWidget is defined in QtWidgets and not QtGui anymore
from python_qt_binding import QtWidgets
from python_qt_binding.QtWidgets import QWidget
except:
from python_qt_binding import QtGui
from python_qt_binding.QtGui import QWidget
qt_version_below_5 = True
from python_qt_binding import QtCore
from .quad_name_widget import QuadNameWidget
class QuadWidgetCommon(QWidget):
def __init__(self):
super(QuadWidgetCommon, self).__init__()
# the name widget is separate since we need to access it directly
self._name_widget = QuadNameWidget(self)
if qt_version_below_5:
self._column_1 = QtGui.QVBoxLayout()
self._column_2 = QtGui.QVBoxLayout()
else:
self._column_1 = QtWidgets.QVBoxLayout()
self._column_2 = QtWidgets.QVBoxLayout()
def setup_gui(self, two_columns=True):
if qt_version_below_5:
widget_layout = QtGui.QHBoxLayout()
else:
widget_layout = QtWidgets.QHBoxLayout()
widget_layout.addLayout(self._column_1)
if two_columns:
widget_layout.addLayout(self._column_2)
        if qt_version_below_5:
            main_layout = QtGui.QVBoxLayout()
        else:
            main_layout = QtWidgets.QVBoxLayout()
main_layout.addLayout(widget_layout)
self._column_1.setAlignment(QtCore.Qt.AlignTop)
if two_columns:
self._column_2.setAlignment(QtCore.Qt.AlignTop)
widget_layout.setAlignment(QtCore.Qt.AlignTop)
main_layout.setAlignment(QtCore.Qt.AlignTop)
self.setLayout(main_layout)
self._update_info_timer = QtCore.QTimer(self)
self._update_info_timer.timeout.connect(self.update_gui)
self._update_info_timer.start(100)
def get_list_of_plugins(self):
quad_plugins = []
for i in range(1, self._column_1.count()):
quad_plugins.append(self._column_1.itemAt(i).widget())
for i in range(0, self._column_2.count()):
quad_plugins.append(self._column_2.itemAt(i).widget())
return quad_plugins
def connect(self):
quad_name = self._name_widget.getQuadName()
self.setWindowTitle(quad_name)
for plugin in self.get_list_of_plugins():
plugin.connect(quad_name)
def disconnect(self):
self.setWindowTitle("RPG Quad Gui")
for plugin in self.get_list_of_plugins():
plugin.disconnect()
def update_gui(self):
for plugin in self.get_list_of_plugins():
plugin.update_gui()
def getQuadName(self):
return self._name_widget.getQuadName()
def setQuadName(self, quadname):
self._name_widget.setQuadName(quadname)
|
synapse_btcv_abdominal_ct_segmentation/train.py | andythai/models | 128 | 12698824 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import argparse
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from dltk.core.metrics import dice
from dltk.core.losses import sparse_balanced_crossentropy
from dltk.networks.segmentation.unet import residual_unet_3d
from dltk.networks.segmentation.unet import asymmetric_residual_unet_3d
from dltk.networks.segmentation.fcn import residual_fcn_3d
from dltk.core.activations import leaky_relu
from dltk.io.abstract_reader import Reader
from reader import read_fn
import json
# PARAMS
EVAL_EVERY_N_STEPS = 1000
EVAL_STEPS = 1
NUM_CLASSES = 14
NUM_CHANNELS = 1
BATCH_SIZE = 4
SHUFFLE_CACHE_SIZE = 128
MAX_STEPS = 100000
# MODEL
def model_fn(features, labels, mode, params):
"""Summary
Args:
features (TYPE): Description
labels (TYPE): Description
mode (TYPE): Description
params (TYPE): Description
Returns:
TYPE: Description
"""
# 1. create a model and its outputs
filters = params["filters"]
strides = params["strides"]
num_residual_units = params["num_residual_units"]
loss_type = params["loss"]
net = params["net"]
def lrelu(x):
return leaky_relu(x, 0.1)
if net == 'fcn':
net_output_ops = residual_fcn_3d(
features['x'], NUM_CLASSES,
num_res_units=num_residual_units,
filters=filters,
strides=strides,
activation=lrelu,
mode=mode)
elif net == 'unet':
net_output_ops = residual_unet_3d(
features['x'], NUM_CLASSES,
num_res_units=num_residual_units,
filters=filters,
strides=strides,
activation=lrelu,
mode=mode)
elif net == 'asym_unet':
net_output_ops = asymmetric_residual_unet_3d(
features['x'],
NUM_CLASSES,
num_res_units=num_residual_units,
filters=filters,
strides=strides,
activation=lrelu,
mode=mode)
# 1.1 Generate predictions only (for `ModeKeys.PREDICT`)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode, predictions=net_output_ops,
export_outputs={'out': tf.estimator.export.PredictOutput(
net_output_ops)})
# 2. set up a loss function
if loss_type == 'ce':
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=net_output_ops['logits'], labels=labels['y'])
loss = tf.reduce_mean(ce)
elif loss_type == 'balce':
loss = sparse_balanced_crossentropy(
net_output_ops['logits'], labels['y'])
# 3. define a training op and ops for updating
# moving averages (i.e. for batch normalisation)
global_step = tf.train.get_global_step()
if params["opt"] == 'adam':
optimiser = tf.train.AdamOptimizer(
learning_rate=params["learning_rate"], epsilon=1e-5)
elif params["opt"] == 'momentum':
optimiser = tf.train.MomentumOptimizer(
learning_rate=params["learning_rate"], momentum=0.9)
elif params["opt"] == 'rmsprop':
optimiser = tf.train.RMSPropOptimizer(
learning_rate=params["learning_rate"], momentum=0.9)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimiser.minimize(loss, global_step=global_step)
# 4.1 (optional) create custom image summaries for tensorboard
my_image_summaries = {}
my_image_summaries['feat_t1'] = tf.expand_dims(
features['x'][:, 0, :, :, 0], 3)
my_image_summaries['labels'] = tf.expand_dims(
tf.cast(labels['y'], tf.float32)[:, 0, :, :], 3)
my_image_summaries['predictions'] = tf.expand_dims(
tf.cast(net_output_ops['y_'], tf.float32)[:, 0, :, :], 3)
[tf.summary.image(name, image)
for name, image in my_image_summaries.items()]
# 4.2 (optional) create custom metric summaries for tensorboard
dice_tensor = tf.py_func(
dice, [net_output_ops['y_'], labels['y'],
tf.constant(NUM_CLASSES)], tf.float32)
[tf.summary.scalar('dsc_l{}'.format(i), dice_tensor[i])
for i in range(NUM_CLASSES)]
# 5. Return EstimatorSpec object
return tf.estimator.EstimatorSpec(
mode=mode, predictions=net_output_ops,
loss=loss, train_op=train_op,
eval_metric_ops=None)
def train(args):
np.random.seed(42)
tf.set_random_seed(42)
print('Setting up...')
with open(args.config) as f:
run_config = json.load(f)
# Parse csv files for file names
train_filenames = pd.read_csv(
args.train_csv, dtype=object, keep_default_na=False,
na_values=[]).as_matrix()
val_filenames = pd.read_csv(
args.val_csv, dtype=object, keep_default_na=False,
na_values=[]).as_matrix()
# Set up a data reader to handle the file i/o.
reader_params = {
'n_examples': 32,
'example_size': [64, 64, 64],
'extract_examples': True
}
reader_example_shapes = {
'features': {'x': reader_params['example_size'] + [NUM_CHANNELS, ]},
'labels': {'y': reader_params['example_size']}}
reader = Reader(read_fn, {'features': {'x': tf.float32},
'labels': {'y': tf.int32}})
# Get input functions and queue initialisation hooks
# for training and validation data
train_input_fn, train_qinit_hook = reader.get_inputs(
train_filenames,
tf.estimator.ModeKeys.TRAIN,
example_shapes=reader_example_shapes,
batch_size=BATCH_SIZE,
shuffle_cache_size=SHUFFLE_CACHE_SIZE,
params=reader_params)
val_input_fn, val_qinit_hook = reader.get_inputs(
val_filenames,
tf.estimator.ModeKeys.EVAL,
example_shapes=reader_example_shapes,
batch_size=BATCH_SIZE,
shuffle_cache_size=min(SHUFFLE_CACHE_SIZE, EVAL_STEPS),
params=reader_params)
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# Instantiate the neural network estimator
nn = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=args.save_path,
params=run_config,
config=tf.estimator.RunConfig(session_config=config))
# Hooks for validation summaries
val_summary_hook = tf.contrib.training.SummaryAtEndHook(
os.path.join(args.save_path, 'eval'))
step_cnt_hook = tf.train.StepCounterHook(
every_n_steps=EVAL_EVERY_N_STEPS, output_dir=args.save_path)
print('Starting training...')
try:
for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
nn.train(
input_fn=train_input_fn,
hooks=[train_qinit_hook, step_cnt_hook],
steps=EVAL_EVERY_N_STEPS)
results_val = nn.evaluate(
input_fn=val_input_fn,
hooks=[val_qinit_hook, val_summary_hook],
steps=EVAL_STEPS)
print('Step = {}; val loss = {:.5f};'.format(
results_val['global_step'], results_val['loss']))
except KeyboardInterrupt:
pass
print('Stopping now.')
export_dir = nn.export_savedmodel(
export_dir_base=args.save_path,
serving_input_receiver_fn=reader.serving_input_receiver_fn(reader_example_shapes))
print('Model saved to {}.'.format(export_dir))
if __name__ == '__main__':
# Set up argument parser
parser = argparse.ArgumentParser(
description='Example: Synapse CT example segmentation training script')
parser.add_argument('--resume', default=False, action='store_true')
parser.add_argument('--verbose', default=False, action='store_true')
parser.add_argument('--cuda_devices', '-c', default='0')
parser.add_argument('--save_path', '-p', default='/tmp/synapse_ct_seg/')
parser.add_argument('--train_csv', default='train.csv')
parser.add_argument('--val_csv', default='val.csv')
parser.add_argument('--config', default="config.json")
args = parser.parse_args()
# Set verbosity
if args.verbose:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.logging.set_verbosity(tf.logging.INFO)
else:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
# GPU allocation options
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices
# Create model save path
os.system("rm -rf %s" % args.save_path)
os.system("mkdir -p %s" % args.save_path)
# Call training
train(args)
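# Illustrative invocation (CSV/config paths are placeholders):
#   python train.py --train_csv train.csv --val_csv val.csv --config config.json \
#       --cuda_devices 0 --save_path /tmp/synapse_ct_seg/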
|
src/probflow/data/data_generator.py | chiragnagpal/probflow | 134 | 12698829 | <gh_stars>100-1000
import multiprocessing as mp
from abc import abstractmethod
from probflow.utils.base import BaseDataGenerator
class DataGenerator(BaseDataGenerator):
"""Abstract base class for a data generator, which uses multiprocessing
to load the data in parallel.
TODO
User needs to implement:
* :meth:`~__init__`
* :meth:`~n_samples`
* :meth:`~batch_size`
* :meth:`~get_batch`
And can optionally implement:
* :meth:`~on_epoch_start`
* :meth:`~on_epoch_end`
"""
def __init__(self, num_workers=None):
self.num_workers = num_workers
@abstractmethod
def get_batch(self, index):
"""Generate one batch of data"""
def __getitem__(self, index):
"""Generate one batch of data"""
# No multiprocessing
if self.num_workers is None:
return self.get_batch(index)
# Multiprocessing
else:
# Start the next worker
pid = index + self.num_workers
if pid < len(self):
self._workers[pid].start()
# Return data from the multiprocessing queue
return self._queue.get()
def __iter__(self):
"""Get an iterator over batches"""
# Multiprocessing?
if self.num_workers is not None:
def get_data(index, queue):
queue.put(self.get_batch(index))
# Create the queue and worker processes
self._queue = mp.Queue()
self._workers = [
mp.Process(target=get_data, args=(i, self._queue))
for i in range(len(self))
]
# Start the first num_workers workers
for i in range(min(self.num_workers, len(self))):
self._workers[i].start()
# Keep track of what batch we're on
self._batch = -1
# Return iterator
return self
def __next__(self):
"""Get the next batch"""
self._batch += 1
if self._batch < len(self):
return self[self._batch]
else:
raise StopIteration()
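# Minimal subclass sketch (names, data layout, and the property-style n_samples/batch_size
# are assumptions based on the docstring above):
#   class ArrayDataGenerator(DataGenerator):
#       def __init__(self, x, y, batch_size=32, num_workers=None):
#           super().__init__(num_workers=num_workers)
#           self.x, self.y, self._batch_size = x, y, batch_size
#       @property
#       def n_samples(self):
#           return self.x.shape[0]
#       @property
#       def batch_size(self):
#           return self._batch_size
#       def get_batch(self, index):
#           s = slice(index * self._batch_size, (index + 1) * self._batch_size)
#           return self.x[s], self.y[s]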
|
deep_sort-master/evaluate_motchallenge.py | riciche/SimpleCVReproduction | 923 | 12698830 | # vim: expandtab:ts=4:sw=4
import argparse
import os
import deep_sort_app
def parse_args():
""" Parse command line arguments.
"""
parser = argparse.ArgumentParser(description="MOTChallenge evaluation")
parser.add_argument(
"--mot_dir", help="Path to MOTChallenge directory (train or test)",
required=True)
parser.add_argument(
"--detection_dir", help="Path to detections.", default="detections",
required=True)
parser.add_argument(
"--output_dir", help="Folder in which the results will be stored. Will "
"be created if it does not exist.", default="results")
parser.add_argument(
"--min_confidence", help="Detection confidence threshold. Disregard "
"all detections that have a confidence lower than this value.",
default=0.0, type=float)
parser.add_argument(
"--min_detection_height", help="Threshold on the detection bounding "
"box height. Detections with height smaller than this value are "
"disregarded", default=0, type=int)
parser.add_argument(
"--nms_max_overlap", help="Non-maxima suppression threshold: Maximum "
"detection overlap.", default=1.0, type=float)
parser.add_argument(
"--max_cosine_distance", help="Gating threshold for cosine distance "
"metric (object appearance).", type=float, default=0.2)
parser.add_argument(
"--nn_budget", help="Maximum size of the appearance descriptors "
"gallery. If None, no budget is enforced.", type=int, default=100)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
os.makedirs(args.output_dir, exist_ok=True)
sequences = os.listdir(args.mot_dir)
for sequence in sequences:
print("Running sequence %s" % sequence)
sequence_dir = os.path.join(args.mot_dir, sequence)
detection_file = os.path.join(args.detection_dir, "%s.npy" % sequence)
output_file = os.path.join(args.output_dir, "%s.txt" % sequence)
deep_sort_app.run(
sequence_dir, detection_file, output_file, args.min_confidence,
args.nms_max_overlap, args.min_detection_height,
args.max_cosine_distance, args.nn_budget, display=False)
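# Illustrative invocation (directory layout is a placeholder):
#   python evaluate_motchallenge.py --mot_dir ./MOT16/train \
#       --detection_dir ./detections --output_dir ./results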
|
examples/benchmark.py | Cam2337/snap-python | 242 | 12698865 | #!/usr/bin/python
# benchmark.py
#
# Author: <NAME>, Spring 2013
# Description:
# - Loads SNAP as a Python module.
# - Randomly generates a graph of specified size and type, and saves
# or loads the graph if it has already been created.
# - Benchmarks a number of "is this a good graph?" tests on the graph,
# calculating the amount of time required and appends to a file.
#
# usage: benchmark.py [-h] [-v] [-r RANGE] [-d] [-t GRAPH_TYPES]
# [-n NUM_ITERATIONS] [-o OUTPUT_FILE] [-g]
#
# optional arguments:
# -h, --help show this help message and exit
# -v, --verbose increase output verbosity
# -r RANGE, --range RANGE
# range (4-6) (10^4 to 10^6 nodes)
# -d, --deterministic deterministic benchmark
# -t GRAPH_TYPES, --graph_types GRAPH_TYPES
# Graph types, comma separated. Available: rand_ungraph,
# rand_ngraph, rmat, pref, sw
# -n NUM_ITERATIONS, --num_iterations NUM_ITERATIONS
# number of iterations
# -o OUTPUT_FILE, --output_file OUTPUT_FILE
# file to output results
# -g, --generate generate new graphs
#
# Examples:
# 1. Use default arguments.
# $ python benchmark.py
# 2. Generate deterministic RMAT graphs from 10^2-10^3 nodes, and
# run 3 times, outputing to results.txt.
# $ python benchmark.py -v -n 3 -g -d -r 2-3 -t rmat -o results/results.txt
#
import os.path
import sys
import argparse
from socket import gethostname
from time import clock
from datetime import datetime
sys.path.append("../swig")
import snap
PROPERTY_TYPES = [1, 10] # 1=Triads, 10=BFS
# Comma-separated, graph types:
# 'rmat' - R-MAT
# 'pref' - preferential attachment
# 'sw' - small world
# 'rand_ungraph' - random undirected
# 'rand_ngraph' - random directed
# 'rand_neanet' - random directed attribute
# 'syn_ngraph' - random directed
# 'syn_negraph' - synthetic multi-edge
# 'syn_neanet' - synthetic directed multi-edge attribute
DEFAULT_TYPES = "rand_neanet"
# Average is 1, non-average is 0.
DEFAULT_DEGREES = "1-2" # Default is 10x and 100x edges/node
DEFAULT_WRITE = False
SW_REWIRE_PROB = 0.1
SYNTHETIC_DELTA = 10
# Exponent range (e.g. 10^x to 10^y)
DEFAULT_VERBOSE=True
DEFAULT_RANGE = '3-4'
DEFAULT_ITERATIONS = 1
# Hostname for results
HOSTNAME = gethostname()
RESULTS_DIR = 'results'
DEFAULT_RESULTS_FILE = os.path.join(RESULTS_DIR, 'results%s.txt' % \
datetime.now().strftime('%m%d-%H%M%S'))
def benchmark_ngraph(Graph):
'''
Perform benchmark tests for Directed Graphs
'''
results = {}
results['num_nodes'] = Graph.GetNodes()
results['num_edges'] = Graph.GetEdges()
for degree in range(0, 11):
num = snap.NodesGTEDegree_PNGraph(Graph, degree)
percent_deg = float(num) / results['num_nodes']
results['deg_gte_%d' % degree] = num
results['deg_gte_%d_percent' % degree] = percent_deg
# Check for over-weighted nodes
results['max_degree'] = snap.GetMxDegNId(Graph)
num = snap.NodesGTEDegree_PNGraph(Graph, results['max_degree'])
results['max_degree_num'] = num
results['max_wcc_percent'] = snap.MxWccSz_PNGraph(Graph) \
/ results['num_nodes']
results['max_scc_percent'] = snap.MxSccSz_PNGraph(Graph).GetNodes() \
/ results['num_nodes']
return results
def benchmark_ungraph(Graph):
'''
Perform benchmark tests for Undirected Graphs
'''
results = {}
results['num_nodes'] = Graph.GetNodes()
results['num_edges'] = Graph.GetEdges()
for degree in range(0,11):
num = snap.NodesGTEDegree_PUNGraph(Graph, degree)
percent_deg = float(num) / results['num_nodes']
results['deg_gte_%d' % degree] = num
results['deg_gte_%d_percent' % degree] = percent_deg
# Check for over-weighted nodes
results['max_degree'] = snap.MxDegree_PUNGraph(Graph)
num = snap.NodesGTEDegree_PUNGraph(Graph, results['max_degree'])
results['max_degree_num'] = num
results['max_wcc_percent'] = snap.MxWccSz_PUNGraph(Graph) \
/ results['num_nodes']
results['max_scc_percent'] = snap.MxSccSz_PUNGraph(Graph).GetNodes() \
/ results['num_nodes']
# TODO: Calculate graph skew
return results
def benchmark_neanet(Graph):
'''
Perform benchmark tests for Directed Attribute Graphs
'''
results = {}
results['num_nodes'] = Graph.GetNodes()
results['num_edges'] = Graph.GetEdges()
for degree in range(0, 11):
num = snap.NodesGTEDegree(Graph, degree)
percent_deg = float(num) / results['num_nodes']
results['deg_gte_%d' % degree] = num
results['deg_gte_%d_percent' % degree] = percent_deg
# Check for over-weighted nodes
results['max_degree'] = snap.MxDegree(Graph)
num = snap.NodesGTEDegree(Graph, results['max_degree'])
results['max_degree_num'] = num
results['max_wcc_percent'] = snap.GetMxWccSz(Graph) \
/ results['num_nodes']
results['max_scc_percent'] = snap.GetMxSccSz(Graph).GetNodes() \
/ results['num_nodes']
return results
def convert_graph(Graph, TypeSrc, TypeDst):
'''
Converts a GRAPH from type TYPESRC to a TYPEDST and returns the new graph
'''
pass
def generate_graph(NNodes, NEdges, Model, Type, Rnd):
if Model == 'rand_ungraph':
# GnRndGnm returns error, so manually generate
#Graph = snap.GenRndGnm_PUNGraph(NNodes, NEdges, 0)
Graph = snap.GenRndGnm(snap.PUNGraph, NNodes, NEdges, 0)
elif Model == 'rand_ngraph':
#Graph = snap.GenRndGnm_PNGraph(NNodes, NEdges, 1)
Graph = snap.GenRndGnm(snap.PNGraph, NNodes, NEdges, 1)
elif Model == 'rand_neanet':
print "1", NNodes, NEdges
#Graph = snap.GenRndGnm_PNEANet(NNodes, NEdges, 1)
Graph = snap.GenRndGnm(snap.PNEANet, NNodes, NEdges, 1)
print "2"
print "3", Graph.GetNodes(), Graph.GetEdges()
elif Model == 'syn_neanet':
Graph = snap.GenSyntheticGraph(NNodes, NEdges/NNodes,
SYNTHETIC_DELTA)
elif Model == 'syn_ngraph':
Graph = snap.GenSyntheticGraph_PNGraph(NNodes, NEdges/NNodes,
SYNTHETIC_DELTA)
elif Model == 'rmat':
Graph = snap.GenRMat(NNodes, NEdges, 0.40, 0.25, 0.2, Rnd)
elif Model == 'sw':
Graph = snap.GenSmallWorld(NNodes, NNodes/NEdges, 0.1)
elif Model == 'pref':
Graph = snap.GenPrefAttach(NNodes, NNodes/NEdges)
return Graph
def run_tests(num_iterations=3, min_nodes_exponent=3, max_nodes_exponent=4):
'''
Perform tests with specified exponent range
'''
if verbose:
print "Running results from %e to %e" % (min_nodes_exponent,
max_nodes_exponent)
Rnd = snap.TRnd()
for exp in range(min_nodes_exponent,max_nodes_exponent+1):
for n in range(num_iterations):
if verbose:
print "Iteration: %d of %d" % (n+1, num_iterations)
# Random number of nodes of degree i
NNodes = 10**exp;
for avg_deg in range(min_degree_edges, max_degree_edges+1):
for g in graph_types:
if deterministic:
if verbose:
print "Deterministic mode, putting seed"
else:
if verbose:
print "Non-deterministic mode"
Rnd.PutSeed(0)
if verbose: print "Using average degree of 10^%d" % avg_deg
NEdges = NNodes*(10**avg_deg)
Graph = None
if g in ['rmat', 'rand_ngraph', 'syn_ngraph','syn_negraph']:
Type = "directed"
elif g in ['sw', 'pref', 'rand_ungraph']:
Type = "undirected"
elif g in ['rand_neanet', 'syn_neanet']:
Type = "attribute"
else:
print "Unknown graph type: %s" % g
sys.exit(1)
StartTime = clock()
FName = os.path.join(RESULTS_DIR, "%s_10e%d_deg%d_%d.graph" %
(g, exp, NEdges/NNodes, n))
if not generate:
if os.path.exists(FName):
try:
if verbose:
print "Loading '%s' from ...'%s'" % (g, FName),
sys.stdout.flush()
FIn = snap.TFIn(snap.TStr(FName))
if Type == "directed":
Graph = snap.PNGraph_New()
elif Type == "undirected":
Graph = snap.PUNGraph_New()
elif Type == "attribute":
Graph = snap.PNEANet_New()
Graph = Graph.Load(FIn)
if verbose: print "done"
if verbose:
print "Re-loaded graph with %d Nodes and %d Edges" % \
(Graph.GetNodes(), Graph.GetEdges())
except Exception, e:
print "Unable to load graph file, '%s': %s" % (FName, str(e))
# else:
# print "File not found: %s" % FName
if not Graph:
try:
# User wants to re-generate graph, or no graph data available.
if verbose:
print "Generating '%s %s' graph with %e nodes, %e edges..." % \
(Type, g, NNodes, NEdges),
sys.stdout.flush()
Graph = generate_graph(NNodes, NEdges, g, Type, Rnd)
if verbose: print "done"
if opt_write:
# Save the graph
if verbose:
print "Saving '%s' graph to file '%s'..." % (g, FName),
sys.stdout.flush()
if Graph:
FOut = snap.TFOut(snap.TStr(FName))
Graph.__ref__().Save(FOut) # Save as TUNGraph or TNGraph
FOut.Flush()
if verbose: print "done"
except Exception, e:
print "Unable to generate/save graph file, '%s': %s" % \
(FName, str(e))
continue
TimeGenerate = clock() - StartTime
print "Running tests...",
sys.stdout.flush()
StartTime = clock()
if Type == 'directed':
results = benchmark_ngraph(Graph)
elif Type == 'undirected':
results = benchmark_ungraph(Graph)
elif Type == 'attribute':
results = benchmark_neanet(Graph)
if verbose: print "done"
TimeElapsed = clock() - StartTime
print "Elapsed Time = %.4f sec" % TimeElapsed
row_header = ["Hostname", "Model", "Type", "Nodes", "Edges",
"StartTime", "Generation Time", "Run Time"]
print "Header: %s" % " ".join(row_header)
import csv
with open(results_file, 'a+') as csvfile:
writer = csv.writer(csvfile)
if verbose:
print "Writing to '%s'..." % results_file,
sys.stdout.flush()
row = [HOSTNAME, g, Type, NNodes, NEdges,
datetime.now().strftime("%d/%b/%Y:%H:%M:%S"),
TimeGenerate, TimeElapsed]
if verbose: print "done"
print "Time Data: %s" % repr(row)
writer.writerow(row)
print "-"*75
def main():
global results_dir, verbose, deterministic, generate, graph_types, \
hostname, num_iterations, results_file, \
min_degree_edges, max_degree_edges, opt_write
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", default=DEFAULT_VERBOSE,
action="store_true", dest="verbose",
help="increase output verbosity")
parser.add_argument("-r", "--range", default=DEFAULT_RANGE,
help="range (4-6) (10^4 to 10^6 nodes)")
parser.add_argument("-e", "--edges_deg", default=DEFAULT_DEGREES,
help="range of degrees (e.g \"2-3\" => (10^1 to 10^3 edges per node)")
parser.add_argument("-d", "--deterministic", default=False,
action="store_true", dest="deterministic",
help="deterministic benchmark")
parser.add_argument("-t", "--graph_types", default=DEFAULT_TYPES,
help='''
Graph types, comma separated.
Available: rand_ungraph, rand_ngraph, rmat, pref, sw''')
parser.add_argument("-n", "--num_iterations", type=int,
default=DEFAULT_ITERATIONS, help="number of iterations")
parser.add_argument("-o", "--output_file",
default=DEFAULT_RESULTS_FILE,
help="file to output results")
parser.add_argument("-g", "--generate", default=False,
action="store_true", dest="generate",
help="generate new graphs")
parser.add_argument("-w", "--write_graph", default=DEFAULT_WRITE,
action="store_true", dest="write",
help="save graph")
args = parser.parse_args()
verbose = args.verbose
generate = args.generate
deterministic = args.deterministic
results_file = args.output_file
num_iterations = args.num_iterations
graph_types = args.graph_types.split(",")
min_degree_edges = int(args.edges_deg.split("-")[0])
max_degree_edges = int(args.edges_deg.split("-")[-1])
opt_write = args.write
print "Edge degree = 10^%d to 10^%d edges/node" % \
(min_degree_edges, max_degree_edges)
if verbose:
print "Hostname: %s" % HOSTNAME
min = int(args.range.split("-")[0])
max = int(args.range.split("-")[-1])
print "Node range = 10^%d to 10^%d" % (min, max)
if not os.path.exists(RESULTS_DIR):
print "Creating results directory %s" % RESULTS_DIR
os.makedirs(RESULTS_DIR)
run_tests(num_iterations, min, max)
if __name__ == "__main__":
main()
|
automation/batch.py | CrackerCat/HexraysToolbox | 346 | 12698878 | #!/usr/bin/env python
try:
from idaapi import *
except:
import sys, os, argparse, subprocess, logging, threading, time, signal
sigint_count = 0
cur_thread_count = 0
def sig_handler(signum, frame):
global sigint_count
global cur_thread_count
msg = "SIGINT: "
if not sigint_count:
logging.warning("%saborting..." % msg)
else:
if sigint_count > 2:
logging.error("DUDE WHY DON'T YOU JUST CHILL!?!?")
logging.warning("%sI am alive and waiting for %d IDA instances to finish" % (msg, cur_thread_count))
sigint_count += 1
return
def process_files(ida_path, in_path, out_path, script_path, threads, compress):
global cur_thread_count
if threads < 1:
return
input_files = list()
for root, dirs, files in os.walk(in_path):
for f in files:
input_files.append(os.path.join(root, f))
total_files = len(input_files)
cur_file = 0
logging.info("Starting to process %d files (max %d concurrent threads)" % (total_files, threads))
lock = threading.Lock()
thread_exit_evt = threading.Event()
signal.signal(signal.SIGINT, sig_handler)
while not sigint_count and len(input_files):
with lock:
n = cur_thread_count
while n < threads:
if not len(input_files):
break
f = input_files.pop(0)
cur_file += 1
cmdline = "%s -o\"%s\" -A -c %s -S\"%s\" \"%s\"" % (
ida_path,
os.path.join(out_path, os.path.basename(f))+".idb",
"-P+" if compress else "",
script_path,
f)
logging.debug("Running %s" % cmdline)
logging.info("Thread %d/%d: processing file %d/%d - \"%s\"" % (threading.active_count(),
threads,
cur_file,
total_files,
f))
with lock:
cur_thread_count += 1
ida_instance(cmdline, thread_exit_evt, lock).start()
with lock:
n = cur_thread_count
logging.debug("Threshold reached / no more files in queue. Waiting...")
while not sigint_count and not thread_exit_evt.wait(1.0):
pass
thread_exit_evt.clear()
with lock:
n = cur_thread_count
while n > 0:
logging.info("Waiting for %d more IDA instances to finish" % (n))
while not thread_exit_evt.wait(1):
pass
thread_exit_evt.clear()
with lock:
n = cur_thread_count
return
class ida_instance(threading.Thread):
def __init__(self, cmdline, thread_exit_evt, lock):
threading.Thread.__init__(self)
self.cmdline = cmdline
self.thread_exit_evt = thread_exit_evt
self.lock = lock
return
def run_ida_instance(self):
global cur_thread_count
cp = subprocess.run(self.cmdline)
logging.debug("IDA instance terminated (exit code %d)" % (cp.returncode))
with self.lock:
cur_thread_count -= 1
self.thread_exit_evt.set()
return cp.returncode
def run(self):
self.run_ida_instance()
return
def run_batch_mode():
parser = argparse.ArgumentParser()
parser.add_argument("idapath",
type=str,
help="path to IDA executable (ida/ida64/idat/idat64/...)")
parser.add_argument("inpath",
type=str,
help="input path containing files to scan")
parser.add_argument("outpath",
type=str,
help="output path. idb/i64 files and logs will be stored here")
parser.add_argument("-t", "--threads", type=int,
default=3,
help="maximum number of concurrent IDA instances (default=3)")
parser.add_argument("-l", "--loglevel", type=str,
default="INFO",
help="log level: INFO, DEBUG (default: INFO)")
parser.add_argument("-c", "--compress", action="store_true",
help="compress IDA database")
args = parser.parse_args()
numeric_level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % args.loglevel)
logging.basicConfig(
format="[%(asctime)s] [%(levelname)s]\t%(message)s",
level=numeric_level,
datefmt="%H:%M:%S")
script_path = os.path.abspath(sys.argv[0])
if " " in script_path:
logging.error("This script must not be run from a path that contains whitespace characters!")
sys.exit(1)
process_files(args.idapath, args.inpath, args.outpath, script_path, args.threads, args.compress)
logging.info("Exiting")
return
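    # Hypothetical invocation sketch (added for illustration; the IDA executable and
    # sample/output paths are placeholders that depend on your local setup):
    #   python batch.py C:/IDA/idat64.exe ./samples ./out -t 4 -c -l DEBUG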
run_batch_mode()
sys.exit(0)
# IDAPython specific code starts here
import hxtb
import logging
def get_callers_to(func_name):
"""returns list of functions calling 'func_name'"""
ea = get_name_ea(BADADDR, func_name)
if ea == BADADDR:
# return empty list
return list()
xrefs = CodeRefsTo(ea, False)
funcs = [get_func(xref).start_ea for xref in xrefs if get_func(xref)]
return list(set(funcs))
def run_query_02():
logging.info("-" * 80)
logging.info("Query start: 0x3300")
q = lambda func, item: (item.op is cot_num and
item.numval() == 0x3300)
matches = hxtb.query_db(q)
if len(matches):
for m in matches:
logging.info("Match: %s" % m)
else:
logging.info("Nothing found")
logging.info("Query end: 0x3300")
logging.info("-" * 80)
return True
def run_query_01():
"""find calls to WinHttpSetOption() where 2nd argument has the
WINHTTP_OPTION_SECURITY_FLAGS flags set
"""
logging.info("-" * 80)
logging.info("Query start: WinHttpSetOption")
callsites = get_callers_to("WinHttpSetOption")
if len(callsites):
q = lambda func, item: (item.op is cot_call and
item.x.op is cot_obj and
get_name(item.x.obj_ea) == "WinHttpSetOption" and
item.a[1].op is cot_num and
item.a[1].numval() & 0x1f == 0x1f)
matches = hxtb.query(q, ea_list=callsites)
if len(matches):
for m in matches:
logging.info("Match: %s" % m)
else:
logging.info("No calls resolvable")
else:
logging.info("No calls resolvable")
logging.info("Query end: WinHttpSetOption")
logging.info("-" * 80)
return True
def ida_context_main():
logging.basicConfig(
filename="%s.log" % os.path.splitext(get_idb_path())[0],
format="[ %(asctime)s ] [%(levelname)s]\t%(message)s",
level=logging.DEBUG,
datefmt="%Y-%m-%d %H:%M:%S")
logging.info("=" * 80)
logging.info("Input file: %s" % get_input_file_path())
logging.info("IDA database: %s" % get_idb_path())
if init_hexrays_plugin():
        # taken from analysis.idc
inf_set_af((inf_get_af() | AF_DODATA | AF_FINAL) & BADADDR)
auto_mark_range(0, BADADDR, AU_FINAL)
logging.info("Waiting for disassembly to finish")
auto_wait()
logging.info("Done")
logging.info("Running queries now")
# queries go here
run_query_01()
run_query_02()
else:
logging.error("Decompiler unavailable")
logging.info("Scan process completed. Exiting.\n")
qexit(0)
return
ida_context_main()
|
loggibud/v1/plotting/plot_solution.py | thiagopbueno/loggibud | 143 | 12698892 | <reponame>thiagopbueno/loggibud
"""Plots solution routes"""
from typing import List, Iterable, Optional
import folium
import numpy as np
import polyline
import requests
from loggibud.v1.types import CVRPSolution, Point
from loggibud.v1.distances import OSRMConfig
# All available map colors
MAP_COLORS = (
"black",
"blue",
"darkred",
"purple",
"red",
"orange",
"green",
"pink",
"darkblue",
"beige",
"gray",
"lightgreen",
"lightblue",
"lightgray",
"cadetblue",
)
def plot_cvrp_solution_routes(
solution: CVRPSolution,
route_indices_to_plot: Optional[List[int]] = None,
config: Optional[OSRMConfig] = None,
) -> None:
"""Plot solution routes in a map along the streets
Parameters
----------
solution
A solution to any solver with the vehicles routes to plot
route_indices_to_plot
If specified, selects a smaller subset of routes to plot by their
indices. This can be useful to reduce the clutter in case of a
solution with too many vehicles
config
OSRM configuration
"""
config = config or OSRMConfig()
# Initialize map centered at the mean of the origins
origins_mean = np.mean(
[
(vehicle.origin.lat, vehicle.origin.lng)
for vehicle in solution.vehicles
],
axis=0,
)
m = folium.Map(
location=origins_mean,
zoom_start=12,
tiles="cartodbpositron",
)
num_vehicles = len(solution.vehicles)
route_indices_to_plot = route_indices_to_plot or range(num_vehicles)
vehicles_subset = [solution.vehicles[i] for i in route_indices_to_plot]
for i, vehicle in enumerate(vehicles_subset):
vehicle_color = MAP_COLORS[i % len(MAP_COLORS)]
# Plot origin
origin = (vehicle.origin.lat, vehicle.origin.lng)
folium.CircleMarker(origin, color="red", radius=3, weight=5).add_to(m)
# Plot street outlines
wiring = _route_wiring(vehicle.circuit, config)
folium.PolyLine(
wiring, color=vehicle_color, weight=1.0, popup=f"Vehicle {i}"
).add_to(m)
# Plot the deliveries as regular points
for delivery in vehicle.deliveries:
folium.Circle(
location=(delivery.point.lat, delivery.point.lng),
radius=10,
fill=True,
color=vehicle_color,
popup=(
f"Vehicle {i} ({delivery.point.lat}, {delivery.point.lng})"
),
).add_to(m)
return m
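# Minimal usage sketch (added for illustration): it assumes a CVRPSolution built or loaded
# elsewhere and a reachable OSRM server; the loader call and file names are hypothetical.
#   solution = CVRPSolution.from_file("solution.json")  # hypothetical helper/path
#   m = plot_cvrp_solution_routes(solution, config=OSRMConfig(host="http://localhost:5000"))
#   m.save("routes.html")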
def _route_wiring(points: Iterable[Point], config):
coords_uri = ";".join(f"{point.lng},{point.lat}" for point in points)
response = requests.get(
f"{config.host}/route/v1/driving/{coords_uri}?overview=simplified",
timeout=config.timeout_s,
)
data = response.json()
line = data["routes"][0]["geometry"]
return [(lat, lng) for lat, lng in polyline.decode(line)]
def plot_cvrp_solution(
solution: CVRPSolution, route_indices_to_plot: Optional[List[int]] = None
) -> None:
"""Plot solution deliveries in a map
This is a simplified version showing only the edges between each delivery.
It does not require an OSRM server configuration.
Parameters
----------
solution
A solution to any solver with the vehicles routes to plot
route_indices_to_plot
If specified, selects a smaller subset of routes to plot by their
indices. This can be useful to reduce the clutter in case of a
solution with too many vehicles
"""
# Initialize map centered at the mean of the origins
origins_mean = np.mean(
[
(vehicle.origin.lat, vehicle.origin.lng)
for vehicle in solution.vehicles
],
axis=0,
)
m = folium.Map(
location=origins_mean,
zoom_start=12,
tiles="cartodbpositron",
)
num_vehicles = len(solution.vehicles)
route_indices_to_plot = route_indices_to_plot or range(num_vehicles)
vehicles_subset = [solution.vehicles[i] for i in route_indices_to_plot]
for i, vehicle in enumerate(vehicles_subset):
origin = (vehicle.origin.lat, vehicle.origin.lng)
folium.CircleMarker(origin, color="red", radius=3, weight=5).add_to(m)
vehicle_color = MAP_COLORS[i % len(MAP_COLORS)]
vehicle_coords = [(point.lat, point.lng) for point in vehicle.circuit]
folium.Polygon(
vehicle_coords,
popup=f"Vehicle {i}",
color=vehicle_color,
weight=1,
).add_to(m)
return m
|
utils/mobjects/DelaunayTrianglation.py | AStarySky/manim_sandbox | 366 | 12698898 | # from @有一种悲伤叫颓废
"""
Notes:
1. Mainly used to compute the Delaunay triangulation and the Voronoi diagram; the idea behind the algorithm is explained in this video: https://www.bilibili.com/video/BV1Ck4y1z7VT
2. Time complexity is O(nlogn), which should be enough for typical cases; if you find a bug, please contact 颓废
3. Only two functions need to be imported: DelaunayTrianglation (computes the Delaunay triangulation) and Voronoi (computes the Voronoi diagram)
"""
import numpy as np
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.constants import PI
from manimlib.utils.config_ops import digest_config
from manimlib.mobject.geometry import Dot, Line, Polygon
from manimlib.scene.scene import Scene
from manimlib.utils.space_ops import normalize
#import time
#import math
#from manimlib.imports import *
#from manim_sandbox.utils.imports import *
# It is recommended not to change the ratios below; making the precision too large or too small may cause bugs
# Two values are considered equal when their difference is below this tolerance
ev = np.exp(1)**PI/1000000000
ev_sq = ev**2
# Infinity
Infinity = 333
# Check whether two points are equal: they are equal if the squared distance is below the squared tolerance, O(1)
def point_is_equal(p, q):
    p, q = np.array(p), np.array(q)
    # If the squared distance between the two points is below the squared tolerance, they are equal
if np.dot(q-p, q-p) < ev_sq:
return True
return False
# Positive if b is to the left of vector pq, negative if to the right, O(1)
def cross2(p, q, b):
    '''
    Cross product formula
\begin{align}
ToLeft(p, q, b)=\begin{vmatrix}
x_p & y_p & 1\\
x_q & y_q & 1\\
x_b & y_b & 1\\
\end{vmatrix}\end{align}
'''
return p[0]*q[1] - p[1]*q[0] + \
q[0]*b[1] - q[1]*b[0] + \
b[0]*p[1] - b[1]*p[0]
# Ignoring the tolerance: positive if b is to the left of vector pq, negative if to the right, O(1)
def ToLeft(p, q, b):
    a = cross2(p, q, b)
    # Below the tolerance: b is considered to lie on the vector
    if abs(a) < ev:
        return 0
    # Implicit else: abs(a) >= ev
return a
# Return True if point d is inside triangle pqb, O(1)
def InTriangle(p, q, b, d):
tl1 = ToLeft(p, q, d)
if abs(tl1) < ev:
tl2 = ToLeft(q, b, d)
tl3 = ToLeft(b, p, d)
if tl2 < ev and tl3 < ev or tl2 > -ev and tl3 > -ev:
return True
return False
if tl1 > ev:
if ToLeft(q, b, d) > -ev and ToLeft(b, p, d) > -ev:
return True
return False
if tl1 < -ev:
if ToLeft(q, b, d) < ev and ToLeft(b, p, d) < ev:
return True
return False
# Return True if point d lies inside the circumcircle of the three points p, q, b, O(1)
def InCircle(p, q, b, d):
    '''
    Relation between a point and the circle through three points
\begin{align}
InCircle(p, q, b, d)=\begin{vmatrix}
x_p & y_p & x_p^2+y_p^2 & 1\\
x_q & y_q & x_q^2+y_q^2 & 1\\
x_b & y_b & x_b^2+y_b^2 & 1\\
x_d & y_d & x_d^2+y_d^2 & 1\\
\end{vmatrix}\end{align}
'''
a13 = p[0]**2+p[1]**2
a23 = q[0]**2+q[1]**2
a33 = b[0]**2+b[1]**2
a43 = d[0]**2+d[1]**2
det = np.linalg.det([
[p[0], p[1], a13, 1],
[q[0], q[1], a23, 1],
[b[0], b[1], a33, 1],
[d[0], d[1], a43, 1],
])
if det < -ev:
return True
return False
# Circumcenter of the circle through three points, O(1)
def CircumcircleCenter(p, q, b):
    '''
    \begin{align}
    &Circumcenter formula for three points\\
&x=\frac{1}{2}\begin{vmatrix}
1 & x_p^2+y_p^2 & y_p\\
1 & x_q^2+y_q^2 & y_q\\
1 & x_b^2+y_b^2 & y_b\\
\end{vmatrix}/\begin{vmatrix}
1 & x_p & y_p\\
1 & x_q & y_q\\
1 & x_b & y_b\\
\end{vmatrix}\\
&y=\frac{1}{2}\begin{vmatrix}
1 & x_p & x_p^2+y_p^2\\
1 & x_q & x_q^2+y_q^2\\
1 & x_b & x_b^2+y_b^2\\
\end{vmatrix}/\begin{vmatrix}
1 & x_p & y_p\\
1 & x_q & y_q\\
1 & x_b & y_b\\
\end{vmatrix}
\end{align}
'''
a1 = p[0]**2+p[1]**2
a2 = q[0]**2+q[1]**2
a3 = b[0]**2+b[1]**2
det1 = np.linalg.det([
[1, p[0], p[1]],
[1, q[0], q[1]],
[1, b[0], b[1]],
])
if det1 == 0:
print("三点共线")
return None
det2 = np.linalg.det([
[1, a1, p[1]],
[1, a2, q[1]],
[1, a3, b[1]],
])
det3 = np.linalg.det([
[1, p[0], a1],
[1, q[0], a2],
[1, b[0], a3],
])
return np.array([det2/det1, det3/det1, 0])/2
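# Worked example (added for clarity, not part of the original file): the circumcenter of
# the right triangle (0, 0), (2, 0), (0, 2) is (1, 1), so
#   CircumcircleCenter([0, 0, 0], [2, 0, 0], [0, 2, 0])  # -> array([1., 1., 0.])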
# Face
class Face():
    def __init__(self, halfedge):
        # Flag marking whether this face has been visited
        self.Visit = False
        # One half-edge belonging to this face
        self.HalfEdge = halfedge
        # Bucket associated with this face
        self.Bucket = None
        # Circumcircle center, used when computing the Voronoi diagram
        self.Center = None
# Vertex
class Vertice():
    def __init__(self, point):
        # Vertex coordinates
        self.Point = point
        # One half-edge emanating from this vertex
        self.HalfEdge = None
# Half-edge
class HalfEdge():
    def __init__(self, start, end):
        # Visited flag
        self.Visit = False
        # Start vertex of the edge
        self.Start = start
        # End vertex of the edge
        self.End = end
        # Twin half-edge
        self.Twin = None
        # Face this half-edge belongs to
        self.Face = None
        # Predecessor of the edge
        self.Pre = None
        # Successor of the edge
        self.Suc = None
# Bucket
class Bucket():
    def __init__(self, points):
        # Points held by this bucket
        self.Points = points
        # Face associated with this bucket
        self.Face = None
# Initialize the infinite bounding net, O(1)
def InitInfNet(points = None):
    # Initialize the points at infinity
    # Counterclockwise
infv1 = Vertice(np.array([Infinity, 0, 0]))
infv2 = Vertice(np.array([0, Infinity, 0]))
infv3 = Vertice(np.array([-Infinity, -Infinity, 0]))
    # Initialize the half-edges at infinity
halfedge1 = HalfEdge(infv1, infv2)
halfedge2 = HalfEdge(infv2, infv3)
halfedge3 = HalfEdge(infv3, infv1)
    # Initialize the half-edge emanating from each vertex
infv1.HalfEdge = halfedge1
infv2.HalfEdge = halfedge2
infv3.HalfEdge = halfedge3
    # Initialize the infinite face
face1 = Face(halfedge1)
    # Initialize predecessor, successor and owning face for the infinite half-edges
halfedge1.Pre = halfedge3
halfedge1.Suc = halfedge2
halfedge1.Face = face1
halfedge2.Pre = halfedge1
halfedge2.Suc = halfedge3
halfedge2.Face = face1
halfedge3.Pre = halfedge2
halfedge3.Suc = halfedge1
halfedge3.Face = face1
    # Initialize the bucket, which contains all the points
bucket1 = Bucket(points)
bucket1.Face = face1
    # Bucket associated with the face
face1.Bucket = bucket1
return face1
# Get the signed area of a polygon; for a non-self-intersecting polygon, positive means counterclockwise and negative means clockwise; 0 needs special consideration, O(n)
def get_polygon_directed_area(polygon):
a = polygon.get_vertices()
l = len(a)
return 1 / 2 * sum([a[i][0] * a[(i + 1) % l][1] - a[(i + 1) % l][0] * a[i][1] for i in range(l)])
# Edge flipping, O(1)
def EdgeFlipping(halfedge):
# 记录面的旧visit值
visitvalue = halfedge.Face.Visit
# 待翻转边所在的四边形的顶点
v1 = halfedge.Start
v2 = halfedge.Twin.Suc.End
v3 = halfedge.End
v4 = halfedge.Suc.End
# 顶点的坐标
p1 = v1.Point
p2 = v2.Point
p3 = v3.Point
p4 = v4.Point
# 待翻转边所在的四边形的边,ei由vi引出
e1 = halfedge.Twin.Suc
e2 = halfedge.Twin.Pre
e3 = halfedge.Suc
e4 = halfedge.Pre
# 修改顶点引出的边为非翻转的边(待翻转边所在的四边形的边)
v1.HalfEdge = e1
v2.HalfEdge = e2
v3.HalfEdge = e3
v4.HalfEdge = e4
# 待翻转边所在的四边形的两个桶中的点
oldpoints = [*halfedge.Face.Bucket.Points, *halfedge.Twin.Face.Bucket.Points]
# 重新分桶
newpoints1, newpoints2 = [], []
for oldpoint in oldpoints:
if InTriangle(p1, p2, p4, oldpoint):
newpoints1.append(oldpoint)
else:
newpoints2.append(oldpoint)
# 重新构造的面,逆时针
newface1, newface2 = Face(e1), Face(e2)
newface1.Visit = visitvalue
newface2.Visit = visitvalue
# 构造翻转后的边
e5, e6 = HalfEdge(v2, v4), HalfEdge(v4, v2)
e5.Twin = e6
e6.Twin = e5
e5.Visit = visitvalue
e6.Visit = visitvalue
# 构造newface1的边
e1.Suc = e5
e5.Suc = e4
e4.Suc = e1
e1.Pre = e4
e4.Pre = e5
e5.Pre = e1
# 构造newface2的边
e2.Suc = e3
e3.Suc = e6
e6.Suc = e2
e2.Pre = e6
e6.Pre = e3
e3.Pre = e2
# 边指向newface1
e1.Face = newface1
e4.Face = newface1
e5.Face = newface1
# 边指向newface2
e2.Face = newface2
e3.Face = newface2
e6.Face = newface2
# 构造两个新桶,并维持桶和面的联系
bucket1 = Bucket(newpoints1)
bucket2 = Bucket(newpoints2)
bucket1.Face = newface1
bucket2.Face = newface2
newface1.Bucket = bucket1
newface2.Bucket = bucket2
# Point vo splits the face into three new faces, O(1)
def ClipFace(face, vo, remainedpoints):
visitvalue = face.Visit
hf1 = face.HalfEdge
hf2 = hf1.Suc
hf3 = hf2.Suc
# 剪开面
clipface1 = Face(hf1)
clipface2 = Face(hf2)
clipface3 = Face(hf3)
clipface1.Visit = visitvalue
clipface2.Visit = visitvalue
clipface3.Visit = visitvalue
# face1
hf1_pre = HalfEdge(vo, hf1.Start)
hf1_suc = HalfEdge(hf1.End, vo)
hf1_pre.Visit = visitvalue
hf1_suc.Visit = visitvalue
hf1.Pre = hf1_pre
hf1.Suc = hf1_suc
hf1_pre.Pre = hf1_suc
hf1_pre.Suc = hf1
hf1_suc.Pre = hf1
hf1_suc.Suc = hf1_pre
hf1.Face = clipface1
hf1_pre.Face = clipface1
hf1_suc.Face = clipface1
# face2
hf2_pre = HalfEdge(vo, hf2.Start)
hf2_suc = HalfEdge(hf2.End, vo)
hf2_pre.Visit = visitvalue
hf2_suc.Visit = visitvalue
hf2.Pre = hf2_pre
hf2.Suc = hf2_suc
hf2_pre.Pre = hf2_suc
hf2_pre.Suc = hf2
hf2_suc.Pre = hf2
hf2_suc.Suc = hf2_pre
hf2.Face = clipface2
hf2_pre.Face = clipface2
hf2_suc.Face = clipface2
# face3
hf3_pre = HalfEdge(vo, hf3.Start)
hf3_suc = HalfEdge(hf3.End, vo)
hf3_pre.Visit = visitvalue
hf3_suc.Visit = visitvalue
hf3.Pre = hf3_pre
hf3.Suc = hf3_suc
hf3_pre.Pre = hf3_suc
hf3_pre.Suc = hf3
hf3_suc.Pre = hf3
hf3_suc.Suc = hf3_pre
hf3.Face = clipface3
hf3_pre.Face = clipface3
hf3_suc.Face = clipface3
vo.HalfEdge = hf1_pre
# twin
hf1_pre.Twin = hf3_suc
hf3_suc.Twin = hf1_pre
hf2_pre.Twin = hf1_suc
hf1_suc.Twin = hf2_pre
hf3_pre.Twin = hf2_suc
hf2_suc.Twin = hf3_pre
## 点放入桶
# 桶所在三角形的顶点
point = vo.Point
p1 = hf1.Start.Point
p2 = hf2.Start.Point
p3 = hf3.Start.Point
# 拆分桶
clipbucketps1, clipbucketps2, clipbucketps3 = [], [], []
for eachpoint in remainedpoints:
if InTriangle(p1, p2, point, eachpoint):
clipbucketps1.append(eachpoint)
elif InTriangle(p2, p3, point, eachpoint):
clipbucketps2.append(eachpoint)
else:
clipbucketps3.append(eachpoint)
# 撕裂的平面关联桶
clipbucket1 = Bucket(clipbucketps1)
clipbucket2 = Bucket(clipbucketps2)
clipbucket3 = Bucket(clipbucketps3)
clipface1.Bucket = clipbucket1
clipface2.Bucket = clipbucket2
clipface3.Bucket = clipbucket3
clipbucket1.Face = clipface1
clipbucket2.Face = clipface2
clipbucket3.Face = clipface3
return clipface1, clipface2, clipface3
# Traverse the net, O(n)
def VisitNet(face):
visitvalue = face.Visit
notvisitvalue = not visitvalue
faces = [face]
# 访问过
face.Visit = notvisitvalue
delaunaynet = []
while faces:
eachface = faces[-1]
faces.pop(-1)
# 面所在的三条边
e1 = eachface.HalfEdge
e2 = e1.Suc
e3 = e2.Suc
## 将正在访问的面的三个相邻的面加入faces
eis = [e1, e2, e3]
for ei in eis:
# ei的孪生兄弟
eiTwin = ei.Twin
# ei未被访问过
if ei.Visit == visitvalue:
ls, le = ei.Start.Point, ei.End.Point
if abs(ls[0]) != Infinity and abs(ls[1]) != Infinity and abs(le[0]) != Infinity and abs(le[1]) != Infinity:
delaunaynet.append([ls, le])
ei.Visit = notvisitvalue
if eiTwin:
faces.append(eiTwin.Face)
# 访问过
eiTwin.Face.Visit = notvisitvalue
eiTwin.Visit = notvisitvalue
return delaunaynet
# Traverse the triangles, O(n)
def VisitTriangles(face):
# 访问网
visitvalue = face.Visit
notvisitvalue = not visitvalue
faces = [face]
# 访问过
face.Visit = notvisitvalue
delaunaynet = VGroup()
while faces:
eachface = faces[-1]
faces.pop(-1)
# 面所在的三条边
e1 = eachface.HalfEdge
e2 = e1.Suc
e3 = e2.Suc
# 标记访问过
e1.Visit = notvisitvalue
e2.Visit = notvisitvalue
e3.Visit = notvisitvalue
# 面对三个点
p1 = e1.Start.Point
p2 = e2.Start.Point
p3 = e3.Start.Point
delaunaynet.add(Polygon(p1, p2, p3))
ei = [e1, e2, e3]
for each in ei:
et = each.Twin
if et:
etf = et.Face
# 未访问过
if etf.Visit == visitvalue:
# 访问过
etf.Visit = notvisitvalue
faces.append(etf)
return delaunaynet
# Traverse the Voronoi diagram, O(n)
def VisitVoronoi(face):
visitvalue = face.Visit
notvisitvalue = not visitvalue
faces = [face]
# 访问过
face.Visit = notvisitvalue
voronoi = []
while faces:
eachface = faces[-1]
faces.pop(-1)
# 面所在的三条边
e1 = eachface.HalfEdge
e2 = e1.Suc
e3 = e2.Suc
## 将正在访问的面的三个相邻的面加入faces
eis = [e1, e2, e3]
for ei in eis:
# ei的孪生兄弟
eiTwin = ei.Twin
# ei未被访问过
if ei.Visit == visitvalue:
ei.Visit = notvisitvalue
if eiTwin:
ls, le = ei.Start.Point, ei.End.Point
if abs(ls[0]) != Infinity and abs(ls[1]) != Infinity and abs(le[0]) != Infinity and abs(le[1]) != Infinity:
efc, etfc = ei.Face.Center, eiTwin.Face.Center
ese = eiTwin.Suc.End.Point
# 边的对点是无穷点
if abs(ese[0]) == Infinity or abs(ese[1]) == Infinity:
eis, eie = np.array(ei.Start.Point), np.array(ei.End.Point)
vertical = np.cross(eie - eis, np.array([0, 0, 1]))
vertical = normalize(vertical)
vertical = Infinity * vertical
newle = efc + vertical
voronoi.append([efc, newle])
else:
voronoi.append([efc, etfc])
faces.append(eiTwin.Face)
# 访问过
eiTwin.Face.Visit = notvisitvalue
eiTwin.Visit = notvisitvalue
return voronoi
# Add circumcircle centers to the net, O(n)
def InitNetCircumcircleCenter(face):
# 访问网
visitvalue = face.Visit
notvisitvalue = not visitvalue
faces = [face]
# 访问过
face.Visit = notvisitvalue
#delaunaynet = VGroup()
while faces:
eachface = faces[-1]
faces.pop(-1)
# 面所在的三条边
e1 = eachface.HalfEdge
e2 = e1.Suc
e3 = e2.Suc
# 标记访问过
e1.Visit = notvisitvalue
e2.Visit = notvisitvalue
e3.Visit = notvisitvalue
# 面对三个点
p1 = e1.Start.Point
p2 = e2.Start.Point
p3 = e3.Start.Point
# 赋值圆心
if eachface.Center is None:
eachface.Center = CircumcircleCenter(p1, p2, p3)
#delaunaynet.add(Polygon(p1, p2, p3))
eis = [e1, e2, e3]
for ei in eis:
eit = ei.Twin
if eit:
eitf = eit.Face
# 未访问过
if eitf.Visit == visitvalue:
# 访问过
eitf.Visit = notvisitvalue
faces.append(eitf)
# Construct the net, O(nlogn)
def ConstructNet(points=None):
face1 = InitInfNet(points)
infedge = face1.HalfEdge
buckets = [face1.Bucket]
while buckets:
# 取桶
bucket = buckets[-1]
buckets.pop(-1)
# 取桶的点
point = bucket.Points[-1]
bucket.Points.pop(-1)
vo = Vertice(point)
# 桶所在三角形的边
crpface = bucket.Face
hf1 = crpface.HalfEdge
hf2 = hf1.Suc
hf3 = hf2.Suc
# 撕裂面
ClipFace(crpface, vo, bucket.Points)
# 看看是否要边翻转
edges = [hf1, hf2, hf3]
while edges:
eachedge = edges[-1]
edges.pop(-1)
eachedgetwin = eachedge.Twin
if eachedgetwin:
trip1 = vo.Point
trip2 = eachedgetwin.Start.Point
trip3 = eachedgetwin.End.Point
trip4 = eachedgetwin.Suc.End.Point
if InCircle(trip1, trip2, trip3, trip4):
etfb = eachedgetwin.Face.Bucket
if len(etfb.Points) > 0:
buckets.remove(etfb)
edges.append(eachedgetwin.Pre)
edges.append(eachedgetwin.Suc)
EdgeFlipping(eachedge)
# 遍历点周围的所有边,把桶加入
ringvisit = vo.HalfEdge
currvisit = ringvisit.Twin.Suc
while currvisit != ringvisit:
currbucket = currvisit.Face.Bucket
if len(currbucket.Points) > 0:
buckets.append(currbucket)
currvisit = currvisit.Twin.Suc
currbucket = currvisit.Face.Bucket
if len(currbucket.Points) > 0:
buckets.append(currbucket)
return infedge.Face
# Get the face of the net that contains a given point
def get_point_posface(point, net):
# 访问网
visitvalue = net.Visit
notvisitvalue = not visitvalue
faces = [net]
# 访问过
net.Visit = notvisitvalue
    # Face that contains the point; stays None if the point lies in no face
    posface = None
    mark = True
while faces:
eachface = faces[-1]
faces.pop(-1)
# 面所在的三条边
e1 = eachface.HalfEdge
e2 = e1.Suc
e3 = e2.Suc
# 标记访问过
e1.Visit = notvisitvalue
e2.Visit = notvisitvalue
e3.Visit = notvisitvalue
# 面对三个点
p1 = e1.Start.Point
p2 = e2.Start.Point
p3 = e3.Start.Point
# 位置未找到
if mark:
if InTriangle(p1, p2, p3, point):
posface = eachface
ei = [e1, e2, e3]
for each in ei:
et = each.Twin
if et:
etf = et.Face
# 未访问过
if etf.Visit == visitvalue:
# 访问过
etf.Visit = notvisitvalue
faces.append(etf)
return posface
# Insert a point into the net, O(n)
def net_insert_point(point, net):
# 点所在的面
posface = get_point_posface(point, net)
posface.Bucket.Points.append(point)
infedge = posface.HalfEdge
buckets = [posface.Bucket]
while buckets:
# 取桶
bucket = buckets[-1]
buckets.pop(-1)
# 取桶的点
point = bucket.Points[-1]
bucket.Points.pop(-1)
vo = Vertice(point)
# 桶所在三角形的边
crpface = bucket.Face
hf1 = crpface.HalfEdge
hf2 = hf1.Suc
hf3 = hf2.Suc
# 撕裂面
ClipFace(crpface, vo, bucket.Points)
# 看看是否要边翻转
edges = [hf1, hf2, hf3]
while edges:
eachedge = edges[-1]
edges.pop(-1)
eachedgetwin = eachedge.Twin
if eachedgetwin:
trip1 = vo.Point
trip2 = eachedgetwin.Start.Point
trip3 = eachedgetwin.End.Point
trip4 = eachedgetwin.Suc.End.Point
if InCircle(trip1, trip2, trip3, trip4):
etfb = eachedgetwin.Face.Bucket
if len(etfb.Points) > 0:
buckets.remove(etfb)
edges.append(eachedgetwin.Pre)
edges.append(eachedgetwin.Suc)
EdgeFlipping(eachedge)
# 遍历点周围的所有边,把桶加入
ringvisit = vo.HalfEdge
currvisit = ringvisit.Twin.Suc
while currvisit != ringvisit:
currbucket = currvisit.Face.Bucket
if len(currbucket.Points) > 0:
buckets.append(currbucket)
currvisit = currvisit.Twin.Suc
currbucket = currvisit.Face.Bucket
if len(currbucket.Points) > 0:
buckets.append(currbucket)
return infedge.Face
# Insert a point into the net and set circumcircle centers, O(n)
def net_insert_point_and_set_circumcirclecenter(point, net):
# 点所在的面,O(n)
posface = get_point_posface(point, net)
vo = Vertice(point)
# 桶所在三角形的边
crpface = posface
hf1 = crpface.HalfEdge
hf2 = hf1.Suc
hf3 = hf2.Suc
# 撕裂面
ClipFace(crpface, vo, [])
# 设置外心
hf1.Face.Center = CircumcircleCenter(hf1.Start.Point, hf1.End.Point, point)
hf2.Face.Center = CircumcircleCenter(hf2.Start.Point, hf2.End.Point, point)
hf3.Face.Center = CircumcircleCenter(hf3.Start.Point, hf3.End.Point, point)
# 看看是否要边翻转,O(6)
edges = [hf1, hf2, hf3]
while edges:
eachedge = edges[-1]
edges.pop(-1)
eachedgetwin = eachedge.Twin
if eachedgetwin:
trip1 = vo.Point
trip2 = eachedgetwin.Start.Point
trip3 = eachedgetwin.End.Point
trip4 = eachedgetwin.Suc.End.Point
if InCircle(trip1, trip2, trip3, trip4):
edges.append(eachedgetwin.Pre)
edges.append(eachedgetwin.Suc)
efv1 = eachedge.Suc
efv2 = eachedgetwin.Suc
EdgeFlipping(eachedge)
efv1.Face.Center = CircumcircleCenter(trip1, trip2, trip4)
efv2.Face.Center = CircumcircleCenter(trip1, trip3, trip4)
return vo.HalfEdge.Face
# Delaunay triangulation net, O(nlogn)
class DelaunayTrianglation(VGroup):
def __init__(self, *points, **kwargs):
digest_config(self, kwargs)
self.net = ConstructNet(list(points))
self.kwargs = kwargs
VGroup.__init__(self, *[Line(*each, **kwargs) for each in self.VisitNet()])
    # Get the vertex pairs of the net, i.e. the edges expressed as coordinate pairs
    def VisitNet(self):
        return VisitNet(self.net)
    def VisitTriangles(self):
        return VGroup(*VisitTriangles(self.net), **self.kwargs)
    # Get the underlying net
    def GetNet(self):
        return self.net
    # Insert a point into the net
    def InsertPoint(self, point):
net_insert_point(point, self.net)
self.become(VGroup(*[Line(*each, **self.kwargs) for each in self.VisitNet()]))
return self
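    # Illustrative sketch of incremental insertion (the coordinates are arbitrary example data):
    #   net = DelaunayTrianglation(UL, UR, DL, DR)
    #   net.InsertPoint(ORIGIN)  # re-triangulates in place and returns self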
# Voronoi diagram, O(n)+O(nlogn)=O(nlogn)
class Voronoi(VGroup):
def __init__(self, *points, **kwargs):
digest_config(self, kwargs)
self.kwargs = kwargs
self.net = DelaunayTrianglation(*points).GetNet()
InitNetCircumcircleCenter(self.net)
self.voronoi = self.VisitVoronoi()
VGroup.__init__(self, *[Line(*each, **kwargs) for each in self.voronoi])
def VisitVoronoi(self):
return VisitVoronoi(self.net)
    # Get the underlying net
    def GetNet(self):
        return self.net
    # Insert a point into the net
    def InsertPoint(self, point):
net_insert_point_and_set_circumcirclecenter(point, self.net)
self.voronoi = self.VisitVoronoi()
self.become(VGroup(*[Line(*each, **self.kwargs) for each in self.voronoi]))
return self
# Test scene
class test(Scene):
def construct(self):
np.random.seed(2007)
points = [
[np.random.randint(-70000, 70000)/10500, np.random.randint(-38000, 38000)/10500, 0] for i in range(800)
]
#points = [UL, UP, UR, LEFT, ORIGIN, RIGHT, DL, DOWN, DR]
#points = [UL, DR, UR, DL]
dots = [Dot(p).scale(0.5) for p in points]
self.add(*dots)
start = time.perf_counter()
net = Voronoi(*points)
self.add(net)
end = time.perf_counter()
print(end - start)
'''
p1, p2, p3 = DL, UL, UR
p4 = DR
p5 = ORIGIN
p6 = UL/2
p7 = UL
p8 = UL*2
print(InTriangle(p1, p2, p3, p4))
print(InTriangle(p1, p2, p3, p5))
print(InTriangle(p1, p2, p3, p6))
print(InTriangle(p1, p2, p3, p7))
print(InTriangle(p1, p2, p3, p8))
print(InCircle(p1, p2, p3, p4))
print(InCircle(p1, p2, p3, p5))
print(InCircle(p1, p2, p3, p6))
print(InCircle(p1, p2, p3, p7))
print(InCircle(p1, p2, p3, p8))
'''
'''
infnet = InitInfNet()
he1 = infnet.HalfEdge
he2 = he1.Suc
he3 = he2.Suc
print(get_polygon_directed_area(Polygon(he1.Start.Point, he2.Start.Point, he3.Start.Point)))
'''
'''
np.random.seed(2007)
points = [
[np.random.randint(-70000, 70000)/10500, np.random.randint(-38000, 38000)/10500, 0] for i in range(1000)
]
#points = [UL, UP, UR, LEFT, ORIGIN, RIGHT, DL, DOWN, DR]
#points = [UL, DR, UR, DL]
dots = [Dot(p) for p in points]
#self.add(*dots)
start = time.perf_counter()
delaunay = ConstructNet(self, points)
net = VisitNet(delaunay)
end = time.perf_counter()
print(end - start)
self.add(net)
'''
'''
np.random.seed(2000007)
points = [
[np.random.randint(-70000, 70000)/10000, np.random.randint(-38000, 38000)/10000, 0] for i in range(7)
]
dots = [Dot(p) for p in points]
self.add(*dots)
start = time.perf_counter()
delaunay = InitInfNet(points)
#print(points[0])
net1, net2, net3 = ClipFace(delaunay, Vertice(points[0]), points[1:])
net = VisitTriangles(net1)
end = time.perf_counter()
print(end - start)
self.add(net)
'''
'''
p1, p2, p3, p4 = UL, UR*2, DR, DL*2
v1, v2, v3, v4 = Vertice(p1), Vertice(p2), Vertice(p3), Vertice(p4)
he1 = HalfEdge(v1, v2)
he2 = HalfEdge(v2, v3)
he3 = HalfEdge(v3, v4)
he4 = HalfEdge(v4, v1)
he5 = HalfEdge(v3, v1)
he6 = HalfEdge(v1, v3)
he1.Suc = he2
he2.Pre = he1
he2.Suc = he5
he5.Pre = he2
he5.Suc = he1
he1.Pre = he5
he3.Suc = he4
he4.Pre = he3
he4.Suc = he6
he6.Pre = he4
he6.Suc = he3
he3.Pre = he6
bucket1 = Bucket([UR+RIGHT/5, UR+LEFT/5])
bucket2 = Bucket([])
face1 = Face(he1)
face1.Bucket = bucket1
bucket1.Face = face1
he1.Face = face1
he2.Face = face1
he5.Face = face1
face2 = Face(he3)
face2.Bucket = bucket2
bucket2.Face = face2
he3.Face = face2
he4.Face = face2
he6.Face = face2
he5.Twin = he6
he6.Twin = he5
EdgeFlipping(he5)
start = time.perf_counter()
net = VisitInfNet(face1)
end = time.perf_counter()
print(end - start)
print(get_polygon_directed_area(Polygon(face1.HalfEdge.Start.Point, face1.HalfEdge.Suc.Start.Point,
face1.HalfEdge.Suc.Suc.Start.Point)))
print(get_polygon_directed_area(Polygon(face2.HalfEdge.Start.Point, face2.HalfEdge.Suc.Start.Point,
face2.HalfEdge.Suc.Suc.Start.Point)))
self.add(net)
'''
#p1, p2, p3, p4 = UL, UR, DR, DL
#print(InTriangle(p1, p2, p3, ORIGIN), InTriangle(p1, p2, p3, UR/2), InTriangle(p1, p2, p3, p4))
'''
start = time.perf_counter()
print(
InCircle(p1, p2, p3, p4),
InCircle(p1, p2, p3, ORIGIN),
InCircle(p1, p2, p3, p4+LEFT)
)
end = time.perf_counter()
print(end - start)
start = time.perf_counter()
print(
InCircle2(p1, p2, p3, p4),
InCircle2(p1, p2, p3, ORIGIN),
InCircle2(p1, p2, p3, p4+LEFT)
)
end = time.perf_counter()
print(end - start)
'''
self.wait()
|
zentral/contrib/monolith/migrations/0001_initial.py | janheise/zentral | 634 | 12698901 | <reponame>janheise/zentral
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-19 10:55
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Catalog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, unique=True)),
('priority', models.PositiveIntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('archived_at', models.DateTimeField(blank=True, null=True)),
],
options={
'ordering': ('-priority', 'name'),
},
),
migrations.CreateModel(
name='Manifest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('catalogs', models.ManyToManyField(to='monolith.Catalog')),
('meta_business_unit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.MetaBusinessUnit')),
],
),
migrations.CreateModel(
name='PkgInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('archived_at', models.DateTimeField(blank=True, null=True)),
('catalogs', models.ManyToManyField(to='monolith.Catalog')),
],
),
migrations.CreateModel(
name='PkgInfoCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PkgInfoName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='SubManifest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('description', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('managed_installs', models.ManyToManyField(related_name='_submanifest_managed_installs_+', to='monolith.PkgInfoName')),
('managed_uninstalls', models.ManyToManyField(related_name='_submanifest_managed_uninstalls_+', to='monolith.PkgInfoName')),
('managed_updates', models.ManyToManyField(related_name='_submanifest_managed_updates_+', to='monolith.PkgInfoName')),
('meta_business_unit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.MetaBusinessUnit')),
('optional_installs', models.ManyToManyField(related_name='_submanifest_optional_installs_+', to='monolith.PkgInfoName')),
],
),
migrations.AddField(
model_name='pkginfo',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='monolith.PkgInfoCategory'),
),
migrations.AddField(
model_name='pkginfo',
name='name',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='monolith.PkgInfoName'),
),
migrations.AddField(
model_name='manifest',
name='sub_manifests',
field=models.ManyToManyField(to='monolith.SubManifest'),
),
]
|
posthog/migrations/0161_property_defs_search.py | csmatar/posthog | 7,409 | 12698915 | # Generated by Django 3.1.12 on 2021-07-16 13:04
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("posthog", "0160_organization_domain_whitelist"),
]
operations = [
TrigramExtension(),
migrations.AddIndex(
model_name="eventdefinition",
index=GinIndex(fields=["name"], name="index_event_definition_name", opclasses=["gin_trgm_ops"]),
),
migrations.AddIndex(
model_name="propertydefinition",
index=GinIndex(fields=["name"], name="index_property_definition_name", opclasses=["gin_trgm_ops"]),
),
]
|
Algo and DSA/LeetCode-Solutions-master/Python/minimum-skips-to-arrive-at-meeting-on-time.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12698942 | <filename>Algo and DSA/LeetCode-Solutions-master/Python/minimum-skips-to-arrive-at-meeting-on-time.py
# Time: O(n^2)
# Space: O(n)
class Solution(object):
def minSkips(self, dist, speed, hoursBefore):
"""
:type dist: List[int]
:type speed: int
:type hoursBefore: int
:rtype: int
"""
def ceil(a, b):
return (a+b-1)//b
dp = [0]*((len(dist)-1)+1) # dp[i]: (min time by i skips) * speed
for i, d in enumerate(dist):
for j in reversed(xrange(len(dp))):
dp[j] = ceil(dp[j]+d, speed)*speed if i < len(dist)-1 else dp[j]+d
if j-1 >= 0:
dp[j] = min(dp[j], dp[j-1]+d)
target = hoursBefore*speed
for i in xrange(len(dist)):
if dp[i] <= target:
return i
return -1
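# Worked example (LeetCode 1883, example 1; added for illustration):
#   Solution().minSkips([1, 3, 2], 4, 2)  # -> 1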
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/cudadrv/error.py | jeikabu/lumberyard | 1,738 | 12698946 | <filename>dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/cudadrv/error.py
from __future__ import print_function, absolute_import, division
class CudaDriverError(Exception):
pass
class CudaSupportError(ImportError):
pass
class NvvmError(Exception):
def __str__(self):
return '\n'.join(map(str, self.args))
class NvvmSupportError(ImportError):
pass
|
raspberryio/userprofile/admin.py | cvautounix/raspberryio | 113 | 12698947 | from django.contrib import admin
from raspberryio.userprofile import models as userprofile
class ProfileAdmin(admin.ModelAdmin):
model = userprofile.Profile
admin.site.register(userprofile.Profile, ProfileAdmin)
|
python/fate_arch/federation/transfer_variable/_transfer_variable.py | hubert-he/FATE | 3,787 | 12698986 | <gh_stars>1000+
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import typing
from typing import Union
from fate_arch.common import Party, profile
from fate_arch.common.log import getLogger
from fate_arch.federation.transfer_variable._auth import _check_variable_auth_conf
from fate_arch.federation.transfer_variable._cleaner import IterationGC
from fate_arch.federation.transfer_variable._namespace import FederationTagNamespace
from fate_arch.session import get_latest_opened
__all__ = ["Variable", "BaseTransferVariables"]
LOGGER = getLogger()
class Variable(object):
"""
variable to distinguish federation by name
"""
__disable_auth_check = False
__instances: typing.MutableMapping[str, 'Variable'] = {}
@classmethod
def _disable_auth_check(cls):
"""
used in auth conf generation, don't call this in real application
"""
cls.__disable_auth_check = True
@classmethod
def get_or_create(cls, name, create_func: typing.Callable[[], 'Variable']) -> 'Variable':
if name not in cls.__instances:
value = create_func()
cls.__instances[name] = value
return cls.__instances[name]
def __init__(self, name: str,
src: typing.Tuple[str, ...],
dst: typing.Tuple[str, ...]):
if name in self.__instances:
raise RuntimeError(
f"{self.__instances[name]} with {name} already initialized, which expected to be an singleton object.")
if not self.__disable_auth_check:
auth_src, auth_dst = _check_variable_auth_conf(name)
if set(src) != set(auth_src) or set(dst) != set(auth_dst):
raise RuntimeError(f"Variable {name} auth error, "
f"acquired: src={src}, dst={dst}, allowed: src={auth_src}, dst={auth_dst}")
assert len(name.split(".")) >= 3, "incorrect name format, should be `module_name.class_name.variable_name`"
self._name = name
self._src = src
self._dst = dst
self._get_gc = IterationGC()
self._remote_gc = IterationGC()
self._use_short_name = True
self._short_name = self._get_short_name(self._name)
@staticmethod
def _get_short_name(name):
fix_sized = hashlib.blake2b(name.encode('utf-8'), digest_size=10).hexdigest()
_, right = name.rsplit('.', 1)
return f"hash.{fix_sized}.{right}"
# copy never create a new instance
def __copy__(self):
return self
# deepcopy never create a new instance
def __deepcopy__(self, memo):
return self
def set_preserve_num(self, n):
self._get_gc.set_capacity(n)
self._remote_gc.set_capacity(n)
return self
def disable_auto_clean(self):
self._get_gc.disable()
self._remote_gc.disable()
return self
def clean(self):
self._get_gc.clean()
self._remote_gc.clean()
def remote_parties(self,
obj,
parties: Union[typing.List[Party], Party],
suffix: Union[typing.Any, typing.Tuple] = tuple()):
"""
remote object to specified parties
Parameters
----------
obj: object or table
object or table to remote
parties: typing.List[Party]
parties to remote object/table to
suffix: str or tuple of str
suffix used to distinguish federation with in variable
Returns
-------
None
"""
session = get_latest_opened()
if isinstance(parties, Party):
parties = [parties]
if not isinstance(suffix, tuple):
suffix = (suffix,)
tag = FederationTagNamespace.generate_tag(*suffix)
for party in parties:
if party.role not in self._dst:
raise RuntimeError(f"not allowed to remote object to {party} using {self._name}")
local = session.parties.local_party.role
if local not in self._src:
raise RuntimeError(f"not allowed to remote object from {local} using {self._name}")
name = self._short_name if self._use_short_name else self._name
timer = profile.federation_remote_timer(name, self._name, tag, local, parties)
session.federation.remote(v=obj, name=name, tag=tag, parties=parties, gc=self._remote_gc)
timer.done(session.federation)
self._remote_gc.gc()
def get_parties(self,
parties: Union[typing.List[Party], Party],
suffix: Union[typing.Any, typing.Tuple] = tuple()):
"""
get objects/tables from specified parties
Parameters
----------
parties: typing.List[Party]
parties to remote object/table to
suffix: str or tuple of str
suffix used to distinguish federation with in variable
Returns
-------
list
a list of objects/tables get from parties with same order of ``parties``
"""
session = get_latest_opened()
if not isinstance(parties, list):
parties = [parties]
if not isinstance(suffix, tuple):
suffix = (suffix,)
tag = FederationTagNamespace.generate_tag(*suffix)
for party in parties:
if party.role not in self._src:
raise RuntimeError(f"not allowed to get object from {party} using {self._name}")
local = session.parties.local_party.role
if local not in self._dst:
raise RuntimeError(f"not allowed to get object to {local} using {self._name}")
name = self._short_name if self._use_short_name else self._name
timer = profile.federation_get_timer(name, self._name, tag, local, parties)
rtn = session.federation.get(name=name, tag=tag, parties=parties, gc=self._get_gc)
timer.done(session.federation)
self._get_gc.gc()
return rtn
def remote(self, obj, role=None, idx=-1, suffix=tuple()):
"""
send obj to other parties.
Args:
obj: object to be sent
role: role of parties to sent to, use one of ['Host', 'Guest', 'Arbiter', None].
The default is None, means sent values to parties regardless their party role
idx: id of party to sent to.
The default is -1, which means sent values to parties regardless their party id
suffix: additional tag suffix, the default is tuple()
"""
party_info = get_latest_opened().parties
if idx >= 0 and role is None:
raise ValueError("role cannot be None if idx specified")
# get subset of dst roles in runtime conf
if role is None:
parties = party_info.roles_to_parties(self._dst, strict=False)
else:
if isinstance(role, str):
role = [role]
parties = party_info.roles_to_parties(role)
if idx >= 0:
parties = parties[idx]
return self.remote_parties(obj=obj, parties=parties, suffix=suffix)
def get(self, idx=-1, suffix=tuple()):
"""
get obj from other parties.
Args:
idx: id of party to get from.
The default is -1, which means get values from parties regardless their party id
suffix: additional tag suffix, the default is tuple()
Returns:
object or list of object
"""
src_parties = get_latest_opened().parties.roles_to_parties(roles=self._src, strict=False)
if isinstance(idx, list):
rtn = self.get_parties(parties=[src_parties[i] for i in idx], suffix=suffix)
elif isinstance(idx, int):
rtn = self.get_parties(parties=src_parties, suffix=suffix) if idx < 0 else \
self.get_parties(parties=src_parties[idx], suffix=suffix)[0]
else:
raise ValueError(f"illegal idx type: {type(idx)}, supported types: int or list of int")
return rtn
class BaseTransferVariables(object):
def __init__(self, *args):
pass
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
@staticmethod
def set_flowid(flowid):
"""
set global namespace for federations.
Parameters
----------
flowid: str
namespace
Returns
-------
None
"""
FederationTagNamespace.set_namespace(str(flowid))
def _create_variable(self, name: str, src: typing.Iterable[str], dst: typing.Iterable[str]) -> Variable:
full_name = f"{self.__module__}.{self.__class__.__name__}.{name}"
return Variable.get_or_create(full_name, lambda: Variable(name=full_name, src=tuple(src), dst=tuple(dst)))
@staticmethod
def all_parties():
"""
get all parties
Returns
-------
list
list of parties
"""
return get_latest_opened().parties.all_parties
@staticmethod
def local_party():
"""
indicate local party
Returns
-------
Party
party this program running on
"""
return get_latest_opened().parties.local_party
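# Illustrative sketch (not from this module) of how a concrete transfer-variable class is
# usually declared on top of the primitives above; the class name, variable name and roles
# are made up for the example and would still be subject to the auth-conf check:
#   class ExampleTransferVariable(BaseTransferVariables):
#       def __init__(self, flowid=0):
#           super().__init__(flowid)
#           self.loss = self._create_variable("loss", src=["host"], dst=["guest"])
#   # host side:  variables.loss.remote(loss_value, role="guest", idx=-1, suffix=(iter_num,))
#   # guest side: losses = variables.loss.get(idx=-1, suffix=(iter_num,))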
|
crawler/spiders/cnproxy.py | xelzmm/proxy_server_crawler | 112 | 12699002 | from scrapy.spiders import Spider
from scrapy.http import Request
from scrapy.selector import Selector
from crawler.items import ProxyIPItem
class CnProxySpider(Spider):
name = "cnproxy"
allowed_domains = ["cn-proxy.com"]
start_urls = [
"http://cn-proxy.com/",
"http://cn-proxy.com/archives/218"
]
referer = "http://cn-proxy.com/"
def start_requests(self):
for item in self.start_urls:
yield Request(url=item, headers={'Referer': self.referer})
def parse(self, response):
ip_list = response.xpath('//table[@class="sortable"]/tbody/tr')
for ip in ip_list:
item = ProxyIPItem()
item['ip'] = ip.xpath('td[1]/text()').extract()[0]
item['port'] = ip.xpath('td[2]/text()').extract()[0]
item['type'] = 'http'
yield item
|
nebullvm/installers/__init__.py | nebuly-ai/nebullvm | 821 | 12699011 | # flake8: noqa
from nebullvm.installers.installers import (
install_tvm,
install_tensor_rt,
install_openvino,
install_onnxruntime,
)
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
clearly/client/client.py | lowercase00/clearly | 344 | 12699023 | <gh_stars>100-1000
import functools
from collections import namedtuple
from datetime import datetime
from typing import Any, Callable, Iterable, Optional, Tuple, Union
import grpc
from about_time import about_time
from about_time.core import HandleStats
# noinspection PyProtectedMember
from celery.states import FAILURE, PROPAGATE_STATES, REJECTED, RETRY, REVOKED, SUCCESS
from .code_highlighter import traceback_highlighter_factory, typed_code
from .display_modes import ModeTask, ModeWorker, find_mode
from ..protos.clearly_pb2 import CaptureRequest, FilterTasksRequest, FilterWorkersRequest, Null, \
PatternFilter, TaskMessage, WorkerMessage
from ..protos.clearly_pb2_grpc import ClearlyServerStub
from ..utils.colors import Colors
from ..utils.env_params import get_env_int_tuple
from ..utils.safe_compiler import safe_compile_text
from ..utils.worker_states import HEARTBEAT, ONLINE
HEADER_SIZE = 8
HEADER_PADDING, HEADER_ALIGN = ' ' * HEADER_SIZE, '>{}'.format(HEADER_SIZE)
EMPTY = Colors.DIM(':)')
DIM_NONE = Colors.CYAN_DIM('None')
TRACEBACK_HIGHLIGHTER = traceback_highlighter_factory()
Modes = namedtuple('Modes', 'tasks workers')
def set_user_friendly_errors(fn: Callable[..., None]) -> Callable[..., None]:
@functools.wraps(fn)
def inner(self: 'ClearlyClient', *args, **kwargs):
try:
fn(self, *args, **kwargs)
except grpc.RpcError as e:
if self._debug:
raise
# noinspection PyUnresolvedReferences
print('{}: {} ({})'.format(
Colors.BOLD('Server communication error'),
Colors.RED(e.details()),
Colors.DIM(e.code())
))
except UserWarning as e:
print(Colors.RED(e))
return inner
class ClearlyClient:
"""Main client object, which interfaces with the Clearly server backend, sends
commands and displays captured events.
Attributes:
_debug: if True, let the ugly errors be seen, humanizes them otherwise
_stub: the rpc communication stub instance
_modes: the current tasks and workers display modes
"""
def __init__(self, host: str = 'localhost', port: int = 12223, debug: bool = False):
"""Construct a Clearly Client instance.
Args:
host: the hostname of the server
            port: the port of the server
            debug: if True, show raw server errors instead of user friendly messages
        """
self._debug = debug
channel = grpc.insecure_channel('{}:{}'.format(host, port))
self._stub = ClearlyServerStub(channel)
self._modes = Modes(ModeTask.FAILURE, ModeWorker.WORKER)
self._modes = self._get_display_modes(get_env_int_tuple('CLI_DISPLAY_MODES', None))
def capture_tasks(self, tasks: Optional[str] = None,
mode: Union[None, int, ModeTask] = None) -> None:
"""Start capturing task events in real time, so you can instantly see exactly
what your publishers and workers are doing. Filter as much as you can to find
what you need, and don't worry as the Clearly Server will still seamlessly
handle all tasks updates.
Currently, you can filter tasks by name, uuid, routing key or state.
Insert an '!' in the first position to select those that do not match criteria.
This runs in the foreground. Press CTRL+C at any time to stop it.
Args:
tasks: a simple pattern to filter tasks
ex.: 'email' to find values containing that word anywhere
'failure|rejected|revoked' to find tasks with problem
'^trigger|^email' to find values starting with any of those words
'trigger.*123456' to find values with those words in that sequence
'!^trigger|^email' to filter values not starting with both those words
mode: an optional display mode to present data
See Also:
ClearlyClient#display_modes()
"""
self.capture(tasks=tasks, modes=mode, workers='!')
def capture_workers(self, workers: Optional[str] = None,
mode: Union[None, int, ModeWorker] = None) -> None:
"""Start capturing worker events in real time, so you can instantly see exactly
what your workers states are. Filter as much as you can to find
what you need, and don't worry as the Clearly Server will still seamlessly
handle all tasks and workers updates.
Currently, you can filter workers by hostname.
Insert an '!' in the first position to select those that do not match criteria.
This runs in the foreground. Press CTRL+C at any time to stop it.
Args:
workers: a simple pattern to filter workers
ex.: 'email' to find values containing that word anywhere
'service|priority' to find values containing any of those words
'!service|priority' to find values not containing both those words
mode: an optional display mode to present data
See Also:
ClearlyClient#display_modes()
"""
self.capture(workers=workers, modes=mode, tasks='!')
@set_user_friendly_errors
def capture(self, tasks: Optional[str] = None, workers: Optional[str] = None,
modes: Union[None, int, ModeTask, ModeWorker, Tuple] = None) -> None:
"""Start capturing all events in real time, so you can instantly see exactly
what your publishers and workers are doing. Filter as much as you can to find
what you need, and don't worry as the Clearly Server will still seamlessly
handle all tasks and workers updates.
This runs in the foreground. Press CTRL+C at any time to stop it.
Args:
tasks: the pattern to filter tasks
workers: the pattern to filter workers
modes: optional display modes to present data
send one or a tuple, as described in display_modes()
See Also:
ClearlyClient#capture_tasks()
ClearlyClient#capture_workers()
ClearlyClient#display_modes()
"""
tasks_filter = ClearlyClient._parse_pattern(tasks)
workers_filter = ClearlyClient._parse_pattern(workers)
if not tasks_filter and not workers_filter:
raise UserWarning('Nothing would be selected.')
mode = self._get_display_modes(modes)
request = CaptureRequest(
tasks_capture=tasks_filter, workers_capture=workers_filter,
)
try:
for realtime in self._stub.capture_realtime(request):
if realtime.HasField('task'):
self._display_task(realtime.task, mode.tasks)
elif realtime.HasField('worker'):
self._display_worker(realtime.worker, mode.workers)
else:
print('unknown event:', realtime)
break
except KeyboardInterrupt: # pragma: no cover
pass
@set_user_friendly_errors
def tasks(self, tasks: Optional[str] = None, mode: Union[None, int, ModeTask] = None,
limit: Optional[int] = None, reverse: bool = True) -> None:
"""Fetch current data from past tasks.
Note that the `limit` field is just a hint, it may not be accurate.
Also, the total number of tasks fetched may be slightly different from
the server `max_tasks` setting.
Args:
tasks: the pattern to filter tasks
mode: an optional display mode to present data
limit: the maximum number of events to fetch, fetches all if None or 0 (default)
reverse: if True (default), shows the most recent first
See Also:
ClearlyClient#capture_tasks()
ClearlyClient#display_modes()
"""
tasks_filter = ClearlyClient._parse_pattern(tasks)
if not tasks_filter:
raise UserWarning('Nothing would be selected.')
mode = self._get_display_modes(mode)
request = FilterTasksRequest(
tasks_filter=tasks_filter, limit=limit, reverse=reverse
)
at = about_time(self._stub.filter_tasks(request))
for task in at:
self._display_task(task, mode.tasks)
ClearlyClient._fetched_info(at)
@set_user_friendly_errors
def workers(self, workers: Optional[str] = None,
mode: Union[None, int, ModeWorker] = None) -> None:
"""Fetch current data from known workers.
Args:
workers: the pattern to filter workers
mode: an optional display mode to present data
See Also:
ClearlyClient#capture_workers()
ClearlyClient#display_modes()
"""
workers_filter = ClearlyClient._parse_pattern(workers)
if not workers_filter:
raise UserWarning('Nothing would be selected.')
mode = self._get_display_modes(mode)
request = FilterWorkersRequest(workers_filter=workers_filter)
at = about_time(self._stub.filter_workers(request))
for worker in at:
self._display_worker(worker, mode.workers)
ClearlyClient._fetched_info(at)
@set_user_friendly_errors
def seen_tasks(self) -> None:
"""Fetch a list of seen task types."""
task_types = self._stub.seen_tasks(Null()).task_types
for i, task_type in enumerate(task_types, 1):
print(Colors.DIM(i), Colors.BLUE(task_type))
@set_user_friendly_errors
def reset_tasks(self) -> None:
"""Reset stored tasks."""
self._stub.reset_tasks(Null())
print(Colors.BLUE('Ok'))
@set_user_friendly_errors
def metrics(self) -> None:
"""List some metrics about the celery cluster and Clearly itself.
Shows:
Tasks processed: actual number of tasks processed, including retries
Events processed: total number of events processed
Tasks stored: number of currently stored tasks
Workers stored: number of workers seen, including offline
"""
stats = self._stub.get_metrics(Null())
print(Colors.DIM('Processed:'),
'\ttasks', Colors.RED(stats.task_count),
'\tevents', Colors.RED(stats.event_count))
print(Colors.DIM('Stored:'),
'\ttasks', Colors.RED(stats.len_tasks),
'\tworkers', Colors.RED(stats.len_workers))
def _get_display_modes(self, modes: Union[None, int, ModeTask, ModeWorker, Tuple] = None) \
-> Modes:
if not isinstance(modes, tuple):
modes = (modes,)
elif len(modes) > 2:
raise UserWarning('At most two display modes, was sent {}'
.format(len(modes)))
modes = sorted(x for x in (find_mode(to) for to in modes) if x)
if not modes:
return self._modes
if len(modes) == 2 and isinstance(modes[0], type(modes[1])):
raise UserWarning('Two modes of the same type?')
if isinstance(modes[0], ModeTask):
return Modes(modes[0], modes[1] if len(modes) == 2 else self._modes.workers)
return Modes(self._modes.tasks, modes[0])
@set_user_friendly_errors
def display_modes(self, *modes: Union[None, int, ModeTask, ModeWorker, Tuple]) -> None:
"""Show available display modes, including the currently selected ones, or
change the current task/worker modes, sending one or two arguments of any type.
See that constant number beside modes? You can rapidly set modes with them!
Args:
modes: a display mode to set, either task or worker, or its constant number
send two to set both display modes in one call
"""
if modes:
self._modes = self._get_display_modes(modes)
modes = ('tasks', ModeTask, self._modes.tasks), ('workers', ModeWorker, self._modes.workers)
for title, klass, var_mode in modes:
print(Colors.BLUE(title))
for d in klass:
print(' {} {:8} {}: {}'.format(
d == var_mode and '*' or ' ', d.name,
Colors.ORANGE_BOLD(d.value, '>2'), Colors.YELLOW(d.description)))
@staticmethod
def _fetched_info(at: HandleStats) -> None: # pragma: no cover
print('{} {} in {} ({})'.format(
Colors.DIM('fetched:'), Colors.BOLD(at.count),
Colors.GREEN(at.duration_human), Colors.GREEN(at.throughput_human)
))
@staticmethod
def _parse_pattern(pattern: str) -> PatternFilter:
if not isinstance(pattern, (type(None), str)):
raise UserWarning('Invalid pattern.')
pattern = (pattern or '').strip()
negate = pattern.startswith('!')
pattern = pattern[negate:].strip() or '.'
if negate and pattern == '.':
return
return PatternFilter(pattern=pattern, negate=negate)
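    # Illustrative behaviour of the pattern syntax handled above (a sketch):
    #   _parse_pattern('email')  -> PatternFilter(pattern='email', negate=False)
    #   _parse_pattern('!email') -> PatternFilter(pattern='email', negate=True)
    #   _parse_pattern(None)     -> PatternFilter(pattern='.', negate=False)
    #   _parse_pattern('!')      -> None, i.e. nothing would be selected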
@staticmethod
def _display_task(task: TaskMessage, mode: ModeTask) -> None:
params, success, error = mode.spec
ts = datetime.fromtimestamp(task.timestamp)
print(Colors.DIM(ts.strftime('%H:%M:%S.%f')[:-3]), end=' ')
if not task.state:
routing_key = task.routing_key or EMPTY
print(Colors.BLUE(task.name),
Colors.MAGENTA(routing_key[len(task.name):] or '-'
if routing_key.startswith(task.name)
else routing_key),
Colors.DIM(task.uuid))
else:
print(ClearlyClient._task_state(task.state),
Colors.BLUE_DIM(task.retries),
end=' ')
print(Colors.BLUE(task.name), Colors.DIM(task.uuid))
show_outcome = (task.state in PROPAGATE_STATES and error) \
or (task.state == SUCCESS and success)
first_seen = bool(params) and not task.state
params_outcome = params is not False and show_outcome
if first_seen or params_outcome:
print(Colors.DIM('args:', HEADER_ALIGN),
typed_code(safe_compile_text(task.args), wrap=False) or EMPTY)
print(Colors.DIM('kwargs:', HEADER_ALIGN),
typed_code(safe_compile_text(task.kwargs), wrap=False) or EMPTY)
if show_outcome:
if task.state == SUCCESS:
result = safe_compile_text(task.result)
outcome = ' '.join((Colors.CYAN_DIM('<{}>'.format(task.result_meta)),
EMPTY if result is None else typed_code(result)))
else:
outcome = TRACEBACK_HIGHLIGHTER(task.traceback) \
.replace('\n', '\n' + HEADER_PADDING).strip()
print(Colors.DIM('==>', HEADER_ALIGN), outcome)
@staticmethod
def _display_worker(worker: WorkerMessage, mode: ModeWorker) -> None:
stats, = mode.spec
if worker.timestamp:
ts = datetime.fromtimestamp(worker.timestamp)
print(Colors.DIM(ts.strftime('%H:%M:%S.%f')[:-3]), end=' ')
print(ClearlyClient._worker_state(worker.state),
Colors.CYAN_DIM(worker.hostname),
Colors.YELLOW_DIM(str(worker.pid)))
if stats:
print(Colors.DIM('sw:', HEADER_ALIGN),
Colors.CYAN_DIM(' '.join((worker.sw_sys, worker.sw_ident))),
Colors.ORANGE(worker.sw_ver))
print(Colors.DIM('load:', HEADER_ALIGN),
ClearlyClient._item_list(worker.loadavg),
Colors.DIM('processed:'), worker.processed or DIM_NONE)
heartbeats = [datetime.fromtimestamp(x).strftime('%H:%M:%S.%f')[:-3]
for x in worker.heartbeats or []]
print(Colors.DIM('heartb:', HEADER_ALIGN),
'{}{}'.format(Colors.ORANGE(worker.freq),
Colors.DIM('s')),
ClearlyClient._item_list(heartbeats))
@staticmethod
def _item_list(items: Iterable[Any], color: Callable[[str], str] = str) -> str:
return '{}{}{}'.format(
Colors.MAGENTA('['),
Colors.MAGENTA(', ').join(map(color, items)),
Colors.MAGENTA(']'),
)
@staticmethod
    def _task_state(state: str) -> str:
if state == SUCCESS: # final state in BOLD
return Colors.GREEN_BOLD(state, HEADER_ALIGN)
if state in (FAILURE, REVOKED, REJECTED): # final too
return Colors.RED_BOLD(state, HEADER_ALIGN)
if state == RETRY: # transient state with a failure.
return Colors.ORANGE(state, HEADER_ALIGN)
return Colors.YELLOW(state, HEADER_ALIGN) # transient states
@staticmethod
    def _worker_state(state: str) -> str:
result = state
if state == HEARTBEAT:
return Colors.GREEN(result)
if state == ONLINE:
return Colors.GREEN_BOLD(result)
return Colors.RED_BOLD(result)
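# Minimal usage sketch (host/port below are the constructor defaults; it assumes a
# Clearly server is reachable there):
#
#   client = ClearlyClient(host='localhost', port=12223)
#   client.display_modes()                 # list available task/worker display modes
#   client.capture_tasks('email|report')   # follow matching tasks until CTRL+C
#   client.workers()                       # dump currently known workers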
|
Find_Stocks/stock_data_sms.py | vhn0912/Finance | 441 | 12699037 | import smtplib
import datetime
import numpy as np
import pandas as pd
from email.mime.text import MIMEText
from yahoo_fin import stock_info as si
from pandas_datareader import DataReader
from email.mime.multipart import MIMEMultipart
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import talib
# Define list of stocks
stock_list = ['AAPL', 'MSFT', 'AMZN']
# for the tradingview recommendation
# options are: '1m', '5m', '15m', '1h', '4h', '1D', '1W', '1M'
interval = "1M"
# Chromedriver Path
path = '/Users/shashank/Documents/Code/Python/Finance/chromedriver.exe'
# Chromedriver Options
options = Options()
options.add_argument("--headless")
webdriver = webdriver.Chrome(executable_path=path, options=options)
# Define start and end dates
start = datetime.datetime.now() - datetime.timedelta(days=365)
end = datetime.datetime.now()
def sendMessage(text):
message = text
email = ""
pas = ""
sms_gateway = ''
smtp = "smtp.gmail.com"
port = 587
server = smtplib.SMTP(smtp,port)
server.starttls()
server.login(email,pas)
msg = MIMEMultipart()
msg['From'] = email
msg['To'] = sms_gateway
msg['Subject'] = "Stock Data\n"
body = "{}\n".format(message)
msg.attach(MIMEText(body, 'plain'))
sms = msg.as_string()
server.sendmail(email,sms_gateway,sms)
server.quit()
print ('done')
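# Example values for the empty settings above (purely illustrative placeholders --
# the SMS gateway domain depends on the recipient's carrier):
#
#   email = "sender@gmail.com"
#   pas = "app-specific-password"          # placeholder
#   sms_gateway = "5551234567@vtext.com"   # e.g. Verizon's email-to-SMS domain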
def getData(list_of_stocks):
for stock in list_of_stocks:
df = DataReader(stock, 'yahoo', start, end)
print (stock)
# Current Price
price = si.get_live_price('{}'.format(stock))
price = round(price, 2)
# Sharpe Ratio
x = 5000
y = (x)
stock_df = df
stock_df['Norm return'] = stock_df['Adj Close'] / stock_df.iloc[0]['Adj Close']
allocation = float(x/y)
stock_df['Allocation'] = stock_df['Norm return'] * allocation
stock_df['Position'] = stock_df['Allocation'] * x
pos = [df['Position']]
val = pd.concat(pos, axis=1)
val.columns = ['WMT Pos']
val['Total Pos'] = val.sum(axis=1)
val.tail(1)
val['Daily Return'] = val['Total Pos'].pct_change(1)
Sharpe_Ratio = val['Daily Return'].mean() / val['Daily Return'].std()
A_Sharpe_Ratio = (252**0.5) * Sharpe_Ratio
A_Sharpe_Ratio = round(A_Sharpe_Ratio, 2)
# News Sentiment
finwiz_url = 'https://finviz.com/quote.ashx?t='
news_tables = {}
url = finwiz_url + stock
req = Request(url=url,headers={'user-agent': 'my-app/0.0.1'})
response = urlopen(req)
html = BeautifulSoup(response, features="lxml")
news_table = html.find(id='news-table')
news_tables[stock] = news_table
parsed_news = []
# Iterate through the news
for file_name, news_table in news_tables.items():
for x in news_table.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
ticker = file_name.split('_')[0]
parsed_news.append([ticker, date, time, text])
vader = SentimentIntensityAnalyzer()
columns = ['ticker', 'date', 'time', 'headline']
dataframe = pd.DataFrame(parsed_news, columns=columns)
scores = dataframe['headline'].apply(vader.polarity_scores).tolist()
scores_df = pd.DataFrame(scores)
dataframe = dataframe.join(scores_df, rsuffix='_right')
dataframe['date'] = pd.to_datetime(dataframe.date).dt.date
dataframe = dataframe.set_index('ticker')
sentiment = round(dataframe['compound'].mean(), 2)
# TradingView Recommendation
try:
#Declare variable
analysis = []
#Open tradingview's site
webdriver.get("https://s.tradingview.com/embed-widget/technical-analysis/?locale=en#%7B%22interval%22%3A%22{}%22%2C%22width%22%3A%22100%25%22%2C%22isTransparent%22%3Afalse%2C%22height%22%3A%22100%25%22%2C%22symbol%22%3A%22{}%22%2C%22showIntervalTabs%22%3Atrue%2C%22colorTheme%22%3A%22dark%22%2C%22utm_medium%22%3A%22widget_new%22%2C%22utm_campaign%22%3A%22technical-analysis%22%7D".format(interval, ticker))
webdriver.refresh()
#Wait for site to load elements
while len(webdriver.find_elements_by_class_name("speedometerSignal-pyzN--tL")) == 0:
sleep(0.1)
#Recommendation
recommendation_element = webdriver.find_element_by_class_name("speedometerSignal-pyzN--tL")
analysis.append(recommendation_element.get_attribute('innerHTML'))
#Counters
counter_elements = webdriver.find_elements_by_class_name("counterNumber-3l14ys0C")
#Sell
analysis.append(int(counter_elements[0].get_attribute('innerHTML')))
#Neutral
analysis.append(int(counter_elements[1].get_attribute('innerHTML')))
#Buy
analysis.append(int(counter_elements[2].get_attribute('innerHTML')))
signal = analysis[0]
except:
signal = 'None'
# Beta
df = DataReader(stock,'yahoo',start, end)
dfb = DataReader('^GSPC','yahoo',start, end)
rts = df.resample('M').last()
rbts = dfb.resample('M').last()
dfsm = pd.DataFrame({'s_adjclose' : rts['Adj Close'],
'b_adjclose' : rbts['Adj Close']},
index=rts.index)
dfsm[['s_returns','b_returns']] = dfsm[['s_adjclose','b_adjclose']]/\
dfsm[['s_adjclose','b_adjclose']].shift(1) -1
dfsm = dfsm.dropna()
covmat = np.cov(dfsm["s_returns"],dfsm["b_returns"])
beta = covmat[0,1]/covmat[1,1]
beta = round(beta, 2)
# Relative Strength Index
df["rsi"] = talib.RSI(df["Close"])
values = df["rsi"].tail(14)
value = values.mean()
rsi = round(value, 2)
output = ("\nTicker: " + str(stock) + "\nCurrent Price : " + str(price) + "\nSharpe Ratio: " + str(A_Sharpe_Ratio) + "\nNews Sentiment: " + str(sentiment) + "\nTradingView Rec for {}: ".format(interval) + str(signal) + "\nRelative Strength Index: " + str(rsi) + "\nBeta Value for 1 Year: " + str(beta))
sendMessage(output)
if __name__ == '__main__':
getData(stock_list) |
larq/snapshots/snap_quantized_variable_test.py | sfalkena/larq | 496 | 12699080 | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_repr[eager] 1'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=<lambda> numpy=0.0>"
snapshots['test_repr[eager] 2'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=Quantizer numpy=0.0>"
snapshots['test_repr[eager] 3'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 precision=1 numpy=0.0>"
snapshots['test_repr[graph] 1'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=<lambda>>"
snapshots['test_repr[graph] 2'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=Quantizer>"
snapshots['test_repr[graph] 3'] = "<QuantizedVariable 'x:0' shape=() dtype=float32 precision=1>"
|
examples/logical_interconnects.py | doziya/hpeOneView | 107 | 12699131 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from hpOneView.exceptions import HPOneViewException
from examples.config_loader import try_load_from_file
config = {
"ip": "<oneview_ip>",
"credentials": {
"userName": "<username>",
"password": "<password>"
}
}
# To run this example, a logical interconnect name is required
logical_interconnect_name = ""
# To install the firmware driver, a firmware driver name is required
firmware_driver_name = ""
# An Enclosure name must be set to create/delete an interconnect at a given location
enclosure_name = ""
# Define the scope name to add the logical interconnect to it
scope_name = ""
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
# Get by name
print("\nGet a logical interconnect by name")
logical_interconnect = oneview_client.logical_interconnects.get_by_name(logical_interconnect_name)
pprint(logical_interconnect)
# Get installed firmware
print("\nGet the installed firmware for a logical interconnect that matches the specified name.")
firmwares = oneview_client.firmware_drivers.get_by('name', firmware_driver_name)
firmware = None
if firmwares:
firmware = firmwares[0]
# Get scope to be added
print("\nGet the scope that matches the specified name.")
scope = oneview_client.scopes.get_by_name(scope_name)
print("\nGet the enclosure that matches the specified name.")
enclosures = oneview_client.enclosures.get_by('name', enclosure_name)
enclosure = None
if enclosures:
enclosure = enclosures[0]
# Install the firmware to a logical interconnect
if firmware:
print("\nInstall the firmware to a logical interconnect that matches the specified ID.")
firmware_to_install = dict(
command="Update",
sppUri=firmware['uri']
)
installed_firmware = oneview_client.logical_interconnects.install_firmware(firmware_to_install,
logical_interconnect['uri'])
pprint(installed_firmware)
# Performs a patch operation
# Endpoint supported only in api-versions 500 and below.
if scope and (oneview_client.api_version <= 500):
print("\nPatches the logical interconnect adding one scope to it")
updated_logical_interconnect = oneview_client.logical_interconnects.patch(logical_interconnect['uri'],
'replace',
'/scopeUris',
[scope['uri']])
pprint(updated_logical_interconnect)
# Get all logical interconnects
print("\nGet all logical interconnects")
logical_interconnects = oneview_client.logical_interconnects.get_all()
for logical_interconnect in logical_interconnects:
    print(' Name: {name}'.format(**logical_interconnect))
logical_interconnect = logical_interconnects[0]
# Get a logical interconnect by name
logical_interconnect = oneview_client.logical_interconnects.get_by_name(logical_interconnect['name'])
print("\nFound logical interconnect by name {name}.\n URI: {uri}".format(**logical_interconnect))
print("\nGet the Ethernet interconnect settings for the logical interconnect")
ethernet_settings = oneview_client.logical_interconnects.get_ethernet_settings(logical_interconnect['uri'])
pprint(ethernet_settings)
# Update the Ethernet interconnect settings for the logical interconnect
ethernet_settings = logical_interconnect['ethernetSettings'].copy()
ethernet_settings['macRefreshInterval'] = 10
logical_interconnect = oneview_client.logical_interconnects.update_ethernet_settings(logical_interconnect['uri'],
ethernet_settings,
force=True)
print("\nUpdated the ethernet settings")
print(" with attribute 'macRefreshInterval' = {macRefreshInterval}".format(**logical_interconnect['ethernetSettings']))
# Update the internal networks on the logical interconnect
ethernet_network_options = {
"name": "OneViewSDK Test Ethernet Network on Logical Interconnect",
"vlanId": 200,
"ethernetNetworkType": "Tagged",
"purpose": "General",
"smartLink": False,
"privateNetwork": False,
"connectionTemplateUri": None,
}
ethernet_networks = oneview_client.ethernet_networks.get_by('name', ethernet_network_options['name'])
if len(ethernet_networks) > 0:
ethernet_network = ethernet_networks[0]
else:
ethernet_network = oneview_client.ethernet_networks.create(ethernet_network_options)
logical_interconnect = oneview_client.logical_interconnects.update_internal_networks(logical_interconnect['uri'],
[ethernet_network['uri']])
print("\nUpdated internal networks on the logical interconnect")
print(" with attribute 'internalNetworkUris' = {internalNetworkUris}".format(**logical_interconnect))
# Get the internal VLAN IDs
print("\nGet the internal VLAN IDs for the provisioned networks on the logical interconnect")
internal_vlans = oneview_client.logical_interconnects.get_internal_vlans(logical_interconnect['uri'])
pprint(internal_vlans)
# Update the interconnect settings
# End-point supported only in api-versions 500 and below.
if oneview_client.api_version <= 500:
print("\nUpdates the interconnect settings on the logical interconnect")
interconnect_settings = {
'ethernetSettings': logical_interconnect['ethernetSettings'].copy(),
'fcoeSettings': {}
}
interconnect_settings['ethernetSettings']['macRefreshInterval'] = 7
logical_interconnect = oneview_client.logical_interconnects.update_settings(logical_interconnect['uri'],
interconnect_settings)
print("Updated interconnect settings on the logical interconnect")
print(" with attribute 'macRefreshInterval' = {macRefreshInterval}".format(**logical_interconnect['ethernetSettings']))
pprint(logical_interconnect)
# Get the SNMP configuration for the logical interconnect
print("\nGet the SNMP configuration for the logical interconnect")
snmp_configuration = oneview_client.logical_interconnects.get_snmp_configuration(logical_interconnect['uri'])
pprint(snmp_configuration)
# Update the SNMP configuration for the logical interconnect
try:
print("\nUpdate the SNMP configuration for the logical interconnect")
snmp_configuration['enabled'] = True
logical_interconnect = oneview_client.logical_interconnects.update_snmp_configuration(logical_interconnect['uri'],
snmp_configuration)
interconnect_snmp = logical_interconnect['snmpConfiguration']
print(" Updated SNMP configuration at uri: {uri}\n with 'enabled': '{enabled}'".format(**interconnect_snmp))
except HPOneViewException as e:
print(e.msg)
# Get a collection of uplink ports from the member interconnects which are eligible for assignment to an analyzer port
print("\nGet a collection of uplink ports from the member interconnects which are eligible for assignment to "
"an analyzer port on the logical interconnect")
unassigned_uplink_ports = oneview_client.logical_interconnects.get_unassigned_uplink_ports(logical_interconnect['uri'])
pprint(unassigned_uplink_ports)
# Get the port monitor configuration of a logical interconnect
print("\nGet the port monitor configuration of a logical interconnect")
monitor_configuration = oneview_client.logical_interconnects.get_port_monitor(logical_interconnect['uri'])
pprint(monitor_configuration)
# Update port monitor configuration of a logical interconnect
try:
print("\nUpdate the port monitor configuration of a logical interconnect")
monitor_configuration['enablePortMonitor'] = True
logical_interconnect = oneview_client.logical_interconnects.update_port_monitor(
logical_interconnect['uri'], monitor_configuration)
print(" Updated port monitor at uri: {uri}\n with 'enablePortMonitor': '{enablePortMonitor}'".format(
**logical_interconnect['portMonitor']))
except HPOneViewException as e:
print(e.msg)
# Get the telemetry configuration of the logical interconnect
print("\nGet the telemetry configuration of the logical interconnect")
telemetry_configuration_uri = logical_interconnect['telemetryConfiguration']['uri']
telemetry_configuration = oneview_client.logical_interconnects.get_telemetry_configuration(telemetry_configuration_uri)
pprint(telemetry_configuration)
print("\nUpdate the telemetry configuration")
telemetry_config = {
"sampleCount": 12,
"enableTelemetry": True,
"sampleInterval": 300
}
logical_interconnect_updated = oneview_client.logical_interconnects.update_telemetry_configurations(
configuration=telemetry_config, tc_id_or_uri=telemetry_configuration_uri)
pprint(logical_interconnect_updated)
# Update the configuration on the logical interconnect
print("\nUpdate the configuration on the logical interconnect")
logical_interconnect = oneview_client.logical_interconnects.update_configuration(logical_interconnect['uri'])
print(" Done.")
# Return the logical interconnect to a consistent state
print("\nReturn the logical interconnect to a consistent state")
logical_interconnect = oneview_client.logical_interconnects.update_compliance(logical_interconnect['uri'])
print(" Done. The current consistency state is {consistencyStatus}.".format(**logical_interconnect))
# Create an interconnect at a specified location
if enclosure['uri']:
print("\nCreate an interconnect at the specified location")
bay = 1
location = {
"locationEntries": [
{"type": "Enclosure", "value": enclosure['uri']},
{"type": "Bay", "value": bay}
]
}
interconnect = oneview_client.logical_interconnects.create_interconnect(location)
pprint(interconnect)
oneview_client.logical_interconnects.delete_interconnect(enclosure['uri'], bay)
print("\nThe interconnect was successfully deleted.")
# Generate the forwarding information base dump file for the logical interconnect
print("\nGenerate the forwarding information base dump file for the logical interconnect")
fwd_info_datainfo = oneview_client.logical_interconnects.create_forwarding_information_base(logical_interconnect['uri'])
pprint(fwd_info_datainfo)
# Get the forwarding information base data for the logical interconnect
print("\nGet the forwarding information base data for the logical interconnect")
fwd_information = oneview_client.logical_interconnects.get_forwarding_information_base(logical_interconnect['uri'])
pprint(fwd_information)
# Get the QoS aggregated configuration for the logical interconnect.
print("\nGets the QoS aggregated configuration for the logical interconnect.")
qos = oneview_client.logical_interconnects.get_qos_aggregated_configuration(logical_interconnect['uri'])
pprint(qos)
# Update the QOS aggregated configuration
try:
print("\nUpdate QoS aggregated settings on the logical interconnect")
qos['activeQosConfig']['configType'] = 'Passthrough'
li = oneview_client.logical_interconnects.update_qos_aggregated_configuration(logical_interconnect['uri'], qos)
pprint(li['qosConfiguration'])
except HPOneViewException as e:
print(e.msg)
|
scanners/zap-advanced/scanner/zapclient/context/__init__.py | kevin-yen/secureCodeBox | 488 | 12699148 | # SPDX-FileCopyrightText: 2021 iteratec GmbH
#
# SPDX-License-Identifier: Apache-2.0
"""
context
A Python package containing secureCodeBox specific ZAPv2 Client extensions to configure ZAP API contexts.
"""
__all__ = ['zap_context', 'zap_context_authentication']
from .zap_context import ZapConfigureContext
from .zap_context_authentication import ZapConfigureContextAuthentication |
ira/configuration.py | ShuaiW/kaggle-heart | 182 | 12699154 | import importlib
_config = None
_subconfig = None
def set_configuration(config_name):
global _config
_config = importlib.import_module("configurations.%s" % config_name)
print "loaded", _config
def set_subconfiguration(config_name):
global _subconfig
_subconfig = importlib.import_module("configurations.%s" % config_name)
print "loaded", _subconfig
def config():
return _config
def subconfig():
return _subconfig
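# Illustrative use (a sketch; assumes a module configurations/default.py exists):
#   set_configuration('default')
#   cfg = config()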
|
tests/test_commands/test_package_show.py | OliverHofkens/dephell | 1,880 | 12699163 | # built-in
import json
# external
import pytest
# project
from dephell.commands import PackageShowCommand
from dephell.config import Config
@pytest.mark.allow_hosts()
def test_package_show_command(capsys):
config = Config()
config.attach({
'level': 'WARNING',
'silent': True,
'nocolors': True,
})
command = PackageShowCommand(argv=['textdistance'], config=config)
result = command()
assert result is True
captured = capsys.readouterr()
output = json.loads(captured.out)
assert output['name'] == 'textdistance'
assert output['license'] == 'MIT'
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/o/old_division_floats.py | ciskoinch8/vimrc | 463 | 12699175 | from __future__ import print_function
print(float(1) / 2)
|
stp_core/loop/motor.py | andkononykhin/plenum | 148 | 12699185 | from stp_core.common.log import getlogger
from stp_core.loop.looper import Prodable
from stp_core.loop.startable import Status
logger = getlogger()
# TODO: move it to plenum-util repo
class Motor(Prodable):
"""
Base class for Prodable that includes status management.
Subclasses are responsible for changing status from starting to started.
"""
def __init__(self):
"""
Motor is initialized with a status of Stopped.
"""
self._status = Status.stopped
def get_status(self) -> Status:
"""
Return the current status
"""
return self._status
def set_status(self, value):
"""
Set the status of the motor to the specified value if not already set.
"""
if not self._status == value:
old = self._status
self._status = value
logger.info("{} changing status from {} to {}".format(self, old.name, value.name))
self._statusChanged(old, value)
status = property(fget=get_status, fset=set_status)
def isReady(self):
"""
Is the status in Status.ready()?
"""
return self.status in Status.ready()
def isGoing(self):
"""
Is the status in Status.going()?
"""
return self.status in Status.going()
def start(self, loop):
"""
Set the status to Status.starting
"""
self.status = Status.starting
def stop(self, *args, **kwargs):
"""
Set the status to Status.stopping and also call `onStopping`
with the provided args and kwargs.
"""
if self.status in (Status.stopping, Status.stopped):
logger.debug("{} is already {}".format(self, self.status.name))
else:
self.status = Status.stopping
self.onStopping(*args, **kwargs)
self.status = Status.stopped
def _statusChanged(self, old, new):
"""
Perform some actions based on whether this node is ready or not.
:param old: the previous status
:param new: the current status
"""
raise NotImplementedError("{} must implement this method".format(self))
def onStopping(self, *args, **kwargs):
"""
A series of actions to be performed when stopping the motor.
"""
raise NotImplementedError("{} must implement this method".format(self))
async def prod(self, limit) -> int:
raise NotImplementedError("{} must implement this method".format(self))
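# Minimal subclass sketch (illustrative only; a real Prodable would do work in
# prod() and react to status changes):
#
#   class NoopMotor(Motor):
#       def _statusChanged(self, old, new):
#           logger.debug("{}: {} -> {}".format(self, old.name, new.name))
#
#       def onStopping(self, *args, **kwargs):
#           logger.debug("{} stopping".format(self))
#
#       async def prod(self, limit) -> int:
#           return 0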
|
src/ralph/data_center/__init__.py | DoNnMyTh/ralph | 1,668 | 12699189 | default_app_config = 'ralph.data_center.apps.DataCenterConfig'
|
setup.py | zwlanpishu/MCD | 158 | 12699211 | #!/usr/bin/python
"""A setuptools-based script for distributing and installing mcd."""
# Copyright 2014, 2015, 2016, 2017 <NAME>
# This file is part of mcd.
# See `License` for details of license and warranty.
import os
import numpy as np
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.sdist import sdist as _sdist
cython_locs = [
('mcd', 'metrics_fast'),
]
with open('README.rst') as readme_file:
long_description = readme_file.read()
requires = [ line.rstrip('\n') for line in open('requirements.txt') ]
# see "A note on setup.py" in README.rst for an explanation of the dev file
dev_mode = os.path.exists('dev')
if dev_mode:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
class sdist(_sdist):
"""A cythonizing sdist command.
This class is a custom sdist command which ensures all cython-generated
C files are up-to-date before running the conventional sdist command.
"""
def run(self):
cythonize([ os.path.join(*loc)+'.pyx' for loc in cython_locs ])
_sdist.run(self)
cmdclass = {'build_ext': build_ext, 'sdist': sdist}
ext_modules = [
Extension('.'.join(loc), [os.path.join(*loc)+'.pyx'],
extra_compile_args=['-Wno-unused-but-set-variable', '-O3'],
include_dirs=[np.get_include()])
for loc in cython_locs
]
else:
cmdclass = {}
ext_modules = [
Extension('.'.join(loc), [os.path.join(*loc)+'.c'],
extra_compile_args=['-Wno-unused-but-set-variable', '-O3'],
include_dirs=[np.get_include()])
for loc in cython_locs
]
setup(
name='mcd',
version='0.5.dev1',
description='Mel cepstral distortion (MCD) computations in python.',
url='http://github.com/MattShannon/mcd',
author='<NAME>',
author_email='<EMAIL>',
license='3-clause BSD (see License file)',
packages=['mcd'],
install_requires=requires,
scripts=[
os.path.join('bin', 'dtw_synth'),
os.path.join('bin', 'get_mcd_dtw'),
os.path.join('bin', 'get_mcd_plain'),
],
long_description=long_description,
cmdclass=cmdclass,
ext_modules=ext_modules,
)
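# Typical invocations (a sketch; see "A note on setup.py" in README.rst):
#   touch dev && python setup.py build_ext --inplace   # dev mode: compile straight from the .pyx sources
#   python setup.py sdist                              # in dev mode this re-cythonizes before packaging
#   pip install .                                      # without the dev file, builds from the bundled C files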
|
dataflows/helpers/iterable_loader.py | cschloer/dataflows | 160 | 12699220 | import itertools
import decimal
import datetime
from datapackage import Package, Resource
from tableschema.storage import Storage
from .. import DataStreamProcessor
class iterable_storage(Storage):
SAMPLE_SIZE = 100
def __init__(self, iterable):
super(iterable_storage, self).__init__()
self.iterable = iterable
self.schema = None
def connect(self, name): pass
def buckets(self): pass
def create(self): pass
def delete(self): pass
def read(self): pass
def write(self): pass
def field_type(self, values):
types = set()
for value in values:
if isinstance(value, str):
types.add('string')
elif isinstance(value, bool):
types.add('boolean')
elif isinstance(value, int):
types.add('integer')
elif isinstance(value, (float, decimal.Decimal)):
types.add('number')
elif isinstance(value, list):
types.add('array')
elif isinstance(value, dict):
types.add('object')
elif isinstance(value, datetime.datetime):
types.add('datetime')
elif isinstance(value, datetime.date):
types.add('date')
elif value is None:
pass
else:
                assert False, 'Unknown Python type: %r' % value  # flag unexpected value types
if len(types) != 1:
return 'any'
else:
return types.pop()
def describe(self, _, descriptor=None):
if descriptor is not None:
return descriptor
if self.schema is None:
try:
sample = list(itertools.islice(self.iterable, self.SAMPLE_SIZE))
rec = sample[0]
self.iterable = itertools.chain(sample, self.iterable)
self.schema = dict(
fields=[
dict(name=name,
type=self.field_type([s.get(name) for s in sample]))
for name in rec.keys()
]
)
except Exception:
self.schema = dict(fields=[])
return self.schema
def iter(self, _):
return self.iterable
class iterable_loader(DataStreamProcessor):
def __init__(self, iterable, name=None):
super(iterable_loader, self).__init__()
self.iterable = iterable
self.name = name
self.exc = None
def handle_iterable(self):
mode = None
try:
for x in self.iterable:
if mode is None:
assert isinstance(x, (dict, list, tuple)), 'Bad item %r' % x
mode = dict if isinstance(x, dict) else list
assert isinstance(x, mode)
if mode == dict:
yield x
else:
yield dict(zip(('col{}'.format(i) for i in range(len(x))), x))
except Exception as e:
self.exc = e
raise
def process_datapackage(self, dp: Package):
name = self.name
if name is None:
name = 'res_{}'.format(len(dp.resources) + 1)
self.res = Resource(dict(
name=name,
path='{}.csv'.format(name)
), storage=iterable_storage(self.handle_iterable()))
self.res.infer()
if self.exc is not None:
raise self.exc
dp.descriptor.setdefault('resources', []).append(self.res.descriptor)
return dp
def process_resources(self, resources):
yield from super(iterable_loader, self).process_resources(resources)
yield self.res.iter(keyed=True)
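# Minimal usage sketch (illustrative; in the public dataflows API a plain iterable
# handed to Flow() ends up wrapped by this helper):
#
#   from dataflows import Flow, printer
#   Flow([{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}], printer()).process()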
|
ppgan/models/generators/drn.py | wangna11BD/PaddleGAN | 6,852 | 12699230 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import math
import paddle
import paddle.nn as nn
from .builder import GENERATORS
def default_conv(in_channels, out_channels, kernel_size, bias=True):
return nn.Conv2D(in_channels,
out_channels,
kernel_size,
padding=(kernel_size // 2),
bias_attr=bias)
class MeanShift(nn.Conv2D):
def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = paddle.to_tensor(rgb_std)
self.weight.set_value(paddle.eye(3).reshape([3, 3, 1, 1]))
self.weight.set_value(self.weight / (std.reshape([3, 1, 1, 1])))
mean = paddle.to_tensor(rgb_mean)
self.bias.set_value(sign * rgb_range * mean / std)
self.weight.trainable = False
self.bias.trainable = False
class DownBlock(nn.Layer):
def __init__(self,
negval,
n_feats,
n_colors,
scale,
nFeat=None,
in_channels=None,
out_channels=None):
super(DownBlock, self).__init__()
if nFeat is None:
nFeat = n_feats
if in_channels is None:
in_channels = n_colors
if out_channels is None:
out_channels = n_colors
dual_block = [
nn.Sequential(
nn.Conv2D(in_channels,
nFeat,
kernel_size=3,
stride=2,
padding=1,
bias_attr=False), nn.LeakyReLU(negative_slope=negval))
]
for _ in range(1, int(math.log2(scale))):
dual_block.append(
nn.Sequential(
nn.Conv2D(nFeat,
nFeat,
kernel_size=3,
stride=2,
padding=1,
bias_attr=False),
nn.LeakyReLU(negative_slope=negval)))
dual_block.append(
nn.Conv2D(nFeat,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False))
self.dual_module = nn.Sequential(*dual_block)
def forward(self, x):
x = self.dual_module(x)
return x
## Channel Attention (CA) Layer
class CALayer(nn.Layer):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
# global average pooling: feature --> point
self.avg_pool = nn.AdaptiveAvgPool2D(1)
# feature channel downscale and upscale --> channel weight
self.conv_du = nn.Sequential(
nn.Conv2D(channel,
channel // reduction,
1,
padding=0,
bias_attr=True), nn.ReLU(),
nn.Conv2D(channel // reduction,
channel,
1,
padding=0,
bias_attr=True), nn.Sigmoid())
def forward(self, x):
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
class RCAB(nn.Layer):
def __init__(self,
conv,
n_feat,
kernel_size,
reduction=16,
bias=True,
bn=False,
act=nn.ReLU(),
res_scale=1):
super(RCAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
if bn: modules_body.append(nn.BatchNorm2D(n_feat))
if i == 0: modules_body.append(act)
modules_body.append(CALayer(n_feat, reduction))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x)
res += x
return res
class Upsampler(nn.Sequential):
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(conv(n_feats, 4 * n_feats, 3, bias))
m.append(nn.PixelShuffle(2))
if bn: m.append(nn.BatchNorm2D(n_feats))
if act == 'relu':
m.append(nn.ReLU())
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
elif scale == 3:
m.append(conv(n_feats, 9 * n_feats, 3, bias))
m.append(nn.PixelShuffle(3))
if bn: m.append(nn.BatchNorm2D(n_feats))
if act == 'relu':
m.append(nn.ReLU())
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
@GENERATORS.register()
class DRNGenerator(nn.Layer):
"""DRNGenerator"""
def __init__(
self,
scale,
n_blocks=30,
n_feats=16,
n_colors=3,
rgb_range=255,
negval=0.2,
kernel_size=3,
conv=default_conv,
):
super(DRNGenerator, self).__init__()
self.scale = scale
self.phase = len(scale)
act = nn.ReLU()
self.upsample = nn.Upsample(scale_factor=max(scale),
mode='bicubic',
align_corners=False)
rgb_mean = (0.4488, 0.4371, 0.4040)
rgb_std = (1.0, 1.0, 1.0)
self.sub_mean = MeanShift(rgb_range, rgb_mean, rgb_std)
self.head = conv(n_colors, n_feats, kernel_size)
self.down = [
DownBlock(negval, n_feats, n_colors, 2, n_feats * pow(2, p),
n_feats * pow(2, p), n_feats * pow(2, p + 1))
for p in range(self.phase)
]
self.down = nn.LayerList(self.down)
up_body_blocks = [[
RCAB(conv, n_feats * pow(2, p), kernel_size, act=act)
for _ in range(n_blocks)
] for p in range(self.phase, 1, -1)]
up_body_blocks.insert(0, [
RCAB(conv, n_feats * pow(2, self.phase), kernel_size, act=act)
for _ in range(n_blocks)
])
# The fisrt upsample block
up = [[
Upsampler(conv, 2, n_feats * pow(2, self.phase), act=False),
conv(n_feats * pow(2, self.phase),
n_feats * pow(2, self.phase - 1),
kernel_size=1)
]]
# The rest upsample blocks
for p in range(self.phase - 1, 0, -1):
up.append([
Upsampler(conv, 2, 2 * n_feats * pow(2, p), act=False),
conv(2 * n_feats * pow(2, p),
n_feats * pow(2, p - 1),
kernel_size=1)
])
self.up_blocks = nn.LayerList()
for idx in range(self.phase):
self.up_blocks.append(nn.Sequential(*up_body_blocks[idx], *up[idx]))
# tail conv that output sr imgs
tail = [conv(n_feats * pow(2, self.phase), n_colors, kernel_size)]
for p in range(self.phase, 0, -1):
tail.append(conv(n_feats * pow(2, p), n_colors, kernel_size))
self.tail = nn.LayerList(tail)
self.add_mean = MeanShift(rgb_range, rgb_mean, rgb_std, 1)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
# upsample x to target sr size
x = self.upsample(x)
# preprocess
x = self.sub_mean(x)
x = self.head(x)
# down phases,
copies = []
for idx in range(self.phase):
copies.append(x)
x = self.down[idx](x)
# up phases
sr = self.tail[0](x)
sr = self.add_mean(sr)
results = [sr]
for idx in range(self.phase):
# upsample to SR features
x = self.up_blocks[idx](x)
# concat down features and upsample features
x = paddle.concat((x, copies[self.phase - idx - 1]), 1)
# output sr imgs
sr = self.tail[idx + 1](x)
sr = self.add_mean(sr)
results.append(sr)
return results
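# Minimal construction sketch (shapes and scales below are illustrative):
#
#   import paddle
#   net = DRNGenerator(scale=[2, 4])
#   lr = paddle.rand([1, 3, 48, 48]) * 255.0
#   outs = net(lr)   # list of outputs, coarsest first; the last one is the full-scale SR image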
|
backend/logs/analyze_analytics_logs.py | sleepingAnt/viewfinder | 645 | 12699231 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Run analysis over all merged user analytics logs.
Computes speed percentiles for full asset scans (only those lasting more than 1s for more accurate numbers).
Automatically finds the list of merged logs in S3. If --start_date=YYYY-MM-DD is specified, only analyze logs
starting from a week before that date (we give user logs that much time to get uploaded).
Usage:
# Analyze all logs.
python -m viewfinder.backend.logs.analyze_analytics_logs
# Analyze logs from a specific start date.
python -m viewfinder.backend.logs.analyze_analytics_logs --start_date=2012-12-15
Other options:
-require_lock: default=True: hold the job:analyze_analytics lock during processing.
-smart_scan: default=False: determine the start date from previous run summaries.
-hours_between_runs: default=0: don't run if last successful run started less than this many hours ago.
"""
__author__ = '<EMAIL> (<NAME>)'
import cStringIO
import json
import logging
import numpy
import os
import sys
import time
import traceback
from collections import defaultdict, Counter
from tornado import gen, options
from viewfinder.backend.base import constants, main, statistics, util
from viewfinder.backend.base.dotdict import DotDict
from viewfinder.backend.db import db_client
from viewfinder.backend.db.job import Job
from viewfinder.backend.logs import logs_util
from viewfinder.backend.storage.object_store import ObjectStore
from viewfinder.backend.storage import store_utils
# TODO(marc): automatic date detection (eg: find latest metric entry and process from 30 days before).
options.define('start_date', default=None, help='Start date (filename start key). May be overridden by smart_scan.')
options.define('dry_run', default=True, help='Do not update dynamodb metrics table')
options.define('compute_today', default=False, help='Do not compute statistics for today, logs will be partial')
options.define('require_lock', type=bool, default=True,
help='attempt to grab the job:analyze_analytics lock before running. Exit if acquire fails.')
options.define('smart_scan', type=bool, default=False,
help='determine start_date from previous successful runs.')
options.define('hours_between_runs', type=int, default=0,
help='minimum time since start of last successful run (with dry_run=False)')
class DayStats(object):
def __init__(self, day):
self.day = day
self._scan_durations = []
self._long_scan_speeds = []
self._photos_scanned = []
# Number of unique users recording an event on this day.
self.event_users = Counter()
# Number of occurrences of an event aggregated across all users.
self.total_events = Counter()
def AddScan(self, version, photos, duration):
self._scan_durations.append(duration)
self._photos_scanned.append(photos)
if duration > 1.0:
self._long_scan_speeds.append(photos / duration)
def AddEvents(self, counters):
for name, count in counters.iteritems():
self.total_events[name] += count
self.event_users[name] += 1
def PrintSummary(self):
logging.info('Day: %s\n %s' % (self.day, statistics.FormatStats(self._long_scan_speeds, percentiles=[90,95,99])))
def ScanDurationPercentile(self, percentile):
return numpy.percentile(self._scan_durations, percentile)
def LongScanSpeedPercentile(self, percentile):
return numpy.percentile(self._long_scan_speeds, percentile)
def PhotosScannedPercentile(self, percentile):
return numpy.percentile(self._photos_scanned, percentile)
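# Illustrative use of DayStats (a sketch):
#   stats = DayStats('2013-01-15')
#   stats.AddScan(version='1.3', photos=1200, duration=4.0)   # a 300 photos/sec full scan
#   stats.AddEvents({'/ui/share': 2})
#   stats.LongScanSpeedPercentile(50)   # -> 300.0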
@gen.engine
def ProcessFiles(merged_store, filenames, callback):
"""Fetch and process each file contained in 'filenames'."""
@gen.engine
def _ProcessOneFile(contents, day_stats):
"""Iterate over the contents of a processed file: one entry per line. Increment stats for specific entries."""
buf = cStringIO.StringIO(contents)
buf.seek(0)
ui_events = Counter()
while True:
line = buf.readline()
if not line:
break
parsed = json.loads(line)
if not parsed:
continue
if 'version' not in parsed:
continue
# TODO(marc): lookup the user's device ID in dynamodb and get device model.
payload = parsed['payload']
if 'name' in payload:
if payload['name'] == '/assets/scan' and payload['type'] == 'full':
day_stats.AddScan(parsed['version'], payload['num_scanned'], payload['elapsed'])
elif payload['name'].startswith('/ui/'):
ui_events[payload['name']] += 1
if ui_events:
ui_events['/ui/anything'] += 1
day_stats.AddEvents(ui_events)
buf.close()
today = util.NowUTCToISO8601()
# Group filenames by day.
files_by_day = defaultdict(list)
for filename in filenames:
_, day, user = filename.split('/')
if options.options.compute_today or today != day:
files_by_day[day].append(filename)
# Compute per-day totals. Toss them into a list, we'll want it sorted.
stats_by_day = {}
for day in sorted(files_by_day.keys()):
# We don't really need to process days in-order, but it's nicer.
files = files_by_day[day]
day_stats = DayStats(day)
for f in files:
contents = ''
try:
contents = yield gen.Task(merged_store.Get, f)
except Exception as e:
logging.error('Error fetching file %s: %r' % (f, e))
continue
_ProcessOneFile(contents, day_stats)
if len(day_stats._long_scan_speeds) == 0:
continue
dd = DotDict()
for p in [1, 5, 10, 25, 50, 75, 90, 95, 99]:
dd['user_analytics.scans_gt1s_speed_percentile.%.2d' % p] = day_stats.LongScanSpeedPercentile(p)
dd['user_analytics.scans_duration_percentile.%.2d' % p] = day_stats.ScanDurationPercentile(p)
dd['user_analytics.scans_num_photos_percentile.%.2d' % p] = day_stats.PhotosScannedPercentile(p)
dd['user_analytics.ui.event_users'] = day_stats.event_users
dd['user_analytics.ui.total_events'] = day_stats.total_events
stats_by_day[day] = dd
callback(stats_by_day)
@gen.engine
def GetMergedLogsFileList(merged_store, marker, callback):
"""Fetch the list of file names from S3."""
registry_dir = os.path.join(logs_util.UserAnalyticsLogsPaths.kMergedLogsPrefix,
logs_util.UserAnalyticsLogsPaths.kRegistryDir)
def _WantFile(filename):
return not filename.startswith(registry_dir)
base_path = logs_util.UserAnalyticsLogsPaths.kMergedLogsPrefix + '/'
marker = os.path.join(base_path, marker) if marker is not None else None
file_list = yield gen.Task(store_utils.ListAllKeys, merged_store, prefix=base_path, marker=marker)
files = [f for f in file_list if _WantFile(f)]
files.sort()
logging.info('found %d merged log files, analyzing %d' % (len(file_list), len(files)))
callback(files)
@gen.engine
def RunOnce(client, job, callback):
"""Get list of files and call processing function."""
merged_store = ObjectStore.GetInstance(logs_util.UserAnalyticsLogsPaths.MERGED_LOGS_BUCKET)
start_date = options.options.start_date
if options.options.smart_scan:
# Search for successful full-scan run in the last week.
last_run = yield gen.Task(job.FindLastSuccess, with_payload_key='stats.last_day')
if last_run is None:
logging.info('No previous successful scan found, rerun with --start_date')
callback(None)
return
last_run_start = last_run['start_time']
if util.HoursSince(last_run_start) < options.options.hours_between_runs:
logging.info('Last successful run started at %s, less than %d hours ago; skipping.' %
(time.asctime(time.localtime(last_run_start)), options.options.hours_between_runs))
callback(None)
return
last_day = last_run['stats.last_day']
    # Set scan_start to a week before the last processed day: merged user logs can arrive late,
    # and we give them that much time to get uploaded (see the module docstring).
start_time = util.ISO8601ToUTCTimestamp(last_day, hour=12) - constants.SECONDS_PER_WEEK
start_date = util.TimestampUTCToISO8601(start_time)
logging.info('Last successful analyze_analytics run (%s) scanned up to %s, setting analysis start date to %s' %
(time.asctime(time.localtime(last_run_start)), last_day, start_date))
# Fetch list of merged logs.
files = yield gen.Task(GetMergedLogsFileList, merged_store, start_date)
day_stats = yield gen.Task(ProcessFiles, merged_store, files)
# Write per-day stats to dynamodb.
if len(day_stats) > 0:
hms = logs_util.kDailyMetricsTimeByLogType['analytics_logs']
yield gen.Task(logs_util.UpdateMetrics, client, day_stats, dry_run=options.options.dry_run, hms_tuple=hms)
last_day = sorted(day_stats.keys())[-1]
callback(last_day)
else:
callback(None)
@gen.engine
def _Start(callback):
"""Grab a lock on job:analyze_analytics and call RunOnce. If we get a return value, write it to the job summary."""
client = db_client.DBClient.Instance()
job = Job(client, 'analyze_analytics')
if options.options.require_lock:
got_lock = yield gen.Task(job.AcquireLock)
if got_lock == False:
logging.warning('Failed to acquire job lock: exiting.')
callback()
return
result = None
job.Start()
try:
result = yield gen.Task(RunOnce, client, job)
except:
# Failure: log run summary with trace.
typ, val, tb = sys.exc_info()
msg = ''.join(traceback.format_exception(typ, val, tb))
logging.info('Registering failed run with message: %s' % msg)
yield gen.Task(job.RegisterRun, Job.STATUS_FAILURE, failure_msg=msg)
else:
if result is not None and not options.options.dry_run:
# Successful run with data processed and not in dry-run mode: write run summary.
stats = DotDict()
stats['last_day'] = result
logging.info('Registering successful run with stats: %r' % stats)
yield gen.Task(job.RegisterRun, Job.STATUS_SUCCESS, stats=stats)
finally:
yield gen.Task(job.ReleaseLock)
callback()
if __name__ == '__main__':
sys.exit(main.InitAndRun(_Start))
|
src/picktrue/gui/pinry_importer.py | winkidney/PickTrue | 118 | 12699235 | import time
import tkinter as tk
from picktrue.gui.toolkit import ProgressBar, StatusBar, NamedInput, FileBrowse, info, FilePathBrowse, PasswordInput
from picktrue.pinry.importer import PinryImporter
from picktrue.utils import run_as_thread
class PinryImporterGUI(tk.Frame):
    title = "Import to Pinry"
def __init__(self, *args, **kwargs):
super(PinryImporterGUI, self).__init__(*args, **kwargs)
        self._url = NamedInput(self, name="Pinry deployment URL")
        self._min_size = NamedInput(self, name="Minimum upload size in KB (smaller files are skipped; leave empty for no limit)")
        self._username = NamedInput(self, name="Username")
        self._password = PasswordInput(self, name="Password")
        self._csv_file = FilePathBrowse(self, store_name="import_csv", text_label="CSV file path")
self.btn_group = self.build_buttons()
self._importer = None
self.progress = ProgressBar(self)
self.status = StatusBar(self)
self.start_update()
def _get_importer(self):
min_size = self._min_size.get_input()
if min_size:
try:
min_size = int(min_size)
except Exception:
                info("Minimum upload size must be an integer")
else:
min_size = None
return PinryImporter(
base_url=self._url.get_input(),
username=self._username.get_input(),
password=self._password.get_input(),
min_upload_size_kb=min_size,
)
def build_buttons(self):
btn_args = dict(
height=1,
)
btn_group = tk.Frame(self)
buttons = [
tk.Button(
btn_group,
text=text,
command=command,
**btn_args
)
for text, command in (
("测试登录", self._test_login),
("开始导入", self._start_import),
)
]
for index, btn in enumerate(buttons):
btn.grid(column=index, row=0, sticky=tk.N)
btn_group.pack(fill=tk.BOTH, expand=1)
return btn_group
def _test_login(self):
importer = self._get_importer()
if importer.test_login() is True:
info("登录成功")
else:
info("情检查用户名密码以及部署路径是否可访问")
def _start_import(self):
self._importer = self._get_importer()
run_as_thread(
self._importer.do_import,
self._csv_file.get_path(),
name="import2pinry"
)
def start_update(self):
run_as_thread(self._update_loop)
def _update_loop(self):
while True:
time.sleep(0.1)
self.update_progress()
def update_progress(self):
if self._importer is not None:
self.progress.update_progress(
self._importer.done_pins,
self._importer.total_pins,
)
self.status.set(self._importer.status_text())
else:
self.progress.update_progress(0, 0)
self.status.set("待机...")
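# Hedged usage sketch: the frame above is a plain tkinter widget, so it can be hosted
# in any Tk root. The window title and packing options below are illustrative
# assumptions, not values taken from the rest of PickTrue.
if __name__ == "__main__":
    root = tk.Tk()
    root.title(PinryImporterGUI.title)
    gui = PinryImporterGUI(master=root)
    gui.pack(fill=tk.BOTH, expand=1)
    root.mainloop()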
|
datapackage_pipelines/lib/add_resource.py | gperonato/datapackage-pipelines | 109 | 12699238 | from datapackage_pipelines.wrapper import ingest, spew
import os
from datapackage_pipelines.utilities.resources import PATH_PLACEHOLDER, PROP_STREAMED_FROM
parameters, datapackage, res_iter = ingest()
if datapackage is None:
datapackage = {}
datapackage.setdefault('resources', [])
for param in ['url', 'name']:
assert param in parameters, \
"You must define {} in your parameters".format(param)
url = parameters.pop('url')
if url.startswith('env://'):
env_var = url[6:]
env_url = os.environ.get(env_var)
assert env_url is not None, \
"Missing Value - " \
"Please set your '%s' environment variable" % env_var
url = env_url
if 'path' not in parameters:
parameters['path'] = PATH_PLACEHOLDER
parameters[PROP_STREAMED_FROM] = url
datapackage['resources'].append(parameters)
spew(datapackage, res_iter)
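# Hedged usage sketch: the processor is configured through the 'parameters' dict
# returned by ingest(); only 'url' and 'name' are asserted above, and every other key
# is passed straight through onto the resource descriptor. The values below are
# illustrative assumptions.
_EXAMPLE_PARAMETERS = {
    'name': 'my-resource',           # required
    'url': 'env://MY_RESOURCE_URL',  # required: a plain URL, or env:// to read it from the environment
    'format': 'csv',                 # optional: forwarded as-is to the resource descriptor
}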
|
enaml/qt/docking/dock_overlay.py | xtuzy/enaml | 1,080 | 12699254 | <filename>enaml/qt/docking/dock_overlay.py<gh_stars>1000+
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Atom, Bool, Int, Float, Typed
from enaml.qt.QtCore import Qt, QPoint, QRect, QTimer, QPropertyAnimation
from enaml.qt.QtGui import QPainter
from enaml.qt.QtWidgets import QWidget, QStyle, QStyleOption
from .q_guide_rose import QGuideRose
from .q_dock_bar import QDockBar
from .q_dock_container import QDockContainer
from .q_dock_splitter import QDockSplitterHandle
from .q_dock_tab_widget import QDockTabWidget
class QDockRubberBand(QWidget):
""" A custom rubber band widget for use with the dock overlay.
This class is stylable from Qt style sheets.
"""
def __init__(self, parent=None):
""" Initialize a QDockRubberBand.
Parameters
----------
parent : QWidget, optional
The parent of the dock rubber band.
"""
super(QDockRubberBand, self).__init__(parent)
self.setWindowFlags(Qt.ToolTip | Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
def paintEvent(self, event):
""" Handle the paint event for the dock rubber band.
"""
painter = QPainter(self)
opt = QStyleOption()
opt.initFrom(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
class DockOverlay(Atom):
""" An object which manages the overlays for dock widgets.
This manager handles the state transitions for the overlays. The
transitions are performed on a slightly-delayed timer to provide
a more fluid user interaction experience.
"""
# PySide requires weakrefs for using bound methods as slots.
# PyQt doesn't, but executes unsafe code if not using weakrefs.
__slots__ = '__weakref__'
#: The size of the rubber band when docking on the border, in px.
border_size = Int(60)
#: The delay to use when triggering the rose timer, in ms.
rose_delay = Int(30)
#: The delay to use when triggering the band timer, in ms.
band_delay = Int(50)
#: The target opacity to use when making the band visible.
band_target_opacity = Float(1.0)
    #: The duration of the band visibility animation, in ms.
    band_vis_duration = Int(100)
    #: The duration of the band geometry animation, in ms.
band_geo_duration = Int(100)
#: The overlayed guide rose.
_rose = Typed(QGuideRose, ())
#: The overlayed rubber band.
_band = Typed(QDockRubberBand, ())
#: The property animator for the rubber band geometry.
_geo_animator = Typed(QPropertyAnimation)
#: The property animator for the rubber band visibility.
_vis_animator = Typed(QPropertyAnimation)
#: The target mode to apply to the rose on timeout.
_target_rose_mode = Int(QGuideRose.Mode.NoMode)
#: The target geometry to apply to rubber band on timeout.
_target_band_geo = Typed(QRect, factory=lambda: QRect())
#: The value of the last guide which was hit in the rose.
_last_guide = Int(-1)
#: A flag indicating whether it is safe to show the band.
_show_band = Bool(False)
#: The hover position of the mouse to use for state changes.
_hover_pos = Typed(QPoint, factory=lambda: QPoint())
#: The timer for changing the state of the rose.
_rose_timer = Typed(QTimer)
#: The timer for changing the state of the band.
_band_timer = Typed(QTimer)
def __init__(self, parent=None):
""" Initialize a DockOverlay.
Parameters
----------
parent : QWidget, optional
The parent of the overlay. This will be used as the parent
widget for the dock rubber band. The overlay guides do not
have a parent.
"""
self._band = QDockRubberBand(parent)
#--------------------------------------------------------------------------
# Default Value Methods
#--------------------------------------------------------------------------
def _default__rose_timer(self):
""" Create the default timer for the rose state changes.
"""
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self._on_rose_timer)
return timer
def _default__band_timer(self):
""" Create the default timer for the band state changes.
"""
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self._on_band_timer)
return timer
def _default__geo_animator(self):
""" Create the default property animator for the rubber band.
"""
p = QPropertyAnimation(self._band, b'geometry')
p.setDuration(self.band_geo_duration)
return p
def _default__vis_animator(self):
""" Create the default property animator for the rubber band.
"""
p = QPropertyAnimation(self._band, b'windowOpacity')
p.setDuration(self.band_vis_duration)
p.finished.connect(self._on_vis_finished)
return p
#--------------------------------------------------------------------------
# Timer Handlers
#--------------------------------------------------------------------------
def _on_rose_timer(self):
""" Handle the timeout event for the internal rose timer.
This handler transitions the rose to its new state and updates
the position of the rubber band.
"""
rose = self._rose
rose.setMode(self._target_rose_mode)
rose.mouseOver(self._hover_pos)
self._show_band = True
self._update_band_state()
def _on_band_timer(self):
""" Handle the timeout event for the internal band timer.
This handler updates the position of the rubber band.
"""
self._update_band_state()
#--------------------------------------------------------------------------
# Animation Handlers
#--------------------------------------------------------------------------
def _on_vis_finished(self):
""" Handle the 'finished' signal from the visibility animator.
        This handler will hide the rubber band when its opacity is 0.
"""
band = self._band
if band.windowOpacity() == 0.0:
band.hide()
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _update_band_state(self):
""" Refresh the geometry and visible state of the rubber band.
The state will be updated using animated properties to provide
a nice fluid user experience.
"""
        # A valid geometry indicates that the rubber band should be shown on
# the screen. An invalid geometry means it should be hidden. If
# the validity is changed during animation, the animators are
# restarted using the current state as their starting point.
band = self._band
geo = self._target_band_geo
if geo.isValid() and self._show_band:
# If the band is already hidden, the geometry animation can
# be bypassed since the band can be located anywhere.
if band.isHidden():
band.setGeometry(geo)
self._start_vis_animator(self.band_target_opacity)
self._rose.raise_()
else:
self._start_vis_animator(self.band_target_opacity)
self._start_geo_animator(geo)
else:
self._start_vis_animator(0.0)
def _start_vis_animator(self, opacity):
""" (Re)start the visibility animator.
Parameters
----------
opacity : float
The target opacity of the target object.
"""
animator = self._vis_animator
if animator.state() == animator.Running:
animator.stop()
target = animator.targetObject()
if target.isHidden() and opacity != 0.0:
target.setWindowOpacity(0.0)
target.show()
animator.setStartValue(target.windowOpacity())
animator.setEndValue(opacity)
animator.start()
def _start_geo_animator(self, geo):
""" (Re)start the visibility animator.
Parameters
----------
geo : QRect
The target geometry for the target object.
"""
animator = self._geo_animator
if animator.state() == animator.Running:
animator.stop()
target = animator.targetObject()
animator.setStartValue(target.geometry())
animator.setEndValue(geo)
animator.start()
def _band_geometry(self, widget, guide):
""" Compute the geometry for an overlay rubber band.
Parameters
----------
widget : QWidget
The widget to which the band geometry should be fit.
guide : Guide
The rose guide under the mouse. This determines how the
geometry of the band will be fit to the widget.
"""
Guide = QGuideRose.Guide
if guide == Guide.NoGuide:
return QRect()
# border hits
border_size = self.border_size
rect = widget.contentsRect()
if guide == Guide.BorderNorth:
rect.setHeight(border_size)
elif guide == Guide.BorderEast:
rect.setLeft(rect.right() + 1 - border_size)
elif guide == Guide.BorderSouth:
rect.setTop(rect.bottom() + 1 - border_size)
elif guide == Guide.BorderWest:
rect.setWidth(border_size)
# For the next 4 conditions `widget` will be a QDockArea
elif guide == Guide.BorderExNorth:
bar_rect = widget.dockBarGeometry(QDockBar.North)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setHeight(border_size / 2)
elif guide == Guide.BorderExEast:
bar_rect = widget.dockBarGeometry(QDockBar.East)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setLeft(rect.right() + 1 - border_size / 2)
elif guide == Guide.BorderExSouth:
bar_rect = widget.dockBarGeometry(QDockBar.South)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setTop(rect.bottom() + 1 - border_size / 2)
elif guide == Guide.BorderExWest:
bar_rect = widget.dockBarGeometry(QDockBar.West)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setWidth(border_size / 2)
# compass hits
elif guide == Guide.CompassNorth:
rect.setHeight(rect.height() / 3)
elif guide == Guide.CompassEast:
rect.setLeft(2 * rect.width() / 3)
elif guide == Guide.CompassSouth:
rect.setTop(2 * rect.height() / 3)
elif guide == Guide.CompassWest:
rect.setWidth(rect.width() / 3)
elif guide == Guide.CompassCenter:
pass # nothing to do
elif guide == Guide.CompassExNorth:
pass # nothing to do
elif guide == Guide.CompassExEast:
pass # nothing to do
elif guide == Guide.CompassExSouth:
pass # nothing to do
elif guide == Guide.CompassExWest:
pass # nothing to do
# splitter handle hits
elif guide == Guide.SplitHorizontal:
wo, r = divmod(border_size - rect.width(), 2)
rect.setWidth(2 * (wo + r) + rect.width())
rect.moveLeft(rect.x() - (wo + r))
elif guide == Guide.SplitVertical:
ho, r = divmod(border_size - widget.height(), 2)
rect.setHeight(2 * (ho + r) + rect.height())
rect.moveTop(rect.y() - (ho + r))
# single center
elif guide == Guide.AreaCenter:
pass # nothing to do
# default no-op
else:
return QRect()
pt = widget.mapToGlobal(rect.topLeft())
return QRect(pt, rect.size())
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def guide_at(self, pos):
""" Get the dock guide for a given position.
Parameters
----------
pos : QPoint
The position of interest, expressed in global coordinates.
Returns
-------
result : Guide
The guide enum which lies under the given point.
"""
rose = self._rose
pos = rose.mapFromGlobal(pos)
return rose.guideAt(pos)
def hide(self):
""" Hide the overlay.
This method will stop the timers and set the visibility of the
guide rose and the rubber band to False.
"""
self._rose_timer.stop()
self._band_timer.stop()
self._rose.hide()
self._band.hide()
def mouse_over_widget(self, widget, pos, empty=False):
""" Update the overlays based on the mouse position.
This handler should be invoked when the mouse hovers over a
single widget (such as a floating dock container) as opposed to
an area of docked widgets. The guide rose will be displayed in
the center of the widget with no border guides.
Parameters
----------
widget : QWidget
The widget under the mouse.
pos : QPoint
The hover position, expressed in the local coordinates of
the widget.
empty : bool, optional
Whether the widget represents an empty widget. If this is
True, a single center guide will be shown instead of the
guide rose.
"""
Mode = QGuideRose.Mode
rose = self._rose
target_mode = Mode.AreaCenter if empty else Mode.CompassEx
self._target_rose_mode = target_mode
if rose.mode() != target_mode:
rose.setMode(Mode.NoMode)
self._rose_timer.start(self.rose_delay)
self._band_timer.start(self.band_delay)
origin = widget.mapToGlobal(QPoint(0, 0))
geo = QRect(origin, widget.size())
dirty = rose.geometry() != geo
if dirty:
rose.hide()
rose.setMode(Mode.NoMode)
rose.setGeometry(geo)
guide = rose.guideAt(pos, target_mode)
if dirty or guide != self._last_guide:
self._last_guide = guide
self._target_band_geo = self._band_geometry(widget, guide)
self._band_timer.start(self.band_delay)
rose.setCenterPoint(QPoint(geo.width() / 2, geo.height() / 2))
rose.mouseOver(pos)
rose.show()
def mouse_over_area(self, area, widget, pos):
""" Update the overlays based on the mouse position.
Parameters
----------
area : QDockArea
The dock area which contains the dock items onto which
the overlay will be displayed.
widget : QWidget
The dock widget in the area which is under the mouse, or
None if there is no relevant widget.
pos : QPoint
The hover position, expressed in the local coordinates of
the overlayed dock area.
"""
Mode = QGuideRose.Mode
Guide = QGuideRose.Guide
pane = area.centralPane()
pos = pane.mapFrom(area, pos)
if widget is None:
if area.centralWidget() is None:
self.mouse_over_widget(pane, pos, empty=True)
return
# Compute the target mode for the guide rose based on the dock
# widget which lies under the mouse position.
target_mode = Mode.Border
if isinstance(widget, QDockContainer):
target_mode |= Mode.CompassEx
elif isinstance(widget, QDockTabWidget):
target_mode |= Mode.Compass
elif isinstance(widget, QDockSplitterHandle):
if widget.orientation() == Qt.Horizontal:
target_mode |= Mode.SplitHorizontal
else:
target_mode |= Mode.SplitVertical
# Get the local area coordinates for the center of the widget.
center = widget.mapTo(pane, QPoint(0, 0))
center += QPoint(widget.width() / 2, widget.height() / 2)
# Update the state of the rose. If it is to be hidden, it is
# done so immediately. If the target mode is different from
# the current mode, the rose is hidden and the state changes
# are collapsed on a timer.
rose = self._rose
self._hover_pos = pos
self._show_band = True
self._target_rose_mode = target_mode
if target_mode != rose.mode():
rose.setMode(Mode.Border)
self._rose_timer.start(self.rose_delay)
self._show_band = False
# Update the geometry of the rose if needed. This ensures that
# the rose does not change geometry while visible.
origin = pane.mapToGlobal(QPoint(0, 0))
geo = QRect(origin, pane.size())
dirty = rose.geometry() != geo
if dirty:
rose.hide()
rose.setMode(Mode.NoMode)
rose.setGeometry(geo)
# Hit test the rose and update the target geometry for the
# rubber band if the target guide has changed.
rose.setCenterPoint(center)
guide = rose.guideAt(pos, target_mode)
if dirty or guide != self._last_guide:
self._last_guide = guide
if guide >= Guide.BorderNorth and guide <= Guide.BorderWest:
band_geo = self._band_geometry(pane, guide)
elif guide >= Guide.BorderExNorth and guide <= Guide.BorderExWest:
band_geo = self._band_geometry(area, guide)
else:
band_geo = self._band_geometry(widget, guide)
self._target_band_geo = band_geo
self._band_timer.start(self.band_delay)
# Finally, make the rose visible and issue a mouseover command
# so that the guides are highlighted.
rose.mouseOver(pos)
rose.show()
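# Hedged usage sketch: the overlay is normally driven by the dock manager's mouse
# handlers; the stand-alone widget, sizes, and hover point below are illustrative
# assumptions. Positions are in the hovered widget's local coordinates, as described
# in mouse_over_widget().
if __name__ == '__main__':
    from enaml.qt.QtWidgets import QApplication
    app = QApplication([])
    target = QWidget()
    target.resize(400, 300)
    target.show()
    overlay = DockOverlay(parent=target)
    # Show the compass rose centered on the widget for a hover at (200, 150).
    overlay.mouse_over_widget(target, QPoint(200, 150))
    # overlay.hide() would dismiss the rose and rubber band once the drag ends.
    app.exec_()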
|
tradingview_ta/__init__.py | fluxguardian/python-tradingview-ta | 294 | 12699310 | from .main import TA_Handler, TradingView, Analysis, Interval, Exchange, get_multiple_analysis, __version__
from .technicals import Recommendation, Compute
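# Hedged usage sketch: the symbol, exchange, and screener values below are
# illustrative assumptions, and get_analysis() performs a live request to TradingView.
if __name__ == "__main__":
    handler = TA_Handler(
        symbol="AAPL",
        exchange="NASDAQ",
        screener="america",
        interval=Interval.INTERVAL_1_DAY,
    )
    analysis = handler.get_analysis()
    print(analysis.summary)  # e.g. {"RECOMMENDATION": "BUY", ...}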
|
deploy/app/main/views.py | wuchaohua/WeeklyReport | 131 | 12699321 | <reponame>wuchaohua/WeeklyReport
#coding:utf-8
from datetime import date
from flask import request, Response, redirect, url_for, current_app
from flask_admin.model import typefmt
from flask_admin.contrib.sqla import ModelView
from flask_babelex import lazy_gettext as _
from flask_login import current_user
import os
from werkzeug.utils import secure_filename
from . import main
from .. import admin, db
from ..models import Permission, User, Role, Report, Department
from ..utils import permission_required, is_allowed_file, clean_html
from sqlalchemy.exc import OperationalError
@main.route('/', methods=['GET', 'POST'])
def index():
# check if the database is initialized.
try:
User.query.all()
except OperationalError:
db.create_all()
Role.insert_roles()
Department.insert_departments()
if not current_user.is_authenticated:
return redirect(url_for('auth.login'))
return redirect(url_for('report.read'))
@main.route("/upload/", methods=["POST"])
@permission_required(Permission.WRITE_REPORT)
def upload():
img = request.files.get('image')
if img and is_allowed_file(img.filename):
filename = secure_filename(img.filename)
img.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))
img_url = request.url_root + current_app.config['IMAGE_UPLOAD_DIR'] + filename
res = Response(img_url)
current_app.logger.info(
'{} uploaded image'.format(current_user.email))
else:
res = Response(_("Failed Uploading"))
res.headers["ContentType"] = "text/html"
res.headers["Charset"] = "utf-8"
current_app.logger.error(
'{} failed uploading image'.format(current_user.email))
return res
class WeeklyReportModelView(ModelView):
base_template = '/base.html'
def is_accessible(self):
return current_user.is_admin
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('main.index'))
class UserAdminView(WeeklyReportModelView):
column_labels = dict(email='邮箱', username='姓名',
is_ignored='不参与统计',
role='角色', department='部门')
form_columns = column_list = [
'email', 'username', 'is_ignored', 'role', 'department']
can_delete = True
can_create = False
form_widget_args = {
'email': {
'readonly': True
},
}
def on_model_delete(self, model):
current_app.logger.info(
'{} deleted user:{}'.format(current_user.email, model))
for report in Report.query.filter_by(author_id=model.id):
db.session.delete(report)
db.session.commit()
class RoleAdminView(WeeklyReportModelView):
column_labels = dict(name='名称', users='成员')
form_columns = ['name', 'users']
column_list = ['name']
can_create = False
can_edit = True
can_delete = False
form_widget_args = {
'name': {
'readonly': True
},
}
class DepartmentAdminView(WeeklyReportModelView):
column_labels = dict(name='名称', users='成员')
form_columns = ['name', 'users']
can_edit = True
can_delete = False
class ReportAdminView(WeeklyReportModelView):
column_labels = dict(year=u'年份', week_count=u'周次',
created_at=u'创建时间', last_content=u'上周计划', content=u'内容',
author=u'员工', department=u'部门')
column_list = ('author', 'department', 'year', 'week_count', 'last_content',
'content', 'created_at')
column_default_sort = ('created_at', True)
column_searchable_list = ('week_count',)
form_columns = ['created_at', 'week_count', 'year', 'content']
list_template = '/admin/model/report_list_template.html'
can_edit = True
can_export = True
export_types=['xls']
form_widget_args = {
'year': {
'readonly': True
},
'last_content': {
'readonly': True
},
'created_at': {
'readonly': True
},
}
def date_format(view, value):
return value.strftime('%Y-%m-%d')
def author_format(v, c, m, p):
return str(m.author)
def department_format(v, c, m, p):
return str(m.department)
def format_last_content(v, c, m, p):
if m.last_content:
return clean_html(m.last_content)
return ''
def format_content(v, c, m, p):
if m.content:
return clean_html(m.content)
return ''
def format_created_at(v, c, m, p):
return m.created_at.strftime('%Y-%m-%d')
REPORT_FORMATTERS = dict(typefmt.BASE_FORMATTERS)
REPORT_FORMATTERS.update({
date: date_format,
})
column_type_formatters = REPORT_FORMATTERS
EXPORT_REPORT_FORMATTERS = dict(typefmt.BASE_FORMATTERS)
EXPORT_REPORT_FORMATTERS.update({
"author": author_format,
"department": department_format,
"last_content": format_last_content,
"content": format_content,
"created_at":format_created_at,
})
column_formatters_export = EXPORT_REPORT_FORMATTERS
admin.add_view(UserAdminView(User, db.session, name='用户'))
admin.add_view(RoleAdminView(Role, db.session, name='角色'))
admin.add_view(ReportAdminView(Report, db.session, name='周报', endpoint="reports"))
admin.add_view(DepartmentAdminView(Department, db.session, name='部门'))
|
monailabel/utils/others/generic.py | IntroAI-termproject/MONAILabel | 214 | 12699340 | <reponame>IntroAI-termproject/MONAILabel
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import mimetypes
import os
import pathlib
import shutil
import subprocess
import torch.cuda
logger = logging.getLogger(__name__)
def file_ext(name) -> str:
return "".join(pathlib.Path(name).suffixes)
def remove_file(path: str) -> None:
if path and os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
def get_basename(path):
"""Gets the basename of a file.
Ref: https://stackoverflow.com/questions/8384737/extract-file-name-from-path-no-matter-what-the-os-path-format
"""
head, tail = os.path.split(path)
return tail or os.path.basename(head)
def run_command(command, args=None, plogger=None):
plogger = plogger if plogger else logger
cmd = [command]
if args:
args = [str(a) for a in args]
cmd.extend(args)
plogger.info("Running Command:: {}".format(" ".join(cmd)))
process = subprocess.Popen(
cmd,
# stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
env=os.environ.copy(),
)
while process.poll() is None:
line = process.stdout.readline()
line = line.rstrip()
if line:
            plogger.info(line)
plogger.info("Return code: {}".format(process.returncode))
process.stdout.close()
return process.returncode
def init_log_config(log_config, app_dir, log_file):
if not log_config or not os.path.exists(log_config):
default_log_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
default_config = os.path.realpath(os.path.join(default_log_dir, "logging.json"))
log_dir = os.path.join(app_dir, "logs")
log_config = os.path.join(log_dir, "logging.json")
os.makedirs(log_dir, exist_ok=True)
# if not os.path.exists(log_config):
shutil.copy(default_config, log_config)
with open(log_config, "r") as f:
c = f.read()
c = c.replace("${LOGDIR}", log_dir.replace("\\", r"\\"))
c = c.replace("${LOGFILE}", os.path.join(log_dir, log_file).replace("\\", r"\\"))
with open(log_config, "w") as f:
f.write(c)
return log_config
def get_mime_type(file):
m_type = mimetypes.guess_type(file, strict=False)
logger.debug(f"Guessed Mime Type for Image: {m_type}")
if m_type is None or m_type[0] is None:
m_type = "application/octet-stream"
else:
m_type = m_type[0]
logger.debug(f"Final Mime Type: {m_type}")
return m_type
def file_checksum(file, algo="SHA256"):
if algo not in ["SHA256", "SHA512", "MD5"]:
raise ValueError("unsupported hashing algorithm %s" % algo)
with open(file, "rb") as content:
hash = hashlib.new(algo)
while True:
chunk = content.read(8192)
if not chunk:
break
hash.update(chunk)
return f"{algo}:{hash.hexdigest()}"
def gpu_memory_map():
"""Get the current gpu usage.
Returns
-------
usage: dict
Keys are device ids as integers.
Values are memory usage as integers in MB.
"""
logger.info("Using nvidia-smi command")
if shutil.which("nvidia-smi") is None:
logger.info("nvidia-smi command didn't work! - Using default image size [128, 128, 64]")
return {0: 4300}
result = subprocess.check_output(
["nvidia-smi", "--query-gpu=memory.free", "--format=csv,nounits,noheader"], encoding="utf-8"
)
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.strip().split("\n")]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
def gpu_count():
return torch.cuda.device_count()
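# Hedged usage sketch: the helpers above are plain utilities and can be exercised
# directly. The sample file name and the 'echo' command below are illustrative
# assumptions (the command assumes a POSIX 'echo' binary on PATH).
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logger.info("Extension of 'image.nii.gz': %s", file_ext("image.nii.gz"))
    logger.info("Checksum of this file: %s", file_checksum(__file__, algo="SHA256"))
    logger.info("Free GPU memory map (MB): %s", gpu_memory_map())
    run_command("echo", ["hello from run_command"])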
|
src/JPDA_matching.py | reinforcementdriving/JRMOT_ROS | 112 | 12699344 | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from linear_assignment import min_marg_matching
import pdb
def get_unmatched(all_idx, matches, i, marginalization=None):
assigned = [match[i] for match in matches]
unmatched = set(all_idx) - set(assigned)
if marginalization is not None:
# from 1 for dummy node
in_gate_dets = np.nonzero(np.sum(
marginalization[:, 1:], axis=0))[0].tolist()
unmatched = [d for d in unmatched if d not in in_gate_dets]
return list(unmatched)
class Matcher:
def __init__(self, detections, marginalizations, confirmed_tracks,
matching_strategy,
assignment_threshold=None):
self.detections = detections
self.marginalizations = marginalizations
self.confirmed_tracks = confirmed_tracks
self.assignment_threshold = assignment_threshold
self.detection_indices = np.arange(len(detections))
self.matching_strategy = matching_strategy
def match(self):
self.get_matches()
self.get_unmatched_tracks()
self.get_unmatched_detections()
return self.matches, self.unmatched_tracks, self.unmatched_detections
def get_matches(self):
if self.matching_strategy == "max_and_threshold":
self.max_and_threshold_matching()
elif self.matching_strategy == "hungarian":
self.hungarian()
elif self.matching_strategy == "max_match":
self.max_match()
elif self.matching_strategy == "none":
self.matches = []
else:
raise Exception('Unrecognized matching strategy: {}'.
format(self.matching_strategy))
def get_unmatched_tracks(self):
self.unmatched_tracks = get_unmatched(self.confirmed_tracks,
self.matches, 0)
def get_unmatched_detections(self):
self.unmatched_detections = get_unmatched(self.detection_indices, self.matches, 1, self.marginalizations)
def max_match(self):
self.matches = []
if self.marginalizations.shape[0] == 0:
return
detection_map = {}
for i, track_idx in enumerate(self.confirmed_tracks):
marginalization = self.marginalizations[i,:]
detection_id = np.argmax(marginalization) - 1 # subtract one for dummy
if detection_id < 0:
continue
if detection_id not in detection_map.keys():
detection_map[detection_id] = track_idx
else:
cur_track = detection_map[detection_id]
track_update = track_idx if self.marginalizations[track_idx, detection_id] > self.marginalizations[cur_track, detection_id] else cur_track
detection_map[detection_id] = track_update
threshold_p = marginalization[detection_id + 1]
if threshold_p < self.assignment_threshold:
continue
for detection in detection_map.keys():
self.matches.append((detection_map[detection], detection))
def max_and_threshold_matching(self):
self.matches = []
if self.marginalizations.shape[0] == 0:
return
for i, track_idx in enumerate(self.confirmed_tracks):
marginalization = self.marginalizations[i,:]
detection_id = np.argmax(marginalization) - 1 # subtract one for dummy
if detection_id < 0:
continue
threshold_p = marginalization[detection_id + 1]
if threshold_p < self.assignment_threshold:
continue
self.matches.append((track_idx, detection_id))
def hungarian(self):
self.matches, _, _ = min_marg_matching(self.marginalizations,
self.confirmed_tracks,
self.assignment_threshold)
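# Hedged usage sketch: one marginalization row per confirmed track, one column per
# detection plus a leading dummy (missed-detection) column, matching the conventions
# above. All numbers and the detection placeholders are illustrative assumptions.
if __name__ == '__main__':
    detections = ['det-0', 'det-1']  # stand-ins; only len() is used here
    marginalizations = np.array([
        [0.05, 0.90, 0.05],          # track 0: detection 0 most likely
        [0.80, 0.10, 0.10],          # track 1: dummy (missed) most likely
    ])
    confirmed_tracks = [0, 1]
    matcher = Matcher(detections, marginalizations, confirmed_tracks,
                      matching_strategy="max_and_threshold",
                      assignment_threshold=0.5)
    matches, unmatched_tracks, unmatched_detections = matcher.match()
    print(matches, unmatched_tracks, unmatched_detections)  # [(0, 0)] [1] []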
|
CondTools/Hcal/test/HcalInterpolatedPulseDBReader_cfg.py | ckamtsikis/cmssw | 852 | 12699363 | <filename>CondTools/Hcal/test/HcalInterpolatedPulseDBReader_cfg.py<gh_stars>100-1000
database = "sqlite_file:hcalPulse.db"
tag = "test"
outputfile = "hcalPulse_dbread.bbin"
import FWCore.ParameterSet.Config as cms
process = cms.Process('HcalInterpolatedPulseDBRead')
process.source = cms.Source('EmptySource')
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.load("CondCore.CondDB.CondDB_cfi")
process.CondDB.connect = database
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDB,
toGet = cms.VPSet(cms.PSet(
record = cms.string("HcalInterpolatedPulseCollRcd"),
tag = cms.string(tag)
))
)
process.dumper = cms.EDAnalyzer(
'HcalInterpolatedPulseDBReader',
outputFile = cms.string(outputfile)
)
process.p = cms.Path(process.dumper)
|
cfgov/v1/management/commands/sync_document_storage.py | Colin-Seifer/consumerfinance.gov | 156 | 12699400 | <filename>cfgov/v1/management/commands/sync_document_storage.py
from django.core.management.base import BaseCommand
from wagtail.documents import get_document_model
from ._sync_storage_base import SyncStorageCommandMixin
class Command(SyncStorageCommandMixin, BaseCommand):
def get_storage_directories(self):
return ["documents"]
def get_queryset(self):
return get_document_model().objects.all()
|
packages/pyright-internal/src/tests/samples/self5.py | Jasha10/pyright | 3,934 | 12699425 | <filename>packages/pyright-internal/src/tests/samples/self5.py
# This sample tests the use of `Self` when used within a property
# or class property.
from typing_extensions import Self
class A:
@property
def one(self) -> Self:
...
@classmethod
@property
def two(cls) -> type[Self]:
...
class B(A):
...
reveal_type(A().one, expected_text="A")
reveal_type(A.two, expected_text="Type[A]")
reveal_type(B().one, expected_text="B")
reveal_type(B.two, expected_text="Type[B]")
|
gammapy/scripts/tests/test_main.py | JohannesBuchner/gammapy | 155 | 12699436 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from gammapy import __version__
from gammapy.scripts.main import cli
from gammapy.utils.testing import run_cli
def test_cli_no_args():
# No arguments should print help
result = run_cli(cli, [])
assert "Usage" in result.output
def test_cli_help():
result = run_cli(cli, ["--help"])
assert "Usage" in result.output
def test_cli_version():
result = run_cli(cli, ["--version"])
assert f"gammapy version {__version__}" in result.output
def test_check_logging():
result = run_cli(cli, ["check", "logging"])
assert f"output" in result.output
|
utildialog/numinputdialog.py | dragondjf/QMarkdowner | 115 | 12699498 | <reponame>dragondjf/QMarkdowner
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from qframer.qt import QtGui
from qframer.qt import QtCore
from basedialog import BaseDialog
class numinputDialog(BaseDialog):
def __init__(self, styleoptions, parent=None):
super(numinputDialog, self).__init__(styleoptions, parent)
        # numeric input area (label + spinbox)
self.numwidget = QtGui.QWidget()
num_mainlayout = QtGui.QGridLayout()
self.numLabel = QtGui.QLabel(u'Q的防区个数:')
self.numspinbox = QtGui.QSpinBox(self.numwidget)
self.ipLabel = QtGui.QLabel(u'Q的下位机IP:')
self.ipIn = QtGui.QLineEdit()
num_mainlayout.addWidget(self.numLabel, 0, 0)
num_mainlayout.addWidget(self.numspinbox, 0, 1)
#num_mainlayout.addWidget(self.ipLabel, 1, 0)
#num_mainlayout.addWidget(self.ipIn, 1, 1)
self.numwidget.setLayout(num_mainlayout)
        # confirm / cancel button layout
self.enterwidget = QtGui.QWidget()
self.pbEnter = QtGui.QPushButton(u'确定', self)
self.pbCancel = QtGui.QPushButton(u'取消', self)
self.pbEnter.clicked.connect(self.enter)
self.pbCancel.clicked.connect(self.reject)
enterwidget_mainlayout = QtGui.QGridLayout()
enterwidget_mainlayout.addWidget(self.pbEnter, 0, 0)
enterwidget_mainlayout.addWidget(self.pbCancel, 0, 1)
self.enterwidget.setLayout(enterwidget_mainlayout)
self.layout().addWidget(self.numwidget)
self.layout().addWidget(self.enterwidget)
self.resize(self.width(), self.height())
def enter(self):
        self.accept()  # close the dialog and return 1
def numinput(value, start, end, step, options):
dialog = numinputDialog(options)
dialog.numspinbox.setValue(value)
dialog.numspinbox.setRange(start, end)
dialog.numspinbox.setSingleStep(step)
dialog.numspinbox.setFocusPolicy(QtCore.Qt.NoFocus)
if dialog.exec_():
return True, int(dialog.numspinbox.value())#, str(dialog.ipIn.text())
else:
return False, int(dialog.numspinbox.value())#, str(dialog.ipIn.text())
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
styleoptions = {
'title': u'退出设置',
'windowicon': os.sep.join([os.path.dirname(__file__), 'utildialogskin', 'images', 'bg.jpg']),
'minsize': (400, 300),
'size': (400, 300),
'logo_title': u'智能光纤云终端管理平台',
'logo_img_url': os.sep.join([os.path.dirname(__file__), 'utildialogskin', 'images', 'bg.jpg'])
}
print numinput(6, 2, 8, 2, styleoptions)
sys.exit(app.exec_())
|
clib/clib_mininet_test.py | boldsort/faucet | 393 | 12699505 | <reponame>boldsort/faucet
#!/usr/bin/env python3
"""Mininet tests for clib client library functionality.
* must be run as root
* you can run a specific test case only, by adding the class name of the test
case to the command. Eg ./clib_mininet_test.py FaucetUntaggedIPv4RouteTest
It is strongly recommended to run these tests via Docker, to ensure you have
all dependencies correctly installed. See ../docs/.
"""
from clib_mininet_test_main import test_main
import clib_mininet_tests
if __name__ == '__main__':
test_main([clib_mininet_tests.__name__])
|
jraph/examples/basic.py | baskaransri/jraph | 871 | 12699510 | <gh_stars>100-1000
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A basic graphnet example.
This example just explains the bare mechanics of the library.
"""
import logging
from absl import app
import jax
import jraph
import numpy as np
MASK_BROKEN_MSG = ("Support for jax.mask is currently broken. This is not a "
"jraph error.")
def run():
"""Runs basic example."""
# Creating graph tuples.
# Creates a GraphsTuple from scratch containing a single graph.
# The graph has 3 nodes and 2 edges.
# Each node has a 4-dimensional feature vector.
# Each edge has a 5-dimensional feature vector.
# The graph itself has a 6-dimensional feature vector.
single_graph = jraph.GraphsTuple(
n_node=np.asarray([3]), n_edge=np.asarray([2]),
nodes=np.ones((3, 4)), edges=np.ones((2, 5)),
globals=np.ones((1, 6)),
senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info("Single graph %r", single_graph)
# Creates a GraphsTuple from scratch containing a single graph with nested
# feature vectors.
# The graph has 3 nodes and 2 edges.
# The feature vector can be arbitrary nested types of dict, list and tuple,
# or any other type you registered with jax.tree_util.register_pytree_node.
nested_graph = jraph.GraphsTuple(
n_node=np.asarray([3]), n_edge=np.asarray([2]),
nodes={"a": np.ones((3, 4))}, edges={"b": np.ones((2, 5))},
globals={"c": np.ones((1, 6))},
senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info("Nested graph %r", nested_graph)
# Creates a GraphsTuple from scratch containing a 2 graphs using an implicit
# batch dimension.
# The first graph has 3 nodes and 2 edges.
# The second graph has 1 nodes and 1 edges.
# Each node has a 4-dimensional feature vector.
# Each edge has a 5-dimensional feature vector.
# The graph itself has a 6-dimensional feature vector.
implicitly_batched_graph = jraph.GraphsTuple(
n_node=np.asarray([3, 1]), n_edge=np.asarray([2, 1]),
nodes=np.ones((4, 4)), edges=np.ones((3, 5)),
globals=np.ones((2, 6)),
senders=np.array([0, 1, 3]), receivers=np.array([2, 2, 3]))
logging.info("Implicitly batched graph %r", implicitly_batched_graph)
# Batching graphs can be challenging. There are in general two approaches:
# 1. Implicit batching: Independent graphs are combined into the same
# GraphsTuple first, and the padding is added to the combined graph.
# 2. Explicit batching: Pad all graphs to a maximum size, stack them together
# using an explicit batch dimension followed by jax.vmap.
# Both approaches are shown below.
# Creates a GraphsTuple from two existing GraphsTuple using an implicit
# batch dimension.
# The GraphsTuple will contain three graphs.
implicitly_batched_graph = jraph.batch(
[single_graph, implicitly_batched_graph])
logging.info("Implicitly batched graph %r", implicitly_batched_graph)
# Creates multiple GraphsTuples from an existing GraphsTuple with an implicit
# batch dimension.
graph_1, graph_2, graph_3 = jraph.unbatch(implicitly_batched_graph)
logging.info("Unbatched graphs %r %r %r", graph_1, graph_2, graph_3)
# Creates a padded GraphsTuple from an existing GraphsTuple.
# The padded GraphsTuple will contain 10 nodes, 5 edges, and 4 graphs.
# Three graphs are added for the padding.
  # First, a dummy graph which contains the padding nodes and edges, and second,
  # two empty graphs without nodes or edges to pad out the graphs.
padded_graph = jraph.pad_with_graphs(
single_graph, n_node=10, n_edge=5, n_graph=4)
logging.info("Padded graph %r", padded_graph)
# Creates a GraphsTuple from an existing padded GraphsTuple.
# The previously added padding is removed.
single_graph = jraph.unpad_with_graphs(padded_graph)
logging.info("Unpadded graph %r", single_graph)
# Creates a GraphsTuple containing a 2 graphs using an explicit batch
# dimension.
# An explicit batch dimension requires more memory, but can simplify
# the definition of functions operating on the graph.
# Explicitly batched graphs require the GraphNetwork to be transformed
# by jax.vmap.
# Using an explicit batch requires padding all feature vectors to
# the maximum size of nodes and edges.
# The first graph has 3 nodes and 2 edges.
# The second graph has 1 nodes and 1 edges.
# Each node has a 4-dimensional feature vector.
# Each edge has a 5-dimensional feature vector.
# The graph itself has a 6-dimensional feature vector.
explicitly_batched_graph = jraph.GraphsTuple(
n_node=np.asarray([[3], [1]]), n_edge=np.asarray([[2], [1]]),
nodes=np.ones((2, 3, 4)), edges=np.ones((2, 2, 5)),
globals=np.ones((2, 1, 6)),
senders=np.array([[0, 1], [0, -1]]),
receivers=np.array([[2, 2], [0, -1]]))
logging.info("Explicitly batched graph %r", explicitly_batched_graph)
# Running a graph propagation steps.
# First define the update functions for the edges, nodes and globals.
# In this example we use the identity everywhere.
# For Graph neural networks, each update function is typically a neural
# network.
def update_edge_fn(
edge_features,
sender_node_features,
receiver_node_features,
globals_):
"""Returns the update edge features."""
del sender_node_features
del receiver_node_features
del globals_
return edge_features
def update_node_fn(
node_features,
aggregated_sender_edge_features,
aggregated_receiver_edge_features,
globals_):
"""Returns the update node features."""
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features
def update_globals_fn(
aggregated_node_features,
aggregated_edge_features,
globals_):
del aggregated_node_features
del aggregated_edge_features
return globals_
# Optionally define custom aggregation functions.
# In this example we use the defaults (so no need to define them explicitly).
aggregate_edges_for_nodes_fn = jraph.segment_sum
aggregate_nodes_for_globals_fn = jraph.segment_sum
aggregate_edges_for_globals_fn = jraph.segment_sum
# Optionally define attention logit function and attention reduce function.
# This can be used for graph attention.
# The attention function calculates attention weights, and the apply
# attention function calculates the new edge feature given the weights.
# We don't use graph attention here, and just pass the defaults.
attention_logit_fn = None
attention_reduce_fn = None
# Creates a new GraphNetwork in its most general form.
# Most of the arguments have defaults and can be omitted if a feature
# is not used.
# There are also predefined GraphNetworks available (see models.py)
network = jraph.GraphNetwork(
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn,
update_global_fn=update_globals_fn,
attention_logit_fn=attention_logit_fn,
aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn,
aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn,
aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn,
attention_reduce_fn=attention_reduce_fn)
# Runs graph propagation on (implicitly batched) graphs.
updated_graph = network(single_graph)
logging.info("Updated graph from single graph %r", updated_graph)
updated_graph = network(nested_graph)
logging.info("Updated graph from nested graph %r", nested_graph)
updated_graph = network(implicitly_batched_graph)
logging.info("Updated graph from implicitly batched graph %r", updated_graph)
updated_graph = network(padded_graph)
logging.info("Updated graph from padded graph %r", updated_graph)
# JIT-compile graph propagation.
# Use padded graphs to avoid re-compilation at every step!
jitted_network = jax.jit(network)
updated_graph = jitted_network(padded_graph)
logging.info("(JIT) updated graph from padded graph %r", updated_graph)
logging.info("basic.py complete!")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
run()
if __name__ == "__main__":
app.run(main)
|
bikeshed/stringEnum/__init__.py | saschanaz/bikeshed | 775 | 12699516 | from .StringEnum import StringEnum
|
qf_lib/backtesting/contract/contract_to_ticker_conversion/ib_bloomberg_mapper.py | webclinic017/qf-lib | 198 | 12699518 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.contract.contract_to_ticker_conversion.base import ContractTickerMapper
from qf_lib.common.tickers.tickers import BloombergTicker
class IB_Bloomberg_ContractTickerMapper(ContractTickerMapper):
"""
BloombergTicker - IB Contract mapper that can be used for live trading.
It is using the "SMART" exchange for all products
Parameters
-----------
bbg_suffix: str
suffix added after the first part of the BBG ticker. For example: "US Equity", "PW Equity", etc
security_type: str
corresponds to the security type that is used to create Contract. For example:
use "STK" for stocks, ETFs and ETNs,
use "CMDTY" for commodities,
use "BOND" for bonds
use "OPT" for options
use "FUT" for futures
"""
def __init__(self, bbg_suffix: str, security_type: str):
self.bbg_suffix = bbg_suffix
self.security_type = security_type
def contract_to_ticker(self, contract: Contract, strictly_to_specific_ticker=True) -> BloombergTicker:
return BloombergTicker(ticker=contract.symbol)
def ticker_to_contract(self, ticker: BloombergTicker) -> Contract:
split_ticker = ticker.ticker.split()
return Contract(symbol=split_ticker[0], security_type=self.security_type, exchange="SMART")
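# Hedged usage sketch: the ticker below is an illustrative assumption; the mapping is
# purely textual, so no IB connection is needed to exercise it.
if __name__ == "__main__":
    mapper = IB_Bloomberg_ContractTickerMapper(bbg_suffix="US Equity", security_type="STK")
    contract = mapper.ticker_to_contract(BloombergTicker("SPY US Equity"))
    print(contract.symbol)  # "SPY"
    # Note: the reverse mapping returns the bare symbol; bbg_suffix is not re-attached.
    print(mapper.contract_to_ticker(contract).ticker)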
|
ghostwriter/reporting/migrations/0016_auto_20201017_0014.py | bbhunter/Ghostwriter | 601 | 12699553 | <reponame>bbhunter/Ghostwriter
# Generated by Django 3.0.10 on 2020-10-17 00:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reporting', '0015_auto_20201016_1756'),
]
operations = [
migrations.RemoveField(
model_name='report',
name='template',
),
migrations.AddField(
model_name='report',
name='docx_template',
field=models.ForeignKey(help_text='Select the Word template to use for this report', limit_choices_to={'doc_type__iexact': 'docx'}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reporttemplate_docx_set', to='reporting.ReportTemplate'),
),
migrations.AddField(
model_name='report',
name='pptx_template',
field=models.ForeignKey(help_text='Select the PowerPoint template to use for this report', limit_choices_to={'doc_type__iexact': 'pptx'}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reporttemplate_pptx_set', to='reporting.ReportTemplate'),
),
migrations.AlterField(
model_name='finding',
name='finding_guidance',
field=models.TextField(blank=True, help_text='Provide notes for your team that describes how the finding is intended to be used or edited during editing', null=True, verbose_name='Finding Guidance'),
),
]
|
boto3_type_annotations_with_docs/boto3_type_annotations/organizations/paginator.py | cowboygneox/boto3_type_annotations | 119 | 12699555 | <gh_stars>100-1000
from typing import Dict
from typing import List
from botocore.paginate import Paginator
class ListAWSServiceAccessForOrganization(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_aws_service_access_for_organization`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAWSServiceAccessForOrganization>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'EnabledServicePrincipals': [
{
'ServicePrincipal': 'string',
'DateEnabled': datetime(2015, 1, 1)
},
],
}
**Response Structure**
- *(dict) --*
- **EnabledServicePrincipals** *(list) --*
A list of the service principals for the services that are enabled to integrate with your organization. Each principal is a structure that includes the name and the date that it was enabled for integration with AWS Organizations.
- *(dict) --*
A structure that contains details of a service principal that is enabled to integrate with AWS Organizations.
- **ServicePrincipal** *(string) --*
The name of the service principal. This is typically in the form of a URL, such as: `` *servicename* .amazonaws.com`` .
- **DateEnabled** *(datetime) --*
The date that the service principal was enabled for integration with AWS Organizations.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListAccounts(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_accounts`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccounts>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Accounts': [
{
'Id': 'string',
'Arn': 'string',
'Email': 'string',
'Name': 'string',
'Status': 'ACTIVE'|'SUSPENDED',
'JoinedMethod': 'INVITED'|'CREATED',
'JoinedTimestamp': datetime(2015, 1, 1)
},
],
}
**Response Structure**
- *(dict) --*
- **Accounts** *(list) --*
A list of objects in the organization.
- *(dict) --*
Contains information about an AWS account that is a member of an organization.
- **Id** *(string) --*
The unique identifier (ID) of the account.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an account ID string requires exactly 12 digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the account.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Email** *(string) --*
The email address associated with the AWS account.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for this parameter is a string of characters that represents a standard Internet email address.
- **Name** *(string) --*
The friendly name of the account.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range.
- **Status** *(string) --*
The status of the account in the organization.
- **JoinedMethod** *(string) --*
The method by which the account joined the organization.
- **JoinedTimestamp** *(datetime) --*
The date the account became a part of the organization.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListAccountsForParent(Paginator):
def paginate(self, ParentId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_accounts_for_parent`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccountsForParent>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ParentId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Accounts': [
{
'Id': 'string',
'Arn': 'string',
'Email': 'string',
'Name': 'string',
'Status': 'ACTIVE'|'SUSPENDED',
'JoinedMethod': 'INVITED'|'CREATED',
'JoinedTimestamp': datetime(2015, 1, 1)
},
],
}
**Response Structure**
- *(dict) --*
- **Accounts** *(list) --*
A list of the accounts in the specified root or OU.
- *(dict) --*
Contains information about an AWS account that is a member of an organization.
- **Id** *(string) --*
The unique identifier (ID) of the account.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an account ID string requires exactly 12 digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the account.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Email** *(string) --*
The email address associated with the AWS account.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for this parameter is a string of characters that represents a standard Internet email address.
- **Name** *(string) --*
The friendly name of the account.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range.
- **Status** *(string) --*
The status of the account in the organization.
- **JoinedMethod** *(string) --*
The method by which the account joined the organization.
- **JoinedTimestamp** *(datetime) --*
The date the account became a part of the organization.
:type ParentId: string
:param ParentId: **[REQUIRED]**
The unique identifier (ID) for the parent root or organization unit (OU) whose accounts you want to list.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListChildren(Paginator):
def paginate(self, ParentId: str, ChildType: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_children`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListChildren>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ParentId='string',
ChildType='ACCOUNT'|'ORGANIZATIONAL_UNIT',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Children': [
{
'Id': 'string',
'Type': 'ACCOUNT'|'ORGANIZATIONAL_UNIT'
},
],
}
**Response Structure**
- *(dict) --*
- **Children** *(list) --*
The list of children of the specified parent container.
- *(dict) --*
Contains a list of child entities, either OUs or accounts.
- **Id** *(string) --*
The unique identifier (ID) of this child entity.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a child ID string requires one of the following:
* Account: a string that consists of exactly 12 digits.
* Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.
- **Type** *(string) --*
The type of this child entity.
:type ParentId: string
:param ParentId: **[REQUIRED]**
The unique identifier (ID) for the parent root or OU whose children you want to list.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a parent ID string requires one of the following:
* Root: a string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits.
* Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.
:type ChildType: string
:param ChildType: **[REQUIRED]**
Filters the output to include only the specified child type.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListCreateAccountStatus(Paginator):
def paginate(self, States: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_create_account_status`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListCreateAccountStatus>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
States=[
'IN_PROGRESS'|'SUCCEEDED'|'FAILED',
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'CreateAccountStatuses': [
{
'Id': 'string',
'AccountName': 'string',
'State': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED',
'RequestedTimestamp': datetime(2015, 1, 1),
'CompletedTimestamp': datetime(2015, 1, 1),
'AccountId': 'string',
'GovCloudAccountId': 'string',
'FailureReason': 'ACCOUNT_LIMIT_EXCEEDED'|'EMAIL_ALREADY_EXISTS'|'INVALID_ADDRESS'|'INVALID_EMAIL'|'CONCURRENT_ACCOUNT_MODIFICATION'|'INTERNAL_FAILURE'
},
],
}
**Response Structure**
- *(dict) --*
- **CreateAccountStatuses** *(list) --*
A list of objects with details about the requests. Certain elements, such as the accountId number, are present in the output only after the account has been successfully created.
- *(dict) --*
Contains the status about a CreateAccount or CreateGovCloudAccount request to create an AWS account or an AWS GovCloud (US) account in an organization.
- **Id** *(string) --*
The unique identifier (ID) that references this request. You get this value from the response of the initial CreateAccount request to create the account.
                  The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a create account request ID string requires "car-" followed by from 8 to 32 lower-case letters or digits.
- **AccountName** *(string) --*
The account name given to the account when it was created.
- **State** *(string) --*
The status of the request.
- **RequestedTimestamp** *(datetime) --*
The date and time that the request was made for the account creation.
- **CompletedTimestamp** *(datetime) --*
The date and time that the account was created and the request completed.
- **AccountId** *(string) --*
If the account was created successfully, the unique identifier (ID) of the new account.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an account ID string requires exactly 12 digits.
- **GovCloudAccountId** *(string) --*
- **FailureReason** *(string) --*
If the request failed, a description of the reason for the failure.
* ACCOUNT_LIMIT_EXCEEDED: The account could not be created because you have reached the limit on the number of accounts in your organization.
* EMAIL_ALREADY_EXISTS: The account could not be created because another AWS account with that email address already exists.
* INVALID_ADDRESS: The account could not be created because the address you provided is not valid.
* INVALID_EMAIL: The account could not be created because the email address you provided is not valid.
* INTERNAL_FAILURE: The account could not be created because of an internal failure. Try again later. If the problem persists, contact Customer Support.
:type States: list
:param States:
A list of one or more states that you want included in the response. If this parameter is not present, then all requests are included in the response.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
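# --- Hedged usage sketch (added for illustration; not part of the generated stubs) ---
# Collects only failed account-creation requests with the ListCreateAccountStatus
# paginator documented above, using the States filter from the request syntax.
def _example_failed_account_requests():
    import boto3  # assumed available at call time
    paginator = boto3.client('organizations').get_paginator('list_create_account_status')
    failures = []
    for page in paginator.paginate(States=['FAILED']):
        failures.extend((s['Id'], s.get('FailureReason')) for s in page['CreateAccountStatuses'])
    return failures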
class ListHandshakesForAccount(Paginator):
def paginate(self, Filter: Dict = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_handshakes_for_account`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForAccount>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Filter={
'ActionType': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE',
'ParentHandshakeId': 'string'
},
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Handshakes': [
{
'Id': 'string',
'Arn': 'string',
'Parties': [
{
'Id': 'string',
'Type': 'ACCOUNT'|'ORGANIZATION'|'EMAIL'
},
],
'State': 'REQUESTED'|'OPEN'|'CANCELED'|'ACCEPTED'|'DECLINED'|'EXPIRED',
'RequestedTimestamp': datetime(2015, 1, 1),
'ExpirationTimestamp': datetime(2015, 1, 1),
'Action': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE',
'Resources': [
{
'Value': 'string',
'Type': 'ACCOUNT'|'ORGANIZATION'|'ORGANIZATION_FEATURE_SET'|'EMAIL'|'MASTER_EMAIL'|'MASTER_NAME'|'NOTES'|'PARENT_HANDSHAKE',
'Resources': {'... recursive ...'}
},
]
},
],
}
**Response Structure**
- *(dict) --*
- **Handshakes** *(list) --*
A list of Handshake objects with details about each of the handshakes that is associated with the specified account.
- *(dict) --*
Contains information that must be exchanged to securely establish a relationship between two accounts (an *originator* and a *recipient* ). For example, when a master account (the originator) invites another account (the recipient) to join its organization, the two accounts exchange information as a series of handshake requests and responses.
                **Note:** Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists for only 30 days after entering that state. After that, they are deleted.
- **Id** *(string) --*
The unique identifier (ID) of a handshake. The originating account creates the ID when it initiates the handshake.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of a handshake.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Parties** *(list) --*
Information about the two accounts that are participating in the handshake.
- *(dict) --*
Identifies a participant in a handshake.
- **Id** *(string) --*
The unique identifier (ID) for the party.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits.
- **Type** *(string) --*
The type of party.
- **State** *(string) --*
The current state of the handshake. Use the state to trace the flow of the handshake through the process from its creation to its acceptance. The meaning of each of the valid values is as follows:
* **REQUESTED** : This handshake was sent to multiple recipients (applicable to only some handshake types) and not all recipients have responded yet. The request stays in this state until all recipients respond.
* **OPEN** : This handshake was sent to multiple recipients (applicable to only some policy types) and all recipients have responded, allowing the originator to complete the handshake action.
* **CANCELED** : This handshake is no longer active because it was canceled by the originating account.
* **ACCEPTED** : This handshake is complete because it has been accepted by the recipient.
* **DECLINED** : This handshake is no longer active because it was declined by the recipient account.
* **EXPIRED** : This handshake is no longer active because the originator did not receive a response of any kind from the recipient before the expiration time (15 days).
- **RequestedTimestamp** *(datetime) --*
The date and time that the handshake request was made.
- **ExpirationTimestamp** *(datetime) --*
The date and time that the handshake expires. If the recipient of the handshake request fails to respond before the specified date and time, the handshake becomes inactive and is no longer valid.
- **Action** *(string) --*
The type of handshake, indicating what action occurs when the recipient accepts the handshake. The following handshake types are supported:
* **INVITE** : This type of handshake represents a request to join an organization. It is always sent from the master account to only non-member accounts.
* **ENABLE_ALL_FEATURES** : This type of handshake represents a request to enable all features in an organization. It is always sent from the master account to only *invited* member accounts. Created accounts do not receive this because those accounts were created by the organization's master account and approval is inferred.
* **APPROVE_ALL_FEATURES** : This type of handshake is sent from the Organizations service when all member accounts have approved the ``ENABLE_ALL_FEATURES`` invitation. It is sent only to the master account and signals the master that it can finalize the process to enable all features.
- **Resources** *(list) --*
Additional information that is needed to process the handshake.
- *(dict) --*
Contains additional data that is needed to process a handshake.
- **Value** *(string) --*
The information that is passed to the other party in the handshake. The format of the value string must match the requirements of the specified type.
- **Type** *(string) --*
The type of information being passed, specifying how the value is to be interpreted by the other party:
* ``ACCOUNT`` - Specifies an AWS account ID number.
* ``ORGANIZATION`` - Specifies an organization ID number.
* ``EMAIL`` - Specifies the email address that is associated with the account that receives the handshake.
* ``OWNER_EMAIL`` - Specifies the email address associated with the master account. Included as information about an organization.
* ``OWNER_NAME`` - Specifies the name associated with the master account. Included as information about an organization.
* ``NOTES`` - Additional text provided by the handshake initiator and intended for the recipient to read.
- **Resources** *(list) --*
When needed, contains an additional array of ``HandshakeResource`` objects.
:type Filter: dict
:param Filter:
Filters the handshakes that you want included in the response. The default is all types. Use the ``ActionType`` element to limit the output to only a specified type, such as ``INVITE`` , ``ENABLE_ALL_FEATURES`` , or ``APPROVE_ALL_FEATURES`` . Alternatively, for the ``ENABLE_ALL_FEATURES`` handshake that generates a separate child handshake for each member account, you can specify ``ParentHandshakeId`` to see only the handshakes that were generated by that parent request.
- **ActionType** *(string) --*
Specifies the type of handshake action.
If you specify ``ActionType`` , you cannot also specify ``ParentHandshakeId`` .
- **ParentHandshakeId** *(string) --*
Specifies the parent handshake. Only used for handshake types that are a child of another type.
If you specify ``ParentHandshakeId`` , you cannot also specify ``ActionType`` .
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires \"h-\" followed by from 8 to 32 lower-case letters or digits.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
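# --- Hedged usage sketch (added for illustration; not part of the generated stubs) ---
# Lists the still-open invitations for the calling account with the
# ListHandshakesForAccount paginator documented above, using the ActionType filter.
def _example_open_invitations():
    import boto3  # assumed available at call time
    paginator = boto3.client('organizations').get_paginator('list_handshakes_for_account')
    open_ids = []
    for page in paginator.paginate(Filter={'ActionType': 'INVITE'}):
        open_ids.extend(h['Id'] for h in page['Handshakes'] if h['State'] == 'OPEN')
    return open_ids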
class ListHandshakesForOrganization(Paginator):
def paginate(self, Filter: Dict = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_handshakes_for_organization`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForOrganization>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Filter={
'ActionType': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE',
'ParentHandshakeId': 'string'
},
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Handshakes': [
{
'Id': 'string',
'Arn': 'string',
'Parties': [
{
'Id': 'string',
'Type': 'ACCOUNT'|'ORGANIZATION'|'EMAIL'
},
],
'State': 'REQUESTED'|'OPEN'|'CANCELED'|'ACCEPTED'|'DECLINED'|'EXPIRED',
'RequestedTimestamp': datetime(2015, 1, 1),
'ExpirationTimestamp': datetime(2015, 1, 1),
'Action': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE',
'Resources': [
{
'Value': 'string',
'Type': 'ACCOUNT'|'ORGANIZATION'|'ORGANIZATION_FEATURE_SET'|'EMAIL'|'MASTER_EMAIL'|'MASTER_NAME'|'NOTES'|'PARENT_HANDSHAKE',
'Resources': {'... recursive ...'}
},
]
},
],
}
**Response Structure**
- *(dict) --*
- **Handshakes** *(list) --*
A list of Handshake objects with details about each of the handshakes that are associated with an organization.
- *(dict) --*
Contains information that must be exchanged to securely establish a relationship between two accounts (an *originator* and a *recipient* ). For example, when a master account (the originator) invites another account (the recipient) to join its organization, the two accounts exchange information as a series of handshake requests and responses.
                **Note:** Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists for only 30 days after entering that state. After that, they are deleted.
- **Id** *(string) --*
The unique identifier (ID) of a handshake. The originating account creates the ID when it initiates the handshake.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of a handshake.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Parties** *(list) --*
Information about the two accounts that are participating in the handshake.
- *(dict) --*
Identifies a participant in a handshake.
- **Id** *(string) --*
The unique identifier (ID) for the party.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits.
- **Type** *(string) --*
The type of party.
- **State** *(string) --*
The current state of the handshake. Use the state to trace the flow of the handshake through the process from its creation to its acceptance. The meaning of each of the valid values is as follows:
* **REQUESTED** : This handshake was sent to multiple recipients (applicable to only some handshake types) and not all recipients have responded yet. The request stays in this state until all recipients respond.
* **OPEN** : This handshake was sent to multiple recipients (applicable to only some policy types) and all recipients have responded, allowing the originator to complete the handshake action.
* **CANCELED** : This handshake is no longer active because it was canceled by the originating account.
* **ACCEPTED** : This handshake is complete because it has been accepted by the recipient.
* **DECLINED** : This handshake is no longer active because it was declined by the recipient account.
* **EXPIRED** : This handshake is no longer active because the originator did not receive a response of any kind from the recipient before the expiration time (15 days).
- **RequestedTimestamp** *(datetime) --*
The date and time that the handshake request was made.
- **ExpirationTimestamp** *(datetime) --*
The date and time that the handshake expires. If the recipient of the handshake request fails to respond before the specified date and time, the handshake becomes inactive and is no longer valid.
- **Action** *(string) --*
The type of handshake, indicating what action occurs when the recipient accepts the handshake. The following handshake types are supported:
* **INVITE** : This type of handshake represents a request to join an organization. It is always sent from the master account to only non-member accounts.
* **ENABLE_ALL_FEATURES** : This type of handshake represents a request to enable all features in an organization. It is always sent from the master account to only *invited* member accounts. Created accounts do not receive this because those accounts were created by the organization's master account and approval is inferred.
* **APPROVE_ALL_FEATURES** : This type of handshake is sent from the Organizations service when all member accounts have approved the ``ENABLE_ALL_FEATURES`` invitation. It is sent only to the master account and signals the master that it can finalize the process to enable all features.
- **Resources** *(list) --*
Additional information that is needed to process the handshake.
- *(dict) --*
Contains additional data that is needed to process a handshake.
- **Value** *(string) --*
The information that is passed to the other party in the handshake. The format of the value string must match the requirements of the specified type.
- **Type** *(string) --*
The type of information being passed, specifying how the value is to be interpreted by the other party:
* ``ACCOUNT`` - Specifies an AWS account ID number.
* ``ORGANIZATION`` - Specifies an organization ID number.
* ``EMAIL`` - Specifies the email address that is associated with the account that receives the handshake.
* ``OWNER_EMAIL`` - Specifies the email address associated with the master account. Included as information about an organization.
* ``OWNER_NAME`` - Specifies the name associated with the master account. Included as information about an organization.
* ``NOTES`` - Additional text provided by the handshake initiator and intended for the recipient to read.
- **Resources** *(list) --*
When needed, contains an additional array of ``HandshakeResource`` objects.
:type Filter: dict
:param Filter:
A filter of the handshakes that you want included in the response. The default is all types. Use the ``ActionType`` element to limit the output to only a specified type, such as ``INVITE`` , ``ENABLE-ALL-FEATURES`` , or ``APPROVE-ALL-FEATURES`` . Alternatively, for the ``ENABLE-ALL-FEATURES`` handshake that generates a separate child handshake for each member account, you can specify the ``ParentHandshakeId`` to see only the handshakes that were generated by that parent request.
- **ActionType** *(string) --*
Specifies the type of handshake action.
If you specify ``ActionType`` , you cannot also specify ``ParentHandshakeId`` .
- **ParentHandshakeId** *(string) --*
Specifies the parent handshake. Only used for handshake types that are a child of another type.
If you specify ``ParentHandshakeId`` , you cannot also specify ``ActionType`` .
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires \"h-\" followed by from 8 to 32 lower-case letters or digits.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListOrganizationalUnitsForParent(Paginator):
def paginate(self, ParentId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_organizational_units_for_parent`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ParentId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'OrganizationalUnits': [
{
'Id': 'string',
'Arn': 'string',
'Name': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **OrganizationalUnits** *(list) --*
A list of the OUs in the specified root or parent OU.
- *(dict) --*
Contains details about an organizational unit (OU). An OU is a container of AWS accounts within a root of an organization. Policies that are attached to an OU apply to all accounts contained in that OU and in any child OUs.
- **Id** *(string) --*
The unique identifier (ID) associated with this OU.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an organizational unit ID string requires "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of this OU.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Name** *(string) --*
The friendly name of this OU.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range.
:type ParentId: string
:param ParentId: **[REQUIRED]**
The unique identifier (ID) of the root or OU whose child OUs you want to list.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a parent ID string requires one of the following:
* Root: a string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits.
* Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
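# --- Hedged usage sketch (added for illustration; not part of the generated stubs) ---
# Walks the OU tree below a parent by chaining the ListOrganizationalUnitsForParent
# paginator documented above. The starting root ID is a made-up placeholder.
def _example_walk_ou_tree(parent_id='r-examplerootid111'):
    import boto3  # assumed available at call time
    paginator = boto3.client('organizations').get_paginator('list_organizational_units_for_parent')
    ou_ids = []
    for page in paginator.paginate(ParentId=parent_id):
        for ou in page['OrganizationalUnits']:
            ou_ids.append(ou['Id'])
            ou_ids.extend(_example_walk_ou_tree(ou['Id']))
    return ou_ids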
class ListParents(Paginator):
def paginate(self, ChildId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_parents`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListParents>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ChildId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Parents': [
{
'Id': 'string',
'Type': 'ROOT'|'ORGANIZATIONAL_UNIT'
},
],
}
**Response Structure**
- *(dict) --*
- **Parents** *(list) --*
A list of parents for the specified child account or OU.
- *(dict) --*
Contains information about either a root or an organizational unit (OU) that can contain OUs or accounts in an organization.
- **Id** *(string) --*
The unique identifier (ID) of the parent entity.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a parent ID string requires one of the following:
* Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.
* Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.
- **Type** *(string) --*
The type of the parent entity.
:type ChildId: string
:param ChildId: **[REQUIRED]**
The unique identifier (ID) of the OU or account whose parent containers you want to list. Do not specify a root.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a child ID string requires one of the following:
* Account: a string that consists of exactly 12 digits.
* Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
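# --- Hedged usage sketch (added for illustration; not part of the generated stubs) ---
# Resolves the immediate parent container of an account with the ListParents paginator
# documented above. The 12-digit account ID is a made-up placeholder.
def _example_parent_of_account(account_id='123456789012'):
    import boto3  # assumed available at call time
    paginator = boto3.client('organizations').get_paginator('list_parents')
    for page in paginator.paginate(ChildId=account_id):
        for parent in page['Parents']:
            return parent['Id'], parent['Type']
    return None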
class ListPolicies(Paginator):
def paginate(self, Filter: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_policies`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPolicies>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Filter='SERVICE_CONTROL_POLICY',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Policies': [
{
'Id': 'string',
'Arn': 'string',
'Name': 'string',
'Description': 'string',
'Type': 'SERVICE_CONTROL_POLICY',
'AwsManaged': True|False
},
],
}
**Response Structure**
- *(dict) --*
- **Policies** *(list) --*
A list of policies that match the filter criteria in the request. The output list does not include the policy contents. To see the content for a policy, see DescribePolicy .
- *(dict) --*
Contains information about a policy, but does not include the content. To see the content of a policy, see DescribePolicy .
- **Id** *(string) --*
The unique identifier (ID) of the policy.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a policy ID string requires "p-" followed by from 8 to 128 lower-case letters or digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the policy.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Name** *(string) --*
The friendly name of the policy.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range.
- **Description** *(string) --*
The description of the policy.
- **Type** *(string) --*
The type of policy.
- **AwsManaged** *(boolean) --*
A boolean value that indicates whether the specified policy is an AWS managed policy. If true, then you can attach the policy to roots, OUs, or accounts, but you cannot edit it.
:type Filter: string
:param Filter: **[REQUIRED]**
Specifies the type of policy that you want to include in the response.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListPoliciesForTarget(Paginator):
def paginate(self, TargetId: str, Filter: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_policies_for_target`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPoliciesForTarget>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
TargetId='string',
Filter='SERVICE_CONTROL_POLICY',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Policies': [
{
'Id': 'string',
'Arn': 'string',
'Name': 'string',
'Description': 'string',
'Type': 'SERVICE_CONTROL_POLICY',
'AwsManaged': True|False
},
],
}
**Response Structure**
- *(dict) --*
- **Policies** *(list) --*
The list of policies that match the criteria in the request.
- *(dict) --*
Contains information about a policy, but does not include the content. To see the content of a policy, see DescribePolicy .
- **Id** *(string) --*
The unique identifier (ID) of the policy.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a policy ID string requires "p-" followed by from 8 to 128 lower-case letters or digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the policy.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Name** *(string) --*
The friendly name of the policy.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range.
- **Description** *(string) --*
The description of the policy.
- **Type** *(string) --*
The type of policy.
- **AwsManaged** *(boolean) --*
A boolean value that indicates whether the specified policy is an AWS managed policy. If true, then you can attach the policy to roots, OUs, or accounts, but you cannot edit it.
:type TargetId: string
:param TargetId: **[REQUIRED]**
The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a target ID string requires one of the following:
* Root: a string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits.
* Account: a string that consists of exactly 12 digits.
* Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.
:type Filter: string
:param Filter: **[REQUIRED]**
The type of policy that you want to include in the returned list.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
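# --- Hedged usage sketch (added for illustration; not part of the generated stubs) ---
# Lists the service control policies attached to one root, OU, or account with the
# ListPoliciesForTarget paginator documented above. The target ID is a made-up placeholder.
def _example_scps_for_target(target_id='123456789012'):
    import boto3  # assumed available at call time
    paginator = boto3.client('organizations').get_paginator('list_policies_for_target')
    names = []
    for page in paginator.paginate(TargetId=target_id, Filter='SERVICE_CONTROL_POLICY'):
        names.extend(policy['Name'] for policy in page['Policies'])
    return names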
class ListRoots(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_roots`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListRoots>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Roots': [
{
'Id': 'string',
'Arn': 'string',
'Name': 'string',
'PolicyTypes': [
{
'Type': 'SERVICE_CONTROL_POLICY',
'Status': 'ENABLED'|'PENDING_ENABLE'|'PENDING_DISABLE'
},
]
},
],
}
**Response Structure**
- *(dict) --*
- **Roots** *(list) --*
A list of roots that are defined in an organization.
- *(dict) --*
Contains details about a root. A root is a top-level parent node in the hierarchy of an organization that can contain organizational units (OUs) and accounts. Every root contains every AWS account in the organization. Each root enables the accounts to be organized in a different way and to have different policy types enabled for use in that root.
- **Id** *(string) --*
The unique identifier (ID) for the root.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a root ID string requires "r-" followed by from 4 to 32 lower-case letters or digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the root.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Name** *(string) --*
The friendly name of the root.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range.
- **PolicyTypes** *(list) --*
The types of policies that are currently enabled for the root and therefore can be attached to the root or to its OUs or accounts.
.. note::
Even if a policy type is shown as available in the organization, you can separately enable and disable them at the root level by using EnablePolicyType and DisablePolicyType . Use DescribeOrganization to see the availability of the policy types in that organization.
- *(dict) --*
Contains information about a policy type and its status in the associated root.
- **Type** *(string) --*
The name of the policy type.
- **Status** *(string) --*
The status of the policy type as it relates to the associated root. To attach a policy of the specified type to a root or to an OU or account in that root, it must be available in the organization and enabled for that root.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
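# --- Hedged usage sketch (added for illustration; not part of the generated stubs) ---
# Reports which policy types are enabled on each root, using the ListRoots paginator
# documented above.
def _example_enabled_policy_types():
    import boto3  # assumed available at call time
    paginator = boto3.client('organizations').get_paginator('list_roots')
    enabled = {}
    for page in paginator.paginate():
        for root in page['Roots']:
            enabled[root['Id']] = [pt['Type'] for pt in root['PolicyTypes'] if pt['Status'] == 'ENABLED']
    return enabled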
class ListTargetsForPolicy(Paginator):
def paginate(self, PolicyId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_targets_for_policy`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTargetsForPolicy>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PolicyId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Targets': [
{
'TargetId': 'string',
'Arn': 'string',
'Name': 'string',
'Type': 'ACCOUNT'|'ORGANIZATIONAL_UNIT'|'ROOT'
},
],
}
**Response Structure**
- *(dict) --*
- **Targets** *(list) --*
A list of structures, each of which contains details about one of the entities to which the specified policy is attached.
- *(dict) --*
Contains information about a root, OU, or account that a policy is attached to.
- **TargetId** *(string) --*
The unique identifier (ID) of the policy target.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a target ID string requires one of the following:
* Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits.
* Account: a string that consists of exactly 12 digits.
* Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits.
- **Arn** *(string) --*
The Amazon Resource Name (ARN) of the policy target.
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* .
- **Name** *(string) --*
The friendly name of the policy target.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range.
- **Type** *(string) --*
The type of the policy target.
:type PolicyId: string
:param PolicyId: **[REQUIRED]**
The unique identifier (ID) of the policy for which you want to know its attachments.
The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a policy ID string requires \"p-\" followed by from 8 to 128 lower-case letters or digits.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
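# --- Hedged usage sketch (added for illustration; not part of the generated stubs) ---
# Lists every root, OU, and account that a policy is attached to, using the
# ListTargetsForPolicy paginator documented above. The policy ID is a made-up placeholder.
def _example_policy_targets(policy_id='p-examplepolicyid1'):
    import boto3  # assumed available at call time
    paginator = boto3.client('organizations').get_paginator('list_targets_for_policy')
    targets = []
    for page in paginator.paginate(PolicyId=policy_id):
        targets.extend((t['TargetId'], t['Type']) for t in page['Targets'])
    return targets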
|
test/test_numpy_utils.py | tirkarthi/mathics-core | 1,920 | 12699567 | <reponame>tirkarthi/mathics-core<filename>test/test_numpy_utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from mathics.builtin.numpy_utils import (
stack,
unstack,
concat,
vectorize,
conditional,
clip,
array,
choose,
)
from mathics.builtin.numpy_utils import (
minimum,
maximum,
dot_t,
mod,
floor,
sqrt,
allclose,
)
@conditional
def _test_simple_conditional(t):
if t > 0.5:
return t + 1.0
else:
return -t
@conditional
def _test_complex_conditional(t):
if t > 10:
return t * 10 + 1
elif t > 3:
return t * 10
elif t <= 3:
return -1
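# Hedged illustration (added here; not part of the original test module): the
# @conditional functions above are scalar piecewise rules, and vectorize(a, 0, f)
# applies such a rule elementwise to a nested array, which is what the tests
# below assert.
def _example_vectorized_conditional():
    # For this input the expected elementwise result is [[-0.1, 1.6], [2.8, -0.4]].
    return vectorize(array([[0.1, 0.6], [1.8, 0.4]]), 0, _test_simple_conditional)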
class Numpy(unittest.TestCase):
def testUnstack(self):
# flat lists remain flat lists.
self.assertEqualArrays(unstack([1, 2, 3]), [1, 2, 3])
# lists of lists get unstacked.
self.assertEqualArrays(unstack([[1, 2], [3, 4]]), [[1, 3], [2, 4]])
# matrices stay matrices, e.g. each r, g, b
# components is split into grayscale images.
self.assertEqualArrays(
unstack([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
[[[1, 4], [7, 10]], [[2, 5], [8, 11]], [[3, 6], [9, 12]]],
)
def testStackUnstackIdentity(self):
a = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
b = [1, 2, 3]
c = [[1, 2], [3, 4]]
for m in (a, b, c):
self.assertEqualArrays(stack(*unstack(m)), m)
def testConcatSimple(self):
# concat concatenates arrays.
self.assertEqualArrays(concat([1], [2]), [1, 2])
def testConcatComplex(self):
# concat concatenates the most inner axis.
a = [[[1, 2], [4, 5]], [[7, 8], [10, 11]]]
b = [[[3], [6]], [[9], [12]]]
c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
self.assertEqualArrays(concat(a, b), c)
def testChooseSimple(self):
# select a single value from a list of values.
options = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
for i in range(len(options)):
self.assertEqual(choose(i, *options), options[i])
def testChooseComplex(self):
def m(i):
return [[10 * i + 0, 10 * i + 1], [10 * i + 2, 10 * i + 3]]
selector = [[0, 1], [1, 2]]
a = choose(selector, (m(1), m(2), m(3)), (m(4), m(5), m(6)), (m(7), m(8), m(9)))
# choose() operates on column after column of the options matrix, i.e.
# in the example above, the selector is first applied to (m(1), m(4), m(7)),
# then to (m(2), m(5), m(8)), and so on. let's calculate the result for the
# first column:
# selector (integers indicate from which option to take a value):
# [0 1]
# [1 2]
# option #0 / column 1 (i.e. m(1)):
# [10 11]
# [12 13]
# option #1 / column 1 (i.e. m(4)):
# [40 41]
# [42 43]
# option #2 / column 1 (i.e. m(7)):
# [70 71]
# [72 73]
# choose() now picks the right value from each options depending on selector:
# [10 41]
# [42 73]
self.assertEqual(len(a), 3)
self.assertEqualArrays(a[0], [[10, 41], [42, 73]])
self.assertEqualArrays(a[1], [[20, 51], [52, 83]])
self.assertEqualArrays(a[2], [[30, 61], [62, 93]])
def testClip(self):
a = array([[[-0.1, 0.6], [-1.8, -0.4]], [[0.1, 0.8], [1.1, 0.5]]])
a = clip(a, 0, 1)
self.assertEqualArrays(a, [[[0.0, 0.6], [0.0, 0.0]], [[0.1, 0.8], [1.0, 0.5]]])
def testDot(self):
self.assertEqual(dot_t([1, 2, 3], [4, 5, 6]), 32)
self.assertEqualArrays(
dot_t([1, 2, 3], [[4, 5, 6], [7, 8, 9], [10, 11, 12]]), [32, 50, 68]
)
def testMod(self):
self.assertEqualArrays(
mod(array([[10, 20], [30, 40]]), [[7, 7], [7, 7]]), [[3, 6], [2, 5]]
)
def testMaximum(self):
self.assertEqualArrays(
maximum([[1, 2], [3, 4]], [[-1, 4], [-8, 5]], [[8, -4], [1, 10]]),
[[8, 4], [3, 10]],
)
def testMinimum(self):
self.assertEqualArrays(
minimum([[1, 2], [3, 4]], [[-1, 4], [-8, 5]], [[8, -4], [1, 10]]),
[[-1, -4], [-8, 4]],
)
def testFloor(self):
self.assertEqualArrays(
floor([[1.2, 5.8], [-1.2, 3.5]]), [[1.0, 5.0], [-2.0, 3.0]]
)
def testSqrt(self):
self.assertEqualArrays(sqrt([[9, 100], [25, 16]]), [[3, 10], [5, 4]])
def testSimpleConditional(self):
a = array([[[0.1, 0.6], [1.8, 0.4]], [[-0.1, -0.8], [1.1, 0.5]]])
a = vectorize(a, 0, _test_simple_conditional)
self.assertEqualArrays(
a, [[[-0.1, 1.6], [2.8, -0.4]], [[0.1, 0.8], [2.1, -0.5]]]
)
def testConditionalComplex(self):
a = array([[[1, 2], [4, 5]], [[7, 8], [10, 11]]])
a = vectorize(a, 0, _test_complex_conditional)
self.assertEqualArrays(a, [[[-1, -1], [40, 50]], [[70, 80], [100, 111]]])
def assertEqualArrays(self, a, b):
self.assertEqual(allclose(a, b), True)
if __name__ == "__main__":
unittest.main()
|
tests/conftest.py | BrendaH/django-machina | 572 | 12699572 | import os
import shutil
import pytest
from . import settings
@pytest.yield_fixture(scope='session', autouse=True)
def empty_media():
""" Removes the directories inside the MEDIA_ROOT that could have been filled during tests. """
yield
for candidate in os.listdir(settings.MEDIA_ROOT):
path = os.path.join(settings.MEDIA_ROOT, candidate)
try:
shutil.rmtree(path)
except OSError:
pass
|
tests/rest/test_api_execute_logs.py | DmitryRibalka/monitorrent | 465 | 12699579 | <filename>tests/rest/test_api_execute_logs.py
from builtins import range
import json
import falcon
from mock import MagicMock
from ddt import ddt, data
from tests import RestTestBase
from monitorrent.rest.execute_logs import ExecuteLogs
class ExecuteLogsTest(RestTestBase):
def test_get_all(self):
entries = [{}, {}, {}]
count = 3
log_manager = MagicMock()
log_manager.get_log_entries = MagicMock(return_value=(entries, count))
# noinspection PyTypeChecker
execute_logs = ExecuteLogs(log_manager)
self.api.add_route('/api/execute/logs', execute_logs)
body = self.simulate_request('/api/execute/logs', query_string='take=10', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries, result['data'])
self.assertEqual(count, result['count'])
def test_get_paged(self):
# count should be less than 30
count = 23
entries = [{'i': i} for i in range(count)]
def get_log_entries(skip, take):
return entries[skip:skip + take], count
log_manager = MagicMock()
log_manager.get_log_entries = MagicMock(side_effect=get_log_entries)
# noinspection PyTypeChecker
execute_logs = ExecuteLogs(log_manager)
self.api.add_route('/api/execute/logs', execute_logs)
body = self.simulate_request('/api/execute/logs', query_string='take=10', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries[0:10], result['data'])
self.assertEqual(count, result['count'])
body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=0', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries[0:10], result['data'])
self.assertEqual(count, result['count'])
body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=10', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
self.assertEqual(entries[10:20], result['data'])
self.assertEqual(count, result['count'])
body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=20', decode='utf-8')
self.assertEqual(self.srmock.status, falcon.HTTP_OK)
self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
result = json.loads(body)
# assume that count is less then 30
self.assertEqual(entries[20:count], result['data'])
self.assertEqual(count, result['count'])
def test_bad_requests(self):
entries = [{}, {}, {}]
count = 3
log_manager = MagicMock()
log_manager.get_log_entries = MagicMock(return_value=(entries, count))
# noinspection PyTypeChecker
execute_logs = ExecuteLogs(log_manager)
self.api.add_route('/api/execute/logs', execute_logs)
self.simulate_request('/api/execute/logs')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take is required')
self.simulate_request('/api/execute/logs', query_string='take=abcd')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be int')
self.simulate_request('/api/execute/logs', query_string='take=10&skip=abcd')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'skip should be int')
self.simulate_request('/api/execute/logs', query_string='take=101')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be less or equal to 100')
self.simulate_request('/api/execute/logs', query_string='take=-10')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be greater than 0')
self.simulate_request('/api/execute/logs', query_string='take=0')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be greater than 0')
self.simulate_request('/api/execute/logs', query_string='take=10&skip=-1')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'skip should be greater or equal to 0')
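# Hedged sketch (illustration only; not referenced by the tests above): the page a
# paged log endpoint is expected to return for given skip/take values, mirroring the
# side_effect used in test_get_paged.
def _expected_page(entries, skip, take):
    return entries[skip:skip + take], len(entries)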
|
_old/BeetlejuiceMachine/beetlejuicemachine.py | tigefa4u/reddit | 444 | 12699584 | #/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
PARENTSTRING = "beetlejuice"
#This is the string you're looking for
REPLYSTRING = "We are deeply sorry, but <NAME> can't join you in this comment thread right now. Would you like to leave a message?"
#This will be put in reply
DEPTHREQ = 3
#How many comments down to take action
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
WAITS = str(WAIT)
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT, DEPTH INT)')
cur.execute('CREATE INDEX IF NOT EXISTS oldpost_index ON oldposts(id)')
print('Loaded Completed table')
sql.commit()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def scanSub():
print('Scanning ' + SUBREDDIT)
subreddit = r.get_subreddit(SUBREDDIT)
comments = list(subreddit.get_comments(limit=MAXPOSTS))
comments.reverse()
for comment in comments:
cid = comment.fullname
cur.execute('SELECT * FROM oldposts WHERE ID=?', [cid])
if not cur.fetchone():
try:
cauthor = comment.author.name
if cauthor.lower() != r.user.name.lower():
cbody = comment.body.lower()
if PARENTSTRING.lower() in cbody:
if 't3_' in comment.parent_id:
#is a root comment on the post
cdepth = 0
else:
cur.execute('SELECT * FROM oldposts WHERE ID=?', [comment.parent_id])
fetch = cur.fetchone()
if not fetch:
cdepth = 0
else:
cdepth = fetch[1] + 1
print(cid, '- Depth:', cdepth)
if cdepth >= DEPTHREQ-1:
print('\tAttempting to reply')
cur.execute('SELECT * FROM oldposts WHERE ID=?', [comment.link_id])
if cur.fetchone():
print('\tAlready posted in this thread')
else:
comment.reply(REPLYSTRING)
print('\tSuccess')
cur.execute('INSERT INTO oldposts VALUES(?, ?)', [comment.link_id, 0])
else:
#Does not contain interest
cdepth = -1
print(cid, '- Depth:', cdepth)
else:
#Will not reply to self
cdepth=-1
pass
except AttributeError:
#Author is deleted
cdepth = 0
cur.execute('INSERT INTO oldposts VALUES(?, ?)', [cid, cdepth])
sql.commit()
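# Hedged illustration (not called by the bot): how the stored DEPTH values chain
# together. A matching root-level comment is saved with depth 0, a matching reply to a
# stored comment gets its parent's depth plus one, and once a chain reaches DEPTHREQ
# comments the bot replies and records the submission's link_id so the thread is only
# answered once.
def _example_next_depth(parent_depth):
    # parent_depth is the stored DEPTH of the parent comment, or None when the parent
    # is the submission itself or was never stored.
    return 0 if parent_depth is None else parent_depth + 1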
while True:
try:
scanSub()
except Exception as e:
print('An error has occured:', e)
print('Running again in ' + WAITS + ' seconds \n')
sql.commit()
time.sleep(WAIT)
|
pyfacebook/api/instagram_business/resource/comment.py | sns-sdks/python-facebook | 181 | 12699588 | """
Apis for comment.
"""
from typing import Dict, Optional, Union
import pyfacebook.utils.constant as const
from pyfacebook.api.base_resource import BaseResource
from pyfacebook.models.ig_business_models import IgBusComment, IgBusReply, IgBusReplies
from pyfacebook.utils.params_utils import enf_comma_separated
class IGBusinessComment(BaseResource):
def get_info(
self,
comment_id: str,
fields: Optional[Union[str, list, tuple]] = None,
return_json: bool = False,
) -> Union[IgBusComment, dict]:
"""
Get information about a Business comment.
:param comment_id: ID for Comment.
:param fields: Comma-separated id string for data fields which you want.
You can also pass this with an id list, tuple.
        :param return_json: If False (the default), return an IgBusComment dataclass;
            otherwise return the raw JSON data.
:return: Business comment information.
"""
if fields is None:
            fields = const.IG_BUSINESS_COMMENT_PUBLIC_FIELDS  # comment fields, not the media field set
data = self.client.get_object(
object_id=comment_id,
fields=enf_comma_separated(field="fields", value=fields),
)
if return_json:
return data
else:
return IgBusComment.new_from_json_dict(data=data)
def get_batch(
self,
ids: Optional[Union[str, list, tuple]],
fields: Optional[Union[str, list, tuple]] = None,
return_json: bool = False,
) -> Union[Dict[str, IgBusComment], dict]:
"""
Get batch business comment information by ids
:param ids: IDs for the comments.
:param fields: Comma-separated id string for data fields which you want.
You can also pass this with an id list, tuple.
        :param return_json: If False (the default), return a dict of IgBusComment dataclasses;
            otherwise return the raw JSON data.
:return: Business medias information.
"""
ids = enf_comma_separated(field="ids", value=ids)
if fields is None:
fields = const.IG_BUSINESS_COMMENT_PUBLIC_FIELDS
data = self.client.get_objects(
ids=ids, fields=enf_comma_separated(field="fields", value=fields)
)
if return_json:
return data
else:
return {
comment_id: IgBusComment.new_from_json_dict(item)
for comment_id, item in data.items()
}
def get_replies(
self,
comment_id: str,
fields: Optional[Union[str, list, tuple]] = None,
count: Optional[int] = 10,
limit: Optional[int] = 10,
return_json: bool = False,
) -> Union[IgBusReplies, dict]:
"""
Getting All Replies (Comments) on a Comment
:param comment_id: ID for the comment.
:param fields: Comma-separated id string for data fields which you want.
You can also pass this with an id list, tuple.
:param count: The total count for you to get data.
        :param limit: Number of objects retrieved per request.
            It should be no more than 100; when None, the API default limit is used.
        :param return_json: If False (the default), return an IgBusReplies dataclass;
            otherwise return the raw JSON data.
:return: Comment replies response information.
"""
if fields is None:
fields = const.IG_BUSINESS_REPLY_PUBLIC_FIELDS
data = self.client.get_full_connections(
object_id=comment_id,
connection="replies",
fields=enf_comma_separated(field="fields", value=fields),
count=count,
limit=limit,
)
if return_json:
return data
else:
return IgBusReplies.new_from_json_dict(data)
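# Hedged usage sketch (added for illustration; not part of the library module): pulls a
# comment plus a few of its replies through the resource class above. The resource
# instance and the comment ID are assumed to come from an already-authenticated client.
def _example_fetch_comment_thread(comment_resource, comment_id):
    comment = comment_resource.get_info(comment_id=comment_id)
    replies = comment_resource.get_replies(comment_id=comment_id, count=5, limit=5)
    return comment, replies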
class IGBusinessReply(BaseResource):
def get_info(
self,
reply_id: str,
fields: Optional[Union[str, list, tuple]] = None,
return_json: bool = False,
) -> Union[IgBusReply, dict]:
"""
Get information about a Business reply.
:param reply_id: ID for reply.
:param fields: Comma-separated id string for data fields which you want.
You can also pass this with an id list, tuple.
        :param return_json: If False (the default), return an IgBusReply dataclass;
            otherwise return the raw JSON data.
:return: Business reply information.
"""
if fields is None:
fields = const.IG_BUSINESS_REPLY_PUBLIC_FIELDS
data = self.client.get_object(
object_id=reply_id,
fields=enf_comma_separated(field="fields", value=fields),
)
if return_json:
return data
else:
            return IgBusReply.new_from_json_dict(data=data)
def get_batch(
self,
ids: Optional[Union[str, list, tuple]],
fields: Optional[Union[str, list, tuple]] = None,
return_json: bool = False,
) -> Union[Dict[str, IgBusReply], dict]:
"""
Get batch business replies information by ids
:param ids: IDs for the replies.
:param fields: Comma-separated id string for data fields which you want.
You can also pass this with an id list, tuple.
        :param return_json: If False (the default), return a dict of IgBusReply dataclasses;
            otherwise return the raw JSON data.
:return: Business replies information.
"""
ids = enf_comma_separated(field="ids", value=ids)
if fields is None:
fields = const.IG_BUSINESS_REPLY_PUBLIC_FIELDS
data = self.client.get_objects(
ids=ids, fields=enf_comma_separated(field="fields", value=fields)
)
if return_json:
return data
else:
return {
reply_id: IgBusReply.new_from_json_dict(item)
for reply_id, item in data.items()
}
|
cassandra_snapshotter/utils.py | kadamanas93/cassandra_snapshotter | 203 | 12699592 | import sys
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import argparse
import functools
import subprocess
LZOP_BIN = 'lzop'
PV_BIN = 'pv'
S3_CONNECTION_HOSTS = {
'us-east-1': 's3.amazonaws.com',
'us-east-2': 's3-us-east-2.amazonaws.com',
'us-west-2': 's3-us-west-2.amazonaws.com',
'us-west-1': 's3-us-west-1.amazonaws.com',
'eu-central-1': 's3-eu-central-1.amazonaws.com',
'eu-west-1': 's3-eu-west-1.amazonaws.com',
'ap-southeast-1': 's3-ap-southeast-1.amazonaws.com',
'ap-southeast-2': 's3-ap-southeast-2.amazonaws.com',
'ap-northeast-1': 's3-ap-northeast-1.amazonaws.com',
'ap-south-1': 's3.ap-south-1.amazonaws.com',
'sa-east-1': 's3-sa-east-1.amazonaws.com',
'cn-north-1': 's3.cn-north-1.amazonaws.com.cn'
}
base_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
base_parser.add_argument('-v', '--verbose',
action='store_true',
help='increase output verbosity')
def add_s3_arguments(arg_parser):
"""
Adds common S3 argument to a parser
"""
arg_parser.add_argument('--aws-access-key-id',
help="public AWS access key.")
arg_parser.add_argument('--aws-secret-access-key',
help="S3 secret access key.")
arg_parser.add_argument('--s3-bucket-region',
default='us-east-1',
help="S3 bucket region (default us-east-1)")
arg_parser.add_argument('--s3-ssenc',
action='store_true',
help="Enable AWS S3 server-side encryption")
arg_parser.add_argument('--s3-bucket-name',
required=True,
help="S3 bucket name for backups.")
arg_parser.add_argument('--s3-base-path',
required=True,
help="S3 base path for backups.")
return arg_parser
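# Hedged usage sketch (illustration only): how a subcommand parser is typically
# assembled from the helper above; the description text is an assumption.
def _example_build_parser():
    parser = argparse.ArgumentParser(description='example backup subcommand (illustration only)')
    return add_s3_arguments(parser)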
def get_s3_connection_host(s3_bucket_region):
return S3_CONNECTION_HOSTS[s3_bucket_region]
def map_wrap(f):
"""
Fix annoying multiprocessing.imap bug when sending
*args and **kwargs
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return apply(f, *args, **kwargs)
return wrapper
def _check_bin(bin_name):
try:
subprocess.check_call("{} --version > /dev/null 2>&1".format(bin_name),
shell=True)
except subprocess.CalledProcessError:
sys.exit("{} not found on path".format(bin_name))
def check_lzop():
_check_bin(LZOP_BIN)
def check_pv():
_check_bin(PV_BIN)
def compressed_pipe(path, size, rate_limit, quiet):
"""
Returns a generator that yields compressed chunks of
the given file_path
compression is done with lzop
"""
lzop = subprocess.Popen(
(LZOP_BIN, '--stdout', path),
bufsize=size,
stdout=subprocess.PIPE
)
if rate_limit > 0:
pv_cmd = [PV_BIN, '--rate-limit', '{}k'.format(rate_limit)]
if quiet:
pv_cmd.insert(1, '--quiet')
pv = subprocess.Popen(
pv_cmd,
stdin=lzop.stdout,
stdout=subprocess.PIPE
)
while True:
if rate_limit > 0:
chunk = pv.stdout.read(size)
else:
chunk = lzop.stdout.read(size)
if not chunk:
break
yield StringIO(chunk)
def decompression_pipe(path):
lzop = subprocess.Popen(
(LZOP_BIN, '-d', '-o', path),
stdin=subprocess.PIPE
)
return lzop
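
# Illustrative sketch (not part of the original module) showing how the lzop
# helpers above fit together. The real snapshotter streams the compressed
# chunks to S3; writing them to a local file here is purely for demonstration,
# and the three paths are placeholders supplied by the caller.
def _compression_example(src_path, compressed_path, restored_path, chunk_size=1024 * 1024):
    check_lzop()
    # Compress src_path into chunk-sized pieces and collect them in a file.
    with open(compressed_path, 'wb') as out:
        for chunk in compressed_pipe(src_path, chunk_size, rate_limit=0, quiet=True):
            out.write(chunk.getvalue())
    # Decompress by feeding the compressed bytes back through `lzop -d`.
    lzop = decompression_pipe(restored_path)
    with open(compressed_path, 'rb') as compressed:
        lzop.stdin.write(compressed.read())
    lzop.stdin.close()
    lzop.wait()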
|
test/integration/test_multiple_model_endpoint.py | ipanepen/sagemaker-scikit-learn-container | 105 | 12699599 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import encodings
import json
import os
import subprocess
import sys
import time
import pytest
import requests
PING_URL = 'http://localhost:8080/ping'
INVOCATION_URL = 'http://localhost:8080/models/{}/invoke'
MODELS_URL = 'http://localhost:8080/models'
DELETE_MODEL_URL = 'http://localhost:8080/models/{}'
path = os.path.abspath(__file__)
resource_path = os.path.join(os.path.dirname(path), '..', 'resources')
@pytest.fixture(scope='session', autouse=True)
def volume():
try:
model_dir = os.path.join(resource_path, 'models')
subprocess.check_call(
'docker volume create --name dynamic_endpoint_model_volume --opt type=none '
'--opt device={} --opt o=bind'.format(model_dir).split())
yield model_dir
finally:
subprocess.check_call('docker volume rm dynamic_endpoint_model_volume'.split())
@pytest.fixture(scope='session', autouse=True)
def modulevolume():
try:
module_dir = os.path.join(resource_path, 'module')
subprocess.check_call(
'docker volume create --name dynamic_endpoint_module_volume --opt type=none '
'--opt device={} --opt o=bind'.format(module_dir).split())
yield module_dir
finally:
subprocess.check_call('docker volume rm dynamic_endpoint_module_volume'.split())
@pytest.fixture(scope='module', autouse=True)
def container(request, docker_base_name, tag):
test_name = 'sagemaker-sklearn-serving-test'
try:
command = (
'docker run --name {} -p 8080:8080'
' --mount type=volume,source=dynamic_endpoint_model_volume,target=/opt/ml/model,readonly'
' --mount type=volume,source=dynamic_endpoint_module_volume,target=/user_module,readonly'
' -e SAGEMAKER_BIND_TO_PORT=8080'
' -e SAGEMAKER_SAFE_PORT_RANGE=9000-9999'
' -e SAGEMAKER_MULTI_MODEL=true'
' -e SAGEMAKER_PROGRAM={}'
' -e SAGEMAKER_SUBMIT_DIRECTORY={}'
' {}:{} serve'
).format(test_name, 'script.py', "/user_module/user_code.tar.gz", docker_base_name, tag)
proc = subprocess.Popen(command.split(), stdout=sys.stdout, stderr=subprocess.STDOUT)
attempts = 0
while attempts < 5:
time.sleep(3)
try:
requests.get('http://localhost:8080/ping')
break
except Exception:
attempts += 1
pass
yield proc.pid
finally:
subprocess.check_call('docker rm -f {}'.format(test_name).split())
def make_invocation_request(data, model_name, content_type='text/csv'):
headers = {
'Content-Type': content_type,
}
response = requests.post(INVOCATION_URL.format(model_name), data=data, headers=headers)
return response.status_code, json.loads(response.content.decode(encodings.utf_8.getregentry().name))
def make_list_model_request():
response = requests.get(MODELS_URL)
return response.status_code, json.loads(response.content.decode(encodings.utf_8.getregentry().name))
def make_get_model_request(model_name):
response = requests.get(MODELS_URL + '/{}'.format(model_name))
return response.status_code, json.loads(response.content.decode(encodings.utf_8.getregentry().name))
def make_load_model_request(data, content_type='application/json'):
headers = {
'Content-Type': content_type
}
response = requests.post(MODELS_URL, data=data, headers=headers)
return response.status_code, response.content.decode(encodings.utf_8.getregentry().name)
def make_unload_model_request(model_name):
response = requests.delete(DELETE_MODEL_URL.format(model_name))
return response.status_code, response.content.decode(encodings.utf_8.getregentry().name)
def test_ping():
res = requests.get(PING_URL)
assert res.status_code == 200
def test_list_models_empty():
code, res = make_list_model_request()
# assert code == 200
assert res == {'models': []}
def test_delete_unloaded_model():
# unloads the given model/version, no-op if not loaded
model_name = 'non-existing-model'
code, res = make_unload_model_request(model_name)
assert code == 404
def test_load_and_unload_model():
model_name = 'pickled-model-1'
model_data = {
'model_name': model_name,
'url': '/opt/ml/model/{}'.format(model_name)
}
code, res = make_load_model_request(json.dumps(model_data))
assert code == 200, res
res_json = json.loads(res)
assert res_json['status'] == 'Workers scaled'
code, res = make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0', model_name)
assert code == 200, res
code, res = make_unload_model_request(model_name)
assert code == 200, res
res_json = json.loads(res)
assert res_json['status'] == "Model \"{}\" unregistered".format(model_name), res
code, res = make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0', model_name)
assert code == 404, res
assert res['message'] == "Model not found: {}".format(model_name), res
def test_load_and_unload_two_models():
model_name_0 = 'pickled-model-1'
model_data_0 = {
'model_name': model_name_0,
'url': '/opt/ml/model/{}'.format(model_name_0)
}
code, res = make_load_model_request(json.dumps(model_data_0))
assert code == 200, res
res_json = json.loads(res)
assert res_json['status'] == 'Workers scaled'
model_name_1 = 'pickled-model-2'
model_data_1 = {
'model_name': model_name_1,
'url': '/opt/ml/model/{}'.format(model_name_1)
}
code, res = make_load_model_request(json.dumps(model_data_1))
assert code == 200, res
res_json = json.loads(res)
assert res_json['status'] == 'Workers scaled'
code, res = make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0', model_name_0)
assert code == 200, res
code, res = make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0', model_name_1)
assert code == 200, res
code, res = make_unload_model_request(model_name_0)
assert code == 200, res
res_json = json.loads(res)
assert res_json['status'] == "Model \"{}\" unregistered".format(model_name_0), res
code, res = make_unload_model_request(model_name_1)
assert code == 200, res
res_json = json.loads(res)
assert res_json['status'] == "Model \"{}\" unregistered".format(model_name_1), res
def test_container_start_invocation_fail():
x = {
'instances': [1.0, 2.0, 5.0]
}
code, res = make_invocation_request(json.dumps(x), 'half_plus_three')
assert code == 404
assert res['message'] == "Model not found: {}".format('half_plus_three')
def test_load_one_model_two_times():
model_name = 'pickled-model-1'
model_data = {
'model_name': model_name,
'url': '/opt/ml/model/{}'.format(model_name)
}
code_load, res = make_load_model_request(json.dumps(model_data))
assert code_load == 200, res
res_json = json.loads(res)
assert res_json['status'] == 'Workers scaled'
code_load, res = make_load_model_request(json.dumps(model_data))
assert code_load == 409
res_json = json.loads(res)
assert res_json['message'] == 'Model {} is already registered.'.format(model_name)
|
data/transcoder_evaluation_gfg/python/SUBARRAYS_DISTINCT_ELEMENTS.py | mxl1n/CodeGen | 241 | 12699605 | <gh_stars>100-1000
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , n ) :
s = [ ]
j = 0
ans = 0
for i in range ( n ) :
while ( j < n and ( arr [ j ] not in s ) ) :
s.append ( arr [ j ] )
j += 1
ans += ( ( j - i ) * ( j - i + 1 ) ) // 2
s.remove ( arr [ i ] )
return ans
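# Note on the gold solution above (added for clarity): for each start index i
# the window [i, j) is grown while its elements stay distinct, and the
# triangular term ((j - i) * (j - i + 1)) // 2 adds 1 + 2 + ... + (j - i),
# i.e. the lengths of every distinct-element subarray starting at i.
# Worked example, arr = [1, 2, 1]:
#   i = 0 -> window [1, 2] contributes 1 + 2 = 3
#   i = 1 -> window [2, 1] contributes 1 + 2 = 3
#   i = 2 -> window [1]    contributes 1
# so f_gold([1, 2, 1], 3) == 7.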
#TOFILL
if __name__ == '__main__':
param = [
([3, 4, 5, 6, 12, 15, 16, 17, 20, 20, 22, 24, 24, 27, 28, 34, 37, 39, 39, 41, 43, 49, 49, 51, 55, 62, 63, 67, 71, 74, 74, 74, 77, 84, 84, 89, 89, 97, 99],24,),
([-8, 54, -22, 18, 20, 44, 0, 54, 90, -4, 4, 40, -74, -16],13,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,),
([36, 71, 36, 58, 38, 90, 17],4,),
([-90, -32, -16, 18, 38, 82],5,),
([1, 0, 1],2,),
([3, 11, 21, 25, 28, 28, 38, 42, 48, 53, 55, 55, 55, 58, 71, 75, 79, 80, 80, 94, 96, 99],20,),
([-16, -52, -4, -46, 54, 0, 8, -64, -82, -10, -62, -10, 58, 44, -28, 86, -24, 16, 44, 22, -28, -42, -52, 8, 76, -44, -34, 2, 88, -88, -14, -84, -36, -68, 76, 20, 20, -50],35,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],27,),
([19, 13, 61, 32, 92, 90, 12, 81, 52],5,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
tests/test_resolvers/test_environment_variable.py | dennisconrad/sceptre | 493 | 12699606 | # -*- coding: utf-8 -*-
from mock import patch
from sceptre.resolvers.environment_variable import EnvironmentVariable
class TestEnvironmentVariableResolver(object):
def setup_method(self, test_method):
self.environment_variable_resolver = EnvironmentVariable(
argument=None
)
@patch("sceptre.resolvers.environment_variable.os")
def test_resolving_with_set_environment_variable(self, mock_os):
mock_os.environ = {"VARIABLE": "value"}
self.environment_variable_resolver.argument = "VARIABLE"
response = self.environment_variable_resolver.resolve()
assert response == "value"
def test_resolving_with_unset_environment_variable(self):
self.environment_variable_resolver.argument = "UNSETVARIABLE"
response = self.environment_variable_resolver.resolve()
assert response is None
def test_resolving_with_environment_variable_name_as_none(self):
self.environment_variable_resolver.argument = None
response = self.environment_variable_resolver.resolve()
assert response is None
|
utils/ema.py | yoxu515/aot-benchmark | 105 | 12699623 | <reponame>yoxu515/aot-benchmark
from __future__ import division
from __future__ import unicode_literals
import torch
def get_param_buffer_for_ema(model,
update_buffer=False,
required_buffers=['running_mean', 'running_var']):
params = model.parameters()
all_param_buffer = [p for p in params if p.requires_grad]
if update_buffer:
named_buffers = model.named_buffers()
for key, value in named_buffers:
for buffer_name in required_buffers:
if buffer_name in key:
all_param_buffer.append(value)
break
return all_param_buffer
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
"""
def __init__(self, parameters, decay, use_num_updates=True):
"""
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the result of
`model.parameters()`.
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
self.shadow_params = [p.clone().detach() for p in parameters]
self.collected_params = []
def update(self, parameters):
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object.
"""
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(decay,
(1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
for s_param, param in zip(self.shadow_params, parameters):
s_param.sub_(one_minus_decay * (s_param - param))
def copy_to(self, parameters):
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages.
"""
for s_param, param in zip(self.shadow_params, parameters):
param.data.copy_(s_param.data)
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
        del self.collected_params
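
# Illustrative usage sketch (not part of the original module), assuming a
# typical PyTorch training loop; the model, optimizer, dataloader and loss_fn
# are placeholders supplied by the caller.
def _ema_usage_example(model, optimizer, dataloader, loss_fn):
    params = get_param_buffer_for_ema(model, update_buffer=True)
    ema = ExponentialMovingAverage(params, decay=0.999)
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), targets)
        loss.backward()
        optimizer.step()
        ema.update(params)  # track the exponentially averaged weights
    # Evaluate with the averaged weights, then restore the raw ones.
    ema.store(params)
    ema.copy_to(params)
    # ... run validation here ...
    ema.restore(params)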
|
needl/adapters/__init__.py | NeuroWinter/Needl | 413 | 12699624 | import requests
from requests.packages.urllib3 import poolmanager
__all__ = (
    'poolmanager',
) |
bodymocap/utils/geometry.py | Paultool/frankmocap | 1,612 | 12699637 | <filename>bodymocap/utils/geometry.py
# Original code from SPIN: https://github.com/nkolot/SPIN
import torch
from torch.nn import functional as F
import numpy as np
import torchgeometry
"""
Useful geometric operations, e.g. Perspective projection and a differentiable Rodrigues formula
Parts of the code are taken from https://github.com/MandyMo/pytorch_HMR
"""
def batch_rodrigues(theta):
"""Convert axis-angle representation to rotation matrix.
Args:
theta: size = [B, 3]
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
l1norm = torch.norm(theta + 1e-8, p = 2, dim = 1)
angle = torch.unsqueeze(l1norm, -1)
normalized = torch.div(theta, angle)
angle = angle * 0.5
v_cos = torch.cos(angle)
v_sin = torch.sin(angle)
quat = torch.cat([v_cos, v_sin * normalized], dim = 1)
return quat_to_rotmat(quat)
def quat_to_rotmat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: size = [B, 4] 4 <===>(w, x, y, z)
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = quat
norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def cross_product(u, v):
batch = u.shape[0]
i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1)
return out
def normalize_vector(v):
batch = v.shape[0]
v_mag = torch.sqrt(v.pow(2).sum(1)) # batch
v_mag = torch.max(v_mag, v.new([1e-8]))
v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1])
v = v/v_mag
return v
#Code from
def rot6d_to_rotmat(x):
"""Convert 6D rotation representation to 3x3 rotation matrix.
Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
Input:
(B,6) Batch of 6-D rotation representations
Output:
(B,3,3) Batch of corresponding rotation matrices
"""
x = x.view(-1,3,2)
a1 = x[:, :, 0]
a2 = x[:, :, 1]
b1 = F.normalize(a1)
b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
b3 = torch.cross(b1, b2)
return torch.stack((b1, b2, b3), dim=-1)
def perspective_projection(points, rotation, translation,
focal_length, camera_center):
"""
This function computes the perspective projection of a set of points.
Input:
points (bs, N, 3): 3D points
rotation (bs, 3, 3): Camera rotation
translation (bs, 3): Camera translation
focal_length (bs,) or scalar: Focal length
camera_center (bs, 2): Camera center
"""
batch_size = points.shape[0]
K = torch.zeros([batch_size, 3, 3], device=points.device)
K[:,0,0] = focal_length
K[:,1,1] = focal_length
K[:,2,2] = 1.
K[:,:-1, -1] = camera_center
# Transform points
points = torch.einsum('bij,bkj->bki', rotation, points)
points = points + translation.unsqueeze(1)
# Apply perspective distortion
projected_points = points / points[:,:,-1].unsqueeze(-1)
# Apply camera intrinsics
projected_points = torch.einsum('bij,bkj->bki', K, projected_points)
return projected_points[:, :, :-1]
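
# Illustrative sketch (not part of the original module): projecting a random
# batch of 3D points with an identity camera rotation. Shapes follow the
# docstring above; the focal length and the 224x224 crop size are arbitrary.
def _perspective_projection_example():
    batch_size, num_joints = 2, 24
    points = torch.randn(batch_size, num_joints, 3) + torch.tensor([0., 0., 5.])
    rotation = torch.eye(3).unsqueeze(0).expand(batch_size, -1, -1)
    translation = torch.zeros(batch_size, 3)
    camera_center = torch.full((batch_size, 2), 112.)
    projected = perspective_projection(points, rotation, translation,
                                       focal_length=5000., camera_center=camera_center)
    return projected  # (batch_size, num_joints, 2) pixel coordinates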
def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000, img_size=224):
"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
Input:
S: (25, 3) 3D joint locations
joints: (25, 3) 2D joint locations and confidence
Returns:
(3,) camera translation vector
"""
num_joints = S.shape[0]
# focal length
f = np.array([focal_length,focal_length])
# optical center
center = np.array([img_size/2., img_size/2.])
# transformations
Z = np.reshape(np.tile(S[:,2],(2,1)).T,-1)
XY = np.reshape(S[:,0:2],-1)
O = np.tile(center,num_joints)
F = np.tile(f,num_joints)
weight2 = np.reshape(np.tile(np.sqrt(joints_conf),(2,1)).T,-1)
# least squares
Q = np.array([F*np.tile(np.array([1,0]),num_joints), F*np.tile(np.array([0,1]),num_joints), O-np.reshape(joints_2d,-1)]).T
c = (np.reshape(joints_2d,-1)-O)*Z - F*XY
# weighted least squares
W = np.diagflat(weight2)
Q = np.dot(W,Q)
c = np.dot(W,c)
# square matrix
A = np.dot(Q.T,Q)
b = np.dot(Q.T,c)
# solution
trans = np.linalg.solve(A, b)
return trans
def estimate_translation(S, joints_2d, focal_length=5000., img_size=224.):
"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
Input:
S: (B, 49, 3) 3D joint locations
joints: (B, 49, 3) 2D joint locations and confidence
Returns:
(B, 3) camera translation vectors
"""
device = S.device
# Use only joints 25:49 (GT joints)
S = S[:, 25:, :].cpu().numpy()
joints_2d = joints_2d[:, 25:, :].cpu().numpy()
joints_conf = joints_2d[:, :, -1]
joints_2d = joints_2d[:, :, :-1]
trans = np.zeros((S.shape[0], 3), dtype=np.float32)
# Find the translation for each example in the batch
for i in range(S.shape[0]):
S_i = S[i]
joints_i = joints_2d[i]
conf_i = joints_conf[i]
trans[i] = estimate_translation_np(S_i, joints_i, conf_i, focal_length=focal_length, img_size=img_size)
return torch.from_numpy(trans).to(device)
def weakProjection_gpu(skel3D, scale, trans2D ):
# if len(skel3D.shape)==1:
# skel3D = np.reshape(skel3D, (-1,3))
skel3D = skel3D.view((skel3D.shape[0],-1,3))
trans2D = trans2D.view((trans2D.shape[0],1,2))
scale = scale.view((scale.shape[0],1,1))
skel3D_proj = scale* skel3D[:,:,:2] + trans2D
return skel3D_proj#skel3D_proj.view((skel3D.shape[0],-1)) #(N, 19*2) o
#(57) (1) (2)
def weakProjection(skel3D, scale, trans2D ):
skel3D_proj = scale* skel3D[:,:2] + trans2D
return skel3D_proj#skel3D_proj.view((skel3D.shape[0],-1)) #(N, 19*2) o
def rotmat_to_angleaxis(init_pred_rotmat):
"""
init_pred_rotmat: torch.tensor with (24,3,3) dimension
"""
device = init_pred_rotmat.device
ones = torch.tensor([0,0,1], dtype=torch.float32,).view(1, 3, 1).expand(init_pred_rotmat.shape[1], -1, -1).to(device)
pred_rotmat_hom = torch.cat([ init_pred_rotmat.view(-1, 3, 3),ones ], dim=-1) #24,3,4
pred_aa = torchgeometry.rotation_matrix_to_angle_axis(pred_rotmat_hom).contiguous().view(1, -1) #[1,72]
# tgm.rotation_matrix_to_angle_axis returns NaN for 0 rotation, so manually hack it
pred_aa[torch.isnan(pred_aa)] = 0.0 #[1,72]
pred_aa = pred_aa.view(1,24,3)
return pred_aa
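
# Illustrative sketch (not part of the original module): round-tripping random
# axis-angle joint rotations through batch_rodrigues and rotmat_to_angleaxis.
# Small numerical differences are expected from the quaternion conversions.
def _rotation_roundtrip_example():
    aa = 0.3 * torch.randn(24, 3)                        # 24 joints, axis-angle
    rotmats = batch_rodrigues(aa)                        # (24, 3, 3)
    aa_rec = rotmat_to_angleaxis(rotmats.unsqueeze(0))   # (1, 24, 3)
    return torch.max(torch.abs(aa_rec[0] - aa))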
|
baselines/CACB/.old/build-cacb-tree.py | sordonia/HierarchicalEncoderDecoder | 116 | 12699665 | """
__author__ <NAME>
"""
import logging
import cPickle
import os
import sys
import argparse
import cacb
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('cluster_file', type=str, help='Pickled cluster file')
parser.add_argument('sessions', type=str, help='Session file')
args = parser.parse_args()
assert os.path.isfile(args.sessions) \
and os.path.isfile(args.cluster_file)
cacbt = cacb.CACB(4)
cacbt.with_cluster(args.cluster_file)
cluster_coverage = 0
f = open(args.sessions, 'r')
for num, session in enumerate(f):
cluster_coverage += cacbt.add_session(session)
if num % 1000 == 0:
print '{} sessions / {} cc / {} nodes in the PST'.format(num, cluster_coverage, cacbt.num_nodes)
f.close()
# print '{} nodes final'.format(cacbt.num_nodes)
cacbt.prune()
# print '{} nodes after pruning'.format(cacbt.num_nodes)
cacbt.save(args.sessions + "_CACB.pkl")
|
07_compiling/cffi/diffusion_2d_cffi.py | siddheshmhatre/high_performance_python | 698 | 12699687 | #!/usr/bin/env python2.7
import numpy as np
import time
from cffi import FFI, verifier
grid_shape = (512, 512)
ffi = FFI()
ffi.cdef(
r''' void evolve(int Nx, int Ny, double **in, double **out, double D, double dt); ''')
lib = ffi.dlopen("../diffusion.so")
def evolve(grid, dt, out, D=1.0):
X, Y = grid_shape
pointer_grid = ffi.cast('double**', grid.ctypes.data)
pointer_out = ffi.cast('double**', out.ctypes.data)
lib.evolve(X, Y, pointer_grid, pointer_out, D, dt)
def run_experiment(num_iterations):
scratch = np.zeros(grid_shape, dtype=np.double)
grid = np.zeros(grid_shape, dtype=np.double)
block_low = int(grid_shape[0] * .4)
block_high = int(grid_shape[0] * .5)
grid[block_low:block_high, block_low:block_high] = 0.005
start = time.time()
for i in range(num_iterations):
evolve(grid, 0.1, scratch)
grid, scratch = scratch, grid
return time.time() - start
if __name__ == "__main__":
t = run_experiment(500)
print t
verifier.cleanup_tmpdir()
|
grappa/empty.py | sgissinger/grappa | 137 | 12699700 | # -*- coding: utf-8 -*-
class Empty(object):
"""
    Empty object represents the emptiness state in `grappa`.
"""
def __repr__(self):
return 'Empty'
def __len__(self):
return 0
# Object reference representing emptiness
empty = Empty()
|
animalai/animalai/communicator_objects/__init__.py | southpawac/AnimalAI-Olympics | 607 | 12699725 | from .arenas_configurations_proto_pb2 import *
from .arena_configuration_proto_pb2 import *
from .items_to_spawn_proto_pb2 import *
from .vector_proto_pb2 import *
from .__init__ import *
|
slybot/slybot/fieldtypes/url.py | rmcwilliams2004/PAscrape | 6,390 | 12699774 | import re
from six.moves.urllib.parse import urljoin
from scrapely.extractors import url as strip_url
from scrapy.utils.url import safe_download_url
from scrapy.utils.markup import unquote_markup
from slybot.baseurl import get_base_url
disallowed = re.compile('[\x00-\x1F\x7F]')
class UrlFieldTypeProcessor(object):
"""Renders URLs as links"""
name = 'url'
description = 'URL'
limit = 80
def extract(self, text):
if text is not None:
return strip_url(text)
return ''
def adapt(self, text, htmlpage=None):
if htmlpage is None:
return text
if text is None:
return
encoding = getattr(htmlpage, 'encoding', 'utf-8')
text = text.encode(encoding)
unquoted = unquote_markup(text, encoding=encoding)
cleaned = strip_url(disallowed.sub('', unquoted))
base = get_base_url(htmlpage).encode(encoding)
base_url = strip_url(unquote_markup(base, encoding=encoding))
joined = urljoin(base_url, cleaned)
return safe_download_url(joined)
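
# Illustrative sketch (not part of the original module): `extract` only strips
# whitespace-like noise from a raw URL string. `adapt` additionally needs an
# htmlpage object (for the base URL and encoding), so it is not shown here.
def _url_processor_example():
    processor = UrlFieldTypeProcessor()
    return processor.extract('  http://example.com/some%20page  ')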
|
utility/audiosegment.py | jasonaidm/Speech_emotion_recognition_BLSTM | 245 | 12699791 | <reponame>jasonaidm/Speech_emotion_recognition_BLSTM
"""
This module simply exposes a wrapper of a pydub.AudioSegment object.
"""
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import math
import numpy as np
import pickle
import pydub
import os
import random
import scipy.signal as signal
import subprocess
import sys
import tempfile
import warnings
import webrtcvad
MS_PER_S = 1000
S_PER_MIN = 60
MS_PER_MIN = MS_PER_S * S_PER_MIN
def deprecated(func):
"""
Deprecator decorator.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn("Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return new_func
class AudioSegment:
"""
This class is a wrapper for a pydub.AudioSegment that provides additional methods.
"""
def __init__(self, pydubseg, name):
self.seg = pydubseg
self.name = name
def __getattr__(self, attr):
orig_attr = self.seg.__getattribute__(attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
if result == self.seg:
return self
elif type(result) == pydub.AudioSegment:
return AudioSegment(result, self.name)
else:
return result
return hooked
else:
return orig_attr
def __len__(self):
return len(self.seg)
def __eq__(self, other):
return self.seg == other
def __ne__(self, other):
return self.seg != other
    def __iter__(self):
return (x for x in self.seg)
def __getitem__(self, millisecond):
return AudioSegment(self.seg[millisecond], self.name)
def __add__(self, arg):
if type(arg) == AudioSegment:
self.seg._data = self.seg._data + arg.seg._data
else:
self.seg = self.seg + arg
return self
def __radd__(self, rarg):
return self.seg.__radd__(rarg)
def __repr__(self):
return str(self)
def __str__(self):
s = "%s: %s channels, %s bit, sampled @ %s kHz, %.3fs long" %\
(self.name, str(self.channels), str(self.sample_width * 8),\
str(self.frame_rate / 1000.0), self.duration_seconds)
return s
def __sub__(self, arg):
if type(arg) == AudioSegment:
self.seg = self.seg - arg.seg
else:
self.seg = self.seg - arg
return self
def __mul__(self, arg):
if type(arg) == AudioSegment:
self.seg = self.seg * arg.seg
else:
self.seg = self.seg * arg
return self
@property
def spl(self):
"""
Sound Pressure Level - defined as 20 * log10(abs(value)).
Returns a numpy array of SPL dB values.
"""
return 20.0 * np.log10(np.abs(self.to_numpy_array() + 1E-9))
def _bandpass_filter(self, data, low, high, fs, order=5):
"""
:param data: The data (numpy array) to be filtered.
:param low: The low cutoff in Hz.
:param high: The high cutoff in Hz.
:param fs: The sample rate (in Hz) of the data.
:param order: The order of the filter. The higher the order, the tighter the roll-off.
:returns: Filtered data (numpy array).
"""
nyq = 0.5 * fs
low = low / nyq
high = high / nyq
b, a = signal.butter(order, [low, high], btype='band')
y = signal.lfilter(b, a, data)
return y
def auditory_scene_analysis(self):
"""
Algorithm based on paper: Auditory Segmentation Based on Onset and Offset Analysis,
by Hu and Wang, 2007.
"""
def lowpass_filter(data, cutoff, fs, order=5):
"""
"""
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='low', analog=False)
y = signal.lfilter(b, a, data)
return y
import matplotlib.pyplot as plt
def visualize_time_domain(seg, title=""):
plt.plot(seg)
plt.title(title)
plt.show()
plt.clf()
def visualize(spect, frequencies, title=""):
i = 0
for freq, (index, row) in zip(frequencies[::-1], enumerate(spect[::-1, :])):
plt.subplot(spect.shape[0], 1, index + 1)
if i == 0:
plt.title(title)
i += 1
plt.ylabel("{0:.0f}".format(freq))
plt.plot(row)
plt.show()
plt.clf()
# Normalize self into 25dB average SPL
normalized = self.normalize_spl_by_average(db=25)
visualize_time_domain(normalized.to_numpy_array(), "Normalized")
# Do a band-pass filter in each frequency
data = normalized.to_numpy_array()
start_frequency = 50
stop_frequency = 8000
start = np.log10(start_frequency)
stop = np.log10(stop_frequency)
frequencies = np.logspace(start, stop, num=10, endpoint=True, base=10.0)
print("Dealing with the following frequencies:", frequencies)
rows = [self._bandpass_filter(data, freq*0.8, freq*1.2, self.frame_rate) for freq in frequencies]
rows = np.array(rows)
spect = np.vstack(rows)
visualize(spect, frequencies, "After bandpass filtering (cochlear model)")
# Half-wave rectify each frequency channel
spect[spect < 0] = 0
visualize(spect, frequencies, "After half-wave rectification in each frequency")
# Low-pass filter each frequency channel
spect = np.apply_along_axis(lowpass_filter, 1, spect, 30, self.frame_rate, 6)
visualize(spect, frequencies, "After low-pass filtering in each frequency")
# Downsample each frequency to 400 Hz
downsample_freq_hz = 400
if self.frame_rate > downsample_freq_hz:
step = int(round(self.frame_rate / downsample_freq_hz))
spect = spect[:, ::step]
visualize(spect, frequencies, "After downsampling in each frequency")
# Now you have the temporal envelope of each frequency channel
# Smoothing
scales = [(6, 1/4), (6, 1/14), (1/2, 1/14)]
thetas = [0.95, 0.95, 0.85]
## For each (sc, st) scale, smooth across time using st, then across frequency using sc
gaussian = lambda x, mu, sig: np.exp(-np.power(x - mu, 2.0) / (2 * np.power(sig, 2.0)))
        gaussian_kernel = lambda sig: gaussian(np.linspace(-10, 10, len(frequencies) // 2), 0, sig)
spectrograms = []
for sc, st in scales:
time_smoothed = np.apply_along_axis(lowpass_filter, 1, spect, 1/st, downsample_freq_hz, 6)
visualize(time_smoothed, frequencies, "After time smoothing with scale: " + str(st))
freq_smoothed = np.apply_along_axis(np.convolve, 0, spect, gaussian_kernel(sc))
spectrograms.append(freq_smoothed)
visualize(freq_smoothed, frequencies, "After time and frequency smoothing with scales (freq) " + str(sc) + " and (time) " + str(st))
## Now we have a set of scale-space spectrograms of different scales (sc, st)
# Onset/Offset Detection and Matching
def theta_on(spect):
return np.nanmean(spect) + np.nanstd(spect)
def compute_peaks_or_valleys_of_first_derivative(s, do_peaks=True):
"""
Takes a spectrogram and returns a 2D array of the form:
0 0 0 1 0 0 1 0 0 0 1 <-- Frequency 0
0 0 1 0 0 0 0 0 0 1 0 <-- Frequency 1
0 0 0 0 0 0 1 0 1 0 0 <-- Frequency 2
*** Time axis *******
Where a 1 means that the value in that time bin in the spectrogram corresponds to
a peak/valley in the first derivative.
"""
gradient = np.nan_to_num(np.apply_along_axis(np.gradient, 1, s), copy=False)
half_window = 4
if do_peaks:
indexes = [signal.argrelextrema(gradient[i, :], np.greater, order=half_window) for i in range(gradient.shape[0])]
else:
indexes = [signal.argrelextrema(gradient[i, :], np.less, order=half_window) for i in range(gradient.shape[0])]
extrema = np.zeros(s.shape)
for row_index, index_array in enumerate(indexes):
# Each index_array is a list of indexes corresponding to all the extrema in a given row
for col_index in index_array:
extrema[row_index, col_index] = 1
return extrema
for spect, (sc, st) in zip(spectrograms, scales):
# Compute sudden upward changes in spect, these are onsets of events
onsets = compute_peaks_or_valleys_of_first_derivative(spect)
# Compute sudden downward changes in spect, these are offsets of events
offsets = compute_peaks_or_valleys_of_first_derivative(spect, do_peaks=False)
print("TOTAL ONSETS:", np.sum(onsets, axis=1))
print("TOTAL OFFSETS:", np.sum(offsets, axis=1))
exit()
# onsets and offsets are 2D arrays
## Determine the offset time for each onset:
### If t_on[c, i] represents the time of the ith onset in frequency channel c, the corresponding offset
### must occur between t_on[c, i] and t_on[c, i+1]
### If there are more than one offsets candidates in this range, choose the one with largest intensity decrease.
## Create onset/offset fronts by connecting onsets across frequency channels (connect two onsets
## if they occur within 20ms of each other). Start over whenever a frequency band does not contain an offset
## in this range. Do the same procedure for offsets. Now you have onset and offset fronts.
## Now hook up the onsets with the offsets to form segments:
## For each onset front, (t_on[c, i1, t_on[c + 1, i2], ..., t_on[c + m - 1, im]):
## matching_offsets = (t_off[c, i1], t_off[c + 1, i2], ..., t_off[c + m - 1, im])
## Get all offset fronts which contain at least one of offset time found in matching_offsets
## Among these offset fronts, the one that crosses the most of matching_offsets is chosen,
## - call this offset front: matching_offset_front
## Update all t_offs in matching_offsets whose 'c's are in matching_offset_front to be 'matched', and
## - update their times to the corresponding channel offset in matching_offset_front.
## If all t_offs in matching_offsets are 'matched', continue to next onset front
## Now go through all the segments you have created and break them up along frequencies if the temporal
## envelopes don't match well enough. That is, if we have two adjacent channels c and c+1, and they
## are part of the same segment as determined above, break this segment into two along these lines
## if the correlation between them is below theta_c. Theta_c is thetas[i] where i depends on the scale.
# Multiscale Integration
##
## TODO
def detect_voice(self, prob_detect_voice=0.5):
"""
Returns self as a list of tuples:
[('v', voiced segment), ('u', unvoiced segment), (etc.)]
The overall order of the AudioSegment is preserved.
:param prob_detect_voice: The raw probability that any random 20ms window of the audio file
contains voice.
:returns: The described list.
"""
assert self.frame_rate in (48000, 32000, 16000, 8000), "Try resampling to one of the allowed frame rates."
assert self.sample_width == 2, "Try resampling to 16 bit."
assert self.channels == 1, "Try resampling to one channel."
class model_class:
def __init__(self, aggressiveness):
self.v = webrtcvad.Vad(int(aggressiveness))
def predict(self, vector):
if self.v.is_speech(vector.raw_data, vector.frame_rate):
return 1
else:
return 0
model = model_class(aggressiveness=1)
pyesno = 0.3 # Probability of the next 20 ms being unvoiced given that this 20 ms was voiced
pnoyes = 0.2 # Probability of the next 20 ms being voiced given that this 20 ms was unvoiced
p_realyes_outputyes = 0.4 # WebRTCVAD has a very high FP rate - just because it says yes, doesn't mean much
p_realyes_outputno = 0.05 # If it says no, we can be very certain that it really is a no
p_yes_raw = prob_detect_voice
filtered = self.detect_event(model=model,
ms_per_input=20,
transition_matrix=(pyesno, pnoyes),
model_stats=(p_realyes_outputyes, p_realyes_outputno),
event_length_s=0.25,
prob_raw_yes=p_yes_raw)
ret = []
for tup in filtered:
t = ('v', tup[1]) if tup[0] == 'y' else ('u', tup[1])
ret.append(t)
return ret
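
    # Illustrative usage sketch (not part of the original class): splitting a
    # recording into its voiced pieces. Resampling first satisfies the rate,
    # width and channel requirements asserted in detect_voice above.
    def _detect_voice_usage_example(self):
        seg = self.resample(sample_rate_Hz=16000, sample_width=2, channels=1)
        voiced = [piece for label, piece in seg.detect_voice() if label == 'v']
        if len(voiced) > 1:
            return voiced[0].reduce(voiced[1:])
        return voiced[0] if voiced else None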
def dice(self, seconds, zero_pad=False):
"""
Cuts the AudioSegment into `seconds` segments (at most). So for example, if seconds=10,
this will return a list of AudioSegments, in order, where each one is at most 10 seconds
long. If `zero_pad` is True, the last item AudioSegment object will be zero padded to result
in `seconds` seconds.
:param seconds: The length of each segment in seconds. Can be either a float/int, in which case
`self.duration_seconds` / `seconds` are made, each of `seconds` length, or a
list-like can be given, in which case the given list must sum to
`self.duration_seconds` and each segment is specified by the list - e.g.
the 9th AudioSegment in the returned list will be `seconds[8]` seconds long.
:param zero_pad: Whether to zero_pad the final segment if necessary. Ignored if `seconds` is
a list-like.
:returns: A list of AudioSegments, each of which is the appropriate number of seconds long.
:raises: ValueError if a list-like is given for `seconds` and the list's durations do not sum
to `self.duration_seconds`.
"""
try:
total_s = sum(seconds)
if not (self.duration_seconds <= total_s + 1 and self.duration_seconds >= total_s - 1):
raise ValueError("`seconds` does not sum to within one second of the duration of this AudioSegment.\
given total seconds: %s and self.duration_seconds: %s" % (total_s, self.duration_seconds))
starts = []
stops = []
time_ms = 0
for dur in seconds:
starts.append(time_ms)
time_ms += dur * MS_PER_S
stops.append(time_ms)
zero_pad = False
except TypeError:
# `seconds` is not a list
starts = range(0, int(round(self.duration_seconds * MS_PER_S)), int(round(seconds * MS_PER_S)))
stops = (min(self.duration_seconds * MS_PER_S, start + seconds * MS_PER_S) for start in starts)
outs = [self[start:stop] for start, stop in zip(starts, stops)]
out_lens = [out.duration_seconds for out in outs]
        # Check if our last slice is within one ms of expected - if so, we don't need to zero pad.
        # Note that out_lens holds durations in seconds, so the comparison and padding are done in seconds too.
        if zero_pad and not (seconds - 1 / MS_PER_S <= out_lens[-1] <= seconds + 1 / MS_PER_S):
            num_zeros = int(round(self.frame_rate * (seconds - out_lens[-1])))
outs[-1] = outs[-1].zero_extend(num_samples=num_zeros)
return outs
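
    # Illustrative usage sketch (not part of the original class): cutting the
    # segment into 10-second pieces and zero-padding the last one so that
    # every piece has the same nominal length.
    def _dice_usage_example(self):
        pieces = self.dice(seconds=10, zero_pad=True)
        return [round(p.duration_seconds, 3) for p in pieces]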
def detect_event(self, model, ms_per_input, transition_matrix, model_stats, event_length_s,
start_as_yes=False, prob_raw_yes=0.5):
"""
A list of tuples of the form [('n', AudioSegment), ('y', AudioSegment), etc.] is returned, where tuples
of the form ('n', AudioSegment) are the segments of sound where the event was not detected,
while ('y', AudioSegment) tuples were the segments of sound where the event was detected.
.. code-block:: python
# Example usage
import audiosegment
import keras
import keras.models
import numpy as np
import sys
class Model:
def __init__(self, modelpath):
self.model = keras.models.load_model(modelpath)
def predict(self, seg):
_bins, fft_vals = seg.fft()
fft_vals = np.abs(fft_vals) / len(fft_vals)
predicted_np_form = self.model.predict(np.array([fft_vals]), batch_size=1)
prediction_as_int = int(round(predicted_np_form[0][0]))
return prediction_as_int
modelpath = sys.argv[1]
wavpath = sys.argv[2]
model = Model(modelpath)
seg = audiosegment.from_file(wavpath).resample(sample_rate_Hz=32000, sample_width=2, channels=1)
pyes_to_no = 0.3 # The probability of one 30 ms sample being an event, and the next one not
pno_to_yes = 0.2 # The probability of one 30 ms sample not being an event, and the next one yes
ptrue_pos_rate = 0.8 # The true positive rate (probability of a predicted yes being right)
pfalse_neg_rate = 0.3 # The false negative rate (probability of a predicted no being wrong)
raw_prob = 0.7 # The raw probability of seeing the event in any random 30 ms slice of this file
events = seg.detect_event(model, ms_per_input=30, transition_matrix=[pyes_to_no, pno_to_yes],
model_stats=[ptrue_pos_rate, pfalse_neg_rate], event_length_s=0.25,
prob_raw_yes=raw_prob)
nos = [event[1] for event in events if event[0] == 'n']
yeses = [event[1] for event in events if event[0] == 'y']
if len(nos) > 1:
notdetected = nos[0].reduce(nos[1:])
notdetected.export("notdetected.wav", format="WAV")
if len(yeses) > 1:
detected = yeses[0].reduce(yeses[1:])
detected.export("detected.wav", format="WAV")
:param model: The model. The model must have a predict() function which takes an AudioSegment
of `ms_per_input` number of ms and which outputs 1 if the audio event is detected
in that input, and 0 if not. Make sure to resample the AudioSegment to the right
values before calling this function on it.
:param ms_per_input: The number of ms of AudioSegment to be fed into the model at a time. If this does not
come out even, the last AudioSegment will be zero-padded.
:param transition_matrix: An iterable of the form: [p(yes->no), p(no->yes)]. That is, the probability of moving
from a 'yes' state to a 'no' state and the probability of vice versa.
:param model_stats: An iterable of the form: [p(reality=1|output=1), p(reality=1|output=0)]. That is,
the probability of the ground truth really being a 1, given that the model output a 1,
and the probability of the ground truth being a 1, given that the model output a 0.
:param event_length_s: The typical duration of the event you are looking for in seconds (can be a float).
:param start_as_yes: If True, the first `ms_per_input` will be in the 'y' category. Otherwise it will be
in the 'n' category.
:param prob_raw_yes: The raw probability of finding the event in any given `ms_per_input` vector.
:returns: A list of tuples of the form [('n', AudioSegment), ('y', AudioSegment), etc.],
where over the course of the list, the AudioSegment in tuple 3 picks up
where the one in tuple 2 left off.
:raises: ValueError if `ms_per_input` is negative or larger than the number of ms in this
AudioSegment; if `transition_matrix` or `model_stats` do not have a __len__ attribute
or are not length 2; if the values in `transition_matrix` or `model_stats` are not
in the closed interval [0.0, 1.0].
"""
if ms_per_input < 0 or ms_per_input / MS_PER_S > self.duration_seconds:
raise ValueError("ms_per_input cannot be negative and cannot be longer than the duration of the AudioSegment."\
" The given value was " + str(ms_per_input))
elif not hasattr(transition_matrix, "__len__") or len(transition_matrix) != 2:
raise ValueError("transition_matrix must be an iterable of length 2.")
elif not hasattr(model_stats, "__len__") or len(model_stats) != 2:
raise ValueError("model_stats must be an iterable of length 2.")
elif any([True for prob in transition_matrix if prob > 1.0 or prob < 0.0]):
raise ValueError("Values in transition_matrix are probabilities, and so must be in the range [0.0, 1.0].")
elif any([True for prob in model_stats if prob > 1.0 or prob < 0.0]):
raise ValueError("Values in model_stats are probabilities, and so must be in the range [0.0, 1.0].")
elif prob_raw_yes > 1.0 or prob_raw_yes < 0.0:
raise ValueError("`prob_raw_yes` is a probability, and so must be in the range [0.0, 1.0]")
# Get the yeses or nos for when the filter is triggered (when the event is on/off)
filter_indices = [yes_or_no for yes_or_no in self._get_filter_indices(start_as_yes,
prob_raw_yes,
ms_per_input,
model,
transition_matrix,
model_stats)]
# Run a homogeneity filter over the values to make local regions more self-similar (reduce noise)
ret = self._homogeneity_filter(filter_indices, window_size=int(round(0.25 * MS_PER_S / ms_per_input)))
# Group the consecutive ones together
ret = self._group_filter_values(ret, ms_per_input)
# Take the groups and turn them into AudioSegment objects
real_ret = self._reduce_filtered_segments(ret)
return real_ret
def _get_filter_indices(self, start_as_yes, prob_raw_yes, ms_per_input, model, transition_matrix, model_stats):
"""
This has been broken out of the `filter` function to reduce cognitive load.
"""
filter_triggered = 1 if start_as_yes else 0
prob_raw_no = 1.0 - prob_raw_yes
for segment, _timestamp in self.generate_frames_as_segments(ms_per_input):
yield filter_triggered
observation = int(round(model.predict(segment)))
assert observation == 1 or observation == 0, "The given model did not output a 1 or a 0, output: "\
+ str(observation)
prob_hyp_yes_given_last_hyp = 1.0 - transition_matrix[0] if filter_triggered else transition_matrix[1]
prob_hyp_no_given_last_hyp = transition_matrix[0] if filter_triggered else 1.0 - transition_matrix[1]
prob_hyp_yes_given_data = model_stats[0] if observation == 1 else model_stats[1]
prob_hyp_no_given_data = 1.0 - model_stats[0] if observation == 1 else 1.0 - model_stats[1]
hypothesis_yes = prob_raw_yes * prob_hyp_yes_given_last_hyp * prob_hyp_yes_given_data
hypothesis_no = prob_raw_no * prob_hyp_no_given_last_hyp * prob_hyp_no_given_data
# make a list of ints - each is 0 or 1. The number of 1s is hypotheis_yes * 100
# the number of 0s is hypothesis_no * 100
distribution = [1 for i in range(int(round(hypothesis_yes * 100)))]
distribution.extend([0 for i in range(int(round(hypothesis_no * 100)))])
# shuffle
random.shuffle(distribution)
filter_triggered = random.choice(distribution)
def _group_filter_values(self, filter_indices, ms_per_input):
"""
This has been broken out of the `filter` function to reduce cognitive load.
"""
ret = []
for filter_value, (_segment, timestamp) in zip(filter_indices, self.generate_frames_as_segments(ms_per_input)):
if filter_value == 1:
if len(ret) > 0 and ret[-1][0] == 'n':
ret.append(['y', timestamp]) # The last one was different, so we create a new one
elif len(ret) > 0 and ret[-1][0] == 'y':
ret[-1][1] = timestamp # The last one was the same as this one, so just update the timestamp
else:
ret.append(['y', timestamp]) # This is the first one
else:
if len(ret) > 0 and ret[-1][0] == 'n':
ret[-1][1] = timestamp
elif len(ret) > 0 and ret[-1][0] == 'y':
ret.append(['n', timestamp])
else:
ret.append(['n', timestamp])
return ret
def _homogeneity_filter(self, ls, window_size):
"""
This has been broken out of the `filter` function to reduce cognitive load.
ls is a list of 1s or 0s for when the filter is on or off
"""
k = window_size
i = k
while i <= len(ls) - k:
# Get a window of k items
window = [ls[i + j] for j in range(k)]
# Change the items in the window to be more like the mode of that window
mode = 1 if sum(window) >= k / 2 else 0
for j in range(k):
ls[i+j] = mode
i += k
return ls
def _reduce_filtered_segments(self, ret):
"""
This has been broken out of the `filter` function to reduce cognitive load.
"""
real_ret = []
for i, (this_yesno, next_timestamp) in enumerate(ret):
if i > 0:
_next_yesno, timestamp = ret[i - 1]
else:
timestamp = 0
data = self[timestamp * MS_PER_S:next_timestamp * MS_PER_S].raw_data
seg = AudioSegment(pydub.AudioSegment(data=data, sample_width=self.sample_width,
frame_rate=self.frame_rate, channels=self.channels), self.name)
real_ret.append((this_yesno, seg))
return real_ret
def filter_silence(self, duration_s=1, threshold_percentage=1, console_output=False):
"""
Returns a copy of this AudioSegment, but whose silence has been removed.
.. note:: This method requires that you have the program 'sox' installed.
.. warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single
function call, the IO may add up for a large numbers of AudioSegment objects.
:param duration_s: The number of seconds of "silence" that must be present in a row to
be stripped.
:param threshold_percentage: Silence is defined as any samples whose absolute value is below
`threshold_percentage * max(abs(samples in this segment))`.
:param console_output: If True, will pipe all sox output to the console.
:returns: A copy of this AudioSegment, but whose silence has been removed.
"""
tmp = tempfile.NamedTemporaryFile()
othertmp = tempfile.NamedTemporaryFile()
self.export(tmp.name, format="WAV")
command = "sox " + tmp.name + " -t wav " + othertmp.name + " silence -l 1 0.1 "\
+ str(threshold_percentage) + "% -1 " + str(float(duration_s)) + " " + str(threshold_percentage) + "%"
stdout = stderr = subprocess.PIPE if console_output else subprocess.DEVNULL
res = subprocess.run(command.split(' '), stdout=stdout, stderr=stderr)
assert res.returncode == 0, "Sox did not work as intended, or perhaps you don't have Sox installed?"
other = AudioSegment(pydub.AudioSegment.from_wav(othertmp.name), self.name)
tmp.close()
othertmp.close()
return other
def fft(self, start_s=None, duration_s=None, start_sample=None, num_samples=None, zero_pad=False):
"""
Transforms the indicated slice of the AudioSegment into the frequency domain and returns the bins
and the values.
If neither `start_s` or `start_sample` is specified, the first sample of the slice will be the first sample
of the AudioSegment.
If neither `duration_s` or `num_samples` is specified, the slice will be from the specified start
to the end of the segment.
.. code-block:: python
# Example for plotting the FFT using this function
import matplotlib.pyplot as plt
import numpy as np
seg = audiosegment.from_file("furelise.wav")
# Just take the first 3 seconds
hist_bins, hist_vals = seg[1:3000].fft()
hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals)
plt.plot(hist_bins / 1000, hist_vals_real_normed)
plt.xlabel("kHz")
plt.ylabel("dB")
plt.show()
.. image:: images/fft.png
:param start_s: The start time in seconds. If this is specified, you cannot specify `start_sample`.
:param duration_s: The duration of the slice in seconds. If this is specified, you cannot specify `num_samples`.
:param start_sample: The zero-based index of the first sample to include in the slice.
If this is specified, you cannot specify `start_s`.
:param num_samples: The number of samples to include in the slice. If this is specified, you cannot
specify `duration_s`.
:param zero_pad: If True and the combination of start and duration result in running off the end of
the AudioSegment, the end is zero padded to prevent this.
:returns: np.ndarray of frequencies, np.ndarray of amount of each frequency
:raises: ValueError If `start_s` and `start_sample` are both specified and/or if both `duration_s` and
`num_samples` are specified.
"""
if start_s is not None and start_sample is not None:
raise ValueError("Only one of start_s and start_sample can be specified.")
if duration_s is not None and num_samples is not None:
raise ValueError("Only one of duration_s and num_samples can be specified.")
if start_s is None and start_sample is None:
start_sample = 0
if duration_s is None and num_samples is None:
num_samples = len(self.get_array_of_samples()) - int(start_sample)
if duration_s is not None:
num_samples = int(round(duration_s * self.frame_rate))
if start_s is not None:
start_sample = int(round(start_s * self.frame_rate))
end_sample = start_sample + num_samples # end_sample is excluded
if end_sample > len(self.get_array_of_samples()) and not zero_pad:
raise ValueError("The combination of start and duration will run off the end of the AudioSegment object.")
elif end_sample > len(self.get_array_of_samples()) and zero_pad:
arr = np.array(self.get_array_of_samples())
zeros = np.zeros(end_sample - len(arr))
arr = np.append(arr, zeros)
else:
arr = np.array(self.get_array_of_samples())
audioslice = np.array(arr[start_sample:end_sample])
fft_result = np.fft.fft(audioslice)[range(int(round(num_samples/2)) + 1)]
bins = np.arange(0, int(round(num_samples/2)) + 1, 1.0) * (self.frame_rate / num_samples)
return bins, fft_result
def generate_frames(self, frame_duration_ms, zero_pad=True):
"""
Yields self's data in chunks of frame_duration_ms.
This function adapted from pywebrtc's example [https://github.com/wiseman/py-webrtcvad/blob/master/example.py].
:param frame_duration_ms: The length of each frame in ms.
:param zero_pad: Whether or not to zero pad the end of the AudioSegment object to get all
the audio data out as frames. If not, there may be a part at the end
of the Segment that is cut off (the part will be <= `frame_duration_ms` in length).
:returns: A Frame object with properties 'bytes (the data)', 'timestamp (start time)', and 'duration'.
"""
Frame = collections.namedtuple("Frame", "bytes timestamp duration")
# (samples/sec) * (seconds in a frame) * (bytes/sample)
bytes_per_frame = int(self.frame_rate * (frame_duration_ms / 1000) * self.sample_width)
offset = 0 # where we are so far in self's data (in bytes)
timestamp = 0.0 # where we are so far in self (in seconds)
# (bytes/frame) * (sample/bytes) * (sec/samples)
frame_duration_s = (bytes_per_frame / self.frame_rate) / self.sample_width
while offset + bytes_per_frame < len(self.raw_data):
yield Frame(self.raw_data[offset:offset + bytes_per_frame], timestamp, frame_duration_s)
timestamp += frame_duration_s
offset += bytes_per_frame
if zero_pad:
rest = self.raw_data[offset:]
            # The loop above never leaves more than one frame's worth of data,
            # so pad whatever remains out to a full frame and yield it.
            if len(rest) > 0:
zeros = bytes(bytes_per_frame - len(rest))
yield Frame(rest + zeros, timestamp, frame_duration_s)
def generate_frames_as_segments(self, frame_duration_ms, zero_pad=True):
"""
Does the same thing as `generate_frames`, but yields tuples of (AudioSegment, timestamp) instead of Frames.
"""
for frame in self.generate_frames(frame_duration_ms, zero_pad=zero_pad):
seg = AudioSegment(pydub.AudioSegment(data=frame.bytes, sample_width=self.sample_width,
frame_rate=self.frame_rate, channels=self.channels), self.name)
yield seg, frame.timestamp
def normalize_spl_by_average(self, db):
"""
Normalize the values in the AudioSegment so that its average dB value
is `db`.
The dB of a value is calculated as 20 * log10(abs(value + 1E-9)).
:param db: The decibels to normalize average to.
:returns: A new AudioSegment object whose values are changed so that their
average is `db`.
"""
def inverse_spl(val):
"""Calculates the (positive) 'PCM' value for the given SPl val"""
return 10 ** (val / 20.0)
# Convert dB into 'PCM'
db_pcm = inverse_spl(db)
# Calculate current 'PCM' average
curavg = np.abs(np.mean(self.to_numpy_array()))
# Calculate ratio of dB_pcm / curavg_pcm
ratio = db_pcm / curavg
# Multiply all values by ratio
dtype_dict = {1: np.int8, 2: np.int16, 4: np.int32}
dtype = dtype_dict[self.sample_width]
new_seg = from_numpy_array(np.array(self.to_numpy_array() * ratio, dtype=dtype), self.frame_rate)
# Check SPL average to see if we are right
#assert math.isclose(np.mean(new_seg.spl), db), "new = " + str(np.mean(new_seg.spl)) + " != " + str(db)
return new_seg
def reduce(self, others):
"""
Reduces others into this one by concatenating all the others onto this one and
returning the result. Does not modify self, instead, makes a copy and returns that.
:param others: The other AudioSegment objects to append to this one.
:returns: The concatenated result.
"""
ret = AudioSegment(self.seg, self.name)
selfdata = [self.seg._data]
otherdata = [o.seg._data for o in others]
ret.seg._data = b''.join(selfdata + otherdata)
return ret
def resample(self, sample_rate_Hz=None, sample_width=None, channels=None, console_output=False):
"""
Returns a new AudioSegment whose data is the same as this one, but which has been resampled to the
specified characteristics. Any parameter left None will be unchanged.
.. note:: This method requires that you have the program 'sox' installed.
.. warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single
function call, the IO may add up for a large numbers of AudioSegment objects.
:param sample_rate_Hz: The new sample rate in Hz.
:param sample_width: The new sample width in bytes, so sample_width=2 would correspond to 16 bit (2 byte) width.
:param channels: The new number of channels.
:param console_output: Will print the output of sox to the console if True.
:returns: The newly sampled AudioSegment.
"""
if sample_rate_Hz is None:
sample_rate_Hz = self.frame_rate
if sample_width is None:
sample_width = self.sample_width
if channels is None:
channels = self.channels
infile, outfile = tempfile.NamedTemporaryFile(), tempfile.NamedTemporaryFile()
self.export(infile.name, format="wav")
command = "sox " + infile.name + " -b" + str(sample_width * 8) + " -r " + str(sample_rate_Hz) + " -t wav " + outfile.name + " channels " + str(channels)
# stdout = stderr = subprocess.PIPE if console_output else subprocess.DEVNULL
# res = subprocess.run(command.split(' '), stdout=stdout, stderr=stderr)
res = subprocess.call(command.split(' '))
if res:
raise subprocess.CalledProcessError(res, cmd=command)
other = AudioSegment(pydub.AudioSegment.from_wav(outfile.name), self.name)
infile.close()
outfile.close()
return other
def serialize(self):
"""
Serializes into a bytestring.
:returns: An object of type Bytes.
"""
d = {'name': self.name, 'seg': pickle.dumps(self.seg, protocol=-1)}
return pickle.dumps(d, protocol=-1)
def spectrogram(self, start_s=None, duration_s=None, start_sample=None, num_samples=None,
window_length_s=None, window_length_samples=None, overlap=0.5):
"""
Does a series of FFTs from `start_s` or `start_sample` for `duration_s` or `num_samples`.
Effectively, transforms a slice of the AudioSegment into the frequency domain across different
time bins.
.. code-block:: python
# Example for plotting a spectrogram using this function
import audiosegment
import matplotlib.pyplot as plt
#...
seg = audiosegment.from_file("somebodytalking.wav")
freqs, times, amplitudes = seg.spectrogram(window_length_s=0.03, overlap=0.5)
amplitudes = 10 * np.log10(amplitudes + 1e-9)
# Plot
plt.pcolormesh(times, freqs, amplitudes)
plt.xlabel("Time in Seconds")
plt.ylabel("Frequency in Hz")
plt.show()
.. image:: images/spectrogram.png
:param start_s: The start time. Starts at the beginning if neither this nor `start_sample` is specified.
:param duration_s: The duration of the spectrogram in seconds. Goes to the end if neither this nor
`num_samples` is specified.
:param start_sample: The index of the first sample to use. Starts at the beginning if neither this nor
`start_s` is specified.
:param num_samples: The number of samples in the spectrogram. Goes to the end if neither this nor
`duration_s` is specified.
:param window_length_s: The length of each FFT in seconds. If the total number of samples in the spectrogram
is not a multiple of the window length in samples, the last window will be zero-padded.
:param window_length_samples: The length of each FFT in number of samples. If the total number of samples in the
spectrogram is not a multiple of the window length in samples, the last window will
be zero-padded.
:param overlap: The fraction of each window to overlap.
:returns: Three np.ndarrays: The frequency values in Hz (the y-axis in a spectrogram), the time values starting
at start time and then increasing by `duration_s` each step (the x-axis in a spectrogram), and
the dB of each time/frequency bin as a 2D array of shape [len(frequency values), len(duration)].
:raises ValueError: If `start_s` and `start_sample` are both specified, if `duration_s` and `num_samples` are both
specified, if the first window's duration plus start time lead to running off the end
of the AudioSegment, or if `window_length_s` and `window_length_samples` are either
both specified or if they are both not specified.
"""
if start_s is not None and start_sample is not None:
raise ValueError("Only one of start_s and start_sample may be specified.")
if duration_s is not None and num_samples is not None:
raise ValueError("Only one of duration_s and num_samples may be specified.")
if window_length_s is not None and window_length_samples is not None:
raise ValueError("Only one of window_length_s and window_length_samples may be specified.")
if window_length_s is None and window_length_samples is None:
raise ValueError("You must specify a window length, either in window_length_s or in window_length_samples.")
        if start_s is None and start_sample is None:
            start_sample = 0
        # Resolve start_sample first, since the default num_samples depends on it
        if start_s is not None:
            start_sample = int(round(start_s * self.frame_rate))
        if duration_s is None and num_samples is None:
            num_samples = len(self.get_array_of_samples()) - int(start_sample)
        if duration_s is not None:
            num_samples = int(round(duration_s * self.frame_rate))
if window_length_s is not None:
window_length_samples = int(round(window_length_s * self.frame_rate))
if start_sample + num_samples > len(self.get_array_of_samples()):
raise ValueError("The combination of start and duration will run off the end of the AudioSegment object.")
f, t, sxx = signal.spectrogram(self.to_numpy_array(), self.frame_rate, scaling='spectrum', nperseg=window_length_samples,
noverlap=int(round(overlap * window_length_samples)),
mode='magnitude')
return f, t, sxx
def to_numpy_array(self):
"""
Convenience function for `np.array(self.get_array_of_samples())` while
keeping the appropriate dtype.
"""
dtype_dict = {
1: np.int8,
2: np.int16,
4: np.int32
}
dtype = dtype_dict[self.sample_width]
return np.array(self.get_array_of_samples(), dtype=dtype)
@deprecated
def trim_to_minutes(self, strip_last_seconds=False):
"""
Returns a list of minute-long (at most) Segment objects.
.. note:: This function has been deprecated. Use the `dice` function instead.
:param strip_last_seconds: If True, this method will return minute-long segments,
but the last three seconds of this AudioSegment won't be returned.
This is useful for removing the microphone artifact at the end of the recording.
:returns: A list of AudioSegment objects, each of which is one minute long at most
(and only the last one - if any - will be less than one minute).
"""
outs = self.dice(seconds=60, zero_pad=False)
# Now cut out the last three seconds of the last item in outs (it will just be microphone artifact)
# or, if the last item is less than three seconds, just get rid of it
if strip_last_seconds:
if outs[-1].duration_seconds > 3:
outs[-1] = outs[-1][:-MS_PER_S * 3]
else:
outs = outs[:-1]
return outs
def zero_extend(self, duration_s=None, num_samples=None):
"""
Adds a number of zeros (digital silence) to the AudioSegment (returning a new one).
:param duration_s: The number of seconds of zeros to add. If this is specified, `num_samples` must be None.
:param num_samples: The number of zeros to add. If this is specified, `duration_s` must be None.
:returns: A new AudioSegment object that has been zero extended.
:raises: ValueError if duration_s and num_samples are both specified.
"""
if duration_s is not None and num_samples is not None:
raise ValueError("`duration_s` and `num_samples` cannot both be specified.")
elif duration_s is not None:
num_samples = self.frame_rate * duration_s
seg = AudioSegment(self.seg, self.name)
zeros = silent(duration=num_samples / self.frame_rate, frame_rate=self.frame_rate)
return zeros.overlay(seg)
def deserialize(bstr):
"""
Attempts to deserialize a bytestring into an audiosegment.
:param bstr: The bytestring serialized via an audiosegment's serialize() method.
:returns: An AudioSegment object deserialized from `bstr`.
"""
d = pickle.loads(bstr)
seg = pickle.loads(d['seg'])
return AudioSegment(seg, d['name'])
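# Hedged usage sketch (editor's illustration, not part of the original audiosegment
# module): shows that serialize() and deserialize() round-trip an AudioSegment.
# It only relies on helpers defined in this file (silent, deserialize).
def _example_serialize_roundtrip():
    """Illustrative only: serialize one second of silence and restore it."""
    original = silent(duration=1000, frame_rate=16000)
    restored = deserialize(original.serialize())
    assert restored.frame_rate == original.frame_rate
    assert restored.seg._data == original.seg._data
    return restored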
def empty():
"""
Creates a zero-duration AudioSegment object.
:returns: An empty AudioSegment object.
"""
dubseg = pydub.AudioSegment.empty()
return AudioSegment(dubseg, "")
def from_file(path):
"""
Returns an AudioSegment object from the given file based on its file extension.
    If the extension does not match the file's actual format, the underlying pydub call will raise an error.
:param path: The path to the file, including the file extension.
:returns: An AudioSegment instance from the file.
"""
_name, ext = os.path.splitext(path)
ext = ext.lower()[1:]
seg = pydub.AudioSegment.from_file(path, ext)
return AudioSegment(seg, path)
def from_mono_audiosegments(*args):
"""
Creates a multi-channel AudioSegment out of multiple mono AudioSegments (two or more). Each mono
    AudioSegment passed in should have exactly the same number of samples.
:returns: An AudioSegment of multiple channels formed from the given mono AudioSegments.
"""
return AudioSegment(pydub.AudioSegment.from_mono_audiosegments(*args), "")
def from_numpy_array(nparr, framerate):
"""
Returns an AudioSegment created from the given numpy array.
The numpy array must have shape = (num_samples, num_channels).
:param nparr: The numpy array to create an AudioSegment from.
:returns: An AudioSegment created from the given array.
"""
# interleave the audio across all channels and collapse
if nparr.dtype.itemsize not in (1, 2, 4):
raise ValueError("Numpy Array must contain 8, 16, or 32 bit values.")
if len(nparr.shape) == 1:
arrays = [nparr]
elif len(nparr.shape) == 2:
arrays = [nparr[i,:] for i in range(nparr.shape[0])]
else:
raise ValueError("Numpy Array must be one or two dimensional. Shape must be: (num_samples, num_channels).")
interleaved = np.vstack(arrays).reshape((-1,), order='F')
dubseg = pydub.AudioSegment(interleaved.tobytes(),
frame_rate=framerate,
sample_width=interleaved.dtype.itemsize,
                                channels=len(arrays)  # one channel per array built above
)
return AudioSegment(dubseg, "")
def silent(duration=1000, frame_rate=11025):
"""
Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence.
:param duration: The duration of the returned object in ms.
:param frame_rate: The samples per second of the returned object.
:returns: AudioSegment object filled with pure digital silence.
"""
seg = pydub.AudioSegment.silent(duration=duration, frame_rate=frame_rate)
return AudioSegment(seg, "")
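# Hedged usage sketch (editor's illustration, not part of the original audiosegment
# module): round-trips a synthetic tone through from_numpy_array() and
# AudioSegment.to_numpy_array(). It assumes np is the module-level numpy import.
def _example_numpy_roundtrip():
    """Illustrative only: build a 0.1 s, 16 kHz mono sine tone from a numpy array."""
    framerate = 16000
    t = np.arange(int(0.1 * framerate)) / framerate
    tone = (0.25 * 32767 * np.sin(2 * np.pi * 440.0 * t)).astype(np.int16)
    seg = from_numpy_array(tone, framerate)
    back = seg.to_numpy_array()
    assert back.dtype == np.int16 and back.shape[0] == tone.shape[0]
    return seg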
|
PyOpenGLExample/mouse.py | DazEB2/SimplePyScripts | 117 | 12699800 |
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
"""
Mouse interaction
This piece of code captures the user's mouse clicks. Every time the user
presses the left mouse button, the current point is pushed onto an array; when the
right mouse button is pressed, the last element is removed from the array. After a
click, glutPostRedisplay() is called to trigger a call to the display function,
which creates a line strip out of all created points.
http://www.de-brauwer.be/wiki/wikka.php?wakka=PyOpenGLMouse
"""
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
points = []
def initFun():
glClearColor(1.0, 1.0, 1.0, 0.0)
glColor3f(0.0, 0.0, 0.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0, 640.0, 0.0, 480.0)
def displayFun():
global points
glClear(GL_COLOR_BUFFER_BIT)
glBegin(GL_LINE_STRIP)
glColor3f(0, 0, 0)
for p in points:
glVertex2i(p.x, p.y)
glEnd()
glFlush()
def mouseFun(button, state, x, y):
global points
if button == GLUT_LEFT_BUTTON and state == GLUT_DOWN:
p = Point(x, 480 - y)
points.append(p)
if button == GLUT_RIGHT_BUTTON and state == GLUT_DOWN:
if points:
points = points[:-1]
glutPostRedisplay()
if __name__ == '__main__':
glutInit()
glutInitWindowSize(640, 480)
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutCreateWindow(b"Polyline")
glutDisplayFunc(displayFun)
glutMouseFunc(mouseFun)
initFun()
glutMainLoop() |
autotest/test_gwf_auxvars02.py | MODFLOW-USGS/modflow6 | 102 | 12699805 | import os
import pytest
import sys
import numpy as np
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["aux02"]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
def build_model(idx, dir):
nlay, nrow, ncol = 1, 10, 10
nper = 3
perlen = [1.0, 1.0, 1.0]
nstp = [10, 10, 10]
tsmult = [1.0, 1.0, 1.0]
lenx = 300.0
delr = delc = lenx / float(nrow)
strt = 100.0
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-9, 1e-3, 0.97
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc)
# create gwf model
gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
# create iterative model solution and register the gwf model with it
ims = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="DBD",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
)
sim.register_ims_package(ims, [gwf.name])
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=90.0,
botm=0.0,
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=True, icelltype=1, k=1.0, k33=0.01)
# chd files
chdlist0 = []
chdlist0.append([(0, 0, 0), 100.0] + [a for a in range(100)])
chdlist0.append([(0, nrow - 1, ncol - 1), 95.0] + [a for a in range(100)])
chdspdict = {0: chdlist0}
chd = flopy.mf6.ModflowGwfchd(
gwf,
stress_period_data=chdspdict,
save_flows=True,
auxiliary=[f"aux{i}" for i in range(100)],
pname="CHD-1",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.bud".format(name),
head_filerecord="{}.hds".format(name),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
filename="{}.oc".format(name),
)
return sim, None
def eval_model(sim):
print("evaluating model...")
# maw budget aux variables
fpth = os.path.join(sim.simpath, "aux02.bud")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="CHD")
for r in records:
for a in range(100):
aname = f"AUX{a}"
assert np.allclose(r[aname], a)
return
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# initialize testing framework
test = testing_framework()
# build the model
test.build_mf6_models(build_model, idx, dir)
# run the test model
test.run_mf6(Simulation(dir, exfunc=eval_model, idxsim=idx))
def main():
# initialize testing framework
test = testing_framework()
# run the test model
for idx, dir in enumerate(exdirs):
test.build_mf6_models(build_model, idx, dir)
sim = Simulation(dir, exfunc=eval_model, idxsim=idx)
test.run_mf6(sim)
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
|
kernel_tuner/kernelbuilder.py | cffbots/kernel_tuner | 104 | 12699806 | import numpy as np
from kernel_tuner import core
from kernel_tuner.interface import Options, _kernel_options
from kernel_tuner.integration import TuneResults
class PythonKernel(object):
def __init__(self, kernel_name, kernel_string, problem_size, arguments, params=None, inputs=None, outputs=None, device=0, platform=0,
block_size_names=None, grid_div_x=None, grid_div_y=None, grid_div_z=None, verbose=True, lang=None,
results_file=None):
""" Construct Python helper object to compile and call the kernel from Python
This object compiles a GPU kernel parameterized using the parameters in params.
GPU memory is allocated for each argument using its size and type as listed in arguments.
The object can be called directly as a function with the kernel arguments as function arguments.
Kernel arguments marked as inputs will be copied to the GPU on every kernel launch.
Only the kernel arguments marked as outputs will be returned, note that the result is always
returned in a list, even when there is only one output.
Most of the arguments to this function are the same as with tune_kernel or run_kernel in Kernel Tuner,
and are therefore not duplicated here. The two new arguments are:
:param inputs: a boolean list of length arguments to signal whether an argument is input to the kernel
:type inputs: list(bool)
:param outputs: a boolean list of length arguments to signal whether an argument is output of the kernel
:type outputs: list(bool)
"""
#construct device interface
kernel_source = core.KernelSource(kernel_name, kernel_string, lang)
self.dev = core.DeviceInterface(kernel_source, device=device, quiet=True)
if not params:
params = {}
#if results_file is passed use the results file to lookup tunable parameters
if results_file:
results = TuneResults(results_file)
params.update(results.get_best_config(self.dev.name, problem_size))
self.params = params
#construct kernel_options to hold information about the kernel
opts = locals()
kernel_options = Options([(k, opts[k]) for k in _kernel_options.keys() if k in opts.keys()])
#instantiate the kernel given the parameters in params
self.kernel_instance = self.dev.create_kernel_instance(kernel_source, kernel_options, params, verbose)
#compile the kernel
self.func = self.dev.compile_kernel(self.kernel_instance, verbose)
#setup GPU memory
self.gpu_args = self.dev.ready_argument_list(arguments)
if inputs:
self.inputs = inputs
else:
self.inputs = [True for _ in arguments]
if outputs:
self.outputs = outputs
else:
self.outputs = [True for _ in arguments]
def update_gpu_args(self, args):
for i, arg in enumerate(args):
if self.inputs[i]:
if isinstance(args[i], np.ndarray):
self.dev.dev.memcpy_htod(self.gpu_args[i], arg)
else:
self.gpu_args[i] = arg
return self.gpu_args
def get_gpu_result(self, args):
results = []
for i, _ in enumerate(self.gpu_args):
if self.outputs[i] and isinstance(args[i], np.ndarray):
res = np.zeros_like(args[i])
self.dev.memcpy_dtoh(res, self.gpu_args[i])
results.append(res)
return results
def run_kernel(self, args):
"""Run the GPU kernel
Copy the arguments marked as inputs to the GPU
Call the GPU kernel
Copy the arguments marked as outputs from the GPU
Return the outputs in a list of numpy arrays
:param args: A list with the kernel arguments as numpy arrays or numpy scalars
:type args: list(np.ndarray or np.generic)
"""
self.update_gpu_args(args)
self.dev.run_kernel(self.func, self.gpu_args, self.kernel_instance)
return self.get_gpu_result(args)
def __call__(self, *args):
"""Run the GPU kernel
Copy the arguments marked as inputs to the GPU
Call the GPU kernel
Copy the arguments marked as outputs from the GPU
Return the outputs in a list of numpy arrays
:param *args: A variable number of kernel arguments as numpy arrays or numpy scalars
:type *args: np.ndarray or np.generic
"""
return self.run_kernel(args)
def __del__(self):
if hasattr(self, 'dev'):
self.dev.__exit__([None, None, None])
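# Hedged usage sketch (editor's illustration, not part of the original kernel_tuner
# module): wraps a minimal CUDA vector-add kernel with PythonKernel. The kernel
# string, sizes, and the block_size_x value are assumptions of this example, and
# running it requires a CUDA-capable GPU plus a working kernel_tuner backend.
def _example_vector_add():
    """Illustrative only: compile, launch, and verify a trivial vector-add kernel."""
    kernel_string = """
    __global__ void vector_add(float *c, float *a, float *b, int n) {
        int i = blockIdx.x * block_size_x + threadIdx.x;
        if (i < n) {
            c[i] = a[i] + b[i];
        }
    }
    """
    size = 10000
    a = np.random.randn(size).astype(np.float32)
    b = np.random.randn(size).astype(np.float32)
    c = np.zeros_like(a)
    n = np.int32(size)
    kernel = PythonKernel("vector_add", kernel_string, size, [c, a, b, n],
                          params={"block_size_x": 256},
                          inputs=[False, True, True, False],
                          outputs=[True, False, False, False])
    result = kernel(c, a, b, n)[0]
    assert np.allclose(result, a + b)
    return result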
|
test/optimizers/test_optimizers.py | dynamicslab/pysindy | 613 | 12699891 | """
Unit tests for optimizers.
"""
import numpy as np
import pytest
from numpy.linalg import norm
from scipy.integrate import odeint
from sklearn.base import BaseEstimator
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lasso
from sklearn.utils.validation import check_is_fitted
from pysindy import FiniteDifference
from pysindy import PolynomialLibrary
from pysindy import SINDy
from pysindy.feature_library import CustomLibrary
from pysindy.optimizers import ConstrainedSR3
from pysindy.optimizers import SINDyOptimizer
from pysindy.optimizers import SR3
from pysindy.optimizers import STLSQ
from pysindy.optimizers import TrappingSR3
from pysindy.utils import supports_multiple_targets
def lorenz(z, t):
return 10 * (z[1] - z[0]), z[0] * (28 - z[2]) - z[1], z[0] * z[1] - 8 / 3 * z[2]
class DummyLinearModel(BaseEstimator):
# Does not natively support multiple targets
def fit(self, x, y):
self.coef_ = np.ones(x.shape[1])
self.intercept_ = 0
return self
def predict(self, x):
return x
class DummyEmptyModel(BaseEstimator):
# Does not have fit or predict methods
def __init__(self):
self.fit_intercept = False
self.normalize = False
class DummyModelNoCoef(BaseEstimator):
# Does not set the coef_ attribute
def fit(self, x, y):
self.intercept_ = 0
return self
def predict(self, x):
return x
@pytest.mark.parametrize(
"cls, support",
[
(Lasso, True),
(STLSQ, True),
(SR3, True),
(ConstrainedSR3, True),
(TrappingSR3, True),
(DummyLinearModel, False),
],
)
def test_supports_multiple_targets(cls, support):
assert supports_multiple_targets(cls()) == support
@pytest.fixture(params=["data_derivative_1d", "data_derivative_2d"])
def data(request):
return request.getfixturevalue(request.param)
@pytest.mark.parametrize(
"optimizer",
[
STLSQ(),
SR3(),
ConstrainedSR3(),
TrappingSR3(),
Lasso(fit_intercept=False),
ElasticNet(fit_intercept=False),
DummyLinearModel(),
],
)
def test_fit(data, optimizer):
x, x_dot = data
if len(x.shape) == 1:
x = x.reshape(-1, 1)
opt = SINDyOptimizer(optimizer, unbias=False)
opt.fit(x, x_dot)
check_is_fitted(opt)
assert opt.complexity >= 0
if len(x_dot.shape) > 1:
assert opt.coef_.shape == (x.shape[1], x_dot.shape[1])
else:
assert opt.coef_.shape == (1, x.shape[1])
@pytest.mark.parametrize(
"optimizer",
[STLSQ(), SR3()],
)
def test_not_fitted(optimizer):
with pytest.raises(NotFittedError):
optimizer.predict(np.ones((1, 3)))
@pytest.mark.parametrize("optimizer", [STLSQ(), SR3()])
def test_complexity_not_fitted(optimizer, data_derivative_2d):
with pytest.raises(NotFittedError):
optimizer.complexity
x, _ = data_derivative_2d
optimizer.fit(x, x)
assert optimizer.complexity > 0
@pytest.mark.parametrize(
"kwargs", [{"normalize": True}, {"fit_intercept": True}, {"copy_X": False}]
)
def test_alternate_parameters(data_derivative_1d, kwargs):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = STLSQ(**kwargs)
model.fit(x, x_dot)
model.fit(x, x_dot, sample_weight=x[:, 0])
check_is_fitted(model)
@pytest.mark.parametrize("optimizer", [STLSQ, SR3, ConstrainedSR3])
@pytest.mark.parametrize("params", [dict(threshold=-1), dict(max_iter=0)])
def test_general_bad_parameters(optimizer, params):
with pytest.raises(ValueError):
optimizer(**params)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
@pytest.mark.parametrize(
"params",
[dict(nu=0), dict(tol=0), dict(trimming_fraction=-1), dict(trimming_fraction=2)],
)
def test_sr3_bad_parameters(optimizer, params):
with pytest.raises(ValueError):
optimizer(**params)
@pytest.mark.parametrize(
"params",
[
dict(eta=-1),
dict(tol=0),
dict(tol_m=0),
dict(eps_solver=0),
dict(alpha_m=-1),
dict(alpha_A=-1),
dict(gamma=1),
dict(evolve_w=False, relax_optim=False),
dict(thresholder="l0"),
dict(threshold=-1),
dict(max_iter=0),
dict(eta=10, alpha_m=20),
dict(eta=10, alpha_A=20),
],
)
def test_trapping_bad_parameters(params):
with pytest.raises(ValueError):
TrappingSR3(**params)
@pytest.mark.parametrize(
"params",
[dict(PL=np.random.rand(3, 3, 3, 9)), dict(PQ=np.random.rand(3, 3, 3, 3, 9))],
)
def test_trapping_bad_tensors(params):
x = np.random.standard_normal((10, 9))
x_dot = np.random.standard_normal((10, 3))
with pytest.raises(ValueError):
model = TrappingSR3(**params)
model.fit(x, x_dot)
@pytest.mark.parametrize(
"params",
[dict(PL=np.ones((3, 3, 3, 9)), PQ=np.ones((3, 3, 3, 3, 9)))],
)
def test_trapping_quadratic_library(params):
x = np.random.standard_normal((10, 3))
library_functions = [
lambda x: x,
lambda x, y: x * y,
lambda x: x ** 2,
]
library_function_names = [
lambda x: str(x),
lambda x, y: "{} * {}".format(x, y),
lambda x: "{}^2".format(x),
]
sindy_library = CustomLibrary(
library_functions=library_functions, function_names=library_function_names
)
opt = TrappingSR3(**params)
model = SINDy(optimizer=opt, feature_library=sindy_library)
model.fit(x)
assert opt.PL.shape == (3, 3, 3, 9)
assert opt.PQ.shape == (3, 3, 3, 3, 9)
check_is_fitted(model)
@pytest.mark.parametrize(
"params",
[dict(PL=np.ones((3, 3, 3, 9)), PQ=np.ones((3, 3, 3, 3, 9)))],
)
def test_trapping_cubic_library(params):
x = np.random.standard_normal((10, 3))
library_functions = [
lambda x: x,
lambda x, y: x * y,
lambda x: x ** 2,
lambda x, y, z: x * y * z,
lambda x, y: x ** 2 * y,
lambda x: x ** 3,
]
library_function_names = [
lambda x: str(x),
lambda x, y: "{} * {}".format(x, y),
lambda x: "{}^2".format(x),
lambda x, y, z: "{} * {} * {}".format(x, y, z),
lambda x, y: "{}^2 * {}".format(x, y),
lambda x: "{}^3".format(x),
]
sindy_library = CustomLibrary(
library_functions=library_functions, function_names=library_function_names
)
with pytest.raises(ValueError):
opt = TrappingSR3(**params)
model = SINDy(optimizer=opt, feature_library=sindy_library)
model.fit(x)
@pytest.mark.parametrize(
"error, optimizer, params",
[
(ValueError, STLSQ, dict(alpha=-1)),
(NotImplementedError, SR3, dict(thresholder="l2")),
(NotImplementedError, ConstrainedSR3, dict(thresholder="l2")),
(ValueError, ConstrainedSR3, dict(thresholder="weighted_l0", thresholds=None)),
(ValueError, ConstrainedSR3, dict(thresholder="weighted_l0", thresholds=None)),
(ValueError, ConstrainedSR3, dict(thresholds=-np.ones((5, 5)))),
],
)
def test_specific_bad_parameters(error, optimizer, params):
with pytest.raises(error):
optimizer(**params)
def test_bad_optimizers(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
with pytest.raises(AttributeError):
opt = SINDyOptimizer(DummyEmptyModel())
with pytest.raises(AttributeError):
opt = SINDyOptimizer(DummyModelNoCoef())
opt.fit(x, x_dot)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_initial_guess_sr3(optimizer):
x = np.random.standard_normal((10, 3))
x_dot = np.random.standard_normal((10, 2))
control_model = optimizer(max_iter=1).fit(x, x_dot)
initial_guess = np.random.standard_normal((x_dot.shape[1], x.shape[1]))
guess_model = optimizer(max_iter=1, initial_guess=initial_guess).fit(x, x_dot)
assert np.any(np.not_equal(control_model.coef_, guess_model.coef_))
# The different capitalizations are intentional;
# I want to make sure different versions are recognized
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
@pytest.mark.parametrize("thresholder", ["L0", "l1"])
def test_prox_functions(data_derivative_1d, optimizer, thresholder):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = optimizer(thresholder=thresholder)
model.fit(x, x_dot)
check_is_fitted(model)
def test_cad_prox_function(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = SR3(thresholder="cAd")
model.fit(x, x_dot)
check_is_fitted(model)
@pytest.mark.parametrize("thresholder", ["weighted_l0", "weighted_l1"])
def test_weighted_prox_functions(data, thresholder):
x, x_dot = data
if x.ndim == 1:
x = x.reshape(-1, 1)
thresholds = np.ones((1, 1))
else:
thresholds = np.ones((x_dot.shape[1], x.shape[1]))
model = ConstrainedSR3(thresholder=thresholder, thresholds=thresholds)
model.fit(x, x_dot)
check_is_fitted(model)
@pytest.mark.parametrize("thresholder", ["L0", "l1"])
def test_constrained_sr3_prox_functions(data_derivative_1d, thresholder):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = ConstrainedSR3(thresholder=thresholder)
model.fit(x, x_dot)
check_is_fitted(model)
def test_unbias(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
optimizer_biased = SINDyOptimizer(
STLSQ(threshold=0.01, alpha=0.1, max_iter=1), unbias=False
)
optimizer_biased.fit(x, x_dot)
optimizer_unbiased = SINDyOptimizer(
STLSQ(threshold=0.01, alpha=0.1, max_iter=1), unbias=True
)
optimizer_unbiased.fit(x, x_dot)
assert (
norm(optimizer_biased.coef_ - optimizer_unbiased.coef_)
/ norm(optimizer_unbiased.coef_)
> 1e-9
)
def test_unbias_external(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
optimizer_biased = SINDyOptimizer(
Lasso(alpha=0.1, fit_intercept=False, max_iter=1), unbias=False
)
optimizer_biased.fit(x, x_dot)
optimizer_unbiased = SINDyOptimizer(
Lasso(alpha=0.1, fit_intercept=False, max_iter=1), unbias=True
)
optimizer_unbiased.fit(x, x_dot)
assert (
norm(optimizer_biased.coef_ - optimizer_unbiased.coef_)
/ (norm(optimizer_unbiased.coef_) + 1e-5)
> 1e-9
)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_sr3_trimming(optimizer, data_linear_oscillator_corrupted):
X, X_dot, trimming_array = data_linear_oscillator_corrupted
optimizer_without_trimming = SINDyOptimizer(optimizer(), unbias=False)
optimizer_without_trimming.fit(X, X_dot)
optimizer_trimming = SINDyOptimizer(optimizer(trimming_fraction=0.15), unbias=False)
optimizer_trimming.fit(X, X_dot)
# Check that trimming found the right samples to remove
np.testing.assert_array_equal(
optimizer_trimming.optimizer.trimming_array, trimming_array
)
# Check that the coefficients found by the optimizer with trimming
# are closer to the true coefficients than the coefficients found by the
# optimizer without trimming
true_coef = np.array([[-2.0, 0.0], [0.0, 1.0]])
assert norm(true_coef - optimizer_trimming.coef_) < norm(
true_coef - optimizer_without_trimming.coef_
)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_sr3_disable_trimming(optimizer, data_linear_oscillator_corrupted):
x, x_dot, _ = data_linear_oscillator_corrupted
model_plain = optimizer()
model_plain.fit(x, x_dot)
model_trimming = optimizer(trimming_fraction=0.5)
model_trimming.disable_trimming()
model_trimming.fit(x, x_dot)
np.testing.assert_allclose(model_plain.coef_, model_trimming.coef_)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_sr3_enable_trimming(optimizer, data_linear_oscillator_corrupted):
x, x_dot, _ = data_linear_oscillator_corrupted
model_plain = optimizer()
model_plain.enable_trimming(trimming_fraction=0.5)
model_plain.fit(x, x_dot)
model_trimming = optimizer(trimming_fraction=0.5)
model_trimming.fit(x, x_dot)
np.testing.assert_allclose(model_plain.coef_, model_trimming.coef_)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3, TrappingSR3])
def test_sr3_warn(optimizer, data_linear_oscillator_corrupted):
x, x_dot, _ = data_linear_oscillator_corrupted
model = optimizer(max_iter=1, tol=1e-10)
with pytest.warns(ConvergenceWarning):
model.fit(x, x_dot)
@pytest.mark.parametrize(
"optimizer",
[
STLSQ(max_iter=1),
SR3(max_iter=1),
ConstrainedSR3(max_iter=1),
TrappingSR3(max_iter=1),
],
)
def test_fit_warn(data_derivative_1d, optimizer):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
with pytest.warns(ConvergenceWarning):
optimizer.fit(x, x_dot)
@pytest.mark.parametrize("optimizer", [ConstrainedSR3, TrappingSR3])
@pytest.mark.parametrize("target_value", [0, -1, 3])
def test_row_format_constraints(data_linear_combination, optimizer, target_value):
# Solution is x_dot = x.dot(np.array([[1, 1, 0], [0, 1, 1]]))
x, x_dot = data_linear_combination
constraint_rhs = target_value * np.ones(2)
constraint_lhs = np.zeros((2, x.shape[1] * x_dot.shape[1]))
# Should force corresponding entries of coef_ to be target_value
constraint_lhs[0, 0] = 1
constraint_lhs[1, 3] = 1
model = optimizer(
constraint_lhs=constraint_lhs,
constraint_rhs=constraint_rhs,
constraint_order="feature",
)
model.fit(x, x_dot)
np.testing.assert_allclose(
np.array([model.coef_[0, 0], model.coef_[1, 1]]), target_value, atol=1e-8
)
@pytest.mark.parametrize("optimizer", [ConstrainedSR3, TrappingSR3])
@pytest.mark.parametrize("target_value", [0, -1, 3])
def test_target_format_constraints(data_linear_combination, optimizer, target_value):
x, x_dot = data_linear_combination
constraint_rhs = target_value * np.ones(2)
constraint_lhs = np.zeros((2, x.shape[1] * x_dot.shape[1]))
# Should force corresponding entries of coef_ to be target_value
constraint_lhs[0, 1] = 1
constraint_lhs[1, 4] = 1
model = optimizer(constraint_lhs=constraint_lhs, constraint_rhs=constraint_rhs)
model.fit(x, x_dot)
np.testing.assert_allclose(model.coef_[:, 1], target_value, atol=1e-8)
@pytest.mark.parametrize("thresholds", [0.005, 0.05])
@pytest.mark.parametrize("relax_optim", [False, True])
@pytest.mark.parametrize("noise_levels", [0.0, 0.05, 0.5])
def test_trapping_inequality_constraints(thresholds, relax_optim, noise_levels):
t = np.arange(0, 40, 0.05)
x = odeint(lorenz, [-8, 8, 27], t)
x = x + np.random.normal(0.0, noise_levels, x.shape)
# if order is "feature"
constraint_rhs = np.array([-10.0, -2.0])
constraint_matrix = np.zeros((2, 30))
constraint_matrix[0, 6] = 1.0
constraint_matrix[1, 17] = 1.0
feature_names = ["x", "y", "z"]
opt = TrappingSR3(
threshold=thresholds,
constraint_lhs=constraint_matrix,
constraint_rhs=constraint_rhs,
constraint_order="feature",
inequality_constraints=True,
relax_optim=relax_optim,
)
poly_lib = PolynomialLibrary(degree=2)
model = SINDy(
optimizer=opt,
feature_library=poly_lib,
differentiation_method=FiniteDifference(drop_endpoints=True),
feature_names=feature_names,
)
model.fit(x, t=t[1] - t[0])
assert np.all(
np.dot(constraint_matrix, (model.coefficients()).flatten("F")) <= constraint_rhs
) or np.allclose(
np.dot(constraint_matrix, (model.coefficients()).flatten("F")), constraint_rhs
)
def test_inequality_constraints_reqs():
constraint_rhs = np.array([-10.0, -2.0])
constraint_matrix = np.zeros((2, 30))
constraint_matrix[0, 6] = 1.0
constraint_matrix[1, 17] = 1.0
with pytest.raises(ValueError):
TrappingSR3(
threshold=0.0,
constraint_lhs=constraint_matrix,
constraint_rhs=constraint_rhs,
constraint_order="feature",
inequality_constraints=True,
relax_optim=True,
)
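# Hedged usage sketch (editor's illustration, not part of the original test module):
# a plain STLSQ-based SINDy fit on clean Lorenz data, using only imports already
# present above. The threshold value is an assumption chosen for illustration.
def _example_fit_lorenz():
    """Illustrative only: fit SINDy with STLSQ and return the learned coefficients."""
    t = np.arange(0, 10, 0.01)
    x = odeint(lorenz, [-8, 8, 27], t)
    model = SINDy(optimizer=STLSQ(threshold=0.1))
    model.fit(x, t=t[1] - t[0])
    return model.coefficients()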
|
rman_sg_nodes/rman_sg_emitter.py | N500/RenderManForBlender | 432 | 12699922 | from .rman_sg_node import RmanSgNode
class RmanSgEmitter(RmanSgNode):
def __init__(self, rman_scene, sg_node, db_name):
super().__init__(rman_scene, sg_node, db_name)
self.matrix_world = None
self.npoints = -1
self.render_type = ''
self.sg_particles_node = None
@property
def matrix_world(self):
return self.__matrix_world
@matrix_world.setter
def matrix_world(self, mtx):
self.__matrix_world = mtx
@property
def npoints(self):
return self.__npoints
@npoints.setter
def npoints(self, npoints):
self.__npoints = npoints
@property
def render_type(self):
return self.__render_type
@render_type.setter
def render_type(self, render_type):
self.__render_type = render_type
|
windows_packages_gpu/torch/utils/__init__.py | codeproject/DeepStack | 353 | 12699928 | from __future__ import absolute_import, division, print_function, unicode_literals
from .throughput_benchmark import ThroughputBenchmark
import os.path as _osp
# Set the module for a given object for nicer printing
def set_module(obj, mod):
if not isinstance(mod, str):
raise TypeError("The mod argument should be a string")
obj.__module__ = mod
#: Path to folder containing CMake definitions for Torch package
cmake_prefix_path = _osp.join(_osp.dirname(_osp.dirname(__file__)), 'share', 'cmake')
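# Hedged usage sketch (editor's illustration, not part of the original torch module):
# set_module() only rewrites __module__ so an object reports a nicer module path.
# The helper below is a made-up example, not a real torch API.
def _example_set_module():
    """Illustrative only: relabel a local function as if it lived in torch.utils."""
    def helper():
        return 42
    set_module(helper, "torch.utils")
    assert helper.__module__ == "torch.utils"
    return helper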
|
analysis/regex_test.py | maxwellyin/arxiv-public-datasets | 217 | 12699929 |
"""
regex_test.py
author: <NAME>
date: 2019-03-16
This module samples the fulltext of the arxiv, pulls out some arxiv IDs, and
then checks these IDs against valid ones in our set of metadata, producing
a report of bad id's found so that we can improve the citation extraction.
"""
import os
import re
import numpy as np
import arxiv_public_data.regex_arxiv as ra
RE_FLEX = re.compile(ra.REGEX_ARXIV_FLEXIBLE)
def strip_version(name):
return name.split('v')[0]
def format_cat(name):
""" Strip subcategory, add hyphen to category if missing """
if '/' in name: # OLD ID, names contains subcategory
catsubcat, aid = name.split('/')
cat = catsubcat.split('.')[0]
return ra.dashdict.get(cat, cat) + "/" + aid
else:
return name
def zeropad_1501(name):
""" Arxiv IDs after yymm=1501 are padded to 5 zeros """
if not '/' in name: # new ID
yymm, num = name.split('.')
if int(yymm) > 1500 and len(num) < 5:
return yymm + ".0" + num
return name
def clean(name):
funcs = [strip_version, format_cat, zeropad_1501]
for func in funcs:
name = func(name)
return name
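# Hedged usage sketch (editor's illustration, not part of the original script):
# shows how clean() normalizes raw regex matches. The IDs below are made up.
def _example_clean():
    """Illustrative only: strip versions, fix categories, and zero-pad new-style IDs."""
    raw = ["1503.1234v2", "hep-ph/9901123v1", "math.GT/0309136"]
    return [clean(name) for name in raw]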
def get_alltxt(directory='/pool0/arxiv/full-text'):
out = []
for root, dirs, files in os.walk(directory):
for f in files:
if 'txt' in f:
out.append(os.path.join(root, f))
return out
def sample(out, num):
return [out[s] for s in np.random.randint(0, len(out)-1, num)]
def all_matches(filename, pattern=RE_FLEX):
out = []
matches = pattern.findall(open(filename, 'r').read())
for match in matches:
for group in match:
if group:
out.append(group)
return out
def all_matches_context(filename, pattern=RE_FLEX, pad=10):
out = []
contents = open(filename, 'r').read()
match = pattern.search(contents)
while match is not None:
s, e = match.start(), match.end()
out.append(contents[max(s-pad,0):e+pad])
contents = contents[e:]
match = pattern.search(contents)
return out
def test_samples(samples, valid_ids, directory='/pool0/arxiv/full-text',
pattern=RE_FLEX, showpasses=False):
failures = dict()
n_matches = 0
n_failures = 0
for i, s in enumerate(samples):
matches = all_matches(s, pattern)
n_matches += len(matches)
valid = [clean(m) in valid_ids for m in matches]
if not all(valid):
failures[s] = all_matches_context(s, RE_FLEX)
print("{}: BAD match in {}".format(i, s))
for v, c in zip(valid, failures[s]):
if not v:
n_failures += 1
print("\t{}".format(c))
else:
if showpasses:
print("{}: {} had no match errors".format(i, s))
error_rate = n_failures/n_matches
print("Error rate from {} matches is {}".format(n_matches, error_rate))
return failures
if __name__=="__main__":
from arxiv_public_data.oai_metadata import load_metadata
md_file = 'data/oai-arxiv-metadata-2019-03-01.json.gz'
valid_ids = [m['id'] for m in load_metadata(md_file)]
samples = sample(get_alltxt(), 10000)
failures = test_samples(samples, valid_ids)
|
rkqc/tools/gui/items/TransformationBasedSynthesisItem.py | clairechingching/ScaffCC | 158 | 12699932 | # RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import SIGNAL, pyqtProperty
from core.BaseItem import *
from revkit import circuit, reed_muller_synthesis_func, swop, transformation_based_synthesis_func, gate_costs, quantum_costs, transistor_costs
from helpers.RevKitHelper import *
from ui.DesignerWidget import DesignerWidget
from ui.TransformationBasedSynthesis import Ui_TransformationBasedSynthesis
class TransformationBasedSynthesis( DesignerWidget ):
def __init__( self, parent = None ):
DesignerWidget.__init__( self, Ui_TransformationBasedSynthesis, parent )
self.connect( self.swop, SIGNAL( 'currentIndexChanged(int)' ), self.swopChanged )
def swopChanged( self, index ):
self.cost_function.setEnabled( index > 0 )
self.cost_function_label.setEnabled( index > 0 )
@item( "Transformation-based Synthesis",
requires = "Truth Table", provides = "Circuit",
properties = [ "variant", "bidi_synthesis", "swop", "cost_function" ],
widget = { 'class': TransformationBasedSynthesis, 'size': (300, 175) } )
class TransformationBasedSynthesisItem( BaseItem ):
"""This item provides the transformation-based synthesis method as well as the corresponding synthesis with output permutation method. The respective synthesis approach can be selected in the pull-down menu (in case of synthesis with output permutation additionally the optimization criteria can be defined). Furthermore, it can be specified whether bi-directional synthesis should be applied or not. After the item has been processed, the enlarged item reports the run-time needed to perform the synthesis."""
def onCreate( self ):
self.setState( self.CONFIGURED )
def executeEvent( self, inputs ):
circ = circuit()
cf = [ gate_costs, quantum_costs, transistor_costs ][int( self.cost_function )]()
synthesis = [ transformation_based_synthesis_func, reed_muller_synthesis_func ][int( self.variant )]
res = swop( circ, inputs[0],
enable = int( self.swop ) > 0,
exhaustive = int( self.swop ) == 1,
synthesis = synthesis( bidirectional = bool( int( self.bidi_synthesis ) ) ),
cf = cf )
if type( res ) == dict:
try:
circ.circuit_name = inputs[0].name
except: pass
self.widget.runtime.setText( "%.2f s" % res['runtime'] )
circuit_add_runtime( circ, res['runtime'] )
else:
return res
return [ circ ]
def onVariantChanged( self, value ):
suffix = [ "TT", "RMS" ][int( value )]
self.setText( "Transformation-based (%s)" % suffix )
|
sprocket/speech/synthesizer.py | zhouming-hfut/sprocket | 500 | 12699941 | # -*- coding: utf-8 -*-
import numpy as np
import pyworld
import pysptk
from pysptk.synthesis import MLSADF
class Synthesizer(object):
"""
Speech synthesizer with several acoustic features
Parameters
----------
fs: int, optional
Sampling frequency
Default set to 16000
fftl: int, optional
Frame Length of STFT
Default set to 1024
shiftms: int, optional
Shift size for STFT
Default set to 5
"""
def __init__(self, fs=16000, fftl=1024, shiftms=5):
self.fs = fs
self.fftl = fftl
self.shiftms = shiftms
return
def synthesis(self, f0, mcep, ap, rmcep=None, alpha=0.42):
"""synthesis generates waveform from F0, mcep, aperiodicity
Parameters
----------
f0 : array, shape (`T`, `1`)
array of F0 sequence
mcep : array, shape (`T`, `dim`)
array of mel-cepstrum sequence
ap : array, shape (`T`, `fftlen / 2 + 1`) or (`T`, `dim_codeap`)
array of aperiodicity or code aperiodicity
rmcep : array, optional, shape (`T`, `dim`)
array of reference mel-cepstrum sequence
Default set to None
alpha : int, optional
Parameter of all-path transfer function
Default set to 0.42
Returns
----------
wav: array,
            Synthesized waveform
"""
if rmcep is not None:
# power modification
mcep = mod_power(mcep, rmcep, alpha=alpha)
if ap.shape[1] < self.fftl // 2 + 1:
# decode codeap to ap
ap = pyworld.decode_aperiodicity(ap, self.fs, self.fftl)
# mcep into spc
spc = pysptk.mc2sp(mcep, alpha, self.fftl)
# generate waveform using world vocoder with f0, spc, ap
wav = pyworld.synthesize(f0, spc, ap,
self.fs, frame_period=self.shiftms)
return wav
def synthesis_diff(self, x, diffmcep, rmcep=None, alpha=0.42):
"""filtering with a differential mel-cesptrum
Parameters
----------
x : array, shape (`samples`)
array of waveform sequence
diffmcep : array, shape (`T`, `dim`)
array of differential mel-cepstrum sequence
rmcep : array, shape (`T`, `dim`)
array of reference mel-cepstrum sequence
Default set to None
alpha : float, optional
Parameter of all-path transfer function
Default set to 0.42
Return
----------
wav: array, shape (`samples`)
            Synthesized waveform
"""
x = x.astype(np.float64)
dim = diffmcep.shape[1] - 1
shiftl = int(self.fs / 1000 * self.shiftms)
if rmcep is not None:
# power modification
diffmcep = mod_power(rmcep + diffmcep, rmcep, alpha=alpha) - rmcep
b = np.apply_along_axis(pysptk.mc2b, 1, diffmcep, alpha)
assert np.isfinite(b).all()
mlsa_fil = pysptk.synthesis.Synthesizer(
MLSADF(dim, alpha=alpha), shiftl)
wav = mlsa_fil.synthesis(x, b)
return wav
def synthesis_spc(self, f0, spc, ap):
"""synthesis generates waveform from F0, mcep, ap
Parameters
----------
f0 : array, shape (`T`, `1`)
array of F0 sequence
spc : array, shape (`T`, `fftl // 2 + 1`)
array of mel-cepstrum sequence
ap : array, shape (`T`, `fftl // 2 + 1`)
array of aperiodicity
Return
------
wav: vector, shape (`samples`)
            Synthesized waveform
"""
# generate waveform using world vocoder with f0, spc, ap
wav = pyworld.synthesize(f0, spc, ap,
self.fs, frame_period=self.shiftms)
return wav
def mod_power(cvmcep, rmcep, alpha=0.42, irlen=1024):
"""Power modification based on inpulse responce
Parameters
----------
cvmcep : array, shape (`T`, `dim`)
array of converted mel-cepstrum
rmcep : array, shape (`T`, `dim`)
array of reference mel-cepstrum
alpha : float, optional
All-path filter transfer function
Default set to 0.42
irlen : int, optional
Length for IIR filter
Default set to 1024
Return
------
modified_cvmcep : array, shape (`T`, `dim`)
array of power modified converted mel-cepstrum
"""
if rmcep.shape != cvmcep.shape:
raise ValueError("The shapes of the converted and \
reference mel-cepstrum are different: \
{} / {}".format(cvmcep.shape, rmcep.shape))
cv_e = pysptk.mc2e(cvmcep, alpha=alpha, irlen=irlen)
r_e = pysptk.mc2e(rmcep, alpha=alpha, irlen=irlen)
dpow = np.log(r_e / cv_e) / 2
modified_cvmcep = np.copy(cvmcep)
modified_cvmcep[:, 0] += dpow
return modified_cvmcep
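# Hedged usage sketch (editor's illustration, not part of the original sprocket
# module): WORLD analysis with pyworld, mel-cepstral conversion with pysptk, then
# resynthesis through Synthesizer.synthesis(). The mcep order (24) and alpha (0.42)
# are assumptions of this example; x must be a float64 mono waveform.
def _example_analysis_synthesis(x, fs=16000):
    """Illustrative only: analyze x at fs Hz and resynthesize it."""
    shiftms = 5
    fftl = 1024
    f0, spc, ap = pyworld.wav2world(x, fs, frame_period=shiftms)
    mcep = pysptk.sp2mc(spc, 24, 0.42)
    synthesizer = Synthesizer(fs=fs, fftl=fftl, shiftms=shiftms)
    return synthesizer.synthesis(f0, mcep, ap, alpha=0.42)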
|
pyabc/sampler/redis_eps/work.py | ICB-DCM/pyABC | 144 | 12699943 | """Function to work on a population in dynamic mode."""
import sys
from redis import StrictRedis
import cloudpickle as pickle
from time import sleep, time
import logging
from ..util import any_particle_preliminary
from .cmd import (
N_EVAL, N_ACC, N_REQ, N_FAIL, ALL_ACCEPTED, N_WORKER, N_LOOKAHEAD_EVAL,
SSA, QUEUE, BATCH_SIZE, IS_LOOK_AHEAD, ANALYSIS_ID, MAX_N_EVAL_LOOK_AHEAD,
SLEEP_TIME, DONE_IXS, idfy)
from .cli import KillHandler
logger = logging.getLogger("ABC.Sampler")
def work_on_population_dynamic(
analysis_id: str,
t: int,
redis: StrictRedis,
catch: bool,
start_time: float,
max_runtime_s: float,
kill_handler: KillHandler):
"""Work on population in dynamic mode.
Here the actual sampling happens.
"""
# short-form
ana_id = analysis_id
def get_int(var: str):
"""Convenience function to read an int variable."""
return int(redis.get(idfy(var, ana_id, t)).decode())
# set timers
population_start_time = time()
cumulative_simulation_time = 0
# read from pipeline
pipeline = redis.pipeline()
# extract bytes
(ssa_b, batch_size_b, all_accepted_b, is_look_ahead_b,
max_eval_look_ahead_b) = (
pipeline.get(idfy(SSA, ana_id, t))
.get(idfy(BATCH_SIZE, ana_id, t))
.get(idfy(ALL_ACCEPTED, ana_id, t))
.get(idfy(IS_LOOK_AHEAD, ana_id, t))
.get(idfy(MAX_N_EVAL_LOOK_AHEAD, ana_id, t)).execute())
# if the ssa object does not exist, something went wrong, return
if ssa_b is None:
return
# notify sign up as worker
n_worker = redis.incr(idfy(N_WORKER, ana_id, t))
logger.info(
f"Begin generation {t}, I am worker {n_worker}")
# only allow stopping the worker at particular points
kill_handler.exit = False
# convert from bytes
simulate_one, sample_factory = pickle.loads(ssa_b)
batch_size = int(batch_size_b.decode())
all_accepted = bool(int(all_accepted_b.decode()))
is_look_ahead = bool(int(is_look_ahead_b.decode()))
max_n_eval_look_ahead = float(max_eval_look_ahead_b.decode())
# counter for number of simulations
internal_counter = 0
# create empty sample
sample = sample_factory(is_look_ahead=is_look_ahead)
# loop until no more particles required
# all numbers are re-loaded in each iteration as they can dynamically
# update
while get_int(N_ACC) < get_int(N_REQ) and (
not all_accepted or
get_int(N_EVAL) - get_int(N_FAIL) < get_int(N_REQ)):
# check whether the process was externally asked to stop
if kill_handler.killed:
logger.info(
f"Worker {n_worker} received stop signal. "
"Terminating in the middle of a population "
f"after {internal_counter} samples.")
# notify quit
redis.decr(idfy(N_WORKER, ana_id, t))
sys.exit(0)
# check whether time's up
current_runtime = time() - start_time
if current_runtime > max_runtime_s:
logger.info(
f"Worker {n_worker} stops during population because "
f"runtime {current_runtime} exceeds "
f"max runtime {max_runtime_s}")
# notify quit
redis.decr(idfy(N_WORKER, ana_id, t))
# return to task queue
return
# check whether the analysis was terminated or replaced by a new one
ana_id_new_b = redis.get(ANALYSIS_ID)
if ana_id_new_b is None or str(ana_id_new_b.decode()) != ana_id:
logger.info(
f"Worker {n_worker} stops during population because "
"the analysis seems to have been stopped.")
# notify quit
redis.decr(idfy(N_WORKER, ana_id, t))
# return to task queue
return
# check if the analysis left the look-ahead mode
if is_look_ahead and not bool(int(
redis.get(idfy(IS_LOOK_AHEAD, ana_id, t)).decode())):
# reload SSA object
ssa_b = redis.get(idfy(SSA, ana_id, t))
simulate_one, sample_factory = pickle.loads(ssa_b)
# cache
is_look_ahead = False
# create new empty sample for clean split
sample = sample_factory(is_look_ahead=is_look_ahead)
# check if in look-ahead mode and should sleep
if is_look_ahead and get_int(N_EVAL) >= max_n_eval_look_ahead:
# sleep ... seconds
sleep(SLEEP_TIME)
continue
# increase global evaluation counter (before simulation!)
particle_max_id: int = redis.incr(
idfy(N_EVAL, ana_id, t), batch_size)
if is_look_ahead:
# increment look-ahead evaluation counter
redis.incr(idfy(N_LOOKAHEAD_EVAL, ana_id, t), batch_size)
# timer for current simulation until batch_size acceptances
this_sim_start = time()
# collect accepted particles
accepted_samples = []
# whether any particle in this iteration is preliminary
any_prel = False
# make batch_size attempts
for n_batched in range(batch_size):
# increase evaluation counter
internal_counter += 1
try:
# simulate
new_sim = simulate_one()
except Exception as e:
logger.warning(f"Redis worker number {n_worker} failed. "
f"Error message is: {e}")
# increment the failure counter
redis.incr(idfy(N_FAIL, ana_id, t), 1)
if not catch:
raise e
continue
# append to current sample
sample.append(new_sim)
# check for acceptance
if new_sim.accepted:
# The order of the IDs is reversed, but this does not
# matter. Important is only that the IDs are specified
# before the simulation starts
# append to accepted list
accepted_samples.append(
pickle.dumps((particle_max_id - n_batched, sample)))
any_prel = any_prel or any_particle_preliminary(sample)
# initialize new sample
sample = sample_factory(is_look_ahead=is_look_ahead)
# update total simulation-specific time
cumulative_simulation_time += time() - this_sim_start
# new pipeline
pipeline = redis.pipeline()
# push to pipeline if at least one sample got accepted
if len(accepted_samples) > 0:
# update particles counter if nothing is preliminary,
# otherwise final acceptance is done by the sampler
if not any_prel:
pipeline.incr(idfy(N_ACC, ana_id, t), len(accepted_samples))
# note: samples are appended 1-by-1
pipeline.rpush(idfy(QUEUE, ana_id, t), *accepted_samples)
# append to list of done simulations
pipeline.rpush(
idfy(DONE_IXS, ana_id, t),
*range(particle_max_id - batch_size + 1, particle_max_id + 1),
)
# execute all commands
pipeline.execute()
# end of sampling loop
# notify quit
redis.decr(idfy(N_WORKER, ana_id, t))
kill_handler.exit = True
population_total_time = time() - population_start_time
logger.info(
f"Finished generation {t}, did {internal_counter} samples. "
f"Simulation time: {cumulative_simulation_time:.2f}s, "
f"total time {population_total_time:.2f}.")
|
opytimizer/optimizers/swarm/pso.py | gugarosa/opytimizer | 528 | 12700032 |
"""Particle Swarm Optimization-based algorithms.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.exception as e
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class PSO(Optimizer):
"""A PSO class, inherited from Optimizer.
This is the designed class to define PSO-related
variables and methods.
References:
<NAME>, <NAME> and <NAME>. Swarm intelligence.
Artificial Intelligence (2001).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> PSO.')
# Overrides its parent class with the receiving params
super(PSO, self).__init__()
# Inertia weight
self.w = 0.7
# Cognitive constant
self.c1 = 1.7
# Social constant
self.c2 = 1.7
# Builds the class
self.build(params)
logger.info('Class overrided.')
@property
def w(self):
"""float: Inertia weight.
"""
return self._w
@w.setter
def w(self, w):
if not isinstance(w, (float, int)):
raise e.TypeError('`w` should be a float or integer')
if w < 0:
raise e.ValueError('`w` should be >= 0')
self._w = w
@property
def c1(self):
"""float: Cognitive constant.
"""
return self._c1
@c1.setter
def c1(self, c1):
if not isinstance(c1, (float, int)):
raise e.TypeError('`c1` should be a float or integer')
if c1 < 0:
raise e.ValueError('`c1` should be >= 0')
self._c1 = c1
@property
def c2(self):
"""float: Social constant.
"""
return self._c2
@c2.setter
def c2(self, c2):
if not isinstance(c2, (float, int)):
raise e.TypeError('`c2` should be a float or integer')
if c2 < 0:
raise e.ValueError('`c2` should be >= 0')
self._c2 = c2
@property
def local_position(self):
"""np.array: Array of velocities.
"""
return self._local_position
@local_position.setter
def local_position(self, local_position):
if not isinstance(local_position, np.ndarray):
raise e.TypeError('`local_position` should be a numpy array')
self._local_position = local_position
@property
def velocity(self):
"""np.array: Array of velocities.
"""
return self._velocity
@velocity.setter
def velocity(self, velocity):
if not isinstance(velocity, np.ndarray):
raise e.TypeError('`velocity` should be a numpy array')
self._velocity = velocity
def compile(self, space):
"""Compiles additional information that is used by this optimizer.
Args:
space (Space): A Space object containing meta-information.
"""
# Arrays of local positions and velocities
self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
def evaluate(self, space, function):
"""Evaluates the search space according to the objective function.
Args:
space (Space): A Space object that will be evaluated.
function (Function): A Function object that will be used as the objective function.
"""
# Iterates through all agents
for i, agent in enumerate(space.agents):
# Calculates the fitness value of current agent
fit = function(agent.position)
# If fitness is better than agent's best fit
if fit < agent.fit:
# Updates its current fitness to the newer one
agent.fit = fit
# Also updates the local best position to current's agent position
self.local_position[i] = copy.deepcopy(agent.position)
# If agent's fitness is better than global fitness
if agent.fit < space.best_agent.fit:
# Makes a deep copy of agent's local best position and fitness to the best agent
space.best_agent.position = copy.deepcopy(self.local_position[i])
space.best_agent.fit = copy.deepcopy(agent.fit)
def update(self, space):
"""Wraps Particle Swarm Optimization over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
"""
# Iterates through all agents
for i, agent in enumerate(space.agents):
# Generates random numbers
r1 = r.generate_uniform_random_number()
r2 = r.generate_uniform_random_number()
# Updates agent's velocity (p. 294)
self.velocity[i] = self.w * self.velocity[i] + \
self.c1 * r1 * (self.local_position[i] - agent.position) + \
self.c2 * r2 * (space.best_agent.position - agent.position)
# Updates agent's position (p. 294)
agent.position += self.velocity[i]
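# Hedged usage sketch (editor's illustration, not part of the original opytimizer
# module): constructing PSO with a params dict, assuming Optimizer.build() maps the
# keys onto the w/c1/c2 properties defined above. Running a full optimization also
# needs an opytimizer Space and Function, which are outside the scope of this sketch.
def _example_build_pso():
    """Illustrative only: override the default PSO hyperparameters at construction."""
    return PSO(params={'w': 0.9, 'c1': 2.0, 'c2': 2.0})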
class AIWPSO(PSO):
"""An AIWPSO class, inherited from PSO.
This is the designed class to define AIWPSO-related
variables and methods.
References:
<NAME>, <NAME> and <NAME>.
A novel particle swarm optimization algorithm with adaptive inertia weight.
Applied Soft Computing (2011).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: PSO -> AIWPSO.')
# Minimum inertia weight
self.w_min = 0.1
# Maximum inertia weight
self.w_max = 0.9
# Overrides its parent class with the receiving params
super(AIWPSO, self).__init__(params)
logger.info('Class overrided.')
@property
def w_min(self):
"""float: Minimum inertia weight.
"""
return self._w_min
@w_min.setter
def w_min(self, w_min):
if not isinstance(w_min, (float, int)):
raise e.TypeError('`w_min` should be a float or integer')
if w_min < 0:
raise e.ValueError('`w_min` should be >= 0')
self._w_min = w_min
@property
def w_max(self):
"""float: Maximum inertia weight.
"""
return self._w_max
@w_max.setter
def w_max(self, w_max):
if not isinstance(w_max, (float, int)):
raise e.TypeError('`w_max` should be a float or integer')
if w_max < 0:
raise e.ValueError('`w_max` should be >= 0')
if w_max < self.w_min:
raise e.ValueError('`w_max` should be >= `w_min`')
self._w_max = w_max
@property
def fitness(self):
"""list: List of fitnesses.
"""
return self._fitness
@fitness.setter
def fitness(self, fitness):
if not isinstance(fitness, list):
raise e.TypeError('`fitness` should be a list')
self._fitness = fitness
def _compute_success(self, agents):
"""Computes the particles' success for updating inertia weight (eq. 16).
Args:
agents (list): List of agents.
"""
# Initial counter
p = 0
# Iterates through every agent
for i, agent in enumerate(agents):
# If current agent fitness is smaller than its best
if agent.fit < self.fitness[i]:
# Increments the counter
p += 1
# Replaces fitness with current agent's fitness
self.fitness[i] = agent.fit
# Update inertia weight value
self.w = (self.w_max - self.w_min) * (p / len(agents)) + self.w_min
def update(self, space, iteration):
"""Wraps Adaptive Inertia Weight Particle Swarm Optimization over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
iteration (int): Current iteration.
"""
# Checks if it is the first iteration
if iteration == 0:
# Creates a list of initial fitnesses
self.fitness = [agent.fit for agent in space.agents]
# Iterates through all agents
for i, agent in enumerate(space.agents):
# Generates random numbers
r1 = r.generate_uniform_random_number()
r2 = r.generate_uniform_random_number()
# Updates agent's velocity
self.velocity[i] = self.w * self.velocity[i] + \
self.c1 * r1 * (self.local_position[i] - agent.position) + \
self.c2 * r2 * (space.best_agent.position - agent.position)
# Updates agent's position
agent.position += self.velocity[i]
# Computing particle's success and updating inertia weight
self._compute_success(space.agents)
class RPSO(PSO):
"""An RPSO class, inherited from Optimizer.
This is the designed class to define RPSO-related
variables and methods.
References:
<NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
Harnessing Particle Swarm Optimization Through Relativistic Velocity.
IEEE Congress on Evolutionary Computation (2020).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: PSO -> RPSO.')
# Overrides its parent class with the receiving params
super(RPSO, self).__init__(params)
logger.info('Class overrided.')
@property
def mass(self):
"""np.array: Array of masses.
"""
return self._mass
@mass.setter
def mass(self, mass):
if not isinstance(mass, np.ndarray):
raise e.TypeError('`mass` should be a numpy array')
self._mass = mass
def compile(self, space):
"""Compiles additional information that is used by this optimizer.
Args:
space (Space): A Space object containing meta-information.
"""
# Arrays of local positions, velocities and masses
self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
self.mass = r.generate_uniform_random_number(size=(space.n_agents, space.n_variables, space.n_dimensions))
def update(self, space):
"""Wraps Relativistic Particle Swarm Optimization over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
"""
# Calculates the maximum velocity
max_velocity = np.max(self.velocity)
# Iterates through all agents
for i, agent in enumerate(space.agents):
            # Generates random numbers
r1 = r.generate_uniform_random_number()
r2 = r.generate_uniform_random_number()
# Updates current agent velocity (eq. 11)
gamma = 1 / np.sqrt(1 - (max_velocity ** 2 / c.LIGHT_SPEED ** 2))
self.velocity[i] = self.mass[i] * self.velocity[i] * gamma + \
self.c1 * r1 * (self.local_position[i] - agent.position) + \
self.c2 * r2 * (space.best_agent.position - agent.position)
# Updates current agent position
agent.position += self.velocity[i]
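# Illustrative sketch, not part of the original optimizer code: eq. 11 scales the
# velocity by a Lorentz-like factor gamma = 1 / sqrt(1 - v_max^2 / c^2). The
# light-speed constant and maximum velocity below are placeholder values.
def _rpso_gamma_sketch(max_velocity=0.6, light_speed=1.0):
    import math
    return 1.0 / math.sqrt(1.0 - (max_velocity ** 2 / light_speed ** 2))
# _rpso_gamma_sketch() -> 1.25: the faster the swarm, the stronger the scaling.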
class SAVPSO(PSO):
"""An SAVPSO class, inherited from Optimizer.
This is the designed class to define SAVPSO-related
variables and methods.
References:
<NAME> and <NAME>.
Self-adaptive velocity particle swarm optimization for solving constrained optimization problems.
Journal of global optimization (2008).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: PSO -> SAVPSO.')
# Overrides its parent class with the receiving params
super(SAVPSO, self).__init__(params)
        logger.info('Class overridden.')
def update(self, space):
"""Wraps Self-adaptive Velocity Particle Swarm Optimization over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
"""
# Creates an array of positions
positions = np.zeros((space.agents[0].position.shape[0], space.agents[0].position.shape[1]))
# For every agent
for agent in space.agents:
# Sums up its position
positions += agent.position
# Divides by the number of agents
positions /= len(space.agents)
# Iterates through all agents
for i, agent in enumerate(space.agents):
# Generates a random index for selecting an agent
idx = r.generate_integer_random_number(0, len(space.agents))
# Updates current agent's velocity (eq. 8)
r1 = r.generate_uniform_random_number()
self.velocity[i] = self.w * np.fabs(self.local_position[idx] - self.local_position[i]) * \
np.sign(self.velocity[i]) + r1 * (self.local_position[i] - agent.position) + \
(1 - r1) * (space.best_agent.position - agent.position)
# Updates current agent's position
agent.position += self.velocity[i]
# For every decision variable
for j in range(agent.n_variables):
# Generates a random number
r4 = r.generate_uniform_random_number(0, 1)
# If position is greater than upper bound
if agent.position[j] > agent.ub[j]:
# Replaces its value
agent.position[j] = positions[j] + 1 * r4 * (agent.ub[j] - positions[j])
# If position is smaller than lower bound
if agent.position[j] < agent.lb[j]:
# Replaces its value
agent.position[j] = positions[j] + 1 * r4 * (agent.lb[j] - positions[j])
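# Illustrative sketch, not part of the original optimizer code: SAVPSO repairs an
# out-of-bounds coordinate by resampling it between the swarm's mean position and
# the violated bound. All values below are made-up placeholders.
def _savpso_bound_repair_sketch(position=1.4, mean_position=0.2, upper_bound=1.0, r4=0.5):
    if position > upper_bound:
        # The repaired point lies on the segment [mean_position, upper_bound]
        position = mean_position + r4 * (upper_bound - mean_position)
    return position
# _savpso_bound_repair_sketch() -> 0.6, halfway between the mean and the bound.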
class VPSO(PSO):
"""A VPSO class, inherited from Optimizer.
This is the designed class to define VPSO-related
variables and methods.
References:
<NAME>. Vertical particle swarm optimization algorithm and its application in soft-sensor modeling.
International Conference on Machine Learning and Cybernetics (2007).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: PSO -> VPSO.')
# Overrides its parent class with the receiving params
super(VPSO, self).__init__(params)
        logger.info('Class overridden.')
@property
def v_velocity(self):
"""np.array: Array of vertical velocities.
"""
return self._v_velocity
@v_velocity.setter
def v_velocity(self, v_velocity):
if not isinstance(v_velocity, np.ndarray):
raise e.TypeError('`v_velocity` should be a numpy array')
self._v_velocity = v_velocity
def compile(self, space):
"""Compiles additional information that is used by this optimizer.
Args:
space (Space): A Space object containing meta-information.
"""
# Arrays of local positions, velocities and vertical velocities
self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
self.v_velocity = np.ones((space.n_agents, space.n_variables, space.n_dimensions))
def update(self, space):
"""Wraps Vertical Particle Swarm Optimization over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
"""
# Iterates through all agents
for i, agent in enumerate(space.agents):
# Generates uniform random numbers
r1 = r.generate_uniform_random_number()
r2 = r.generate_uniform_random_number()
# Updates current agent velocity (eq. 3)
self.velocity[i] = self.w * self.velocity[i] + self.c1 * r1 * (self.local_position[i] - agent.position) + \
self.c2 * r2 * (space.best_agent.position - agent.position)
# Updates current agent vertical velocity (eq. 4)
self.v_velocity[i] -= (np.dot(self.velocity[i].T, self.v_velocity[i]) /
(np.dot(self.velocity[i].T, self.velocity[i]) + c.EPSILON)) * self.velocity[i]
# Updates current agent position (eq. 5)
r1 = r.generate_uniform_random_number()
agent.position += r1 * self.velocity[i] + (1 - r1) * self.v_velocity[i]
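# Illustrative sketch, not part of the original optimizer code: eq. 4 removes from
# the vertical velocity its projection onto the regular velocity (a Gram-Schmidt
# style step), keeping the two directions roughly orthogonal. Plain 1-D vectors
# and a tiny epsilon are used here only for clarity.
def _vpso_vertical_velocity_sketch():
    import numpy as np
    eps = 1e-32
    velocity = np.array([1.0, 0.0])
    v_velocity = np.array([1.0, 1.0])
    v_velocity = v_velocity - (np.dot(velocity, v_velocity) /
                               (np.dot(velocity, velocity) + eps)) * velocity
    return v_velocity  # -> array([0., 1.]), orthogonal to `velocity`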
|
effdet/data/dataset.py | AgentVi/efficientdet-pytorch | 1,386 | 12700034 | <gh_stars>1000+
""" Detection dataset
Hacked together by <NAME>
"""
import torch.utils.data as data
import numpy as np
from PIL import Image
from .parsers import create_parser
class DetectionDatset(data.Dataset):
"""`Object Detection Dataset. Use with parsers for COCO, VOC, and OpenImages.
Args:
parser (string, Parser):
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
"""
def __init__(self, data_dir, parser=None, parser_kwargs=None, transform=None):
super(DetectionDatset, self).__init__()
parser_kwargs = parser_kwargs or {}
self.data_dir = data_dir
if isinstance(parser, str):
self._parser = create_parser(parser, **parser_kwargs)
else:
assert parser is not None and len(parser.img_ids)
self._parser = parser
self._transform = transform
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, annotations (target)).
"""
img_info = self._parser.img_infos[index]
target = dict(img_idx=index, img_size=(img_info['width'], img_info['height']))
if self._parser.has_labels:
ann = self._parser.get_ann_info(index)
target.update(ann)
img_path = self.data_dir / img_info['file_name']
img = Image.open(img_path).convert('RGB')
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self._parser.img_ids)
@property
def parser(self):
return self._parser
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, t):
self._transform = t
class SkipSubset(data.Dataset):
r"""
Subset of a dataset at specified indices.
Arguments:
dataset (Dataset): The whole Dataset
n (int): skip rate (select every nth)
"""
def __init__(self, dataset, n=2):
self.dataset = dataset
assert n >= 1
self.indices = np.arange(len(dataset))[::n]
def __getitem__(self, idx):
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
@property
def parser(self):
return self.dataset.parser
@property
def transform(self):
return self.dataset.transform
@transform.setter
def transform(self, t):
self.dataset.transform = t
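# Illustrative usage sketch, not part of the original module: SkipSubset keeps
# every n-th sample and only needs __getitem__/__len__ from the wrapped dataset,
# so a plain list stands in for a real DetectionDatset here.
def _skip_subset_sketch():
    dummy_dataset = ['img0', 'img1', 'img2', 'img3', 'img4']
    subset = SkipSubset(dummy_dataset, n=2)
    return len(subset), [subset[i] for i in range(len(subset))]
# _skip_subset_sketch() -> (3, ['img0', 'img2', 'img4'])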
|
zentral/contrib/santa/migrations/0022_auto_20210121_1745.py | gwhitehawk/zentral | 634 | 12700061 | # Generated by Django 2.2.17 on 2021-01-21 17:45
from django.db import migrations
POLICY_DICT = {
"BLACKLIST": 2,
"WHITELIST": 1,
"SILENT_BLACKLIST": 3,
}
def convert_santa_probes(apps, schema_editor):
ProbeSource = apps.get_model("probes", "ProbeSource")
Tag = apps.get_model("inventory", "Tag")
Configuration = apps.get_model("santa", "Configuration")
Rule = apps.get_model("santa", "Rule")
Target = apps.get_model("santa", "Target")
configurations = list(Configuration.objects.all())
if not configurations:
return
for ps in ProbeSource.objects.filter(model="SantaProbe", status="ACTIVE"):
body = ps.body
if not body:
continue
rules = body.get("rules", [])
if not rules:
continue
tag_ids = set([])
for inv_filter in body.get("filters", {}).get("inventory", []):
for tag_id in inv_filter.get("tag_ids", []):
tag_ids.add(tag_id)
tags = []
if tag_ids:
tags = list(Tag.objects.filter(pk__in=tag_ids))
for rule in rules:
policy = rule.get("policy")
if policy == "REMOVE":
continue
defaults = {}
try:
defaults["policy"] = POLICY_DICT[policy]
except KeyError:
continue
custom_msg = rule.get("custom_msg")
if custom_msg:
defaults["custom_msg"] = custom_msg
else:
defaults["custom_msg"] = ""
target_type = rule.get("rule_type")
if target_type not in ("CERTIFICATE", "BINARY"):
continue
sha256 = rule.get("sha256")
if not sha256:
continue
target, _ = Target.objects.get_or_create(type=target_type, sha256=sha256)
for configuration in configurations:
r, _ = Rule.objects.update_or_create(configuration=configuration,
target=target,
defaults=defaults)
for tag in tags:
r.tags.add(tag)
ProbeSource.objects.filter(model="SantaProbe").delete()
class Migration(migrations.Migration):
dependencies = [
('santa', '0021_delete_collectedapplication'),
]
operations = [
migrations.RunPython(convert_santa_probes),
]
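# Illustrative sketch, not part of the original migration: each legacy probe rule
# becomes Rule defaults by mapping the policy string through POLICY_DICT and
# carrying over (or blanking) the custom message. The rule dict is a made-up example.
def _legacy_rule_to_defaults_sketch():
    legacy_rule = {"policy": "BLACKLIST", "custom_msg": "Blocked by policy"}
    return {
        "policy": POLICY_DICT[legacy_rule["policy"]],
        "custom_msg": legacy_rule.get("custom_msg") or "",
    }
# _legacy_rule_to_defaults_sketch() -> {'policy': 2, 'custom_msg': 'Blocked by policy'}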
|
third_party/catapult/experimental/qbot/qbot/api.py | zipated/src | 2,151 | 12700083 | <reponame>zipated/src
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import httplib2
import json
from oauth2client import service_account # pylint: disable=no-name-in-module
import os
import sys
DEFAULT_SCOPES = ['https://www.googleapis.com/auth/userinfo.email']
MILO_ENDPOINT = 'https://luci-milo.appspot.com/prpc/milo.Buildbot/'
LOGDOG_ENDPOINT = 'https://luci-logdog.appspot.com/prpc/logdog.Logs/'
class _MiloLogdogConfig(object):
"""Config class used to hold credentials shared by all API resquests."""
credentials = None
class RequestError(Exception):
pass
def IsOkStatus(status):
return 200 <= int(status) <= 299
def SetCredentials(json_keyfile, scopes=None):
"""Configure the credentials used to access the milo/logdog API."""
filepath = os.path.expanduser(json_keyfile)
if not os.path.isfile(filepath):
sys.stderr.write('Credentials not found: %s\n' % json_keyfile)
sys.stderr.write('You need a json keyfile for a service account with '
'milo/logdog access.\n')
sys.exit(1)
if scopes is None:
scopes = DEFAULT_SCOPES
_MiloLogdogConfig.credentials = (
service_account.ServiceAccountCredentials.from_json_keyfile_name(
filepath, scopes))
def _Request(url, params):
if _MiloLogdogConfig.credentials is None:
# Try to use some default credentials if they haven't been explicitly set.
SetCredentials('~/.default_service_credentials.json')
http = _MiloLogdogConfig.credentials.authorize(httplib2.Http())
body = json.dumps(params).encode('utf-8')
response, content = http.request(url, 'POST', body, headers={
'Accept': 'application/json', 'Content-Type': 'application/json'})
if not IsOkStatus(response['status']):
raise RequestError('Server returned %s response' % response['status'])
  # Need to skip over the 4-character jsonp header.
return json.loads(content[4:].decode('utf-8'))
def MiloRequest(method, params):
return _Request(MILO_ENDPOINT + method, params)
def LogdogRequest(method, params):
return _Request(LOGDOG_ENDPOINT + method, params)
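# Illustrative sketch, not part of the original module: pRPC responses carry a
# 4-character anti-XSSI prefix that _Request strips before JSON-decoding, and
# IsOkStatus accepts any 2xx status. The response content below is fabricated,
# not real Milo/Logdog output.
def _ResponseHandlingSketch():
  content = b")]}'" + b'{"builds": []}'
  decoded = json.loads(content[4:].decode('utf-8'))
  return IsOkStatus('204'), decoded
# _ResponseHandlingSketch() -> (True, {'builds': []})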
|
lib/python/treadmill/cli/run.py | krcooke/treadmill | 133 | 12700085 | """Manage Treadmill app manifest.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import shlex
import click
import six
from six.moves import urllib_parse
from treadmill import cli
from treadmill import context
from treadmill import restclient
from treadmill import yamlwrapper as yaml
_LOGGER = logging.getLogger(__name__)
_DEFAULT_MEM = '100M'
_DEFAULT_DISK = '100M'
_DEFAULT_CPU = '10%'
def _run(apis,
count,
manifest,
memory,
cpu,
disk,
tickets,
traits,
service,
restart_limit,
restart_interval,
endpoint,
debug,
debug_services,
appname,
command):
"""Run Treadmill app."""
# too many branches
#
# pylint: disable=R0912
app = {}
if manifest:
app = yaml.load(stream=manifest)
if endpoint:
app['endpoints'] = [{'name': name, 'port': port}
for name, port in endpoint]
if tickets:
app['tickets'] = tickets
if traits:
app['traits'] = traits
if command:
if not service:
# Take the basename of the command, always assume / on all
# platforms.
service = os.path.basename(shlex.split(command[0])[0])
services_dict = {svc['name']: svc for svc in app.get('services', [])}
if service:
if service not in services_dict:
services_dict[service] = {
'name': service,
'restart': {
'limit': restart_limit,
'interval': restart_interval,
}
}
if command:
services_dict[service]['command'] = ' '.join(list(command))
if services_dict:
app['services'] = list(six.itervalues(services_dict))
if app:
# Ensure defaults are set.
if 'memory' not in app:
app['memory'] = _DEFAULT_MEM
if 'disk' not in app:
app['disk'] = _DEFAULT_DISK
if 'cpu' not in app:
app['cpu'] = _DEFAULT_CPU
# Override if requested.
if memory is not None:
app['memory'] = str(memory)
if disk is not None:
app['disk'] = str(disk)
if cpu is not None:
app['cpu'] = str(cpu)
url = '/instance/' + appname
query = {}
if count:
query['count'] = count
if debug:
query['debug'] = 'true'
if debug_services:
query['debug_services'] = ','.join(debug_services)
if query:
url = '{}?{}'.format(
url, urllib_parse.urlencode(query)
)
response = restclient.post(apis, url, payload=app)
for instance_id in response.json()['instances']:
cli.out(instance_id)
def init():
"""Return top level command handler."""
@click.command()
@click.option('--api-service-principal', required=False,
envvar='TREADMILL_API_SERVICE_PRINCIPAL',
callback=cli.handle_context_opt,
help='API service principal for SPNEGO auth (default HTTP)',
expose_value=False)
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--count', help='Number of instances to start',
default=1)
@click.option('-m', '--manifest', help='App manifest file (stream)',
type=click.File('rb'))
@click.option('--memory', help='Memory demand, default %s.' % _DEFAULT_MEM,
metavar='G|M',
callback=cli.validate_memory)
@click.option('--cpu', help='CPU demand, default %s.' % _DEFAULT_CPU,
metavar='XX%',
callback=cli.validate_cpu)
@click.option('--disk', help='Disk demand, default %s.' % _DEFAULT_DISK,
metavar='G|M',
callback=cli.validate_disk)
@click.option('--tickets', help='Tickets.',
type=cli.LIST)
@click.option('--traits', help='Traits.',
type=cli.LIST)
@click.option('--service', help='Service name.', type=str)
@click.option('--restart-limit', type=int, default=0,
help='Service restart limit.')
@click.option('--restart-interval', type=int, default=60,
help='Service restart limit interval.')
@click.option('--endpoint', help='Network endpoint.',
type=(str, int), multiple=True)
@click.option('--debug/--no-debug', help='Do not start services.',
is_flag=True, default=False)
@click.option('--debug-services', help='Do not start specified services.',
type=cli.LIST)
@click.argument('appname')
@click.argument('command', nargs=-1)
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
def run(count,
manifest,
memory,
cpu,
disk,
tickets,
traits,
service,
restart_limit,
restart_interval,
endpoint,
debug,
debug_services,
appname,
command):
"""Schedule Treadmill app.
With no options, will schedule already configured app, fail if app
is not configured.
When manifest (or other options) are specified, they will be merged
on top of existing manifest if it exists.
"""
apis = context.GLOBAL.cell_api()
return _run(
apis, count, manifest, memory, cpu, disk, tickets, traits,
service, restart_limit, restart_interval, endpoint,
debug, debug_services, appname, command)
return run
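# Illustrative sketch, not part of the original module: this mirrors how `_run`
# assembles the instance payload from CLI options before POSTing it to
# /instance/<appname>. The endpoint, service and command below are made-up examples.
def _build_payload_sketch():
    app = {
        'endpoints': [{'name': name, 'port': port} for name, port in [('http', 8000)]],
        'services': [{
            'name': 'web',
            'command': 'python -m http.server 8000',
            'restart': {'limit': 0, 'interval': 60},
        }],
    }
    # Apply the same defaults `_run` fills in when they are missing.
    app.setdefault('memory', _DEFAULT_MEM)
    app.setdefault('disk', _DEFAULT_DISK)
    app.setdefault('cpu', _DEFAULT_CPU)
    return app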
|
pyexcel/internal/utils.py | quis/pyexcel | 1,045 | 12700105 | def default_getter(attribute=None):
"""a default method for missing renderer method
for example, the support to write data in a specific file type
is missing but the support to read data exists
"""
def none_presenter(_, **__):
"""docstring is assigned a few lines down the line"""
raise NotImplementedError("%s getter is not defined." % attribute)
none_presenter.__doc__ = "%s getter is not defined." % attribute
return none_presenter
def default_setter(attribute=None):
"""a default method for missing parser method
for example, the support to read data in a specific file type
is missing but the support to write data exists
"""
def none_importer(_x, _y, **_z):
"""docstring is assigned a few lines down the line"""
raise NotImplementedError("%s setter is not defined." % attribute)
none_importer.__doc__ = "%s setter is not defined." % attribute
return none_importer
def make_a_property(
cls,
attribute,
doc_string,
getter_func=default_getter,
setter_func=default_setter,
):
"""
create custom attributes for each class
"""
getter = getter_func(attribute)
setter = setter_func(attribute)
attribute_property = property(
# note:
# without fget, fset, pypy 5.4.0 crashes randomly.
fget=getter,
fset=setter,
doc=doc_string,
)
if "." in attribute:
attribute = attribute.replace(".", "_")
else:
attribute = attribute
setattr(cls, attribute, attribute_property)
setattr(cls, "get_%s" % attribute, getter)
setattr(cls, "set_%s" % attribute, setter)
|
aliyun-python-sdk-moguan-sdk/aliyunsdkmoguan_sdk/request/v20210415/RegisterDeviceRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12700121 | <reponame>yndu13/aliyun-openapi-python-sdk
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmoguan_sdk.endpoint import endpoint_data
class RegisterDeviceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'moguan-sdk', '2021-04-15', 'RegisterDevice')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UserDeviceId(self):
return self.get_body_params().get('UserDeviceId')
def set_UserDeviceId(self,UserDeviceId):
self.add_body_params('UserDeviceId', UserDeviceId)
def get_Extend(self):
return self.get_body_params().get('Extend')
def set_Extend(self,Extend):
self.add_body_params('Extend', Extend)
def get_SdkCode(self):
return self.get_body_params().get('SdkCode')
def set_SdkCode(self,SdkCode):
self.add_body_params('SdkCode', SdkCode)
def get_AppKey(self):
return self.get_body_params().get('AppKey')
def set_AppKey(self,AppKey):
self.add_body_params('AppKey', AppKey)
def get_DeviceId(self):
return self.get_body_params().get('DeviceId')
def set_DeviceId(self,DeviceId):
self.add_body_params('DeviceId', DeviceId) |
dialogue-engine/test/programytest/rdf/test_creation.py | cotobadesign/cotoba-agent-oss | 104 | 12700160 | <gh_stars>100-1000
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.file.engine import FileStorageEngine
from programy.storage.stores.file.config import FileStoreConfiguration
from programy.storage.factory import StorageFactory
from programy.rdf.collection import RDFCollection
class RDFCollectionCreationTests(unittest.TestCase):
def test_add_collection(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
def test_add_multi_object_collection(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACTOR", "ISA", "PERSON", "TEST")
collection.add_entity("ACTOR", "ISA", "MAN", "TEST")
self.assertTrue(collection.has_subject('ACTOR'))
self.assertTrue(collection.has_predicate('ACTOR', 'ISA'))
self.assertTrue(collection.has_object('ACTOR', 'ISA', "PERSON"))
self.assertTrue(collection.has_object('ACTOR', 'ISA', "MAN"))
def test_delete_collection_subject(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT")
self.assertFalse(collection.has_subject('ACCOUNT'))
def test_delete_collection_subject_predicate(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT", "hasSize")
self.assertFalse(collection.has_subject('ACCOUNT'))
def test_delete_collection_subject_predicate_object(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT", "hasSize", "0")
self.assertFalse(collection.has_subject('ACCOUNT'))
self.assertFalse(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertFalse(collection.has_object('ACCOUNT', 'hasSize', "0"))
def test_delete_collection_subject_predicate_diff_object(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANKING", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT", "hasSize", "1")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
def test_delete_collection_diff_subject(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANKING", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT1", "hasSize", "0")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
def test_delete_collection_diff_predicate(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANKING", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT", "hasSize1", "1")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
def test_delete_collection_diff_predicate_none_obj(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANKING", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT", "hasSize1", None)
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
def test_collection_update_to_updates_file(self):
config = FileStorageConfiguration()
tmpdir = os.path.dirname(__file__) + os.sep + "rdf_updates"
config.rdf_updates_storage._dirs = [tmpdir]
config.rdf_updates_storage._has_single_file = True
factory = StorageFactory()
storage_engine = FileStorageEngine(config)
factory._storage_engines[StorageFactory.RDF_UPDATES] = storage_engine
factory._store_to_engine_map[StorageFactory.RDF_UPDATES] = storage_engine
updates_engine = factory.entity_storage_engine(StorageFactory.RDF_UPDATES)
updates_store = updates_engine.rdf_updates_store()
updates_store.empty()
collection = RDFCollection()
self.assertIsNotNone(collection)
collection._storage_factory = factory
collection.add_entity("ACCOUNT", "hasSize", "0", "BANKING", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
collection.delete_entity("ACCOUNT", "hasSize", "0")
self.assertFalse(collection.has_subject('ACCOUNT'))
updates_store.empty()
def test_collection_others(self):
collection = RDFCollection()
self.assertIsNotNone(collection)
collection.add_entity("ACCOUNT", "hasSize", "0", "BANKING", "BANIKING")
self.assertTrue(collection.has_subject('ACCOUNT'))
self.assertTrue(collection.has_predicate('ACCOUNT', 'hasSize'))
self.assertTrue(collection.has_object('ACCOUNT', 'hasSize', "0"))
self.assertIsNone(collection.storename("BANKING1"))
self.assertEqual(0, len(collection.predicates("account1")))
self.assertEqual(0, len(collection.objects("ACCOUNT1", "hasSize")))
self.assertEqual(0, len(collection.objects("ACCOUNT", "hasSize1")))
self.assertFalse(collection.has_object("ACCOUNT", "hasSize", "1"))
|
count.py | kamata1729/shiftresnet-cifar | 132 | 12700183 | <reponame>kamata1729/shiftresnet-cifar<gh_stars>100-1000
from models import ResNet20
from models import ShiftResNet20
from models import ResNet56
from models import ShiftResNet56
from models import ResNet110
from models import ShiftResNet110
import torch
from torch.autograd import Variable
import numpy as np
import argparse
all_models = {
'resnet20': ResNet20,
'shiftresnet20': ShiftResNet20,
'resnet56': ResNet56,
'shiftresnet56': ShiftResNet56,
'resnet110': ResNet110,
'shiftresnet110': ShiftResNet110,
}
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--arch', choices=all_models.keys(),
help='Architecture to count parameters for', default='shiftresnet110')
parser.add_argument('--expansion', type=int, default=1, help='expansion for shift layers')
parser.add_argument('--reduction', type=float, default=1, help='reduction for resnet')
parser.add_argument('--reduction-mode', choices=('block', 'net', 'depthwise', 'shuffle', 'mobile'), help='"block" reduces inner representation for BasicBlock, "net" reduces for all layers', default='net')
args = parser.parse_args()
def count_params(net):
return sum([np.prod(param.size()) for name, param in net.named_parameters()])
def count_flops(net):
"""Approximately count number of FLOPs"""
dummy = Variable(torch.randn(1, 3, 32, 32)).cuda() # size is specific to cifar10, cifar100!
net.cuda().forward(dummy)
return net.flops()
original = all_models[args.arch.replace('shift', '')]()
original_count = count_params(original)
original_flops = count_flops(original)
cls = all_models[args.arch]
assert 'shift' not in args.arch or args.reduction == 1, \
'Only default resnet supports reductions'
if args.reduction != 1:
print('==> %s with reduction %.2f' % (args.arch, args.reduction))
net = cls(reduction=args.reduction, reduction_mode=args.reduction_mode)
else:
net = cls() if 'shift' not in args.arch else cls(expansion=args.expansion)
new_count = count_params(net)
new_flops = count_flops(net)
print('Parameters: (new) %d (original) %d (reduction) %.2f' % (
new_count, original_count, float(original_count) / new_count))
print('FLOPs: (new) %d (original) %d (reduction) %.2f' % (
new_flops, original_flops, float(original_flops) / new_flops))
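# Illustrative sketch, not part of the original script: count_params just sums the
# sizes of all named parameters. A 3-in/2-out linear layer has 3*2 weights plus
# 2 biases, i.e. 8 parameters.
def _count_params_sketch():
    import torch.nn as nn
    return count_params(nn.Linear(3, 2))  # -> 8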
|
test_models/create.py | nktch1/tfgo | 2,010 | 12700219 | # Copyright (C) 2017-2020 <NAME> <<EMAIL>>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
# Exhibit B is not attached; this software is compatible with the
# licenses expressed under Section 1.12 of the MPL v2.
import sys
import tensorflow as tf
def keras():
"""Define a trivial module for image (28x28x1) classification.
Export it as a SavedModel without even training it.
    Serialize an uninitialized Keras Sequential model as-is."""
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(
8,
(3, 3),
strides=(2, 2),
padding="valid",
input_shape=(28, 28, 1),
activation=tf.nn.relu,
name="inputs",
), # 14x14x8
tf.keras.layers.Conv2D(
16, (3, 3), strides=(2, 2), padding="valid", activation=tf.nn.relu
), # 7x716
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, name="logits"), # linear
]
)
tf.saved_model.save(model, "output/keras")
def tf_function():
pass
def main():
tf.io.gfile.makedirs("output")
keras()
tf_function()
if __name__ == "__main__":
sys.exit(main())
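# Illustrative sketch, not part of the original script: one way to check the
# exported artifact is to reload it and inspect its signatures. The presence of a
# 'serving_default' signature is an assumption about how tf.saved_model.save
# exports Keras models, not something verified here.
def _reload_sketch():
    loaded = tf.saved_model.load("output/keras")
    return list(loaded.signatures.keys())  # typically ['serving_default']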
|
tests/transformers/xslt_test.py | elifesciences/sciencebeam | 272 | 12700224 | <filename>tests/transformers/xslt_test.py
from lxml import etree
from sciencebeam.transformers.xslt import _to_xslt_input
class TestToXsltInput:
def test_should_tolerate_duplicate_ids(self):
result: etree.ElementBase = _to_xslt_input(
'''
<xml>
<item xml:id="id1">item 1</item>
<item xml:id="id1">item 2</item>
</xml>
'''
)
items = result.findall('item')
assert len(items) == 2
assert [item.text for item in items] == ['item 1', 'item 2']
|
tests/basics/tuple_compare.py | peterson79/pycom-micropython-sigfox | 303 | 12700226 | <reponame>peterson79/pycom-micropython-sigfox
print(() == ())
print(() > ())
print(() < ())
print(() == (1,))
print((1,) == ())
print(() > (1,))
print((1,) > ())
print(() < (1,))
print((1,) < ())
print(() >= (1,))
print((1,) >= ())
print(() <= (1,))
print((1,) <= ())
print((1,) == (1,))
print((1,) != (1,))
print((1,) == (2,))
print((1,) == (1, 0,))
print((1,) > (1,))
print((1,) > (2,))
print((2,) > (1,))
print((1, 0,) > (1,))
print((1, -1,) > (1,))
print((1,) > (1, 0,))
print((1,) > (1, -1,))
print((1,) < (1,))
print((2,) < (1,))
print((1,) < (2,))
print((1,) < (1, 0,))
print((1,) < (1, -1,))
print((1, 0,) < (1,))
print((1, -1,) < (1,))
print((1,) >= (1,))
print((1,) >= (2,))
print((2,) >= (1,))
print((1, 0,) >= (1,))
print((1, -1,) >= (1,))
print((1,) >= (1, 0,))
print((1,) >= (1, -1,))
print((1,) <= (1,))
print((2,) <= (1,))
print((1,) <= (2,))
print((1,) <= (1, 0,))
print((1,) <= (1, -1,))
print((1, 0,) <= (1,))
print((1, -1,) <= (1,))
print((10, 0) > (1, 1))
print((10, 0) < (1, 1))
print((0, 0, 10, 0) > (0, 0, 1, 1))
print((0, 0, 10, 0) < (0, 0, 1, 1))
|