the-stack_0_20611
#!/usr/bin/env python
#pylint: disable=E1103
# E1103: Attach objects to threading
"""
_TwoFileBased_
File based job splitting for two file read workflows. This works the same as
normal file based splitting except that the input files will also have their
parentage information loaded so that the parents can be included in the job.
"""
import logging
import threading
from WMCore.JobSplitting.JobFactory import JobFactory
from WMCore.DAOFactory import DAOFactory
from WMCore.WMBS.File import File
class TwoFileBased(JobFactory):
"""
Two file read workflow splitting
"""
def __init__(self, package='WMCore.DataStructs',
subscription=None,
generators=[],
limit = None):
"""
__init__
Create the DAOs
"""
myThread = threading.currentThread()
JobFactory.__init__(self, package = 'WMCore.WMBS',
subscription = subscription,
generators = generators,
limit = limit)
self.daoFactory = DAOFactory(package = "WMCore.WMBS",
logger = myThread.logger,
dbinterface = myThread.dbi)
self.getParentInfoAction = self.daoFactory(classname = "Files.GetParentAndGrandParentInfo")
return
def algorithm(self, *args, **kwargs):
"""
_algorithm_
Split up all the available files such that each job will process a
maximum of "files_per_job". If the "files_per_job" parameters is not
passed in jobs will process a maximum of 10 files.
"""
filesPerJob = int(kwargs.get("files_per_job", 10))
jobsPerGroup = int(kwargs.get("jobs_per_group", 0))
filesInJob = 0
totalJobs = 0
listOfFiles = []
#Get a dictionary of sites, files
locationDict = self.sortByLocation()
for location in locationDict.keys():
#Now we have all the files in a certain location
fileList = locationDict[location]
filesInJob = 0
jobsInGroup = 0
self.newGroup()
if len(fileList) == 0:
#No files for this location
#This isn't supposed to happen, but better safe than sorry
logging.debug("Have location %s with no files" % (location))
continue
for file in fileList:
parentLFNs = self.findParent(lfn = file['lfn'])
for lfn in parentLFNs:
parent = File(lfn = lfn)
file['parents'].add(parent)
if filesInJob == 0 or filesInJob == filesPerJob:
if jobsPerGroup:
if jobsInGroup > jobsPerGroup:
self.newGroup()
jobsInGroup = 0
self.newJob(name = self.getJobName(length=totalJobs))
filesInJob = 0
totalJobs += 1
jobsInGroup += 1
filesInJob += 1
self.currentJob.addFile(file)
listOfFiles.append(file)
return
def findParent(self, lfn):
"""
_findParent_
Find the parents for a file based on its lfn
"""
parentsInfo = self.getParentInfoAction.execute([lfn])
newParents = set()
for parentInfo in parentsInfo:
# This will catch straight to merge files that do not have redneck
# parents. We will mark the straight to merge file from the job
# as a child of the merged parent.
if int(parentInfo["merged"]) == 1:
newParents.add(parentInfo["lfn"])
elif parentInfo['gpmerged'] == None:
continue
# Handle the files that result from merge jobs that aren't redneck
# children. We have to setup parentage and then check on whether or
# not this file has any redneck children and update their parentage
# information.
elif int(parentInfo["gpmerged"]) == 1:
newParents.add(parentInfo["gplfn"])
# If that didn't work, we've reached the great-grandparents
# And we have to work via recursion
else:
parentSet = self.findParent(lfn = parentInfo['gplfn'])
for parent in parentSet:
newParents.add(parent)
return newParents
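# Hedged usage sketch (editor addition): a splitter like this is normally driven
# through the WMCore job-splitting machinery rather than called by hand. Assuming
# an existing WMBS `subscription`, the call pattern is roughly:
#
#   factory = TwoFileBased(subscription=subscription)
#   jobGroups = factory(files_per_job=5, jobs_per_group=10)
#
# where each resulting job carries its input files together with the parent
# files loaded by findParent().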
the-stack_0_20615
#!/usr/bin/env python3
# System imports
import sys
import unittest
# Import NumPy
import numpy as np
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0: BadListError = TypeError
else: BadListError = ValueError
import Vector
######################################################################
class VectorTestCase(unittest.TestCase):
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test the (type IN_ARRAY1[ANY]) typemap
def testLength(self):
"Test length function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertEqual(length([5, 12, 0]), 13)
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthBadList(self):
"Test length function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(BadListError, length, [5, "twelve", 0])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthWrongSize(self):
"Test length function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, [5, 12])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthWrongDim(self):
"Test length function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, [[1, 2], [3, 4]])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthNonContainer(self):
"Test length function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, None)
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProd(self):
"Test prod function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertEqual(prod([1, 2, 3, 4]), 24)
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdBadList(self):
"Test prod function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(BadListError, prod, [[1, "two"], ["e", "pi"]])
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdWrongDim(self):
"Test prod function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(TypeError, prod, [[1, 2], [8, 9]])
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdNonContainer(self):
"Test prod function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(TypeError, prod, None)
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSum(self):
"Test sum function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertEqual(sum([5, 6, 7, 8]), 26)
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumBadList(self):
"Test sum function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(BadListError, sum, [3, 4, 5, "pi"])
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumWrongDim(self):
"Test sum function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(TypeError, sum, [[3, 4], [5, 6]])
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumNonContainer(self):
"Test sum function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(TypeError, sum, True)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverse(self):
"Test reverse function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([1, 2, 4], self.typeCode)
reverse(vector)
self.assertEqual((vector == [4, 2, 1]).all(), True)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongDim(self):
"Test reverse function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([[1, 2], [3, 4]], self.typeCode)
self.assertRaises(TypeError, reverse, vector)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongSize(self):
"Test reverse function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([9, 8, 7, 6, 5, 4], self.typeCode)
self.assertRaises(TypeError, reverse, vector)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongType(self):
"Test reverse function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([1, 2, 4], 'c')
self.assertRaises(TypeError, reverse, vector)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseNonArray(self):
"Test reverse function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
self.assertRaises(TypeError, reverse, [2, 4, 6])
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnes(self):
"Test ones function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros(5, self.typeCode)
ones(vector)
np.testing.assert_array_equal(vector, np.array([1, 1, 1, 1, 1]))
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesWrongDim(self):
"Test ones function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros((5, 5), self.typeCode)
self.assertRaises(TypeError, ones, vector)
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesWrongType(self):
"Test ones function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros((5, 5), 'c')
self.assertRaises(TypeError, ones, vector)
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesNonArray(self):
"Test ones function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
self.assertRaises(TypeError, ones, [2, 4, 6, 8])
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZeros(self):
"Test zeros function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones(5, self.typeCode)
zeros(vector)
np.testing.assert_array_equal(vector, np.array([0, 0, 0, 0, 0]))
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosWrongDim(self):
"Test zeros function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones((5, 5), self.typeCode)
self.assertRaises(TypeError, zeros, vector)
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosWrongType(self):
"Test zeros function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones(6, 'c')
self.assertRaises(TypeError, zeros, vector)
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosNonArray(self):
"Test zeros function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
self.assertRaises(TypeError, zeros, [1, 3, 5, 7, 9])
# Test the (type ARGOUT_ARRAY1[ANY]) typemap
def testEOSplit(self):
"Test eoSplit function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
eoSplit = Vector.__dict__[self.typeStr + "EOSplit"]
even, odd = eoSplit([1, 2, 3])
self.assertEqual((even == [1, 0, 3]).all(), True)
self.assertEqual((odd == [0, 2, 0]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwos(self):
"Test twos function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
twos = Vector.__dict__[self.typeStr + "Twos"]
vector = twos(5)
self.assertEqual((vector == [2, 2, 2, 2, 2]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwosNonInt(self):
"Test twos function with non-integer dimension"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
twos = Vector.__dict__[self.typeStr + "Twos"]
self.assertRaises(TypeError, twos, 5.0)
# Test the (int DIM1, type* ARGOUT_ARRAY1) typemap
def testThrees(self):
"Test threes function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
threes = Vector.__dict__[self.typeStr + "Threes"]
vector = threes(6)
self.assertEqual((vector == [3, 3, 3, 3, 3, 3]).all(), True)
# Test the (int DIM1, type* ARGOUT_ARRAY1) typemap
def testThreesNonInt(self):
"Test threes function with non-integer dimension"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
threes = Vector.__dict__[self.typeStr + "Threes"]
self.assertRaises(TypeError, threes, "threes")
######################################################################
class scharTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
######################################################################
class ucharTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
######################################################################
class shortTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "short"
self.typeCode = "h"
######################################################################
class ushortTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "ushort"
self.typeCode = "H"
######################################################################
class intTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "int"
self.typeCode = "i"
######################################################################
class uintTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "uint"
self.typeCode = "I"
######################################################################
class longTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
######################################################################
class ulongTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "ulong"
self.typeCode = "L"
######################################################################
class longLongTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "longLong"
self.typeCode = "q"
######################################################################
class ulongLongTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "ulongLong"
self.typeCode = "Q"
######################################################################
class floatTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "float"
self.typeCode = "f"
######################################################################
class doubleTestCase(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite( scharTestCase))
suite.addTest(unittest.makeSuite( ucharTestCase))
suite.addTest(unittest.makeSuite( shortTestCase))
suite.addTest(unittest.makeSuite( ushortTestCase))
suite.addTest(unittest.makeSuite( intTestCase))
suite.addTest(unittest.makeSuite( uintTestCase))
suite.addTest(unittest.makeSuite( longTestCase))
suite.addTest(unittest.makeSuite( ulongTestCase))
suite.addTest(unittest.makeSuite( longLongTestCase))
suite.addTest(unittest.makeSuite(ulongLongTestCase))
suite.addTest(unittest.makeSuite( floatTestCase))
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
print("Testing 1D Functions of Module Vector")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
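# Hedged illustration (editor addition): the tests above look the SWIG wrappers up
# by name, so for typeStr "double" they exercise Vector.doubleLength,
# Vector.doubleProd, Vector.doubleSum, and so on. A direct call would look like:
#   Vector.doubleLength([3.0, 4.0, 0.0])   # -> 5.0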
the-stack_0_20617
#!/usr/bin/env python
"""
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
import pyglet
from pyglet.window.xlib import xlib
import lib_xrandr as xrandr
def _check_extension(display):
major_opcode = ctypes.c_int()
first_event = ctypes.c_int()
first_error = ctypes.c_int()
xlib.XQueryExtension(display._display, 'RANDR',
ctypes.byref(major_opcode),
ctypes.byref(first_event),
ctypes.byref(first_error))
if not major_opcode.value:
raise Exception('RANDR extension not available')
def _check_version(display):
major = ctypes.c_int()
minor = ctypes.c_int()
xrandr.XRRQueryVersion(display._display,
ctypes.byref(major), ctypes.byref(minor))
if major.value < 1 or minor.value < 2:
raise Exception('Server does not support RandR 1.2')
return '%d.%d' % (major.value, minor.value)
display = pyglet.window.get_platform().get_default_display()
_check_extension(display)
_check_version(display)
_display = display._display
root_windows = set()
for screen in display.get_screens():
x_screen = xlib.XScreenOfDisplay(_display, screen._x_screen_id)
root_window = xlib.XRootWindowOfScreen(x_screen)
root_windows.add(root_window)
for root_window in root_windows:
resources_p = xrandr.XRRGetScreenResources(_display, root_window)
resources = resources_p.contents
print('CRTCs:')
for i in range(resources.ncrtc):
info = xrandr.XRRGetCrtcInfo(_display, resources_p, resources.crtcs[i])
info = info.contents
print(' %dx%d @ %d,%d' % (info.width, info.height, info.x, info.y))
print('Modes:')
for i in range(resources.nmode):
info = resources.modes[i]
print(' (%d) %dx%d "%s"' % (info.id,
info.width, info.height, info.name))
# Set CRTC 0 to mode 1 without changing outputs
info = xrandr.XRRGetCrtcInfo(_display, resources_p, resources.crtcs[0])
info = info.contents
xrandr.XRRSetCrtcConfig(_display, resources_p, resources.crtcs[0],
info.timestamp, info.x, info.y, resources.modes[0].id,
info.rotation, info.outputs, info.noutput)
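# Hedged sketch (editor addition, illustrative only): the CRTC fields used above
# (x, y, width, height) are enough to derive the overall desktop extent for a
# root window, e.g.:
#
#   infos = [xrandr.XRRGetCrtcInfo(_display, resources_p, resources.crtcs[i]).contents
#            for i in range(resources.ncrtc)]
#   desktop_width  = max(i.x + i.width  for i in infos)
#   desktop_height = max(i.y + i.height for i in infos)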
the-stack_0_20622
# model settings
model = dict(
type='CascadeRCNN',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.1, 0.3, 0.5, 1.0, 2.0, 10.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=7,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=7,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=7,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
]))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
gpu_assign_thr=20,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.3,
neg_iou_thr=0.3,
min_pos_iou=0.3,
gpu_assign_thr=20,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.4,
neg_iou_thr=0.4,
min_pos_iou=0.4,
gpu_assign_thr=20,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
gpu_assign_thr=20,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.1),
max_per_img=200))
# dataset setting
dataset_type = 'TileDataset'
data_root = '/data/huangyifei/data_guangdong/tile_round1_train_20201231/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=[(2100, 2100), (1800, 1800), (1600, 1600)], keep_ratio=True, multiscale_mode="value"),
dict(type='PhotoMetricDistortion'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=[(2100, 2100), (1800, 1800), (1600, 1600)],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
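# Editor note (hedged): MultiScaleFlipAug above runs the wrapped transforms once
# per img_scale listed (three square scales here) for every test image, and
# flip=False keeps horizontal-flip test-time augmentation disabled.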
data = dict(
samples_per_gpu=1,
workers_per_gpu=3,
train=dict(
type=dataset_type,
ann_file=data_root + 'train_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'val_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline)
)
evaluation = dict(interval=1, metric='mAP')
# optimizer
# optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer = dict(type='Adam', lr=7e-5)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[12, 15])
total_epochs = 16
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = 'work_dirs/r101_baseline_coco/latest.pth'
resume_from = None
work_dir = 'work_dirs/r101_baseline_pretrain'
workflow = [('train', 1)]
# fp16 settings
fp16 = dict(loss_scale=512.)
cudnn_benchmark = True
dist_params = dict(backend='nccl')
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
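# Hedged note (editor addition): a config file like this is usually consumed by
# MMDetection's training scripts rather than run directly, e.g. (paths are
# illustrative):
#   python tools/train.py configs/this_config.py
#   ./tools/dist_train.sh configs/this_config.py 4   # 4 GPUs, distributed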
the-stack_0_20623
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM8_if_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM8_if_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM8_if_CompleteLHS, self).__init__(name='HMM8_if_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HMM8_if_CompleteLHS')
self["equations"] = []
# Set the node attributes
# apply class ListenBranch(0.24.a.0ListenBranch) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__ListenBranch"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.24.a.0ListenBranch')
self["equations"].append(((0,'pivot'),('constant','ListenBranchf49c25a2ListenBranch')))
# Add the edges
self.add_edges([
])
# define evaluation methods for each match class.
# define evaluation methods for each apply class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each match association.
# define evaluation methods for each apply association.
def constraint(self, PreNode, graph):
return True
the-stack_0_20625
from distutils.core import setup
import os
def extract_version(filepath):
"""
Extract package version without importing module.
Parameters
----------
filepath : str
Filepath to the file from which to extract the version reference.
Returns
-------
: str
Package version.
"""
#filepath = os.path.join(os.path.dirname(__file__), filepath)
with open(filepath) as fd:
for line in fd:
if line.startswith("__version__"):
_, version = line.split("=")
# Remove any in-line comment and quotation
return version.split("#")[0].strip()[1:-1]
raise RuntimeError(f"Unable to extract version reference for package: {filepath}")
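# Hedged example (editor addition): extract_version() expects nslice.py to contain
# a line such as
#   __version__ = "1.2.3"  # optional trailing comment
# in which case extract_version("nslice.py") returns "1.2.3" without importing
# the module.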
setup(name='nslice',
version=extract_version("nslice.py"),
py_modules=['nslice'])
the-stack_0_20626
"""
Container for the layout.
(Containers can contain other containers or user interface controls.)
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from pygments.token import Token
from six import with_metaclass
from .screen import Point, WritePosition, Char
from .dimension import LayoutDimension, sum_layout_dimensions, max_layout_dimensions
from .controls import UIControl, TokenListControl
from .margins import Margin
from prompt_toolkit.filters import to_cli_filter
from prompt_toolkit.mouse_events import MouseEvent, MouseEventTypes
from prompt_toolkit.utils import SimpleLRUCache, take_using_weights
__all__ = (
'Container',
'HSplit',
'VSplit',
'FloatContainer',
'Float',
'Window',
'WindowRenderInfo',
'ConditionalContainer',
'ScrollOffsets'
)
Transparent = Token.Transparent
class Container(with_metaclass(ABCMeta, object)):
"""
Base class for user interface layout.
"""
@abstractmethod
def reset(self):
"""
Reset the state of this container and all the children.
(E.g. reset scroll offsets, etc...)
"""
@abstractmethod
def preferred_width(self, cli, max_available_width):
"""
Return a :class:`~prompt_toolkit.layout.dimension.LayoutDimension` that
represents the desired width for this container.
:param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`.
"""
@abstractmethod
def preferred_height(self, cli, width):
"""
Return a :class:`~prompt_toolkit.layout.dimension.LayoutDimension` that
represents the desired height for this container.
:param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`.
"""
@abstractmethod
def write_to_screen(self, cli, screen, mouse_handlers, write_position):
"""
Write the actual content to the screen.
:param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`.
:param screen: :class:`~prompt_toolkit.layout.screen.Screen`
:param mouse_handlers: :class:`~prompt_toolkit.layout.mouse_handlers.MouseHandlers`.
"""
@abstractmethod
def walk(self, cli):
"""
Walk through all the layout nodes (and their children) and yield them.
"""
def _window_too_small():
" Create a `Window` that displays the 'Window too small' text. "
return Window(TokenListControl.static(
[(Token.WindowTooSmall, ' Window too small... ')]))
class HSplit(Container):
"""
Several layouts, one stacked above/under the other.
:param children: List of child :class:`.Container` objects.
:param window_too_small: A :class:`.Container` object that is displayed if
there is not enough space for all the children. By default, this is a
"Window too small" message.
:param get_dimensions: (`None` or a callable that takes a
`CommandLineInterface` and returns a list of `LayoutDimension`
instances.) By default the dimensions are taken from the children and
divided by the available space. However, when `get_dimensions` is specified,
this is taken instead.
:param report_dimensions_callback: When rendering, this function is called
with the `CommandLineInterface` and the list of used dimensions. (As a
list of integers.)
"""
def __init__(self, children, window_too_small=None,
get_dimensions=None, report_dimensions_callback=None):
assert all(isinstance(c, Container) for c in children)
assert window_too_small is None or isinstance(window_too_small, Container)
assert get_dimensions is None or callable(get_dimensions)
assert report_dimensions_callback is None or callable(report_dimensions_callback)
self.children = children
self.window_too_small = window_too_small or _window_too_small()
self.get_dimensions = get_dimensions
self.report_dimensions_callback = report_dimensions_callback
def preferred_width(self, cli, max_available_width):
if self.children:
dimensions = [c.preferred_width(cli, max_available_width) for c in self.children]
return max_layout_dimensions(dimensions)
else:
return LayoutDimension(0)
def preferred_height(self, cli, width):
dimensions = [c.preferred_height(cli, width) for c in self.children]
return sum_layout_dimensions(dimensions)
def reset(self):
for c in self.children:
c.reset()
def write_to_screen(self, cli, screen, mouse_handlers, write_position):
"""
Render the prompt to a `Screen` instance.
:param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class
to which the output has to be written.
"""
sizes = self._divide_heigths(cli, write_position)
if self.report_dimensions_callback:
self.report_dimensions_callback(cli, sizes)
if sizes is None:
self.window_too_small.write_to_screen(
cli, screen, mouse_handlers, write_position)
else:
# Draw child panes.
ypos = write_position.ypos
xpos = write_position.xpos
width = write_position.width
for s, c in zip(sizes, self.children):
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, width, s))
ypos += s
def _divide_heigths(self, cli, write_position):
"""
Return the heights for all rows.
Or None when there is not enough space.
"""
if not self.children:
return []
# Calculate heights.
given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None
def get_dimension_for_child(c, index):
if given_dimensions and given_dimensions[index] is not None:
return given_dimensions[index]
else:
return c.preferred_height(cli, write_position.width)
dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)]
# Sum dimensions
sum_dimensions = sum_layout_dimensions(dimensions)
# If there is not enough space for both.
# Don't do anything.
if sum_dimensions.min > write_position.extended_height:
return
# Find optimal sizes. (Start with minimal size, increase until we cover
# the whole height.)
sizes = [d.min for d in dimensions]
child_generator = take_using_weights(
items=list(range(len(dimensions))),
weights=[d.weight for d in dimensions])
i = next(child_generator)
while sum(sizes) < min(write_position.extended_height, sum_dimensions.preferred):
# Increase until we meet at least the 'preferred' size.
if sizes[i] < dimensions[i].preferred:
sizes[i] += 1
i = next(child_generator)
if not any([cli.is_returning, cli.is_exiting, cli.is_aborting]):
while sum(sizes) < min(write_position.height, sum_dimensions.max):
# Increase until we use all the available space. (or until "max")
if sizes[i] < dimensions[i].max:
sizes[i] += 1
i = next(child_generator)
return sizes
def walk(self, cli):
""" Walk through children. """
yield self
for c in self.children:
for i in c.walk(cli):
yield i
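# Hedged example (editor addition): a minimal two-row layout built with HSplit --
# a body window above a one-line bar. Window is defined further down in this
# module, so this is a sketch only; the token text is illustrative.
#
#   layout = HSplit([
#       Window(content=TokenListControl.static([(Token.Toolbar, ' body ')])),
#       Window(content=TokenListControl.static([(Token.Toolbar, ' status ')]),
#              height=LayoutDimension(min=1, max=1)),
#   ])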
class VSplit(Container):
"""
Several layouts, one stacked left/right of the other.
:param children: List of child :class:`.Container` objects.
:param window_too_small: A :class:`.Container` object that is displayed if
there is not enough space for all the children. By default, this is a
"Window too small" message.
:param get_dimensions: (`None` or a callable that takes a
`CommandLineInterface` and returns a list of `LayoutDimension`
instances.) By default the dimensions are taken from the children and
divided by the available space. However, when `get_dimensions` is specified,
this is taken instead.
:param report_dimensions_callback: When rendering, this function is called
with the `CommandLineInterface` and the list of used dimensions. (As a
list of integers.)
"""
def __init__(self, children, window_too_small=None,
get_dimensions=None, report_dimensions_callback=None):
assert all(isinstance(c, Container) for c in children)
assert window_too_small is None or isinstance(window_too_small, Container)
assert get_dimensions is None or callable(get_dimensions)
assert report_dimensions_callback is None or callable(report_dimensions_callback)
self.children = children
self.window_too_small = window_too_small or _window_too_small()
self.get_dimensions = get_dimensions
self.report_dimensions_callback = report_dimensions_callback
def preferred_width(self, cli, max_available_width):
dimensions = [c.preferred_width(cli, max_available_width) for c in self.children]
return sum_layout_dimensions(dimensions)
def preferred_height(self, cli, width):
sizes = self._divide_widths(cli, width)
if sizes is None:
return LayoutDimension()
else:
dimensions = [c.preferred_height(cli, s)
for s, c in zip(sizes, self.children)]
return max_layout_dimensions(dimensions)
def reset(self):
for c in self.children:
c.reset()
def _divide_widths(self, cli, width):
"""
Return the widths for all columns.
Or None when there is not enough space.
"""
if not self.children:
return []
# Calculate widths.
given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None
def get_dimension_for_child(c, index):
if given_dimensions and given_dimensions[index] is not None:
return given_dimensions[index]
else:
return c.preferred_width(cli, width)
dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)]
# Sum dimensions
sum_dimensions = sum_layout_dimensions(dimensions)
# If there is not enough space for both.
# Don't do anything.
if sum_dimensions.min > width:
return
# Find optimal sizes. (Start with minimal size, increase until we cover
# the whole height.)
sizes = [d.min for d in dimensions]
child_generator = take_using_weights(
items=list(range(len(dimensions))),
weights=[d.weight for d in dimensions])
i = next(child_generator)
while sum(sizes) < min(width, sum_dimensions.preferred):
# Increase until we meet at least the 'preferred' size.
if sizes[i] < dimensions[i].preferred:
sizes[i] += 1
i = next(child_generator)
while sum(sizes) < min(width, sum_dimensions.max):
# Increase until we use all the available space.
if sizes[i] < dimensions[i].max:
sizes[i] += 1
i = next(child_generator)
return sizes
def write_to_screen(self, cli, screen, mouse_handlers, write_position):
"""
Render the prompt to a `Screen` instance.
:param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class
to which the output has to be written.
"""
if not self.children:
return
sizes = self._divide_widths(cli, write_position.width)
if self.report_dimensions_callback:
self.report_dimensions_callback(cli, sizes)
# If there is not enough space.
if sizes is None:
self.window_too_small.write_to_screen(
cli, screen, mouse_handlers, write_position)
return
# Calculate heights, take the largest possible, but not larger than write_position.extended_height.
heights = [child.preferred_height(cli, width).preferred
for width, child in zip(sizes, self.children)]
height = max(write_position.height, min(write_position.extended_height, max(heights)))
# Draw child panes.
ypos = write_position.ypos
xpos = write_position.xpos
for s, c in zip(sizes, self.children):
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, s, height))
xpos += s
def walk(self, cli):
""" Walk through children. """
yield self
for c in self.children:
for i in c.walk(cli):
yield i
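# Hedged example (editor addition): fixing the width of the first column via
# `get_dimensions` while letting the second fall back to its preferred width
# (a None entry is ignored, see get_dimension_for_child above). `left_window`
# and `right_window` are assumed to be existing Window instances.
#
#   VSplit([left_window, right_window],
#          get_dimensions=lambda cli: [LayoutDimension(min=30, max=30), None])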
class FloatContainer(Container):
"""
Container which can contain another container for the background, as well
as a list of floating containers on top of it.
Example Usage::
FloatContainer(content=Window(...),
floats=[
Float(xcursor=True,
ycursor=True,
layout=CompletionMenu(...))
])
"""
def __init__(self, content, floats):
assert isinstance(content, Container)
assert all(isinstance(f, Float) for f in floats)
self.content = content
self.floats = floats
def reset(self):
self.content.reset()
for f in self.floats:
f.content.reset()
def preferred_width(self, cli, write_position):
return self.content.preferred_width(cli, write_position)
def preferred_height(self, cli, width):
"""
Return the preferred height of the float container.
(We don't care about the height of the floats, they should always fit
into the dimensions provided by the container.)
"""
return self.content.preferred_height(cli, width)
def write_to_screen(self, cli, screen, mouse_handlers, write_position):
self.content.write_to_screen(cli, screen, mouse_handlers, write_position)
for fl in self.floats:
# When a menu_position was given, use this instead of the cursor
# position. (These cursor positions are absolute, translate again
# relative to the write_position.)
# Note: This should be inside the for-loop, because one float could
# set the cursor position to be used for the next one.
cursor_position = screen.menu_position or screen.cursor_position
cursor_position = Point(x=cursor_position.x - write_position.xpos,
y=cursor_position.y - write_position.ypos)
fl_width = fl.get_width(cli)
fl_height = fl.get_height(cli)
# Left & width given.
if fl.left is not None and fl_width is not None:
xpos = fl.left
width = fl_width
# Left & right given -> calculate width.
elif fl.left is not None and fl.right is not None:
xpos = fl.left
width = write_position.width - fl.left - fl.right
# Width & right given -> calculate left.
elif fl_width is not None and fl.right is not None:
xpos = write_position.width - fl.right - fl_width
width = fl_width
elif fl.xcursor:
width = fl_width
if width is None:
width = fl.content.preferred_width(cli, write_position.width).preferred
width = min(write_position.width, width)
xpos = cursor_position.x
if xpos + width > write_position.width:
xpos = max(0, write_position.width - width)
# Only width given -> center horizontally.
elif fl_width:
xpos = int((write_position.width - fl_width) / 2)
width = fl_width
# Otherwise, take preferred width from float content.
else:
width = fl.content.preferred_width(cli, write_position.width).preferred
if fl.left is not None:
xpos = fl.left
elif fl.right is not None:
xpos = max(0, write_position.width - width - fl.right)
else: # Center horizontally.
xpos = max(0, int((write_position.width - width) / 2))
# Trim.
width = min(width, write_position.width - xpos)
# Top & height given.
if fl.top is not None and fl_height is not None:
ypos = fl.top
height = fl_height
# Top & bottom given -> calculate height.
elif fl.top is not None and fl.bottom is not None:
ypos = fl.top
height = write_position.height - fl.top - fl.bottom
# Height & bottom given -> calculate top.
elif fl_height is not None and fl.bottom is not None:
ypos = write_position.height - fl_height - fl.bottom
height = fl_height
# Near cursor
elif fl.ycursor:
ypos = cursor_position.y + 1
height = fl_height
if height is None:
height = fl.content.preferred_height(cli, width).preferred
# Reduce height if not enough space. (We can use the
# extended_height when the content requires it.)
if height > write_position.extended_height - ypos:
if write_position.extended_height - ypos + 1 >= ypos:
# When the space below the cursor is more than
# the space above, just reduce the height.
height = write_position.extended_height - ypos
else:
# Otherwise, fit the float above the cursor.
height = min(height, cursor_position.y)
ypos = cursor_position.y - height
# Only height given -> center vertically.
elif fl_height:
ypos = int((write_position.height - fl_height) / 2)
height = fl_height
# Otherwise, take preferred height from content.
else:
height = fl.content.preferred_height(cli, width).preferred
if fl.top is not None:
ypos = fl.top
elif fl.bottom is not None:
ypos = max(0, write_position.height - height - fl.bottom)
else: # Center vertically.
ypos = max(0, int((write_position.height - height) / 2))
# Trim.
height = min(height, write_position.height - ypos)
# Write float.
# (xpos and ypos can be negative: a float can be partially visible.)
if height > 0 and width > 0:
wp = WritePosition(xpos=xpos + write_position.xpos,
ypos=ypos + write_position.ypos,
width=width, height=height)
fl.content.write_to_screen(cli, screen, mouse_handlers, wp)
def walk(self, cli):
""" Walk through children. """
yield self
for i in self.content.walk(cli):
yield i
for f in self.floats:
for i in f.content.walk(cli):
yield i
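# Editor note (hedged summary of FloatContainer.write_to_screen above): each axis
# of a Float is resolved by the first rule that applies -- explicit edge plus
# size, two opposite edges (size derived), size plus far edge (position derived),
# cursor-relative placement (xcursor/ycursor), and finally centring using the
# float content's preferred size.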
class Float(object):
"""
Float for use in a :class:`.FloatContainer`.
:param content: :class:`.Container` instance.
"""
def __init__(self, top=None, right=None, bottom=None, left=None,
width=None, height=None, get_width=None, get_height=None,
xcursor=False, ycursor=False, content=None):
assert isinstance(content, Container)
assert width is None or get_width is None
assert height is None or get_height is None
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self._width = width
self._height = height
self._get_width = get_width
self._get_height = get_height
self.xcursor = xcursor
self.ycursor = ycursor
self.content = content
def get_width(self, cli):
if self._width:
return self._width
if self._get_width:
return self._get_width(cli)
def get_height(self, cli):
if self._height:
return self._height
if self._get_height:
return self._get_height(cli)
def __repr__(self):
return 'Float(content=%r)' % self.content
class WindowRenderInfo(object):
"""
Render information, for the last render time of this control.
It stores mapping information between the input buffers (in case of a
:class:`~prompt_toolkit.layout.controls.BufferControl`) and the actual
render position on the output screen.
(Could be used for implementation of the Vi 'H' and 'L' key bindings as
well as implementing mouse support.)
:param original_screen: The original full screen instance that contains the
whole input, without clipping. (temp_screen)
:param horizontal_scroll: The horizontal scroll of the :class:`.Window` instance.
:param vertical_scroll: The vertical scroll of the :class:`.Window` instance.
:param height: The height that was used for the rendering.
:param cursor_position: `Point` instance. Where the cursor is currently
shown, relative to the window.
"""
def __init__(self, original_screen, horizontal_scroll, vertical_scroll,
window_width, window_height, cursor_position,
configured_scroll_offsets, applied_scroll_offsets):
self.original_screen = original_screen
self.vertical_scroll = vertical_scroll
self.window_width = window_width
self.window_height = window_height
self.cursor_position = cursor_position
self.configured_scroll_offsets = configured_scroll_offsets
self.applied_scroll_offsets = applied_scroll_offsets
@property
def input_line_to_screen_line(self):
"""
Return a dictionary mapping the line numbers of the screen to the one
of the input buffer.
"""
return dict((v, k) for k, v in
self.original_screen.screen_line_to_input_line.items())
@property
def screen_line_to_input_line(self):
"""
Return the dictionary mapping the line numbers of the input buffer to
the lines of the screen.
"""
return self.original_screen.screen_line_to_input_line
@property
def visible_line_to_input_line(self):
"""
Return a dictionary mapping the visible rows to the line numbers of the
input.
"""
return dict((k - self.vertical_scroll, v) for
k, v in self.original_screen.screen_line_to_input_line.items())
def first_visible_line(self, after_scroll_offset=False):
"""
Return the line number (0 based) of the input document that corresponds
with the first visible line.
"""
# Note that we can't just do vertical_scroll+height because some input
# lines could be wrapped and span several lines in the screen.
screen = self.original_screen
height = self.window_height
start = self.vertical_scroll
if after_scroll_offset:
start += self.applied_scroll_offsets.top
for y in range(start, self.vertical_scroll + height):
if y in screen.screen_line_to_input_line:
return screen.screen_line_to_input_line[y]
return 0
def last_visible_line(self, before_scroll_offset=False):
"""
Like `first_visible_line`, but for the last visible line.
"""
screen = self.original_screen
height = self.window_height
start = self.vertical_scroll + height - 1
if before_scroll_offset:
start -= self.applied_scroll_offsets.bottom
for y in range(start, self.vertical_scroll, -1):
if y in screen.screen_line_to_input_line:
return screen.screen_line_to_input_line[y]
return 0
def center_visible_line(self, before_scroll_offset=False,
after_scroll_offset=False):
"""
Like `first_visible_line`, but for the center visible line.
"""
return (self.first_visible_line(after_scroll_offset) +
(self.last_visible_line(before_scroll_offset) -
self.first_visible_line(after_scroll_offset)) / 2
)
@property
def content_height(self):
"""
The full height of the user control.
"""
return self.original_screen.height
@property
def full_height_visible(self):
"""
True when the full height is visible (There is no vertical scroll.)
"""
return self.window_height >= self.original_screen.height
@property
def top_visible(self):
"""
True when the top of the buffer is visible.
"""
return self.vertical_scroll == 0
@property
def bottom_visible(self):
"""
True when the bottom of the buffer is visible.
"""
return self.vertical_scroll >= \
self.original_screen.height - self.window_height
@property
def vertical_scroll_percentage(self):
"""
Vertical scroll as a percentage. (0 means: the top is visible,
100 means: the bottom is visible.)
"""
return (100 * self.vertical_scroll //
(self.original_screen.height - self.window_height))
class ScrollOffsets(object):
"""
Scroll offsets for the :class:`.Window` class.
Note that left/right offsets only make sense if line wrapping is disabled.
"""
def __init__(self, top=0, bottom=0, left=0, right=0):
self.top = top
self.bottom = bottom
self.left = left
self.right = right
def __repr__(self):
return 'ScrollOffsets(top=%r, bottom=%r, left=%r, right=%r)' % (
self.top, self.bottom, self.left, self.right)
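# Hedged example (editor addition): asking the Window defined below to keep two
# lines of context visible around the cursor and to allow scrolling past the
# last line; `some_control` is an assumed, existing UIControl instance.
#
#   Window(content=some_control,
#          scroll_offsets=ScrollOffsets(top=2, bottom=2),
#          allow_scroll_beyond_bottom=True)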
class Window(Container):
"""
Container that holds a control.
:param content: :class:`~prompt_toolkit.layout.controls.UIControl` instance.
:param width: :class:`~prompt_toolkit.layout.dimension.LayoutDimension` instance.
:param height: :class:`~prompt_toolkit.layout.dimension.LayoutDimension` instance.
:param get_width: callable which takes a `CommandLineInterface` and returns a `LayoutDimension`.
:param get_height: callable which takes a `CommandLineInterface` and returns a `LayoutDimension`.
:param dont_extend_width: When `True`, don't take up more width than the
preferred width reported by the control.
:param dont_extend_height: When `True`, don't take up more height than the
preferred height reported by the control.
:param left_margins: A list of :class:`~prompt_toolkit.layout.margins.Margin`
instance to be displayed on the left. For instance:
:class:`~prompt_toolkit.layout.margins.NumberredMargin` can be one of
them in order to show line numbers.
:param right_margins: Like `left_margins`, but on the other side.
:param scroll_offsets: :class:`.ScrollOffsets` instance, representing the
preferred amount of lines/columns to be always visible before/after the
cursor. When both top and bottom are a very high number, the cursor
will be centered vertically most of the time.
:param allow_scroll_beyond_bottom: A `bool` or
:class:`~prompt_toolkit.filters.CLIFilter` instance. When True, allow
scrolling so far, that the top part of the content is not visible
anymore, while there is still empty space available at the bottom of
the window. In the Vi editor for instance, this is possible. You will
see tildes while the top part of the body is hidden.
:param get_vertical_scroll: Callable that takes this window
instance as input and returns a preferred vertical scroll.
(When this is `None`, the scroll is only determined by the last and
current cursor position.)
:param get_horizontal_scroll: Callable that takes this window
instance as input and returns a preferred vertical scroll.
:param always_hide_cursor: A `bool` or
:class:`~prompt_toolkit.filters.CLIFilter` instance. When True, never
display the cursor, even when the user control specifies a cursor
position.
"""
def __init__(self, content, width=None, height=None, get_width=None,
get_height=None, dont_extend_width=False, dont_extend_height=False,
left_margins=None, right_margins=None, scroll_offsets=None,
allow_scroll_beyond_bottom=False,
get_vertical_scroll=None, get_horizontal_scroll=None, always_hide_cursor=False):
assert isinstance(content, UIControl)
assert width is None or isinstance(width, LayoutDimension)
assert height is None or isinstance(height, LayoutDimension)
assert get_width is None or callable(get_width)
assert get_height is None or callable(get_height)
assert width is None or get_width is None
assert height is None or get_height is None
assert scroll_offsets is None or isinstance(scroll_offsets, ScrollOffsets)
assert left_margins is None or all(isinstance(m, Margin) for m in left_margins)
assert right_margins is None or all(isinstance(m, Margin) for m in right_margins)
assert get_vertical_scroll is None or callable(get_vertical_scroll)
assert get_horizontal_scroll is None or callable(get_horizontal_scroll)
self.allow_scroll_beyond_bottom = to_cli_filter(allow_scroll_beyond_bottom)
self.always_hide_cursor = to_cli_filter(always_hide_cursor)
self.content = content
self.dont_extend_width = dont_extend_width
self.dont_extend_height = dont_extend_height
self.left_margins = left_margins or []
self.right_margins = right_margins or []
self.scroll_offsets = scroll_offsets or ScrollOffsets()
self.get_vertical_scroll = get_vertical_scroll
self.get_horizontal_scroll = get_horizontal_scroll
self._width = get_width or (lambda cli: width)
self._height = get_height or (lambda cli: height)
# Cache for the screens generated by the margin.
self._margin_cache = SimpleLRUCache(maxsize=8)
self.reset()
def __repr__(self):
return 'Window(content=%r)' % self.content
def reset(self):
self.content.reset()
#: Scrolling position of the main content.
self.vertical_scroll = 0
self.horizontal_scroll = 0
#: Keep render information (mappings between buffer input and render
#: output.)
self.render_info = None
def preferred_width(self, cli, max_available_width):
# Width of the margins.
total_margin_width = sum(m.get_width(cli) for m in
self.left_margins + self.right_margins)
# Window of the content.
preferred_width = self.content.preferred_width(
cli, max_available_width - total_margin_width)
if preferred_width is not None:
preferred_width += total_margin_width
# Merge.
return self._merge_dimensions(
dimension=self._width(cli),
preferred=preferred_width,
dont_extend=self.dont_extend_width)
def preferred_height(self, cli, width):
return self._merge_dimensions(
dimension=self._height(cli),
preferred=self.content.preferred_height(cli, width),
dont_extend=self.dont_extend_height)
@staticmethod
def _merge_dimensions(dimension, preferred=None, dont_extend=False):
"""
Take the LayoutDimension from this `Window` class and the received
preferred size from the `UIControl` and return a `LayoutDimension` to
report to the parent container.
"""
dimension = dimension or LayoutDimension()
# When a preferred dimension was explicitly given to the Window,
# ignore the UIControl.
if dimension.preferred_specified:
preferred = dimension.preferred
# When a 'preferred' dimension is given by the UIControl, make sure
# that it stays within the bounds of the Window.
if preferred is not None:
if dimension.max:
preferred = min(preferred, dimension.max)
if dimension.min:
preferred = max(preferred, dimension.min)
# When a `dont_extend` flag has been given, use the preferred dimension
# also as the max dimension.
if dont_extend and preferred is not None:
max_ = min(dimension.max, preferred)
else:
max_ = dimension.max
return LayoutDimension(min=dimension.min, max=max_, preferred=preferred)
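# Hedged worked example (editor addition) of _merge_dimensions above: with
# dimension=LayoutDimension(min=5, max=30) and a control preferring 40 columns,
# the preferred value is clipped to 30; with dont_extend=True the result is a
# LayoutDimension with min=5, max=30, preferred=30.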
def write_to_screen(self, cli, screen, mouse_handlers, write_position):
"""
Write window to screen. This renders the user control, the margins and
copies everything over to the absolute position at the given screen.
"""
# Calculate margin sizes.
left_margin_widths = [m.get_width(cli) for m in self.left_margins]
right_margin_widths = [m.get_width(cli) for m in self.right_margins]
total_margin_width = sum(left_margin_widths + right_margin_widths)
# Render UserControl.
tpl = self.content.create_screen(
cli, write_position.width - total_margin_width, write_position.height)
if isinstance(tpl, tuple):
temp_screen, highlighting = tpl
else:
# For backwards compatibility.
temp_screen, highlighting = tpl, defaultdict(lambda: defaultdict(lambda: None))
# Scroll content.
applied_scroll_offsets = self._scroll(
temp_screen, write_position.width - total_margin_width, write_position.height, cli)
# Write body to screen.
self._copy_body(cli, temp_screen, highlighting, screen, write_position,
sum(left_margin_widths), write_position.width - total_margin_width,
applied_scroll_offsets)
# Remember render info. (Set before generating the margins. They need this.)
self.render_info = WindowRenderInfo(
original_screen=temp_screen,
horizontal_scroll=self.horizontal_scroll,
vertical_scroll=self.vertical_scroll,
window_width=write_position.width,
window_height=write_position.height,
cursor_position=Point(y=temp_screen.cursor_position.y - self.vertical_scroll,
x=temp_screen.cursor_position.x - self.horizontal_scroll),
configured_scroll_offsets=self.scroll_offsets,
applied_scroll_offsets=applied_scroll_offsets)
# Set mouse handlers.
def mouse_handler(cli, mouse_event):
""" Wrapper around the mouse_handler of the `UIControl` that turns
absolute coordinates into relative coordinates. """
position = mouse_event.position
# Call the mouse handler of the UIControl first.
result = self.content.mouse_handler(
cli, MouseEvent(
position=Point(x=position.x - write_position.xpos - sum(left_margin_widths),
y=position.y - write_position.ypos + self.vertical_scroll),
event_type=mouse_event.event_type))
# If it returns NotImplemented, handle it here.
if result == NotImplemented:
return self._mouse_handler(cli, mouse_event)
return result
mouse_handlers.set_mouse_handler_for_range(
x_min=write_position.xpos + sum(left_margin_widths),
x_max=write_position.xpos + write_position.width - total_margin_width,
y_min=write_position.ypos,
y_max=write_position.ypos + write_position.height,
handler=mouse_handler)
# Render and copy margins.
move_x = 0
def render_margin(m, width):
" Render margin. Return `Screen`. "
# Retrieve margin tokens.
tokens = m.create_margin(cli, self.render_info, width, write_position.height)
# Turn it into a screen. (Take a screen from the cache if we
# already rendered those tokens using this size.)
def create_screen():
return TokenListControl.static(tokens).create_screen(
cli, width + 1, write_position.height)
key = (tokens, width, write_position.height)
return self._margin_cache.get(key, create_screen)
for m, width in zip(self.left_margins, left_margin_widths):
# Create screen for margin.
margin_screen = render_margin(m, width)
# Copy and shift X.
self._copy_margin(margin_screen, screen, write_position, move_x, width)
move_x += width
move_x = write_position.width - sum(right_margin_widths)
for m, width in zip(self.right_margins, right_margin_widths):
# Create screen for margin.
margin_screen = render_margin(m, width)
# Copy and shift X.
self._copy_margin(margin_screen, screen, write_position, move_x, width)
move_x += width
def _copy_body(self, cli, temp_screen, highlighting, new_screen,
write_position, move_x, width, applied_scroll_offsets):
"""
Copy characters from the temp screen that we got from the `UIControl`
to the real screen.
"""
xpos = write_position.xpos + move_x
ypos = write_position.ypos
height = write_position.height
temp_buffer = temp_screen.data_buffer
new_buffer = new_screen.data_buffer
temp_screen_height = temp_screen.height
vertical_scroll = self.vertical_scroll
horizontal_scroll = self.horizontal_scroll
y = 0
# Now copy the region we need to the real screen.
for y in range(0, height):
# We keep local row variables. (Don't look up the row in the dict
# for each iteration of the nested loop.)
new_row = new_buffer[y + ypos]
if y >= temp_screen_height and y >= write_position.height:
                # Break out of the for loop once we are past the last row of the
# temp screen. (We use the 'y' position for calculation of new
# screen's height.)
break
else:
temp_row = temp_buffer[y + vertical_scroll]
highlighting_row = highlighting[y + vertical_scroll]
# Copy row content, except for transparent tokens.
# (This is useful in case of floats.)
# Also apply highlighting.
for x in range(0, width):
cell = temp_row[x + horizontal_scroll]
highlighting_token = highlighting_row[x]
if highlighting_token:
new_row[x + xpos] = Char(cell.char, highlighting_token)
elif cell.token != Transparent:
new_row[x + xpos] = cell
if self.content.has_focus(cli):
new_screen.cursor_position = Point(y=temp_screen.cursor_position.y + ypos - vertical_scroll,
x=temp_screen.cursor_position.x + xpos - horizontal_scroll)
if not self.always_hide_cursor(cli):
new_screen.show_cursor = temp_screen.show_cursor
if not new_screen.menu_position and temp_screen.menu_position:
new_screen.menu_position = Point(y=temp_screen.menu_position.y + ypos - vertical_scroll,
x=temp_screen.menu_position.x + xpos - horizontal_scroll)
# Update height of the output screen. (new_screen.write_data is not
# called, so the screen is not aware of its height.)
new_screen.height = max(new_screen.height, ypos + y + 1)
def _copy_margin(self, temp_screen, new_screen, write_position, move_x, width):
"""
Copy characters from the margin screen to the real screen.
"""
xpos = write_position.xpos + move_x
ypos = write_position.ypos
temp_buffer = temp_screen.data_buffer
new_buffer = new_screen.data_buffer
# Now copy the region we need to the real screen.
for y in range(0, write_position.height):
new_row = new_buffer[y + ypos]
temp_row = temp_buffer[y]
# Copy row content, except for transparent tokens.
# (This is useful in case of floats.)
for x in range(0, width):
cell = temp_row[x]
if cell.token != Transparent:
new_row[x + xpos] = cell
def _scroll(self, temp_screen, width, height, cli):
"""
Scroll to make sure the cursor position is visible and that we maintain the
requested scroll offset.
Return the applied scroll offsets.
"""
def do_scroll(current_scroll, scroll_offset_start, scroll_offset_end,
cursor_pos, window_size, content_size):
" Scrolling algorithm. Used for both horizontal and vertical scrolling. "
# Calculate the scroll offset to apply.
            # This can obviously never be more than half the screen size. Also, when the
# cursor appears at the top or bottom, we don't apply the offset.
scroll_offset_start = int(min(scroll_offset_start, window_size / 2, cursor_pos))
scroll_offset_end = int(min(scroll_offset_end, window_size / 2,
content_size - 1 - cursor_pos))
# Prevent negative scroll offsets.
if current_scroll < 0:
current_scroll = 0
            # Scroll back if we scrolled too much and there's still space to show more of the document.
if (not self.allow_scroll_beyond_bottom(cli) and
current_scroll > content_size - window_size):
current_scroll = max(0, content_size - window_size)
# Scroll up if cursor is before visible part.
if current_scroll > cursor_pos - scroll_offset_start:
current_scroll = max(0, cursor_pos - scroll_offset_start)
# Scroll down if cursor is after visible part.
if current_scroll < (cursor_pos + 1) - window_size + scroll_offset_end:
current_scroll = (cursor_pos + 1) - window_size + scroll_offset_end
# Calculate the applied scroll offset. This value can be lower than what we had.
scroll_offset_start = max(0, min(current_scroll, scroll_offset_start))
scroll_offset_end = max(0, min(content_size - current_scroll - window_size, scroll_offset_end))
return current_scroll, scroll_offset_start, scroll_offset_end
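        # Worked example (illustrative numbers only, not taken from any caller):
        # with current_scroll=30, scroll offsets 3/3, cursor_pos=50,
        # window_size=10 and content_size=100, the cursor lies below the
        # visible region, so do_scroll() returns (44, 3, 3): the window now
        # shows rows 44..53, keeping the cursor at row 50 with the requested
        # 3-row offset from the bottom edge.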
# When a preferred scroll is given, take that first into account.
if self.get_vertical_scroll:
self.vertical_scroll = self.get_vertical_scroll(self)
assert isinstance(self.vertical_scroll, int)
if self.get_horizontal_scroll:
self.horizontal_scroll = self.get_horizontal_scroll(self)
assert isinstance(self.horizontal_scroll, int)
# Update horizontal/vertical scroll to make sure that the cursor
# remains visible.
offsets = self.scroll_offsets
self.vertical_scroll, scroll_offset_top, scroll_offset_bottom = do_scroll(
current_scroll=self.vertical_scroll,
scroll_offset_start=offsets.top,
scroll_offset_end=offsets.bottom,
cursor_pos=temp_screen.cursor_position.y,
window_size=height,
content_size=temp_screen.height)
self.horizontal_scroll, scroll_offset_left, scroll_offset_right = do_scroll(
current_scroll=self.horizontal_scroll,
scroll_offset_start=offsets.left,
scroll_offset_end=offsets.right,
cursor_pos=temp_screen.cursor_position.x,
window_size=width,
content_size=temp_screen.width)
applied_scroll_offsets = ScrollOffsets(
top=scroll_offset_top,
bottom=scroll_offset_bottom,
left=scroll_offset_left,
right=scroll_offset_right)
return applied_scroll_offsets
def _mouse_handler(self, cli, mouse_event):
"""
Mouse handler. Called when the UI control doesn't handle this
particular event.
"""
if mouse_event.event_type == MouseEventTypes.SCROLL_DOWN:
self._scroll_down(cli)
elif mouse_event.event_type == MouseEventTypes.SCROLL_UP:
self._scroll_up(cli)
def _scroll_down(self, cli):
" Scroll window down. "
info = self.render_info
if self.vertical_scroll < info.content_height - info.window_height:
if info.cursor_position.y <= info.configured_scroll_offsets.top:
self.content.move_cursor_down(cli)
self.vertical_scroll += 1
def _scroll_up(self, cli):
" Scroll window up. "
info = self.render_info
if info.vertical_scroll > 0:
if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom:
self.content.move_cursor_up(cli)
self.vertical_scroll -= 1
def walk(self, cli):
# Only yield self. A window doesn't have children.
yield self
class ConditionalContainer(Container):
"""
Wrapper around any other container that can change the visibility. The
received `filter` determines whether the given container should be
displayed or not.
:param content: :class:`.Container` instance.
:param filter: :class:`~prompt_toolkit.filters.CLIFilter` instance.
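
    A minimal usage sketch (illustrative only; ``show_sidebar`` is assumed to
    be a :class:`~prompt_toolkit.filters.CLIFilter` and ``tokens`` a token
    list defined elsewhere, neither is part of this module)::

        sidebar = ConditionalContainer(
            content=Window(content=TokenListControl.static(tokens)),
            filter=show_sidebar)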
"""
def __init__(self, content, filter):
assert isinstance(content, Container)
self.content = content
self.filter = to_cli_filter(filter)
def reset(self):
self.content.reset()
def preferred_width(self, cli, max_available_width):
if self.filter(cli):
return self.content.preferred_width(cli, max_available_width)
else:
return LayoutDimension.exact(0)
def preferred_height(self, cli, width):
if self.filter(cli):
return self.content.preferred_height(cli, width)
else:
return LayoutDimension.exact(0)
def write_to_screen(self, cli, screen, mouse_handlers, write_position):
if self.filter(cli):
return self.content.write_to_screen(cli, screen, mouse_handlers, write_position)
def walk(self, cli):
return self.content.walk(cli)
# Deprecated alias for 'Container'.
Layout = Container
|
the-stack_0_20630 | #!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import string
from string import Template
from generator import Generator, ucfirst
from models import ObjectType, EnumType, Frameworks
from objc_generator import ObjCTypeCategory, ObjCGenerator
from objc_generator_templates import ObjCGeneratorTemplates as ObjCTemplates
log = logging.getLogger('global')
def add_newline(lines):
if lines and lines[-1] == '':
return
lines.append('')
class ObjCProtocolTypesImplementationGenerator(ObjCGenerator):
def __init__(self, *args, **kwargs):
ObjCGenerator.__init__(self, *args, **kwargs)
def output_filename(self):
return '%sTypes.mm' % self.protocol_name()
def domains_to_generate(self):
return filter(self.should_generate_types_for_domain, Generator.domains_to_generate(self))
def generate_output(self):
secondary_headers = [
'"%sTypeConversions.h"' % self.protocol_name(),
Generator.string_for_file_include('%sJSONObjectPrivate.h' % ObjCGenerator.OBJC_STATIC_PREFIX, Frameworks.WebInspector, self.model().framework),
'<wtf/Assertions.h>',
'<wtf/JSONValues.h>',
]
# The FooProtocolInternal.h header is only needed to declare the backend-side event dispatcher bindings.
primaryIncludeName = self.protocol_name()
if self.get_generator_setting('generate_backend', False):
primaryIncludeName += 'Internal'
header_args = {
'primaryInclude': '"%s.h"' % primaryIncludeName,
'secondaryIncludes': '\n'.join(['#import %s' % header for header in secondary_headers]),
}
domains = self.domains_to_generate()
sections = []
sections.append(self.generate_license())
sections.append(Template(ObjCTemplates.ImplementationPrelude).substitute(None, **header_args))
sections.extend(map(self.generate_type_implementations, domains))
sections.append(Template(ObjCTemplates.ImplementationPostlude).substitute(None, **header_args))
return '\n\n'.join(sections)
def generate_type_implementations(self, domain):
lines = []
for declaration in self.type_declarations_for_domain(domain):
if (isinstance(declaration.type, ObjectType)):
add_newline(lines)
lines.append(self.generate_type_implementation(domain, declaration))
return '\n'.join(lines)
def generate_type_implementation(self, domain, declaration):
lines = []
lines.append('@implementation %s' % self.objc_name_for_type(declaration.type))
# The initializer that takes a payload is only needed by the frontend.
if self.get_generator_setting('generate_frontend', False):
lines.append('')
lines.append(self._generate_init_method_for_payload(domain, declaration))
lines.append(self._generate_init_method_for_json_object(domain, declaration))
required_members = filter(lambda member: not member.is_optional, declaration.type_members)
if required_members:
lines.append('')
lines.append(self._generate_init_method_for_required_members(domain, declaration, required_members))
for member in declaration.type_members:
lines.append('')
lines.append(self._generate_setter_for_member(domain, declaration, member))
lines.append('')
lines.append(self._generate_getter_for_member(domain, declaration, member))
lines.append('')
lines.append('@end')
return '\n'.join(lines)
def _generate_init_method_for_json_object(self, domain, declaration):
lines = []
lines.append('- (instancetype)initWithJSONObject:(RWIProtocolJSONObject *)jsonObject')
lines.append('{')
lines.append(' if (!(self = [super initWithInspectorObject:[jsonObject toInspectorObject].get()]))')
lines.append(' return nil;')
lines.append('')
lines.append(' return self;')
lines.append('}')
return '\n'.join(lines)
def _generate_init_method_for_payload(self, domain, declaration):
lines = []
lines.append('- (instancetype)initWithPayload:(nonnull NSDictionary<NSString *, id> *)payload')
lines.append('{')
lines.append(' if (!(self = [super init]))')
lines.append(' return nil;')
lines.append('')
for member in declaration.type_members:
member_name = member.member_name
if not member.is_optional:
lines.append(' THROW_EXCEPTION_FOR_REQUIRED_PROPERTY(payload[@"%s"], @"%s");' % (member_name, member_name))
objc_type = self.objc_type_for_member(declaration, member)
var_name = ObjCGenerator.identifier_to_objc_identifier(member_name)
conversion_expression = self.payload_to_objc_expression_for_member(declaration, member)
if isinstance(member.type, EnumType):
lines.append(' std::optional<%s> %s = %s;' % (objc_type, var_name, conversion_expression))
if not member.is_optional:
lines.append(' THROW_EXCEPTION_FOR_BAD_ENUM_VALUE(%s, @"%s");' % (var_name, member_name))
lines.append(' self.%s = %s.value();' % (var_name, var_name))
else:
lines.append(' if (%s)' % var_name)
lines.append(' self.%s = %s.value();' % (var_name, var_name))
else:
lines.append(' self.%s = %s;' % (var_name, conversion_expression))
lines.append('')
lines.append(' return self;')
lines.append('}')
return '\n'.join(lines)
def _generate_init_method_for_required_members(self, domain, declaration, required_members):
pairs = []
for member in required_members:
objc_type = self.objc_type_for_member(declaration, member)
var_name = ObjCGenerator.identifier_to_objc_identifier(member.member_name)
pairs.append('%s:(%s)%s' % (var_name, objc_type, var_name))
pairs[0] = ucfirst(pairs[0])
lines = []
lines.append('- (instancetype)initWith%s' % ' '.join(pairs))
lines.append('{')
lines.append(' if (!(self = [super init]))')
lines.append(' return nil;')
lines.append('')
required_pointer_members = filter(lambda member: ObjCGenerator.is_type_objc_pointer_type(member.type), required_members)
if required_pointer_members:
for member in required_pointer_members:
var_name = ObjCGenerator.identifier_to_objc_identifier(member.member_name)
lines.append(' THROW_EXCEPTION_FOR_REQUIRED_PROPERTY(%s, @"%s");' % (var_name, var_name))
objc_array_class = self.objc_class_for_array_type(member.type)
if objc_array_class and objc_array_class.startswith(self.objc_prefix()):
lines.append(' THROW_EXCEPTION_FOR_BAD_TYPE_IN_ARRAY(%s, [%s class]);' % (var_name, objc_array_class))
lines.append('')
for member in required_members:
var_name = ObjCGenerator.identifier_to_objc_identifier(member.member_name)
lines.append(' self.%s = %s;' % (var_name, var_name))
lines.append('')
lines.append(' return self;')
lines.append('}')
return '\n'.join(lines)
def _generate_setter_for_member(self, domain, declaration, member):
objc_type = self.objc_type_for_member(declaration, member)
var_name = ObjCGenerator.identifier_to_objc_identifier(member.member_name)
setter_method = ObjCGenerator.objc_setter_method_for_member(declaration, member)
conversion_expression = self.objc_to_protocol_expression_for_member(declaration, member, var_name)
lines = []
lines.append('- (void)set%s:(%s)%s' % (ucfirst(var_name), objc_type, var_name))
lines.append('{')
objc_array_class = self.objc_class_for_array_type(member.type)
if objc_array_class and objc_array_class.startswith(self.objc_prefix()):
lines.append(' THROW_EXCEPTION_FOR_BAD_TYPE_IN_ARRAY(%s, [%s class]);' % (var_name, objc_array_class))
lines.append(' [super %s:%s forKey:@"%s"];' % (setter_method, conversion_expression, member.member_name))
lines.append('}')
return '\n'.join(lines)
def _generate_getter_for_member(self, domain, declaration, member):
objc_type = self.objc_type_for_member(declaration, member)
var_name = ObjCGenerator.identifier_to_objc_identifier(member.member_name)
getter_method = ObjCGenerator.objc_getter_method_for_member(declaration, member)
basic_expression = '[super %s:@"%s"]' % (getter_method, member.member_name)
category = ObjCTypeCategory.category_for_type(member.type)
if category is ObjCTypeCategory.Object:
lines = []
lines.append('- (%s)%s' % (objc_type, var_name))
lines.append('{')
lines.append(self.protocol_to_objc_code_block_for_object_member(declaration, member, basic_expression))
lines.append('}')
else:
conversion_expression = self.protocol_to_objc_expression_for_member(declaration, member, basic_expression)
lines = []
lines.append('- (%s)%s' % (objc_type, var_name))
lines.append('{')
lines.append(' return %s;' % conversion_expression)
lines.append('}')
return '\n'.join(lines)
|
the-stack_0_20631 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitscheck`` is a command line script based on astropy.io.fits for verifying
and updating the CHECKSUM and DATASUM keywords of .fits files. ``fitscheck``
can also detect and often fix other FITS standards violations. ``fitscheck``
facilitates re-writing the non-standard checksums originally generated by
astropy.io.fits with standard checksums which will interoperate with CFITSIO.
``fitscheck`` will refuse to write new checksums if the checksum keywords are
missing or their values are bad. Use ``--force`` to write new checksums
regardless of whether or not they currently exist or pass. Use
``--ignore-missing`` to tolerate missing checksum keywords without comment.
Example uses of fitscheck:
1. Add checksums::
$ fitscheck --write *.fits
2. Write new checksums, even if existing checksums are bad or missing::
$ fitscheck --write --force *.fits
3. Verify standard checksums and FITS compliance without changing the files::
$ fitscheck --compliance *.fits
4. Only check and fix compliance problems, ignoring checksums::
$ fitscheck --checksum none --compliance --write *.fits
5. Verify standard interoperable checksums::
$ fitscheck *.fits
6. Delete checksum keywords::
$ fitscheck --checksum remove --write *.fits
"""
import sys
import logging
import argparse
import warnings
from astropy.io import fits
from astropy import __version__
log = logging.getLogger('fitscheck')
DESCRIPTION = """
e.g. fitscheck example.fits
Verifies and optionally re-writes the CHECKSUM and DATASUM keywords
for a .fits file.
Optionally detects and fixes FITS standard compliance problems.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitscheck
for further documentation.
""".strip()
def handle_options(args):
if not len(args):
args = ['-h']
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version=f'%(prog)s {__version__}')
parser.add_argument(
'fits_files', metavar='file', nargs='+',
help='.fits files to process.')
parser.add_argument(
'-k', '--checksum', dest='checksum_kind',
choices=['standard', 'remove', 'none'],
help='Choose FITS checksum mode or none. Defaults standard.',
default='standard')
parser.add_argument(
'-w', '--write', dest='write_file',
help='Write out file checksums and/or FITS compliance fixes.',
default=False, action='store_true')
parser.add_argument(
'-f', '--force', dest='force',
help='Do file update even if original checksum was bad.',
default=False, action='store_true')
parser.add_argument(
'-c', '--compliance', dest='compliance',
help='Do FITS compliance checking; fix if possible.',
default=False, action='store_true')
parser.add_argument(
'-i', '--ignore-missing', dest='ignore_missing',
help='Ignore missing checksums.',
default=False, action='store_true')
parser.add_argument(
'-v', '--verbose', dest='verbose', help='Generate extra output.',
default=False, action='store_true')
global OPTIONS
OPTIONS = parser.parse_args(args)
if OPTIONS.checksum_kind == 'none':
OPTIONS.checksum_kind = False
elif OPTIONS.checksum_kind == 'standard':
OPTIONS.checksum_kind = True
elif OPTIONS.checksum_kind == 'remove':
OPTIONS.write_file = True
OPTIONS.force = True
return OPTIONS.fits_files
def setup_logging():
log.handlers.clear()
if OPTIONS.verbose:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.WARNING)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(handler)
def verify_checksums(filename):
"""
Prints a message if any HDU in `filename` has a bad checksum or datasum.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always')
with fits.open(filename, checksum=OPTIONS.checksum_kind) as hdulist:
for i, hdu in enumerate(hdulist):
# looping on HDUs is needed to read them and verify the
# checksums
if not OPTIONS.ignore_missing:
if not hdu._checksum:
log.warning('MISSING {!r} .. Checksum not found '
'in HDU #{}'.format(filename, i))
return 1
if not hdu._datasum:
log.warning('MISSING {!r} .. Datasum not found '
'in HDU #{}'.format(filename, i))
return 1
for w in wlist:
if str(w.message).startswith(('Checksum verification failed',
'Datasum verification failed')):
log.warning('BAD %r %s', filename, str(w.message))
return 1
log.info(f'OK {filename!r}')
return 0
def verify_compliance(filename):
"""Check for FITS standard compliance."""
with fits.open(filename) as hdulist:
try:
hdulist.verify('exception')
except fits.VerifyError as exc:
log.warning('NONCOMPLIANT %r .. %s',
filename, str(exc).replace('\n', ' '))
return 1
return 0
def update(filename):
"""
Sets the ``CHECKSUM`` and ``DATASUM`` keywords for each HDU of `filename`.
    Also fixes standards violations if possible and requested.
"""
output_verify = 'silentfix' if OPTIONS.compliance else 'ignore'
# For unit tests we reset temporarily the warning filters. Indeed, before
# updating the checksums, fits.open will verify the existing checksums and
# raise warnings, which are later caught and converted to log.warning...
# which is an issue when testing, using the "error" action to convert
# warnings to exceptions.
with warnings.catch_warnings():
warnings.resetwarnings()
with fits.open(filename, do_not_scale_image_data=True,
checksum=OPTIONS.checksum_kind, mode='update') as hdulist:
hdulist.flush(output_verify=output_verify)
def process_file(filename):
"""
Handle a single .fits file, returning the count of checksum and compliance
errors.
"""
try:
checksum_errors = verify_checksums(filename)
if OPTIONS.compliance:
compliance_errors = verify_compliance(filename)
else:
compliance_errors = 0
if OPTIONS.write_file and checksum_errors == 0 or OPTIONS.force:
update(filename)
return checksum_errors + compliance_errors
except Exception as e:
log.error(f'EXCEPTION {filename!r} .. {e}')
return 1
def main(args=None):
"""
Processes command line parameters into options and files, then checks
    or updates FITS DATASUM and CHECKSUM keywords for the specified files.
"""
errors = 0
fits_files = handle_options(args or sys.argv[1:])
setup_logging()
for filename in fits_files:
errors += process_file(filename)
if errors:
log.warning(f'{errors} errors')
return int(bool(errors))
|
the-stack_0_20633 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import torch
from torch.optim import Optimizer
from pytorch_lightning.plugins.precision_plugin import PrecisionPlugin
class NativeAMPPlugin(PrecisionPlugin):
def __init__(self, trainer=None):
"""
Integrates native amp into Lightning's internals.
"""
self.trainer = trainer
def connect(self, model, optimizers):
return model, optimizers
def backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs):
closure_loss = self.trainer.scaler.scale(closure_loss)
automatic_optimization = self.trainer.train_loop.automatic_optimization
# do backward pass
if automatic_optimization:
model = self.trainer.get_model()
model.backward(closure_loss, optimizer, opt_idx)
else:
closure_loss.backward(*args, **kwargs)
# once backward has been applied, release graph
closure_loss = closure_loss.detach()
# unscale gradient to allow analyze within `on_after_backward`
if not self.trainer.train_loop.should_accumulate() and automatic_optimization:
self.trainer.scaler.unscale_(optimizer)
return closure_loss
def training_step(self, fx, args):
with torch.cuda.amp.autocast():
output = fx(*args)
return output
def clip_gradients(self, grad_clip_val: Union[int, float], optimizer: Optimizer, norm_type: float):
model = self.trainer.get_model()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=grad_clip_val, norm_type=norm_type)
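# Illustrative sketch (an assumption for readers, not part of Lightning's API):
# the plain PyTorch native-AMP loop that the hooks above collectively
# reproduce.  `model`, `optimizer`, `loss_fn` and `batch` are placeholder
# names; the scaler corresponds to `self.trainer.scaler` used in `backward()`.
#
#     scaler = torch.cuda.amp.GradScaler()
#     with torch.cuda.amp.autocast():              # mirrors training_step()
#         loss = loss_fn(model(batch))
#     scaler.scale(loss).backward()                # mirrors backward()
#     scaler.unscale_(optimizer)                   # unscale before clipping
#     torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # clip_gradients()
#     scaler.step(optimizer)
#     scaler.update()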
|
the-stack_0_20634 | import pytest
from django.http import HttpRequest
from http_stubs.admin import (
HTTPStubAdminBase,
LogEntryAdminBase,
ProxyLogEntryAdmin,
)
from http_stubs.models import HTTPStub, LogEntry, ProxyLogEntity
class TestLogEntryAdminBase:
"""Tests for LogEntryAdminBase class."""
instance = LogEntryAdminBase(LogEntry, '')
def test_has_add_permission(self):
"""Should always return false."""
assert self.instance.has_add_permission() is False
@pytest.mark.parametrize(
'string, expect', (
('', ''),
('bad json', ''),
('{"not":"indented"}', '{\n "not": "indented"\n}'),
('{\n "indent": "json"\n}', '{\n "indent": "json"\n}'),
),
)
def test_pretty_str(self, string, expect):
"""Test pretty_str method.
        :param string: the string to check
        :param expect: the expected value for the string
"""
assert self.instance.pretty_str(string) == expect
def test_pretty_request_body(self):
"""Test pretty_request_body method."""
mock = type('mock', (), {'request_body': '{"valid":"json"}'})
expect = '{\n "valid": "json"\n}'
assert self.instance.pretty_request_body(mock) == expect
class TestProxyLogEntryAdmin:
"""Tests for ProxyLogEntryAdmin class."""
instance = ProxyLogEntryAdmin(ProxyLogEntity, '')
def test_pretty_response_body(self):
"""Test for pretty_response_body method."""
mock = type('mock', (), {'response_body': '{"valid":"json"}'})
expect = '{\n "valid": "json"\n}'
assert self.instance.pretty_response_body(mock) == expect
class TestHTTPStubAdminBase:
"""Tests for HTTPStubAdminBase class."""
instance = HTTPStubAdminBase(HTTPStub, '')
def test_enable_action(self, http_stub_factory):
"""Test for enable_action method.
:param http_stub_factory: factory for HTTPStubs
"""
stub = http_stub_factory(is_active=False)
self.instance.enable_action(HttpRequest(), HTTPStub.objects.all())
stub.refresh_from_db()
assert stub.is_active is True
def test_disable_action(self, http_stub_factory):
"""Test for disable_action method.
:param http_stub_factory: factory for HTTPStubs
"""
stub = http_stub_factory(is_active=True)
self.instance.disable_action(HttpRequest(), HTTPStub.objects.all())
stub.refresh_from_db()
assert stub.is_active is False
|
the-stack_0_20639 | import sys
sys.path.append('../src')
import transaction
import blockchain
import block
#import pytest
from hashlib import sha256
import json
import requests
import time
import random
from flask import Flask, request
from random import shuffle
app = Flask(__name__)
# the node's copy of blockchain
blockchain = blockchain.Blockchain()
############################# Connection Test ###############################
@app.route('/', methods=['POST'])
def hello_world():
return 'Hello, World!'
@app.route('/hello')
def hello():
return 'Hello, World'
################################################################
@app.route('/new_transaction', methods=['GET'])
def new_transaction():
#block_string = json.dumps("123456")
#proof_string = json.dumps("000234567")
transactions = transaction.Transaction(
version=0,
transaction_id=123456,
transaction_type="test",
tx_generator_address="1234567",
time_stamp = time.time(),
lock_time = 12334
)
tx_data = json.dumps({"transaction" : transactions.__dict__}, sort_keys=True)
return tx_data
# endpoint to query unconfirmed transactions
@app.route('/add_new_transactions', methods=['GET'])
def add_new_transactions():
unconfirmed_transactions_test = blockchain.unconfirmed_transactions
version = 0.1
t_id = 123456
transaction_type="Admin"
tx_generator_address= sha256( ("1234567".encode())).hexdigest()
s = '0 1 2 3 4 5 6 7 8 9 A B C D E F'.split()
type = ["Admin", "Regular"]
for i in range(5):
transactions = transaction.Transaction(
version = version,
transaction_type = transaction_type,
tx_generator_address = tx_generator_address,
time_stamp = time.time(),
lock_time = 12334
)
tx_data = json.dumps({"transaction" : transactions.__dict__}, sort_keys=True)
blockchain.add_new_transaction(tx_data)
t_id = t_id + i
shuffle(type)
transaction_type = type[0]
tx_generator_address = sha256( (str(s).encode())).hexdigest()
shuffle(s)
tx_data = json.dumps( unconfirmed_transactions_test, sort_keys=True)
return tx_data
#unit test to see whether we can create a block
@app.route('/new_block', methods=['GET'])
def new_block():
blocks = block.Block(
version = 0,
id = 0,
transactions = new_transaction(),
previous_hash = '1231asdfas',
block_generator_address = 'asdfs1as',
block_generation_proof = 'asdfsdwe1211',
nonce = 1,
status = 'Accepted',
)
block_data = json.dumps(
{"block": blocks.__dict__}, sort_keys=True)
return block_data
#testing whether we can add blocks to the blockchain
#can now add multiple blocks in a chain
@app.route('/add_new_block', methods=['GET'])
def add_new_block():
block_string = ""
#set the block_number to 0
block_number = 0
#set the version number of the block to 0
version = 0
#set the id number to 0
id = 0
#set the merkle hash. currently this will be random, will be implemented later
merkle_hash = random.getrandbits(128)
#set the block_generator_address, this is the public key of the client or validator
block_generator_address = sha256( ("1234567".encode())).hexdigest()
#set the block_generation_proof will be random for now
block_generation_proof = ""
    #set nonce to be zero, but it will be randomized later on
nonce = 0
#set the status as propose for now
status = 'proposed'
    #set the amount of transactions to be 0 because the genesis block does not have any transactions
t_counter = 0
#set the type of statuses
type_of_status = ['accepted', 'rejected', 'proposed', 'confirmed']
#this is to create random hash
random_string = [0,1,2,3,4,5,6,7,8,9,'a','b','c','d','e','f']
#go through for loop to add the blocks to the chain
for i in range(5):
#add transactions to the block will be 5 each time based on the add_new_transaction() function
transactions = add_new_transactions()
        #update the number of transactions in the current block
t_counter = t_counter + 5
#update previous hash
my_last_block = blockchain.last_block
previous_hash = my_last_block.hash
#create a block
blocks = block.Block(
version=version,
id=id,
transactions=transactions,
previous_hash=previous_hash,
block_generator_address=block_generator_address,
block_generation_proof=block_generation_proof,
nonce=nonce,
status=status,
)
#create a hash for the current block
#current_block_hash = blocks.compute_hash()
#add the block to the chain
blockchain.add_block(blocks, hash(blocks))
#update the id for the next block
id = i + 1
#update the merkle root for next block
merkle_hash = random.getrandbits(128)
#update the block_generator_address
block_generator_address = sha256( (str(random_string).encode())).hexdigest()
#randomize the random_string array
shuffle(random_string)
#randomize the block_generation_proof of the next block
block_generation_proof = random.randint(1,101)
#update the nonce value to be random
nonce = random.randint(1,101)
#randomize the status of the next block
status = type_of_status[random.randint(1,3)]
#randomize the type_of_status for the next block
shuffle(type_of_status)
    #go through all the blocks in the chain and append each one to block_string, since Flask can't return a list of objects
for my_block in blockchain.chain:
block_data = json.dumps(my_block.__dict__)
block_string = block_string + '\nblock ' + str(block_number) + ': ' + block_data
block_number = block_number + 1
return block_string
@app.route('/genesis_block', methods=['GET'])
def get_genesis():
#chain_test = blockchain.chain
blockchain.create_genesis_block()
chain_test = blockchain.last_block
b_data = json.dumps(chain_test.__dict__, sort_keys=True)
return b_data
@app.route('/chain', methods=['GET'])
def get_chain():
chain_data = []
for block in blockchain.chain:
chain_data.append(block.__dict__)
return json.dumps({"length": len(chain_data),
"chain": chain_data})
@app.route('/mine', methods=['GET'])
def mine_unconfirmed_transactions():
result = blockchain.mine()
if not result:
return "No transactions to mine"
return "Block #{} is mined.".format(result)
app.run(debug=True, port=8000)
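# Illustrative usage (assumed, not part of the node itself): with the server
# running, the endpoints above can be exercised from a separate process using
# the `requests` module already imported at the top of this file, e.g.:
#
#     import requests
#     print(requests.get('http://127.0.0.1:8000/chain').json())
#     print(requests.get('http://127.0.0.1:8000/mine').text)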
|
the-stack_0_20640 | from indy import ledger
import json
import pytest
@pytest.mark.asyncio
async def test_build_get_revoc_reg_request_work():
identifier = "Th7MpTaRZVRYnPiabds81Y"
rev_reg_def_id = "RevocRegID"
timestamp = 100
expected_response = {
"operation": {
"type": "116",
"revocRegDefId": rev_reg_def_id,
"timestamp": timestamp
}
}
request = json.loads(await ledger.build_get_revoc_reg_request(identifier, rev_reg_def_id, timestamp))
assert expected_response.items() <= request.items()
|
the-stack_0_20642 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_cl.py."""
import contextlib
import json
import os
import StringIO
import sys
import unittest
import urlparse
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.auto_stub import TestCase
import git_cl
import git_common
import git_footers
import subprocess2
def callError(code=1, cmd='', cwd='', stdout='', stderr=''):
return subprocess2.CalledProcessError(code, cmd, cwd, stdout, stderr)
CERR1 = callError(1)
class ChangelistMock(object):
# A class variable so we can access it when we don't have access to the
# instance that's being set.
desc = ""
def __init__(self, **kwargs):
pass
def GetIssue(self):
return 1
def GetDescription(self):
return ChangelistMock.desc
def UpdateDescription(self, desc, force=False):
ChangelistMock.desc = desc
class PresubmitMock(object):
def __init__(self, *args, **kwargs):
self.reviewers = []
@staticmethod
def should_continue():
return True
class RietveldMock(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def get_description(issue):
return 'Issue: %d' % issue
@staticmethod
def get_issue_properties(_issue, _messages):
return {
'reviewers': ['[email protected]', '[email protected]'],
'messages': [
{
'approval': True,
'sender': '[email protected]',
},
],
'patchsets': [1, 20001],
}
@staticmethod
def close_issue(_issue):
return 'Closed'
@staticmethod
def get_patch(issue, patchset):
return 'patch set from issue %s patchset %s' % (issue, patchset)
@staticmethod
def update_description(_issue, _description):
return 'Updated'
@staticmethod
def add_comment(_issue, _comment):
return 'Commented'
class GitCheckoutMock(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def reset():
GitCheckoutMock.conflict = False
def apply_patch(self, p):
if GitCheckoutMock.conflict:
raise Exception('failed')
class WatchlistsMock(object):
def __init__(self, _):
pass
@staticmethod
def GetWatchersForPaths(_):
return ['[email protected]']
class CodereviewSettingsFileMock(object):
def __init__(self):
pass
# pylint: disable=R0201
def read(self):
return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" +
"GERRIT_HOST: True\n")
class AuthenticatorMock(object):
def __init__(self, *_args):
pass
def has_cached_credentials(self):
return True
def authorize(self, http):
return http
def CookiesAuthenticatorMockFactory(hosts_with_creds=None, same_cookie=False):
"""Use to mock Gerrit/Git credentials from ~/.netrc or ~/.gitcookies.
Usage:
>>> self.mock(git_cl.gerrit_util, "CookiesAuthenticator",
CookiesAuthenticatorMockFactory({'host1': 'cookie1'}))
OR
>>> self.mock(git_cl.gerrit_util, "CookiesAuthenticator",
CookiesAuthenticatorMockFactory(cookie='cookie'))
"""
class CookiesAuthenticatorMock(git_cl.gerrit_util.CookiesAuthenticator):
def __init__(self): # pylint: disable=W0231
# Intentionally not calling super() because it reads actual cookie files.
pass
@classmethod
def get_gitcookies_path(cls):
return '~/.gitcookies'
@classmethod
def get_netrc_path(cls):
return '~/.netrc'
def get_auth_header(self, host):
if same_cookie:
return same_cookie
return (hosts_with_creds or {}).get(host)
return CookiesAuthenticatorMock
class MockChangelistWithBranchAndIssue():
def __init__(self, branch, issue):
self.branch = branch
self.issue = issue
def GetBranch(self):
return self.branch
def GetIssue(self):
return self.issue
class SystemExitMock(Exception):
pass
class TestGitClBasic(unittest.TestCase):
def _test_ParseIssueUrl(self, func, url, issue, patchset, hostname, fail):
parsed = urlparse.urlparse(url)
result = func(parsed)
if fail:
self.assertIsNone(result)
return None
self.assertIsNotNone(result)
self.assertEqual(result.issue, issue)
self.assertEqual(result.patchset, patchset)
self.assertEqual(result.hostname, hostname)
return result
def test_ParseIssueURL_rietveld(self):
def test(url, issue=None, patchset=None, hostname=None, fail=None):
self._test_ParseIssueUrl(
git_cl._RietveldChangelistImpl.ParseIssueURL,
url, issue, patchset, hostname, fail)
test('http://codereview.chromium.org/123',
123, None, 'codereview.chromium.org')
test('https://codereview.chromium.org/123',
123, None, 'codereview.chromium.org')
test('https://codereview.chromium.org/123/',
123, None, 'codereview.chromium.org')
test('https://codereview.chromium.org/123/whatever',
123, None, 'codereview.chromium.org')
test('https://codereview.chromium.org/123/#ps20001',
123, 20001, 'codereview.chromium.org')
test('http://codereview.chromium.org/download/issue123_4.diff',
123, 4, 'codereview.chromium.org')
# This looks like bad Gerrit, but is actually valid Rietveld.
test('https://chrome-review.source.com/123/4/',
123, None, 'chrome-review.source.com')
test('https://codereview.chromium.org/deadbeaf', fail=True)
test('https://codereview.chromium.org/api/123', fail=True)
test('bad://codereview.chromium.org/123', fail=True)
test('http://codereview.chromium.org/download/issue123_4.diffff', fail=True)
def test_ParseIssueURL_gerrit(self):
def test(url, issue=None, patchset=None, hostname=None, fail=None):
self._test_ParseIssueUrl(
git_cl._GerritChangelistImpl.ParseIssueURL,
url, issue, patchset, hostname, fail)
test('http://chrome-review.source.com/c/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/#/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/1/whatisthis', fail=True)
test('https://chrome-review.source.com/c/abc/', fail=True)
test('ssh://chrome-review.source.com/c/123/1/', fail=True)
def test_ParseIssueNumberArgument(self):
def test(arg, issue=None, patchset=None, hostname=None, fail=False):
result = git_cl.ParseIssueNumberArgument(arg)
self.assertIsNotNone(result)
if fail:
self.assertFalse(result.valid)
else:
self.assertEqual(result.issue, issue)
self.assertEqual(result.patchset, patchset)
self.assertEqual(result.hostname, hostname)
test('123', 123)
test('', fail=True)
test('abc', fail=True)
test('123/1', fail=True)
test('123a', fail=True)
test('ssh://chrome-review.source.com/#/c/123/4/', fail=True)
# Rietveld.
test('https://codereview.source.com/123',
123, None, 'codereview.source.com')
test('https://codereview.source.com/www123', fail=True)
    # Gerrit.
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/bad/123/4', fail=True)
def test_get_bug_line_values(self):
f = lambda p, bugs: list(git_cl._get_bug_line_values(p, bugs))
self.assertEqual(f('', ''), [])
self.assertEqual(f('', '123,v8:456'), ['123', 'v8:456'])
self.assertEqual(f('v8', '456'), ['v8:456'])
self.assertEqual(f('v8', 'chromium:123,456'), ['v8:456', 'chromium:123'])
    # Not nice, but not worth carrying.
self.assertEqual(f('v8', 'chromium:123,456,v8:123'),
['v8:456', 'chromium:123', 'v8:123'])
def _test_git_number(self, parent_msg, dest_ref, child_msg,
parent_hash='parenthash'):
desc = git_cl.ChangeDescription(child_msg)
desc.update_with_git_number_footers(parent_hash, parent_msg, dest_ref)
return desc.description
def assertEqualByLine(self, actual, expected):
self.assertEqual(actual.splitlines(), expected.splitlines())
def test_git_number_bad_parent(self):
with self.assertRaises(ValueError):
self._test_git_number('Parent', 'refs/heads/master', 'Child')
def test_git_number_bad_parent_footer(self):
with self.assertRaises(AssertionError):
self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: wrong',
'refs/heads/master', 'Child')
def test_git_number_bad_lineage_ignored(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#1}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}',
'refs/heads/master', 'Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#2}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}')
def test_git_number_same_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_same_branch_with_originals(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Whatever: value\n'
'Cr-Commit-Position: refs/copy/paste@{#22}')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Original-Whatever: value\n'
'Cr-Original-Commit-Position: refs/copy/paste@{#22}\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_new_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/master@{#12}')
def test_git_number_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_moooooooore_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/mooore',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/mooore@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_cherry_pick(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child, which is cherry-pick from master\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#100}\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)')
self.assertEqualByLine(
actual,
'Child, which is cherry-pick from master\n'
'\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)\n'
'\n'
'Cr-Original-Commit-Position: refs/heads/master@{#100}\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
class TestGitCl(TestCase):
def setUp(self):
super(TestGitCl, self).setUp()
self.calls = []
self._calls_done = []
self.mock(subprocess2, 'call', self._mocked_call)
self.mock(subprocess2, 'check_call', self._mocked_call)
self.mock(subprocess2, 'check_output', self._mocked_call)
self.mock(subprocess2, 'communicate',
lambda *a, **kw: ([self._mocked_call(*a, **kw), ''], 0))
self.mock(git_cl.gclient_utils, 'CheckCallAndFilter', self._mocked_call)
self.mock(git_common, 'is_dirty_git_tree', lambda x: False)
self.mock(git_common, 'get_or_create_merge_base',
lambda *a: (
self._mocked_call(['get_or_create_merge_base']+list(a))))
self.mock(git_cl, 'BranchExists', lambda _: True)
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '')
self.mock(git_cl, 'ask_for_data', self._mocked_call)
self.mock(git_cl, 'write_json', lambda path, contents:
self._mocked_call('write_json', path, contents))
self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock)
self.mock(git_cl.rietveld, 'Rietveld', RietveldMock)
self.mock(git_cl.rietveld, 'CachingRietveld', RietveldMock)
self.mock(git_cl.checkout, 'GitCheckout', GitCheckoutMock)
GitCheckoutMock.reset()
self.mock(git_cl.upload, 'RealMain', self.fail)
self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock)
self.mock(git_cl.auth, 'get_authenticator_for_host', AuthenticatorMock)
self.mock(git_cl.gerrit_util.GceAuthenticator, 'is_gce',
classmethod(lambda _: False))
self.mock(git_cl, 'DieWithError',
lambda msg: self._mocked_call(['DieWithError', msg]))
    # It's important to reset settings to not have inter-test interference.
git_cl.settings = None
def tearDown(self):
try:
# Note: has_failed returns True if at least 1 test ran so far, current
# included, has failed. That means current test may have actually ran
# fine, and the check for no leftover calls would be skipped.
if not self.has_failed():
self.assertEquals([], self.calls)
finally:
super(TestGitCl, self).tearDown()
def _mocked_call(self, *args, **_kwargs):
self.assertTrue(
self.calls,
'@%d Expected: <Missing> Actual: %r' % (len(self._calls_done), args))
top = self.calls.pop(0)
expected_args, result = top
    # Also log the mismatch; otherwise it could get caught in a try/finally and
    # be hard to diagnose.
if expected_args != args:
N = 5
prior_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) - N + i, c[0])
for i, c in enumerate(self._calls_done[-N:]))
following_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) + i + 1, c[0])
for i, c in enumerate(self.calls[:N]))
extended_msg = (
'A few prior calls:\n %s\n\n'
'This (expected):\n @%d: %r\n'
'This (actual):\n @%d: %r\n\n'
'A few following expected calls:\n %s' %
(prior_calls, len(self._calls_done), expected_args,
len(self._calls_done), args, following_calls))
git_cl.logging.error(extended_msg)
self.fail('@%d\n'
' Expected: %r\n'
' Actual: %r' % (
len(self._calls_done), expected_args, args))
self._calls_done.append(top)
if isinstance(result, Exception):
raise result
return result
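  # Illustrative note (hypothetical values, not used by any real test): each
  # entry in `self.calls` is an (expected_args, result) tuple consumed in
  # order by _mocked_call(); a result that is an Exception is raised instead
  # of returned.  A test could therefore be primed like this:
  #
  #   self.calls = [
  #       ((['git', 'config', 'user.email'],), '[email protected]'),
  #       ((['git', 'config', 'rietveld.server'],), CERR1),  # simulated failure
  #   ]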
def test_LoadCodereviewSettingsFromFile_gerrit(self):
codereview_file = StringIO.StringIO('GERRIT_HOST: true')
self.calls = [
((['git', 'config', '--unset-all', 'rietveld.cc'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.private'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.tree-status-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.viewvc-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-regex'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.force-https-commit-url'],),
CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-ignore-regex'],),
CERR1),
((['git', 'config', '--unset-all', 'rietveld.project'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.pending-ref-prefix'],),
CERR1),
((['git', 'config', '--unset-all', 'rietveld.run-post-upload-hook'],),
CERR1),
((['git', 'config', 'gerrit.host', 'true'],), ''),
]
self.assertIsNone(git_cl.LoadCodereviewSettingsFromFile(codereview_file))
@classmethod
def _is_gerrit_calls(cls, gerrit=False):
return [((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'gerrit.host'],), 'True' if gerrit else '')]
@classmethod
def _upload_calls(cls, similarity, find_copies, private):
return (cls._git_base_calls(similarity, find_copies) +
cls._git_upload_calls(private))
@classmethod
def _upload_no_rev_calls(cls, similarity, find_copies):
return (cls._git_base_calls(similarity, find_copies) +
cls._git_upload_no_rev_calls())
@classmethod
def _git_base_calls(cls, similarity, find_copies):
if similarity is None:
similarity = '50'
similarity_call = ((['git', 'config',
'branch.master.git-cl-similarity'],), CERR1)
else:
similarity_call = ((['git', 'config',
'branch.master.git-cl-similarity', similarity],), '')
if find_copies is None:
find_copies = True
find_copies_call = ((['git', 'config', '--bool',
'branch.master.git-find-copies'],), CERR1)
else:
val = str(find_copies).lower()
find_copies_call = ((['git', 'config', '--bool',
'branch.master.git-find-copies', val],), '')
if find_copies:
stat_call = ((['git', 'diff', '--no-ext-diff', '--stat',
'-l100000', '-C'+similarity,
'fake_ancestor_sha', 'HEAD'],), '+dat')
else:
stat_call = ((['git', 'diff', '--no-ext-diff', '--stat',
'-M'+similarity, 'fake_ancestor_sha', 'HEAD'],), '+dat')
return [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
similarity_call,
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
find_copies_call,
] + cls._is_gerrit_calls() + [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.rietveldissue'],), CERR1),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
((['git', 'config', 'rietveld.server'],),
'codereview.example.com'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master', 'master'],),
'fake_ancestor_sha'),
] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git', 'diff', '--name-status', '--no-renames', '-r',
'fake_ancestor_sha...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.rietveldpatchset'],), CERR1),
((['git', 'log', '--pretty=format:%s%n%n%b',
'fake_ancestor_sha...'],),
'foo'),
((['git', 'config', 'user.email'],), '[email protected]'),
stat_call,
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
'desc\n'),
((['git', 'config', 'rietveld.bug-prefix'],), ''),
]
@classmethod
def _git_upload_no_rev_calls(cls):
return [
((['git', 'config', 'core.editor'],), ''),
]
@classmethod
def _git_upload_calls(cls, private):
if private:
cc_call = []
private_call = []
else:
cc_call = [((['git', 'config', 'rietveld.cc'],), '')]
private_call = [
((['git', 'config', 'rietveld.private'],), '')]
return [
((['git', 'config', 'core.editor'],), ''),
] + cc_call + private_call + [
((['git', 'config', 'branch.master.base-url'],), ''),
((['git', 'config', 'rietveld.pending-ref-prefix'],), ''),
((['git',
'config', '--local', '--get-regexp', '^svn-remote\\.'],),
(('', None), 0)),
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'svn', 'info'],), ''),
((['git', 'config', 'rietveld.project'],), ''),
((['git', 'config', 'branch.master.rietveldissue', '1'],), ''),
((['git', 'config', 'branch.master.rietveldserver',
'https://codereview.example.com'],), ''),
((['git',
'config', 'branch.master.rietveldpatchset', '2'],), ''),
] + cls._git_post_upload_calls()
@classmethod
def _git_post_upload_calls(cls):
return [
((['git', 'rev-parse', 'HEAD'],), 'hash'),
((['git', 'symbolic-ref', 'HEAD'],), 'hash'),
((['git',
'config', 'branch.hash.last-upload-hash', 'hash'],), ''),
((['git', 'config', 'rietveld.run-post-upload-hook'],), ''),
]
@staticmethod
def _git_sanity_checks(diff_base, working_branch, get_remote_branch=True):
fake_ancestor = 'fake_ancestor'
fake_cl = 'fake_cl_for_patch'
return [
((['git',
'rev-parse', '--verify', diff_base],), fake_ancestor),
((['git',
'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor),
((['git',
'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl),
# Mock a config miss (error code 1)
((['git',
'config', 'gitcl.remotebranch'],), CERR1),
] + ([
# Call to GetRemoteBranch()
((['git',
'config', 'branch.%s.merge' % working_branch],),
'refs/heads/master'),
((['git',
'config', 'branch.%s.remote' % working_branch],), 'origin'),
] if get_remote_branch else []) + [
((['git', 'rev-list', '^' + fake_ancestor,
'refs/remotes/origin/master'],), ''),
]
@classmethod
def _dcommit_calls_1(cls):
return [
((['git', 'config', 'rietveld.autoupdate'],),
''),
((['git', 'config', 'rietveld.pending-ref-prefix'],),
''),
((['git',
'config', '--local', '--get-regexp', '^svn-remote\\.'],),
((('svn-remote.svn.url svn://svn.chromium.org/chrome\n'
'svn-remote.svn.fetch trunk/src:refs/remotes/origin/master'),
None),
0)),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git', 'config',
'branch.working.git-cl-similarity'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git', 'config', '--bool',
'branch.working.git-find-copies'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git',
'config', 'branch.working.rietveldissue'],), '12345'),
((['git',
'config', 'rietveld.server'],), 'codereview.example.com'),
((['git',
'config', 'branch.working.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.working.remote'],), 'origin'),
((['git', 'config', 'branch.working.merge'],),
'refs/heads/master'),
((['git', 'config', 'branch.working.remote'],), 'origin'),
((['git', 'rev-list', '--merges',
'--grep=^SVN changes up to revision [0-9]*$',
'refs/remotes/origin/master^!'],), ''),
((['git', 'rev-list', '^refs/heads/working',
'refs/remotes/origin/master'],),
''),
((['git',
'log', '--grep=^git-svn-id:', '-1', '--pretty=format:%H'],),
'3fc18b62c4966193eb435baabe2d18a3810ec82e'),
((['git',
'rev-list', '^3fc18b62c4966193eb435baabe2d18a3810ec82e',
'refs/remotes/origin/master'],), ''),
((['git',
'merge-base', 'refs/remotes/origin/master', 'HEAD'],),
'fake_ancestor_sha'),
]
@classmethod
def _dcommit_calls_normal(cls):
return [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],),
'00ff397798ea57439712ed7e04ab96e13969ef40'),
((['git',
'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...',
'.'],),
'M\tPRESUBMIT.py'),
((['git',
'config', 'branch.working.rietveldpatchset'],), '31137'),
((['git', 'config', 'branch.working.rietveldserver'],),
'codereview.example.com'),
((['git', 'config', 'user.email'],), '[email protected]'),
((['git', 'config', 'rietveld.tree-status-url'],), ''),
]
@classmethod
def _dcommit_calls_bypassed(cls):
return [
((['git', 'config', 'branch.working.rietveldserver'],),
'codereview.example.com'),
]
@classmethod
def _dcommit_calls_3(cls):
return [
((['git',
'diff', '--no-ext-diff', '--stat', '-l100000', '-C50',
'fake_ancestor_sha', 'refs/heads/working'],),
(' PRESUBMIT.py | 2 +-\n'
' 1 files changed, 1 insertions(+), 1 deletions(-)\n')),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-commit'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-cherry-pick'],), CERR1),
((['git', 'rev-parse', '--show-cdup'],), '\n'),
((['git', 'checkout', '-q', '-b', 'git-cl-commit'],), ''),
((['git', 'reset', '--soft', 'fake_ancestor_sha'],), ''),
((['git', 'commit', '-m',
'Issue: 12345\n\[email protected]\n\n'
'Review URL: https://codereview.example.com/12345 .'],),
''),
((['git', 'config', 'rietveld.force-https-commit-url'],), ''),
((['git',
'svn', 'dcommit', '-C50', '--no-rebase', '--rmdir'],),
(('', None), 0)),
((['git', 'checkout', '-q', 'working'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
]
@staticmethod
def _cmd_line(description, args, similarity, find_copies, private, cc):
"""Returns the upload command line passed to upload.RealMain()."""
return [
'upload', '--assume_yes', '--server',
'https://codereview.example.com',
'--message', description
] + args + [
'--cc', ','.join(['[email protected]'] + cc),
] + (['--private'] if private else []) + [
'--git_similarity', similarity or '50'
] + (['--git_no_find_copies'] if find_copies == False else []) + [
'fake_ancestor_sha', 'HEAD'
]
def _run_reviewer_test(
self,
upload_args,
expected_description,
returned_description,
final_description,
reviewers,
private=False,
cc=None):
"""Generic reviewer test framework."""
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
try:
similarity = upload_args[upload_args.index('--similarity')+1]
except ValueError:
similarity = None
if '--find-copies' in upload_args:
find_copies = True
elif '--no-find-copies' in upload_args:
find_copies = False
else:
find_copies = None
private = '--private' in upload_args
cc = cc or []
self.calls = self._upload_calls(similarity, find_copies, private)
def RunEditor(desc, _, **kwargs):
self.assertEquals(
'# Enter a description of the change.\n'
'# This will be displayed on the codereview site.\n'
'# The first line will also be used as the subject of the review.\n'
'#--------------------This line is 72 characters long'
'--------------------\n' +
expected_description,
desc)
return returned_description
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
def check_upload(args):
cmd_line = self._cmd_line(final_description, reviewers, similarity,
find_copies, private, cc)
self.assertEquals(cmd_line, args)
return 1, 2
self.mock(git_cl.upload, 'RealMain', check_upload)
git_cl.main(['upload'] + upload_args)
def test_no_reviewer(self):
self._run_reviewer_test(
[],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=',
'desc\n\nBUG=',
[])
def test_keep_similarity(self):
self._run_reviewer_test(
['--similarity', '70'],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=',
'desc\n\nBUG=',
[])
def test_keep_find_copies(self):
self._run_reviewer_test(
['--no-find-copies'],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=\n',
'desc\n\nBUG=',
[])
def test_private(self):
self._run_reviewer_test(
['--private'],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=\n',
'desc\n\nBUG=',
[])
def test_reviewers_cmd_line(self):
# Reviewer is passed as-is
description = 'desc\n\[email protected]\nBUG='
self._run_reviewer_test(
['-r' '[email protected]'],
description,
'\n%s\n' % description,
description,
['[email protected]'])
  def test_reviewer_tbr_overridden(self):
    # Reviewer is overridden with TBR
    # Also verifies the regexp works without a trailing LF
description = 'Foo Bar\n\[email protected]'
self._run_reviewer_test(
['-r' '[email protected]'],
'desc\n\[email protected]\nBUG=',
description.strip('\n'),
description,
['[email protected]'])
def test_reviewer_multiple(self):
# Handles multiple R= or TBR= lines.
description = (
'Foo Bar\[email protected]\nBUG=\[email protected]\n'
'[email protected],[email protected]')
self._run_reviewer_test(
[],
'desc\n\nBUG=',
description,
description,
['[email protected],[email protected]'],
cc=['[email protected]', '[email protected]'])
def test_reviewer_send_mail(self):
# --send-mail can be used without -r if R= is used
description = 'Foo Bar\[email protected]'
self._run_reviewer_test(
['--send-mail'],
'desc\n\nBUG=',
description.strip('\n'),
description,
['[email protected]', '--send_mail'])
def test_reviewer_send_mail_no_rev(self):
# Fails without a reviewer.
stdout = StringIO.StringIO()
self.calls = self._upload_no_rev_calls(None, None) + [
((['DieWithError', 'Must specify reviewers to send email.'],),
SystemExitMock())
]
def RunEditor(desc, _, **kwargs):
return desc
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
self.mock(sys, 'stdout', stdout)
with self.assertRaises(SystemExitMock):
git_cl.main(['upload', '--send-mail'])
self.assertEqual(
'Using 50% similarity for rename/copy detection. Override with '
'--similarity.\n',
stdout.getvalue())
def test_bug_on_cmd(self):
self._run_reviewer_test(
['--bug=500658,proj:123'],
'desc\n\nBUG=500658\nBUG=proj:123',
'# Blah blah comment.\ndesc\n\nBUG=500658\nBUG=proj:1234',
'desc\n\nBUG=500658\nBUG=proj:1234',
[])
def test_dcommit(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = (
self._dcommit_calls_1() +
self._git_sanity_checks('fake_ancestor_sha', 'working') +
self._dcommit_calls_normal() +
self._dcommit_calls_3())
git_cl.main(['dcommit'])
def test_dcommit_bypass_hooks(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = (
self._dcommit_calls_1() +
self._dcommit_calls_bypassed() +
self._dcommit_calls_3())
git_cl.main(['dcommit', '--bypass-hooks'])
def _land_rietveld_common(self, debug_stdout=False):
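    # Shared setup for the `git cl land` (Rietveld) tests: mocks Rietveld
    # description/comment updates and records the git calls common to every
    # land flow, ending just before the push-specific calls each test adds.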
if not debug_stdout:
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(RietveldMock, 'update_description', staticmethod(
lambda i, d: self._mocked_call(['update_description', i, d])))
self.mock(RietveldMock, 'add_comment', staticmethod(
lambda i, c: self._mocked_call(['add_comment', i, c])))
self.calls = [
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.pending-ref-prefix'],), CERR1),
((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],),
CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.git-cl-similarity'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', '--bool', 'branch.feature.git-find-copies'],),
CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '123'),
((['git', 'config', 'rietveld.server'],),
'https://codereview.chromium.org'),
((['git', 'config', 'branch.feature.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'branch.feature.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'rev-list', '--merges',
'--grep=^SVN changes up to revision [0-9]*$',
'refs/remotes/origin/master^!'],), ''),
((['git', 'rev-list', '^feature', 'refs/remotes/origin/master'],),
''), # No commits to rebase, according to local view of origin.
((['git', 'merge-base', 'refs/remotes/origin/master', 'HEAD'],),
'fake_ancestor_sha'),
] + self._git_sanity_checks('fake_ancestor_sha', 'feature') + [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), 'fake_sha'),
((['git', 'diff', '--name-status', '--no-renames', '-r',
'fake_ancestor_sha...', '.'],),
'M\tfile1.cpp'),
((['git', 'config', 'branch.feature.rietveldpatchset'],), '20001'),
((['git', 'config', 'branch.feature.rietveldserver'],),
'https://codereview.chromium.org'),
((['git', 'config', 'user.email'],), '[email protected]'),
((['git', 'config', 'rietveld.tree-status-url'],), CERR1),
((['git', 'diff', '--no-ext-diff', '--stat', '-l100000', '-C50',
'fake_ancestor_sha', 'feature'],),
         # This command just prints something like this:
# file1.cpp | 53 ++++++--
# 1 file changed, 33 insertions(+), 20 deletions(-)\n
''),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-commit'],),
''), # 0 return code means branch exists.
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-cherry-pick'],),
CERR1), # This means git-cl-cherry-pick branch does not exist.
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'checkout', '-q', '-b', 'git-cl-commit'],), ''),
((['git', 'reset', '--soft', 'fake_ancestor_sha'],), ''),
((['git', 'commit', '-m',
'Issue: 123\n\[email protected]\n\n'
'Review URL: https://codereview.chromium.org/123 .'],), ''),
((['git', 'config', 'branch.feature.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', '--get', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
]
def test_land_rietveld(self):
self._land_rietveld_common()
self.calls += [
((['git', 'config', 'rietveld.pending-ref-prefix'],), CERR1),
((['git', 'push', '--porcelain', 'origin', 'HEAD:refs/heads/master'],),
''),
((['git', 'rev-parse', 'HEAD'],), 'fake_sha_rebased'),
((['git', 'checkout', '-q', 'feature'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
((['git', 'config', 'rietveld.viewvc-url'],),
'https://chromium.googlesource.com/infra/infra/+/'),
((['update_description', 123,
'Issue: 123\n\[email protected]\n\nCommitted: '
'https://chromium.googlesource.com/infra/infra/+/fake_sha_rebased'],),
''),
((['add_comment', 123, 'Committed patchset #2 (id:20001) manually as '
'fake_sha_rebased (presubmit successful).'],), ''),
]
git_cl.main(['land'])
def test_land_rietveld_gnumbd(self):
self._land_rietveld_common()
self.mock(git_cl, 'WaitForRealCommit',
lambda *a: self._mocked_call(['WaitForRealCommit'] + list(a)))
self.calls += [
((['git', 'config', 'rietveld.pending-ref-prefix'],), 'refs/pending/'),
((['git', 'rev-parse', 'HEAD'],), 'fake_sha_rebased'),
((['git', 'retry', 'fetch', 'origin',
'+refs/pending/heads/master:refs/git-cl/pending/heads/master'],), ''),
((['git', 'checkout', 'refs/git-cl/pending/heads/master'],), ''),
((['git', 'cherry-pick', 'fake_sha_rebased'],), ''),
((['git', 'retry', 'push', '--porcelain', 'origin',
'HEAD:refs/pending/heads/master'],),''),
((['git', 'rev-parse', 'HEAD'],), 'fake_sha_rebased_on_pending'),
((['git', 'checkout', '-q', 'feature'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
((['WaitForRealCommit', 'origin', 'fake_sha_rebased_on_pending',
'refs/remotes/origin/master', 'refs/heads/master'],),
'fake_sha_gnumbded'),
((['git', 'config', 'rietveld.viewvc-url'],),
'https://chromium.googlesource.com/infra/infra/+/'),
((['update_description', 123,
'Issue: 123\n\[email protected]\n\nCommitted: '
'https://chromium.googlesource.com/infra/infra/+/fake_sha_gnumbded'],),
''),
((['add_comment', 123, 'Committed patchset #2 (id:20001) manually as '
'fake_sha_gnumbded (presubmit successful).'],),
''),
]
git_cl.main(['land'])
def test_land_rietveld_git_numberer(self):
self._land_rietveld_common(debug_stdout=False)
self.mock(git_cl, 'ShouldGenerateGitNumberFooters',
lambda *a: self._mocked_call(['ShouldGenerateGitNumberFooters']))
# Special mocks to check validity of timestamp.
original_git_amend_head = git_cl._git_amend_head
def _git_amend_head_mock(msg, tstamp):
self._mocked_call(['git_amend_head committer timestamp', tstamp])
return original_git_amend_head(msg, tstamp)
self.mock(git_cl, '_git_amend_head', _git_amend_head_mock)
self.calls += [
((['git', 'config', 'rietveld.pending-ref-prefix'],), CERR1),
((['ShouldGenerateGitNumberFooters'],), True),
((['git', 'show', '-s', '--format=%B', 'fake_ancestor_sha'],),
'This is parent commit.\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#543}\n'
'Cr-Branched-From: refs/svn/2014@{#2208}'),
((['git', 'show', '-s', '--format=%ct', 'fake_ancestor_sha'],),
'1480022355'), # Committer's unix timestamp.
((['git', 'show', '-s', '--format=%ct', 'HEAD'],),
'1480024000'),
((['git_amend_head committer timestamp', 1480024000],), None),
((['git', 'commit', '--amend', '-m',
'Issue: 123\n\[email protected]\n'
'\n'
'Review URL: https://codereview.chromium.org/123 .\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#544}\n'
'Cr-Branched-From: refs/svn/2014@{#2208}'],), ''),
((['git', 'push', '--porcelain', 'origin', 'HEAD:refs/heads/master'],),
''),
((['git', 'rev-parse', 'HEAD'],), 'fake_sha_rebased'),
((['git', 'checkout', '-q', 'feature'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
((['git', 'config', 'rietveld.viewvc-url'],),
'https://chromium.googlesource.com/infra/infra/+/'),
((['update_description', 123,
'Issue: 123\n\[email protected]\n'
'\n'
'Review URL: https://codereview.chromium.org/123 .\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#544}\n'
'Cr-Branched-From: refs/svn/2014@{#2208}\n'
'Committed: '
'https://chromium.googlesource.com/infra/infra/+/fake_sha_rebased'],),
''),
((['add_comment', 123, 'Committed patchset #2 (id:20001) manually as '
'fake_sha_rebased (presubmit successful).'],), ''),
]
git_cl.main(['land'])
def test_land_rietveld_git_numberer_bad_parent(self):
self._land_rietveld_common()
self.mock(git_cl, 'ShouldGenerateGitNumberFooters',
lambda *a: self._mocked_call(['ShouldGenerateGitNumberFooters']))
self.calls += [
((['git', 'config', 'rietveld.pending-ref-prefix'],), CERR1),
((['ShouldGenerateGitNumberFooters'],), True),
((['git', 'show', '-s', '--format=%B', 'fake_ancestor_sha'],),
'This is parent commit with no footer.'),
((['git', 'checkout', '-q', 'feature'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
]
with self.assertRaises(ValueError) as cm:
git_cl.main(['land'])
self.assertEqual(cm.exception.message,
'Unable to infer commit position from footers')
def test_ShouldGenerateGitNumberFooters(self):
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: StringIO.StringIO(
'GENERATE_GIT_NUMBER_FOOTERS: true\n'
))
self.assertTrue(git_cl.ShouldGenerateGitNumberFooters())
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: StringIO.StringIO(
'GENERATE_GIT_NUMBER_FOOTERS: false\n'
))
self.assertFalse(git_cl.ShouldGenerateGitNumberFooters())
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: StringIO.StringIO(
'GENERATE_GIT_NUMBER_FOOTERS: anything but true is false\n'
))
self.assertFalse(git_cl.ShouldGenerateGitNumberFooters())
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: StringIO.StringIO(
'whatever: ignored'
))
self.assertFalse(git_cl.ShouldGenerateGitNumberFooters())
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: None)
self.assertFalse(git_cl.ShouldGenerateGitNumberFooters())
def test_GitNumbererState_not_whitelisted_repo(self):
self.calls = [
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.pending-ref-prefix'],), CERR1),
]
res = git_cl._GitNumbererState.load(
remote_url='https://chromium.googlesource.com/chromium/tools/build',
remote_ref='refs/whatever')
self.assertEqual(res.pending_prefix, None)
self.assertEqual(res.should_git_number, False)
def test_GitNumbererState_fail_fetch(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'fetch', 'https://chromium.googlesource.com/chromium/src',
'+refs/meta/config:refs/git_cl/meta/config',
'+refs/gnumbd-config/main:refs/git_cl/gnumbd-config/main'],), CERR1),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.pending-ref-prefix'],),
'refs/pending-prefix'),
]
res = git_cl._GitNumbererState.load(
remote_url='https://chromium.googlesource.com/chromium/src',
remote_ref='refs/whatever')
self.assertEqual(res.pending_prefix, 'refs/pending-prefix')
self.assertEqual(res.should_git_number, False)
def test_GitNumbererState_fail_gnumbd_and_validator(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'fetch', 'https://chromium.googlesource.com/chromium/src',
'+refs/meta/config:refs/git_cl/meta/config',
'+refs/gnumbd-config/main:refs/git_cl/gnumbd-config/main'],), ''),
((['git', 'show', 'refs/git_cl/gnumbd-config/main:config.json'],),
         'ba d conig'),  # Intentionally malformed JSON so config parsing fails.
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.pending-ref-prefix'],), CERR1),
((['git', 'show', 'refs/git_cl/meta/config:project.config'],), CERR1),
]
res = git_cl._GitNumbererState.load(
remote_url='https://chromium.googlesource.com/chromium/src',
remote_ref='refs/whatever')
self.assertEqual(res.pending_prefix, None)
self.assertEqual(res.should_git_number, False)
def test_GitNumbererState_valid_configs(self):
    class NamedTempFileStub(StringIO.StringIO):
@classmethod
@contextlib.contextmanager
def create(cls, *_, **__):
yield cls()
name = 'tempfile'
    self.mock(git_cl.tempfile, 'NamedTemporaryFile', NamedTempFileStub.create)
self.calls = [
((['git', 'fetch', 'https://chromium.googlesource.com/chromium/src',
'+refs/meta/config:refs/git_cl/meta/config',
'+refs/gnumbd-config/main:refs/git_cl/gnumbd-config/main'],), ''),
((['git', 'show', 'refs/git_cl/gnumbd-config/main:config.json'],),
'''{
"pending_tag_prefix": "refs/pending-tags",
"pending_ref_prefix": "refs/pending",
"enabled_refglobs": [
"refs/heads/m*"
]
}
'''),
((['git', 'show', 'refs/git_cl/meta/config:project.config'],),
'''
[plugin "git-numberer"]
validate-enabled-refglob = refs/else/*
validate-enabled-refglob = refs/heads/*
validate-disabled-refglob = refs/heads/disabled
validate-disabled-refglob = refs/branch-heads/*
'''),
((['git', 'config', '-f', 'tempfile', '--get-all',
'plugin.git-numberer.validate-enabled-refglob'],),
'refs/else/*\n'
'refs/heads/*\n'),
((['git', 'config', '-f', 'tempfile', '--get-all',
'plugin.git-numberer.validate-disabled-refglob'],),
'refs/heads/disabled\n'
'refs/branch-heads/*\n'),
] * 3 # 3 tests below have exactly same IO.
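    # Given the configs above: refs/heads/master matches the gnumbd refglob
    # refs/heads/m*, so only the pending prefix applies; refs/heads/test
    # matches the validator's enabled glob, so it should be git-numbered;
    # refs/heads/disabled is explicitly disabled, so neither applies.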
res = git_cl._GitNumbererState.load(
remote_url='https://chromium.googlesource.com/chromium/src',
remote_ref='refs/heads/master')
self.assertEqual(res.pending_prefix, 'refs/pending')
self.assertEqual(res.should_git_number, False)
res = git_cl._GitNumbererState.load(
remote_url='https://chromium.googlesource.com/chromium/src',
remote_ref='refs/heads/test')
self.assertEqual(res.pending_prefix, None)
self.assertEqual(res.should_git_number, True)
res = git_cl._GitNumbererState.load(
remote_url='https://chromium.googlesource.com/chromium/src',
remote_ref='refs/heads/disabled')
self.assertEqual(res.pending_prefix, None)
self.assertEqual(res.should_git_number, False)
@classmethod
def _gerrit_ensure_auth_calls(cls, issue=None, skip_auth_check=False):
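    # Expected calls made by EnsureAuthenticated(); when the
    # gerrit.skip-ensure-authenticated config is set, only that lookup runs.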
cmd = ['git', 'config', '--bool', 'gerrit.skip-ensure-authenticated']
if skip_auth_check:
return [((cmd, ), 'true')]
calls = [((cmd, ), CERR1)]
if issue:
calls.extend([
((['git', 'config', 'branch.master.gerritserver'],), ''),
])
calls.extend([
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
])
return calls
@classmethod
def _gerrit_base_calls(cls, issue=None):
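    # Calls common to every Gerrit upload test, run before the
    # upload-specific calls from _gerrit_upload_calls() are appended.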
return [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.git-cl-similarity'],),
CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', '--bool', 'branch.master.git-find-copies'],),
CERR1),
] + cls._is_gerrit_calls(True) + [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.rietveldissue'],), CERR1),
((['git', 'config', 'branch.master.gerritissue'],),
CERR1 if issue is None else str(issue)),
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],),
'fake_ancestor_sha'),
# Calls to verify branch point is ancestor
] + (cls._gerrit_ensure_auth_calls(issue=issue) +
cls._git_sanity_checks('fake_ancestor_sha', 'master',
get_remote_branch=False)) + [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git',
'diff', '--name-status', '--no-renames', '-r',
'fake_ancestor_sha...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.gerritpatchset'],), CERR1),
] + ([] if issue else [
((['git',
'log', '--pretty=format:%s%n%n%b', 'fake_ancestor_sha...'],),
'foo'),
]) + [
((['git', 'config', 'user.email'],), '[email protected]'),
((['git',
'diff', '--no-ext-diff', '--stat', '-l100000', '-C50',
'fake_ancestor_sha', 'HEAD'],),
'+dat'),
]
@classmethod
def _gerrit_upload_calls(cls, description, reviewers, squash,
squash_mode='default',
expected_upstream_ref='origin/refs/heads/master',
ref_suffix='', notify=False,
post_amend_description=None, issue=None, cc=None):
if post_amend_description is None:
post_amend_description = description
calls = []
cc = cc or []
if squash_mode == 'default':
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],), ''),
((['git', 'config', '--bool', 'gerrit.squash-uploads'],), ''),
])
elif squash_mode in ('override_squash', 'override_nosquash'):
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],),
'true' if squash_mode == 'override_squash' else 'false'),
])
else:
assert squash_mode in ('squash', 'nosquash')
# If issue is given, then description is fetched from Gerrit instead.
if issue is None:
calls += [
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description)]
if not git_footers.get_footer_change_id(description) and not squash:
calls += [
# DownloadGerritHook(False)
((False, ),
''),
# Amending of commit message to get the Change-Id.
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description),
((['git', 'commit', '--amend', '-m', description],),
''),
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
post_amend_description)
]
if squash:
if not issue:
# Prompting to edit description on first upload.
calls += [
((['git', 'config', 'core.editor'],), ''),
((['RunEditor'],), description),
]
ref_to_push = 'abcdef0123456789'
calls += [
((['git', 'config', 'branch.master.merge'],),
'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],),
'origin'),
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],),
'origin/master'),
((['git', 'rev-parse', 'HEAD:'],),
'0123456789abcdef'),
((['git', 'commit-tree', '0123456789abcdef', '-p',
'origin/master', '-m', description],),
ref_to_push),
]
else:
ref_to_push = 'HEAD'
calls += [
((['git', 'rev-list',
expected_upstream_ref + '..' + ref_to_push],), ''),
]
notify_suffix = 'notify=%s' % ('ALL' if notify else 'NONE')
if ref_suffix:
ref_suffix += ',' + notify_suffix
else:
ref_suffix = '%' + notify_suffix
if reviewers:
ref_suffix += ',' + ','.join('r=%s' % email
for email in sorted(reviewers))
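    # e.g. with no prior suffix, notify=False and one reviewer, the refspec
    # pushed below gains a suffix like '%notify=NONE,r=<email>'.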
calls += [
((['git', 'push', 'origin',
ref_to_push + ':refs/for/refs/heads/master' + ref_suffix],),
('remote:\n'
          'remote: Processing changes: (\\)\n'
'remote: Processing changes: (|)\n'
'remote: Processing changes: (/)\n'
'remote: Processing changes: (-)\n'
'remote: Processing changes: new: 1 (/)\n'
'remote: Processing changes: new: 1, done\n'
'remote:\n'
'remote: New Changes:\n'
'remote: https://chromium-review.googlesource.com/123456 XXX.\n'
'remote:\n'
'To https://chromium.googlesource.com/yyy/zzz\n'
' * [new branch] hhhh -> refs/for/refs/heads/master\n')),
]
if squash:
calls += [
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash',
'abcdef0123456789'],), ''),
]
calls += [
((['git', 'config', 'rietveld.cc'],), ''),
((['AddReviewers', 'chromium-review.googlesource.com',
123456 if squash else None,
['[email protected]'] + cc, False],), ''),
]
calls += cls._git_post_upload_calls()
return calls
def _run_gerrit_upload_test(
self,
upload_args,
description,
reviewers=None,
squash=True,
squash_mode=None,
expected_upstream_ref='origin/refs/heads/master',
ref_suffix='',
notify=False,
post_amend_description=None,
issue=None,
cc=None):
"""Generic gerrit upload test framework."""
if squash_mode is None:
if '--no-squash' in upload_args:
squash_mode = 'nosquash'
elif '--squash' in upload_args:
squash_mode = 'squash'
else:
squash_mode = 'default'
reviewers = reviewers or []
cc = cc or []
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(same_cookie='same_cred'))
self.mock(git_cl._GerritChangelistImpl, '_GerritCommitMsgHookCheck',
lambda _, offer_removal: None)
self.mock(git_cl.gclient_utils, 'RunEditor',
lambda *_, **__: self._mocked_call(['RunEditor']))
self.mock(git_cl, 'DownloadGerritHook', self._mocked_call)
self.mock(git_cl.gerrit_util, 'AddReviewers',
lambda h, i, add, is_reviewer: self._mocked_call(
['AddReviewers', h, i, add, is_reviewer]))
self.calls = self._gerrit_base_calls(issue=issue)
self.calls += self._gerrit_upload_calls(
description, reviewers, squash,
squash_mode=squash_mode,
expected_upstream_ref=expected_upstream_ref,
ref_suffix=ref_suffix, notify=notify,
post_amend_description=post_amend_description,
issue=issue, cc=cc)
# Uncomment when debugging.
# print '\n'.join(map(lambda x: '%2i: %s' % x, enumerate(self.calls)))
git_cl.main(['upload'] + upload_args)
def test_gerrit_upload_without_change_id(self):
self._run_gerrit_upload_test(
['--no-squash'],
'desc\n\nBUG=\n',
[],
squash=False,
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_upload_without_change_id_override_nosquash(self):
self.mock(git_cl, 'DownloadGerritHook', self._mocked_call)
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n',
[],
squash=False,
squash_mode='override_nosquash',
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_no_reviewer(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n\nChange-Id: I123456789\n',
[],
squash=False,
squash_mode='override_nosquash')
def test_gerrit_patch_bad_chars(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self._run_gerrit_upload_test(
['-f', '-t', 'Don\'t put bad cha,.rs'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
squash=False,
squash_mode='override_nosquash',
ref_suffix='%m=Dont_put_bad_chars')
self.assertIn(
'WARNING: Patchset title may only contain alphanumeric chars '
'and spaces. Cleaned up title:\nDont put bad chars\n',
git_cl.sys.stdout.getvalue())
def test_gerrit_reviewers_cmd_line(self):
self._run_gerrit_upload_test(
['-r', '[email protected]', '--send-mail'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
['[email protected]'],
squash=False,
squash_mode='override_nosquash',
notify=True)
def test_gerrit_reviewer_multiple(self):
self._run_gerrit_upload_test(
[],
'desc\[email protected]\nBUG=\[email protected]\n'
'[email protected],[email protected]\n\n'
'Change-Id: 123456789\n',
['[email protected]', '[email protected]'],
squash=False,
squash_mode='override_nosquash',
ref_suffix='%l=Code-Review+1',
cc=['[email protected]', '[email protected]'])
def test_gerrit_upload_squash_first_is_default(self):
# Mock Gerrit CL description to indicate the first upload.
self.mock(git_cl.Changelist, 'GetDescription',
lambda *_: None)
self._run_gerrit_upload_test(
[],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_first(self):
# Mock Gerrit CL description to indicate the first upload.
self.mock(git_cl.Changelist, 'GetDescription',
lambda *_: None)
self._run_gerrit_upload_test(
['--squash'],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_reupload(self):
description = 'desc\nBUG=\n\nChange-Id: 123456789'
# Mock Gerrit CL description to indicate re-upload.
self.mock(git_cl.Changelist, 'GetDescription',
lambda *args: description)
self.mock(git_cl.Changelist, 'GetMostRecentPatchset',
lambda *args: 1)
self.mock(git_cl._GerritChangelistImpl, '_GetChangeDetail',
lambda *args: {'change_id': '123456789'})
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456)
def test_upload_branch_deps(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
def mock_run_git(*args, **_kwargs):
if args[0] == ['for-each-ref',
'--format=%(refname:short) %(upstream:short)',
'refs/heads']:
# Create a local branch dependency tree that looks like this:
# test1 -> test2 -> test3 -> test4 -> test5
# -> test3.1
        # test0 -> test6
branch_deps = [
'test2 test1', # test1 -> test2
'test3 test2', # test2 -> test3
'test3.1 test2', # test2 -> test3.1
'test4 test3', # test3 -> test4
'test5 test4', # test4 -> test5
'test6 test0', # test0 -> test6
'test7', # test7
]
return '\n'.join(branch_deps)
self.mock(git_cl, 'RunGit', mock_run_git)
class RecordCalls:
times_called = 0
record_calls = RecordCalls()
def mock_CMDupload(*args, **_kwargs):
record_calls.times_called += 1
return 0
self.mock(git_cl, 'CMDupload', mock_CMDupload)
self.calls = [
(('[Press enter to continue or ctrl-C to quit]',), ''),
]
class MockChangelist():
def __init__(self):
pass
def GetBranch(self):
return 'test1'
def GetIssue(self):
return '123'
def GetPatchset(self):
return '1001'
def IsGerrit(self):
return False
ret = git_cl.upload_branch_deps(MockChangelist(), [])
# CMDupload should have been called 5 times because of 5 dependent branches.
self.assertEquals(5, record_calls.times_called)
self.assertEquals(0, ret)
def test_gerrit_change_id(self):
self.calls = [
((['git', 'write-tree'], ),
'hashtree'),
((['git', 'rev-parse', 'HEAD~0'], ),
'branch-parent'),
((['git', 'var', 'GIT_AUTHOR_IDENT'], ),
'A B <[email protected]> 1456848326 +0100'),
((['git', 'var', 'GIT_COMMITTER_IDENT'], ),
'C D <[email protected]> 1456858326 +0100'),
((['git', 'hash-object', '-t', 'commit', '--stdin'], ),
'hashchange'),
]
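    # GenerateGerritChangeId should hash a synthetic commit object built from
    # the data above and prefix the result with 'I'.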
change_id = git_cl.GenerateGerritChangeId('line1\nline2\n')
self.assertEqual(change_id, 'Ihashchange')
  def test_description_append_footer(self):
for init_desc, footer_line, expected_desc in [
# Use unique desc first lines for easy test failure identification.
('foo', 'R=one', 'foo\n\nR=one'),
('foo\n\nR=one', 'BUG=', 'foo\n\nR=one\nBUG='),
('foo\n\nR=one', 'Change-Id: Ixx', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nChange-Id: Ixx', 'R=one', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'TBR=two',
'foo\n\nR=one\nTBR=two\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'Foo-Bar: baz',
'foo\n\nR=one\n\nChange-Id: Ixx\nFoo-Bar: baz'),
('foo\n\nChange-Id: Ixx', 'Foo-Bak: baz',
'foo\n\nChange-Id: Ixx\nFoo-Bak: baz'),
('foo', 'Change-Id: Ixx', 'foo\n\nChange-Id: Ixx'),
]:
desc = git_cl.ChangeDescription(init_desc)
desc.append_footer(footer_line)
self.assertEqual(desc.description, expected_desc)
def test_update_reviewers(self):
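    # Each tuple is (original description, reviewers to add,
    # expected description after update_reviewers()).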
data = [
('foo', [], 'foo'),
('foo\nR=xx', [], 'foo\nR=xx'),
('foo\nTBR=xx', [], 'foo\nTBR=xx'),
('foo', ['a@c'], 'foo\n\nR=a@c'),
('foo\nR=xx', ['a@c'], 'foo\n\nR=a@c, xx'),
('foo\nTBR=xx', ['a@c'], 'foo\n\nR=a@c\nTBR=xx'),
('foo\nTBR=xx\nR=yy', ['a@c'], 'foo\n\nR=a@c, yy\nTBR=xx'),
('foo\nBUG=', ['a@c'], 'foo\nBUG=\nR=a@c'),
('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], 'foo\n\nR=a@c, xx, bar\nTBR=yy'),
('foo', ['a@c', 'b@c'], 'foo\n\nR=a@c, b@c'),
('foo\nBar\n\nR=\nBUG=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='),
('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='),
# Same as the line before, but full of whitespaces.
(
'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'],
'foo\nBar\n\nR=c@c\n BUG =',
),
# Whitespaces aren't interpreted as new lines.
('foo BUG=allo R=joe ', ['c@c'], 'foo BUG=allo R=joe\n\nR=c@c'),
]
expected = [i[2] for i in data]
actual = []
for orig, reviewers, _expected in data:
obj = git_cl.ChangeDescription(orig)
obj.update_reviewers(reviewers)
actual.append(obj.description)
self.assertEqual(expected, actual)
def test_get_target_ref(self):
# Check remote or remote branch not present.
self.assertEqual(None, git_cl.GetTargetRef('origin', None, 'master', None))
self.assertEqual(None, git_cl.GetTargetRef(None,
'refs/remotes/origin/master',
'master', None))
# Check default target refs for branches.
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/master',
None, None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkgr',
None, None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkcr',
None, None))
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
None, None))
self.assertEqual('refs/diff/test',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/refs/diff/test',
None, None))
self.assertEqual('refs/heads/chrome/m42',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/chrome/m42',
None, None))
# Check target refs for user-specified target branch.
for branch in ('branch-heads/123', 'remotes/branch-heads/123',
'refs/remotes/branch-heads/123'):
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/master',
branch, None))
for branch in ('origin/master', 'remotes/origin/master',
'refs/remotes/origin/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch, None))
for branch in ('master', 'heads/master', 'refs/heads/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch, None))
# Check target refs for pending prefix.
self.assertEqual('prefix/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/master',
None, 'prefix/'))
def test_patch_when_dirty(self):
# Patch when local tree is dirty
self.mock(git_common, 'is_dirty_git_tree', lambda x: True)
self.assertNotEqual(git_cl.main(['patch', '123456']), 0)
def test_diff_when_dirty(self):
# Do 'git cl diff' when local tree is dirty
self.mock(git_common, 'is_dirty_git_tree', lambda x: True)
self.assertNotEqual(git_cl.main(['diff']), 0)
def _patch_common(self, is_gerrit=False, force_codereview=False,
new_branch=False):
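    # Common mocks for the `git cl patch` tests: patchset 60001 on Rietveld,
    # and patchsets 1 and 7 (current) of change 123456 on Gerrit.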
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl._RietveldChangelistImpl, 'GetMostRecentPatchset',
lambda x: '60001')
self.mock(git_cl._GerritChangelistImpl, '_GetChangeDetail',
lambda *args: {
'current_revision': '7777777777',
'revisions': {
'1111111111': {
'_number': 1,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/my/repo',
'ref': 'refs/changes/56/123456/1',
}},
},
'7777777777': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/my/repo',
'ref': 'refs/changes/56/123456/7',
}},
},
},
})
self.mock(git_cl.Changelist, 'GetDescription',
lambda *args: 'Description')
self.mock(git_cl, 'IsGitVersionAtLeast', lambda *args: True)
if new_branch:
self.calls = [((['git', 'new-branch', 'master'],), ''),]
else:
self.calls = [((['git', 'symbolic-ref', 'HEAD'],), 'master')]
if not force_codereview:
# These calls detect codereview to use.
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.rietveldissue'],), CERR1),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
]
if is_gerrit:
if not force_codereview:
self.calls += [
((['git', 'config', 'gerrit.host'],), 'true'),
]
else:
self.calls += [
((['git', 'config', 'gerrit.host'],), CERR1),
((['git', 'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'config', 'branch.master.rietveldserver',],), CERR1),
((['git', 'rev-parse', '--show-cdup'],), ''),
]
def _common_patch_successful(self, new_branch=False):
self._patch_common(new_branch=new_branch)
self.calls += [
((['git', 'commit', '-m',
'Description\n\n' +
'patch from issue 123456 at patchset 60001 ' +
'(http://crrev.com/123456#ps60001)'],), ''),
((['git', 'config', 'branch.master.rietveldissue', '123456'],),
''),
((['git', 'config', 'branch.master.rietveldserver',
'https://codereview.example.com'],), ''),
((['git', 'config', 'branch.master.rietveldpatchset', '60001'],),
''),
]
def test_patch_successful(self):
self._common_patch_successful()
self.assertEqual(git_cl.main(['patch', '123456']), 0)
def test_patch_successful_new_branch(self):
self._common_patch_successful(new_branch=True)
self.assertEqual(git_cl.main(['patch', '-b', 'master', '123456']), 0)
def test_patch_conflict(self):
self._patch_common()
GitCheckoutMock.conflict = True
self.assertNotEqual(git_cl.main(['patch', '123456']), 0)
def test_gerrit_patch_successful(self):
self._patch_common(is_gerrit=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver'],), ''),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
]
self.assertEqual(git_cl.main(['patch', '123456']), 0)
def test_patch_force_codereview(self):
self._patch_common(is_gerrit=True, force_codereview=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver'],), ''),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
]
self.assertEqual(git_cl.main(['patch', '--gerrit', '123456']), 0)
def test_gerrit_patch_url_successful(self):
self._patch_common(is_gerrit=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '1'],), ''),
]
self.assertEqual(git_cl.main(
['patch', 'https://chromium-review.googlesource.com/#/c/123456/1']), 0)
def test_gerrit_patch_conflict(self):
self._patch_common(is_gerrit=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), CERR1),
((['DieWithError', 'Command "git cherry-pick FETCH_HEAD" failed.\n'],),
SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
git_cl.main(['patch',
'https://chromium-review.googlesource.com/#/c/123456/1'])
def test_gerrit_patch_not_exists(self):
def notExists(_issue, *_, **kwargs):
self.assertFalse(kwargs['ignore_404'])
raise git_cl.gerrit_util.GerritError(404, '')
self.mock(git_cl.gerrit_util, 'GetChangeDetail', notExists)
url = 'https://chromium-review.googlesource.com'
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.rietveldissue'],), CERR1),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'gerrit.host'],), 'true'),
((['DieWithError', 'change 123456 at ' + url + ' does not exist '
'or you have no access to it'],), SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
self.assertEqual(1, git_cl.main(['patch', url + '/#/c/123456/1']))
def _checkout_calls(self):
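    # Branch -> issue mappings that `git cl checkout <issue>` searches.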
return [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.rietveldissue'], ),
('branch.retrying.rietveldissue 1111111111\n'
'branch.some-fix.rietveldissue 2222222222\n')),
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ),
('branch.ger-branch.gerritissue 123456\n'
'branch.gbranch654.gerritissue 654321\n')),
]
def test_checkout_gerrit(self):
"""Tests git cl checkout <issue>."""
self.calls = self._checkout_calls()
self.calls += [((['git', 'checkout', 'ger-branch'], ), '')]
self.assertEqual(0, git_cl.main(['checkout', '123456']))
def test_checkout_rietveld(self):
"""Tests git cl checkout <issue>."""
self.calls = self._checkout_calls()
self.calls += [((['git', 'checkout', 'some-fix'], ), '')]
self.assertEqual(0, git_cl.main(['checkout', '2222222222']))
def test_checkout_not_found(self):
"""Tests git cl checkout <issue>."""
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = self._checkout_calls()
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def test_checkout_no_branch_issues(self):
"""Tests git cl checkout <issue>."""
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.rietveldissue'], ), CERR1),
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ), CERR1),
]
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def _test_gerrit_ensure_authenticated_common(self, auth,
skip_auth_check=False):
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(hosts_with_creds=auth))
self.mock(git_cl, 'DieWithError',
lambda msg: self._mocked_call(['DieWithError', msg]))
self.mock(git_cl, 'ask_for_data',
lambda msg: self._mocked_call(['ask_for_data', msg]))
self.calls = self._gerrit_ensure_auth_calls(skip_auth_check=skip_auth_check)
cl = git_cl.Changelist(codereview='gerrit')
cl.branch = 'master'
cl.branchref = 'refs/heads/master'
cl.lookedup_issue = True
return cl
def test_gerrit_ensure_authenticated_missing(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com': 'git is ok, but gerrit one is missing',
})
self.calls.append(
((['DieWithError',
'Credentials for the following hosts are required:\n'
' chromium-review.googlesource.com\n'
'These are read from ~/.gitcookies (or legacy ~/.netrc)\n'
'You can (re)generate your credentails by visiting '
'https://chromium-review.googlesource.com/new-password'],), ''),)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_conflict(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com': 'one',
'chromium-review.googlesource.com': 'other',
})
self.calls.append(
((['ask_for_data', 'If you know what you are doing, '
'press Enter to continue, Ctrl+C to abort.'],), ''))
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_ok(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com': 'same',
'chromium-review.googlesource.com': 'same',
})
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_skipped(self):
cl = self._test_gerrit_ensure_authenticated_common(
auth={}, skip_auth_check=True)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_cmd_set_commit_rietveld(self):
self.mock(git_cl._RietveldChangelistImpl, 'SetFlags',
lambda _, v: self._mocked_call(['SetFlags', v]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
((['git', 'config', 'branch.feature.rietveldserver'],),
'https://codereview.chromium.org'),
((['SetFlags', {'commit': '1', 'cq_dry_run': '0'}], ), ''),
]
self.assertEqual(0, git_cl.main(['set-commit']))
def _cmd_set_commit_gerrit_common(self, vote):
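    # vote is the Commit-Queue label value: 0 clears it, 1 is a dry run,
    # 2 puts the CL into the commit queue.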
self.mock(git_cl.gerrit_util, 'SetReview',
lambda h, i, labels: self._mocked_call(
['SetReview', h, i, labels]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), CERR1),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['SetReview', 'chromium-review.googlesource.com', 123,
{'Commit-Queue': vote}],), ''),
]
def test_cmd_set_commit_gerrit_clear(self):
self._cmd_set_commit_gerrit_common(0)
self.assertEqual(0, git_cl.main(['set-commit', '-c']))
def test_cmd_set_commit_gerrit_dry(self):
self._cmd_set_commit_gerrit_common(1)
self.assertEqual(0, git_cl.main(['set-commit', '-d']))
def test_cmd_set_commit_gerrit(self):
self._cmd_set_commit_gerrit_common(2)
self.assertEqual(0, git_cl.main(['set-commit']))
def test_description_display(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
ChangelistMock.desc = 'foo\n'
self.assertEqual(0, git_cl.main(['description', '-d']))
self.assertEqual('foo\n', out.getvalue())
def test_description_rietveld(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetDescription', lambda *args: 'foobar')
self.assertEqual(0, git_cl.main([
'description', 'https://code.review.org/123123', '-d', '--rietveld']))
self.assertEqual('foobar\n', out.getvalue())
def test_StatusFieldOverrideIssueMissingArgs(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--issue must be specified')
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1', '--rietveld']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--field must be specified')
def test_StatusFieldOverrideIssue(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.calls = [
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
]
self.assertEqual(
git_cl.main(['status', '--issue', '1', '--rietveld', '--field', 'desc']),
0)
self.assertEqual(out.getvalue(), 'foobar\n')
def test_SetCloseOverrideIssue(self):
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.mock(git_cl.Changelist, 'CloseIssue', lambda *_: None)
self.calls = [
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
]
self.assertEqual(
git_cl.main(['set-close', '--issue', '1', '--rietveld']), 0)
def test_SetCommitOverrideIssue(self):
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.mock(git_cl.Changelist, 'SetCQState', lambda *_: None)
self.calls = [
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
((['git', 'config', 'rietveld.server'],), ''),
]
self.assertEqual(
git_cl.main(['set-close', '--issue', '1', '--rietveld']), 0)
def test_description_gerrit(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetDescription', lambda *args: 'foobar')
self.assertEqual(0, git_cl.main([
'description', 'https://code.review.org/123123', '-d', '--gerrit']))
self.assertEqual('foobar\n', out.getvalue())
def test_description_set_raw(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hihi'))
self.assertEqual(0, git_cl.main(['description', '-n', 'hihi']))
self.assertEqual('hihi', ChangelistMock.desc)
def test_description_appends_bug_line(self):
current_desc = 'Some.\n\nChange-Id: xxx'
def RunEditor(desc, _, **kwargs):
self.assertEquals(
'# Enter a description of the change.\n'
'# This will be displayed on the codereview site.\n'
'# The first line will also be used as the subject of the review.\n'
'#--------------------This line is 72 characters long'
'--------------------\n'
'Some.\n\nBUG=\n\nChange-Id: xxx',
desc)
# Simulate user changing something.
return 'Some.\n\nBUG=123\n\nChange-Id: xxx'
def UpdateDescriptionRemote(_, desc, force=False):
self.assertEquals(desc, 'Some.\n\nBUG=123\n\nChange-Id: xxx')
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.Changelist, 'GetDescription',
lambda *args: current_desc)
self.mock(git_cl._GerritChangelistImpl, 'UpdateDescriptionRemote',
UpdateDescriptionRemote)
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', 'core.editor'],), 'vi'),
]
self.assertEqual(0, git_cl.main(['description', '--gerrit']))
def test_description_set_stdin(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hi \r\n\t there\n\nman'))
self.assertEqual(0, git_cl.main(['description', '-n', '-']))
self.assertEqual('hi\n\t there\n\nman', ChangelistMock.desc)
def test_archive(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.rietveldissue'],), '1'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'config', 'branch.foo.rietveldissue'],), '456'),
((['git', 'config', 'branch.bar.rietveldissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'tag', 'git-cl-archived-456-foo', 'foo'],), ''),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f']))
def test_archive_current_branch_fails(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master'),
((['git', 'config', 'branch.master.rietveldissue'],), '1'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'symbolic-ref', 'HEAD'],), 'master')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'closed')])
self.assertEqual(1, git_cl.main(['archive', '-f']))
def test_archive_dry_run(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.rietveldissue'],), '1'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'config', 'branch.foo.rietveldissue'],), '456'),
((['git', 'config', 'branch.bar.rietveldissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--dry-run']))
def test_archive_no_tags(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.rietveldissue'],), '1'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'config', 'branch.foo.rietveldissue'],), '456'),
((['git', 'config', 'branch.bar.rietveldissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--notags']))
def test_cmd_issue_erase_existing(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), CERR1),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
        # Let this command fail (retcode=1); the failure should be ignored.
((['git', 'config', '--unset', 'branch.feature.last-upload-hash'],),
CERR1),
((['git', 'config', '--unset', 'branch.feature.gerritissue'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritpatchset'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritserver'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritsquashhash'],),
''),
]
self.assertEqual(0, git_cl.main(['issue', '0']))
def test_cmd_issue_json(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],),
'https://codereview.chromium.org'),
((['git', 'config', 'branch.feature.rietveldserver'],), ''),
(('write_json', 'output.json',
{'issue': 123, 'issue_url': 'https://codereview.chromium.org/123'}),
''),
]
self.assertEqual(0, git_cl.main(['issue', '--json', 'output.json']))
def test_git_cl_try_default_cq_dry_run(self):
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._RietveldChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],),
'https://codereview.chromium.org'),
((['git', 'config', 'branch.feature.rietveldserver'],), ''),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['get_or_create_merge_base', 'feature', 'feature'],),
'fake_ancestor_sha'),
((['GetChange', 'fake_ancestor_sha', None], ),
git_cl.presubmit_support.GitChange(
'', '', '', '', '', '', '', '')),
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['DoGetTryMasters'], ), None),
((['SetCQState', git_cl._CQState.DRY_RUN], ), None),
]
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.assertEqual(0, git_cl.main(['try']))
self.assertEqual(
out.getvalue(),
'scheduled CQ Dry Run on https://codereview.chromium.org/123\n')
def test_git_cl_try_default_cq_dry_run_gerrit(self):
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._GerritChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
def _GetChangeDetail(gerrit_change_list_impl, opts=None):
# Get realistic expectations.
gerrit_change_list_impl._GetGerritHost()
return self._mocked_call(['_GetChangeDetail', opts or []])
self.mock(git_cl._GerritChangelistImpl, '_GetChangeDetail',
_GetChangeDetail)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), CERR1),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['_GetChangeDetail', []],), {'status': 'OPEN'}),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['get_or_create_merge_base', 'feature', 'feature'],),
'fake_ancestor_sha'),
((['GetChange', 'fake_ancestor_sha', None], ),
git_cl.presubmit_support.GitChange(
'', '', '', '', '', '', '', '')),
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['DoGetTryMasters'], ), None),
((['SetCQState', git_cl._CQState.DRY_RUN], ), None),
]
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.assertEqual(0, git_cl.main(['try']))
self.assertEqual(
out.getvalue(),
'scheduled CQ Dry Run on '
'https://chromium-review.googlesource.com/123456\n')
def test_git_cl_try_buildbucket_with_properties_rietveld(self):
self.mock(git_cl._RietveldChangelistImpl, 'GetIssueProperties',
lambda _: {
'owner_email': '[email protected]',
'private': False,
'closed': False,
'project': 'depot_tools',
'patchsets': [20001],
})
self.mock(git_cl.uuid, 'uuid4', lambda: 'uuid4')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],),
'https://codereview.chromium.org'),
((['git', 'config', 'branch.feature.rietveldpatchset'],), '20001'),
((['git', 'config', 'branch.feature.rietveldserver'],), CERR1),
]
def _buildbucket_retry(*_, **kw):
# self.maxDiff = 10000
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 1)
build = body['builds'][0]
params = json.loads(build.pop('parameters_json'))
self.assertEqual(params, {
u'builder_name': u'win',
u'changes': [{u'author': {u'email': u'[email protected]'},
u'revision': None}],
u'properties': {
u'category': u'git_cl_try',
u'issue': 123,
u'key': u'val',
u'json': [{u'a': 1}, None],
u'master': u'tryserver.chromium',
u'patch_project': u'depot_tools',
u'patch_storage': u'rietveld',
u'patchset': 20001,
u'rietveld': u'https://codereview.chromium.org',
}
})
self.assertEqual(build, {
u'bucket': u'master.tryserver.chromium',
u'client_operation_id': u'uuid4',
u'tags': [u'builder:win',
u'buildset:patch/rietveld/codereview.chromium.org/123/20001',
u'user_agent:git_cl_try',
u'master:tryserver.chromium'],
})
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(0, git_cl.main([
'try', '-m', 'tryserver.chromium', '-b', 'win',
'-p', 'key=val', '-p', 'json=[{"a":1}, null]']))
self.assertRegexpMatches(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\nBucket: master.tryserver.chromium')
def test_git_cl_try_buildbucket_with_properties_gerrit(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 7)
self.mock(git_cl.uuid, 'uuid4', lambda: 'uuid4')
def _GetChangeDetail(gerrit_change_list_impl, opts=None):
# Get realistic expectations.
gerrit_change_list_impl._GetGerritHost()
return self._mocked_call(['_GetChangeDetail', opts or []])
self.mock(git_cl._GerritChangelistImpl, '_GetChangeDetail',
_GetChangeDetail)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), CERR1),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['_GetChangeDetail', []],), {'status': 'OPEN'}),
((['_GetChangeDetail', ['DETAILED_ACCOUNTS']],),
{'owner': {'email': '[email protected]'}}),
((['_GetChangeDetail', ['ALL_REVISIONS']],), {
'project': 'depot_tools',
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
]
def _buildbucket_retry(*_, **kw):
# self.maxDiff = 10000
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 1)
build = body['builds'][0]
params = json.loads(build.pop('parameters_json'))
self.assertEqual(params, {
u'builder_name': u'win',
u'changes': [{u'author': {u'email': u'[email protected]'},
u'revision': None}],
u'properties': {
u'category': u'git_cl_try',
u'key': u'val',
u'json': [{u'a': 1}, None],
u'master': u'tryserver.chromium',
u'patch_gerrit_url':
u'https://chromium-review.googlesource.com',
u'patch_issue': 123456,
u'patch_project': u'depot_tools',
u'patch_ref': u'refs/changes/56/123456/7',
u'patch_repository_url':
u'https://chromium.googlesource.com/depot_tools',
u'patch_set': 7,
u'patch_storage': u'gerrit',
}
})
self.assertEqual(build, {
u'bucket': u'master.tryserver.chromium',
u'client_operation_id': u'uuid4',
u'tags': [
u'builder:win',
u'buildset:patch/gerrit/chromium-review.googlesource.com/123456/7',
u'user_agent:git_cl_try',
u'master:tryserver.chromium'],
})
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(0, git_cl.main([
'try', '-m', 'tryserver.chromium', '-b', 'win',
'-p', 'key=val', '-p', 'json=[{"a":1}, null]']))
self.assertRegexpMatches(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\nBucket: master.tryserver.chromium')
def test_git_cl_try_buildbucket_bucket_flag(self):
self.mock(git_cl._RietveldChangelistImpl, 'GetIssueProperties',
lambda _: {
'owner_email': '[email protected]',
'private': False,
'closed': False,
'project': 'depot_tools',
'patchsets': [20001],
})
self.mock(git_cl.uuid, 'uuid4', lambda: 'uuid4')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],),
'https://codereview.chromium.org'),
((['git', 'config', 'branch.feature.rietveldpatchset'],), '20001'),
((['git', 'config', 'branch.feature.rietveldserver'],), CERR1),
]
def _buildbucket_retry(*_, **kw):
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 1)
build = body['builds'][0]
params = json.loads(build.pop('parameters_json'))
self.assertEqual(params, {
u'builder_name': u'win',
u'changes': [{u'author': {u'email': u'[email protected]'},
u'revision': None}],
u'properties': {
u'category': u'git_cl_try',
u'issue': 123,
u'patch_project': u'depot_tools',
u'patch_storage': u'rietveld',
u'patchset': 20001,
u'rietveld': u'https://codereview.chromium.org',
}
})
self.assertEqual(build, {
u'bucket': u'test.bucket',
u'client_operation_id': u'uuid4',
u'tags': [u'builder:win',
u'buildset:patch/rietveld/codereview.chromium.org/123/20001',
u'user_agent:git_cl_try'],
})
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(0, git_cl.main([
'try', '-B', 'test.bucket', '-b', 'win']))
self.assertRegexpMatches(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\nBucket: test.bucket')
def test_git_cl_try_bots_on_multiple_masters(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 20001)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],),
'https://codereview.chromium.org'),
((['git', 'config', 'branch.feature.rietveldserver'],), CERR1),
((['git', 'config', 'branch.feature.rietveldpatchset'],), '20001'),
]
def _buildbucket_retry(*_, **kw):
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 2)
first_build_params = json.loads(body['builds'][0]['parameters_json'])
self.assertEqual(first_build_params['builder_name'], 'builder1')
self.assertEqual(first_build_params['properties']['master'], 'master1')
first_build_params = json.loads(body['builds'][1]['parameters_json'])
self.assertEqual(first_build_params['builder_name'], 'builder2')
self.assertEqual(first_build_params['properties']['master'], 'master2')
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.urllib2, 'urlopen', lambda _: StringIO.StringIO(
json.dumps({'builder1': ['master1'], 'builder2': ['master2']})))
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(
0, git_cl.main(['try', '-b', 'builder1', '-b', 'builder2']))
self.assertEqual(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\n'
'Bucket: master.master1\n'
' builder1: []\n'
'Bucket: master.master2\n'
' builder2: []\n'
'To see results here, run: git cl try-results\n'
'To see results in browser, run: git cl web\n')
def _common_GerritCommitMsgHookCheck(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.os.path, 'abspath',
lambda path: self._mocked_call(['abspath', path]))
self.mock(git_cl.os.path, 'exists',
lambda path: self._mocked_call(['exists', path]))
self.mock(git_cl.gclient_utils, 'FileRead',
lambda path: self._mocked_call(['FileRead', path]))
self.mock(git_cl.gclient_utils, 'rm_file_or_tree',
lambda path: self._mocked_call(['rm_file_or_tree', path]))
self.calls = [
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['abspath', '../'],), '/abs/git_repo_root'),
]
return git_cl.Changelist(codereview='gerrit', issue=123)
def test_GerritCommitMsgHookCheck_custom_hook(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'#!/bin/sh\necho "custom hook"')
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck_not_exists(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), False),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'...\n# From Gerrit Code Review\n...\nadd_ChangeId()\n'),
(('Do you want to remove it now? [Yes/No]',), 'Yes'),
((['rm_file_or_tree', '/abs/git_repo_root/.git/hooks/commit-msg'],),
''),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCmdLand(self):
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritsquashhash'],),
'deadbeaf'),
((['git', 'diff', 'deadbeaf'],), ''), # No diff.
((['git', 'config', 'branch.feature.gerritserver'],),
'chromium-review.googlesource.com'),
]
cl = git_cl.Changelist(issue=123, codereview='gerrit')
cl._codereview_impl._GetChangeDetail = lambda _: {
'labels': {},
'current_revision': 'deadbeaf',
}
cl._codereview_impl._GetChangeCommit = lambda: {
'commit': 'deadbeef',
'web_links': [{'name': 'gerrit',
'url': 'https://git.googlesource.com/test/+/deadbeef'}],
}
cl._codereview_impl.SubmitIssue = lambda wait_for_merge: None
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.assertEqual(0, cl.CMDLand(force=True, bypass_hooks=True, verbose=True))
self.assertRegexpMatches(out.getvalue(), 'Issue.*123 has been submitted')
self.assertRegexpMatches(out.getvalue(), 'Landed as .*deadbeef')
BUILDBUCKET_BUILDS_MAP = {
'9000': {
'id': '9000',
'status': 'STARTED',
'url': 'http://build.cr.org/p/x.y/builders/my-builder/builds/2',
'result_details_json': '{"properties": {}}',
'bucket': 'master.x.y',
'created_by': 'user:[email protected]',
'created_ts': '147200002222000',
'parameters_json': '{"builder_name": "my-builder", "category": ""}',
},
'8000': {
'id': '8000',
'status': 'COMPLETED',
'result': 'FAILURE',
'failure_reason': 'BUILD_FAILURE',
'url': 'http://build.cr.org/p/x.y/builders/my-builder/builds/1',
'result_details_json': '{"properties": {}}',
'bucket': 'master.x.y',
'created_by': 'user:[email protected]',
'created_ts': '147200001111000',
'parameters_json': '{"builder_name": "my-builder", "category": ""}',
},
}
def test_write_try_results_json(self):
expected_output = [
{
'buildbucket_id': '8000',
'bucket': 'master.x.y',
'builder_name': 'my-builder',
'status': 'COMPLETED',
'result': 'FAILURE',
'failure_reason': 'BUILD_FAILURE',
'url': 'http://build.cr.org/p/x.y/builders/my-builder/builds/1',
},
{
'buildbucket_id': '9000',
'bucket': 'master.x.y',
'builder_name': 'my-builder',
'status': 'STARTED',
'result': None,
'failure_reason': None,
'url': 'http://build.cr.org/p/x.y/builders/my-builder/builds/2',
}
]
self.calls = [(('write_json', 'output.json', expected_output), '')]
git_cl.write_try_results_json('output.json', self.BUILDBUCKET_BUILDS_MAP)
def _setup_fetch_try_jobs(self, most_recent_patchset=20001):
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetMostRecentPatchset',
lambda *args: most_recent_patchset)
self.mock(git_cl.auth, 'get_authenticator_for_host', lambda host, _cfg:
self._mocked_call(['get_authenticator_for_host', host]))
self.mock(git_cl, '_buildbucket_retry', lambda *_, **__:
self._mocked_call(['_buildbucket_retry']))
def _setup_fetch_try_jobs_rietveld(self, *request_results):
self._setup_fetch_try_jobs(most_recent_patchset=20001)
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), '1'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'config', 'branch.feature.rietveldpatchset'],), '20001'),
((['git', 'config', 'branch.feature.rietveldserver'],),
'codereview.example.com'),
((['get_authenticator_for_host', 'codereview.example.com'],),
AuthenticatorMock()),
] + [((['_buildbucket_retry'],), r) for r in request_results]
def test_fetch_try_jobs_none_rietveld(self):
self._setup_fetch_try_jobs_rietveld({})
# Simulate that user isn't logged in.
self.mock(AuthenticatorMock, 'has_cached_credentials', lambda _: False)
self.assertEqual(0, git_cl.main(['try-results']))
self.assertRegexpMatches(sys.stdout.getvalue(),
'Warning: Some results might be missing')
self.assertRegexpMatches(sys.stdout.getvalue(), 'No try jobs')
def test_fetch_try_jobs_some_rietveld(self):
self._setup_fetch_try_jobs_rietveld({
'builds': self.BUILDBUCKET_BUILDS_MAP.values(),
})
self.assertEqual(0, git_cl.main(['try-results']))
self.assertRegexpMatches(sys.stdout.getvalue(), '^Failures:')
self.assertRegexpMatches(sys.stdout.getvalue(), 'Started:')
self.assertRegexpMatches(sys.stdout.getvalue(), '2 try jobs')
def _setup_fetch_try_jobs_gerrit(self, *request_results):
self._setup_fetch_try_jobs(most_recent_patchset=13)
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.rietveldissue'],), CERR1),
((['git', 'config', 'branch.feature.gerritissue'],), '1'),
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# Simulate that Gerrit has more patchsets than local.
# ((['git', 'config', 'branch.feature.gerritpatchset'],), '12'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://x-review.googlesource.com'),
((['get_authenticator_for_host', 'x-review.googlesource.com'],),
AuthenticatorMock()),
] + [((['_buildbucket_retry'],), r) for r in request_results]
def test_fetch_try_jobs_none_gerrit(self):
self._setup_fetch_try_jobs_gerrit({})
self.assertEqual(0, git_cl.main(['try-results']))
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# self.assertRegexpMatches(
# sys.stdout.getvalue(),
# r'Warning: Codereview server has newer patchsets \(13\)')
self.assertRegexpMatches(sys.stdout.getvalue(), 'No try jobs')
def test_fetch_try_jobs_some_gerrit(self):
self._setup_fetch_try_jobs_gerrit({
'builds': self.BUILDBUCKET_BUILDS_MAP.values(),
})
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# self.calls.remove(
# ((['git', 'config', 'branch.feature.gerritpatchset'],), '12'))
self.assertEqual(0, git_cl.main(['try-results', '--patchset', '5']))
# ... and doesn't result in warning.
self.assertNotRegexpMatches(sys.stdout.getvalue(), 'Warning')
self.assertRegexpMatches(sys.stdout.getvalue(), '^Failures:')
self.assertRegexpMatches(sys.stdout.getvalue(), 'Started:')
self.assertRegexpMatches(sys.stdout.getvalue(), '2 try jobs')
if __name__ == '__main__':
git_cl.logging.basicConfig(
level=git_cl.logging.DEBUG if '-v' in sys.argv else git_cl.logging.ERROR)
unittest.main()
|
the-stack_0_20645 | import polyomino as mino
import unittest
import cProfile
class TestPolyomino(unittest.TestCase):
def setUp(self):
# Test is this guy:
# [][][]
# []
self.test_mino = mino.Polyomino([(0,0), (0,1), (0,2), (1,0)])
def test_rotations_reflections(self):
# Check rotations
rot_left = mino.Polyomino([(0,0),(1,0),(2,0),(2,1)])
rot_half = mino.Polyomino([(0,2),(1,0),(1,1),(1,2)])
rot_right = mino.Polyomino([(0,0),(0,1),(1,1),(2,1)])
self.assertEqual(rot_left, self.test_mino.rotate_left())
self.assertEqual(rot_half, self.test_mino.rotate_half())
self.assertEqual(rot_right, self.test_mino.rotate_right())
# Test rotations()
self.assertEqual({self.test_mino, rot_left, rot_half, rot_right},
set(self.test_mino.rotations()),
msg="The rotations() function doesn't work.")
# Check reflections
ref_horiz = mino.Polyomino([(0,0),(0,1),(0,2),(1,2)])
ref_vert = mino.Polyomino([(0,0),(1,0),(1,1),(1,2)])
ref_diag = mino.Polyomino([(0,0),(0,1),(1,0),(2,0)])
ref_skew = mino.Polyomino([(0,1),(1,1),(2,0),(2,1)])
self.assertEqual(ref_vert, self.test_mino.reflect_vert())
self.assertEqual(ref_horiz, self.test_mino.reflect_horiz())
self.assertEqual(ref_diag, self.test_mino.reflect_diag())
self.assertEqual(ref_skew, self.test_mino.reflect_skew())
# Test transforms()
self.assertEqual({self.test_mino, rot_left, rot_half, rot_right,
ref_horiz, ref_vert, ref_diag, ref_skew},
set(self.test_mino.transforms()),
msg="transforms() doesn't work.")
def test_shape(self):
h_exp, w_exp = expected = (2, 3)
self.assertEqual(expected, self.test_mino.shape,
msg="Wrong shape")
self.assertEqual(h_exp, self.test_mino.height,
msg="Wrong height")
self.assertEqual(w_exp, self.test_mino.width,
msg="Wrong width")
def test_grid(self):
expected = [[True, True, True],[True, False, False]]
self.assertEqual(expected, self.test_mino.grid(),
msg="Wrong grid representation")
def test_str(self):
pstr = str(self.test_mino)
expected = "[][][]\n[] "
self.assertEqual(expected, pstr,
msg="Wrong repr: {0}".format(pstr))
def test_generate(self):
# Check sizes of the generation
sizes_fixed = [0, 1, 2, 6, 19, 63, 216, 760, 2725, 9910]
sizes_onesided = [0, 1, 1, 2, 7, 18, 60, 196, 704, 2500]
sizes_free = [0, 1, 1, 2, 5, 12, 35, 108, 369, 1285]
for i in range(10):
minos = mino.generate(i)
minos_onesided = mino.one_sided(minos)
minos_free = mino.free(minos)
self.assertEqual(sizes_fixed[i], len(minos),
msg="Wrong fixed size for n={0}".format(i))
self.assertEqual(sizes_onesided[i], len(minos_onesided),
msg="Wrong onesided size for n={0}".format(i))
self.assertEqual(sizes_free[i], len(minos_free),
msg="Wrong free size for n={0}".format(i))
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestPolyomino)
unittest.TextTestRunner(verbosity=2).run(suite)
|
the-stack_0_20646 | import numpy as np
import pytest
from sunkit_instruments import suvi
# Test files are all remote data.
pytestmark = pytest.mark.remote_data
def test_suvi_despiking_fits(L1B_FITS):
_, l1b_fits_data, _ = suvi.read_suvi(
L1B_FITS,
)
despiked_l1b_fits_data = l1b_fits_data
despiked_l1b_fits_data = suvi.despike_l1b_file(L1B_FITS)
assert not np.array_equal(l1b_fits_data, despiked_l1b_fits_data)
def test_suvi_despiking_nc(L1B_NC):
_, l1b_nc_data, _ = suvi.read_suvi(L1B_NC)
despiked_l1b_nc_data = l1b_nc_data
despiked_l1b_nc_data = suvi.despike_l1b_file(L1B_NC)
assert not np.array_equal(l1b_nc_data, despiked_l1b_nc_data)
def test_get_response_nc(L1B_NC):
l1b_nc_response = suvi.get_response(L1B_NC)
assert l1b_nc_response["wavelength_channel"] == 171
def test_get_response_fits(L1B_FITS):
l1b_fits_response = suvi.get_response(L1B_FITS)
assert l1b_fits_response["wavelength_channel"] == 171
def test_get_response_wavelength():
response_195 = suvi.get_response(195)
assert response_195["wavelength_channel"] == 195
|
the-stack_0_20648 | # model settings
det_thr = 0.8
rec_thr = 0.3
model = dict(
type='MaskTextSpotter',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=64,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=64,
feat_channels=64,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHeadWithText',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=64,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=64,
fc_out_channels=512,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=64,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=64,
conv_out_channels=64,
num_classes=1,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
text_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=28, sampling_ratio=0),
out_channels=64,
featmap_strides=[4, 8, 16, 32]),
text_head=dict(
type='TextRecognitionHeadAttention',
input_feature_size=[28, 28],
encoder_dim_input=64,
encoder_dim_internal=256,
encoder_num_layers=3,
decoder_input_feature_size=[28, 28],
decoder_max_seq_len=28,
decoder_vocab_size=38,
decoder_dim_hidden=256,
decoder_sos_index=0,
decoder_rnn_type='GRU',
dropout_ratio=0.5
),
text_thr=rec_thr),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1),
text_assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.99,
neg_iou_thr=0.5,
min_pos_iou=0.99,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
text_sampler=dict(
type='RandomSampler',
num=128,
pos_fraction=1.0,
neg_pos_ub=-1,
add_gt_as_proposals=True),
area_per_symbol_thr = 25,
mask_size=28,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_across_levels=False,
nms_pre=300,
nms_post=300,
max_num=300,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=det_thr,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5),
score_thr=det_thr))
dataset_type = 'CocoWithTextDataset'
data_root = 'data'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(
type='RandomRotate90and270',
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_text=True),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes',
'gt_texts': 'texts'
},
update_pad_shape=False,
skip_img_without_anno=True),
dict(
type='Resize',
img_scale=[(1280, 768), (1280, 768 + 64), (1280, 768 - 64), (1280 + 64, 768), (1280 - 64, 768),
(1216, 704), (1216 - 64, 704), (1216, 704 - 64), (1216-64, 704 - 64)],
multiscale_mode='value',
keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_texts']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1280, 768),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=3,
train=dict(
type='RepeatDataset',
times=6,
dataset=dict(
type=dataset_type,
ann_file=[data_root + 'openimages_v5_train_1_2_f.json',
data_root + 'dataset_train_wo_tests_ic13_ic15_tt.json'],
img_prefix=[data_root, data_root],
classes=('text', ),
min_size=0,
max_texts_num=150,
pipeline=train_pipeline)
),
val=dict(
type=dataset_type,
ann_file=data_root + 'icdar2015_test.json',
img_prefix=data_root,
classes=('text',),
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'icdar2015_test.json',
img_prefix=data_root,
classes=('text',),
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm', 'f1', 'word_spotting'])
# optimizer
optimizer = dict(
type='SGD',
lr=0.02,
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='CosineAnnealing',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
min_lr=0.00001)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
checkpoint_config = dict(interval=1)
# yapf:disable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
total_epochs = 25
device_ids = range(4)
lexicon_mapping = 'lexicons/ic15/GenericVocabulary_pair_list.txt'
lexicon = 'lexicons/ic15/GenericVocabulary_new.txt'
|
the-stack_0_20649 | # Copyright (c) 2021 Oleg Polakow. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Utilities for working with dates and time."""
import copy
from datetime import datetime, timezone, timedelta, tzinfo, time
import dateparser
import numpy as np
import pandas as pd
import pytz
from vectorbt import _typing as tp
DatetimeIndexes = (pd.DatetimeIndex, pd.TimedeltaIndex, pd.PeriodIndex)
def freq_to_timedelta(arg: tp.FrequencyLike) -> pd.Timedelta:
"""`pd.to_timedelta` that uses unit abbreviation with number."""
if isinstance(arg, str) and not arg[0].isdigit():
# Otherwise "ValueError: unit abbreviation w/o a number"
return pd.Timedelta(1, unit=arg)
return pd.Timedelta(arg)
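# Illustrative usage sketch (added example, not part of the original module):
# a bare unit abbreviation such as "d" is expanded to one unit, while strings
# that already carry a number are handed to pd.Timedelta unchanged. The helper
# name `_example_freq_to_timedelta` is an assumption for illustration only.
def _example_freq_to_timedelta() -> None:
    assert freq_to_timedelta("d") == pd.Timedelta(days=1)
    assert freq_to_timedelta("15min") == pd.Timedelta(minutes=15)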
def get_utc_tz() -> timezone:
"""Get UTC timezone."""
return timezone.utc
def get_local_tz() -> timezone:
"""Get local timezone."""
return timezone(datetime.now(timezone.utc).astimezone().utcoffset())
def convert_tzaware_time(t: time, tz_out: tp.Optional[tzinfo]) -> time:
"""Return as non-naive time.
`datetime.time` should have `tzinfo` set."""
return datetime.combine(datetime.today(), t).astimezone(tz_out).timetz()
def tzaware_to_naive_time(t: time, tz_out: tp.Optional[tzinfo]) -> time:
"""Return as naive time.
`datetime.time` should have `tzinfo` set."""
return datetime.combine(datetime.today(), t).astimezone(tz_out).time()
def naive_to_tzaware_time(t: time, tz_out: tp.Optional[tzinfo]) -> time:
"""Return as non-naive time.
`datetime.time` should not have `tzinfo` set."""
return datetime.combine(datetime.today(), t).astimezone(tz_out).time().replace(tzinfo=tz_out)
def convert_naive_time(t: time, tz_out: tp.Optional[tzinfo]) -> time:
"""Return as naive time.
`datetime.time` should not have `tzinfo` set."""
return datetime.combine(datetime.today(), t).astimezone(tz_out).time()
def is_tz_aware(dt: tp.SupportsTZInfo) -> bool:
"""Whether datetime is timezone-aware."""
tz = dt.tzinfo
if tz is None:
return False
return tz.utcoffset(datetime.now()) is not None
def to_timezone(tz: tp.TimezoneLike, to_py_timezone: tp.Optional[bool] = None, **kwargs) -> tzinfo:
"""Parse the timezone.
Strings are parsed by `pytz` and `dateparser`, while integers and floats are treated as hour offsets.
If the timezone object can't be checked for equality based on its properties,
it's automatically converted to `datetime.timezone`.
If `to_py_timezone` is set to True, will convert to `datetime.timezone`.
`**kwargs` are passed to `dateparser.parse`."""
from vectorbt._settings import settings
datetime_cfg = settings['datetime']
if tz is None:
return get_local_tz()
if to_py_timezone is None:
to_py_timezone = datetime_cfg['to_py_timezone']
if isinstance(tz, str):
try:
tz = pytz.timezone(tz)
except pytz.UnknownTimeZoneError:
dt = dateparser.parse('now %s' % tz, **kwargs)
if dt is not None:
tz = dt.tzinfo
if isinstance(tz, (int, float)):
tz = timezone(timedelta(hours=tz))
if isinstance(tz, timedelta):
tz = timezone(tz)
if isinstance(tz, tzinfo):
if to_py_timezone or tz != copy.copy(tz):
return timezone(tz.utcoffset(datetime.now()))
return tz
raise TypeError("Couldn't parse the timezone")
def to_tzaware_datetime(dt_like: tp.DatetimeLike,
naive_tz: tp.Optional[tp.TimezoneLike] = None,
tz: tp.Optional[tp.TimezoneLike] = None,
**kwargs) -> datetime:
"""Parse the datetime as a timezone-aware `datetime.datetime`.
See [dateparser docs](http://dateparser.readthedocs.io/en/latest/) for valid string formats and `**kwargs`.
Raw timestamps are localized to UTC, while naive datetime is localized to `naive_tz`.
Set `naive_tz` to None to use the default value defined under `datetime` settings
in `vectorbt._settings.settings`.
To explicitly convert the datetime to a timezone, use `tz` (uses `to_timezone`)."""
from vectorbt._settings import settings
datetime_cfg = settings['datetime']
if naive_tz is None:
naive_tz = datetime_cfg['naive_tz']
if isinstance(dt_like, float):
dt = datetime.fromtimestamp(dt_like, timezone.utc)
elif isinstance(dt_like, int):
if len(str(dt_like)) > 10:
dt = datetime.fromtimestamp(dt_like / 10 ** (len(str(dt_like)) - 10), timezone.utc)
else:
dt = datetime.fromtimestamp(dt_like, timezone.utc)
elif isinstance(dt_like, str):
dt = dateparser.parse(dt_like, **kwargs)
elif isinstance(dt_like, pd.Timestamp):
dt = dt_like.to_pydatetime()
elif isinstance(dt_like, np.datetime64):
dt = datetime.combine(dt_like.astype(datetime), time())
else:
dt = dt_like
if dt is None:
raise ValueError("Couldn't parse the datetime")
if not is_tz_aware(dt):
dt = dt.replace(tzinfo=to_timezone(naive_tz))
else:
dt = dt.replace(tzinfo=to_timezone(dt.tzinfo))
if tz is not None:
dt = dt.astimezone(to_timezone(tz))
return dt
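# Illustrative usage sketch (added example, not part of the original module):
# a raw UNIX timestamp is localized to UTC before the optional `tz` conversion.
# The helper name `_example_to_tzaware_datetime` is an assumption for
# illustration only.
def _example_to_tzaware_datetime() -> None:
    dt = to_tzaware_datetime(0, tz=timezone.utc)
    assert dt == datetime(1970, 1, 1, tzinfo=timezone.utc)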
def datetime_to_ms(dt: datetime) -> int:
"""Convert a datetime to milliseconds."""
epoch = datetime.fromtimestamp(0, dt.tzinfo)
return int((dt - epoch).total_seconds() * 1000.0)
def interval_to_ms(interval: str) -> tp.Optional[int]:
"""Convert an interval string to milliseconds."""
seconds_per_unit = {
"m": 60,
"h": 60 * 60,
"d": 24 * 60 * 60,
"w": 7 * 24 * 60 * 60,
}
try:
return int(interval[:-1]) * seconds_per_unit[interval[-1]] * 1000
except (ValueError, KeyError):
return None
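# Illustrative usage sketch (added example, not part of the original module):
# "1m" maps to 60000 ms, "2h" to 7200000 ms, and an unknown suffix yields None.
# The helper name `_example_interval_to_ms` is an assumption for illustration.
def _example_interval_to_ms() -> None:
    assert interval_to_ms("1m") == 60 * 1000
    assert interval_to_ms("2h") == 2 * 60 * 60 * 1000
    assert interval_to_ms("oops") is None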
|
the-stack_0_20654 | from __future__ import print_function
import os
import sys
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
from keras.models import Sequential, load_model, Model
from keras.layers import Input, Dense, Conv1D, Conv2D, Dropout, Activation, concatenate, LocallyConnected2D
from keras.layers import MaxPooling1D, MaxPooling2D, Flatten, Merge, LSTM, noise, Reshape, Add, Lambda
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l2
from keras.utils import np_utils, plot_model
from keras.layers.normalization import BatchNormalization
import keras
from keras import backend as BK
from keras.optimizers import *
from keras.layers.advanced_activations import LeakyReLU, ELU
import scipy.io as sio
import numpy as np
import h5py
import IPython
import scipy
import random
#GPU configuration
config = BK.tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = BK.tf.Session(config=config)
def corr2(a,b):
k = np.shape(a)
H=k[0]
W=k[1]
c = np.zeros((H,W))
d = np.zeros((H,W))
e = np.zeros((H,W))
#Calculating mean values
AM=np.mean(a)
BM=np.mean(b)
#Calculating terms of the formula
for ii in range(H):
for jj in range(W):
c[ii,jj]=(a[ii,jj]-AM)*(b[ii,jj]-BM)
d[ii,jj]=(a[ii,jj]-AM)**2
e[ii,jj]=(b[ii,jj]-BM)**2
#Formula itself
r = np.sum(c)/float(np.sqrt(np.sum(d)*np.sum(e)))
return r
def corr2_mse_loss(a,b):
a = BK.tf.subtract(a, BK.tf.reduce_mean(a))
b = BK.tf.subtract(b, BK.tf.reduce_mean(b))
tmp1 = BK.tf.reduce_sum(BK.tf.multiply(a,a))
tmp2 = BK.tf.reduce_sum(BK.tf.multiply(b,b))
tmp3 = BK.tf.sqrt(BK.tf.multiply(tmp1,tmp2))
tmp4 = BK.tf.reduce_sum(BK.tf.multiply(a,b))
r = -BK.tf.divide(tmp4,tmp3)
m=BK.tf.reduce_mean(BK.tf.square(BK.tf.subtract(a, b)))
rm=BK.tf.add(r,m)
return rm
####two subjects
print('Loading data and models...')
print('Loading data from subject 123...')
h5f = h5py.File('Data_Decoding_LIJ123_HGaLFP_pitch_Train.h5','r')
f0_train = h5f['f0'][:]
vuv_train = h5f['vuv'][:]
aperiodicity_train = h5f['aperiodicity'][:]
spectrogram_train = h5f['spectrogram'][:]
neu_train_123 = h5f['shft_neural_4d'][:]
h5f.close()
h5f = h5py.File('Data_Decoding_LIJ123_HGaLFP_pitch_Test.h5','r')
f0_val = h5f['f0'][:]
vuv_val = h5f['vuv'][:]
aperiodicity_val = h5f['aperiodicity'][:]
spectrogram_val = h5f['spectrogram'][:]
neu_val_123 = h5f['shft_neural_4d'][:]
h5f.close()
print('Loading data from subject 120...')
h5f = h5py.File('Data_Decoding_LIJ120_HGaLFP_pitch_Train.h5','r')
neu_train_120 = h5f['shft_neural_4d'][:]
h5f.close()
h5f = h5py.File('Data_Decoding_LIJ120_HGaLFP_pitch_Test.h5','r')
neu_val_120 = h5f['shft_neural_4d'][:]
h5f.close()
print('Concatenating neural data...')
index=range(98249,2*98249)
neu_train_120=np.delete(neu_train_120,index,axis=0)
neu_train=np.concatenate((neu_train_120,neu_train_123),axis=2)
neu_val=np.concatenate((neu_val_120,neu_val_123),axis=2)
features_train = np.concatenate((spectrogram_train, aperiodicity_train, f0_train, vuv_train), axis=1)
features_val = np.concatenate((spectrogram_val, aperiodicity_val, f0_val, vuv_val), axis=1)
print('Loading and concatenation done.')
Bottleneck='B256'
print('Coding features...')
Encoder_name='AEC_models/'+Bottleneck+'/Encoder_val.h5'
Decoder_name='AEC_models/'+Bottleneck+'/Decoder_val.h5'
encoder = load_model(Encoder_name, custom_objects={'corr2_mse_loss': corr2_mse_loss})
encoded_train = encoder.predict(features_train)
encoded_val = encoder.predict(features_val)
print('Coding done.')
def save_preds(encoded_preds,loss_history,D_name):
print('Decoding and saving predicted features...')
decoder = load_model(D_name, custom_objects={'corr2_mse_loss': corr2_mse_loss})
decoded_preds = decoder.predict(encoded_preds)
spec=np.power(decoded_preds[0],10)
aper=-np.power(10,decoded_preds[1])+1
f0=np.power(10,decoded_preds[2])-1
vuv=np.round(decoded_preds[3])
sio.savemat('Main_models/'+Bottleneck+'/Main_preds_Val_AEC_LCN_MLP_LIJ120_123.mat', mdict={'spectrogram':spec.T, 'aperiodicity':aper.T, 'f0':f0.T, 'vuv':vuv.T, 'loss': loss_history})
print('Saving done.')
#main network
adam=Adam(lr=.0001)
def build_model(shp_in,shp_out):
reg=.0005
inputs = Input(shape=shp_in)
x = LocallyConnected2D(1, kernel_size=[5, 5], padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(reg))(inputs)
x = Dropout(.2)(LeakyReLU(alpha=.25)(BatchNormalization()(x)))
x = LocallyConnected2D(1, kernel_size=[3, 3], padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(reg))(x)
x = Dropout(.2)(LeakyReLU(alpha=.25)(BatchNormalization()(x)))
x = LocallyConnected2D(2, kernel_size=[1, 1], padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(reg))(x)
x = Dropout(.2)(LeakyReLU(alpha=.25)(BatchNormalization()(x)))
x = Flatten()(x)
x_MLP = Flatten()(inputs)
x_MLP = Dense(256,kernel_initializer='he_normal', kernel_regularizer=l2(reg))(x_MLP)
x_MLP = Dropout(.3)(ELU(alpha=1.0)(BatchNormalization()(x_MLP)))
x_MLP = Dense(256,kernel_initializer='he_normal', kernel_regularizer=l2(reg))(x_MLP)
x_MLP = Dropout(.3)(ELU(alpha=1.0)(BatchNormalization()(x_MLP)))
x = concatenate([x,x_MLP], axis=1)
x = Dense(256,kernel_initializer='he_normal', kernel_regularizer=l2(reg))(x)
x = Dropout(.3)(ELU(alpha=1.0)(BatchNormalization()(x)))
x = Dense(128,kernel_initializer='he_normal', kernel_regularizer=l2(reg))(x)
x = Dropout(.3)(ELU(alpha=1.0)(BatchNormalization()(x)))
x = Dense(shp_out,kernel_initializer='he_normal')(x)
coded_preds = Activation('tanh', name='coded_preds')(x)
model = Model(inputs, coded_preds)
return model
#Inits
adam=Adam(lr=.0001)
config = BK.tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = BK.tf.Session(config=config)
num_iter=150
shp_in=neu_val.shape[1:]
shp_out=encoded_val.shape[1]
loss_history=np.empty((num_iter,2), dtype='float32')
#cnt_lr=0
model=build_model(shp_in,shp_out)
model.compile(loss=corr2_mse_loss, optimizer=adam)
filepath='Main_models/'+Bottleneck+'/Model_Best_Val_AEC_LCN_MLP_LIJ120_123.h5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
for j in range(num_iter):
print('#### Iteration:'+str(j+1)+'/'+str(num_iter))
history = model.fit(neu_train, encoded_train, epochs=1, batch_size=256, verbose=1, callbacks=callbacks_list, validation_data=(neu_val, encoded_val))
loss_history[j,0]=history.history['loss'][0]
loss_history[j,1]=history.history['val_loss'][0]
#if i>4 and cnt_lr<2:
# if loss_history[i,j-5,1]<loss_history[i,j,1] and loss_history[i,j-5,1]<loss_history[i,j-1,1] and loss_history[i,j-5,1]<loss_history[i,j-2,1] and loss_history[i,j-5,1]<loss_history[i,j-3,1] and loss_history[i,j-5,1]<loss_history[i,j-4,1]:
# print("########### Validation loss didn't improve after 5 epochs, lr is divided by 2 ############")
# BK.set_value(model.optimizer.lr, .5*BK.get_value(model.optimizer.lr))
# cnt_lr+=1
model.save('Main_models/'+Bottleneck+'/Model_Val_AEC_LCN_MLP_LIJ120_123.h5')
#model.load_weights(filepath)
encoded_preds = model.predict(neu_val)
h5f = h5py.File('Main_models/'+Bottleneck+'/Encoded_Val_AEC_LCN_MLP_LIJ120_123.h5','w')
h5f.create_dataset('encoded_preds', data=encoded_preds)
h5f.close()
save_preds(encoded_preds,loss_history,Decoder_name) |
the-stack_0_20656 | #!/usr/bin/env python
from __future__ import print_function
from datetime import datetime, timedelta
import sys
import operator
import re
def print_time(t):
total_seconds = int(abs(t).total_seconds())
hours, remainder = divmod(total_seconds, 60*60)
minutes, seconds = divmod(remainder, 60)
sign = t.days < 0 and '- ' or ''
print(sign + '{}h {}m {}s'.format(hours, minutes, seconds))
def to_delta(op, time):
time_format = ''
time_format += 'h' in time and '%Hh' or ''
time_format += 'm' in time and '%Mm' or ''
time_format += 's' in time and '%Ss' or ''
try:
date_time = datetime.strptime(time, time_format)
except ValueError:
        raise Exception('Invalid input')
time_delta = timedelta(hours=date_time.hour, minutes=date_time.minute, seconds=date_time.second)
return op == '-' and -time_delta or +time_delta
input_string = reduce(operator.concat, sys.argv[1:]).replace(' ', '')
splitted_string = re.split(r'([+,-])', input_string)
time_strings = splitted_string[::2]
time_ops = ['+'] + splitted_string[1::2]
grouped_times = zip(time_ops, time_strings)
deltas = map(lambda t: to_delta(t[0], t[1]), grouped_times)
summed_deltas = reduce(operator.add, deltas)
print_time(summed_deltas)
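# Example invocation (illustrative sketch; the script filename is an assumption):
#   python timesum.py 1h30m + 45m - 15m
# The arguments are concatenated, split on '+'/'-', each term is parsed into a
# timedelta by to_delta(), and the sum prints as "2h 0m 0s".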
|
the-stack_0_20658 | import random
input_y = input("Enter WIDTH: ")
input_x = input("Enter HEIGHT: ")
input_rand = input("Enter RANDOM: ")
coord_x = int(input_x)
coord_y = int(input_y)
input_random = int(input_rand)
if input_random > coord_x*coord_y:
raise Exception("I can't do that, Dave! Du Wurst!")
positions = {}
#positions["cat"] = "mau"
#print(positions["cat"])
#if "cat" in positions:
# print("yes")
for i in range(input_random):
zuf_x = random.randint(1,coord_x)-1
zuf_y = random.randint(1,coord_y)-1
poskey = str(zuf_x) + ":" + str(zuf_y)
# print(poskey)
while poskey in positions:
zuf_x = random.randint(1,coord_x)-1
zuf_y = random.randint(1,coord_y)-1
poskey = str(zuf_x) + ":" + str(zuf_y)
# print(poskey)
positions[poskey] = 1
final=""
for x in range(coord_x):
for y in range(coord_y):
poskey = str(x) + ":" + str(y)
if poskey in positions:
final += "*"
else:
final += "#"
final += "\n"
print(final.strip())
exit()
# wrong!!!! -- the fallback below is unreachable because exit() above ends the script
for poskey,posvalue in positions.items():
str_x, str_y = poskey.split(":")
zuf_x = int(str_x)
zuf_y = int(str_y)
for x in range(coord_x):
if x != zuf_x:
print("#"*coord_x)
else:
for y in range(coord_y):
if y != zuf_y:
final = final + "#"
else:
final = final + "*"
print(final)
final = ""
|
the-stack_0_20660 | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
loc1 = Location(env, mgr.GE(y, i_0))
loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y6", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
return frozenset(res)
|
the-stack_0_20663 | """Session provider for interaction with Renault servers."""
import asyncio
import logging
from typing import Any
from typing import Dict
from typing import Optional
import aiohttp
from . import gigya
from . import kamereon
from .const import CONF_COUNTRY
from .const import CONF_GIGYA_APIKEY
from .const import CONF_GIGYA_URL
from .const import CONF_KAMEREON_APIKEY
from .const import CONF_KAMEREON_URL
from .const import CONF_LOCALE
from .credential import Credential
from .credential import JWTCredential
from .credential_store import CredentialStore
from .exceptions import NotAuthenticatedException
from .exceptions import RenaultException
from .gigya.exceptions import GigyaResponseException
from .kamereon import models
from renault_api.helpers import get_api_keys
_LOGGER = logging.getLogger(__name__)
class RenaultSession:
"""Renault session for interaction with Renault servers."""
def __init__(
self,
websession: aiohttp.ClientSession,
locale: Optional[str] = None,
country: Optional[str] = None,
locale_details: Optional[Dict[str, str]] = None,
credential_store: Optional[CredentialStore] = None,
) -> None:
"""Initialise RenaultSession."""
self._gigya_lock = asyncio.Lock()
self._websession = websession
self._credentials: CredentialStore = credential_store or CredentialStore()
if locale_details:
for k, v in locale_details.items():
self._credentials[k] = Credential(v)
if locale:
self._credentials[CONF_LOCALE] = Credential(locale)
if country:
self._credentials[CONF_COUNTRY] = Credential(country)
async def login(self, login_id: str, password: str) -> None:
"""Attempt login on Gigya."""
self._credentials.clear_keys(gigya.GIGYA_KEYS)
response = await gigya.login(
self._websession,
await self._get_gigya_root_url(),
await self._get_gigya_api_key(),
login_id,
password,
)
credential = Credential(response.get_session_cookie())
self._credentials[gigya.GIGYA_LOGIN_TOKEN] = credential
async def _get_credential(self, key: str) -> str:
"""Get specified credential, or raise RenaultException."""
if key not in self._credentials:
if CONF_LOCALE in self._credentials:
await self._update_from_locale()
value = self._credentials.get_value(key)
if value:
return value
if key == gigya.GIGYA_LOGIN_TOKEN:
raise NotAuthenticatedException("Gigya login token not available.")
raise RenaultException(f"Credential `{key}` not found in credential cache.")
async def _update_from_locale(self) -> None:
"""Update all missing setting based on locale."""
locale = await self._get_credential(CONF_LOCALE)
if CONF_COUNTRY not in self._credentials:
self._credentials[CONF_COUNTRY] = Credential(locale[-2:])
locale_details = await get_api_keys(locale=locale, websession=self._websession)
for k, v in locale_details.items():
if k not in self._credentials:
self._credentials[k] = Credential(v)
async def _get_country(self) -> str:
"""Get country from credential store."""
return await self._get_credential(CONF_COUNTRY)
async def _get_kamereon_api_key(self) -> str:
"""Get Kamereon api-key from credential store."""
return await self._get_credential(CONF_KAMEREON_APIKEY)
async def _get_kamereon_root_url(self) -> str:
"""Get Kamereon root url from credential store."""
return await self._get_credential(CONF_KAMEREON_URL)
async def _get_gigya_api_key(self) -> str:
"""Get Gigya api-key from credential store."""
return await self._get_credential(CONF_GIGYA_APIKEY)
async def _get_gigya_root_url(self) -> str:
"""Get Gigya root url from credential store."""
return await self._get_credential(CONF_GIGYA_URL)
async def _get_login_token(self) -> str:
"""Get current login token from credential store."""
return await self._get_credential(gigya.GIGYA_LOGIN_TOKEN)
async def _get_person_id(self) -> str:
"""Get person id from credential store or from Gigya."""
async with self._gigya_lock:
person_id = self._credentials.get_value(gigya.GIGYA_PERSON_ID)
if person_id:
return person_id
login_token = await self._get_login_token()
response = await gigya.get_account_info(
self._websession,
await self._get_gigya_root_url(),
await self._get_gigya_api_key(),
login_token,
)
person_id = response.get_person_id()
self._credentials[gigya.GIGYA_PERSON_ID] = Credential(person_id)
return person_id
async def _get_jwt(self) -> str:
"""Get json web token from credential store or from Gigya.."""
async with self._gigya_lock:
jwt = self._credentials.get_value(gigya.GIGYA_JWT)
if jwt:
return jwt
login_token = await self._get_login_token()
try:
response = await gigya.get_jwt(
self._websession,
await self._get_gigya_root_url(),
await self._get_gigya_api_key(),
login_token,
)
except GigyaResponseException as exc:
if exc.error_code in [403005, 403013]: # pragma: no branch
self._credentials.clear_keys(gigya.GIGYA_KEYS)
raise NotAuthenticatedException("Authentication expired.") from exc
else:
jwt = response.get_jwt()
self._credentials[gigya.GIGYA_JWT] = JWTCredential(jwt)
return jwt
async def get_person(self) -> models.KamereonPersonResponse:
"""GET to /persons/{person_id}."""
return await kamereon.get_person(
websession=self._websession,
root_url=await self._get_kamereon_root_url(),
api_key=await self._get_kamereon_api_key(),
gigya_jwt=await self._get_jwt(),
country=await self._get_country(),
person_id=await self._get_person_id(),
)
async def get_account_vehicles(
self, account_id: str
) -> models.KamereonVehiclesResponse:
"""GET to /accounts/{account_id}/vehicles."""
return await kamereon.get_account_vehicles(
websession=self._websession,
root_url=await self._get_kamereon_root_url(),
api_key=await self._get_kamereon_api_key(),
gigya_jwt=await self._get_jwt(),
country=await self._get_country(),
account_id=account_id,
)
async def get_vehicle_data(
self,
account_id: str,
vin: str,
endpoint: str,
params: Optional[Dict[str, str]] = None,
) -> models.KamereonVehicleDataResponse:
"""GET to /v{endpoint_version}/cars/{vin}/{endpoint}."""
return await kamereon.get_vehicle_data(
websession=self._websession,
root_url=await self._get_kamereon_root_url(),
api_key=await self._get_kamereon_api_key(),
gigya_jwt=await self._get_jwt(),
country=await self._get_country(),
account_id=account_id,
vin=vin,
endpoint=endpoint,
params=params,
)
async def set_vehicle_action(
self,
account_id: str,
vin: str,
endpoint: str,
attributes: Dict[str, Any],
) -> models.KamereonVehicleDataResponse:
"""POST to /v{endpoint_version}/cars/{vin}/actions/{endpoint}."""
return await kamereon.set_vehicle_action(
websession=self._websession,
root_url=await self._get_kamereon_root_url(),
api_key=await self._get_kamereon_api_key(),
gigya_jwt=await self._get_jwt(),
country=await self._get_country(),
account_id=account_id,
vin=vin,
endpoint=endpoint,
attributes=attributes,
)
|
the-stack_0_20664 | import numpy as np
from typing import Tuple
import sys
sys.path.append("./")
# ! IMPLEMENT CONSISTENCY FOR NORM SUCH THAT IT ALWAYS STAYS ON THE SAME SIDE
np.seterr(all="ignore")
from geometry.vector import Vector
def compute_vectors(
x: np.ndarray, y: np.ndarray, fps: int = 1
) -> Tuple[Vector, np.ndarray]:
"""
Given the X and Y position at each frame -
Compute vectors:
i. velocity vector
ii. unit tangent
iii. unit norm
iv. acceleration
and scalar quantities:
i. speed
ii. curvature
See: https://stackoverflow.com/questions/28269379/curve-curvature-in-numpy
"""
# compute velocity vector
dx_dt = np.gradient(x)
dy_dt = np.gradient(y)
velocity = (
np.array([[dx_dt[i], dy_dt[i]] for i in range(dx_dt.size)]) * fps
)
# compute scalr speed vector
ds_dt = np.sqrt(dx_dt * dx_dt + dy_dt * dy_dt)
# get unit tangent vector
tangent = np.array([1 / ds_dt] * 2).transpose() * velocity
# get unit normal vector
tangent_x = tangent[:, 0]
tangent_y = tangent[:, 1]
deriv_tangent_x = np.gradient(tangent_x)
deriv_tangent_y = np.gradient(tangent_y)
dT_dt = np.array(
[
[deriv_tangent_x[i], deriv_tangent_y[i]]
for i in range(deriv_tangent_x.size)
]
)
length_dT_dt = np.sqrt(
deriv_tangent_x * deriv_tangent_x + deriv_tangent_y * deriv_tangent_y
)
normal = np.array([1 / length_dT_dt] * 2).transpose() * dT_dt
# get acceleration and curvature
d2s_dt2 = np.gradient(ds_dt)
d2x_dt2 = np.gradient(dx_dt)
d2y_dt2 = np.gradient(dy_dt)
curvature = (
np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2)
/ (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5
)
t_component = np.array([d2s_dt2] * 2).transpose()
n_component = np.array([curvature * ds_dt * ds_dt] * 2).transpose()
acceleration = t_component * tangent + n_component * normal
return (
Vector(velocity),
Vector(tangent),
Vector(normal),
Vector(acceleration),
ds_dt * fps,
curvature,
)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import sys
sys.path.append("./")
import draw
x = np.linspace(0, 5, 100)
y = np.sin(2 * x)
y[(x > 2) & (x < 2.75)] = -0.75
(
velocity,
tangent,
normal,
acceleration,
speed,
curvature,
) = compute_vectors(x, y)
f, axes = plt.subplots(nrows=2, sharex=False, figsize=(12, 8))
axes[0].scatter(x, y)
draw.Arrows(
x[::7],
y[::7],
tangent.angle[::7],
ax=axes[0],
L=0.25,
color="r",
label="tangent",
)
draw.Arrows(
x[::7],
y[::7],
normal.angle[::7],
ax=axes[0],
L=0.25,
color="g",
label="normal",
)
draw.Arrows(
x[::7],
y[::7],
acceleration.angle[::7],
ax=axes[0],
L=0.25,
color="m",
label="acceleration",
)
axes[1].plot(x, speed, label="speed")
axes[1].plot(x, curvature, label="curvature")
axes[0].legend()
axes[1].legend()
plt.show()
|
the-stack_0_20666 | from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
class RegisteredEmailValidator:
user_model = get_user_model()
code = 'invalid'
def __call__(self, email):
try:
user = self.user_model.objects.get(email=email)
except self.user_model.DoesNotExist:
raise ValidationError('Email not subscribed.', code=self.code)
else:
if user.is_active:
raise ValidationError('You are already authenticated.', code=self.code)
return |
the-stack_0_20668 | #!/usr/bin/python
from __future__ import unicode_literals
# https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script
from django.core.management.base import BaseCommand, CommandError
import datetime
from django.utils import timezone
from provision.models import Cluster
from qrba import settings
class Command(BaseCommand):
help = "synchronizes cluster path related objects from the integration to the integration cluster"
def handle(self, *args, **options):
qr = Cluster.objects.filter(name=settings.QUMULO_intcluster['name'])
if qr.count() == 0:
intserver = Cluster.objects.create(name=settings.QUMULO_intcluster['name'],
ipaddr=settings.QUMULO_intcluster['ipaddr'],
adminpassword=settings.QUMULO_intcluster['adminpassword'])
intserver.save()
else:
intserver = qr[0]
now = timezone.now() + datetime.timedelta(days=30)
print("now: " + str(now))
print("intserver is: " + str(intserver))
print("calling " + str(intserver) + ".sync_clusterpaths_from_cluster( " + str(intserver) + " ) at " + str(now))
activity = intserver.sync_clusterpaths_from_cluster(intserver)
print(" activity is " + str(activity) + " at " + str(now))
|
the-stack_0_20671 | ##-------------------------------------------------------------------
"""
Crout matrix decomposition is used to find two matrices that, when multiplied,
give our input matrix, so L * U = A.
L stands for lower: L has non-zero elements only on the diagonal and below.
U stands for upper: U has non-zero elements only on the diagonal and above.
This can, for example, be used to solve systems of linear equations.
The last if is used to avoid dividing by zero.
Example:
We input the A matrix:
[[1,2,3],
[3,4,5],
[6,7,8]]
We get:
L = [1.0, 0.0, 0.0]
[3.0, -2.0, 0.0]
[6.0, -5.0, 0.0]
U = [1.0, 2.0, 3.0]
[0.0, 1.0, 2.0]
[0.0, 0.0, 1.0]
We can check that L * U = A.
The time complexity is O(n^3), since the algorithm runs three nested loops over n.
##-------------------------------------------------------------------
"""
def crout_matrix_decomposition(A):
n = len(A)
L = [[0.0] * n for i in range(n)]
U = [[0.0] * n for i in range(n)]
for j in range(n):
U[j][j] = 1.0
for i in range(j, n):
alpha = float(A[i][j])
for k in range(j):
alpha -= L[i][k] * U[k][j]
L[i][j] = float(alpha)
for i in range(j + 1, n):
tempU = float(A[j][i])
for k in range(j):
tempU -= float(L[j][k] * U[k][i])
if int(L[j][j]) == 0:
L[j][j] = float(0.1 ** 40)
U[j][i] = float(tempU / L[j][j])
return (L, U)
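# --- Usage sketch (added for illustration, not part of the original module) ---
# It reruns the docstring example and multiplies L by U to confirm that the
# factors recover A.
if __name__ == "__main__":
    A = [[1, 2, 3],
         [3, 4, 5],
         [6, 7, 8]]
    L, U = crout_matrix_decomposition(A)
    n = len(A)
    product = [[sum(L[i][k] * U[k][j] for k in range(n)) for j in range(n)]
               for i in range(n)]
    print("L = " + str(L))
    print("U = " + str(U))
    print("L * U = " + str(product))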
|
the-stack_0_20672 | #!/usr/bin/env python
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from antivirus.lists_based_engine.update import retrieve_content
from antivirus.lists_based_engine.settings import IP_TYPE
from antivirus.lists_based_engine.settings import PROXY_TYPE
__url__ = "https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/proxyspy_1d.ipset"
__check__ = "proxyspy_1d"
__info__ = "proxy (suspicious)"
__reference__ = "spys.ru"
__type__ = IP_TYPE
__classification__ = PROXY_TYPE
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__, __type__, __classification__)
return retval
|
the-stack_0_20673 | """ TVM testing utilities """
import logging
import numpy as np
def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
""" Version of np.testing.assert_allclose with `atol` and `rtol` fields set
in reasonable defaults.
Arguments `actual` and `desired` are not interchangable, since the function
compares the `abs(actual-desired)` with `atol+rtol*abs(desired)`. Since we
often allow `desired` to be close to zero, we generally want non-zero `atol`.
"""
np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=True)
def check_numerical_grads(function, input_values, grad_values, function_value=None,
delta=1e-3, atol=1e-2, rtol=0.1):
"""A helper function that checks that numerical gradients of a function are
equal to gradients computed in some different way (analytical gradients).
Numerical gradients are computed using finite difference approximation. To
reduce the number of function evaluations, the number of points used is
gradually increased if the error value is too high (up to 5 points).
Parameters
----------
function
A function that takes inputs either as positional or as keyword
arguments (either `function(*input_values)` or `function(**input_values)`
should be correct) and returns a scalar result. Should accept numpy
ndarrays.
input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
A list of values or a dict assigning values to variables. Represents the
point at which gradients should be computed.
grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
Gradients computed using a different method.
function_value : float, optional
Should be equal to `function(**input_values)`.
delta : float, optional
A small number used for numerical computation of partial derivatives.
The default 1e-3 is a good choice for float32.
atol : float, optional
Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a
gradient.
rtol : float, optional
Relative tolerance.
"""
# If input_values is a list then function accepts positional arguments
# In this case transform it to a function taking kwargs of the form {"0": ..., "1": ...}
if not isinstance(input_values, dict):
input_len = len(input_values)
input_values = {str(idx): val for idx, val in enumerate(input_values)}
def _function(_input_len=input_len, _orig_function=function, **kwargs):
return _orig_function(*(kwargs[str(i)] for i in range(input_len)))
function = _function
grad_values = {str(idx): val for idx, val in enumerate(grad_values)}
if function_value is None:
function_value = function(**input_values)
# a helper to modify j-th element of val by a_delta
def modify(val, j, a_delta):
val = val.copy()
val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta
return val
# numerically compute a partial derivative with respect to j-th element of the var `name`
def derivative(x_name, j, a_delta):
modified_values = {n: modify(val, j, a_delta) if n == x_name else val
for n, val in input_values.items()}
return (function(**modified_values) - function_value)/a_delta
def compare_derivative(j, n_der, grad):
der = grad.reshape(-1)[j]
return np.abs(n_der - der) < atol + rtol*np.abs(n_der)
for x_name, grad in grad_values.items():
if grad.shape != input_values[x_name].shape:
raise AssertionError(
"Gradient wrt '{}' has unexpected shape {}, expected {} "
.format(x_name, grad.shape, input_values[x_name].shape))
ngrad = np.zeros_like(grad)
wrong_positions = []
# compute partial derivatives for each position in this variable
for j in range(np.prod(grad.shape)):
# forward difference approximation
nder = derivative(x_name, j, delta)
# if the derivative is not equal to the analytical one, try to use more
# precise and expensive methods
if not compare_derivative(j, nder, grad):
# central difference approximation
nder = (derivative(x_name, j, -delta) + nder)/2
if not compare_derivative(j, nder, grad):
# central difference approximation using h = delta/2
cnder2 = (derivative(x_name, j, delta/2) + derivative(x_name, j, -delta/2))/2
# five-point derivative
nder = (4*cnder2 - nder)/3
# if the derivatives still don't match, add this position to the
# list of wrong positions
if not compare_derivative(j, nder, grad):
wrong_positions.append(np.unravel_index(j, grad.shape))
ngrad.reshape(-1)[j] = nder
wrong_percentage = int(100*len(wrong_positions)/np.prod(grad.shape))
dist = np.sqrt(np.sum((ngrad - grad)**2))
grad_norm = np.sqrt(np.sum(ngrad**2))
if not (np.isfinite(dist) and np.isfinite(grad_norm)):
raise ValueError(
"NaN or infinity detected during numerical gradient checking wrt '{}'\n"
"analytical grad = {}\n numerical grad = {}\n"
.format(x_name, grad, ngrad))
# we multiply atol by this number to make it more universal for different sizes
sqrt_n = np.sqrt(float(np.prod(grad.shape)))
if dist > atol*sqrt_n + rtol*grad_norm:
raise AssertionError(
"Analytical and numerical grads wrt '{}' differ too much\n"
"analytical grad = {}\n numerical grad = {}\n"
"{}% of elements differ, first 10 of wrong positions: {}\n"
"distance > atol*sqrt(n) + rtol*grad_norm\n"
"distance {} > {}*{} + {}*{}"
.format(x_name, grad, ngrad, wrong_percentage, wrong_positions[:10],
dist, atol, sqrt_n, rtol, grad_norm))
max_diff = np.max(np.abs(ngrad - grad))
avg_diff = np.mean(np.abs(ngrad - grad))
logging.info("Numerical grad test wrt '%s' of shape %s passes, "
"dist = %f, max_diff = %f, avg_diff = %f",
x_name, grad.shape, dist, max_diff, avg_diff)
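# --- Illustrative self-check (an addition, not part of the original module) ---
# For f(x) = sum(x ** 2) the analytical gradient is 2 * x, so the check below is
# expected to pass without raising.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    x0 = np.array([1.0, -2.0, 3.0], dtype="float32")
    check_numerical_grads(lambda x: np.sum(x ** 2), {"x": x0}, {"x": 2 * x0})
    assert_allclose(np.sum(x0 ** 2), 14.0)
    print("numerical gradient check passed")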
|
the-stack_0_20675 | # Copyright 2019 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila.api import common
from oslo_db import exception as db_exception
from oslo_log import log
from six.moves import http_client
import webob
from webob import exc
from manila.api.openstack import wsgi
from manila.api.views import share_network_subnets as subnet_views
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
from manila.share import rpcapi as share_rpcapi
LOG = log.getLogger(__name__)
class ShareNetworkSubnetController(wsgi.Controller):
"""The Share Network Subnet API controller for the OpenStack API."""
resource_name = 'share_network_subnet'
_view_builder_class = subnet_views.ViewBuilder
def __init__(self):
super(ShareNetworkSubnetController, self).__init__()
self.share_rpcapi = share_rpcapi.ShareAPI()
@wsgi.Controller.api_version("2.51")
@wsgi.Controller.authorize
def index(self, req, share_network_id):
"""Returns a list of share network subnets."""
context = req.environ['manila.context']
try:
share_network = db_api.share_network_get(context, share_network_id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
return self._view_builder.build_share_network_subnets(
req, share_network.get('share_network_subnets'))
def _all_share_servers_are_auto_deletable(self, share_network_subnet):
return all([ss['is_auto_deletable'] for ss
in share_network_subnet['share_servers']])
@wsgi.Controller.api_version('2.51')
@wsgi.Controller.authorize
def delete(self, req, share_network_id, share_network_subnet_id):
"""Delete specified share network subnet."""
context = req.environ['manila.context']
try:
db_api.share_network_get(context, share_network_id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
try:
share_network_subnet = db_api.share_network_subnet_get(
context, share_network_subnet_id)
except exception.ShareNetworkSubnetNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
for share_server in share_network_subnet['share_servers'] or []:
shares = db_api.share_instances_get_all_by_share_server(
context, share_server['id'])
if shares:
msg = _("Cannot delete share network subnet %(id)s, it has "
"one or more shares.") % {
'id': share_network_subnet_id}
LOG.warning(msg)
raise exc.HTTPConflict(explanation=msg)
# NOTE(silvacarlose): Do not allow the deletion of any share server
# if any of them has the flag is_auto_deletable = False
if not self._all_share_servers_are_auto_deletable(
share_network_subnet):
msg = _("The service cannot determine if there are any "
"non-managed shares on the share network subnet %(id)s,"
"so it cannot be deleted. Please contact the cloud "
"administrator to rectify.") % {
'id': share_network_subnet_id}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
for share_server in share_network_subnet['share_servers']:
self.share_rpcapi.delete_share_server(context, share_server)
db_api.share_network_subnet_delete(context, share_network_subnet_id)
return webob.Response(status_int=http_client.ACCEPTED)
def _validate_subnet(self, context, share_network_id, az=None):
"""Validate the az for the given subnet.
If az is None, the method will search for an existent default subnet.
In case of a given AZ, validates if there's an existent subnet for it.
"""
msg = ("Another share network subnet was found in the "
"specified availability zone. Only one share network "
"subnet is allowed per availability zone for share "
"network %s." % share_network_id)
if az is None:
default_subnet = db_api.share_network_subnet_get_default_subnet(
context, share_network_id)
if default_subnet is not None:
raise exc.HTTPConflict(explanation=msg)
else:
az_subnet = (
db_api.share_network_subnet_get_by_availability_zone_id(
context, share_network_id, az['id'])
)
# If the 'availability_zone_id' is not None, we found a conflict,
# otherwise we just have found the default subnet
if az_subnet and az_subnet['availability_zone_id']:
raise exc.HTTPConflict(explanation=msg)
@wsgi.Controller.api_version("2.51")
@wsgi.Controller.authorize
def create(self, req, share_network_id, body):
"""Add a new share network subnet into the share network."""
context = req.environ['manila.context']
if not self.is_valid_body(body, 'share-network-subnet'):
msg = _("Share Network Subnet is missing from the request body.")
raise exc.HTTPBadRequest(explanation=msg)
data = body['share-network-subnet']
data['share_network_id'] = share_network_id
common.check_net_id_and_subnet_id(data)
try:
db_api.share_network_get(context, share_network_id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
availability_zone = data.pop('availability_zone', None)
subnet_az = None
if availability_zone:
try:
subnet_az = db_api.availability_zone_get(context,
availability_zone)
except exception.AvailabilityZoneNotFound:
msg = _("The provided availability zone %s does not "
"exist.") % availability_zone
raise exc.HTTPBadRequest(explanation=msg)
self._validate_subnet(context, share_network_id, az=subnet_az)
try:
data['availability_zone_id'] = (
subnet_az['id'] if subnet_az is not None else None)
share_network_subnet = db_api.share_network_subnet_create(
context, data)
except db_exception.DBError as e:
msg = _('Could not create the share network subnet.')
LOG.error(e)
raise exc.HTTPInternalServerError(explanation=msg)
share_network_subnet = db_api.share_network_subnet_get(
context, share_network_subnet['id'])
return self._view_builder.build_share_network_subnet(
req, share_network_subnet)
@wsgi.Controller.api_version('2.51')
@wsgi.Controller.authorize
def show(self, req, share_network_id, share_network_subnet_id):
"""Show share network subnet."""
context = req.environ['manila.context']
try:
db_api.share_network_get(context, share_network_id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
try:
share_network_subnet = db_api.share_network_subnet_get(
context, share_network_subnet_id)
except exception.ShareNetworkSubnetNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
return self._view_builder.build_share_network_subnet(
req, share_network_subnet)
def create_resource():
return wsgi.Resource(ShareNetworkSubnetController())
|
the-stack_0_20676 | from redbot.core import commands, checks, Config
from redbot.core.utils.chat_formatting import box, humanize_list, pagify
import asyncio
import datetime
import dateutil
import dateutil.parser
import discord
import logging
import re
import random
from collections import OrderedDict
#from redbot.core.utils.chat_formatting import box, humanize_list, pagify
class Funbear(commands.Cog):
"""My custom cog"""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 12345679, force_registration=True)
self.redbear_config = Config.get_conf(self, 12345678, False, "redbear")
default_global = {
}
self.config.register_global(**default_global)
default_guild = {
"member_commands": {},
"embed_role": "",
"usernotes_channel": "",
"skip_channels": {},
"info_channels": {}
}
self.config.register_guild(**default_guild)
default_member = {
"personal_commands": {}
}
self.config.register_member(**default_member)
#default_channel = {
#}
#self.config.register_channel(**default_channel)
self.all_users = dict()
# member_commands, personal_commands, muted_members
@commands.command()
@checks.admin()
async def funsetup(self, ctx, embed_role = ""): #mute_role = "", mod_role = "", usernotes_channel = "", timeout_channel = "", moderator_channel = ""):
# """
# `Makes sure all necessary setup is complete.
# """
await ctx.react_quietly("🐻")
if embed_role == "": #mute_role == "" and mod_role == "" and usernotes_channel == "" and timeout_channel == "" and moderator_channel == "":
guild_data = await self.config.guild(ctx.guild).all()
if not guild_data["embed_role"]:
text = f"`embed_role` is not set.\n"
else:
text = f"`embed_role`: `{guild_data['embed_role']}`\n"
# if not guild_data["moderator_role"]:
# text += f"`moderator_role` is not set.\n"
# else:
# text += f"`moderator_role`: `{guild_data['moderator_role']}`\n"
# if not guild_data["usernotes_channel"]:
# text += f"`usernotes_channel` is not set.\n"
# else:
# text += f"`usernotes_channel`: `{guild_data['usernotes_channel']}`\n"
# if not guild_data["timeout_channel"]:
# text += f"`timeout_channel` is not set.\n"
# else:
# text += f"`timeout_channel`: `{guild_data['timeout_channel']}`\n"
# if not guild_data["moderator_channel"]:
# text += f"`moderator_channel` is not set.\n"
# else:
# text += f"`moderator_channel`: `{guild_data['moderator_channel']}`\n"
text += f"\n```!funsetup embed_role```"
await ctx.send(text)
elif embed_role != "": # and mod_role != "" and usernotes_channel != "" and timeout_channel != "" and moderator_channel != "":
#set the config up
try:
embed_role_actual = get_guild_role(ctx, embed_role) # discord.utils.get(ctx.guild.roles, id=int(mute_role))
except:
pass
if embed_role_actual is None:
await ctx.react_quietly("⚠")
await ctx.send(f"No embed role found with ID `{embed_role}`.")
else:
await self.config.guild(ctx.guild).embed_role.set(embed_role)
await ctx.send(f"Embed role successfully set to ID `{embed_role}`.")
# try:
# mod_role_actual = get_guild_role(ctx, mod_role) #discord.utils.get(ctx.guild.roles, id=int(mod_role))
# except:
# pass
# if mod_role_actual is None:
# await ctx.react_quietly("⚠")
# await ctx.send(f"No mod role found with ID `{mod_role}`.")
# else:
# await self.config.guild(ctx.guild).moderator_role.set(mod_role)
# await ctx.send(f"Mod role successfully set to ID `{mod_role}`.")
# try:
# usernotes_channel_actual = get_guild_channel(self, usernotes_channel) #self.bot.get_channel(int(usernotes_channel))
# except:
# pass
# if usernotes_channel_actual is None:
# await ctx.react_quietly("⚠")
# await ctx.send(f"No usernotes channel found with ID `{usernotes_channel}`.")
# else:
# await self.config.guild(ctx.guild).usernotes_channel.set(usernotes_channel)
# await ctx.send(f"Usernotes channel successfully set to ID `{usernotes_channel}`.")
# try:
# timeout_channel_actual = get_guild_channel(self, timeout_channel) #self.bot.get_channel(int(timeout_channel))
# except:
# pass
# if timeout_channel_actual is None:
# await ctx.react_quietly("⚠")
# await ctx.send(f"No timeout channel channel found with ID `{timeout_channel}`.")
# else:
# await self.config.guild(ctx.guild).timeout_channel.set(timeout_channel)
# await ctx.send(f"Timeout channel successfully set to ID `{timeout_channel}`.")
# try:
# moderator_channel_actual = get_guild_channel(self, moderator_channel) #self.bot.get_channel(int(timeout_channel))
# except:
# pass
# if moderator_channel_actual is None:
# await ctx.react_quietly("⚠")
# await ctx.send(f"No moderator channel channel found with ID `{moderator_channel}`.")
# else:
# await self.config.guild(ctx.guild).moderator_channel.set(moderator_channel)
# await ctx.send(f"Moderator channel successfully set to ID `{moderator_channel}`.")
else:
await ctx.react_quietly("⚠")
await ctx.send(f"Usage: !funsetup `embed_role_id`")
#@commands.command()
#@checks.admin()
#async def skipchannel(self, ctx, addremove = "", channel_id = 0):
# """
# `!skipchannelsetup [add/remove] [channel_id]. The code here is the same as infochannel so update both
# """
# await ctx.react_quietly("🐻")
# if addremove == "add" or addremove == "remove":
# if channel_id > 0:
# channel = self.bot.get_channel(channel_id)
# if channel in ctx.guild.channels:
# async with self.config.guild(ctx.guild).skip_channels() as skipchannels:
# if addremove=="add":
# skipchannels[channel_id] = ""
# await ctx.send(f"`{channel.name}:{channel_id}` added to channels to skip.")
# elif str(channel_id) in skipchannels:
# skipchannels.pop(str(channel_id))
# await ctx.send(f"`{channel.name}:{channel_id}` removed from channels to skip.")
# else:
# await ctx.react_quietly("⚠")
# await ctx.send("Nothing to remove.")
# else:
# await ctx.react_quietly("⚠")
# await ctx.send(f"Channel ID `{channel_id}` doesn't seem to be in this server.")
# else:
# await ctx.react_quietly("⚠")
# await ctx.send(f"A valid `channel_id` must be provided.")
# else:
# await ctx.send(f"Usage: !skipchannel [`\"add\" or \"remove\"`] [`channel_id`]")
# skipped = await self.config.guild(ctx.guild).skip_channels()
# if skipped is not None:
# content = f"The following channels are being skipped for certain commands:\n"
# for key in skipped:
# channel = self.bot.get_channel(int(key))
# content += f"`{channel.name}:{key}` "
# await ctx.send(content)
#@commands.command()
#async def infochannel(self, ctx, addremove = "", channel_id = 0):
# """
# `!infochannel [add/remove] [channel_id] . The code here is the same as skipchannel so update both
# """
# await ctx.react_quietly("🐻")
# if addremove == "add" or addremove == "remove":
# if channel_id > 0:
# channel = self.bot.get_channel(channel_id)
# if channel in ctx.guild.channels:
# async with self.config.guild(ctx.guild).info_channels() as info_channels:
# if addremove=="add":
# info_channels[channel_id] = ""
# await ctx.send(f"`{channel.name}:{channel_id}` added to eligible !userinfo channels.")
# elif str(channel_id) in skipchannels:
# info_channels.pop(str(channel_id))
# await ctx.send(f"`{channel.name}:{channel_id}` removed from eligible !userinfo channels.")
# else:
# await ctx.react_quietly("⚠")
# await ctx.send("Nothing to remove.")
# else:
# await ctx.react_quietly("⚠")
# await ctx.send(f"Channel ID `{channel_id}` doesn't seem to be in this server.")
# else:
# await ctx.react_quietly("⚠")
# await ctx.send(f"A valid `channel_id` must be provided.")
# else:
# await ctx.send(f"Usage: !infochannel [`\"add\" or \"remove\"`] [`channel_id`]")
# infochannels = await self.config.guild(ctx.guild).info_channels()
# if infochannels is not None:
# content = f"The following channels are OK to use !userinfo:\n"
# for key in infochannels:
# channel = self.bot.get_channel(int(key))
# content += f"`{channel.name}:{key}` "
# await ctx.send(content)
@commands.command()
@checks.is_owner()
async def baned(self, ctx): # checked
"""
`baned`: Sends a random baned cat to the message channel.
"""
baned_cats = (
'http://i.imgur.com/Pn4BFLj.jpg', 'http://i.imgur.com/xtmyPBN.jpg',
'http://i.imgur.com/avVmttp.jpg', 'http://i.imgur.com/58wFteM.jpg',
'http://i.imgur.com/UPOqky8.jpg', 'http://i.imgur.com/HOeaLRz.jpg',
'http://i.imgur.com/AKhPIXr.jpg')
await ctx.react_quietly("🐻")
try:
await ctx.send(random.choice(baned_cats))
except Exception as e:
print(f"baned: {e}")
await ctx.react_quietly("⚠")
@commands.command()
async def shitposting(self, ctx):
guild_data = await self.config.guild(ctx.guild).all()
redbear_data = await self.redbear_config.guild(ctx.guild).all()
embed_role = get_guild_role(ctx, guild_data["embed_role"])
moderator_role = get_guild_role(ctx, redbear_data["moderator_role"])
if embed_role in ctx.author.roles or moderator_role in ctx.author.roles:
copypasta_text = get_shitposts(ctx)
await ctx.send('look, until you get your shit together i really don\'t have the time to explain {} to a kid'.format(random.choice(copypasta_text)))
else:
await ctx.react_quietly("🚫")
@commands.command()
async def mod(self, ctx):
"""
`!mod @someone @someoneelse`: Shuffles the characters in members names.
"""
redbear_data = await self.redbear_config.guild(ctx.guild).all()
moderator_role = get_guild_role(ctx, redbear_data["moderator_role"])
usernotes_channel = get_guild_channel(self, redbear_data["usernotes_channel"])
if moderator_role in ctx.author.roles:
await ctx.react_quietly("🐻")
try:
for mentioned_member in ctx.message.mentions:
if moderator_role not in mentioned_member.roles and mentioned_member is not self.bot.user:
nickname = ''.join(random.sample(mentioned_member.display_name, len(mentioned_member.display_name)))
await mentioned_member.edit(nick=nickname)
await usernotes_channel.send(f'`{mentioned_member.name}`:`{mentioned_member.id}` ({mentioned_member.mention})\'s display name was modded by {ctx.author.mention}.\n--{ctx.message.jump_url}')
except Exception as e:
print(f"mod: {e}")
await ctx.react_quietly("⚠")
else:
await ctx.react_quietly("🚫")
@commands.command()
async def unmod(self, ctx): # checked
"""
        `!unmod @someone @someoneelse`: Restores mentioned members' original display names.
"""
redbear_data = await self.redbear_config.guild(ctx.guild).all()
moderator_role = get_guild_role(ctx, redbear_data["moderator_role"])
usernotes_channel = get_guild_channel(self, redbear_data["usernotes_channel"])
if moderator_role in ctx.author.roles:
await ctx.react_quietly("🐻")
try:
for mentioned_member in ctx.message.mentions:
if moderator_role not in mentioned_member.roles and mentioned_member is not self.bot.user:
await mentioned_member.edit(nick=None)
await usernotes_channel.send(f'`{mentioned_member.name}`:`{mentioned_member.id}` ({mentioned_member.mention})\'s display name was unmodded by {ctx.author.mention}.\n--{ctx.message.jump_url}')
except Exception as e:
print(f"unmod: {e}")
await ctx.react_quietly("⚠")
else:
await ctx.react_quietly("🚫")
@commands.Cog.listener()
async def on_message(self, message):
try:
#guild_data = await self.config.guild(message.guild).all()
#mod_role = message.guild.get_role(int(guild_data["moderator_role"])) #no CTX here so get roles the "hard" way
#muted_role = message.guild.get_role(int(guild_data["mute_role"]))
if message.author.bot:
return
except Exception as e:
print(e)
@commands.Cog.listener()
async def on_message_edit(self, before, after):
return
@commands.Cog.listener()
async def on_member_join(self, member):
return
@commands.Cog.listener()
async def on_member_update(self, before, after): # checked
if self.bot.is_ready():
#try:
# guild_data = await self.config.guild(after.guild).all()
# moderator_role = after.guild.get_role(int(guild_data["moderator_role"]))
# muted_role = after.guild.get_role(int(guild_data["mute_role"]))
# timeout_channel = get_guild_channel(self, guild_data["timeout_channel"])
# member_data = await self.config.member(after).all()
# await all_users_setdefault(self, before, datetime.datetime.utcnow())
# if before.roles != after.roles:
# await asyncio.sleep(2.0)
# added_roles = [role for role in after.roles if role not in before.roles]
# removed_roles = [role for role in before.roles if role not in after.roles]
# if moderator_role not in after.roles and added_roles == [muted_role]:
# roles = [role.id for role in before.roles]
# await self.config.member(after).muted.set(True)
# await self.config.guild(after.guild).muted_members.set_raw(after.id, value = roles)
# await after.edit(roles=[muted_role])
# await asyncio.sleep(5.0)
# await timeout_channel.send(f'{after.mention}. \'You were muted because you broke the rules. Reread them, then write `@{moderator_role.name}` to be unmuted.\nMessage history is disabled in this channel. If you tab out, or select another channel, the messages will disappear.\'')
# if moderator_role not in after.roles and removed_roles == [muted_role]:
# await after.remove_roles(muted_role)
# await self.config.member(after).muted.set(False)
# oldroles = await self.config.guild(after.guild).muted_members.get_raw(after.id)
# for role_id in oldroles:
# try:
# thisrole = after.guild.get_role(role_id)
# await after.add_roles(thisrole)
# except Exception as e:
# pass
# await self.config.guild(after.guild).muted_members.set_raw(after.id, value = "")
#except Exception as e:
# print(e)
return
@commands.Cog.listener()
async def on_member_remove(self, member):
return
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
#if self.bot.is_ready:
#guild = self.bot.get_guild(payload.guild_id)
#guild_data = await self.config.guild(guild).all()
#payload_message_channel = get_guild_channel(self, payload.channel_id)
#try:
# payload_message = await payload_message_channel.fetch_message(payload.message_id)
#except discord.errors.NotFound:
# return
#try:
# if payload.member != self.bot:
# if payload.emoji.name == "❗":
# await payload_message.add_reaction('👮')
# await payload_message.remove_reaction(payload.emoji, payload.member)
# mod_channel = get_guild_channel(self, guild_data["moderator_channel"])
# usernotes_channel = get_guild_channel(self, guild_data["usernotes_channel"])
# em = make_embed_from_message(payload_message)
# content = f"{payload.member.mention} reported this message in {payload_message.channel.mention}:\n--{payload_message.jump_url}"
# await mod_channel.send(content=content, embed=em)
# await usernotes_channel.send(content=content, embed=em)
#except discord.DiscordException as e:
# await payload_message.add_reaction("⚠")
# print(e)
return
@commands.Cog.listener()
async def on_ready(self):
print("yo")
# HELPER FUNCTIONS #
def get_shitposts(ctx):
return ['the complexity of AI dictators and immortality,',
'this^^,',
'how shitposting belongs in offtopic',
'about south african weapons of mass destruction',
'about french politics',
'the national gun regime',
'*lyrical* complexity',
'the fact that roads are socialism',
'the obvious errors of the Clinton campaign',
'how bernie would have won',
'the corrupt implications of the leaked DNC emails',
'my fanfic about bernie sanders and hillary clinton',
'about our woke slay queen Hillary Clinton',
'how i\'m sorry I still read books - ',
'how the DNC stole the primary',
'how the electoral college stole the election',
'how trump won in a landslide',
'how the DNC stole clintonmas',
'how Trump voters were motivated by economic anxiety/racism/misogyny',
'how the dossier is real',
'how the dossier is fake ',
'how democrats need to stay the course',
'we need to all say "stop bedwetting, ed"',
'"i\'m freaking out, is this big?"',
'how trump won, get over it.',
'that poppyj is **always** right',
'the national importance of me asking "any news today guys?"',
'how capitalism is a better economic system than socialism',
'how socialism is a better economic system than capitalism',
'why Evan McMullin is a war criminal',
'why catgirls, as a concept, are banned,',
'broken windows',
'nazi punching',
'pepe',
'why antifascists are the real fascists',
'why fascists are the real antifascists',
'why Nate Silver must be eliminated',
'how 538 is fake news',
'how "please make me a mod" was the worst thing to say',
'when Democrats want a nuclear strike on Moscow',
'how {} is a Russian agent'.format(ctx.author.mention),
'why both sides are the same',
'about that stupid face swim used to make, ',
'why i always say "As a liberal/minority group, [opinion that is contrary to liberal\'s/that group\'s interest]"',
'why the Webster\'s dictionary is the best academic source on fascism',
'how Scalia was great for the Constitution',
'why citizens united was actually ok',
'the dialectic',
'acid privilege',
'how the beatles benefited from acid privilege',
'https://cdn.discordapp.com/attachments/204689269778808833/304046964037779487/unknown.png',
'anime',
'about the time gray volunteered for a buzzfeed interview',
'about how there needs to be a serious discussion about the state of this discord, sooner than later.']
def check_load_error(loaderrors, checkObj, string):
result = False
i = len(loaderrors)
if string == "our_guild":
bonusStr = " (as a result, no roles were set)"
else:
bonusStr = ""
if checkObj is None:
loaderrors[i] = f"{string} not set{bonusStr}."
else:
result = True
return result
async def all_users_setdefault(self, member, timestamp: datetime):
# need to convert timestamp to iso 8601
timestamp_str = timestamp.isoformat()
await self.config.member(member).join_strikes.set(0)
await self.config.member(member).joined_at.set(timestamp_str)
await self.config.member(member).strikes.set(0)
await self.config.member(member).last_check.set(timestamp_str)
await self.config.member(member).last_message.set(None)
await self.config.member(member).spammer.set(False)
def get_usable_date_time(str):
dt = ""
if str == "":
dt = datetime.datetime.utcnow()
else:
dt = dateutil.parser.parse(str)
return dt
def get_guild_role(ctx, id):
return ctx.guild.get_role(int(id))
def get_guild_channel(self, id):
return self.bot.get_channel(int(id))
def build_skip_string(self, channel_dict):
skipstring = ""
for channel_id in channel_dict:
if skipstring == "":
skipstring = get_guild_channel(self, channel_id).name
else:
skipstring += f", {get_guild_channel(self, channel_id).name}"
return skipstring
def make_embed_from_message(message, friendly = 0):
"""
Takes a discord message and returns an embed quoting it.
:param message: a discord message object
:return: discord.Embed object quoting the message.
"""
description = message.clean_content
if len(description) > 2047:
description = f"{description[2044]}..."
em = discord.Embed(description=description)
author = f"{message.author.display_name}"
if friendly == 0:
author += f" : {message.author.id}"
else:
author += f", in #{message.channel}:"
em.set_author(name=author,
icon_url=message.author.avatar_url)
return em |
the-stack_0_20679 | #!/usr/bin/env python
import os
import csv
from collections import OrderedDict
from scripts.prices_data import *
from scripts.prices_regress import *
def generic_learner(column_features, column_predict, preprocessors, model, day, complete_data, instance, historic_days):
preprocessors = [interpret_column_values] + preprocessors
data_previous_historic_days = get_data_prevdays(complete_data, day, timedelta(historic_days))
x_train, y_train = features_and_preprocess(column_features, column_predict, preprocessors,
data_previous_historic_days)
model.fit(x_train, y_train)
data_prediction_day = get_data_day(complete_data, day)
x_test, y_test = features_and_preprocess(column_features, column_predict, preprocessors, data_prediction_day)
return Prediction(model.predict(x_test), actual_values=y_test, instance=instance)
def features_and_preprocess(column_features, column_predict, preprocessors, data):
xs, y = features(column_features, column_predict, data)
xs = row2col(xs)
for preprocessor in preprocessors:
xs = [preprocessor(x) for x in xs]
y = preprocessor(y)
xs = col2row(xs)
return xs, y
def row2col(rows):
cols = [[] for _ in range(len(rows[0]))]
for row in rows:
for index, value in enumerate(row):
cols[index].append(value)
return cols
def col2row(cols):
rows = [[] for _ in range(len(cols[0]))]
for col in cols:
for index, value in enumerate(col):
rows[index].append(value)
return rows
def features(column_features, column_predict, data):
xs = [[v for (k, v) in row.iteritems() if k in column_features] for row in data]
y = [row[column_predict] for row in data]
return xs, y
# Replaces every None in the column with a value linearly interpolated between its
# nearest non-None neighbours.
# If every value in the column is None, this function will fail.
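# Example (added for illustration): interpolate_none([1, None, None, 7]) -> [1, 3, 5, 7]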
def interpolate_none(column):
result = []
none_index = -1
if column[0] is None:
column[0] = next(v for v in column if v is not None)
if column[-1] is None:
column[-1] = next(v for v in reversed(column) if v is not None)
    # pick a converter matching the column's element type
    if isinstance(column[0], int):
        converter = int
    else:
        converter = float
for i, v in enumerate(column):
if v is None:
if none_index == -1:
none_index = i
else:
if none_index != -1:
start_v = result[none_index - 1]
end_v = v
step = (end_v - start_v) / float(i - none_index + 1)
result.extend([converter(start_v + step * place) for place in range(1, i - none_index + 1)])
none_index = -1
result.append(v)
return result
def interpret_column_values(column):
return map(convert_to_value_or_null, column)
def convert_to_value_or_null(value):
try:
value = eval(value)
return value
except NameError:
return None
def export(predictions, ml_name, day, instance):
# prediction_export_location = "predictions"
# if not os.path.isdir(prediction_export_location):
# os.makedirs(prediction_export_location)
# file_location = os.path.join(prediction_export_location, "{0}-{1}.csv".format(ml_name, day))
# data = [{'time_slot': i, 'actual': predictions.actual_values[i], 'forecasted': predictions.prediction_values[i]} for
# i in range(len(predictions.actual_values))]
# fields = ['time_slot', 'forecasted', 'actual']
# with open(file_location, 'w') as export_file:
# writer = csv.DictWriter(export_file, fieldnames=fields, dialect='excel')
# writer.writeheader()
# for d in data:
# writer.writerow(d)
pass
class Prediction:
def __init__(self, prediction_values, actual_values=[], instance=None):
self.prediction_values = prediction_values
self.actual_values = actual_values
self.instance = instance
# returns {quality_metric_name: value}
def evaluate(self):
instance_analysis = self.instance.analyse()
# option 1 is max - min
w_option_1 = combine_lists(instance_analysis.max_load, instance_analysis.min_load, lambda x, y: x - y)
w_option_1 = normalize(w_option_1)
# option 2 is max - exp
w_option_2 = combine_lists(instance_analysis.max_load, instance_analysis.exp_load, lambda x, y: x - y)
w_option_2 = normalize(w_option_2)
# option 3 is exp - min
w_option_3 = combine_lists(instance_analysis.exp_load, instance_analysis.min_load, lambda x, y: x - y)
w_option_3 = normalize(w_option_3)
instance_analysis = instance_analysis.normalize()
evaluation = OrderedDict()
mse = self.mse()
mae = self.mae()
w_mae_min = self.weighted_mae(instance_analysis.min_load)
w_mae_max = self.weighted_mae(instance_analysis.max_load)
w_mae_exp = self.weighted_mae(instance_analysis.exp_load)
w_mae_o1 = self.weighted_mae(w_option_1)
w_mae_o2 = self.weighted_mae(w_option_2)
w_mae_o3 = self.weighted_mae(w_option_3)
spearman = self.spearman_rank_correlation()
w_spearman_min = self.weighted_spearman_rank_correlation(instance_analysis.min_load)
w_spearman_max = self.weighted_spearman_rank_correlation(instance_analysis.max_load)
w_spearman_exp = self.weighted_spearman_rank_correlation(instance_analysis.exp_load)
w_spearman_o1 = self.weighted_spearman_rank_correlation(w_option_1)
w_spearman_o2 = self.weighted_spearman_rank_correlation(w_option_2)
w_spearman_o3 = self.weighted_spearman_rank_correlation(w_option_3)
evaluation['mse'] = mse
evaluation['mae'] = mae
evaluation['w_mae_min'] = w_mae_min
evaluation['w_mae_max'] = w_mae_max
evaluation['w_mae_exp'] = w_mae_exp
evaluation['w_mae_o1'] = w_mae_o1
evaluation['w_mae_o2'] = w_mae_o2
evaluation['w_mae_o3'] = w_mae_o3
evaluation['spearman'] = spearman
evaluation['w_spearman_min'] = w_spearman_min
evaluation['w_spearman_max'] = w_spearman_max
evaluation['w_spearman_exp'] = w_spearman_exp
evaluation['w_spearman_o1'] = w_spearman_o1
evaluation['w_spearman_o2'] = w_spearman_o2
evaluation['w_spearman_o3'] = w_spearman_o3
return evaluation
def mse(self):
predictions = range(len(self.prediction_values))
return sum([(self.prediction_values[i] - self.actual_values[i]) ** 2 for i in predictions]) / len(predictions)
def mae(self):
predictions = range(len(self.prediction_values))
return sum([abs(self.prediction_values[i] - self.actual_values[i]) for i in predictions]) / len(predictions)
def weighted_mae(self, weights):
predictions = range(len(self.prediction_values))
return sum([weights[i] * abs(self.prediction_values[i] - self.actual_values[i]) for i in
predictions]) / len(predictions)
# https://en.wikipedia.org/wiki/Spearman's_rank_correlation_coefficient
def spearman_rank_correlation(self):
n = len(self.prediction_values)
sorted_predictions = sorted(self.prediction_values)
sorted_actuals = sorted(self.actual_values)
summation = sum(
[(sorted_predictions.index(self.prediction_values[i]) - sorted_actuals.index(self.actual_values[i])) ** 2
for i in range(n)])
n = float(n)
spearman_rank_corr = 1 - ((6 * summation) / (n * (n ** 2 - 1)))
return spearman_rank_corr
# 1 - 6 * sum( w * d ** 2) / n * (n ** 2 - 1)
def weighted_spearman_rank_correlation(self, weights):
n = len(self.prediction_values)
sorted_predictions = sorted(self.prediction_values)
sorted_actuals = sorted(self.actual_values)
summation = sum(
[weights[i] * (sorted_predictions.index(self.prediction_values[i]) - sorted_actuals.index(self.actual_values[i])) ** 2
for i in range(n)])
n = float(n)
spearman_rank_corr = 1 - ((6 * summation) / (n * (n ** 2 - 1)))
return spearman_rank_corr
def combine_lists(a, b, f):
c = []
for i in range(len(a)):
c.append(f(a[i], b[i]))
return c
def normalize(a):
s = sum(a)
return map(lambda x: x/s, a)
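# --- Small usage sketch (added for illustration) ---
# The rank-based metrics can be exercised without a scheduling instance; the
# numbers below are made up purely to show the call pattern.
if __name__ == "__main__":
    p = Prediction([2.0, 1.0, 3.0], actual_values=[1.0, 2.0, 3.0])
    print("mae = %.3f" % p.mae())
    print("spearman = %.3f" % p.spearman_rank_correlation())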
|
the-stack_0_20680 | from __future__ import print_function
import os
import sys
from flask import Blueprint, send_from_directory
from flask import render_template, flash, redirect, url_for, request
from config import Config
from app import db
from app.Controller.forms import PostForm, EditForm, EditPasswordForm, ApplyForm, AddFieldForm, RemoveFieldForm, SortForm
from flask_login import current_user, login_user, logout_user, login_required
from app.Controller.auth_forms import LoginForm, RegistrationForm
from app.Model.models import Post, Application, User, Field
bp_routes = Blueprint('routes', __name__)
bp_routes.template_folder = Config.TEMPLATE_FOLDER #'..\\View\\templates'
@bp_routes.route('/', methods=['GET','POST'])
@bp_routes.route('/index/', methods=['GET','POST'])
@login_required
def index():
posts = Post.query.order_by(Post.timestamp.desc())
postscount = Post.query.count()
print(postscount)
sform = SortForm()
if sform.validate_on_submit():
order = sform.select.data
if order == '0':
for post in posts.all():
cnt = 0
for pfield in post.ResearchFields:
for ufield in current_user.Fields:
if ufield.id == pfield.id:
cnt+=1
break
post.sharedFieldCount = cnt
print(cnt)
db.session.add(post)
db.session.commit()
posts = Post.query.order_by(Post.sharedFieldCount.desc())
elif order == '1':
posts = Post.query.order_by(Post.title.asc())
elif order == '2':
posts = Post.query.order_by(Post.timecommitment.desc())
return render_template('index.html', title="WSU Undergraduate Research Portal", posts=posts.all(), User = User, postscount = postscount, sortForm = sform)
@bp_routes.route('/post/', methods=['POST','GET'])
@login_required
def post():
# only faculty can create new research positions
if current_user.faculty is True:
# handle the form submission
sform = PostForm()
if request.method == 'POST':
if sform.validate_on_submit():
newPost = Post(title = sform.title.data, description = sform.description.data, user_id = current_user.id,
startdate = sform.startdate.data, enddate = sform.enddate.data, timecommitment = sform.timecommitment.data,
qualifications = sform.qualifications.data)
for ResearchFields in sform.ResearchFields.data:
newPost.ResearchFields.append(ResearchFields)
print(newPost)
db.session.add(newPost)
db.session.commit()
flash('New Post ' + newPost.title + " is posted")
return redirect(url_for('routes.index'))
pass
return render_template('create.html', form = sform)
flash('Error: No faculty permissions discovered')
return redirect(url_for('routes.index'))
@bp_routes.route('/display_profile/<user_id>', methods=['GET'])
@login_required
def display_profile(user_id):
user = User.query.get_or_404(user_id)
# cant view profile if the current user isn't the profile being accessed or isnt a faculty
if (user != current_user) and (current_user.faculty is False):
flash("You don't have permission to view another user's profile")
return redirect(url_for('routes.index'))
return render_template('display_profile.html', title='Display Profile', user = user)
@bp_routes.route('/edit_profile/', methods=['GET', 'POST'])
@login_required
def edit_profile():
eform = EditForm()
if request.method == 'POST':
# handle the form submission
if eform.validate_on_submit():
# clearing the relationships
current_user.remove_languages()
current_user.remove_fields()
current_user.firstname = eform.firstname.data
current_user.lastname = eform.lastname.data
current_user.phone = eform.phone.data
current_user.major = eform.major.data
current_user.gpa = eform.gpa.data
current_user.graduationDate = eform.graduationDate.data
current_user.experience = eform.experience.data
current_user.electiveCourses = eform.electives.data
for language in eform.languages.data:
current_user.LanguagesKnown.append(language)
for field in eform.fields.data:
current_user.Fields.append(field)
db.session.add(current_user)
db.session.commit()
flash("Your changes have been saved")
return redirect(url_for('routes.display_profile', user_id = current_user.id))
pass
elif request.method == 'GET':
# populate the user data from DB
eform.firstname.data = current_user.firstname
eform.lastname.data = current_user.lastname
eform.phone.data = current_user.phone
eform.major.data = current_user.major
eform.gpa.data = current_user.gpa
eform.graduationDate.data = current_user.graduationDate
eform.experience.data = current_user.experience
eform.electives.data = current_user.electiveCourses
eform.languages.data = current_user.LanguagesKnown
eform.fields.data = current_user.Fields
else:
pass
return render_template('edit_profile.html', title='Edit Profile', form = eform)
@bp_routes.route('/edit_password/', methods=['GET', 'POST'])
@login_required
def edit_password():
pform = EditPasswordForm()
if request.method == 'POST':
if pform.validate_on_submit():
current_user.set_password(pform.password.data)
db.session.add(current_user)
db.session.commit()
flash("Your password has been changed")
            return redirect(url_for('routes.display_profile', user_id=current_user.id))
pass
return render_template('edit_password.html', title='Edit Password', form = pform)
@bp_routes.route('/apply/<post_id>', methods=['POST'])
@login_required
def apply(post_id):
aform = ApplyForm()
post = Post.query.filter_by(id=post_id).first()
if request.method == 'POST':
if aform.validate_on_submit():
newApp = Application(userid=current_user.id, preferredname = aform.preferredname.data, description=aform.description.data, referenceName=aform.refName.data, referenceEmail=aform.refEmail.data)
post.Applications.append(newApp)
db.session.add(newApp)
db.session.commit()
flash("You have successfully applied to this position")
return redirect(url_for('routes.index'))
pass
return render_template('apply.html', title='Apply', form = aform)
@bp_routes.route('/delete/<post_id>', methods=['POST'])
@login_required
def delete(post_id):
# only faculty can create delete their research positions
if current_user.faculty is True:
currentPost=Post.query.filter_by(id=post_id).first()
if currentPost is None:
flash('Post with id "{}" not found.'.format(post_id))
return redirect(url_for('routes.index'))
PostTitle = currentPost.title
        # iterate over copies so that removing items does not skip elements
        for t in list(currentPost.ResearchFields):
            currentPost.ResearchFields.remove(t)
        for t in list(currentPost.Applications):
            currentPost.Applications.remove(t)
db.session.delete(currentPost)
db.session.commit()
flash('Post "{}" has been successfully deleted'.format(PostTitle))
return redirect(url_for('routes.index'))
flash('Error: No faculty permissions discovered')
return redirect(url_for('routes.index'))
@bp_routes.route('/myposts/', methods=['POST','GET'])
@login_required
def myposts():
if current_user.faculty is True:
# only faculty can view their own posts
posts = Post.query.filter_by(user_id=current_user.id)
return render_template('index.html', title="My Research Postings", posts=posts.all(), User = User)
flash('Error: No faculty permissions discovered')
return redirect(url_for('routes.index'))
@bp_routes.route('/becoming_hired/<app_id>', methods=['GET'])
@login_required
def becoming_hired(app_id):
# TODO: Make method post only
# if request.method == 'POST':
# cant view profile if the current user isn't the profile being accessed or isn't a faculty
if current_user.faculty is False:
flash("You don't have permission to update student's status")
return redirect(url_for('routes.index'))
app = Application.query.filter_by(id=app_id).first()
user = User.query.get_or_404(app.userid)
if user.hired is True:
flash("This student has already been hired for a position.")
return redirect(url_for('routes.index'))
app.hired = True
app.approved = False
app.nothired = False
user.hired = True
db.session.add(user)
db.session.add(app)
db.session.commit()
return redirect(url_for('routes.index'))
@bp_routes.route('/becoming_approved/<app_id>', methods=['GET'])
@login_required
def becoming_approved(app_id):
# TODO: Make method post only
# if request.method == 'POST':
# cant view profile if the current user isn't the profile being accessed or isn't a faculty
if current_user.faculty is False:
flash("You don't have permission to update student's status")
return redirect(url_for('routes.index'))
app = Application.query.filter_by(id=app_id).first()
user = User.query.get_or_404(app.userid)
if user.hired is True:
flash("This student has already been hired for a position.")
return redirect(url_for('routes.index'))
app.approved = True
app.nothired = False
app.hired = False
user.approved = True
db.session.add(user)
db.session.add(app)
db.session.commit()
return redirect(url_for('routes.index'))
@bp_routes.route('/not_hired/<app_id>', methods=['GET'])
@login_required
def not_hired(app_id):
# TODO: Make method post only
# if request.method == 'POST':
# cant view profile if the current user isn't the profile being accessed or isn't a faculty
if current_user.faculty is False:
flash("You don't have permission to update student's status")
return redirect(url_for('routes.index'))
app = Application.query.filter_by(id=app_id).first()
user = User.query.get_or_404(app.userid)
if user.hired is True and app.hired is True:
user.hired = False
app.nothired = True
app.hired = False
app.approved = False
db.session.add(app)
user.approved = False
db.session.add(user)
db.session.commit()
for app in Application.query.filter_by(userid=user.id):
if app.approved == True:
user.approved = True
db.session.add(user)
db.session.commit()
return redirect(url_for('routes.index'))
@bp_routes.route('/make_faculty/<user_id>', methods=['POST','GET'])
@login_required
def make_faculty(user_id):
if current_user.admin is True:
# TODO: Make method post only
# if request.method == 'POST':
# only admin can update users to be faculty
user = User.query.get_or_404(user_id)
if (user.faculty is True):
user.faculty = False
else:
user.faculty = True
db.session.add(user)
db.session.commit()
flash("User Status has been updated")
return redirect(url_for('routes.show_faculty')) #html for admin page
flash('Error: No admin permissions discovered')
return redirect(url_for('routes.index'))
@bp_routes.route('/show_faculty/', methods=['GET'])
@login_required
def show_faculty():
if current_user.admin is True:
if request.method == 'GET':
users = User.query.all()
for user in users:
print(user.username)
return render_template('show_faculty.html', users=users, User = User) #html for admin page
flash('Error: No admin permissions discovered')
return redirect(url_for('routes.index'))
@bp_routes.route('/add_field/', methods=['GET', 'POST'])
@login_required
def add_field():
if current_user.admin is True:
aform = AddFieldForm()
if request.method == 'POST':
# handle the form submission
if aform.validate_on_submit():
newField = Field(name=aform.newfieldname.data)
db.session.add(newField)
db.session.commit()
flash("Field has been added")
return redirect(url_for('routes.index')) #html for admin page
return render_template('add_fields.html', title='Edit Fields', form = aform) #html for admin page
flash('Error: No admin permissions discovered')
return redirect(url_for('routes.index'))
@bp_routes.route('/remove_field/', methods=['GET', 'POST'])
@login_required
def remove_field():
if current_user.admin is True:
rform = RemoveFieldForm()
if request.method == 'POST':
# handle the form submission
if rform.validate_on_submit():
for field in rform.ResearchFields.data:
# remove field from database
# User.query.filter_by(id=123).delete()
# Field.query.filter_by(name=field.name).delete()
db.session.delete(field)
# db.session.add(newField)
db.session.commit()
flash("Field(s) have been removed")
return redirect(url_for('routes.index')) #html for admin page
return render_template('remove_fields.html', title='Edit Fields', form = rform) #html for admin page
flash('Error: No admin permissions discovered')
return redirect(url_for('routes.index'))
@bp_routes.route('/cancelApplication/<application_id>/', methods=['POST','DELETE'])
@login_required
def cancelApplication(application_id):
if current_user.faculty is False:
application = Application.query.filter_by(id=application_id).first()
if application.approved == True:
flash('You had been approved for an interview. Please inform the professor that you have canceled your application!')
db.session.delete(application)
db.session.commit()
flash('Application has been canceled')
return redirect(url_for('routes.index'))
@bp_routes.route('/favicon.ico')
def favicon():
path_list=[bp_routes.root_path,os.pardir,"View","static","img"]
print(bp_routes.root_path)
print(os.path.join(*path_list))
return send_from_directory(os.path.join(*path_list),
'favicon.ico',mimetype='image/vnd.microsoft.icon')
|
the-stack_0_20683 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Nipype : Neuroimaging in Python pipelines and interfaces package.
Nipype intends to create python interfaces to other neuroimaging
packages and create an API for specifying a full analysis pipeline in
python.
Much of the machinery at the beginning of this file has been copied over from
nibabel denoted by ## START - COPIED FROM NIBABEL and a corresponding ## END
"""
# Build helper
import os
from os.path import join as pjoin
# Commit hash writing, and dependency checking
from setuptools.command.build_py import build_py
class BuildWithCommitInfoCommand(build_py):
"""Return extended build command class for recording commit
The extended command tries to run git to find the current commit, getting
the empty string if it fails. It then writes the commit hash into a file
in the `pkg_dir` path, named ``COMMIT_INFO.txt``.
In due course this information can be used by the package after it is
installed, to tell you what commit it was installed from if known.
To make use of this system, you need a package with a COMMIT_INFO.txt file
e.g. ``myproject/COMMIT_INFO.txt`` - that might well look like this::
# This is an ini file that may contain information about the code state
[commit hash]
# The line below may contain a valid hash if it has been substituted
# during 'git archive'
archive_subst_hash=$Format:%h$
# This line may be modified by the install process
install_hash=
The COMMIT_INFO file above is also designed to be used with git
substitution - so you probably also want a ``.gitattributes`` file in the
root directory of your working tree that contains something like this::
myproject/COMMIT_INFO.txt export-subst
That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git
archive`` - useful in case someone makes such an archive - for example with
via the github 'download source' button.
Although all the above will work as is, you might consider having something
like a ``get_info()`` function in your package to display the commit
information at the terminal. See the ``pkg_info.py`` module in the nipy
package for an example.
"""
def run(self):
import subprocess
import configparser
build_py.run(self)
proc = subprocess.Popen(
"git rev-parse --short HEAD",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
repo_commit = proc.communicate()[0].decode()
# We write the installation commit even if it's empty
cfg_parser = configparser.RawConfigParser()
cfg_parser.read(pjoin("nipype", "COMMIT_INFO.txt"))
cfg_parser.set("commit hash", "install_hash", repo_commit.strip())
out_pth = pjoin(self.build_lib, "nipype", "COMMIT_INFO.txt")
cfg_parser.write(open(out_pth, "wt"))
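# A minimal sketch of the reader side mentioned in the docstring above (the
# helper name is illustrative only; see the ``pkg_info.py`` reference in the
# docstring for a real implementation): it parses an installed COMMIT_INFO.txt
# and returns whichever commit hash field was filled in.
def _example_get_commit_info(pkg_path):
    import configparser
    parser = configparser.RawConfigParser()
    parser.read(os.path.join(pkg_path, "COMMIT_INFO.txt"))
    install = parser.get("commit hash", "install_hash")
    archive = parser.get("commit hash", "archive_subst_hash")
    return install or archive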
def main():
from setuptools import setup, find_packages
thispath, _ = os.path.split(__file__)
testdatafiles = [
pjoin("testing", "data", val)
for val in os.listdir(pjoin(thispath, "nipype", "testing", "data"))
if not os.path.isdir(pjoin(thispath, "nipype", "testing", "data", val))
]
testdatafiles += [
pjoin("testing", "data", "dicomdir", "*"),
pjoin("testing", "data", "bedpostxout", "*"),
pjoin("testing", "data", "tbss_dir", "*"),
pjoin("testing", "data", "brukerdir", "fid"),
pjoin("testing", "data", "brukerdir", "pdata", "1", "*"),
pjoin("testing", "data", "ds005", "*"),
pjoin("testing", "data", "realign_json.json"),
pjoin("workflows", "data", "*"),
pjoin("pipeline", "engine", "report_template.html"),
pjoin("external", "d3.js"),
pjoin("interfaces", "fsl", "model_templates", "*"),
pjoin("interfaces", "tests", "use_resources"),
"pytest.ini",
"conftest.py",
]
# Python 3: use a locals dictionary
# http://stackoverflow.com/a/1463370/6820620
ldict = locals()
# Get version and release info, which is all stored in nipype/info.py
ver_file = os.path.join(thispath, "nipype", "info.py")
with open(ver_file) as infofile:
exec(infofile.read(), globals(), ldict)
setup(
name=ldict["NAME"],
maintainer=ldict["MAINTAINER"],
maintainer_email=ldict["MAINTAINER_EMAIL"],
description=ldict["DESCRIPTION"],
long_description=ldict["LONG_DESCRIPTION"],
url=ldict["URL"],
download_url=ldict["DOWNLOAD_URL"],
license=ldict["LICENSE"],
classifiers=ldict["CLASSIFIERS"],
author=ldict["AUTHOR"],
author_email=ldict["AUTHOR_EMAIL"],
platforms=ldict["PLATFORMS"],
version=ldict["VERSION"],
python_requires=ldict["PYTHON_REQUIRES"],
install_requires=ldict["REQUIRES"],
provides=ldict["PROVIDES"],
packages=find_packages(),
package_data={"nipype": testdatafiles},
cmdclass={"build_py": BuildWithCommitInfoCommand},
tests_require=ldict["TESTS_REQUIRES"],
zip_safe=False,
extras_require=ldict["EXTRA_REQUIRES"],
entry_points="""
[console_scripts]
nipypecli=nipype.scripts.cli:cli
""",
)
if __name__ == "__main__":
main()
|
the-stack_0_20686 | """
Tests for the game class
"""
import unittest
import numpy as np
from nashpy.algorithms.vertex_enumeration import vertex_enumeration
class TestVertexEnumeration(unittest.TestCase):
"""
Tests for the vertex enumeration algorithm
"""
def test_three_by_two_vertex_enumeration(self):
A = np.array([[3, 3], [2, 5], [0, 6]])
B = np.array([[3, 2], [2, 6], [3, 1]])
expected_equilibria = sorted([(np.array([1, 0, 0]), np.array([1, 0])),
(np.array([0, 1 / 3, 2 / 3]),
np.array([1 / 3, 2 / 3])),
(np.array([4 / 5, 1 / 5, 0]),
np.array([2 / 3, 1 / 3]))],
key=lambda a: list(np.round(a[0], 4)))
equilibria = sorted(vertex_enumeration(A, B),
key=lambda a: list(np.round(a[0], 4)))
for equilibrium, expected_equilibrium in zip(equilibria,
expected_equilibria):
for strategy, expected_strategy in zip(equilibrium,
expected_equilibrium):
self.assertTrue(all(np.isclose(strategy, expected_strategy)))
def test_with_negative_utilities(self):
A = np.array([[1, -1], [-1, 1]])
B = - A
expected_equilibrium = (np.array([ 0.5, 0.5]), np.array([ 0.5, 0.5]))
equilibrium = next(vertex_enumeration(A, B))
for strategy, expected_strategy in zip(equilibrium,
expected_equilibrium):
self.assertTrue(all(np.isclose(strategy, expected_strategy)))
|
the-stack_0_20688 | #!/usr/bin/env python3
import scipy.io
nimat = scipy.io.loadmat("data/models/NI-930101_930630-DEPTH.mat")
simat = scipy.io.loadmat("data/models/SI-930101_930630-DEPTH.mat")
nishape = nimat["Yp"].shape
sishape = simat["Yp"].shape
print("island,i,j,lat,lon,depth")
for i in range(nishape[0]):
for j in range(nishape[1]):
lat = str(nimat["Yp"][i][j])
lng = str(nimat["Xp"][i][j])
depth = str(nimat["Depth_19930101_000000"][i][j])
if depth == "nan":
depth = 0
print(f"NI,{i},{j},{lat},{lng},{depth}")
for i in range(sishape[0]):
for j in range(sishape[1]):
lat = str(simat["Yp"][i][j])
lng = str(simat["Xp"][i][j])
depth = str(simat["Depth_19930101_000000"][i][j])
if depth == "nan":
depth = 0
print(f"SI,{i},{j},{lat},{lng},{depth}")
|
the-stack_0_20689 | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_particl import ParticlTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(ParticlTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [ ['-debug',] for i in range(self.num_nodes)]
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(self.start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"]))
self.nodes.append(self.start_node(1, self.options.tmpdir, ["-debug", "-addressindex"]))
# Nodes 2/3 are used for testing
self.nodes.append(self.start_node(2, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"]))
self.nodes.append(self.start_node(3, self.options.tmpdir, ["-debug", "-addressindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
nodes = self.nodes
# Stop staking
ro = nodes[0].reservebalance(True, 10000000)
ro = nodes[1].reservebalance(True, 10000000)
ro = nodes[2].reservebalance(True, 10000000)
ro = nodes[3].reservebalance(True, 10000000)
ro = nodes[0].extkeyimportmaster("abandon baby cabbage dad eager fabric gadget habit ice kangaroo lab absorb")
assert(ro['account_id'] == 'aaaZf2qnNr5T7PWRmqgmusuu5ACnBcX2ev')
ro = nodes[0].getinfo()
assert(ro['total_balance'] == 100000)
ro = nodes[1].extkeyimportmaster('graine article givre hublot encadrer admirer stipuler capsule acajou paisible soutirer organe')
ro = nodes[2].extkeyimportmaster('sección grito médula hecho pauta posada nueve ebrio bruto buceo baúl mitad')
ro = nodes[3].extkeyimportmaster('けっこん ゆそう へいねつ しあわせ ちまた きつね たんたい むかし たかい のいず こわもて けんこう')
addrs = []
addrs.append(nodes[1].getnewaddress())
addrs.append(nodes[1].getnewaddress())
addrs.append(nodes[1].getnewaddress())
ms1 = nodes[1].addmultisigaddress(2, addrs)
assert(ms1 == 'r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf') # rFHaEuXkYpNUYpMMY3kMkDdayQxpc7ozti
addr1 = nodes[2].getnewaddress()
assert(addr1 == 'pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV') # pcX1WHotKuQwFypDf1ZkJrh81J1DS7DfXd
addr2 = nodes[3].getnewaddress()
assert(addr2 == 'pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK')
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
balance0 = self.nodes[1].getaddressbalance("r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf")
assert_equal(balance0["balance"], 0)
# Check p2pkh and p2sh address indexes
print("Testing p2pkh and p2sh address index...")
txid0 = self.nodes[0].sendtoaddress("pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV", 10)
self.stakeToHeight(1, fSync=False)
txidb0 = self.nodes[0].sendtoaddress("r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf", 10)
self.stakeToHeight(2, fSync=False)
txid1 = self.nodes[0].sendtoaddress("pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV", 15)
self.stakeToHeight(3, fSync=False)
txidb1 = self.nodes[0].sendtoaddress("r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf", 15)
self.stakeToHeight(4, fSync=False)
txid2 = self.nodes[0].sendtoaddress("pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV", 20)
self.stakeToHeight(5, fSync=False)
txidb2 = self.nodes[0].sendtoaddress("r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf", 20)
self.stakeToHeight(6)
txids = self.nodes[1].getaddresstxids("pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV")
assert_equal(len(txids), 3)
assert_equal(txids[0], txid0)
assert_equal(txids[1], txid1)
assert_equal(txids[2], txid2)
txidsb = self.nodes[1].getaddresstxids("r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf")
assert_equal(len(txidsb), 3)
assert_equal(txidsb[0], txidb0)
assert_equal(txidsb[1], txidb1)
assert_equal(txidsb[2], txidb2)
# Check that limiting by height works
print("Testing querying txids by range of block heights..")
# Note start and end parameters must be > 0 to apply
height_txids = self.nodes[1].getaddresstxids({
"addresses": ["r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf"],
"start": 3,
"end": 4
})
assert_equal(len(height_txids), 1)
#assert_equal(height_txids[0], txidb0)
assert_equal(height_txids[0], txidb1)
# Check that multiple addresses works
multitxids = self.nodes[1].getaddresstxids({"addresses": ["r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf", "pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV"]})
assert_equal(len(multitxids), 6)
assert_equal(multitxids[0], txid0)
assert_equal(multitxids[1], txidb0)
assert_equal(multitxids[2], txid1)
assert_equal(multitxids[3], txidb1)
assert_equal(multitxids[4], txid2)
assert_equal(multitxids[5], txidb2)
# Check that balances are correct
balance0 = self.nodes[1].getaddressbalance("r8L81gLiWg46j5EGfZSp2JHmA9hBgLbHuf")
assert_equal(balance0["balance"], 45 * 100000000)
# Check that outputs with the same address will only return one txid
print("Testing for txid uniqueness...")
inputs = []
outputs = {'pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV':1,'pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK':1}
tx = self.nodes[0].createrawtransaction(inputs,outputs)
# modified outputs to go to the same address
#tx = 'a0000000000000020100e1f505000000001976a914e2c470d8005a3d40c6d79eb1907c479c24173ede88ac0100e1f505000000001976a914e2c470d8005a3d40c6d79eb1907c479c24173ede88ac'
tx = 'a0000000000000020100e1f505000000001976a914e317164ad324e5ec2f8b5de080f0cb614042982d88ac0100e1f505000000001976a914e317164ad324e5ec2f8b5de080f0cb614042982d88ac'
txfunded = self.nodes[0].fundrawtransaction(tx)
#ro = self.nodes[0].decoderawtransaction(txfunded['hex'])
#print(json.dumps(ro, indent=4, default=self.jsonDecimal))
txsigned = self.nodes[0].signrawtransaction(txfunded['hex'])
sent_txid = self.nodes[0].sendrawtransaction(txsigned['hex'], True)
self.stakeBlocks(1)
txidsmany = self.nodes[1].getaddresstxids("pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK")
assert_equal(len(txidsmany), 1)
assert_equal(txidsmany[0], sent_txid)
# Check that balances are correct
print("Testing balances...")
balance0 = self.nodes[1].getaddressbalance("pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK")
assert_equal(balance0["balance"], 2 * 100000000)
unspent2 = self.nodes[2].listunspent()
balance0 = self.nodes[1].getaddressbalance("pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV")
assert_equal(balance0["balance"], 45 * 100000000)
inputs = []
outputs = {'pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK':1}
tx = self.nodes[2].createrawtransaction(inputs,outputs)
txfunded = self.nodes[2].fundrawtransaction(tx)
txsigned = self.nodes[2].signrawtransaction(txfunded['hex'])
sent_txid = self.nodes[2].sendrawtransaction(txsigned['hex'], True)
print("sent_txid", sent_txid)
ro = self.nodes[0].decoderawtransaction(txsigned['hex'])
print(json.dumps(ro, indent=4, default=self.jsonDecimal))
self.sync_all()
self.stakeBlocks(1)
txidsmany = self.nodes[1].getaddresstxids("pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK")
print("txidsmany", txidsmany)
assert_equal(len(txidsmany), 2)
assert_equal(txidsmany[1], sent_txid)
balance0 = self.nodes[1].getaddressbalance('pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV')
assert(balance0["balance"] < 45 * 100000000)
# Check that deltas are returned correctly
deltas = self.nodes[1].getaddressdeltas({"addresses": ['pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK'], "start": 1, "end": 200})
balance3 = 0
for delta in deltas:
balance3 += delta["satoshis"]
assert_equal(balance3, 300000000)
assert_equal(deltas[0]["address"], 'pqavEUgLCZeGh8o9sTcCfYVAsrTgnQTUsK')
#assert_equal(deltas[0]["blockindex"], 1)
address2 = 'pqZDE7YNWv5PJWidiaEG8tqfebkd6PNZDV'
# Check that entire range will be queried
deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
assert_equal(len(deltasAll), 4)
# Check that deltas can be returned from range of block heights
deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 3, "end": 3})
assert_equal(len(deltas), 1)
# Check that unspent outputs can be queried
print("Testing utxos...")
utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos), 2)
assert_equal(utxos[0]["satoshis"], 1500000000)
# Check that indexes will be updated with a reorg
print("Testing reorg...")
height_before = self.nodes[1].getblockcount()
best_hash = self.nodes[0].getbestblockhash()
self.nodes[0].invalidateblock(best_hash)
self.nodes[1].invalidateblock(best_hash)
self.nodes[2].invalidateblock(best_hash)
self.nodes[3].invalidateblock(best_hash)
self.sync_all()
assert(self.nodes[1].getblockcount() == height_before - 1)
balance4 = self.nodes[1].getaddressbalance(address2)
assert_equal(balance4['balance'], 4500000000)
utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos2), 3)
assert_equal(utxos2[0]["satoshis"], 1000000000)
# Check sorting of utxos
self.stakeBlocks(1)
txidsort1 = self.nodes[0].sendtoaddress(address2, 50)
self.stakeBlocks(1)
txidsort2 = self.nodes[0].sendtoaddress(address2, 50)
self.stakeBlocks(1)
utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos3), 4)
assert_equal(utxos3[0]["height"], 3)
assert_equal(utxos3[1]["height"], 5)
assert_equal(utxos3[2]["height"], 9)
assert_equal(utxos3[3]["height"], 10)
# Check mempool indexing
print("Testing mempool indexing...")
address3 = nodes[3].getnewaddress()
txidsort1 = self.nodes[2].sendtoaddress(address3, 1)
txidsort2 = self.nodes[2].sendtoaddress(address3, 1)
txidsort3 = self.nodes[2].sendtoaddress(address3, 1)
mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool), 3)
"""
# Check sorting of utxos
self.nodes[2].generate(150)
txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
self.nodes[2].generate(1)
txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
self.nodes[2].generate(1)
self.sync_all()
utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos3), 3)
assert_equal(utxos3[0]["height"], 114)
assert_equal(utxos3[1]["height"], 264)
assert_equal(utxos3[2]["height"], 265)
# Check mempool indexing
print("Testing mempool indexing...")
privKey3 = "7wknRvW5NvcK9NM6vLoK2dZZMqUDD5CX1sFQfG1hgs8YLHniKYdm"
address3 = "pkSCghnT5d1z846K24Mz5iodQPmyFyCWee"
addressHash3 = bytes([170,152,114,181,187,205,181,17,216,158,14,17,170,39,218,115,253,44,63,80])
scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
address4 = "rMncd8ybvLsVPnef7jhG2DtmvQok3EGTKw"
scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
unspent = self.nodes[2].listunspent()
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
amount = int(unspent[0]["amount"] * 100000000 - 100000)
tx.vout = [CTxOut(amount, scriptPubKey3)]
tx.rehash()
signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
time.sleep(2)
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
amount = int(unspent[1]["amount"] * 100000000 - 100000)
tx2.vout = [
CTxOut(int(amount / 4), scriptPubKey3),
CTxOut(int(amount / 4), scriptPubKey3),
CTxOut(int(amount / 4), scriptPubKey4),
CTxOut(int(amount / 4), scriptPubKey4)
]
tx2.rehash()
signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
time.sleep(2)
mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool), 3)
assert_equal(mempool[0]["txid"], memtxid1)
assert_equal(mempool[0]["address"], address3)
assert_equal(mempool[0]["index"], 0)
assert_equal(mempool[1]["txid"], memtxid2)
assert_equal(mempool[1]["index"], 0)
assert_equal(mempool[2]["txid"], memtxid2)
assert_equal(mempool[2]["index"], 1)
blk_hashes = self.nodes[2].generate(1);
self.sync_all();
mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool2), 0)
tx = CTransaction()
tx.vin = [
CTxIn(COutPoint(int(memtxid2, 16), 0)),
CTxIn(COutPoint(int(memtxid2, 16), 1))
]
tx.vout = [CTxOut(int(amount / 2 - 10000), scriptPubKey2)]
tx.rehash()
self.nodes[2].importprivkey(privKey3)
signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
time.sleep(2)
mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool3), 2)
assert_equal(mempool3[0]["prevtxid"], memtxid2)
assert_equal(mempool3[0]["prevout"], 0)
assert_equal(mempool3[1]["prevtxid"], memtxid2)
assert_equal(mempool3[1]["prevout"], 1)
# sending and receiving to the same address
privkey1 = "7rdLWvaivKefhMy7Q7hpAQytkry3zND88nrwwPrUkf2h8w2yY5ww"
address1 = "pnXhQCNov8ezRwL85zX5VHmiCLyexYC5yf"
address1hash = bytes([193,146,191,247,81,175,142,254,193,81,53,212,43,254,237,249,26,111,62,52])
address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
self.nodes[0].sendtoaddress(address1, 10)
self.nodes[0].generate(1)
self.sync_all()
utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
assert_equal(len(utxos), 1)
tx = CTransaction()
tx.vin = [
CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
]
amount = int(utxos[0]["satoshis"] - 1000)
tx.vout = [CTxOut(amount, address1script)]
tx.rehash()
self.nodes[0].importprivkey(privkey1)
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.sync_all()
mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
assert_equal(len(mempool_deltas), 2)
# Include chaininfo in results
print("Testing results with chain info...")
deltas_with_info = self.nodes[1].getaddressdeltas({
"addresses": [address2],
"start": 1,
"end": 200,
"chainInfo": True
})
start_block_hash = self.nodes[1].getblockhash(1);
end_block_hash = self.nodes[1].getblockhash(200);
assert_equal(deltas_with_info["start"]["height"], 1)
assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
assert_equal(deltas_with_info["end"]["height"], 200)
assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
expected_tip_block_hash = self.nodes[1].getblockhash(267);
assert_equal(utxos_with_info["height"], 267)
assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
"""
print("Passed\n")
if __name__ == '__main__':
AddressIndexTest().main()
|
the-stack_0_20691 | """tracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^', include('intake.urls')),
url(r'^auth/', include('uaa_client.urls')),
url(r'^admin/', admin.site.urls),
]
|
the-stack_0_20692 | from datetime import date, datetime, timedelta
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.tseries.tools import to_datetime, normalize_date
from pandas.core.common import ABCSeries, ABCDatetimeIndex
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
import pandas.tslib as tslib
from pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta
import functools
import operator
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd', 'CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'BusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to
# pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def as_datetime(obj):
f = getattr(obj, 'to_pydatetime', None)
if f is not None:
obj = f()
return obj
def apply_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
if other is tslib.NaT:
return tslib.NaT
elif isinstance(other, (timedelta, Tick, DateOffset)):
# timedelta path
return func(self, other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = as_timestamp(other)
tz = getattr(other, 'tzinfo', None)
nano = getattr(other, 'nanosecond', 0)
try:
if self._adjust_dst and isinstance(other, Timestamp):
other = other.tz_localize(None)
result = func(self, other)
if self._adjust_dst:
result = tslib._localize_pydatetime(result, tz)
result = Timestamp(result)
if self.normalize:
result = result.normalize()
# nanosecond may be deleted depending on offset process
if not self.normalize and nano != 0:
if not isinstance(self, Nano) and result.nanosecond != nano:
if result.tz is not None:
# convert to UTC
value = tslib.tz_convert_single(
result.value, 'UTC', result.tz)
else:
value = result.value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
except OutOfBoundsDatetime:
result = func(self, as_datetime(other))
if self.normalize:
# normalize_date returns normal datetime
result = normalize_date(result)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
return result
return wrapper
def apply_index_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
result = func(self, other)
if self.normalize:
result = result.to_period('D').to_timestamp()
return result
return wrapper
def _is_normalized(dt):
if (dt.hour != 0 or dt.minute != 0 or dt.second != 0 or
dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):
return False
return True
# ---------------------------------------------------------------------
# DateOffset
class ApplyTypeError(TypeError):
# sentinel class for catching the apply error to return NotImplemented
pass
class CacheableOffset(object):
_cacheable = True
class DateOffset(object):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
pass in, use of the keyword n is discouraged-- you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
DateOffets work as follows. Each offset specify a set of dates
that conform to the DateOffset. For example, Bday defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not on a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
valid date, first it is moved to a valid date. Thus pseudo code
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
When a date offset is created for a negative number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
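Examples (an illustrative sketch; the exact reprs below are assumptions
and may differ between pandas versions)::

    >>> from pandas import Timestamp
    >>> from pandas.tseries.offsets import BDay
    >>> ts = Timestamp('2014-01-04')   # a Saturday, so not on offset
    >>> ts + BDay(2)                   # Monday, then Tuesday
    Timestamp('2014-01-07 00:00:00')
    >>> BDay().rollforward(ts)         # nearest business day on/after ts
    Timestamp('2014-01-06 00:00:00')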
"""
_cacheable = False
_normalize_cache = True
_kwds_use_relativedelta = (
'years', 'months', 'weeks', 'days',
'year', 'month', 'week', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond'
)
_use_relativedelta = False
_adjust_dst = False
# default for prior pickles
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self._offset, self._use_relativedelta = self._determine_offset()
def _determine_offset(self):
# timedelta is used for sub-daily plural offsets and all singular
# offsets relativedelta is used for plural offsets of daily length or
# more nanosecond(s) are handled by apply_wraps
kwds_no_nanos = dict(
(k, v) for k, v in self.kwds.items()
if k not in ('nanosecond', 'nanoseconds')
)
use_relativedelta = False
if len(kwds_no_nanos) > 0:
if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos):
use_relativedelta = True
offset = relativedelta(**kwds_no_nanos)
else:
# sub-daily offset - use timedelta (tz-aware)
offset = timedelta(**kwds_no_nanos)
else:
offset = timedelta(1)
return offset, use_relativedelta
@apply_wraps
def apply(self, other):
if self._use_relativedelta:
other = as_datetime(other)
if len(self.kwds) > 0:
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None and self._use_relativedelta:
# perform calculation in UTC
other = other.replace(tzinfo=None)
if self.n > 0:
for i in range(self.n):
other = other + self._offset
else:
for i in range(-self.n):
other = other - self._offset
if tzinfo is not None and self._use_relativedelta:
# bring tz back from UTC calculation
other = tslib._localize_pydatetime(other, tzinfo)
return as_timestamp(other)
else:
return other + timedelta(self.n)
@apply_index_wraps
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation
.. versionadded:: 0.17.0
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
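Example (a rough sketch using a plain ``DateOffset``; the shifted index
is described in a comment rather than shown verbatim)::

    >>> import pandas as pd
    >>> idx = pd.date_range('2014-01-01', periods=3, freq='D')
    >>> shifted = pd.DateOffset(months=1).apply_index(idx)
    >>> # shifted now runs from 2014-02-01 to 2014-02-03, element-wise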
"""
if not type(self) is DateOffset:
raise NotImplementedError("DateOffset subclass %s "
"does not have a vectorized "
"implementation"
% (self.__class__.__name__,))
relativedelta_fast = set(['years', 'months', 'weeks',
'days', 'hours', 'minutes',
'seconds', 'microseconds'])
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(self.kwds).issubset(relativedelta_fast)):
months = ((self.kwds.get('years', 0) * 12 +
self.kwds.get('months', 0)) * self.n)
if months:
shifted = tslib.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
weeks = (self.kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + \
i.to_perioddelta('W')
timedelta_kwds = dict((k, v) for k, v in self.kwds.items()
if k in ['days', 'hours', 'minutes',
'seconds', 'microseconds'])
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) %s not able to be "
"applied vectorized" %
(set(self.kwds) - relativedelta_fast),)
def isAnchored(self):
return (self.n == 1)
def copy(self):
return self.__class__(self.n, normalize=self.normalize, **self.kwds)
def _should_cache(self):
return self.isAnchored() and self._cacheable
def _params(self):
all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
if 'holidays' in all_paras and not all_paras['holidays']:
all_paras.pop('holidays')
exclude = ['kwds', 'name', 'normalize', 'calendar']
attrs = [(k, v) for k, v in all_paras.items()
if (k not in exclude) and (k[0] != '_')]
attrs = sorted(set(attrs))
params = tuple([str(self.__class__)] + attrs)
return params
def __repr__(self):
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc', 'normalize'])
attrs = []
for attr in sorted(self.__dict__):
if ((attr == 'kwds' and len(self.kwds) == 0) or
attr.startswith('_')):
continue
elif attr == 'kwds':
kwds_new = {}
for key in self.kwds:
if not hasattr(self, key):
kwds_new[key] = self.kwds[key]
if len(kwds_new) > 0:
attrs.append('='.join((attr, repr(kwds_new))))
else:
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
@property
def name(self):
return self.rule_code
def __eq__(self, other):
if other is None:
return False
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if not isinstance(other, DateOffset):
return False
return self._params() == other._params()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._params())
def __call__(self, other):
return self.apply(other)
def __add__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, datetime):
raise TypeError('Cannot subtract datetime from offset.')
elif type(other) == type(self):
return self.__class__(self.n - other.n, normalize=self.normalize,
**self.kwds)
else: # pragma: no cover
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other - self
return self.__class__(-self.n, normalize=self.normalize,
**self.kwds) + other
def __mul__(self, someInt):
return self.__class__(n=someInt * self.n, normalize=self.normalize,
**self.kwds)
def __rmul__(self, someInt):
return self.__mul__(someInt)
def __neg__(self):
return self.__class__(-self.n, normalize=self.normalize, **self.kwds)
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
# helpers for vectorized offsets
def _beg_apply_index(self, i, freq):
"""Offsets index to beginning of Period frequency"""
off = i.to_perioddelta('D')
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n <= 0:
# when subtracting, dates on start roll to prior
roll = np.where(base_period.to_timestamp() == i - off,
self.n, self.n + 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp()
return base + off
def _end_apply_index(self, i, freq):
"""Offsets index to end of Period frequency"""
off = i.to_perioddelta('D')
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n > 0:
# when adding, dates on end roll to next
roll = np.where(base_period.to_timestamp(how='end') == i - off,
self.n, self.n - 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp(how='end')
return base + off
# way to get around weirdness with rule_code
@property
def _prefix(self):
raise NotImplementedError('Prefix not defined')
@property
def rule_code(self):
return self._prefix
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
return fstr
@property
def nanos(self):
raise ValueError("{0} is a non-fixed frequency".format(self))
class SingleConstructorOffset(DateOffset):
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
raise ValueError("Bad freq suffix %s" % suffix)
return cls()
class BusinessMixin(object):
""" mixin to business types to provide related functions """
# TODO: Combine this with DateOffset by defining a whitelisted set of
# attributes on each object rather than the existing behavior of iterating
# over internal ``__dict__``
def __repr__(self):
className = getattr(self, '_outputName', self.__class__.__name__)
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
return out
def _repr_attrs(self):
if self.offset:
attrs = ['offset=%s' % repr(self.offset)]
else:
attrs = None
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
"""
_prefix = 'B'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
if self.offset:
fstr += self._offset_str()
return fstr
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
n = self.n
if n == 0 and other.weekday() > 4:
n = 1
result = other
# avoid slowness below
if abs(n) > 5:
k = n // 5
result = result + timedelta(7 * k)
if n < 0 and result.weekday() > 4:
n += 1
n -= 5 * k
if n == 0 and result.weekday() > 4:
n -= 1
while n != 0:
k = n // abs(n)
result = result + timedelta(k)
if result.weekday() < 5:
n -= k
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
@apply_index_wraps
def apply_index(self, i):
time = i.to_perioddelta('D')
# to_period rolls forward to next BDay; track and
# reduce n where it does when rolling forward
shifted = (i.to_perioddelta('B') - time).asi8 != 0
if self.n > 0:
roll = np.where(shifted, self.n - 1, self.n)
else:
roll = self.n
return (i.to_period('B') + roll).to_timestamp() + time
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
class BusinessHour(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business hours
.. versionadded:: 0.16.1
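Example (an illustrative sketch; the default business hours of
09:00-17:00 are assumed)::

    >>> from pandas import Timestamp
    >>> from pandas.tseries.offsets import BusinessHour
    >>> Timestamp('2014-08-01 10:00') + BusinessHour(2)   # a Friday
    Timestamp('2014-08-01 12:00:00')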
"""
_prefix = 'BH'
_anchor = 0
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
# must be validated here to equality check
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.start = kwds.get('start', '09:00')
self.end = kwds.get('end', '17:00')
# used for moving to next businessday
if self.n >= 0:
self.next_bday = BusinessDay(n=1)
else:
self.next_bday = BusinessDay(n=-1)
def _validate_time(self, t_input):
from datetime import time as dt_time
import time
if isinstance(t_input, compat.string_types):
try:
t = time.strptime(t_input, '%H:%M')
return dt_time(hour=t.tm_hour, minute=t.tm_min)
except ValueError:
raise ValueError("time data must match '%H:%M' format")
elif isinstance(t_input, dt_time):
if t_input.second != 0 or t_input.microsecond != 0:
raise ValueError(
"time data must be specified only with hour and minute")
return t_input
else:
raise ValueError("time data must be string or datetime.time")
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHour, self)._repr_attrs()
attrs = ['BH=%s-%s' % (self.start.strftime('%H:%M'),
self.end.strftime('%H:%M'))]
out += ': ' + ', '.join(attrs)
return out
def _next_opening_time(self, other):
"""
If n is positive, return tomorrow's business day opening time.
Otherwise yesterday's business day opening time.
Opening time always falls on a BusinessDay; closing time may not if
business hours extend over midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
If n is positive, return yesterday's business day opening time.
Otherwise the previous business day's opening time.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _get_business_hours_by_sec(self):
"""
Return business hours in a day by seconds.
"""
if self._get_daytime_flag():
# create dummy datetime to calculate businesshours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
else:
self.daytime = False
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
@apply_wraps
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec()
if self.n >= 0:
dt = self._prev_opening_time(
dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(
dt) + timedelta(seconds=businesshours)
return dt
@apply_wraps
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt
@apply_wraps
def apply(self, other):
# calculate here because offset is not immutable
daytime = self._get_daytime_flag()
businesshours = self._get_business_hours_by_sec()
bhdelta = timedelta(seconds=businesshours)
if isinstance(other, datetime):
# used for detecting edge condition
nanosecond = getattr(other, 'nanosecond', 0)
# reset timezone and nanosecond
# other may be a Timestamp, thus not use replace
other = datetime(other.year, other.month, other.day,
other.hour, other.minute,
other.second, other.microsecond)
n = self.n
if n >= 0:
if (other.time() == self.end or
not self._onOffset(other, businesshours)):
other = self._next_opening_time(other)
else:
if other.time() == self.start:
# adjustment to move to previous business day
other = other - timedelta(seconds=1)
if not self._onOffset(other, businesshours):
other = self._next_opening_time(other)
other = other + bhdelta
bd, r = divmod(abs(n * 60), businesshours // 60)
if n < 0:
bd, r = -bd, -r
if bd != 0:
skip_bd = BusinessDay(n=bd)
# midnight business hour may not be on BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
else:
other = other + skip_bd
hours, minutes = divmod(r, 60)
result = other + timedelta(hours=hours, minutes=minutes)
# because of previous adjustment, time will be larger than start
if ((daytime and (result.time() < self.start or
self.end < result.time())) or
not daytime and (self.end < result.time() < self.start)):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
# calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
else:
bday_edge = self._next_opening_time(other)
bday_remain = result - bday_edge
result = self._next_opening_time(result) + bhdelta
result += bday_remain
# edge handling
if n >= 0:
if result.time() == self.end:
result = self._next_opening_time(result)
else:
if result.time() == self.start and nanosecond == 0:
# adjustment to move to previous business day
result = self._next_opening_time(
result - timedelta(seconds=1)) + bhdelta
return result
else:
raise ApplyTypeError(
'Only know how to combine business hour with ')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if dt.tzinfo is not None:
dt = datetime(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, dt.microsecond)
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
businesshours = self._get_business_hours_by_sec()
return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
"""
Slight speedups using calculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
if self.n >= 0:
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
span = tslib.tot_seconds(dt - op)
if span <= businesshours:
return True
else:
return False
class CustomBusinessDay(BusinessDay):
"""
**EXPERIMENTAL** DateOffset subclass representing possibly n business days
excluding holidays
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
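Example (an illustrative sketch; the Fri/Sat weekend and the holiday
below are made-up inputs rather than values taken from this module)::

    >>> from pandas import Timestamp
    >>> from pandas.tseries.offsets import CustomBusinessDay
    >>> bday_egypt = CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu',
    ...                                holidays=['2013-05-01'])
    >>> Timestamp('2013-04-30') + 2 * bday_egypt
    Timestamp('2013-05-05 00:00:00')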
"""
_cacheable = False
_prefix = 'C'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
calendar, holidays = self.get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
# CustomBusinessDay instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
self.kwds['weekmask'] = self.weekmask = weekmask
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
def get_calendar(self, weekmask, holidays, calendar):
'''Generate busdaycalendar'''
if isinstance(calendar, np.busdaycalendar):
if not holidays:
holidays = tuple(calendar.holidays)
elif not isinstance(holidays, tuple):
holidays = tuple(holidays)
else:
# trust that calendar.holidays and holidays are
# consistent
pass
return calendar, holidays
if holidays is None:
holidays = []
try:
holidays = holidays + calendar.holidays().tolist()
except AttributeError:
pass
holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
holidays]
holidays = tuple(sorted(holidays))
kwargs = {'weekmask': weekmask}
if holidays:
kwargs['holidays'] = holidays
try:
busdaycalendar = np.busdaycalendar(**kwargs)
except:
# Check we have the required numpy version
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise NotImplementedError(
"CustomBusinessDay requires numpy >= "
"1.7.0. Current version: " + np.__version__)
else:
raise
return busdaycalendar, holidays
def __getstate__(self):
"""Return a pickleable state"""
state = self.__dict__.copy()
del state['calendar']
# we don't want to actually pickle the calendar object
# as it's a np.busdaycalendar; we recreate it on deserialization
try:
state['kwds'].pop('calendar')
except:
pass
return state
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
calendar, holidays = self.get_calendar(weekmask=self.weekmask,
holidays=self.holidays,
calendar=None)
self.kwds['calendar'] = self.calendar = calendar
self.kwds['holidays'] = self.holidays = holidays
self.kwds['weekmask'] = state['weekmask']
@apply_wraps
def apply(self, other):
if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
if isinstance(other, datetime):
date_in = other
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
def apply_index(self, i):
raise NotImplementedError
@staticmethod
def _to_dt64(dt, dtype='datetime64'):
# Currently
# > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
# numpy.datetime64('2013-05-01T02:00:00.000000+0200')
# Thus astype is needed to cast datetime to datetime64[D]
if getattr(dt, 'tzinfo', None) is not None:
i8 = tslib.pydt_to_i8(dt)
dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
dt = Timestamp(dt)
dt = np.datetime64(dt)
if dt.dtype.name != dtype:
dt = dt.astype(dtype)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = self._to_dt64(dt, 'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
@property
def name(self):
if self.isAnchored:
return self.rule_code
else:
return "%s-%s" % (self.rule_code, _int_to_month[self.n])
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
@apply_wraps
def apply(self, other):
n = self.n
_, days_in_month = tslib.monthrange(other.year, other.month)
if other.day != days_in_month:
other = other + relativedelta(months=-1, day=31)
if n <= 0:
n = n + 1
other = other + relativedelta(months=n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
shifted = tslib.shift_months(i.asi8, self.n, 'end')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
days_in_month = tslib.monthrange(dt.year, dt.month)[1]
return dt.day == days_in_month
_prefix = 'M'
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
if other.day > 1 and n <= 0: # then roll forward if n<=0
n += 1
return other + relativedelta(months=n, day=1)
@apply_index_wraps
def apply_index(self, i):
shifted = tslib.shift_months(i.asi8, self.n, 'start')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == 1
_prefix = 'MS'
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
elif n <= 0 and other.day > lastBDay:
n = n + 1
other = other + relativedelta(months=n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
_prefix = 'BM'
class BusinessMonthBegin(MonthOffset):
"""DateOffset of one business month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
if other.day > first and n <= 0:
# as if rolled forward already
n += 1
elif other.day < first and n > 0:
other = other + timedelta(days=first - other.day)
n -= 1
other = other + relativedelta(months=n)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
other.hour, other.minute,
other.second, other.microsecond)
return result
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
first_weekday, _ = tslib.monthrange(dt.year, dt.month)
if first_weekday == 5:
return dt.day == 3
elif first_weekday == 6:
return dt.day == 2
else:
return dt.day == 1
_prefix = 'BMS'
class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
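Example (an illustrative sketch with the default Mon-Fri calendar)::

    >>> from pandas import Timestamp
    >>> from pandas.tseries.offsets import CustomBusinessMonthEnd
    >>> Timestamp('2014-01-15') + CustomBusinessMonthEnd()
    Timestamp('2014-01-31 00:00:00')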
"""
_cacheable = False
_prefix = 'CBM'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self, other):
n = self.n
# First move to month offset
cur_mend = self.m_offset.rollforward(other)
# Find this custom month offset
cur_cmend = self.cbday.rollback(cur_mend)
# handle zero case. arbitrarily rollforward
if n == 0 and other != cur_cmend:
n += 1
if other < cur_cmend and n >= 1:
n -= 1
elif other > cur_cmend and n <= -1:
n += 1
new = cur_mend + n * self.m_offset
result = self.cbday.rollback(new)
return result
class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBMS'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self, other):
n = self.n
dt_in = other
# First move to month offset
cur_mbegin = self.m_offset.rollback(dt_in)
# Find this custom month offset
cur_cmbegin = self.cbday.rollforward(cur_mbegin)
# handle zero case. arbitrarily rollforward
if n == 0 and dt_in != cur_cmbegin:
n += 1
if dt_in > cur_cmbegin and n <= -1:
n += 1
elif dt_in < cur_cmbegin and n >= 1:
n -= 1
new = cur_mbegin + n * self.m_offset
result = self.cbday.rollforward(new)
return result
class Week(DateOffset):
"""
Weekly offset
Parameters
----------
weekday : int, default None
Always generate specific day of week. 0 for Monday
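Example (an illustrative sketch)::

    >>> from pandas import Timestamp
    >>> from pandas.tseries.offsets import Week
    >>> Timestamp('2014-01-01') + Week(weekday=0)   # roll to next Monday
    Timestamp('2014-01-06 00:00:00')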
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds.get('weekday', None)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self._inc = timedelta(weeks=1)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@apply_wraps
def apply(self, other):
base = other
if self.weekday is None:
return other + self.n * self._inc
if self.n > 0:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
k = k - 1
other = other
for i in range(k):
other = other + self._inc
else:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
for i in range(-k):
other = other - self._inc
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
@apply_index_wraps
def apply_index(self, i):
if self.weekday is None:
return ((i.to_period('W') + self.n).to_timestamp() +
i.to_perioddelta('W'))
else:
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() == self.weekday
_prefix = 'W'
@property
def rule_code(self):
suffix = ''
if self.weekday is not None:
suffix = '-%s' % (_int_to_weekday[self.weekday])
return self._prefix + suffix
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
weekday = None
else:
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
class WeekDay(object):
MON = 0
TUE = 1
WED = 2
THU = 3
FRI = 4
SAT = 5
SUN = 6
_int_to_weekday = {
WeekDay.MON: 'MON',
WeekDay.TUE: 'TUE',
WeekDay.WED: 'WED',
WeekDay.THU: 'THU',
WeekDay.FRI: 'FRI',
WeekDay.SAT: 'SAT',
WeekDay.SUN: 'SUN'
}
_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())
class WeekOfMonth(DateOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
Parameters
----------
n : int
week : {0, 1, 2, 3, ...}
0 is 1st week of month, 1 2nd week, etc.
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
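Example (an illustrative sketch; "the 2nd Tuesday of each month")::

    >>> from pandas import Timestamp
    >>> from pandas.tseries.offsets import WeekOfMonth
    >>> Timestamp('2014-01-01') + WeekOfMonth(week=1, weekday=1)
    Timestamp('2014-01-14 00:00:00')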
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
self.week = kwds['week']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
if self.week < 0 or self.week > 3:
raise ValueError('Week must be 0<=day<=3, got %d' %
self.week)
self.kwds = kwds
@apply_wraps
def apply(self, other):
base = other
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
other = self.getOffsetOfMonth(
other + relativedelta(months=months, day=1))
other = datetime(other.year, other.month, other.day, base.hour,
base.minute, base.second, base.microsecond)
return other
def getOffsetOfMonth(self, dt):
w = Week(weekday=self.weekday)
d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
d = w.rollforward(d)
for i in range(self.week):
d = w.apply(d)
return d
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
return d == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%d%s' % (self._prefix, self.week + 1,
_int_to_weekday.get(self.weekday, ''))
_prefix = 'WOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
weekday = _weekday_to_int[suffix[1:]]
return cls(week=week, weekday=weekday)
class LastWeekOfMonth(DateOffset):
"""
Describes monthly dates in last week of month like "the last Tuesday of
each month"
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
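Example (an illustrative sketch; "the last Friday of each month")::

    >>> from pandas import Timestamp
    >>> from pandas.tseries.offsets import LastWeekOfMonth
    >>> Timestamp('2014-01-01') + LastWeekOfMonth(weekday=4)
    Timestamp('2014-01-31 00:00:00')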
"""
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self.kwds = kwds
@apply_wraps
def apply(self, other):
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
return self.getOffsetOfMonth(
other + relativedelta(months=months, day=1))
def getOffsetOfMonth(self, dt):
m = MonthEnd()
d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,
dt.second, dt.microsecond, tzinfo=dt.tzinfo)
eom = m.rollforward(d)
w = Week(weekday=self.weekday)
return w.rollback(eom)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))
_prefix = 'LWOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
class QuarterOffset(DateOffset):
"""Quarter representation - doesn't call super"""
#: default month for __init__
_default_startingMonth = None
#: default month in _from_name
_from_name_startingMonth = None
_adjust_dst = True
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth',
self._default_startingMonth)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['startingMonth'] = _month_to_int[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_default_startingMonth = 3
# 'BQ'
_from_name_startingMonth = 12
_prefix = 'BQ'
@apply_wraps
def apply(self, other):
n = self.n
base = other
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
n = n - 1
elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
n = n + 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
other = tslib._localize_pydatetime(other, base.tzinfo)
if other.weekday() > 4:
other = other - BDay()
return other
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return BMonthEnd().onOffset(dt) and modMonth == 0
_int_to_month = tslib._MONTH_ALIASES
_month_to_int = dict((v, k) for k, v in _int_to_month.items())
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
_outputName = "BusinessQuarterBegin"
# I suspect this is wrong for *all* of them.
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'BQS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
monthsSince = monthsSince - 3
# roll forward if on same month later than first bday
if n <= 0 and (monthsSince == 0 and other.day > first):
n = n + 1
# pretend to roll back if on same month but before firstbday
elif n > 0 and (monthsSince == 0 and other.day < first):
n = n - 1
# get the first bday for result
other = other + relativedelta(months=3 * n - monthsSince)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
other.hour, other.minute, other.second,
other.microsecond)
return result
class QuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
"""
_outputName = 'QuarterEnd'
_default_startingMonth = 3
_prefix = 'Q'
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth', 3)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
n = n - 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return MonthEnd().onOffset(dt) and modMonth == 0
class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0:
# make sure you roll forward, so negate
monthsSince = monthsSince - 3
if n <= 0 and (monthsSince == 0 and other.day > 1):
# after start, so come back an extra period as if rolled forward
n = n + 1
other = other + relativedelta(months=3 * n - monthsSince, day=1)
return other
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
# freq_month = self.startingMonth
freqstr = 'Q-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.month = kwds.get('month', self._default_month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
DateOffset.__init__(self, n=n, normalize=normalize, **kwds)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['month'] = _month_to_int[suffix]
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.month])
class BYearEnd(YearOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_default_month = 12
_prefix = 'BA'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
lastBDay = (days_in_month -
max(((wkday + days_in_month - 1) % 7) - 4, 0))
years = n
if n > 0:
if (other.month < self.month or
(other.month == self.month and other.day < lastBDay)):
years -= 1
elif n <= 0:
if (other.month > self.month or
(other.month == self.month and other.day > lastBDay)):
years += 1
other = other + relativedelta(years=years)
_, days_in_month = tslib.monthrange(other.year, self.month)
result = datetime(other.year, self.month, days_in_month,
other.hour, other.minute, other.second,
other.microsecond)
if result.weekday() > 4:
result = result - BDay()
return result
class BYearBegin(YearOffset):
"""DateOffset increments between business year begin dates"""
_outputName = 'BusinessYearBegin'
_default_month = 1
_prefix = 'BAS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
years = n
if n > 0: # roll back first for positive n
if (other.month < self.month or
(other.month == self.month and other.day < first)):
years -= 1
elif n <= 0: # roll forward
if (other.month > self.month or
(other.month == self.month and other.day > first)):
years += 1
# set first bday for result
other = other + relativedelta(years=years)
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
return datetime(other.year, self.month, first, other.hour,
other.minute, other.second, other.microsecond)
class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
@apply_wraps
def apply(self, other):
def _increment(date):
if date.month == self.month:
_, days_in_month = tslib.monthrange(date.year, self.month)
if date.day != days_in_month:
year = date.year
else:
year = date.year + 1
elif date.month < self.month:
year = date.year
else:
year = date.year + 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _decrement(date):
year = date.year if date.month > self.month else date.year - 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _rollf(date):
if date.month != self.month or\
date.day < tslib.monthrange(date.year, date.month)[1]:
date = _increment(date)
return date
n = self.n
result = other
if n > 0:
while n > 0:
result = _increment(result)
n -= 1
elif n < 0:
while n < 0:
result = _decrement(result)
n += 1
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
# convert month anchor to annual period tuple
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
wkday, days_in_month = tslib.monthrange(dt.year, self.month)
return self.month == dt.month and dt.day == days_in_month
class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
@apply_wraps
def apply(self, other):
def _increment(date, n):
year = date.year + n - 1
if date.month >= self.month:
year += 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _decrement(date, n):
year = date.year + n + 1
if date.month < self.month or (date.month == self.month and
date.day == 1):
year -= 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _rollf(date):
if (date.month != self.month) or date.day > 1:
date = _increment(date, 1)
return date
n = self.n
result = other
if n > 0:
result = _increment(result, n)
elif n < 0:
result = _decrement(result, n)
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.month == 1 else self.month - 1
freqstr = 'A-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == 1
class FY5253(DateOffset):
"""
Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and parking industry.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'RE'
_suffix_prefix_last = 'L'
_suffix_prefix_nearest = 'N'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds['startingMonth']
self.weekday = kwds["weekday"]
self.variation = kwds["variation"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
raise ValueError('%s is not a valid variation' % self.variation)
if self.variation == "nearest":
weekday_offset = weekday(self.weekday)
self._rd_forward = relativedelta(weekday=weekday_offset)
self._rd_backward = relativedelta(weekday=weekday_offset(-1))
else:
self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
def isAnchored(self):
return self.n == 1 \
and self.startingMonth is not None \
and self.weekday is not None
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
year_end = self.get_year_end(dt)
if self.variation == "nearest":
# We have to check the year end of "this" cal year AND the previous
return year_end == dt or \
self.get_year_end(dt - relativedelta(months=1)) == dt
else:
return year_end == dt
@apply_wraps
def apply(self, other):
n = self.n
prev_year = self.get_year_end(
datetime(other.year - 1, self.startingMonth, 1))
cur_year = self.get_year_end(
datetime(other.year, self.startingMonth, 1))
next_year = self.get_year_end(
datetime(other.year + 1, self.startingMonth, 1))
prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo)
cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo)
next_year = tslib._localize_pydatetime(next_year, other.tzinfo)
if n > 0:
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other < prev_year:
year = other.year - 1
n -= 1
elif other < cur_year:
year = other.year
n -= 1
elif other < next_year:
year = other.year + 1
n -= 1
else:
assert False
result = self.get_year_end(
datetime(year + n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second,
other.microsecond)
return result
else:
n = -n
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other > next_year:
year = other.year + 1
n -= 1
elif other > cur_year:
year = other.year
n -= 1
elif other > prev_year:
year = other.year - 1
n -= 1
else:
assert False
result = self.get_year_end(
datetime(year - n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second,
other.microsecond)
return result
def get_year_end(self, dt):
if self.variation == "nearest":
return self._get_year_end_nearest(dt)
else:
return self._get_year_end_last(dt)
def get_target_month_end(self, dt):
target_month = datetime(
dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
next_month_first_of = target_month + relativedelta(months=+1)
return next_month_first_of + relativedelta(days=-1)
def _get_year_end_nearest(self, dt):
target_date = self.get_target_month_end(dt)
if target_date.weekday() == self.weekday:
return target_date
else:
forward = target_date + self._rd_forward
backward = target_date + self._rd_backward
if forward - target_date < target_date - backward:
return forward
else:
return backward
def _get_year_end_last(self, dt):
current_year = datetime(
dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
return current_year + self._offset_lwom
@property
def rule_code(self):
suffix = self.get_rule_code_suffix()
return "%s-%s" % (self._get_prefix(), suffix)
def _get_prefix(self):
return self._prefix
def _get_suffix_prefix(self):
if self.variation == "nearest":
return self._suffix_prefix_nearest
else:
return self._suffix_prefix_last
def get_rule_code_suffix(self):
return '%s-%s-%s' % (self._get_suffix_prefix(),
_int_to_month[self.startingMonth],
_int_to_weekday[self.weekday])
@classmethod
    def _parse_suffix(cls, variation_code, startingMonth_code, weekday_code):
        if variation_code == "N":
            variation = "nearest"
        elif variation_code == "L":
            variation = "last"
        else:
            raise ValueError(
                "Unable to parse variation_code: %s" % (variation_code,))
startingMonth = _month_to_int[startingMonth_code]
weekday = _weekday_to_int[weekday_code]
return {
"weekday": weekday,
"startingMonth": startingMonth,
"variation": variation,
}
@classmethod
def _from_name(cls, *args):
return cls(**cls._parse_suffix(*args))
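# Illustrative usage sketch (not part of the original module): a fiscal year
# that ends on the Saturday closest to the end of December would be
# FY5253(weekday=5, startingMonth=12, variation="nearest"); with
# variation="last" it would instead end on the last Saturday of December.
#   fy = FY5253(weekday=5, startingMonth=12, variation="nearest")
#   fy.get_year_end(datetime(2013, 1, 1))   # the Saturday nearest to 2013-12-31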
class FY5253Quarter(DateOffset):
"""
DateOffset increments between business quarter dates
for 52-53 week fiscal year (also known as a 4-4-5 calendar).
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and parking industry.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
qtr_with_extra_week : The quarter number that has the leap
or 14 week when needed. {1,2,3,4}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
self._offset = FY5253(
startingMonth=kwds['startingMonth'],
weekday=kwds["weekday"],
variation=kwds["variation"])
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
@apply_wraps
def apply(self, other):
base = other
n = self.n
if n > 0:
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
start = other - self._offset
else:
start = other
qtr_lens = self.get_weeks(other + self._offset)
for weeks in qtr_lens:
start += relativedelta(weeks=weeks)
if start > other:
other = start
n -= 1
break
else:
n = -n
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
end = other + self._offset
else:
end = other
qtr_lens = self.get_weeks(other)
for weeks in reversed(qtr_lens):
end -= relativedelta(weeks=weeks)
if end < other:
other = end
n -= 1
break
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
def year_has_extra_week(self, dt):
if self._offset.onOffset(dt):
prev_year_end = dt - self._offset
next_year_end = dt
else:
next_year_end = dt + self._offset
prev_year_end = dt - self._offset
week_in_year = (next_year_end - prev_year_end).days / 7
return week_in_year == 53
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if self._offset.onOffset(dt):
return True
next_year_end = dt - self._offset
qtr_lens = self.get_weeks(dt)
current = next_year_end
for qtr_len in qtr_lens[0:4]:
current += relativedelta(weeks=qtr_len)
if dt == current:
return True
return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
return "%s-%s" % (self._prefix,
"%s-%d" % (suffix, self.qtr_with_extra_week))
@classmethod
def _from_name(cls, *args):
return cls(**dict(FY5253._parse_suffix(*args[:-1]),
qtr_with_extra_week=int(args[-1])))
class Easter(DateOffset):
'''
DateOffset for the Easter holiday using
logic defined in dateutil. Right now uses
the revised method which is valid in years
1583-4099.
'''
_adjust_dst = True
def __init__(self, n=1, **kwds):
super(Easter, self).__init__(n, **kwds)
@apply_wraps
def apply(self, other):
currentEaster = easter(other.year)
currentEaster = datetime(
currentEaster.year, currentEaster.month, currentEaster.day)
currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo)
# NOTE: easter returns a datetime.date so we have to convert to type of
# other
if self.n >= 0:
if other >= currentEaster:
new = easter(other.year + self.n)
else:
new = easter(other.year + self.n - 1)
else:
if other > currentEaster:
new = easter(other.year + self.n + 1)
else:
new = easter(other.year + self.n)
new = datetime(new.year, new.month, new.day, other.hour,
other.minute, other.second, other.microsecond)
return new
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
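# Illustrative sanity check (not part of the original module): Easter Sunday
# 2015 fell on April 5, so rolling forward from New Year's Day is expected to
# give that date:
#   Easter().apply(datetime(2015, 1, 1))   # -> 2015-04-05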
# ---------------------------------------------------------------------
# Ticks
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
__lt__ = _tick_comp(operator.lt)
__le__ = _tick_comp(operator.le)
__eq__ = _tick_comp(operator.eq)
__ne__ = _tick_comp(operator.ne)
def __add__(self, other):
if isinstance(other, Tick):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta == other.delta
else:
return DateOffset.__eq__(self, other)
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
def __hash__(self):
return hash(self._params())
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta != other.delta
else:
return DateOffset.__ne__(self, other)
@property
def delta(self):
return self.n * self._inc
@property
def nanos(self):
return _delta_to_nanoseconds(self.delta)
def apply(self, other):
# Timestamp can handle tz and nano sec, thus no need to use apply_wraps
if isinstance(other, (datetime, np.datetime64, date)):
return as_timestamp(other) + self
if isinstance(other, timedelta):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
else:
raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)
_prefix = 'undefined'
def isAnchored(self):
return False
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
return Day(delta.days)
else:
seconds = delta.days * 86400 + delta.seconds
if seconds % 3600 == 0:
return Hour(seconds / 3600)
elif seconds % 60 == 0:
return Minute(seconds / 60)
else:
return Second(seconds)
else:
nanos = _delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
return Nano(nanos)
_delta_to_nanoseconds = tslib._delta_to_nanoseconds
class Day(Tick):
_inc = Timedelta(days=1)
_prefix = 'D'
class Hour(Tick):
_inc = Timedelta(hours=1)
_prefix = 'H'
class Minute(Tick):
_inc = Timedelta(minutes=1)
_prefix = 'T'
class Second(Tick):
_inc = Timedelta(seconds=1)
_prefix = 'S'
class Milli(Tick):
_inc = Timedelta(milliseconds=1)
_prefix = 'L'
class Micro(Tick):
_inc = Timedelta(microseconds=1)
_prefix = 'U'
class Nano(Tick):
_inc = Timedelta(nanoseconds=1)
_prefix = 'N'
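# Illustrative sketch (not part of the original module): adding two different
# Tick subclasses goes through _delta_to_tick(), which picks the coarsest unit
# that evenly divides the combined delta:
#   Hour(2) + Minute(30)     # -> a Minute tick of 150 minutes
#   Minute(60) + Minute(60)  # -> Minute(120); same type, so the n values add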
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def _get_firstbday(wkday):
"""
wkday is the result of monthrange(year, month)
If it's a saturday or sunday, increment first business day to reflect this
"""
first = 1
if wkday == 5: # on Saturday
first = 3
elif wkday == 6: # on Sunday
first = 2
return first
def generate_range(start=None, end=None, periods=None,
offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, optional
time_rule : (legacy) name of DateOffset object to be used, optional
Corresponds with names expected by tseries.frequencies.get_offset
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
* If both time_rule and offset are specified, time_rule supersedes offset.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
elif end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError('Offset %s did not increment date' % offset)
cur = next_date
else:
while cur >= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError('Offset %s did not decrement date' % offset)
cur = next_date
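# Illustrative usage sketch (not part of the original module): three business
# days starting from Wednesday 2014-01-01, assuming the BDay semantics above:
#   list(generate_range(start=datetime(2014, 1, 1), periods=3, offset=BDay()))
#   # -> [2014-01-01, 2014-01-02, 2014-01-03]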
prefix_mapping = dict((offset._prefix, offset) for offset in [
YearBegin, # 'AS'
YearEnd, # 'A'
BYearBegin, # 'BAS'
BYearEnd, # 'BA'
BusinessDay, # 'B'
BusinessMonthBegin, # 'BMS'
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
MonthEnd, # 'M'
MonthBegin, # 'MS'
Week, # 'W'
Second, # 'S'
Minute, # 'T'
Micro, # 'U'
QuarterEnd, # 'Q'
QuarterBegin, # 'QS'
Milli, # 'L'
Hour, # 'H'
Day, # 'D'
WeekOfMonth, # 'WOM'
FY5253,
FY5253Quarter,
])
prefix_mapping['N'] = Nano
|
the-stack_0_20693 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from telemetry.page.actions import gesture_action
from telemetry.unittest import tab_test_case
class MockGestureAction(gesture_action.GestureAction):
"""Mock gesture action that simply sleeps for a specified amount of time."""
def __init__(self, attributes=None):
super(MockGestureAction, self).__init__(attributes)
self._SetTimelineMarkerBaseName('MockGestureAction::RunAction')
def RunGesture(self, page, tab):
duration = getattr(self, 'duration', 2)
time.sleep(duration)
class GestureActionTest(tab_test_case.TabTestCase):
def testGestureAction(self):
"""Test that GestureAction.RunAction() calls RunGesture()."""
action = MockGestureAction({ 'duration': 1 })
start_time = time.time()
action.RunAction(None, self._tab)
self.assertGreaterEqual(time.time() - start_time, 1.0)
def testWaitAfter(self):
action = MockGestureAction({ 'duration': 1,
'wait_after': { 'seconds': 1 } })
start_time = time.time()
action.RunAction(None, self._tab)
self.assertGreaterEqual(time.time() - start_time, 2.0)
|
the-stack_0_20694 | from __future__ import unicode_literals, print_function, division
import config
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
use_cuda = config.use_gpu and torch.cuda.is_available()
torch.manual_seed(123)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(123)
def init_lstm_wt(lstm):
for names in lstm._all_weights:
for name in names:
if name.startswith('weight_'):
wt = getattr(lstm, name)
wt.data.uniform_(-config.rand_unif_init_mag, config.rand_unif_init_mag)
elif name.startswith('bias_'):
# set forget bias to 1
bias = getattr(lstm, name)
n = bias.size(0)
start, end = n // 4, n // 2
bias.data.fill_(0.)
bias.data[start:end].fill_(1.)
def init_linear_wt(linear):
linear.weight.data.normal_(std=config.trunc_norm_init_std)
if linear.bias is not None:
linear.bias.data.normal_(std=config.trunc_norm_init_std)
def init_wt_normal(wt):
wt.data.normal_(std=config.trunc_norm_init_std)
def init_wt_unif(wt):
wt.data.uniform_(-config.rand_unif_init_mag, config.rand_unif_init_mag)
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.embedding = nn.Embedding(config.vocab_size, config.emb_dim)
init_wt_normal(self.embedding.weight)
self.lstm = nn.LSTM(config.emb_dim, config.hidden_dim, num_layers=1,
batch_first=True, bidirectional=True)
init_lstm_wt(self.lstm)
self.W_h = nn.Linear(config.hidden_dim * 2, config.hidden_dim * 2, bias=False)
# seq_lens should be in descending order
def forward(self, input, seq_lens):
embedded = self.embedding(input)
packed = pack_padded_sequence(embedded, seq_lens, batch_first=True)
output, hidden = self.lstm(packed) # hidden = ((2 x B x H), (2 x B x H))
encoder_outputs, _ = pad_packed_sequence(output, batch_first=True) # B x L x 2H
encoder_feature = encoder_outputs.contiguous().view(-1, 2*config.hidden_dim) # BL x 2H
encoder_feature = self.W_h(encoder_feature) # BL x 2H
return encoder_outputs, encoder_feature, hidden
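# Illustrative shape check (a sketch, not part of the original training code);
# assumes config.vocab_size / config.emb_dim / config.hidden_dim are set, e.g.
# 50000 / 128 / 256, and that seq_lens is sorted in descending order:
#   encoder = Encoder()
#   tokens = torch.randint(0, config.vocab_size, (4, 20))        # B=4, L=20
#   outputs, feature, hidden = encoder(tokens, [20, 18, 15, 10])
#   # outputs: B x L x 2H, feature: (B*L) x 2H, hidden: two tensors of 2 x B x H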
class ReduceState(nn.Module):
def __init__(self):
super(ReduceState, self).__init__()
self.reduce_h = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
init_linear_wt(self.reduce_h)
self.reduce_c = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
init_linear_wt(self.reduce_c)
def forward(self, hidden):
h, c = hidden
h_in = h.transpose(0, 1).contiguous().view(-1, config.hidden_dim * 2) # B x 2H
hidden_reduced_h = F.relu(self.reduce_h(h_in)) # B x H
c_in = c.transpose(0, 1).contiguous().view(-1, config.hidden_dim * 2)
hidden_reduced_c = F.relu(self.reduce_c(c_in)) # B x H
return (hidden_reduced_h.unsqueeze(0), hidden_reduced_c.unsqueeze(0)) # 1 x B x H
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
if config.is_coverage:
self.W_c = nn.Linear(1, config.hidden_dim * 2, bias=False)
self.decode_proj = nn.Linear(config.hidden_dim * 2, config.hidden_dim * 2)
self.v = nn.Linear(config.hidden_dim * 2, 1, bias=False)
def forward(self, s_t_hat, encoder_outputs, encoder_feature, enc_padding_mask, coverage):
b, t_k, n = list(encoder_outputs.size())
dec_fea = self.decode_proj(s_t_hat) # B x 2H
dec_fea_expanded = dec_fea.unsqueeze(1).expand(b, t_k, n).contiguous() # B x L x 2H
dec_fea_expanded = dec_fea_expanded.view(-1, n) # BL x 2H
att_features = encoder_feature + dec_fea_expanded # BL x 2H
if config.is_coverage:
coverage_input = coverage.view(-1, 1) # BL x 1
coverage_feature = self.W_c(coverage_input) # BL x 2H
att_features = att_features + coverage_feature
e = torch.tanh(att_features) # BL x 2H
scores = self.v(e) # BL x 1
scores = scores.view(-1, t_k) # B x L
        # NOTE: this normalization can produce NaNs if enc_padding_mask zeroes out an entire row
attn_dist_ = F.softmax(scores, dim=1) * enc_padding_mask # B x L
normalization_factor = attn_dist_.sum(1, keepdim=True)
attn_dist = attn_dist_ / normalization_factor
attn_dist = attn_dist.unsqueeze(1) # B x 1 x L
c_t = torch.bmm(attn_dist, encoder_outputs) # B x 1 x 2H
c_t = c_t.view(-1, config.hidden_dim * 2) # B x 2H
attn_dist = attn_dist.view(-1, t_k) # B x L
if config.is_coverage:
coverage = coverage.view(-1, t_k)
coverage = coverage + attn_dist # B x L
return c_t, attn_dist, coverage
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.attention_network = Attention()
self.embedding = nn.Embedding(config.vocab_size, config.emb_dim)
init_wt_normal(self.embedding.weight)
self.x_context = nn.Linear(config.hidden_dim * 2 + config.emb_dim, config.emb_dim)
self.lstm = nn.LSTM(config.emb_dim, config.hidden_dim, num_layers=1,
batch_first=True, bidirectional=False)
init_lstm_wt(self.lstm)
if config.pointer_gen:
self.p_gen_linear = nn.Linear(config.hidden_dim * 4 + config.emb_dim, 1)
# p_vocab
self.out1 = nn.Linear(config.hidden_dim * 3, config.hidden_dim)
self.out2 = nn.Linear(config.hidden_dim, config.vocab_size)
init_linear_wt(self.out2)
def forward(self, y_t_1, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, step):
if not self.training and step == 0:
h_decoder, c_decoder = s_t_1
s_t_hat = torch.cat((h_decoder.view(-1, config.hidden_dim),
c_decoder.view(-1, config.hidden_dim)), 1) # B x 2H
c_t, _, coverage_next = self.attention_network(s_t_hat, encoder_outputs, encoder_feature,
enc_padding_mask, coverage)
coverage = coverage_next # B x L
y_t_1_embd = self.embedding(y_t_1) # B x E
x = self.x_context(torch.cat((c_t_1, y_t_1_embd), 1)) # B x E
lstm_out, s_t = self.lstm(x.unsqueeze(1), s_t_1)
h_decoder, c_decoder = s_t
s_t_hat = torch.cat((h_decoder.view(-1, config.hidden_dim),
c_decoder.view(-1, config.hidden_dim)), 1) # B x 2H
c_t, attn_dist, coverage_next = self.attention_network(s_t_hat, encoder_outputs, encoder_feature,
enc_padding_mask, coverage)
if self.training or step > 0:
coverage = coverage_next
p_gen = None
if config.pointer_gen:
p_gen_input = torch.cat((c_t, s_t_hat, x), 1) # B x (4H + E)
p_gen = self.p_gen_linear(p_gen_input)
p_gen = torch.sigmoid(p_gen) # B x 1
output = torch.cat((lstm_out.view(-1, config.hidden_dim), c_t), 1) # B x 3H
output = self.out1(output) # B x H
output = self.out2(output) # B x V
vocab_dist = F.softmax(output, dim=1)
if config.pointer_gen:
vocab_dist_ = p_gen * vocab_dist
attn_dist_ = (1 - p_gen) * attn_dist
if extra_zeros is not None:
vocab_dist_ = torch.cat([vocab_dist_, extra_zeros], 1)
final_dist = vocab_dist_.scatter_add(1, enc_batch_extend_vocab, attn_dist_)
else:
final_dist = vocab_dist
return final_dist, s_t, c_t, attn_dist, p_gen, coverage
class Model(object):
def __init__(self, model_file_path=None, is_eval=False):
encoder = Encoder()
decoder = Decoder()
reduce_state = ReduceState()
# shared the embedding between encoder and decoder
decoder.embedding.weight = encoder.embedding.weight
if is_eval:
self.encoder = encoder.eval()
self.decoder = decoder.eval()
self.reduce_state = reduce_state.eval()
else:
self.encoder = encoder.train()
self.decoder = decoder.train()
self.reduce_state = reduce_state.train()
if use_cuda:
self.encoder = encoder.cuda()
self.decoder = decoder.cuda()
self.reduce_state = reduce_state.cuda()
if model_file_path is not None:
state = torch.load(model_file_path, map_location= lambda storage, location: storage)
self.encoder.load_state_dict(state['encoder_state_dict'])
self.decoder.load_state_dict(state['decoder_state_dict'], strict=False)
self.reduce_state.load_state_dict(state['reduce_state_dict'])
|
the-stack_0_20696 | """Unit tests for LEAP's suite of real-valued fitness functions."""
import numpy as np
from pytest import approx
from leap_ec.real_rep import problems
########################
# Tests for GriewankProblem
########################
def test_GriewankProblem_eval():
"""The value of a test point should be what we expected."""
t = np.array((0.5, 0.5))
# In two dimensions, the Griewank function expands like so
expected = t[0]**2/4000 + t[1]**2/4000 - np.cos(t[0]/np.sqrt(1))*np.cos(t[1]/np.sqrt(2)) + 1
p = problems.GriewankProblem()
assert(approx(expected) == p.evaluate(t))
########################
# Tests for WeierstrassProblem
########################
def test_WeierstrassProblem_eval():
"""The Weierstrass function has a (0, ... ,0) in all dimensions
and have a fitness of zero.
"""
p = problems.WeierstrassProblem()
assert(approx(0) == p.evaluate(np.array([0, 0])))
assert(approx(0) == p.evaluate(np.array([0]*25)))
|
the-stack_0_20697 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.errorreporting_v1beta1.types import common
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
__protobuf__ = proto.module(
package="google.devtools.clouderrorreporting.v1beta1",
manifest={
"ReportErrorEventRequest",
"ReportErrorEventResponse",
"ReportedErrorEvent",
},
)
class ReportErrorEventRequest(proto.Message):
r"""A request for reporting an individual error event.
Attributes:
project_name (str):
Required. The resource name of the Google Cloud Platform
project. Written as ``projects/`` plus the `Google Cloud
Platform project
ID <https://support.google.com/cloud/answer/6158840>`__.
Example: ``projects/my-project-123``.
event (~.report_errors_service.ReportedErrorEvent):
Required. The error event to be reported.
"""
project_name = proto.Field(proto.STRING, number=1)
event = proto.Field(proto.MESSAGE, number=2, message="ReportedErrorEvent",)
class ReportErrorEventResponse(proto.Message):
r"""Response for reporting an individual error event.
Data may be added to this message in the future.
"""
class ReportedErrorEvent(proto.Message):
r"""An error event which is reported to the Error Reporting
system.
Attributes:
event_time (~.timestamp.Timestamp):
Optional. Time when the event occurred.
If not provided, the time when the event was
received by the Error Reporting system will be
used.
service_context (~.common.ServiceContext):
Required. The service context in which this
error has occurred.
message (str):
Required. The error message. If no
``context.reportLocation`` is provided, the message must
contain a header (typically consisting of the exception type
name and an error message) and an exception stack trace in
one of the supported programming languages and formats.
Supported languages are Java, Python, JavaScript, Ruby, C#,
PHP, and Go. Supported stack trace formats are:
- **Java**: Must be the return value of
```Throwable.printStackTrace()`` <https://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html#printStackTrace%28%29>`__.
- **Python**: Must be the return value of
```traceback.format_exc()`` <https://docs.python.org/2/library/traceback.html#traceback.format_exc>`__.
- **JavaScript**: Must be the value of
```error.stack`` <https://github.com/v8/v8/wiki/Stack-Trace-API>`__
as returned by V8.
- **Ruby**: Must contain frames returned by
```Exception.backtrace`` <https://ruby-doc.org/core-2.2.0/Exception.html#method-i-backtrace>`__.
- **C#**: Must be the return value of
```Exception.ToString()`` <https://msdn.microsoft.com/en-us/library/system.exception.tostring.aspx>`__.
- **PHP**: Must start with
``PHP (Notice|Parse error|Fatal error|Warning)`` and
contain the result of
```(string)$exception`` <http://php.net/manual/en/exception.tostring.php>`__.
- **Go**: Must be the return value of
```runtime.Stack()`` <https://golang.org/pkg/runtime/debug/#Stack>`__.
context (~.common.ErrorContext):
Optional. A description of the context in
which the error occurred.
"""
event_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,)
service_context = proto.Field(
proto.MESSAGE, number=2, message=common.ServiceContext,
)
message = proto.Field(proto.STRING, number=3)
context = proto.Field(proto.MESSAGE, number=4, message=common.ErrorContext,)
__all__ = tuple(sorted(__protobuf__.manifest))
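# Illustrative usage sketch (not part of the generated code): constructing a
# report request; the project id, service name and message below are placeholders.
#   request = ReportErrorEventRequest(
#       project_name="projects/my-project-123",
#       event=ReportedErrorEvent(
#           service_context=common.ServiceContext(service="my-service"),
#           message="Traceback (most recent call last): ...",
#       ),
#   )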
|
the-stack_0_20699 | import cv2
from dct.camera.stream import StreamConsumer
import numpy as np
from .connection import ConnectionOverlay
class VisualizationOverlay:
def placeholder(self, input_frame):
raise NotImplementedError
def frame(self, input_frame):
raise NotImplementedError
class BaseFrameVisualizer:
"""Base class which serves as a starting point for frame visualizations."""
def __init__(self, stream: StreamConsumer, width=480, height=360):
self.input_stream = stream
self.width = width
self.height = height
self.last_frame = np.zeros((height, width, 3), dtype=np.uint8)
self.visualizations = [ConnectionOverlay()]
def add(self, viz: VisualizationOverlay):
self.visualizations.append(viz)
def placeholder(self):
frame = self.last_frame.copy()
# Apply added placeholder modifications in order.
for viz in self.visualizations:
frame = viz.placeholder(frame)
return cv2.imencode(".jpg", frame)[1].tobytes()
def generate_frames(self):
for input_frame in self.input_stream.frame_iterator():
frame = input_frame.copy()
if frame is None:
return
# Apply added visualizations in order.
for viz in self.visualizations:
frame = viz.frame(frame)
self.last_frame = input_frame
yield cv2.imencode(".jpg", frame)[1].tobytes()
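# Illustrative usage sketch (assumes a concrete StreamConsumer and an overlay
# subclassing VisualizationOverlay are defined elsewhere; names are hypothetical):
#   visualizer = BaseFrameVisualizer(stream)
#   visualizer.add(MyOverlay())                   # hypothetical overlay
#   for jpg_bytes in visualizer.generate_frames():
#       serve_mjpeg_part(jpg_bytes)               # hypothetical sink, e.g. an HTTP response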
|
the-stack_0_20700 | """
Solving Poisson's equation in 1d
================================
This example shows how to solve a 1d Poisson equation with boundary conditions.
"""
from pde import CartesianGrid, ScalarField, solve_poisson_equation
grid = CartesianGrid([[0, 1]], 32, periodic=False)
field = ScalarField(grid, 1)
result = solve_poisson_equation(field, bc=[{"value": 0}, {"derivative": 1}])
result.plot()
|
the-stack_0_20701 | import glob
import h5py
import tensorflow as tf
import numpy as np
from .img_utils import get_images
"""
This module provides three data reader: directly from file, from h5 database, use channel
h5 database is recommended since it could enable very data feeding speed
"""
class FileDataReader(object):
def __init__(self, data_dir, input_height, input_width, height, width,
batch_size):
self.data_dir = data_dir
self.input_height, self.input_width = input_height, input_width
self.height, self.width = height, width
self.batch_size = batch_size
self.image_files = glob.glob(data_dir+'*')
def next_batch(self, batch_size):
sample_files = np.random.choice(self.image_files, batch_size)
images = get_images(
sample_files, self.input_height, self.input_width,
self.height, self.width)
return images
class H5DataLoader(object):
def __init__(self, data_path, is_train=True):
self.is_train = is_train
data_file = h5py.File(data_path, 'r')
self.images, self.labels = data_file['X'], data_file['Y']
self.gen_indexes()
def gen_indexes(self):
if self.is_train:
self.indexes = np.random.permutation(range(self.images.shape[0]))
else:
self.indexes = np.array(range(self.images.shape[0]))
self.cur_index = 0
def next_batch(self, batch_size):
next_index = self.cur_index+batch_size
cur_indexes = list(self.indexes[self.cur_index:next_index])
self.cur_index = next_index
if len(cur_indexes) < batch_size and self.is_train:
self.gen_indexes()
return self.next_batch(batch_size)
cur_indexes.sort()
return self.images[cur_indexes], self.labels[cur_indexes]
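# Illustrative usage sketch (assumes an HDF5 file with 'X' and 'Y' datasets;
# the path is a placeholder):
#   loader = H5DataLoader('data/train.h5', is_train=True)
#   images, labels = loader.next_batch(32)   # random batch of 32 samples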
class QueueDataReader(object):
def __init__(self, sess, data_dir, data_list, input_size, class_num,
name, data_format):
self.sess = sess
self.scope = name + '/data_reader'
self.class_num = class_num
self.channel_axis = 3
images, labels = self.read_data(data_dir, data_list)
images = tf.convert_to_tensor(images, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.string)
queue = tf.train.slice_input_producer(
[images, labels], shuffle=True, name=self.scope+'/slice')
self.image, self.label = self.read_dataset(
queue, input_size, data_format)
def next_batch(self, batch_size):
image_batch, label_batch = tf.train.shuffle_batch(
[self.image, self.label], batch_size=batch_size,
num_threads=4, capacity=50000, min_after_dequeue=10000,
name=self.scope+'/batch')
return image_batch, label_batch
def read_dataset(self, queue, input_size, data_format):
image = tf.image.decode_jpeg(
tf.read_file(queue[0]), channels=3, name=self.scope+'/image')
label = tf.image.decode_png(
tf.read_file(queue[1]), channels=1, name=self.scope+'/label')
image = tf.image.resize_images(image, input_size)
label = tf.image.resize_images(label, input_size, 1)
if data_format == 'NCHW':
self.channel_axis = 1
image = tf.transpose(image, [2, 0, 1])
label = tf.transpose(label, [2, 0, 1])
image -= tf.reduce_mean(tf.cast(image, dtype=tf.float32),
(0, 1), name=self.scope+'/mean')
return image, label
def read_data(self, data_dir, data_list):
with open(data_list, 'r') as f:
images, labels = [], []
for line in f:
image, label = line.strip('\n').split(' ')
images.append(data_dir + image)
labels.append(data_dir + label)
return images, labels
def start(self):
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(
coord=self.coord, sess=self.sess)
def close(self):
self.coord.request_stop()
self.coord.join(self.threads)
|
the-stack_0_20702 | #!/usr/bin/env python
'''
Node to convert from quaternions to rpy in various ROS messages
'''
import rospy
import tf
import math
import sys
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseArray
from sensor_msgs.msg import Imu
from gazebo_msgs.msg import ModelStates
from nav_msgs.msg import Odometry
class Node():
def __init__(self,pose_index=None,model_name=None,
input_msg_type='Pose'):
self.pubmsg = None
self.pub = None
self.pose_index = pose_index
self.model_name = model_name
self.input_msg_type = input_msg_type
def callback(self,data):
#rospy.loginfo("callback")
if (not (pose_index==None)):
data = data[pose_index]
elif self.model_name is not None:
try:
index = data.name.index(model_name)
except ValueError:
rospy.logwarn_throttle(10.0, 'Model state {} not found'.format(model_name))
return
data = data.pose[index]
elif ( (self.input_msg_type == 'Pose') or
(self.input_msg_type == 'Imu')):
pass
elif self.input_msg_type == 'Odometry':
data = data.pose.pose
else:
rospy.logerr("Don't know what to do with message type %s"%
self.input_msg_type)
sys.exit()
q = (data.orientation.x,
data.orientation.y,
data.orientation.z,
data.orientation.w)
euler = tf.transformations.euler_from_quaternion(q)
self.pubmsg.x = euler[0] * 180 / math.pi
self.pubmsg.y = euler[1] * 180 / math.pi
self.pubmsg.z = euler[2] * 180 / math.pi
rospy.logdebug("publishing rpy: %.2f, %.2f, %.2f"
%(euler[0],euler[1],euler[2]))
self.pub.publish(self.pubmsg)
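# Quick sanity check of the conversion used above (illustrative only): a pure
# 90-degree yaw quaternion should come back as roughly (0, 0, 90) degrees.
#   q = tf.transformations.quaternion_from_euler(0, 0, math.pi / 2)
#   r, p, y = tf.transformations.euler_from_quaternion(q)
#   (math.degrees(r), math.degrees(p), math.degrees(y))   # ~ (0.0, 0.0, 90.0)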
if __name__ == '__main__':
rospy.init_node('quat2rpy', anonymous=True)
# ROS Parameters
in_topic = 'in_topic'
out_topic = 'out_topic'
pose_index = rospy.get_param('~pose_index',None)
model_name = rospy.get_param('~model_name',None)
inmsgtype = rospy.get_param('~input_msg_type','Pose')
# Initiate node object
node=Node(pose_index, model_name, input_msg_type=inmsgtype)
node.pubmsg = Vector3()
# Setup publisher
node.pub = rospy.Publisher(out_topic,Vector3,queue_size=10)
# Subscriber
if (not(model_name == None)):
inmsgtype = 'ModelStates[%s]'% model_name
rospy.Subscriber(in_topic,ModelStates,node.callback)
elif (not (pose_index == None)):
inmsgtype = 'PoseArray[%d]'%pose_index
# Setup subscriber
rospy.Subscriber(in_topic,PoseArray,node.callback)
else:
if inmsgtype == 'Pose':
# Setup subscriber
rospy.Subscriber(in_topic,Pose,node.callback)
elif inmsgtype == 'Imu':
rospy.Subscriber(in_topic,Imu,node.callback)
elif inmsgtype == 'Odometry':
rospy.Subscriber(in_topic,Odometry,node.callback)
else:
rospy.logerr("I don't know how to deal with message type <%s>"%
inmsgtype)
sys.exit()
rospy.loginfo("Subscribing to %s, looking for %s messages."%
(in_topic,inmsgtype))
rospy.loginfo("Publishing to %s, sending Vector3 messages"%
(out_topic))
try:
rospy.spin()
except rospy.ROSInterruptException:
pass
|
the-stack_0_20703 | #!/usr/bin/env python
# this script separates interior and exterior images.
# loading from /source/ dir.
# using Resnet50 model: resnet_model.h5
# using SVM model: svm_model18.sav
# output is saved in /cleaned or /interior folders
import codecs
import json
import os
import pickle
import sys
import time
import numpy as np
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.models import Model, load_model
from shutil import copyfile
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# /dir/source/Audi/Audi_A3_2008/45a88c858e.jpg
def copy_file_back(src, is_car):
# save images
if is_car:
target = src.replace("/source/", "/cleaned/")
else:
target = src.replace("/source/", "/interier/")
arr = target.split("/")[:-1]
path = "/".join(arr)
if not os.path.exists("/{}".format(path)):
os.makedirs(path)
# print("copy {} -> {}".format(src, target))
copyfile(src, target)
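# Path mapping performed by copy_file_back (illustrative example):
#   /dir/source/Audi/Audi_A3_2008/45a88c858e.jpg
#     -> /dir/cleaned/Audi/Audi_A3_2008/45a88c858e.jpg     when is_car is True
#     -> /dir/interior/Audi/Audi_A3_2008/45a88c858e.jpg    when is_car is False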
def get_model():
loaded_model = load_model('resnet_model.h5')
loaded_model.compile(loss='mean_squared_error', optimizer='sgd')
return loaded_model
def get_svm():
filename = 'svm_model18.sav'
return pickle.load(open(filename, 'rb'))
def process_images(folder):
n = 0
files = os.listdir(folder)
print(folder)
print(len(files))
files = list(map(lambda x: os.path.join(folder, x), files))
model = get_model()
svm = get_svm()
for f in sorted(files):
exists = os.path.isfile(f)
print("{} - {}".format(f, exists))
if exists and f[-4:] == ".jpg":
img = image.load_img(f, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# vector on the end of Resnet
# shape (1, 2048)
model_output = model.get_layer("avg_pool").output
intermediate_layer_model = Model(inputs=model.input, outputs=model_output)
intermediate_output = intermediate_layer_model.predict(x)
pred = svm.predict(intermediate_output.reshape(1, -1))
is_car = pred[0] == 1
n += 1
# copy an image
print("{} - {}".format(f[-40:], is_car))
copy_file_back(f, is_car)
if __name__ == '__main__':
print(len(sys.argv))
if len(sys.argv) < 2:
print("Need param: python svm_classifier.py path")
exit(1)
folder = str(sys.argv[1])
exists = os.path.isdir(folder)
if not exists:
print("Folder '{}' not found.".format(folder))
exit(1)
if "/source/" not in folder:
print("Folder '{}' must be in /source/ directory.".format(folder))
exit(1)
# serialize model to JSON
# model = ResNet50(weights='imagenet')
# model.save("resnet_model.h5")
# model_json = model.to_json()
# with open("resnet_model.json", "w") as json_file:
# json_file.write(model_json)
process_images(folder)
print("===== end.")
|
the-stack_0_20704 | #!/usr/bin/python
import itertools
import numpy as np
import pytest
from scipy import stats
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.svm import SVC
GAMMA = 1.
COEF0 = 1.
def main():
# polynomial()
cross_validation()
# rbf()
def read_dataset(dataset_type):
dataset = np.loadtxt('features.' + dataset_type)
return dataset[:, 1:], dataset[:, 0] # X, y
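# Illustrative usage (assuming features.train / features.test sit next to this
# script): each row is "<digit label> <feature values...>", so
#   X, y = read_dataset('train')   # X: feature columns, y: digit labels
#   X.shape[0] == y.shape[0]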
def polynomial():
clf = SVC(C=.01, kernel='poly', degree=2, gamma=GAMMA, coef0=COEF0)
for offset in [0, 1]:
num_supports, E_ins = [], []
digits = np.array([0, 2, 4, 6, 8], dtype=float) + offset
for digit in digits:
X_training, y_training = read_dataset('train')
y_training[~np.isclose(y_training, digit)] = -1.
clf.fit(X_training, y_training)
E_ins.append(1 - clf.score(X_training, y_training))
num_supports.append(clf.n_support_.sum())
chosen_idx = np.argmax(E_ins) if offset == 0 else np.argmin(E_ins)
print('digit={}: E_in={}, num_supports={}'.format(digits[chosen_idx],
E_ins[chosen_idx], num_supports[chosen_idx]))
print('\n--------------------\n')
X_training, y_training = read_dataset('train')
one_or_five = np.isclose(y_training, 1.) | np.isclose(y_training, 5.)
X_training, y_training = X_training[one_or_five], y_training[one_or_five]
X_test, y_test = read_dataset('test')
one_or_five = np.isclose(y_test, 1.) | np.isclose(y_test, 5.)
X_test, y_test = X_test[one_or_five], y_test[one_or_five]
Cs = [.001, .01, .1, 1.]
clfs = [SVC(C=C, kernel='poly', degree=2, gamma=GAMMA, coef0=COEF0)
for C in Cs]
[clf.fit(X_training, y_training) for clf in clfs]
num_supports = [clf.n_support_.sum() for clf in clfs]
E_ins = [1 - clf.score(X_training, y_training) for clf in clfs]
E_outs = [1 - clf.score(X_test, y_test) for clf in clfs]
print('num_supports={}'.format(num_supports))
print('E_ins={}'.format(E_ins))
print('diff E_ins={}'.format(np.diff(E_ins, 1)))
print('E_outs={}'.format(E_outs))
print('diff E_outs={}'.format(np.diff(E_outs, 1)))
print('\n--------------------\n')
Cs = [.0001, .001, .01, 1]
degrees = [2, 5]
clfs = {C: {degree: SVC(C=C, kernel='poly', degree=degree, gamma=GAMMA,
coef0=COEF0).fit(X_training, y_training)
for degree in degrees}
for C in Cs}
E_ins = [1 - clf.score(X_training, y_training)
for clf in clfs[.0001].values()]
print('C=0.0001: E_ins={}'.format(E_ins))
num_supports = [clf.n_support_.sum() for clf in clfs[.001].values()]
print('C=0.001: num_supports={}'.format(num_supports))
E_ins = [1 - clf.score(X_training, y_training)
for clf in clfs[.01].values()]
print('C=0.01: E_ins={}'.format(E_ins))
E_outs = [1 - clf.score(X_test, y_test)
for clf in clfs[1].values()]
print('C=1: E_outs={}'.format(E_outs))
def cross_validation():
X_training, y_training = read_dataset('train')
one_or_five = np.isclose(y_training, 1.) | np.isclose(y_training, 5.)
X_training, y_training = X_training[one_or_five], y_training[one_or_five]
Cs = [.0001, .001, .01, .1, 1.]
clfs = [GridSearchCV(SVC(kernel='poly', degree=2, gamma=GAMMA, coef0=COEF0),
param_grid=dict(C=Cs),
cv=KFold(n_splits=10, shuffle=True),
n_jobs=8).fit(X_training, y_training)
for _ in range(100)]
chosen_Cs = [clf.best_params_['C'] for clf in clfs]
E_cvs = [1 - clf.best_score_ for clf in clfs]
print(stats.mode(chosen_Cs))
print(np.mean(E_cvs))
def rbf():
X_training, y_training = read_dataset('train')
one_or_five = np.isclose(y_training, 1.) | np.isclose(y_training, 5.)
X_training, y_training = X_training[one_or_five], y_training[one_or_five]
X_test, y_test = read_dataset('test')
one_or_five = np.isclose(y_test, 1.) | np.isclose(y_test, 5.)
X_test, y_test = X_test[one_or_five], y_test[one_or_five]
Cs = [.01, 1, 100, 1e4, 1e6]
clfs = [SVC(C=C, kernel='rbf', gamma=GAMMA).fit(X_training, y_training)
for C in Cs]
E_ins = [1 - clf.score(X_training, y_training) for clf in clfs]
print('E_ins={}'.format(E_ins))
print('argmin E_ins={}'.format(np.argmin(E_ins)))
E_outs = [1 - clf.score(X_test, y_test) for clf in clfs]
print('E_outs={}'.format(E_outs))
print('argmin E_outs={}'.format(np.argmin(E_outs)))
if __name__ == '__main__':
main()
|
the-stack_0_20706 | """
API operations provenance
"""
import logging
from galaxy import web
from galaxy.web.base.controller import BaseAPIController
from paste.httpexceptions import HTTPNotImplemented, HTTPBadRequest
from galaxy import managers
log = logging.getLogger( __name__ )
class BaseProvenanceController( BaseAPIController ):
"""
"""
def __init__( self, app ):
super( BaseProvenanceController, self ).__init__( app )
self.hda_manager = managers.hdas.HDAManager( app )
@web.expose_api
def index( self, trans, **kwd ):
follow = kwd.get('follow', False)
value = self._get_provenance( trans, self.provenance_item_class, kwd[self.provenance_item_id], follow )
return value
@web.expose_api
def show( self, trans, elem_name, **kwd ):
follow = kwd.get('follow', False)
value = self._get_provenance( trans, self.provenance_item_class, kwd[self.provenance_item_id], follow )
return value
@web.expose_api
def create( self, trans, tag_name, payload=None, **kwd ):
payload = payload or {}
raise HTTPNotImplemented()
@web.expose_api
def delete( self, trans, tag_name, **kwd ):
raise HTTPBadRequest("Cannot Delete Provenance")
def _get_provenance( self, trans, item_class_name, item_id, follow=True ):
provenance_item = self.get_object( trans, item_id, item_class_name, check_ownership=False, check_accessible=False)
if item_class_name == "HistoryDatasetAssociation":
self.hda_manager.error_unless_accessible( trans, provenance_item, trans.user )
else:
self.security_check( trans, provenance_item, check_accessible=True )
out = self._get_record( trans, provenance_item, follow )
return out
def _get_record(self, trans, item, follow):
if item is not None:
if item.copied_from_library_dataset_dataset_association:
item = item.copied_from_library_dataset_dataset_association
job = item.creating_job
if job is not None:
return {
"id": trans.security.encode_id(item.id),
"uuid": ( lambda uuid: str( uuid ) if uuid else None )( item.dataset.uuid),
"job_id": trans.security.encode_id( job.id ),
"tool_id": job.tool_id,
"parameters": self._get_job_record(trans, job, follow),
"stderr": job.stderr,
"stdout": job.stdout,
}
else:
return {
"id": trans.security.encode_id(item.id),
"uuid": ( lambda uuid: str( uuid ) if uuid else None )( item.dataset.uuid)
}
return None
def _get_job_record(self, trans, job, follow):
out = {}
for p in job.parameters:
out[p.name] = p.value
for in_d in job.input_datasets:
if not in_d.dataset:
continue
if follow:
out[in_d.name] = self._get_record(trans, in_d.dataset, follow)
else:
out[in_d.name] = {
"id": trans.security.encode_id(in_d.dataset.id),
"uuid": ( lambda uuid: str( uuid ) if uuid else None )( in_d.dataset.dataset.uuid ),
}
return out
class HDAProvenanceController( BaseProvenanceController ):
controller_name = "history_content_provenance"
provenance_item_class = "HistoryDatasetAssociation"
provenance_item_id = "history_content_id"
class LDDAProvenanceController( BaseProvenanceController ):
controller_name = "ldda_provenance"
provenance_item_class = "LibraryDatasetDatasetAssociation"
provenance_item_id = "library_content_id"
|
the-stack_0_20708 | #!/usr/bin/env python3
"""{PIPELINE_NAME} pipeline (version: {PIPELINE_VERSION}): creates
pipeline-specific config files to given output directory and runs the
pipeline (unless otherwise requested).
"""
# generic usage {PIPELINE_NAME} and {PIPELINE_VERSION} replaced while
# printing usage
#--- standard library imports
#
import sys
import os
import logging
#--- third-party imports
#
import yaml
#--- project specific imports
#
# add lib dir for this pipeline installation to PYTHONPATH
LIB_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "lib"))
if LIB_PATH not in sys.path:
sys.path.insert(0, LIB_PATH)
from readunits import get_samples_and_readunits_from_cfgfile
from readunits import get_readunits_from_args
from pipelines import get_pipeline_version
from pipelines import PipelineHandler
from pipelines import logger as aux_logger
from pipelines import get_cluster_cfgfile
from pipelines import default_argparser
import configargparse
__author__ = "Andreas Wilm"
__email__ = "[email protected]"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License (MIT)"
# only dump() and following do not automatically create aliases
yaml.Dumper.ignore_aliases = lambda *args: True
PIPELINE_BASEDIR = os.path.dirname(sys.argv[0])
CFG_DIR = os.path.join(PIPELINE_BASEDIR, "cfg")
# same as folder name. also used for cluster job names
PIPELINE_NAME = "lacer-lofreq"
# global logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
'[{asctime}] {levelname:8s} {filename} {message}', style='{'))
logger.addHandler(handler)
def main():
"""main function
"""
default_parser = default_argparser(CFG_DIR, with_readunits=True)
parser = configargparse.ArgumentParser(description=__doc__.format(
PIPELINE_NAME=PIPELINE_NAME, PIPELINE_VERSION=get_pipeline_version()),
parents=[default_parser])
parser._optionals.title = "Arguments"
# pipeline specific args
parser.add_argument('-t', "--seqtype", required=True,
choices=['WGS', 'WES', 'targeted'],
help="Sequencing type")
parser.add_argument('-l', "--bed",
help="Bed file listing regions of interest."
" Required for WES and targeted sequencing.")
parser.add_argument('-D', '--dont-mark-dups', action='store_true',
help="Don't mark duplicate reads")
# raw bam not possible because the pipeline splits on the fly into chromosomes
parser.add_argument('--proc-bam',
help="Advanced: Injects processed BAM (overwrites fq options)."
" WARNING: reference and pre-processing need to match pipeline requirements")
parser.add_argument('--bam-only', action='store_true',
help="Don't call variants, just process BAM file")
args = parser.parse_args()
# Repeateable -v and -q for setting logging level.
# See https://www.reddit.com/r/Python/comments/3nctlm/what_python_tools_should_i_be_using_on_every/
# and https://gist.github.com/andreas-wilm/b6031a84a33e652680d4
# script -vv -> DEBUG
# script -v -> INFO
# script -> WARNING
# script -q -> ERROR
# script -qq -> CRITICAL
# script -qqq -> no logging at all
logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)
aux_logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)
if os.path.exists(args.outdir):
logger.fatal("Output directory %s already exists", args.outdir)
sys.exit(1)
# samples is a dictionary with sample names as key (mostly just
# one) and readunit keys as value. readunits is a dict with
# readunits (think: fastq pairs with attributes) as value
if args.sample_cfg:
if any([args.fq1, args.fq2, args.sample, args.proc_bam]):
logger.fatal("Config file overrides fastq and sample arguments."
" Use one or the other")
sys.exit(1)
if not os.path.exists(args.sample_cfg):
logger.fatal("Config file %s does not exist", args.sample_cfg)
sys.exit(1)
samples, readunits = get_samples_and_readunits_from_cfgfile(args.sample_cfg)
else:# no sample config, so input is either fastq or existing bam
samples = dict()
if not args.sample:
logger.fatal("Need sample name if not using config file")
sys.exit(1)
if args.proc_bam:
assert not args.fq1, ("BAM injection overwrites fastq arguments")
if args.proc_bam:
assert os.path.exists(args.proc_bam)
readunits = dict()
samples[args.sample] = []
elif args.fq1:
readunits = get_readunits_from_args(args.fq1, args.fq2)
# all readunits go into this one sample specified on the command-line
samples[args.sample] = list(readunits.keys())
else:
logger.fatal("Need at least one fastq files as argument if not using config file")
sys.exit(1)
if args.seqtype in ['WES', 'targeted']:
if not args.bed:
logger.fatal("Analysis of exome and targeted sequence runs requires a bed file")
sys.exit(1)
else:
if not os.path.exists(args.bed):
logger.fatal("Bed file %s does not exist", args.sample_cfg)
sys.exit(1)
# turn arguments into cfg_dict (gets merged with other configs late)
#
cfg_dict = dict()
cfg_dict['readunits'] = readunits
cfg_dict['samples'] = samples
cfg_dict['seqtype'] = args.seqtype
cfg_dict['intervals'] = os.path.abspath(args.bed) if args.bed else None# always safe, might be used for WGS as well
cfg_dict['mark_dups'] = not args.dont_mark_dups
cfg_dict['bam_only'] = args.bam_only
pipeline_handler = PipelineHandler(
PIPELINE_NAME, PIPELINE_BASEDIR,
args, cfg_dict,
cluster_cfgfile=get_cluster_cfgfile(CFG_DIR))
pipeline_handler.setup_env()
# Inject existing BAM by symlinking (everything upstream is temporary anyway)
# WARNING: filename has to match definition in Snakefile!
if args.proc_bam:
target = os.path.join(args.outdir, "out", args.sample,
"{}.bwamem.lofreq".format(args.sample))
if cfg_dict['mark_dups']:
target += ".dedup"
target += ".lacer.bam"
os.makedirs(os.path.dirname(target))
os.symlink(os.path.abspath(args.proc_bam), target)
pipeline_handler.submit(args.no_run)
if __name__ == "__main__":
main()
|
the-stack_0_20709 |
import numpy as np
def error_metric(real, prediction, upper_bound, lower_bound):
    """Interval-aware error: mean absolute errors of the point forecast and both
    bounds, normalized by max(real, 1) and up-weighted wherever the actual value
    falls outside the [lower_bound, upper_bound] interval."""
real_tilde = np.maximum(real, 1)
err_pred = np.abs(real - prediction)
err_upper = np.abs(real - upper_bound)
err_lower = np.abs(real - lower_bound)
general_term = (err_pred + err_lower + err_upper) / real_tilde
relative_upper = (1.5 + (2 * err_upper / real_tilde))
relative_lower = (1.5 + (2 * err_lower / real_tilde))
relative_error = np.ones(len(real))
relative_error[real > upper_bound] = relative_upper[real > upper_bound]
relative_error[real < lower_bound] = relative_lower[real < lower_bound]
print(f"Relative term {np.mean(relative_error)}")
print(f"General term {np.mean(general_term)}")
return np.mean(relative_error * general_term)
if __name__ == "__main__":
    # Small self-check on synthetic data (adapted from the original commented-out demo).
    real = np.zeros(100)
    prediction = real + 1
    upper_bound = prediction + np.arange(100)
    lower_bound = prediction - np.arange(100)
    print(error_metric(real, prediction, upper_bound, lower_bound))
|
the-stack_0_20710 | import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from densepose import _C
class _DefROIAlign(Function):
@staticmethod
def forward(ctx, input, roi, offsets, output_size, spatial_scale, sampling_ratio, trans_std, aligned):
ctx.save_for_backward(input, roi, offsets)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.trans_std = trans_std
ctx.input_shape = input.size()
ctx.aligned = aligned
output = _C.def_roi_align_forward(
input, roi, offsets, spatial_scale, output_size[0], output_size[1],
sampling_ratio, trans_std, aligned
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
data, rois, offsets = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
trans_std = ctx.trans_std
bs, ch, h, w = ctx.input_shape
grad_offsets = torch.zeros_like(offsets)
grad_input = _C.def_roi_align_backward(
data,
grad_output,
rois,
offsets,
grad_offsets,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
trans_std,
ctx.aligned,
)
return grad_input, None, grad_offsets, None, None, None, None, None
def_roi_align = _DefROIAlign.apply
class DefROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale,
sampling_ratio, trans_std, aligned=True):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
sampling_ratio (int): number of inputs samples to take for each output
sample. 0 to take samples densely.
trans_std (float): offset scale according to the normalized roi size
aligned (bool): if False, use the legacy implementation in
Detectron. If True, align the results more perfectly.
"""
super(DefROIAlign, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.trans_std = trans_std
self.aligned = aligned
def forward(self, input, rois, offsets):
"""
Args:
input: NCHW images
rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
"""
assert rois.dim() == 2 and rois.size(1) == 5
return def_roi_align(
input, rois, offsets, self.output_size,
self.spatial_scale, self.sampling_ratio,
self.trans_std, self.aligned
)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ", trans_std=" + str(self.trans_std)
tmpstr += ", aligned=" + str(self.aligned)
tmpstr += ")"
return tmpstr
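# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The pooled size,
# spatial scale and offset layout below are assumptions typical of an FPN-style
# detector; the compiled densepose._C extension must be importable to run it.
#
#   pooler = DefROIAlign(output_size=(7, 7), spatial_scale=1.0 / 16,
#                        sampling_ratio=2, trans_std=0.1)
#   pooled = pooler(features, rois, offsets)
#
# where `features` is an NCHW feature map, `rois` is an (R, 5) tensor of
# (batch_index, x1, y1, x2, y2) boxes as documented above, and `offsets` holds
# the learned per-bin deformation offsets expected by the CUDA op.
# ---------------------------------------------------------------------------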
|
the-stack_0_20712 | #!/usr/bin/env python
import argparse
import atexit
import copy
import gc
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings
try:
import django
except ImportError as e:
raise RuntimeError(
'Django module not found, reference tests/README.rst for instructions.'
) from e
else:
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import NullTimeKeeper, TimeKeeper, get_runner
from django.utils.deprecation import (
RemovedInDjango41Warning, RemovedInDjango50Warning,
)
from django.utils.log import DEFAULT_LOGGING
try:
import MySQLdb
except ImportError:
pass
else:
# Ignore informational warnings from QuerySet.explain().
warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter('error', RemovedInDjango50Warning)
warnings.simplefilter('error', RemovedInDjango41Warning)
# Make resource and runtime warning errors to ensure no usage of error prone
# patterns.
warnings.simplefilter("error", ResourceWarning)
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
# RemovedInDjango41Warning: Ignore MemcachedCache deprecation warning.
warnings.filterwarnings(
'ignore',
'MemcachedCache is deprecated',
category=RemovedInDjango41Warning,
)
# Reduce garbage collection frequency to improve performance. Since CPython
# uses refcounting, garbage collection only collects objects with cyclic
# references, which are a minority, so the garbage collection threshold can be
# larger than the default threshold of 700 allocations + deallocations without
# much increase in memory usage.
gc.set_threshold(100_000)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Removing the temporary TMPDIR.
atexit.register(shutil.rmtree, TMPDIR)
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_runner_apps',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
'deprecation': ['django.contrib.flatpages', 'django.contrib.redirects'],
'flatpages_tests': ['django.contrib.flatpages'],
'redirects_tests': ['django.contrib.redirects'],
}
def get_test_modules():
modules = []
discovery_paths = [(None, RUNTESTS_DIR)]
if connection.features.gis_enabled:
# GIS tests are in nested apps
discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
else:
SUBDIRS_TO_SKIP.append('gis_tests')
for modpath, dirpath in discovery_paths:
for f in os.scandir(dirpath):
if ('.' not in f.name and
os.path.basename(f.name) not in SUBDIRS_TO_SKIP and
not f.is_file() and
os.path.exists(os.path.join(f.path, '__init__.py'))):
modules.append((modpath, f.name))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels, start_at, start_after):
# Reduce the given test labels to just the app module path.
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE': settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = 'static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
'auth': None,
'contenttypes': None,
'sessions': None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
'fields.W342', # ForeignKey(unique=True) -> OneToOneField
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed
# without raising AppRegistryNotReady when running gis_tests in isolation
# on some backends (e.g. PostGIS).
if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
print('Aborting: A GIS database backend is required to run gis_tests.')
sys.exit(1)
def _module_match_label(module_label, label):
# Exact or ancestor match.
return module_label == label or module_label.startswith(label + '.')
# Load all the test model apps.
test_modules = get_test_modules()
found_start = not (start_at or start_after)
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = modpath + '.' + module_name
else:
module_label = module_name
if not found_start:
if start_at and _module_match_label(module_label, start_at):
found_start = True
elif start_after and _module_match_label(module_label, start_after):
found_start = True
continue
else:
continue
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
module_found_in_labels = not test_labels or any(
_module_match_label(module_label, label) for label in test_labels_set
)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
for contrib_app in CONTRIB_TESTS_TO_APPS[module_name]:
if contrib_app not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(contrib_app)
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases.
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
# Set an environment variable that other code may consult to see if
# Django's own test suite is running.
os.environ['RUNNING_DJANGOS_TEST_SUITE'] = 'true'
return state
def teardown(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
del os.environ['RUNNING_DJANGOS_TEST_SUITE']
def actual_test_processes(parallel):
if parallel == 0:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
return default_test_processes()
else:
return 1
else:
return parallel
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
browsers = values.split(',')
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
setattr(namespace, self.dest, browsers)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
test_labels, debug_sql, parallel, tags, exclude_tags,
test_name_patterns, start_at, start_after, pdb, buffer,
timing):
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
max_parallel = default_test_processes() if parallel == 0 else parallel
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
state = setup(verbosity, test_labels, start_at, start_after)
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=actual_test_processes(parallel),
tags=tags,
exclude_tags=exclude_tags,
test_name_patterns=test_name_patterns,
pdb=pdb,
buffer=buffer,
timing=timing,
)
failures = test_runner.run_tests(test_labels or get_installed())
teardown(state)
return failures
def get_app_test_labels(verbosity, start_at, start_after):
test_labels = []
state = setup(verbosity, test_labels, start_at, start_after)
test_labels = get_installed()
teardown(state)
return test_labels
def get_subprocess_args(options):
subprocess_args = [
sys.executable, __file__, '--settings=%s' % options.settings
]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
if options.tags:
subprocess_args.append('--tag=%s' % options.tags)
if options.exclude_tags:
subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
return subprocess_args
def bisect_tests(bisection_label, options, test_labels, start_at, start_after):
if not test_labels:
test_labels = get_app_test_labels(options.verbosity, start_at, start_after)
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.run(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.run(subprocess_args + test_labels_b)
if failures_a.returncode and not failures_b.returncode:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b.returncode and not failures_a.returncode:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a.returncode and failures_b.returncode:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
def paired_tests(paired_test, options, test_labels, start_at, start_after):
if not test_labels:
test_labels = get_app_test_labels(options.verbosity, start_at, start_after)
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
'modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output',
)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_true',
help='Tells Django to stop running the test suite after first failed test.',
)
parser.add_argument(
'--keepdb', action='store_true',
help='Tells Django to preserve the test database between runs.',
)
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
'--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.',
)
parser.add_argument(
'--pair',
help='Run the test suite in pairs with the named test to find problem pairs.',
)
parser.add_argument(
'--reverse', action='store_true',
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.',
)
parser.add_argument(
'--selenium', action=ActionSelenium, metavar='BROWSERS',
help='A comma-separated list of browsers to run the Selenium tests against.',
)
parser.add_argument(
'--headless', action='store_true',
help='Run selenium tests in headless mode, if the browser supports the option.',
)
parser.add_argument(
'--selenium-hub',
help='A URL for a selenium hub instance to use in combination with --selenium.',
)
parser.add_argument(
'--external-host', default=socket.gethostname(),
help='The external host that can be reached by the selenium hub instance when running Selenium '
'tests via Selenium Hub.',
)
parser.add_argument(
'--debug-sql', action='store_true',
help='Turn on the SQL query logger within tests.',
)
parser.add_argument(
'--parallel', nargs='?', default=0, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', dest='tags', action='append',
help='Run only tests with the specified tags. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', dest='exclude_tags', action='append',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--start-after', dest='start_after',
help='Run tests starting after the specified top-level module.',
)
parser.add_argument(
'--start-at', dest='start_at',
help='Run tests starting at the specified top-level module.',
)
parser.add_argument(
'--pdb', action='store_true',
help='Runs the PDB debugger on error or failure.'
)
parser.add_argument(
'-b', '--buffer', action='store_true',
help='Discard output of passing tests.',
)
parser.add_argument(
'--timing', action='store_true',
help='Output timings, including database set up and total run time.',
)
parser.add_argument(
'-k', dest='test_name_patterns', action='append',
help=(
'Only run test methods and classes matching test name pattern. '
'Same as unittest -k option. Can be used multiple times.'
),
)
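    # Typical invocations (sketch; the custom settings module name is a placeholder):
    #   ./runtests.py basic i18n --verbosity=2
    #   ./runtests.py --settings=my_test_settings --parallel=4
    #   ./runtests.py --selenium=chrome admin_views
    #   ./runtests.py --start-after=basic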
options = parser.parse_args()
using_selenium_hub = options.selenium and options.selenium_hub
if options.selenium_hub and not options.selenium:
parser.error('--selenium-hub and --external-host require --selenium to be used.')
if using_selenium_hub and not options.external_host:
parser.error('--selenium-hub and --external-host must be used together.')
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
mutually_exclusive_options = [options.start_at, options.start_after, options.modules]
enabled_module_options = [bool(option) for option in mutually_exclusive_options].count(True)
if enabled_module_options > 1:
print('Aborting: --start-at, --start-after, and test labels are mutually exclusive.')
sys.exit(1)
for opt_name in ['start_at', 'start_after']:
opt_val = getattr(options, opt_name)
if opt_val:
if '.' in opt_val:
print('Aborting: --%s must be a top-level module.' % opt_name.replace('_', '-'))
sys.exit(1)
setattr(options, opt_name, os.path.normpath(opt_val))
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.selenium:
if not options.tags:
options.tags = ['selenium']
elif 'selenium' not in options.tags:
options.tags.append('selenium')
if options.selenium_hub:
SeleniumTestCaseBase.selenium_hub = options.selenium_hub
SeleniumTestCaseBase.external_host = options.external_host
SeleniumTestCaseBase.headless = options.headless
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(
options.bisect, options, options.modules, options.start_at,
options.start_after,
)
elif options.pair:
paired_tests(
options.pair, options, options.modules, options.start_at,
options.start_after,
)
else:
time_keeper = TimeKeeper() if options.timing else NullTimeKeeper()
with time_keeper.timed('Total run'):
failures = django_tests(
options.verbosity, options.interactive, options.failfast,
options.keepdb, options.reverse, options.modules,
options.debug_sql, options.parallel, options.tags,
options.exclude_tags,
getattr(options, 'test_name_patterns', None),
options.start_at, options.start_after, options.pdb, options.buffer,
options.timing,
)
time_keeper.print_results()
if failures:
sys.exit(1)
|
the-stack_0_20715 | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
notification = client.notifications("NO5a7a84730f529f0a76b3e30c01315d1a") \
.fetch()
print(notification.message_text)
|
the-stack_0_20716 | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer as TJWSSerializer, BadData
# Create your models here.
from meiduo_mall.utils.models import BaseModel
from users import constants
class User(AbstractUser):
"""用户模型类"""
mobile = models.CharField(max_length=11, verbose_name='手机号')
email_active = models.BooleanField(default=False, verbose_name='邮箱验证状态')
# openid = models.CharField(max_length=64, verbose_name='OpenID')
default_address = models.ForeignKey('Address', related_name='users', null=True, blank=True,
                                        on_delete=models.SET_NULL, verbose_name='default address')
class Meta:
db_table = 'tb_users'
        verbose_name = 'user'
verbose_name_plural = verbose_name
def generate_verify_email_url(self):
"""
生成对应用户的邮箱验证的链接地址
"""
# 组织用户数据
data = {
'id': self.id,
'email': self.email
}
        # Sign the data with the timed serializer
serializer = TJWSSerializer(settings.SECRET_KEY, constants.VERIFY_EMAIL_TOKEN_EXPIRES)
token = serializer.dumps(data).decode() # str
        # Build the verification link
verify_url = 'http://www.meiduo.site:8080/success_verify_email.html?token=' + token
return verify_url
@staticmethod
def check_verify_email_token(token):
"""校验邮箱验证的token是否有效"""
serializer = TJWSSerializer(settings.SECRET_KEY)
try:
data = serializer.loads(token)
except BadData:
return None
else:
            # Extract the user id and email
            id = data.get('id')
            email = data.get('email')
            # Look up the matching user
user = User.objects.get(id=id, email=email)
return user
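    # Illustrative verification flow (sketch, not part of the original file):
    #   url = user.generate_verify_email_url()         # mail this link to the user
    #   user = User.check_verify_email_token(token)    # None if expired or tampered with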
# address
# address.province
# address.city
# address.district
class Address(BaseModel):
"""
用户地址模型类
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses', verbose_name='用户')
title = models.CharField(max_length=20, verbose_name='地址名称')
receiver = models.CharField(max_length=20, verbose_name='收货人')
province = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='province_addresses', verbose_name='省')
city = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='city_addresses', verbose_name='市')
district = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='district_addresses', verbose_name='区')
place = models.CharField(max_length=50, verbose_name='地址')
mobile = models.CharField(max_length=11, verbose_name='手机')
tel = models.CharField(max_length=20, null=True, blank=True, default='', verbose_name='固定电话')
email = models.CharField(max_length=30, null=True, blank=True, default='', verbose_name='电子邮箱')
is_deleted = models.BooleanField(default=False, verbose_name='逻辑删除')
# is_default = models.BooleanField(default=False, verbose_name='是否默认')
class Meta:
db_table = 'tb_address'
        verbose_name = 'user address'
verbose_name_plural = verbose_name
ordering = ['-update_time']
|
the-stack_0_20717 | from concurrent.futures import ProcessPoolExecutor
from functools import partial
import itertools
import re
import multiprocessing as mp
import posixpath
import pprint
import os.path
from tqdm import tqdm
import sys
import click
import pathos.pools
from cloudfiles import CloudFiles
from cloudfiles.compression import transcode
from cloudfiles.paths import extract, get_protocol
from cloudfiles.lib import toabs, sip, toiter, first
def cloudpathjoin(cloudpath, *args):
cloudpath = normalize_path(cloudpath)
proto = get_protocol(cloudpath)
if proto == "file":
# join function can strip "file://"
return "file://" + os.path.join(cloudpath, *args).replace("file://", "")
else:
return posixpath.join(cloudpath, *args)
def normalize_path(cloudpath):
if not get_protocol(cloudpath):
return "file://" + toabs(cloudpath)
return cloudpath
def ispathdir(cloudpath):
expath = extract(normalize_path(cloudpath))
return (
(expath.protocol != "file" and cloudpath[-1] == "/")
or (expath.protocol == "file" and cloudpath[-1] == os.path.sep)
or (expath.protocol == "file" and os.path.isdir(expath.path))
)
@click.group()
@click.option('-p', '--parallel', default=1, help='Number of parallel processes. <= 0 for all cores.')
@click.pass_context
def main(ctx, parallel):
parallel = int(parallel)
if parallel <= 0:
parallel = mp.cpu_count()
ctx.ensure_object(dict)
ctx.obj["parallel"] = parallel
@main.command()
def license():
"""Prints the license for this library and cli tool."""
path = os.path.join(os.path.dirname(__file__), 'LICENSE')
with open(path, 'rt') as f:
print(f.read())
@main.command()
@click.option('--shortpath', is_flag=True, default=False, help='Don\'t print the common base path for each listed path.')
@click.option('--flat', is_flag=True, default=False, help='Only produce a single level of directory hierarchy.')
@click.option('-e','--expr',is_flag=True, default=False, help='Use a limited regexp language (e.g. [abc123]\{3\}) to generate prefixes.')
@click.argument("cloudpath")
def ls(shortpath, flat, expr, cloudpath):
"""Recursively lists the contents of a directory."""
cloudpath = normalize_path(cloudpath)
_, flt, prefix = get_mfp(cloudpath, True)
epath = extract(cloudpath)
if len(epath.path) > 0:
if prefix == "" and flt == False:
prefix = os.path.basename(cloudpath)
cloudpath = os.path.dirname(cloudpath)
flat = flat or flt
cf = CloudFiles(cloudpath, green=True)
iterables = []
if expr:
# TODO: make this a reality using a parser
# match "[abc]{2}" or "[123]" meaning generate a 2 character cartesian
# product of a,b, and c or a 1 character cartesian product of 1,2,3
# e.g. aa, ab, ac, ba, bb, bc, ca, cb, cc
# 1, 2, 3
matches = re.findall(r'\[([a-zA-Z0-9]+)\]', prefix)
if len(matches):
iterables.extend(
[ cf.list(prefix=pfx, flat=flat) for pfx in exprgen(prefix, matches) ]
)
else:
iterables.append(
cf.list(flat=flat)
)
else:
iterables = [ cf.list(prefix=prefix, flat=flat) ]
iterables = itertools.chain(*iterables)
for pathset in sip(iterables, 1000):
if not shortpath:
pathset = [ cloudpathjoin(cloudpath, pth) for pth in pathset ]
print("\n".join(pathset))
def exprgen(prefix, matches):
"""
Given a string "hello[world]" and matches := ["world"]
return ["hellow", "helloo", "hellor", "hellol", "hellod"]
"""
if len(matches) == 0:
return [ prefix ]
match = matches[0]
prefixes = []
for char in match:
prefixes.append(prefix.replace(f"[{match}]", char, 1))
finished_prefixes = []
for pfx in prefixes:
finished_prefixes += exprgen(pfx, matches[1:])
return finished_prefixes
def get_mfp(path, recursive):
"""many,flat,prefix"""
path = normalize_path(path)
flat = not recursive
many = recursive
prefix = ""
if path[-2:] == "**":
many = True
flat = False
prefix = os.path.basename(path[:-2])
elif path[-1:] == "*":
many = True
flat = True
prefix = os.path.basename(path[:-1])
return (many, flat, prefix)
@main.command()
@click.argument("source", nargs=-1)
@click.argument("destination", nargs=1)
@click.option('-r', '--recursive', is_flag=True, default=False, help='Recursive copy.')
@click.option('-c', '--compression', default='same', help="Destination compression type. Options: same (default), none, gzip, br, zstd")
@click.option('--progress', is_flag=True, default=False, help="Show transfer progress.")
@click.option('-b', '--block-size', default=128, help="Number of files to download at a time.")
@click.pass_context
def cp(ctx, source, destination, recursive, compression, progress, block_size):
"""
Copy one or more files from a source to destination.
If source is "-" read newline delimited filenames from stdin.
Note that for gs:// to gs:// transfers, the gsutil
tool is more efficient because the files never leave
Google's network.
"""
if len(source) > 1 and not ispathdir(destination):
print("cloudfiles: destination must be a directory for multiple source files.")
return
for src in source:
_cp_single(ctx, src, destination, recursive, compression, progress, block_size)
def _cp_single(ctx, source, destination, recursive, compression, progress, block_size):
use_stdin = (source == '-')
nsrc = normalize_path(source)
ndest = normalize_path(destination)
ctx.ensure_object(dict)
parallel = int(ctx.obj.get("parallel", 1))
issrcdir = ispathdir(source) and use_stdin == False
isdestdir = ispathdir(destination)
srcpath = nsrc if issrcdir else os.path.dirname(nsrc)
many, flat, prefix = get_mfp(nsrc, recursive)
if issrcdir and not many:
print(f"cloudfiles: {source} is a directory (not copied).")
return
xferpaths = os.path.basename(nsrc)
if use_stdin:
xferpaths = sys.stdin.readlines()
xferpaths = [ x.replace("\n", "") for x in xferpaths ]
prefix = os.path.commonprefix(xferpaths)
xferpaths = [ x.replace(prefix, "") for x in xferpaths ]
srcpath = cloudpathjoin(srcpath, prefix)
elif many:
xferpaths = CloudFiles(srcpath, green=True).list(prefix=prefix, flat=flat)
destpath = ndest
if isinstance(xferpaths, str):
destpath = ndest if isdestdir else os.path.dirname(ndest)
elif not isdestdir:
if os.path.exists(ndest.replace("file://", "")):
print(f"cloudfiles: {ndest} is not a directory (not copied).")
return
if compression == "same":
compression = None
elif compression == "none":
compression = False
if not isinstance(xferpaths, str):
if parallel == 1:
_cp(srcpath, destpath, compression, progress, block_size, xferpaths)
return
total = None
try:
total = len(xferpaths)
except TypeError:
pass
fn = partial(_cp, srcpath, destpath, compression, False, block_size)
with tqdm(desc="Transferring", total=total, disable=(not progress)) as pbar:
with pathos.pools.ProcessPool(parallel) as executor:
for _ in executor.imap(fn, sip(xferpaths, block_size)):
pbar.update(block_size)
else:
cfsrc = CloudFiles(srcpath, green=True, progress=progress)
if not cfsrc.exists(xferpaths):
print(f"cloudfiles: source path not found: {cfsrc.abspath(xferpaths).replace('file://','')}")
return
downloaded = cfsrc.get(xferpaths, raw=True)
if compression is not None:
downloaded = transcode(downloaded, compression, in_place=True)
cfdest = CloudFiles(destpath, green=True, progress=progress)
if isdestdir:
cfdest.put(os.path.basename(nsrc), downloaded, raw=True)
else:
cfdest.put(os.path.basename(ndest), downloaded, raw=True)
def _cp(src, dst, compression, progress, block_size, paths):
cfsrc = CloudFiles(src, green=True, progress=progress)
cfdest = CloudFiles(dst, green=True, progress=progress)
cfsrc.transfer_to(
cfdest, paths=paths,
reencode=compression, block_size=block_size
)
@main.command()
@click.argument('paths', nargs=-1)
@click.option('-r', '--recursive', is_flag=True, default=False, help='Descend into directories.')
@click.option('--progress', is_flag=True, default=False, help="Show transfer progress.")
@click.option('-b', '--block-size', default=128, help="Number of files to process at a time.")
@click.pass_context
def rm(ctx, paths, recursive, progress, block_size):
"""
Remove file objects.
Note that if the only path provided is "-",
rm will read the paths from STDIN separated by
newlines.
"""
ctx.ensure_object(dict)
parallel = int(ctx.obj.get("parallel", 1))
if len(paths) == 1 and paths[0] == "-":
paths = sys.stdin.readlines()
paths = [ path[:-1] for path in paths ] # clip "\n"
for path in paths:
many, flat, prefix = get_mfp(path, recursive)
if ispathdir(path) and not many:
print(f"cloudfiles: {path}: is a directory.")
return
for path in paths:
_rm(path, recursive, progress, parallel, block_size)
def _rm(path, recursive, progress, parallel, block_size):
npath = normalize_path(path)
many, flat, prefix = get_mfp(path, recursive)
cfpath = npath if ispathdir(path) else os.path.dirname(npath)
xferpaths = os.path.basename(npath)
if many:
xferpaths = CloudFiles(cfpath, green=True).list(prefix=prefix, flat=flat)
if parallel == 1 or not many:
__rm(cfpath, progress, xferpaths)
return
fn = partial(__rm, cfpath, False)
with tqdm(desc="Deleting", disable=(not progress)) as pbar:
with pathos.pools.ProcessPool(parallel) as executor:
for _ in executor.imap(fn, sip(xferpaths, block_size)):
pbar.update(block_size)
def __rm(cloudpath, progress, paths):
CloudFiles(cloudpath, green=True, progress=progress).delete(paths)
@main.command()
@click.argument('paths', nargs=-1)
@click.option('-c', '--grand-total', is_flag=True, default=False, help="Sum a grand total of all inputs.")
@click.option('-s', '--summarize', is_flag=True, default=False, help="Sum a total for each input argument.")
@click.option('-h', '--human-readable', is_flag=True, default=False, help='"Human-readable" output. Use unit suffixes: Bytes, KiB, MiB, GiB, TiB, PiB, and EiB.')
def du(paths, grand_total, summarize, human_readable):
"""Display disk usage statistics."""
results = []
for path in paths:
npath = normalize_path(path)
if ispathdir(path):
cf = CloudFiles(npath, green=True)
results.append(cf.size(cf.list()))
else:
cf = CloudFiles(os.path.dirname(npath), green=True)
results.append({ path: cf.size(os.path.basename(npath)) })
def SI(val):
if not human_readable:
return val
if val < 1024:
return f"{val} Bytes"
elif val < 2**20:
return f"{(val / 2**10):.2f} KiB"
elif val < 2**30:
return f"{(val / 2**20):.2f} MiB"
elif val < 2**40:
return f"{(val / 2**30):.2f} GiB"
elif val < 2**50:
return f"{(val / 2**40):.2f} TiB"
elif val < 2**60:
return f"{(val / 2**50):.2f} PiB"
else:
return f"{(val / 2**60):.2f} EiB"
summary = {}
for path, res in zip(paths, results):
summary[path] = sum(res.values())
if summarize:
print(f"{SI(summary[path])}\t{path}")
if not summarize:
for res in results:
for pth, size in res.items():
print(f"{SI(size)}\t{pth}")
if grand_total:
print(f"{SI(sum(summary.values()))}\ttotal")
@main.command()
@click.argument('paths', nargs=-1)
def head(paths):
    """Display the stored object metadata (headers) for each given path."""
results = {}
for path in paths:
npath = normalize_path(path)
npath = re.sub(r'\*+$', '', path)
many, flat, prefix = get_mfp(path, False)
if many:
cf = CloudFiles(npath, green=True)
res = cf.head(cf.list(prefix=prefix, flat=flat))
results.update(res)
else:
cf = CloudFiles(os.path.dirname(npath), green=True)
results[path] = cf.head(os.path.basename(npath))
pp = pprint.PrettyPrinter(indent=2)
if len(paths) == 1 and len(results) == 1:
val = first(results.values())
if val is not None:
print(val)
else:
print("cloudfiles: head: File not found: {}".format(paths[0]))
elif len(paths) > 0:
pp.pprint(results)
|
the-stack_0_20718 | ##########################################################################
# Copyright (c) 2017 Nandini Khanwalkar
# [email protected]
##########################################################################
import os
import random
import numpy as np
import sklearn
from sklearn.metrics import *
from sklearn.model_selection import train_test_split
in_size = 785
out_size = 10
train_set = 60000
test_set = 10000
n = [20, 50, 100]
alphas = [0, 0.25, 0.5]
eta = 0.1
##########################################################################
def sigmoid(z):
return 1/(1 + np.exp(-z))
def dsigmoid(z):
return z*(1-z)
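# Note: dsigmoid receives the already-activated value a = sigmoid(z), so the
# derivative is computed as a*(1-a) without recomputing the sigmoid.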
def fwd_prop(data, wi2h, wh2o):
ai = np.reshape(data, (1, in_size))
ah = sigmoid(np.dot(ai, wi2h))
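    # Pin the first hidden activation to 1 so it serves as the bias unit feeding the output layer.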
ah[0][0] = 1
ao = sigmoid(np.dot(ah, wh2o))
return ai, ah, ao
def back_prop(error, ai, ah, ao, wh2o, wi2h, d_wh2o_prev, d_wi2h_prev, mv):
delta_k = dsigmoid(ao)*error
delta_j = dsigmoid(ah)*np.dot(delta_k, np.transpose(wh2o))
d_wh2o_curr = (eta*np.dot(np.transpose(ah), delta_k)) + (mv*d_wh2o_prev)
d_wi2h_curr = (eta*np.dot(np.transpose(ai), delta_j)) + (mv*d_wi2h_prev)
wh2o += d_wh2o_curr
wi2h += d_wi2h_curr
return wh2o, wi2h, d_wh2o_curr, d_wi2h_curr
##########################################################################
def train_NN(wh2o, wi2h, d_wh2o_prev, d_wi2h_prev, mv):
for i in range(0, train_set):
ai, ah, ao = fwd_prop(train_data[i, :], wi2h, wh2o) # Feed-forward an image sample to get output array
t_k = np.insert((np.zeros((1, out_size-1)) + 0.0001), int(train_labels[i]), 0.9999) # Compute array for target value
wh2o, wi2h, d_wh2o_prev, d_wi2h_prev = back_prop(t_k-ao, ai, ah, ao, wh2o, wi2h, d_wh2o_prev, d_wi2h_prev, mv) # Backpropagate the error to obtain updated weights
return wi2h, wh2o
def test_NN(dataset, data_labels, set_size, wi2h, wh2o):
pred = []
for i in range(0, set_size):
ai, ah, ao = fwd_prop(dataset[i, :], wi2h, wh2o) # Feed-forward an image sample to get output array
pred.append(np.argmax(ao)) # Append the predicted output to pred list
return accuracy_score(data_labels, pred), pred
def Neural_Network(h_size, mv):
# Randomize Weights :
wi2h = (np.random.rand(in_size, h_size) - 0.5)*0.1
wh2o = (np.random.rand(h_size, out_size) - 0.5)*0.1
# Initialize delta_w_(t-1) arrays to 0 arrays :
d_wh2o_prev = np.zeros(wh2o.shape)
d_wi2h_prev = np.zeros(wi2h.shape)
# Run Epochs :
for epoch in range(0, 50):
train_accu, pred = test_NN(train_data, train_labels, train_set, wi2h, wh2o) # Test network on training set and get accuracy and prediction
test_accu, pred = test_NN(test_data, test_labels, test_set, wi2h, wh2o) # Test network on test set and get accuracy and prediction
print("Epoch " + str(epoch) + " :\tTraining Set Accuracy = " + str(train_accu) + "\n\t\tTest Set Accuracy = " + str(test_accu))
wi2h, wh2o = train_NN(wh2o, wi2h, d_wh2o_prev, d_wi2h_prev, mv) # Train network to compute new weights
epoch += 1
train_accu, pred = test_NN(train_data, train_labels, train_set, wi2h, wh2o) # Test network on training set and get accuracy and prediction
test_accu, pred = test_NN(test_data, test_labels, test_set, wi2h, wh2o) # Test network on test set and get accuracy and prediction
print("Epoch " + str(epoch) + " :\tTraining Set Accuracy = " + str(train_accu) + "\n\t\tTest Set Accuracy = " + str(test_accu) + "\n\nHidden Layer Size = " + str(h_size) + "\tMomentum = " + str(mv) + "\tTraining Samples = " + str(train_set) + "\n\nConfusion Matrix :\n")
print(confusion_matrix(test_labels, pred))
print("\n")
return
##########################################################################
def load_data(file_name):
data_file = np.loadtxt(file_name, delimiter=',')
dataset = np.insert(data_file[:, np.arange(1, in_size)]/255, 0, 1, axis=1)
data_labels = data_file[:, 0]
return dataset, data_labels
####################################################################################################
# Load Training and Test Sets :
print("\nLoading Training Set")
train_data, train_labels = load_data('mnist_train.csv')
print("\nLoading Test Set\n")
test_data, test_labels = load_data('mnist_test.csv')
# Experiment 1 :
for h_size in n:
Neural_Network(h_size, 0.9) # Varying the number of hidden units
# Experiment 2 :
for mv in alphas:
Neural_Network(100, mv) # Varying the momentum
# Experiment 3 :
for i in range(0, 2):
train_data, X, train_labels, Y = train_test_split(train_data, train_labels, test_size=0.50) # Splitting data to experiment on half and quarter of the original dataset
train_set = int(train_set/2)
Neural_Network(100, 0.9)
|
the-stack_0_20719 | #
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <[email protected]>
#
import copy
import typing
import asyncio
import logging
import dataclasses
import pyuavcan
from ._session import UDPInputSession, SelectiveUDPInputSession, PromiscuousUDPInputSession
from ._session import UDPOutputSession
from ._frame import UDPFrame
from ._network_map import NetworkMap
from ._port_mapping import udp_port_from_data_specifier
from ._demultiplexer import UDPDemultiplexer, UDPDemultiplexerStatistics
# This is for internal use only: the maximum possible payload per UDP frame.
# We assume that it equals the maximum size of an Ethernet jumbo frame.
# We subtract the size of the L2/L3/L4 overhead here, and add one byte to enable packet truncation detection.
_MAX_UDP_MTU = 9 * 1024 - 20 - 8 + 1
_logger = logging.getLogger(__name__)
@dataclasses.dataclass
class UDPTransportStatistics(pyuavcan.transport.TransportStatistics):
demultiplexer: typing.Dict[pyuavcan.transport.DataSpecifier, UDPDemultiplexerStatistics] = \
dataclasses.field(default_factory=dict)
"""
Basic input session statistics: instances of :class:`UDPDemultiplexerStatistics` keyed by data specifier.
"""
class UDPTransport(pyuavcan.transport.Transport):
"""
The UDP/IP (v4/v6) transport is intended for
low-latency, high-throughput switched Ethernet vehicular networks with complex topologies.
Please read the module documentation for details.
"""
DEFAULT_SERVICE_TRANSFER_MULTIPLIER = 1
"""
By default, service transfer multiplication is disabled for UDP.
This option may be justified for extremely unreliable experimental networks.
"""
VALID_SERVICE_TRANSFER_MULTIPLIER_RANGE = (1, 5)
DEFAULT_MTU = 1024
"""
The recommended application-level MTU is one kibibyte. Lower values should not be used.
This is compatible with the IPv6 minimum MTU requirement, which is 1280 bytes.
The IPv4 has a lower MTU requirement of 576 bytes, but for local networks the MTU is normally much higher.
The transport can always accept any MTU regardless of its configuration.
"""
VALID_MTU_RANGE = (1024, 9000)
"""
A conventional Ethernet jumbo frame can carry up to 9 KiB (9216 bytes).
These are the application-level MTU values, so we take overheads into account.
An attempt to transmit a larger frame than supported by L2 may lead to IP fragmentation,
which is undesirable for time-deterministic networks.
"""
NODE_ID_BIT_LENGTH = NetworkMap.NODE_ID_BIT_LENGTH
"""
The maximum theoretical number of nodes on the network is determined by raising 2 into this power.
A node-ID is the set of this many least significant bits of the IP address of the node.
"""
def __init__(self,
ip_address: str,
mtu: int = DEFAULT_MTU,
service_transfer_multiplier: int = DEFAULT_SERVICE_TRANSFER_MULTIPLIER,
loop: typing.Optional[asyncio.AbstractEventLoop] = None):
"""
:param ip_address: Specifies which local IP address to use for this transport.
This setting also implicitly specifies the network interface to use.
All output sockets will be bound (see ``bind()``) to the specified local address.
If the specified address is not available locally, initialization will fail with
:class:`pyuavcan.transport.InvalidMediaConfigurationError`.
If the specified IP address cannot be mapped to a valid node-ID, the local node will be anonymous.
An IP address will be impossible to map to a valid node-ID if the address happens to be
the broadcast address for the subnet (e.g., ``192.168.0.255/24``),
or if the value of the host address exceeds the valid node-ID range (e.g.,
given IP address ``127.123.123.123/8``, the host address is 8092539,
which exceeds the range of valid node-ID values).
If the local node is anonymous, any attempt to create an output session will fail with
:class:`pyuavcan.transport.OperationNotDefinedForAnonymousNodeError`.
For use on localhost, any IP address from the localhost range can be used;
for example, ``127.0.0.123``.
This generally does not work with physical interfaces;
for example, if a host has one physical interface at ``192.168.1.200``,
an attempt to run a node at ``192.168.1.201`` will trigger the media configuration error
because ``bind()`` will fail with ``EADDRNOTAVAIL``.
One can change the node-ID of a physical transport by altering the network
interface configuration in the underlying operating system itself.
IPv4 addresses shall have the network mask specified, this is necessary for the transport to
determine the subnet's broadcast address (for broadcast UAVCAN transfers).
The mask will also be used to derive the range of node-ID values for the subnet,
capped by two raised to the power of the node-ID bit length.
For example:
- ``192.168.1.200/24`` -- a subnet with up to 255 UAVCAN nodes; for example:
- ``192.168.1.0`` -- node-ID of zero (may be unusable depending on the network configuration).
- ``192.168.1.254`` -- the maximum available node-ID in this subnet is 254.
- ``192.168.1.255`` -- the broadcast address, not a valid node. If you specify this address,
the local node will be anonymous.
- ``127.0.0.42/8`` -- a subnet with the maximum possible number of nodes ``2**NODE_ID_BIT_LENGTH``.
The local loopback subnet is useful for testing.
- ``127.0.0.1`` -- node-ID 1.
- ``127.0.0.255`` -- node-ID 255.
- ``127.0.15.255`` -- node-ID 4095.
- ``127.123.123.123`` -- not a valid node-ID because it exceeds ``2**NODE_ID_BIT_LENGTH``.
All traffic from this address will be rejected as non-UAVCAN.
If used for local node, the local node will be anonymous.
- ``127.255.255.255`` -- the broadcast address; notice that this address lies outside of the
node-ID-mapped space, no conflicts. If used for local node, the local node will be anonymous.
IPv6 addresses may be specified without the mask, in which case it will be assumed to be
equal ``128 - NODE_ID_BIT_LENGTH``.
Don't forget to specify the scope-ID for link-local IPv6 addresses.
:param mtu: The application-level MTU for outgoing packets.
In other words, this is the maximum number of payload bytes per UDP frame.
Transfers where the number of payload bytes does not exceed this value will be single-frame transfers,
otherwise, multi-frame transfers will be used.
This setting affects only outgoing frames;
the MTU of incoming frames is fixed at a sufficiently large value to accept any meaningful UDP frame.
:param service_transfer_multiplier: Deterministic data loss mitigation is disabled by default.
This parameter specifies the number of times each outgoing service transfer will be repeated.
This setting does not affect message transfers.
:param loop: The event loop to use. Defaults to :func:`asyncio.get_event_loop`.
"""
self._network_map = NetworkMap.new(ip_address)
self._mtu = int(mtu)
self._srv_multiplier = int(service_transfer_multiplier)
self._loop = loop if loop is not None else asyncio.get_event_loop()
low, high = self.VALID_SERVICE_TRANSFER_MULTIPLIER_RANGE
if not (low <= self._srv_multiplier <= high):
raise ValueError(f'Invalid service transfer multiplier: {self._srv_multiplier}')
low, high = self.VALID_MTU_RANGE
if not (low <= self._mtu <= high):
raise ValueError(f'Invalid MTU: {self._mtu} bytes')
_logger.debug(f'IP: {self._network_map}; max nodes: {self._network_map.max_nodes}; '
f'local node-ID: {self.local_node_id}')
self._demultiplexer_registry: typing.Dict[pyuavcan.transport.DataSpecifier, UDPDemultiplexer] = {}
self._input_registry: typing.Dict[pyuavcan.transport.InputSessionSpecifier, UDPInputSession] = {}
self._output_registry: typing.Dict[pyuavcan.transport.OutputSessionSpecifier, UDPOutputSession] = {}
self._closed = False
self._statistics = UDPTransportStatistics()
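    # Illustrative construction (sketch; the addresses mirror the examples given
    # in the __init__ docstring above):
    #   transport = UDPTransport('127.0.0.42/8')                 # loopback subnet, node-ID 42
    #   transport = UDPTransport('192.168.1.200/24', mtu=1024)   # physical interface, node-ID 200
    # Using the subnet broadcast address (e.g. 192.168.1.255/24) yields an anonymous local node.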
@property
def loop(self) -> asyncio.AbstractEventLoop:
return self._loop
@property
def protocol_parameters(self) -> pyuavcan.transport.ProtocolParameters:
return pyuavcan.transport.ProtocolParameters(
transfer_id_modulo=UDPFrame.TRANSFER_ID_MASK + 1,
max_nodes=self._network_map.max_nodes,
mtu=self._mtu,
)
@property
def local_node_id(self) -> typing.Optional[int]:
return self._network_map.local_node_id
def close(self) -> None:
self._closed = True
for s in (*self.input_sessions, *self.output_sessions):
try:
s.close()
except Exception as ex: # pragma: no cover
_logger.exception('%s: Failed to close %r: %s', self, s, ex)
def get_input_session(self,
specifier: pyuavcan.transport.InputSessionSpecifier,
payload_metadata: pyuavcan.transport.PayloadMetadata) -> UDPInputSession:
self._ensure_not_closed()
if specifier not in self._input_registry:
self._setup_input_session(specifier, payload_metadata)
assert specifier.data_specifier in self._demultiplexer_registry
out = self._input_registry[specifier]
assert isinstance(out, UDPInputSession)
assert out.specifier == specifier
return out
def get_output_session(self,
specifier: pyuavcan.transport.OutputSessionSpecifier,
payload_metadata: pyuavcan.transport.PayloadMetadata) -> UDPOutputSession:
self._ensure_not_closed()
if specifier not in self._output_registry:
def finalizer() -> None:
del self._output_registry[specifier]
multiplier = \
self._srv_multiplier if isinstance(specifier.data_specifier, pyuavcan.transport.ServiceDataSpecifier) \
else 1
sock = self._network_map.make_output_socket(
specifier.remote_node_id,
udp_port_from_data_specifier(specifier.data_specifier)
)
self._output_registry[specifier] = UDPOutputSession(
specifier=specifier,
payload_metadata=payload_metadata,
mtu=self._mtu,
multiplier=multiplier,
sock=sock,
loop=self._loop,
finalizer=finalizer,
)
out = self._output_registry[specifier]
assert isinstance(out, UDPOutputSession)
assert out.specifier == specifier
return out
def sample_statistics(self) -> UDPTransportStatistics:
return copy.copy(self._statistics)
@property
def input_sessions(self) -> typing.Sequence[UDPInputSession]:
return list(self._input_registry.values())
@property
def output_sessions(self) -> typing.Sequence[UDPOutputSession]:
return list(self._output_registry.values())
@property
def descriptor(self) -> str:
return f'<udp srv_mult="{self._srv_multiplier}">{self._network_map}</udp>'
@property
def local_ip_address_with_netmask(self) -> str:
"""
The configured IP address of the local node with network mask.
For example: ``192.168.1.200/24``.
"""
return str(self._network_map)
def _setup_input_session(self,
specifier: pyuavcan.transport.InputSessionSpecifier,
payload_metadata: pyuavcan.transport.PayloadMetadata) -> None:
"""
In order to set up a new input session, we have to link together a lot of objects. Tricky.
Also, the setup and teardown actions shall be atomic. Hence the separate method.
"""
assert specifier not in self._input_registry
try:
if specifier.data_specifier not in self._demultiplexer_registry:
_logger.debug('%r: Setting up new demultiplexer for %s', self, specifier.data_specifier)
# Service transfers cannot be broadcast.
expect_broadcast = not isinstance(specifier.data_specifier, pyuavcan.transport.ServiceDataSpecifier)
udp_port = udp_port_from_data_specifier(specifier.data_specifier)
self._demultiplexer_registry[specifier.data_specifier] = UDPDemultiplexer(
sock=self._network_map.make_input_socket(udp_port, expect_broadcast),
udp_mtu=_MAX_UDP_MTU,
node_id_mapper=self._network_map.map_ip_address_to_node_id,
local_node_id=self.local_node_id,
statistics=self._statistics.demultiplexer.setdefault(specifier.data_specifier,
UDPDemultiplexerStatistics()),
loop=self.loop,
)
cls: typing.Union[typing.Type[PromiscuousUDPInputSession], typing.Type[SelectiveUDPInputSession]] = \
PromiscuousUDPInputSession if specifier.is_promiscuous else SelectiveUDPInputSession
session = cls(specifier=specifier,
payload_metadata=payload_metadata,
loop=self.loop,
finalizer=lambda: self._teardown_input_session(specifier))
# noinspection PyProtectedMember
self._demultiplexer_registry[specifier.data_specifier].add_listener(specifier.remote_node_id,
session._process_frame)
except Exception:
self._teardown_input_session(specifier) # Rollback to ensure atomicity.
raise
self._input_registry[specifier] = session
def _teardown_input_session(self, specifier: pyuavcan.transport.InputSessionSpecifier) -> None:
"""
The finalizer may be invoked at any point during the setup process,
so it must be able to deconstruct the pipeline even if it is not fully set up.
This is why we have these try-except everywhere. Who knew that atomic transactions can be so messy?
"""
# Unregister the session first.
try:
del self._input_registry[specifier]
except LookupError:
pass
# Remove the session from the list of demultiplexer listeners.
try:
demux = self._demultiplexer_registry[specifier.data_specifier]
except LookupError:
pass # The demultiplexer has not been set up yet, nothing to do.
else:
try:
demux.remove_listener(specifier.remote_node_id)
except LookupError:
pass
# Destroy the demultiplexer if there are no listeners left.
if not demux.has_listeners:
try:
_logger.debug('%r: Destroying %r for %s', self, demux, specifier.data_specifier)
demux.close()
finally:
del self._demultiplexer_registry[specifier.data_specifier]
def _ensure_not_closed(self) -> None:
if self._closed:
raise pyuavcan.transport.ResourceClosedError(f'{self} is closed')
|
the-stack_0_20721 | import unittest
from tkp.db.orm import DataSet, Image
import tkp.db
import tkp.db.database
from tkp.db.associations import associate_extracted_sources
from tkp.db.general import insert_extracted_sources
from tkp.testutil.decorators import requires_database
from tkp.testutil import db_subs
from tkp.db.generic import columns_from_table
# Convenient default values
deRuiter_r = 3.7
new_source_sigma_margin = 3
class TestSourceAssociation(unittest.TestCase):
@requires_database()
def setUp(self):
self.database = tkp.db.database.Database()
self.dataset = DataSet(data={'description': "Src. assoc:" +
self._testMethodName},
database=self.database)
self.im_params = db_subs.generate_timespaced_dbimages_data(n_images=8)
self.db_imgs=[]
def tearDown(self):
tkp.db.rollback()
def test_null_case_sequential(self):
"""test_null_case_sequential
-Check extractedsource insertion routines can deal with empty input!
-Check source association can too
"""
for im in self.im_params:
self.db_imgs.append(Image(data=im, dataset=self.dataset))
insert_extracted_sources(self.db_imgs[-1]._id, [],'blind')
associate_extracted_sources(self.db_imgs[-1]._id, deRuiter_r,
new_source_sigma_margin)
running_cat = columns_from_table(table="runningcatalog",
keywords="*",
where={"dataset":self.dataset.id})
self.assertEqual(len(running_cat), 0)
def test_only_first_epoch_source(self):
"""test_only_first_epoch_source
- Pretend to extract a source only from the first image.
- Run source association for each image, as we would in TraP.
- Check the image source listing works
- Check runcat and assocxtrsource are correct.
"""
first_epoch = True
extracted_source_ids = []
for im in self.im_params:
self.db_imgs.append(Image( data=im, dataset=self.dataset))
last_img = self.db_imgs[-1]
if first_epoch:
insert_extracted_sources(last_img._id,
[db_subs.example_extractedsource_tuple()], 'blind')
associate_extracted_sources(last_img._id, deRuiter_r,
new_source_sigma_margin)
# First, check the runcat has been updated correctly
running_cat = columns_from_table(table="runningcatalog",
keywords=['datapoints'],
where={"dataset": self.dataset.id})
self.assertEqual(len(running_cat), 1)
self.assertEqual(running_cat[0]['datapoints'], 1)
last_img.update()
last_img.update_sources()
img_xtrsrc_ids = [src.id for src in last_img.sources]
if first_epoch:
self.assertEqual(len(img_xtrsrc_ids),1)
extracted_source_ids.extend(img_xtrsrc_ids)
assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
keywords=['runcat', 'xtrsrc' ],
where={"xtrsrc":img_xtrsrc_ids[0]})
self.assertEqual(len(assocxtrsrcs_rows),1)
self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], img_xtrsrc_ids[0])
else:
self.assertEqual(len(img_xtrsrc_ids),0)
first_epoch=False
#Assocxtrsources still ok after multiple images?
self.assertEqual(len(extracted_source_ids),1)
assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
keywords=['runcat', 'xtrsrc' ],
where={"xtrsrc":extracted_source_ids[0]})
self.assertEqual(len(assocxtrsrcs_rows),1)
self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], extracted_source_ids[0],
"Runcat xtrsrc entry must match the only extracted source")
def test_single_fixed_source(self):
"""test_single_fixed_source
- Pretend to extract the same source in each of a series of images.
- Perform source association
- Check the image source listing works
- Check runcat, assocxtrsource.
"""
fixed_src_runcat_id = None
for img_idx, im in enumerate(self.im_params):
self.db_imgs.append( Image(data=im, dataset=self.dataset))
last_img = self.db_imgs[-1]
insert_extracted_sources(last_img._id,
[db_subs.example_extractedsource_tuple()],'blind')
associate_extracted_sources(last_img._id, deRuiter_r,
new_source_sigma_margin)
running_cat = columns_from_table(table="runningcatalog",
keywords=['id', 'datapoints'],
where={"dataset":self.dataset.id})
self.assertEqual(len(running_cat), 1)
self.assertEqual(running_cat[0]['datapoints'], img_idx+1)
# Check runcat ID does not change for a steady single source
if img_idx == 0:
fixed_src_runcat_id = running_cat[0]['id']
self.assertIsNotNone(fixed_src_runcat_id, "No runcat id assigned to source")
self.assertEqual(running_cat[0]['id'], fixed_src_runcat_id,
"Multiple runcat ids for same fixed source")
runcat_flux = columns_from_table(table="runningcatalog_flux",
keywords=['f_datapoints'],
where={"runcat":fixed_src_runcat_id})
self.assertEqual(len(runcat_flux),1)
self.assertEqual(img_idx+1, runcat_flux[0]['f_datapoints'])
last_img.update()
last_img.update_sources()
img_xtrsrc_ids = [src.id for src in last_img.sources]
self.assertEqual(len(img_xtrsrc_ids), 1)
#Get the association row for most recent extraction:
assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
keywords=['runcat', 'xtrsrc' ],
where={"xtrsrc":img_xtrsrc_ids[0]})
# print "ImageID:", last_img.id
# print "Imgs sources:", img_xtrsrc_ids
# print "Assoc entries:", assocxtrsrcs_rows
# print "First extracted source id:", ds_source_ids[0]
# if len(assocxtrsrcs_rows):
# print "Associated source:", assocxtrsrcs_rows[0]['xtrsrc']
self.assertEqual(len(assocxtrsrcs_rows),1,
msg="No entries in assocxtrsrcs for image number "+str(img_idx))
self.assertEqual(assocxtrsrcs_rows[0]['runcat'], fixed_src_runcat_id,
"Mismatched runcat id in assocxtrsrc table")
|
the-stack_0_20722 | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rdpconnections(base_resource) :
""" Configuration for active rdp connections resource. """
def __init__(self) :
self._username = None
self._all = None
self._endpointip = None
self._endpointport = None
self._targetip = None
self._targetport = None
self._peid = None
self.___count = None
@property
def username(self) :
r"""User name for which to display connections.<br/>Minimum length = 1.
"""
try :
return self._username
except Exception as e:
raise e
@username.setter
def username(self, username) :
r"""User name for which to display connections.<br/>Minimum length = 1
"""
try :
self._username = username
except Exception as e:
raise e
@property
def all(self) :
r"""Terminate all active rdpconnections.
"""
try :
return self._all
except Exception as e:
raise e
@all.setter
def all(self, all) :
r"""Terminate all active rdpconnections.
"""
try :
self._all = all
except Exception as e:
raise e
@property
def endpointip(self) :
r"""The client IP address.
"""
try :
return self._endpointip
except Exception as e:
raise e
@property
def endpointport(self) :
r"""The client port.<br/>Range 1 - 65535<br/>* in CLI is represented as 65535 in NITRO API.
"""
try :
return self._endpointport
except Exception as e:
raise e
@property
def targetip(self) :
r"""The Server IP address.
"""
try :
return self._targetip
except Exception as e:
raise e
@property
def targetport(self) :
r"""The server port.<br/>Range 1 - 65535<br/>* in CLI is represented as 65535 in NITRO API.
"""
try :
return self._targetport
except Exception as e:
raise e
@property
def peid(self) :
r"""Core id of the session owner.
"""
try :
return self._peid
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rdpconnections_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rdpconnections
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def kill(cls, client, resource) :
r""" Use this API to kill rdpconnections.
"""
try :
if type(resource) is not list :
killresource = rdpconnections()
killresource.username = resource.username
killresource.all = resource.all
return killresource.perform_operation(client,"kill")
else :
if (resource and len(resource) > 0) :
killresources = [ rdpconnections() for _ in range(len(resource))]
for i in range(len(resource)) :
killresources[i].username = resource[i].username
killresources[i].all = resource[i].all
result = cls.perform_operation_bulk_request(client, killresources,"kill")
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the rdpconnections resources that are configured on netscaler.
"""
try :
if not name :
obj = rdpconnections()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
r""" Use this API to fetch all the rdpconnections resources that are configured on netscaler.
# This uses rdpconnections_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = rdpconnections()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of rdpconnections resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = rdpconnections()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the rdpconnections resources configured on NetScaler.
"""
try :
obj = rdpconnections()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of rdpconnections resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = rdpconnections()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class rdpconnections_response(base_response) :
def __init__(self, length=1) :
self.rdpconnections = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rdpconnections = [rdpconnections() for _ in range(length)]
|
the-stack_0_20723 | from wsproto.events import BytesMessage, TextMessage, Message
import anyio
import pytest
from asyncwebsockets.client import open_websocket
from asyncwebsockets.client import create_websocket_client
from asyncwebsockets.server import open_websocket_server
@pytest.mark.anyio
async def test_echo():
async with open_websocket("ws://echo.websocket.org") as sock: # pylint: disable=E1701
await sock.send(b"test")
rcvd = 0
async for message in sock:
print("Event received", message)
if isinstance(message, BytesMessage):
assert message.data == b"test"
rcvd += 1
await sock.close(code=1000, reason="Thank you!")
assert rcvd == 1
@pytest.mark.anyio
async def test_local_echo():
async with anyio.create_task_group() as n:
async def serve_one(s):
async with open_websocket_server(s) as w: # pylint: disable=E1701
async for m in w:
if isinstance(m, Message):
await w.send(m.data)
else:
break
async def serve(*, task_status):
listeners = await anyio.create_tcp_listener(local_port=0)
task_status.started(listeners)
await listeners.serve(serve_one)
listeners = await n.start(serve)
addr = listeners.extra(anyio.abc.SocketAttribute.local_address)
conn = await anyio.connect_tcp(*addr)
sock = await create_websocket_client(conn, "localhost", "/", subprotocols=["echo"])
await sock.send(b"test")
rcvd = 0
async for message in sock:
print("Event received", message)
if isinstance(message, BytesMessage):
assert message.data == b"test"
rcvd += 1
await sock.close(code=1000, reason="Thank you!")
assert rcvd == 1
n.cancel_scope.cancel()
await sock.close()
@pytest.mark.anyio
async def test_secure_echo():
async with open_websocket("wss://echo.websocket.org") as sock: # pylint: disable=E1701
await sock.send("test")
rcvd = 0
async for message in sock:
print("Event received", message)
if isinstance(message, TextMessage):
assert message.data == "test"
rcvd += 1
await sock.close(code=1000, reason="Thank you!")
assert rcvd == 1
|
the-stack_0_20724 |
import json
import os
import subprocess
from pywps import Process, ComplexInput, LiteralOutput, Format
from pywps.wpsserver import temp_dir
__author__ = 'matteo'
class Area(Process):
"""Process calculating area of given polygon
"""
def __init__(self):
inputs = [ComplexInput('layer', 'Layer',
[Format('application/gml+xml')])]
outputs = [LiteralOutput('area', 'Area', data_type='string')]
super(Area, self).__init__(
self._handler,
identifier='area',
title='Process Area',
abstract="""Process returns the area of each
feature from a submitted GML file""",
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
# ogr2ogr requires gdal-bin
from shapely.geometry import shape
with temp_dir() as tmp:
input_gml = request.inputs['layer'][0].file
input_geojson = os.path.join(tmp, 'input.geojson')
subprocess.check_call(['ogr2ogr', '-f', 'geojson',
str(input_geojson), input_gml])
with open(input_geojson, 'rb') as f:
data = json.loads(f.read())
for feature in data['features']:
geom = shape(feature['geometry'])
feature['area'] = geom.area
response.outputs['area'].data = [feature['area']
for feature in data['features']]
return response
|
the-stack_0_20725 | from metasdk import MetaApp
META = MetaApp()
log = META.log
configuration = {
"database": {
# specify the meta alias for the DB
"alias": "adplatform",
# or specify the full connection details
# "name": "XXXXXXXX",
# "host": "XXXXXXXX",
# "port": 777,
# "username": "XXXXXXXX",
# "password": "XXXXXXXX",
# "type": "MySQL"
},
"download": {
"sourceFormat": "JSON_NEWLINE",
"dbQuery": {
"command": """ SELECT * FROM users LIMIT 10 """
}
}
}
DbService = META.DbService
result = DbService.persist_query(configuration)
print(u"result = %s" % str(result))
print(u"result['downloadUrlPart'] = %s" % str(result['downloadUrlPart'])) |
the-stack_0_20726 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# srg_nn.py
#
# author: H. Hergert
# version: 1.1.0
# date: Nov 21, 2016
#
# tested with Python v2.7
#
# SRG evolution of a chiral NN interaction with cutoff Lambda in the deuteron
# partial waves, using a Gauss-Legendre momentum mesh.
#
#------------------------------------------------------------------------------
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import SymLogNorm, Normalize
from mpl_toolkits.axes_grid1 import AxesGrid, make_axes_locatable
import numpy as np
from numpy import array, dot, diag, reshape, sqrt
from math import sqrt, pi
from scipy.linalg import eigvalsh
from scipy.integrate import ode
#------------------------------------------------------------------------------
# constants
#------------------------------------------------------------------------------
hbarm = 41.4710570772
#------------------------------------------------------------------------------
# helpers
#------------------------------------------------------------------------------
def find_nearest(array, value):
distance = np.absolute(array-value)
indices = np.where(distance == np.min(distance))
return indices[0]
#------------------------------------------------------------------------------
# plot matrix snapshots
#------------------------------------------------------------------------------
def plot_snapshots(Hs, flowparams, momenta, qMax):
fig = plt.figure(1, (10., 50.))
nplots = len(flowparams)
ncols = 1
nrows = nplots
grid = AxesGrid(fig, 111, # similar to subplot(111)
nrows_ncols=(nrows, ncols), # creates grid of axes
axes_pad=1., # pad between axes in inch.
label_mode='all', # put labels on left, bottom
cbar_mode='each', # color bars
cbar_pad=0.20, # insert space between plots and color bar
cbar_size='10%' # size of colorbar relative to last image
)
hmax = 0.0
hmin = 0.0
for h in Hs:
hmax = max(hmax, np.ma.max(h))
hmin = min(hmin, np.ma.min(h))
# get indices of max. momenta
cmax, ccmax = find_nearest(momenta, qMax)
edge = len(momenta)//2   # integer start index of the second (3D1) block
# create individual snapshots - figures are still addressed by single index,
# despite multi-row grid
for s in range(Hs.shape[0]):
h = np.vstack((np.hstack((Hs[s,0:cmax,0:cmax], Hs[s,0:cmax,edge:ccmax])),
np.hstack((Hs[s,edge:ccmax,0:cmax], Hs[s,edge:ccmax,edge:ccmax]))
))
img = grid[s].imshow(h,
cmap=plt.get_cmap('RdBu_r'), # choose color map
interpolation='bicubic',
# filterrad=10,
norm=SymLogNorm(linthresh=0.0001, vmax=2., vmin=-2.0), # normalize
vmin=-2.0, # min/max values for data
vmax=2.0
)
# contours
levels = np.arange(-2, 1, 0.12)
grid[s].contour(h, levels, colors='black', ls="-", origin='lower',linewidths=1)
# plot labels, tick marks etc.
grid[s].set_title('$\\lambda=%s\,\mathrm{fm}^{-1}$'%flowparams[s])
grid[s].set_xticks([0,20,40,60,80,100,120,140,160])
grid[s].set_yticks([0,20,40,60,80,100,120,140,160])
grid[s].set_xticklabels(['$0$','$1.0$','$2.0$','$3.0$','$4.0$','$1.0$','$2.0$','$3.0$','$4.0$'])
grid[s].set_yticklabels(['$0$','$1.0$','$2.0$','$3.0$','$4.0$','$1.0$','$2.0$','$3.0$','$4.0$'])
grid[s].tick_params(axis='both',which='major',width=1.5,length=5)
grid[s].tick_params(axis='both',which='minor',width=1.5,length=5)
grid[s].axvline(x=[80],color="black", ls="--")
grid[s].axhline(y=[80],color="black", ls="--")
grid[s].xaxis.set_label_text("$q\,[\mathrm{fm}^{-1}]$")
grid[s].yaxis.set_label_text("$q'\,[\mathrm{fm}^{-1}]$")
# color bar
cbar = grid.cbar_axes[s]
plt.colorbar(img, cax=cbar,
ticks=[ -2.0, -1.0, -1.0e-1, -1.0e-2, -1.0e-3, -1.0e-4, 0.,
1.0e-4, 1.0e-3, 1.0e-2, 1.0e-1, 1.0]
)
cbar.axes.set_yticklabels(['$-2.0$', '$-1.0$', '$-10^{-1}$', '$-10^{-2}$',
'$-10^{-3}$', '$-10^{-4}$','$0.0$', '$10^{-4}$', '$10^{-3}$', '$10^{-2}$',
'$10^{-1}$', '$1.0$'])
cbar.set_ylabel("$V(q,q')\,\mathrm{[fm]}$")
# save figure
plt.savefig("srg_n3lo500.pdf", bbox_inches="tight", pad_inches=0.05)
plt.savefig("srg_n3lo500.png", bbox_inches="tight", pad_inches=0.05)
#plt.show()
return
#------------------------------------------------------------------------------
# matrix element I/O, mesh functions
#------------------------------------------------------------------------------
def uniform_weights(momenta):
weights = np.ones_like(momenta)
weights *= abs(momenta[1]-momenta[0])
return weights
def read_mesh(filename):
data = np.loadtxt(filename, comments="#")
dim = data.shape[1]
momenta = data[0,:dim]
return momenta
def read_interaction(filename):
data = np.loadtxt(filename, comments="#")
dim = data.shape[1]
V = data[1:,:dim]
return V
#------------------------------------------------------------------------------
# commutator
#------------------------------------------------------------------------------
def commutator(a,b):
return dot(a,b) - dot(b,a)
#------------------------------------------------------------------------------
# flow equation (right-hand side)
#------------------------------------------------------------------------------
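# Note: this implements dH/ds = [[T, H], H] with H = T + V and generator
# eta = [T, H] = [T, V] (since [T, T] = 0). With the flow parameter
# s = 1/lambda^4 one has ds/dlambda = -4/lambda^5, which gives the
# prefactor -4/lam**5 applied to [eta, T + V] below.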
def derivative(lam, y, T):
dim = T.shape[0]
# reshape the solution vector into a dim x dim matrix
V = reshape(y, (dim, dim))
# calculate the generator
eta = commutator(T, V)
# dV is the derivative in matrix form
dV = -4.0/(lam**5) * commutator(eta, T+V)
# convert dH into a linear array for the ODE solver
dy = reshape(dV, -1)
return dy
#------------------------------------------------------------------------------
# Main program
#------------------------------------------------------------------------------
def main():
# duplicate the mesh points (and weights, see below) because we have a
# coupled-channel problem
mom_tmp = read_mesh("n3lo500_3s1.meq")
momenta = np.concatenate([mom_tmp,mom_tmp])
weights = uniform_weights(momenta)
dim = len(momenta)
# set up p^2 (kinetic energy in units where h^2/2\mu = 1)
T = diag(momenta*momenta)
# set up interaction matrix in coupled channels:
#
# / V_{3S1} V_{3S1-3D1} \
# \ V_{3S1-3D1}^\dag V_{3D1} /
# read individual partial waves
partial_waves=[]
for filename in ["n3lo500_3s1.meq", "n3lo500_3d1.meq", "n3lo500_3sd1.meq"]:
partial_waves.append(read_interaction(filename))
# print partial_waves[-1].shape
# assemble coupled channel matrix
V = np.vstack((np.hstack((partial_waves[0], partial_waves[2])),
np.hstack((np.transpose(partial_waves[2]), partial_waves[1]))
))
# switch to scattering units
V = V/hbarm
# set up conversion matrix for V: this is used to absorb momentum^2 and
# weight factors into V, so that we can use the commutator routines for
# eta and the derivative as is
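# Concretely: V(i,j) -> q_i * q_j * sqrt(w_i * w_j) * V(i,j), i.e. the mesh
# weights and momentum factors of the radial integral are folded into V.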
conversion_matrix = np.zeros_like(T)
for i in range(dim):
for j in range(dim):
# Regularize the conversion matrix at zero momentum - set elements
# to machine precision so we can invert the matrix for plots etc.
# Note that momentum values are positive, by construction.
qiqj = max(np.finfo(float).eps, momenta[i]*momenta[j])
conversion_matrix[i,j] = qiqj*sqrt(weights[i]*weights[j])
V *= conversion_matrix
# turn initial interaction into a linear array
y0 = reshape(V, -1)
# flow parameters for snapshot images - the initial lambda should be
# infinity, we use something reasonably large
lam_initial = 20.0
lam_final = 1.5
# integrate using scipy.ode instead of scipy.odeint - this gives
# us more control over the solver
solver = ode(derivative,jac=None)
# equations may get stiff, so we use VODE and Backward Differentiation
solver.set_integrator('vode', method='bdf', order=5, nsteps=1000)
solver.set_f_params(T)
solver.set_initial_value(y0, lam_initial)
print("%-8s %-14s"%("s", "E_deuteron [MeV]"))
print("-----------------------------------------------------------------------------------------------------------------")
# calculate exact eigenvalues
print("%8.5f %14.8f"%(solver.t, eigvalsh((T + V)*hbarm)[0]))
flowparams=([lam_initial])
Vs=([V])
while solver.successful() and solver.t > lam_final:
# adjust the step size in different regions of the flow parameter
if solver.t >= 6.0:
ys = solver.integrate(solver.t-1.0)
elif solver.t < 6.0 and solver.t >= 2.5:
ys = solver.integrate(solver.t-0.5)
elif solver.t < 2.5 and solver.t >= lam_final:
ys = solver.integrate(solver.t-0.1)
# add evolved interactions to the list
flowparams.append(solver.t)
Vtmp = reshape(ys,(dim,dim))
Vs.append(Vtmp)
print("%8.5f %14.8f"%(solver.t, eigvalsh((T + Vtmp)*hbarm)[0]))
# generate snapshots of the evolution
plot_snapshots((Vs[-2:]/conversion_matrix), flowparams[-2:], momenta, 4.0)
return
#------------------------------------------------------------------------------
# make executable
#------------------------------------------------------------------------------
if __name__ == "__main__":
main() |
the-stack_0_20731 | from __future__ import annotations
import os
import procrunner
import pytest
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx import phil
from dials.algorithms.refinement import RefinerFactory
from dials.array_family import flex
def test1(dials_regression, run_in_tmpdir):
from scitbx import matrix
data_dir = os.path.join(dials_regression, "refinement_test_data", "multi_stills")
result = procrunner.run(
[
"dials.refine",
os.path.join(data_dir, "combined_experiments.json"),
os.path.join(data_dir, "combined_reflections.pickle"),
]
)
assert not result.returncode and not result.stderr
# load results
reg_exp = ExperimentListFactory.from_json_file(
os.path.join(data_dir, "regression_experiments.json"), check_format=False
)
ref_exp = ExperimentListFactory.from_json_file("refined.expt", check_format=False)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(
b2,
wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol,
)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(
d2,
fast_axis_tolerance=1e-4,
slow_axis_tolerance=1e-4,
origin_tolerance=1e-2,
)
@pytest.mark.skipif(
os.name == "nt",
reason="Multiprocessing error on Windows: 'This class cannot be instantiated from Python'",
)
def test_multi_process_refinement_gives_same_results_as_single_process_refinement(
dials_regression, run_in_tmpdir
):
data_dir = os.path.join(dials_regression, "refinement_test_data", "multi_stills")
cmd = [
"dials.refine",
os.path.join(data_dir, "combined_experiments.json"),
os.path.join(data_dir, "combined_reflections.pickle"),
"outlier.algorithm=null",
"engine=LBFGScurvs",
"output.reflections=None",
]
result = procrunner.run(cmd + ["output.experiments=refined_nproc4.expt", "nproc=4"])
assert not result.returncode and not result.stderr
result = procrunner.run(cmd + ["output.experiments=refined_nproc1.expt", "nproc=1"])
assert not result.returncode and not result.stderr
# load results
nproc1 = ExperimentListFactory.from_json_file(
"refined_nproc1.expt", check_format=False
)
nproc4 = ExperimentListFactory.from_json_file(
"refined_nproc4.expt", check_format=False
)
# compare results
for b1, b2 in zip(nproc1.beams(), nproc4.beams()):
assert b1.is_similar_to(b2)
for c1, c2 in zip(nproc1.crystals(), nproc4.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(nproc1.detectors(), nproc4.detectors()):
assert d1.is_similar_to(
d2,
fast_axis_tolerance=5e-5,
slow_axis_tolerance=5e-5,
origin_tolerance=5e-5,
)
def test_restrained_refinement_with_fixed_parameterisations(
dials_regression, run_in_tmpdir
):
# Avoid a regression to https://github.com/dials/dials/issues/1142 by
# testing that refinement succeeds when some parameterisations are fixed
# by parameter auto reduction code, but restraints are requested for
# those parameterisations.
# The phil scope
from dials.algorithms.refinement.refiner import phil_scope
user_phil = phil.parse(
"""
refinement {
parameterisation {
auto_reduction {
min_nref_per_parameter = 90
action = fail *fix remove
}
crystal {
unit_cell {
restraints {
tie_to_target {
values = 95 95 132 90 90 120
sigmas = 1 1 1 0 0 0
id = 0 1 2 3 4 5 6 7 8 9
}
}
}
}
}
}
"""
)
working_phil = phil_scope.fetch(source=user_phil)
working_params = working_phil.extract()
# use the multi stills test data
data_dir = os.path.join(dials_regression, "refinement_test_data", "multi_stills")
experiments_path = os.path.join(data_dir, "combined_experiments.json")
pickle_path = os.path.join(data_dir, "combined_reflections.pickle")
experiments = ExperimentListFactory.from_json_file(
experiments_path, check_format=False
)
reflections = flex.reflection_table.from_file(pickle_path)
refiner = RefinerFactory.from_parameters_data_experiments(
working_params, reflections, experiments
)
history = refiner.run()
rmsd_limits = (0.2044, 0.2220, 0.0063)
for a, b in zip(history["rmsd"][-1], rmsd_limits):
assert a < b
|
the-stack_0_20732 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'g.i.s.A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
the-stack_0_20735 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='sdmaster',
version='1.0.3',
packages=setuptools.find_packages(),
url='https://github.com/RedrumSherlock/stockdatamaster',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license='MIT',
author='Mike Wang',
author_email='[email protected]',
description='Stock Data Master',
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>=3.5',
install_requires=["pandas", "plotly", "matplotlib", "numpy", "pytz"],
package_dir={'sdmaster':'sdm'},
) |
the-stack_0_20737 | import os
import glob
from netCDF4 import Dataset
import numpy as np
from datetime import date
def getyear(fname):
return fname[-36:-32]
def getmonth(fname):
return fname[-32:-30]
def getday(fname):
return fname[-30:-28]
def getdoy(fname):
"""Extract doy from the file name
:param fname: ex. BEC_SM____SMOS__EUM_L4__A_20200826T035430_001km_3d_REP_v5.0.nc
:return: doy
"""
year = int(getyear(fname))
month = int(getmonth(fname))
day = int(getday(fname))
#print(fname, year,month,day)
d = date(year, month, day)
return int(d.strftime("%j"))
def getgeo(fname, longitude, latitude):
"""Get geographically bound data from NC file
:param fname: netcdf filename (.nc)
:param longitude: longitude (dd.ddd)
:param latitude: latitude (dd.ddd)
:return: pixel value at long,lat
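Example (illustrative only; assumes a matching BEC SMOS L4 file is on disk and
the coordinates are placeholders):
    sm = getgeo('BEC_SM____SMOS__EUM_L4__A_20200826T035430_001km_3d_REP_v5.0.nc', 2.154, 41.390)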
"""
f = Dataset(fname)
sm = f.variables['SM']
longitude_array = f.variables['lon'][:].data
latitude_array = f.variables['lat'][:].data
smraw = f.variables['SM'][:].data
i = np.abs(longitude_array - float(longitude)).argmin()
j = np.abs(latitude_array - float(latitude)).argmin()
pixval = smraw[:, j, i]
if pixval == sm._FillValue or pixval == sm.missing_value:
pixval = np.nan
else:
pixval = pixval
# pixval = sm.scale_factor * pixval + sm.add_offset
if pixval < 0.0:
pixval = 0.0
if pixval > 0.6:
pixval = 0.6
return pixval
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
#>>> # linear interpolation of NaNs
#>>> nans, x= nan_helper(y)
#>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
def getgeoydoy(rsdir, wildcard, longitude, latitude, listyear, listdoy, **kwargs):
interpolate = kwargs.get('i', None)
py_glob = os.path.join(rsdir, wildcard)
#print(rsdir, wildcard)
#print(py_glob)
RSyear = []
RSdoy = []
value = []
for path in glob.iglob(py_glob):
year = getyear(path)
#print(year)
doy = getdoy(path)
#print(doy)
RSyear.append(year)
RSdoy.append(doy)
value.append(getgeo(path, longitude, latitude))
#print(value[-1])
resarr = np.zeros((len(listdoy),), dtype=float)
resarr.fill(np.nan)
# Fill list with nan for the len(listdoy)
for d in range(len(listdoy)):
for l in range(len(RSdoy)):
if int(listyear[d]) == int(RSyear[l]):
if int(listdoy[d]) == int(RSdoy[l]):
resarr[d] = value[l]
# If Kwargs 'i' is set to True (i=True), interpolate the content
try:
if interpolate is True:
# interpolate nans to values
nans, x = nan_helper(resarr)
resarr[nans] = np.interp(x(nans), x(~nans), resarr[~nans])
except:
pass
# print(result)
result = resarr.tolist()
return result
# int16 SM(time, lat, lon)
# long_name: Surface Soil Moisture
# units: m^3/m^3
# scale_factor: 1e-04
# add_offset: 0.0
# valid_min: 0.0
# valid_max: 0.6
# coordinates: time lat lon
# missing_value: -999
# _FillValue: -999
|
the-stack_0_20738 | import os
import subprocess
import unittest
class TestExample(unittest.TestCase):
def test_example(self):
cmd = 'cd {this_dir} && python entrypoint.py'.format(
this_dir=os.path.dirname(os.path.abspath(__file__))
)
stdout = subprocess.check_output(cmd, shell=True).decode()
expected_stdout = 'message set by task_1\n'
self.assertEqual(stdout, expected_stdout)
def test_add_test_for_switch_task(self):
self.skipTest('implement later')
|
the-stack_0_20740 | from setuptools import setup, find_packages
import os.path as p
version = "0.3.4"
with open(p.join(p.dirname(__file__), 'requirements', 'package.txt'), 'r') as reqs:
install_requires = [line.strip() for line in reqs]
tests_require = []
try:
with open(p.join(p.dirname(__file__), 'requirements', 'test.txt'), 'r') as reqs:
tests_require = [line.strip() for line in reqs]
except IOError:
pass
setup(
name="twitterbot",
version=version,
author="Jessamyn Smith",
author_email="[email protected]",
url="https://github.com/jessamynsmith/twitterbot",
download_url='https://github.com/jessamynsmith/twitterbot/archive/v{0}.tar.gz'.format(version),
license='MIT',
description="Configurable bot that replies to mentions and posts messages to twitter",
long_description=open('README.rst').read(),
keywords=['twitter', 'bot', 'web 2.0', 'command-line tools'],
install_requires=install_requires,
tests_require=tests_require,
packages=find_packages(exclude=['*test*']),
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Software Development",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
])
|
the-stack_0_20742 | # author: MDS-2021-22 block3 group21
# date: 2021-11-25
"""
Cleans NCDB 2017 data and creates training and test data set csv files.
Usage: clean_split_data.py --input=<input filepath> --output=<output directory>
Options:
--input=<input filepath> Filepath of data in csv format
--output=<output directory> Directory specifying where to store training and test data sets
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from docopt import docopt
import os
opt = docopt(__doc__)
def main():
# Importing NCDB 2017 dataset
ncdb = pd.read_csv(opt["--input"], low_memory=False, skiprows=1).sort_index()
# Make all columns contain strings
ncdb = ncdb.astype("string")
# Create 'FATALITY' column
ncdb.loc[ncdb["C_SEV"] == "1", "FATALITY"] = int(1)
ncdb.loc[ncdb["C_SEV"] != "1", "FATALITY"] = int(0)
# Dropping irrelevant or redundant columns
ncdb = ncdb.drop(columns=["C_YEAR", "C_CASE", "P_ISEV", "V_ID", "P_ID", "C_SEV"])
# Split data into train and test split (80:20)
train_df, test_df = train_test_split(ncdb, test_size=0.2, random_state=21)
# Set unknown, data not provided by jurisdiction, and "other" values to missing
null_value = ["N", "NN", "NNNN", "Q", "QQ", "U", "UU", "UUUU", "X", "XX", "XXXX"]
train_df = train_df.replace(to_replace=null_value, value="missing")
test_df = test_df.replace(to_replace=null_value, value="missing")
# Create training and test set files
# Test if we have the given filepath in the directory, if not, create one.
output = opt["--output"]
try:
train_df.to_csv(f"{output}train.csv", index_label="index")
except:
os.makedirs(os.path.dirname(output))
train_df.to_csv(f"{output}train.csv", index_label="index")
try:
test_df.to_csv(f"{output}test.csv", index_label="index")
except:
os.makedirs(os.path.dirname(output))
test_df.to_csv(f"{output}test.csv", index_label="index")
# When reading train_df or test_df, must use .set_index("index").rename_axis(None)
if __name__ == "__main__":
main()
|
the-stack_0_20744 | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=19
prog.y(input_qubit[2]) # number=36
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.cx(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.cx(input_qubit[0],input_qubit[3]) # number=28
prog.x(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=30
prog.cx(input_qubit[3],input_qubit[1]) # number=35
prog.y(input_qubit[2]) # number=34
prog.cx(input_qubit[0],input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=22
prog.cz(input_qubit[0],input_qubit[3]) # number=23
prog.h(input_qubit[3]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=18
prog.z(input_qubit[3]) # number=10
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.cx(input_qubit[3],input_qubit[0]) # number=31
prog.z(input_qubit[3]) # number=32
prog.cx(input_qubit[3],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2673.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_20748 | '''
This file represents a generator function.
Calling a generator function creates a generator object, but it does not
start running the function body. The body executes only when next() is called.
Example:
>>> x = func(10)
>>> x
<generator object at 0x58490>
>>> next(x)
10
>>> next(x)
9
>>>
'''
import time
def watch_file(thefile):
"""A function to read and watch the last line of a file"""
# Go to the end of the file
thefile.seek(0, 2)
while True:
line = thefile.readline()
if not line:
# sleep briefly
time.sleep(0.1)
continue
yield line
# Practical example
logfile = open("logs")
for line in watch_file(logfile):
print(line, end='')
|
the-stack_0_20750 | from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end]
self.__map = {}
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
min(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
if __name__ == '__main__':
d = OrderedDict([('foo',2),('bar',3),('baz',4),('zot',5),('arrgh',6)])
assert [x for x in d] == ['foo', 'bar', 'baz', 'zot', 'arrgh']
|
the-stack_0_20751 | """
Firebase Module | Cannlytics
Author: Keegan Skeate <[email protected]>
Created: 2/7/2021
Updated: 5/4/2021
Resources:
- https://firebase.google.com/docs/
Description:
A wrapper of firebase_admin to make interacting with the Firestore database
and Firebase Storage buckets even easier.
Example:
```py
import os
import environ
# Get and set all credentials.
env = environ.Env()
env.read_env('.env')
credentials = env('GOOGLE_APPLICATION_CREDENTIALS')
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials
bucket_name = environ.get('FIREBASE_STORAGE_BUCKET')
# Initialize Firebase
db = initialize_firebase()
```
"""
import ulid
from datetime import datetime
from os import listdir
from os.path import isfile, join
from re import sub, findall
from django.utils.crypto import get_random_string
from firebase_admin import auth, firestore, initialize_app, storage
try:
from google.cloud.firestore import ArrayUnion, ArrayRemove, Increment
from google.cloud.firestore_v1.collection import CollectionReference
except:
pass
try:
from pandas import notnull, read_csv, read_excel, DataFrame, Series
except:
# FIXME: pandas has problems with Django on Cloud Run
pass
# from uuid import uuid4
# ------------------------------------------------------------#
# Firestore
# ------------------------------------------------------------#
def add_to_array(ref, field, value):
"""Add an element to a given field for a given reference.
Args:
ref (str): A document reference.
field (str): A list field to create or update.
value (dynamic): The value to be added to the list.
"""
database = firestore.client()
doc = create_reference(database, ref)
doc.update({field: ArrayUnion([value])})
def create_document(ref, values):
"""Create a given document with given values, this leverages the
same functionality as `update_document` thanks to `set` with `merge=True`.
Args:
ref (str): A document reference.
values (str): A dictionary of values to update.
"""
update_document(ref, values)
def create_reference(database, path):
"""Create a database reference for a given path.
Args:
database (Firestore Client): The Firestore Client.
path (str): The path to the document or collection.
Returns:
(ref): Either a document or collection reference.
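Example (illustrative path; names are placeholders):
    ref = create_reference(db, 'users/alice/logs/log-1')
    # -> users (collection) / alice (document) / logs (collection) / log-1 (document)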
"""
ref = database
parts = path.split('/')
for i in range(len(parts)):
part = parts[i]
if i % 2:
ref = ref.document(part)
else:
ref = ref.collection(part)
return ref
def delete_collection(ref, batch_size=420):
"""Delete a given collection, a batch at a time.
Args:
ref (str): A document reference.
batch_size (int): The number of documents to delete at a time.
The default is 420 and the maximum is 500.
"""
database = firestore.client()
col = create_reference(database, ref)
docs = col.limit(batch_size).stream()
deleted = 0
for doc in docs:
doc.reference.delete()
deleted = deleted + 1
if deleted >= batch_size:
return delete_collection(col, batch_size)
def delete_document(ref):
"""Delete a given document.
Args:
ref (str): A document reference.
"""
database = firestore.client()
doc = create_reference(database, ref)
doc.delete()
def delete_field(ref, field):
"""Delete a given field from a document.
Args:
ref (str): A document reference.
"""
# FIXME:
# database = firestore.client()
# doc = create_reference(database, ref)
# update = {}
# update[field] = firestore.DELETE_FIELD
# doc.update(update)
raise NotImplementedError
def remove_from_array(ref, field, value):
"""Remove an element from a given field for a given reference.
Args:
ref (str): A document reference.
field (str): A list field to update.
value (dynamic): The value to be removed from the list.
"""
database = firestore.client()
doc = create_reference(database, ref)
doc.update({field: ArrayRemove([value])})
def increment_value(ref, field, amount=1):
"""Increment a given field for a given reference.
Args:
ref (str): A document reference.
field (str): A numeric field to create or update.
amount (int): The amount to increment, default 1.
"""
database = firestore.client()
doc = create_reference(database, ref)
doc.update({field: Increment(amount)})
def initialize_firebase():
"""Initialize Firebase, unless already initialized.
Returns:
(Firestore client): A Firestore database instance.
"""
try:
initialize_app()
except ValueError:
pass
return firestore.client()
def update_document(ref, values):
"""Update a given document with given values.
Args:
ref (str): A document reference.
values (str): A dictionary of values to update.
"""
database = firestore.client()
doc = create_reference(database, ref)
doc.set(values, merge=True)
def get_document(ref):
"""Get a given document.
Args:
ref (str): A document reference.
Returns:
(dict): Returns the document as a dictionary.
Returns an empty dictionary if no data is found.
"""
database = firestore.client()
doc = create_reference(database, ref)
data = doc.get()
if data is None:
return {}
else:
return data.to_dict()
def get_collection(ref, limit=None, order_by=None, desc=False, filters=[]):
"""Get documents from a collection.
Args:
ref (str): A document reference.
limit (int): The maximum number of documents to return. The default is no limit.
order_by (str): A field to order the documents by, with the default being none.
desc (bool): The direction to order the documents by the order_by field.
filters (list): Filters are dictionaries of the form
`{'key': '', 'operation': '', 'value': ''}`.
Filters apply [Firebase queries](https://firebase.google.com/docs/firestore/query-data/queries)
to the given `key` for the given `value`.
Operators include: `==`, `>=`, `<=`, `>`, `<`, `!=`,
`in`, `not_in`, `array_contains`, `array_contains_any`.
Returns:
(list): A list of documents.
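Example (illustrative; the collection path and filter values are placeholders):
    docs = get_collection(
        'organizations/test-org/analyses',
        limit=10,
        order_by='updated_at',
        desc=True,
        filters=[{'key': 'status', 'operation': '==', 'value': 'active'}],
    )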
"""
docs = []
database = firestore.client()
collection = create_reference(database, ref)
if filters:
for filter in filters:
collection = collection.where(
filter['key'], filter['operation'], filter['value']
)
if order_by and desc:
collection = collection.order_by(order_by, direction='DESCENDING')
elif order_by:
collection = collection.order_by(order_by)
if limit:
collection = collection.limit(limit)
query = collection.stream() # Only handles streams less than 2 mins.
for doc in query:
data = doc.to_dict()
docs.append(data)
return docs
def import_data(db, ref, data_file):
"""Import data into Firestore.
Args:
db (Firestore Client):
ref (str): A collection or document reference.
data_file (str): The path to the local data file to upload.
Wishlist
- Batch upload
- Handle types <https://hackersandslackers.com/importing-excel-dates-times-into-pandas/>
"""
try:
data = read_csv(
data_file,
header=0,
skip_blank_lines=True,
encoding='latin-1'
)
except:
try:
data = read_csv(data_file, sep=' ', header=None)
except:
try:
data = read_csv(
data_file,
header=0,
skip_blank_lines=True,
encoding='utf-16',
sep='\t',
)
except:
data = read_excel(data_file, header=0)
data.columns = map(snake_case, data.columns)
data = data.where(notnull(data), None)
data_ref = create_reference(db, ref)
if isinstance(data_ref, CollectionReference):
for index, values in data.iterrows():
doc_id = str(index)
doc_data = values.to_dict()
data_ref.document(doc_id).set(doc_data, merge=True)
else:
doc_data = data.to_dict(orient='index')
data_ref.set(doc_data, merge=True)
def export_data(db, ref, data_file):
"""Export data from Firestore.
Args:
db (Firestore Client):
ref (str): A collection or document reference.
data_file (str): The path to the local data file to upload.
Wishlist
- Parse fields that are objects into fields. E.g.
from pandas.io.json import json_normalize
artist_and_track = json_normalize(
data=tracks_response['tracks'],
record_path='artists',
meta=['id'],
record_prefix='sp_artist_',
meta_prefix='sp_track_',
sep='_'
)
"""
data_ref = create_reference(db, ref)
if isinstance(data_ref, CollectionReference):
data = []
docs = data_ref.stream()
for doc in docs:
doc_data = doc.to_dict()
doc_data['id'] = doc.id
data.append(doc_data)
output = DataFrame(data)
else:
doc = data_ref.get()
output = Series(doc.to_dict())
output.name = doc.id
if data_file.endswith('.csv'):
output.to_csv(data_file)
else:
output.to_excel(data_file)
def create_id():
"""Generate a universal ID."""
return ulid.new().str.lower()
def create_id_from_datetime(dt):
"""Create an ID from an existing datetime.
Args:
dt (datetime): The time to timestamp the ID.
"""
return ulid.from_timestamp(dt)
def get_id_timestamp(uid):
"""Get the datetime that an ID was created.
Args:
uid (str): A unique ID string.
"""
return ulid.from_str(uid).timestamp().datetime
# ------------------------------------------------------------#
# Authentication
# ------------------------------------------------------------#
def create_user(name, email, notification=True):
"""
Given a user's name and email, create an account.
If the email is already in use, (None, None) is returned.
Args:
name (str): A name for the user.
email (str): The user's email.
notification (bool): Whether to notify the user.
Returns:
(tuple): User object, random password
"""
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$-_'
password = get_random_string(42, chars)
photo_url = f'https://robohash.org/{email}?set=set5'
try:
user = auth.create_user(
# uid=str(uuid4()),
uid=create_id(),
email=email,
email_verified=False,
password=password,
display_name=name,
photo_url=photo_url,
disabled=False,
)
return user, password
except:
return None, None
def create_custom_claims(uid, email=None, claims=None):
"""Create custom claims for a user to grant granular permission.
The new custom claims will propagate to the user's ID token the
next time a new one is issued.
Args:
uid (str): A user's ID.
email (str): A user's email.
claims (dict): A dictionary of the user's custom claims.
"""
if email:
user = auth.get_user_by_email(email)
uid = user.uid
auth.set_custom_user_claims(uid, claims)
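# Illustrative sketch (assumed values): granting a hypothetical claim by email.
#
#   create_custom_claims(uid='', email='admin@example.com', claims={'admin': True})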
def update_custom_claims(uid, email=None, claims=None):
"""Update custom claims for a user.
The new custom claims will propagate to the user's ID token the
next time a new one is issued.
Args:
uid (str): A user's ID.
email (str): A user's email.
claims (dict): A dictionary of the user's custom claims.
"""
if email:
user = auth.get_user_by_email(email)
uid = user.uid
existing_claims = get_custom_claims(uid)
if existing_claims:
existing_owner = existing_claims.get('owner', [])
else:
existing_claims = {}
existing_owner = []
current_owner = claims.get('owner', [])
claims['owner'] = list(set(existing_owner + current_owner))
auth.set_custom_user_claims(uid, {**existing_claims, **claims})
def get_custom_claims(name):
"""Get custom claims for a user.
Args:
name (str): A user ID or user email.
"""
user = get_user(name)
return user.custom_claims
def create_custom_token(uid='', email=None, claims=None):
"""Create a custom token for a given user, expires after one hour.
Args:
uid (str): A user's ID.
email (str): A user's email.
claims (dict): A dictionary of the user's claims.
"""
if email:
user = auth.get_user_by_email(email)
uid = user.uid
return auth.create_custom_token(uid, claims)
def verify_token(token):
"""Verify a user's custom token.
Args:
token (str): The custom token to authenticate a user.
"""
return auth.verify_id_token(token)
def get_user(name):
"""Get a user by user ID or by email.
Args:
name (str): A user ID, email, or phone number.
Returns:
(UserRecord): A Firebase user object.
"""
user = None
try:
user = auth.get_user(name)
except:
pass
if user is None:
try:
user = auth.get_user_by_email(name)
except:
pass
if user is None:
try:
user = auth.get_user_by_phone_number(name)
except:
pass
return user
def get_users():
"""Get all Firebase users.
Returns:
(list): A list of Firebase users.
"""
users = []
for user in auth.list_users().iterate_all():
users.append(user)
return users
def update_user(existing_user, data):
"""Update a user.
Args:
existing_user (Firebase user):
data (dict): The values of the user to update, which can include
email, phone_number, email_verified, display_name, photo_url,
and disabled.
"""
values = {}
fields = [
'email',
'phone_number',
'email_verified',
'display_name',
'photo_url',
'disabled',
]
for field in fields:
new_value = data.get(field)
if new_value:
values[field] = new_value
else:
values[field] = getattr(existing_user, field)
return auth.update_user(
existing_user.uid,
email=values['email'],
phone_number=values['phone_number'],
email_verified=values['email_verified'],
display_name=values['display_name'],
photo_url=values['photo_url'],
disabled=values['disabled'],
)
def delete_user(uid):
"""Delete a user from Firebase.
Args:
uid (str): A user's ID.
"""
auth.delete_user(uid)
# TODO: Create user secret
def create_user_secret(uid):
"""Create a secret for a user.
Args:
uid (str): A user's ID.
"""
raise NotImplementedError
# ------------------------------------------------------------#
# Secret Management
# 'Secret Manager Admin' permissions needed for service account.
# https://cloud.google.com/secret-manager/docs/creating-and-accessing-secrets
# ------------------------------------------------------------#
def create_secret(project_id, secret_id, secret):
"""Create a new secret with the given name. A secret is a logical wrapper
around a collection of secret versions. Secret versions hold the actual
secret material.
Args:
project_id (str): The project associated with the secret.
secret_id (str): An ID for the secret.
secret (str): The secret data (not stored by this function; add it with add_secret_version).
"""
# Import the Secret Manager client library.
from google.cloud import secretmanager
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the parent project.
parent = f'projects/{project_id}'
# Create the secret.
response = client.create_secret(parent, secret_id, {"replication": {"automatic": {}}})
# Return the new secret version name.
return response.name
def add_secret_version(project_id, secret_id, payload):
"""
Add a new secret version to the given secret with the provided payload.
A secret version contains the actual contents of a secret.
A secret version can be enabled, disabled, or destroyed.
To change the contents of a secret, you create a new version.
Adding a secret version requires the Secret Manager Admin role
(roles/secretmanager.admin) on the secret, project, folder, or organization.
Roles can't be granted on a secret version.
"""
# Import the Secret Manager client library.
from google.cloud import secretmanager
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the parent secret.
# parent = client.secret_path(project_id, secret_id)
parent = f'projects/{project_id}/secrets/{secret_id}'
# Convert the string payload into a bytes. This step can be omitted if you
# pass in bytes instead of a str for the payload argument.
payload = payload.encode('UTF-8')
# Add the secret version.
response = client.add_secret_version(parent, {'data': payload})
# Return the new secret version name.
return response.name
def access_secret_version(project_id, secret_id, version_id):
"""
Access the payload for a given secret version if one exists. The version
can be a version number as a string (e.g. "5") or an alias (e.g. "latest").
"""
# Import the Secret Manager client library.
from google.cloud import secretmanager
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the secret version.
name = f'projects/{project_id}/secrets/{secret_id}/versions/{version_id}'
# Access the secret version.
response = client.access_secret_version(name)
# Return the secret.
# WARNING: Do not print the secret in a production environment.
return response.payload.data.decode('UTF-8')
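# Illustrative sketch (assumed project and secret IDs): a typical
# create -> add version -> access flow using the helpers above.
#
#   create_secret('my-project', 'api_key', '')
#   add_secret_version('my-project', 'api_key', 's3cr3t-value')
#   value = access_secret_version('my-project', 'api_key', 'latest')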
# def get_user_secret(uid):
# """Delete a user from Firebase.
# Args:
# uid (str): A user's ID.
# """
# raise NotImplementedError
# def update_user_secret(uid):
# """Delete a user from Firebase.
# Args:
# uid (str): A user's ID.
# """
# raise NotImplementedError
# def delete_user_secret(uid):
# """Delete a user from Firebase.
# Args:
# uid (str): A user's ID.
# """
# raise NotImplementedError
# Optional: Implement custom email.
# def send_password_reset(email):
# """Send a password reset to a user given an email."""
# link = auth.generate_password_reset_link(email)
# send_custom_email(email, link)
# ------------------------------------------------------------#
# Storage
# ------------------------------------------------------------#
def download_file(bucket_name, source_blob_name, destination_file_name, verbose=True):
"""Downloads a file from Firebase Storage.
Args:
bucket_name (str): The name of the storage bucket.
source_blob_name (str): The file name to upload.
destination_file_name (str): The destination file name.
verbose (bool): Whether or not to print status.
"""
bucket = storage.bucket(name=bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
if verbose:
print(
'Blob {} downloaded to {}.'.format(source_blob_name, destination_file_name)
)
def download_files(bucket_name, bucket_folder, local_folder, verbose=True):
"""Download all files in a given Firebase Storage folder.
Args:
bucket_name (str): The name of the storage bucket.
bucket_folder (str): A folder in the storage bucket.
local_folder (str): The local folder to download files.
verbose (bool): Whether or not to print status.
"""
bucket = storage.bucket(name=bucket_name)
file_list = list_files(bucket_name, bucket_folder)
for file in file_list:
blob = bucket.blob(file)
file_name = blob.name.split('/')[-1]
blob.download_to_filename(local_folder + '/' + file_name)
if verbose:
print(f'{file_name} downloaded from bucket.')
def upload_file(bucket_name, destination_blob_name, source_file_name, verbose=True):
"""Upload file to Firebase Storage.
Args:
bucket_name (str): The name of the storage bucket.
destination_blob_name (str): The name to save the file as.
source_file_name (str): The local file name.
verbose (bool): Whether or not to print status.
"""
bucket = storage.bucket(name=bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
if verbose:
print('File {} uploaded to {}.'.format(source_file_name, destination_blob_name))
def upload_files(bucket_name, bucket_folder, local_folder, verbose=True):
"""Upload multiple files to Firebase Storage.
Args:
bucket_name (str): The name of the storage bucket.
bucket_folder (str): A folder in the storage bucket to upload files.
local_folder (str): The local folder of files to upload.
verbose (bool): Whether or not to print status.
"""
bucket = storage.bucket(name=bucket_name)
files = [f for f in listdir(local_folder) if isfile(join(local_folder, f))]
for file in files:
local_file = join(local_folder, file)
blob = bucket.blob(bucket_folder + '/' + file)
blob.upload_from_filename(local_file)
if verbose:
print(f'Uploaded {len(files)} to "{bucket_folder}" bucket.')
def list_files(bucket_name, bucket_folder):
"""List all files in GCP bucket folder.
Args:
bucket_name (str): The name of the storage bucket.
bucket_folder (str): A folder in the storage bucket to list files.
"""
bucket = storage.bucket(name=bucket_name)
files = bucket.list_blobs(prefix=bucket_folder)
return [file.name for file in files if '.' in file.name]
def delete_file(bucket_name, bucket_folder, file_name, verbose=True):
"""Delete file from GCP bucket.
Args:
bucket_name (str): The name of the storage bucket.
bucket_folder (str): A folder in the storage bucket.
file_name (str): The name of the file to delete.
verbose (bool): Whether or not to print status.
"""
bucket = storage.bucket(name=bucket_name)
bucket.delete_blob(bucket_folder + '/' + file_name)
if verbose:
print(f'{file_name} deleted from bucket.')
def rename_file(bucket_name, bucket_folder, file_name, newfile_name, verbose=True):
"""Rename file in GCP bucket.
Args:
bucket_name (str): The name of the storage bucket.
bucket_folder (str): A folder in the storage bucket.
file_name (str): The name of the file to rename.
newfile_name (str): The new name for the file.
verbose (bool): Whether or not to print status.
"""
bucket = storage.bucket(name=bucket_name)
blob = bucket.blob(bucket_folder + '/' + file_name)
bucket.rename_blob(blob, new_name=newfile_name)
if verbose:
print(f'{file_name} renamed to {newfile_name}.')
# ------------------------------------------------------------#
# Misc
# ------------------------------------------------------------#
def create_log(ref, claims, action, log_type, key, changes=None):
"""Create an activity log.
Args:
ref (str): Path to a collection of logs.
claims (dict): A dict with user fields or a Firestore user object.
action (str): The activity that took place.
log_type (str): The log type.
key (str): A key to recognize the action.
changes (list): An optional list of changes that took place.
"""
now = datetime.now()
timestamp = now.isoformat()
log_id = now.strftime('%Y-%m-%d_%H-%M-%S')
log_entry = {
'action': action,
'type': log_type,
'key': key,
'created_at': timestamp,
'user': claims.get('uid'),
'user_name': claims.get('display_name'),
'user_email': claims.get('email'),
'user_photo_url': claims.get('photo_url'),
'changes': changes,
}
update_document(f'{ref}/{log_id}', log_entry)
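# Illustrative sketch (assumed values): logging an update with the helper above.
#
#   create_log(
#       ref='logs/website/activity',
#       claims={'uid': '...', 'display_name': '...', 'email': '...'},
#       action='Updated inventory item.',
#       log_type='inventory',
#       key='item-123',
#       changes=[{'field': 'quantity', 'old': 1, 'new': 2}],
#   )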
def get_keywords(string):
"""Get keywords for a given string.
Args:
string (str): A string to get keywords for.
"""
keywords = string.lower().split(' ')
keywords = [x.strip() for x in keywords if x]
keywords = list(set(keywords))
return keywords
def snake_case(s):
"""Turn a given string to snake case.
Handles CamelCase, replaces known special characters with
preferred namespaces, replaces spaces with underscores,
and removes all other nuisance characters.
Args:
s (str): The string to turn to snake case.
Returns:
(str): A snake case string.
"""
clean_name = s.replace(' ', '_')
clean_name = clean_name.replace('&', 'and')
clean_name = clean_name.replace('%', 'percent')
clean_name = clean_name.replace('#', 'number')
clean_name = clean_name.replace('$', 'dollars')
clean_name = clean_name.replace('/', '_')
clean_name = clean_name.replace('\\', '_')
clean_name = sub(r'[!@#$%^&*()\[\]{};:,./<>?\\|`~\-=+]', ' ', clean_name)
words = findall(r'[A-Z]?[a-z]+|[A-Z]{2,}(?=[A-Z][a-z]|\d|\W|$)|\d+', clean_name)
return '_'.join(map(str.lower, words))
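# Illustrative sketch (expected behaviour, assumed example inputs):
#
#   snake_case('Total $ Sales/Region')   # -> 'total_dollars_sales_region'
#   snake_case('CamelCaseColumn')        # -> 'camel_case_column'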
|
the-stack_0_20752 | from hwt.hdl.constants import DIRECTION
from hwt.hdl.types.defs import BIT
class NotSpecified(Exception):
"""
This error means that you need to implement this function in order to use this functionality,
e.g. you have to implement a simulation agent for an interface when you create a new one and cannot use an existing one
"""
pass
def walkPhysInterfaces(intf):
if intf._interfaces:
for si in intf._interfaces:
yield from walkPhysInterfaces(si)
else:
yield intf
def walkParams(intf, discovered):
"""
walk parameter instances on this interface
"""
for si in intf._interfaces:
yield from walkParams(si, discovered)
for p in intf._params:
if p not in discovered:
discovered.add(p)
yield p
def connectPacked(srcPacked, dstInterface, exclude=None):
"""
Connect 1D vector signal to this structuralized interface
:param srcPacked: vector which should be connected
:param dstInterface: structuralized interface where
srcPacked should be connected to
:param exclude: sub interfaces of self which should be excluded
"""
offset = 0
connections = []
for i in reversed(list(walkPhysInterfaces(dstInterface))):
if exclude is not None and i in exclude:
continue
sig = i._sig
t = sig._dtype
if t == BIT:
s = srcPacked[offset]
offset += 1
else:
w = t.bit_length()
s = srcPacked[(w + offset): offset]
offset += w
connections.append(sig(s))
return connections
def walkFlatten(interface, shouldEnterIntfFn):
"""
:param shouldEnterIntfFn: function (actual interface)
returns tuple (shouldEnter, shouldYield)
"""
_shouldEnter, _shouldYield = shouldEnterIntfFn(interface)
if _shouldYield:
yield interface
if shouldEnterIntfFn:
for intf in interface._interfaces:
yield from walkFlatten(intf, shouldEnterIntfFn)
def packIntf(intf, masterDirEqTo=DIRECTION.OUT, exclude=None):
"""
Concatenate all signals to one big signal, recursively
:param masterDirEqTo: only signals with this direction are packed
:param exclude: sequence of signals/interfaces to exclude
"""
if not intf._interfaces:
if intf._masterDir == masterDirEqTo:
return intf._sig
return None
res = None
for i in intf._interfaces:
if exclude is not None and i in exclude:
continue
if i._interfaces:
if i._masterDir == DIRECTION.IN:
d = DIRECTION.opposite(masterDirEqTo)
else:
d = masterDirEqTo
s = i._pack(d, exclude=exclude)
else:
if i._masterDir == masterDirEqTo:
s = i._sig
else:
s = None
if s is not None:
if res is None:
res = s
else:
res = res._concat(s)
return res
|
the-stack_0_20753 | class Solution:
def displayTable(self, orders: List[List[str]]) -> List[List[str]]:
food_list = list()
table_f_dict = {}
for order in orders:
table_no = order[1]
food_item = order[2]
if not food_item in food_list:
food_list.append(food_item)
table_f_dict.setdefault(table_no, list()).append(food_item)
sorted_food_list = sorted(food_list)
ans = []
ans.append(['Table'] + sorted_food_list)
temp_list = []
d_keys = list(table_f_dict.keys())
sorted_d_key = sorted(d_keys, key=lambda x: int(x))
for table in sorted_d_key:
temp_list.append(table)
f_list = table_f_dict[table]
for f in sorted_food_list:
if f in f_list:
cnt_str = str(f_list.count(f))
temp_list.append(cnt_str)
else:
temp_list.append('0')
ans.append(temp_list)
temp_list = []
return ans |
the-stack_0_20755 | #!/usr/bin/env python3
import sys
import subprocess
import re
import os
import xml.etree.ElementTree as ET
from email.utils import parseaddr
import sh
import logging
import argparse
#from check_identity import verify_signed_off
if "ZEPHYR_BASE" not in os.environ:
logging.error("$ZEPHYR_BASE environment variable undefined.\n")
exit(1)
logger = None
DOCS_WARNING_FILE = "doc.warnings"
repository_path = os.environ['ZEPHYR_BASE']
sh_special_args = {
'_tty_out': False,
'_cwd': repository_path
}
# list_undef_kconfig_refs.py makes use of Kconfiglib
sys.path.append(os.path.join(repository_path, "scripts/kconfig"))
import list_undef_kconfig_refs
def init_logs():
global logger
log_lev = os.environ.get('LOG_LEVEL', None)
level = logging.INFO
if log_lev == "DEBUG":
level = logging.DEBUG
elif log_lev == "ERROR":
level = logging.ERROR
console = logging.StreamHandler()
format = logging.Formatter('%(levelname)-8s: %(message)s')
console.setFormatter(format)
logger = logging.getLogger('')
logger.addHandler(console)
logger.setLevel(level)
logging.debug("Log init completed")
def parse_args():
parser = argparse.ArgumentParser(
description="Check for coding style and documentation warnings.")
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
return parser.parse_args()
def get_shas(refspec):
sha_list = sh.git("rev-list",
'--max-count={0}'.format(-1 if "." in refspec else 1),
refspec, **sh_special_args).split()
return sha_list
def run_gitlint(tc, commit_range):
proc = subprocess.Popen('gitlint --commits %s' %(commit_range),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
msg = ""
if proc.wait() != 0:
msg = proc.stdout.read()
if msg != "":
failure = ET.SubElement(tc, 'failure', type="failure", message="commit message error on range: %s" %commit_range)
failure.text = (msg.decode('utf8'))
return 1
return 0
def run_checkpatch(tc, commit_range):
output = None
out = ""
diff = subprocess.Popen(('git', 'diff', '%s' %(commit_range)), stdout=subprocess.PIPE)
try:
output = subprocess.check_output(('%s/scripts/checkpatch.pl' % repository_path,
'--mailback', '--no-tree', '-'), stdin=diff.stdout,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
m = re.search("([1-9][0-9]*) errors,", ex.output.decode('utf8'))
if m:
failure = ET.SubElement(tc, 'failure', type="failure", message="checkpatch issues")
failure.text = (ex.output.decode('utf8'))
return 1
return 0
def run_kconfig_undef_ref_check(tc, commit_range):
# Parse the entire Kconfig tree, to make sure we see all symbols
os.environ["ENV_VAR_BOARD_DIR"] = "boards/*/*"
os.environ["ENV_VAR_ARCH"] = "*"
# Hack:
#
# When using 'srctree', Kconfiglib checks the current directory before
# checking the 'srctree' directory (only for compatibility with the C
# tools... this behavior is pretty bad for Kconfig files).
#
# This can cause problems for external projects that happen to run
# check-compliance.py from a directory that contains Kconfig files that
# overlap with Kconfig in the Zephyr tree (subsys/Kconfig would override
# the same file in the base Zephyr repo, etc.).
#
# Work around it by temporarily changing directory instead of using
# 'srctree'.
cur_dir = os.getcwd()
os.chdir(repository_path)
try:
# Returns the empty string if there are no references to undefined symbols
msg = list_undef_kconfig_refs.report()
if msg:
failure = ET.SubElement(tc, "failure", type="failure",
message="undefined Kconfig symbols")
failure.text = msg
return 1
return 0
finally:
# Restore working directory
os.chdir(cur_dir)
def verify_signed_off(tc, commit):
signed = []
author = ""
sha = ""
parsed_addr = None
for line in commit.split("\n"):
match = re.search(r"^commit\s([^\s]*)", line)
if match:
sha = match.group(1)
match = re.search(r"^Author:\s(.*)", line)
if match:
author = match.group(1)
parsed_addr = parseaddr(author)
match = re.search(r"signed-off-by:\s(.*)", line, re.IGNORECASE)
if match:
signed.append(match.group(1))
error1 = "%s: author email (%s) needs to match one of the signed-off-by entries." %(sha, author)
error2 = "%s: author email (%s) does not follow the syntax: First Last <email>." %(sha, author)
error = 0
failure = None
if author not in signed:
failure = ET.SubElement(tc, 'failure', type="failure", message="identity error")
failure.text = error1
error = 1
if not parsed_addr or len(parsed_addr[0].split(" ")) < 2:
if not failure:
failure = ET.SubElement(tc, 'failure', type="failure", message="identity error")
failure.text = error2
else:
failure.text = failure.text + "\n" + error2
error = 1
return error
def run_check_identity(tc, range):
error = 0
for f in get_shas(range):
commit = sh.git("log","--decorate=short", "-n 1", f, **sh_special_args)
error += verify_signed_off(tc, commit)
return error
def check_doc(tc, range):
if os.path.exists(DOCS_WARNING_FILE) and os.path.getsize(DOCS_WARNING_FILE) > 0:
with open(DOCS_WARNING_FILE, "rb") as f:
log = f.read()
failure = ET.SubElement(tc, 'failure', type="failure",
message="documentation issues")
failure.text = (log.decode('utf8'))
return 1
return 0
tests = {
"gitlint": {
"call": run_gitlint,
"name": "Commit message style",
},
"identity": {
"call": run_check_identity,
"name": "Author Identity verification",
},
"checkpatch": {
"call": run_checkpatch,
"name": "Code style check using checkpatch",
},
"checkkconfig": {
"call": run_kconfig_undef_ref_check,
"name": "Check Kconfig files for references to undefined symbols",
},
"documentation": {
"call": check_doc,
"name": "New warnings and errors when building documentation",
}
}
def run_tests(range):
run = "Commit Message / Documentation / Coding Style"
eleTestsuite = None
fails = 0
passes = 0
errors = 0
total = 0
filename = "compliance.xml"
eleTestsuites = ET.Element('testsuites')
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite', name=run, time="0",
tests="%d" %(errors + passes + fails), failures="%d" %fails, errors="%d" %errors, skip="0")
for test in tests.keys():
total += 1
eleTestcase = ET.SubElement(eleTestsuite, 'testcase', classname="%s"
%(test), name="%s" %(tests[test]['name']), time="0")
fails += tests[test]['call'](eleTestcase, range)
eleTestsuite.set("tests", "%s" %total)
eleTestsuite.set("failures", "%s" %fails)
result = ET.tostring(eleTestsuites)
f = open(filename, 'wb')
f.write(result)
f.close()
return fails
def main():
args = parse_args()
if not args.commits:
exit(1)
fails = run_tests(args.commits)
print(fails)
sys.exit(fails)
if __name__ == "__main__":
#init_logs()
main()
|
the-stack_0_20756 | import logging
from typing import List
from dvc.exceptions import (
MetricDoesNotExistError,
NoMetricsFoundError,
NoMetricsParsedError,
)
from dvc.output import BaseOutput
from dvc.path_info import PathInfo
from dvc.repo import locked
from dvc.repo.collect import collect
from dvc.repo.live import summary_path_info
from dvc.scm.base import SCMError
from dvc.tree.repo import RepoTree
from dvc.utils.serialize import YAMLFileCorruptedError, load_yaml
logger = logging.getLogger(__name__)
def _is_metric(out: BaseOutput) -> bool:
return bool(out.metric) or bool(out.live)
def _to_path_infos(metrics: List[BaseOutput]) -> List[PathInfo]:
result = []
for out in metrics:
if out.metric:
result.append(out.path_info)
elif out.live:
path_info = summary_path_info(out)
if path_info:
result.append(path_info)
return result
def _collect_metrics(repo, targets, revision, recursive):
metrics, path_infos = collect(
repo,
targets=targets,
output_filter=_is_metric,
recursive=recursive,
rev=revision,
)
return _to_path_infos(metrics) + list(path_infos)
def _extract_metrics(metrics, path, rev):
if isinstance(metrics, (int, float)):
return metrics
if not isinstance(metrics, dict):
return None
ret = {}
for key, val in metrics.items():
m = _extract_metrics(val, path, rev)
if m not in (None, {}):
ret[key] = m
else:
logger.debug(
"Could not parse '%s' metric from '%s' at '%s' "
"due to its unsupported type: '%s'",
key,
path,
rev,
type(val).__name__,
)
return ret
def _read_metrics(repo, metrics, rev):
tree = RepoTree(repo)
res = {}
for metric in metrics:
if not tree.isfile(metric):
continue
try:
val = load_yaml(metric, tree=tree)
except (FileNotFoundError, YAMLFileCorruptedError):
logger.debug(
"failed to read '%s' on '%s'", metric, rev, exc_info=True
)
continue
val = _extract_metrics(val, metric, rev)
if val not in (None, {}):
res[str(metric)] = val
return res
@locked
def show(
repo,
targets=None,
all_branches=False,
all_tags=False,
recursive=False,
revs=None,
all_commits=False,
):
res = {}
metrics_found = False
for rev in repo.brancher(
revs=revs,
all_branches=all_branches,
all_tags=all_tags,
all_commits=all_commits,
):
metrics = _collect_metrics(repo, targets, rev, recursive)
if not metrics_found and metrics:
metrics_found = True
vals = _read_metrics(repo, metrics, rev)
if vals:
res[rev] = vals
if not res:
if metrics_found:
raise NoMetricsParsedError("metrics")
elif targets:
raise MetricDoesNotExistError(targets)
else:
raise NoMetricsFoundError("metrics", "-m/-M")
# Hide workspace metrics if they are the same as in the active branch
try:
active_branch = repo.scm.active_branch()
except (TypeError, SCMError):
# TypeError - detached head
# SCMError - no repo case
pass
else:
if res.get("workspace") == res.get(active_branch):
res.pop("workspace", None)
return res
|
the-stack_0_20759 | '''
Created on Jun 18, 2015
@author: hsorby
'''
from PySide2 import QtGui, QtWidgets
from mapclientplugins.hoofmeasurementstep.view.ui_hoofmeasurementwidget import Ui_HoofMeasurementWidget
from mapclientplugins.hoofmeasurementstep.scene.hoofmeasurementscene import HoofMeasurementScene
ANGLE_RANGE = 50
class HoofMeasurementWidget(QtWidgets.QWidget):
'''
classdocs
'''
def __init__(self, model, parent=None):
'''
Constructor
'''
super(HoofMeasurementWidget, self).__init__(parent)
self._ui = Ui_HoofMeasurementWidget()
self._ui.setupUi(self)
angle_initial_value = 0
slider_range = [0, 2 * ANGLE_RANGE]
slider_initial_value = ANGLE_RANGE
self._ui.lineEditAngle.setText(str(angle_initial_value))
self._ui.horizontalSliderAngle.setValue(slider_initial_value)
self._ui.horizontalSliderAngle.setMinimum(slider_range[0])
self._ui.horizontalSliderAngle.setMaximum(slider_range[1])
v = QtGui.QIntValidator(-ANGLE_RANGE, ANGLE_RANGE)
self._ui.lineEditAngle.setValidator(v)
self._ui.labelAngle.setText('Angle [{0}, {1}] (Degrees):'.format(-ANGLE_RANGE, ANGLE_RANGE))
self._callback = None
self._model = model
self._scene = HoofMeasurementScene(model)
self._ui.widgetZinc.setContext(model.getContext())
self._ui.widgetZinc.setModel(model.getMarkerModel())
self._ui.widgetZinc.setPlaneAngle(angle_initial_value)
# self._ui.widgetZinc.setSelectionfilter(model.getSelectionfilter())
self._makeConnections()
def _makeConnections(self):
self._ui.pushButtonContinue.clicked.connect(self._continueExecution)
self._ui.pushButtonViewAll.clicked.connect(self._viewAllButtonClicked)
self._ui.horizontalSliderAngle.valueChanged.connect(self._angleSliderValueChanged)
self._ui.widgetZinc.graphicsInitialized.connect(self._zincWidgetReady)
self._ui.pushButtonDeleteNode.clicked.connect(self._ui.widgetZinc.deleteSelectedNodes)
self._ui.lineEditAngle.returnPressed.connect(self._angleLineEditTextEditFinished)
def getLandmarks(self):
return self._model.getLandmarks()
def setCoordinateDescription(self, coordinate_description):
self._model.setCoordinateDescription(coordinate_description)
def load(self, file_location):
self._model.load(file_location)
def registerDoneExecution(self, done_exectution):
self._callback = done_exectution
def _zincWidgetReady(self):
self._ui.widgetZinc.setSelectionfilter(self._model.getSelectionfilter())
def _viewAllButtonClicked(self):
self._ui.widgetZinc.viewAll()
def _continueExecution(self):
self._callback()
def _angleSliderValueChanged(self, value):
angle = value - ANGLE_RANGE
self._ui.lineEditAngle.setText(str(angle))
self._model.setRotationAngle(angle)
self._ui.widgetZinc.setPlaneAngle(angle)
def _angleLineEditTextEditFinished(self):
angle = int(self._ui.lineEditAngle.text())
self._ui.horizontalSliderAngle.setValue(angle + ANGLE_RANGE)
self._model.setRotationAngle(angle)
self._ui.widgetZinc.setPlaneAngle(angle)
|
the-stack_0_20762 | """
Write and call functions that demonstrate both
default parameter values and pass by reference.
"""
import random
def main():
randnums = [16.2, 75.1, 52.3]
print(f"randnums {randnums}")
# Call the append_random_numbers function to
# add one random number to the randnums list.
append_random_numbers(randnums)
print(f"randnums {randnums}")
# Call the append_random_numbers function to add
# three random numbers to the randnums list.
append_random_numbers(randnums, 3)
print(f"randnums {randnums}")
# Create a list to store random words.
randwords = []
# Call the append_random_words function
# to add one random word to the list.
append_random_words(randwords)
print(f"randwords {randwords}")
# Call the append_random_words function
# to add five random words to the list.
append_random_words(randwords, 5)
print(f"randwords {randwords}")
def append_random_numbers(numbers_list, quantity=1):
"""Append quantity random numbers onto the numbers list.
The random numbers are between 0 and 100, inclusive.
Parameters
numbers_list: A list of numbers where this function will append
random numbers.
quantity: The number of random numbers that this function will
append onto numbers_list.
Return: nothing. It's unnecessary for this function to return
anything because this function changes the numbers_list.
"""
for _ in range(quantity):
random_number = random.uniform(0, 100)
rounded = round(random_number, 1)
numbers_list.append(rounded)
def append_random_words(words_list, quantity=1):
"""Append quantity randomly chosen words onto the words list.
Parameters
words_list: A list of words where this function will append
random words.
quantity: The number of random words that this function will
append onto words_list.
Return: nothing. It's unnecessary for this function to return
anything because this function changes the words_list.
"""
# A list of words to randomly choose from.
candidates = [
"arm", "car", "cloud", "head", "heal", "hydrogen", "jog",
"join", "laugh", "love", "sleep", "smile", "speak",
"sunshine", "toothbrush", "tree", "truth", "walk", "water"
]
# Randomly choose quantity words and append them onto words_list.
for _ in range(quantity):
word = random.choice(candidates)
words_list.append(word)
# If this file was executed like this:
# > python teach_solution.py
# then call the main function. However, if this file
# was simply imported, then skip the call to main.
if __name__ == "__main__":
main()
|
the-stack_0_20764 | import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # or 'PS', 'PDF', 'SVG'
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
from argparse import ArgumentParser
from torch.utils.data import DataLoader
import torch
import cv2
from anomaly_detection.datasets.selfsupervised_images import SelfSupervisedDataset
SQ_SIZE=32
def correctColor(image):
image = np.transpose(image, (1, 2, 0))
image = np.flip(image, axis=2)
return image
def makeIndexValid(x_ind, y_ind, img_shape):
x_shape = img_shape[0]
y_shape = img_shape[1]
if x_ind[0] < 0:
x_ind -= x_ind[0]
if x_ind[1] > x_shape:
x_ind -= (x_ind[1]-x_shape)
if y_ind[0] < 0:
y_ind -= y_ind[0]
if y_ind[1] > y_shape:
y_ind -= (y_ind[1]-y_shape)
def saveImages(img_rgb, img_target, step):
img_rgb = correctColor(img_rgb)
img_target = correctColor(img_target)
cv2.imwrite(os.path.join(args.outdir, "{:05d}".format(step)+'_rgb.png'), img_rgb*255)
cv2.imwrite(os.path.join(args.outdir, "{:05d}".format(step)+'_target.png'), img_target*255)
def label(args):
dataset = SelfSupervisedDataset(args.datadir, file_format='csv', subsample=args.subsample, tensor_type='float')
loader = DataLoader(dataset, shuffle=False)
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
print('Create directory: ' + args.outdir)
n_steps = len(loader)
for step, (images, labels) in enumerate(loader):
print(str(step+1) + '/' + str(n_steps),end='\r')
img_rgb = images[0].squeeze().numpy()
img_target = np.zeros(img_rgb.shape)
label_mask = (labels!=0).squeeze().numpy()
foot_ind = np.where(label_mask)
# Make sure we have a foothold in the image.
if foot_ind[0].shape[0] == 0:
print('Encountered empty foothold mask')
for i in range(foot_ind[0].shape[0]):
indices = np.array([foot_ind[0][i], foot_ind[1][i]])
x_ind = np.array([int(indices[0]-SQ_SIZE/2), int(indices[0]+SQ_SIZE/2)])
y_ind = np.array([int(indices[1]-SQ_SIZE/2), int(indices[1]+SQ_SIZE/2)])
makeIndexValid(x_ind, y_ind, np.squeeze(img_rgb.shape[1:]))
patch = img_rgb[:, x_ind[0]:x_ind[1], y_ind[0]:y_ind[1]]
img_target[:, x_ind[0]:x_ind[1], y_ind[0]:y_ind[1]] = patch
img_rgb[:, label_mask] = 0.0
# Erode.
label_mask = cv2.erode(label_mask.astype(np.uint8)*255, np.ones([5,5], np.uint8),iterations=1).astype(np.bool)
img_rgb[:, label_mask] = 1.0
# Save everything in the appropriate format.
saveImages(img_rgb, img_target, step)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--datadir', required=True, help='Directory for dataset')
parser.add_argument('--outdir', required=True, help='Output directory for patches')
parser.add_argument('--subsample', type=int, default=1, help='Only use every nth image of the dataset')
args = parser.parse_args()
label(args)
|
the-stack_0_20765 | """
Purpose of this package is to populate named ranges
in Excel with NX measurement data from a JSON file.
Prior to use, the Excel document needs to have ranges named
and ready to populate, which takes some time to setup
the first time this is used.
xlwings requires that Excel be open in order to run this code.
"""
import datetime
import json
import logging
import logging.config
from pathlib import Path
from typing import List, Optional, Union
import xlwings as xw
# logging set-up
logging.config.fileConfig("logging.conf")
logger: logging.Logger = logging.getLogger(__name__)
########################
## XLWINGS INTERFACES ##
########################
def backup_workbook(workbook: xw.main.Book, backup_dir: str = ".") -> Path:
"""Create a backup copy of the workbook.
Returns the path of the backup copy."""
# TODO: Make more robust naming convention
# TODO: Verify M365 files are backing up correctly
print(f"Backing up {workbook.name}...")
wb_name: str = workbook.name.split(".xlsx")[0]
backup_path = Path(f"{backup_dir}\\{wb_name}_BACKUP.xlsx")
# Open a new blank workbook
backup_wb: xw.main.Book = xw.Book()
# Copy sheets individually
for sheet in workbook.sheets:
sheet.copy(after=backup_wb.sheets[0])
# Delete the first blank sheet
backup_wb.sheets[0].delete()
# Save & close
backup_wb.save(path=backup_path)
backup_wb.close()
return backup_path
def dump(workbook: xw.main.Book, json_file: str) -> None:
"""Take data frome a dictionary of key-value pairs
that originated from a JSON file, and place it in Excel
in a new worksheet for easy access.
"""
sheet_name: str = "DATUM " + json_file.split("\\")[-1]
# get the data from the json_file
data = get_json_key_value_pairs(json_file)
if data is None:
logger.error("No key-value pairs in JSON file to dump.")
else:
# create a new worksheet
try:
workbook.sheets.add(sheet_name)
except ValueError: # sheet already exists
workbook.sheets[sheet_name].delete()
workbook.sheets.add(sheet_name)
# keep count of rows added
current_row: int = 1
# dump metadata
metadata = load_metadata_from_json(json_file)
if metadata is not None:
for key in metadata:
target_range: str = f"A{current_row}:B{current_row}"
workbook.sheets[sheet_name].range(target_range).value = [
key,
metadata[key],
]
current_row += 1
current_row += 1 # blank row
# create header row
target_range = f"A{current_row}:B{current_row}"
workbook.sheets[sheet_name].range(target_range).value = ["PARAMETER", "VALUE"]
current_row += 1
# add each key-value pair from the data
for index, key in enumerate(sorted(data)):
target_range = f"A{current_row + index}:B{current_row + index}"
workbook.sheets[sheet_name].range(target_range).value = list(
flatten_list([key, data[key]])
)
def get_workbook_key_value_pairs(workbook: xw.main.Book) -> Optional[dict]:
"""Find all named ranges in a workbook and return
a dictionary of name-value pairs."""
if len(workbook.names) == 0:
logger.error(f"Workbook {workbook.name} has no named ranges.")
return None
# make a dict of named ranges, measurement names, and measurement types
workbook_named_ranges = dict()
for named_range in workbook.names:
# Sometimes Excel puts in hidden names that start
# with _xlfn. -- skip these
if named_range.name.startswith("_xlfn."):
logger.debug(f"Skipping range {named_range.name}")
continue
if "!#REF" in named_range.refers_to:
logger.error(f"Name {named_range.name} has a #REF! error.")
continue
workbook_named_ranges[named_range.name] = named_range.refers_to_range.value
return workbook_named_ranges
def load_metadata_from_json(json_file: str) -> Optional[dict]:
"""Loads 'METADATA' field from JSON file"""
try:
with open(json_file, "r") as json_handle:
json_metadata: dict = json.load(json_handle)
if check_dict_keys(json_metadata, ["METADATA"]):
return json_metadata["METADATA"]
else:
logger.debug(f'No "METADATA" in {json_file}')
return None
# TODO: Build JSON Validation function
except FileNotFoundError:
logger.debug(f"{json_file} not found.")
return None
except json.decoder.JSONDecodeError:
logger.error(f"JSON file {json_file} is corrupt.")
return None
def write_named_range(
workbook: xw.main.Book,
range_name: str,
new_value: Optional[Union[list, float, int, str, datetime.datetime]],
) -> Optional[Union[list, int, str, float, datetime.datetime]]:
"""Write a value or list to a named range.
Keyword arguments:
workbook -- xlwings Book object
range_name -- string with range name
new_value -- new value or list of values to write
"""
if range_name not in workbook.names:
raise KeyError(f"Name {range_name} not in {workbook.name}")
if "!#REF" in workbook.names[range_name].refers_to:
raise TypeError(f"Name {range_name} has a #REF! error.")
target_range: xw.main.Range = workbook.names[range_name].refers_to_range
if isinstance(new_value, list):
# Flatten any arbitrary list
new_value = list(flatten_list(new_value))
new_value_len: int = len(new_value)
if new_value_len > target_range.size:
# Truncate input if range size is too small
new_value = new_value[: target_range.size]
logger.warning(f"range {target_range.name} has size {target_range.size}.")
logger.warning(f"Vector of length {new_value_len} will be truncated.")
elif new_value_len < target_range.size:
# If range is too big, warn that not all cells will be populated
logger.warning(
f"Range {range_name} of size "
f"{target_range.size} is larger than required."
)
for index in range(min(target_range.size, new_value_len)):
if (
not isinstance(
new_value[index], (int, str, float, list, datetime.datetime)
)
and new_value[index] is not None
):
raise TypeError(f"Write {type(new_value[index])} not allowed.")
target_range[index].value = new_value[index]
return new_value
elif (
isinstance(new_value, (int, str, float, datetime.datetime)) or new_value is None
):
target_range.value = new_value
return new_value
else:
raise TypeError(f"Cannot write value of type {type(new_value)}")
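# Illustrative sketch (assumed workbook and range names): writing a scalar and
# a vector with the helper above.
#
#   wb = xw.Book('measurements.xlsx')
#   write_named_range(wb, 'SURFACE_SPHERICAL.area', 42.0)
#   write_named_range(wb, 'SURFACE_SPHERICAL.centroid', [1.0, 2.0, 3.0])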
#######################
###### UTILITIES ######
#######################
def check_dict_keys(target_dict: dict, keys_to_check: list) -> bool:
"""Check that a dictionary has the required keys.
Ensure keys are not empty lists. Warn if keys not found."""
if not isinstance(target_dict, dict):
logger.warning("Not a dictionary.")
return False
for key in keys_to_check:
if key not in target_dict.keys():
logger.warning(f'Key "{key}" not found.')
return False
if isinstance(target_dict[key], (list, dict)) and len(target_dict[key]) == 0:
logger.warning(f'Key "{key}" has no entries.')
return False
return True
def flatten_list(target_list: list):
"""Flatten any nested list."""
if not isinstance(target_list, list):
raise TypeError("Cannot flatten a non-list")
for element in target_list:
if isinstance(element, list):
for sub_element in flatten_list(element):
yield sub_element
else:
yield element
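# Illustrative sketch: flatten_list is a generator, so wrap it in list().
#
#   list(flatten_list([1, [2, [3, 4]], 5]))   # -> [1, 2, 3, 4, 5]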
def report_difference(
old_value: Optional[Union[int, float, str, datetime.datetime]],
new_value: Optional[Union[int, float, str, datetime.datetime]],
) -> Optional[Union[float, datetime.timedelta]]:
"""Report the difference between any two instances of
int, float, str, date or None. Return None if no numerical
comparison can be made. Return the relative (fractional) difference for ints and floats.
Return timedelta for comparison of dates."""
if old_value == 0:
return None # otherwise divide by zero error
if isinstance(old_value, datetime.datetime) and isinstance(
new_value, datetime.datetime
):
return new_value - old_value
if isinstance(old_value, (int, float)) and isinstance(new_value, (int, float)):
return (new_value - old_value) / old_value
return None
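# Illustrative sketch: numeric inputs give a fractional change, datetimes give
# a timedelta, and a zero old value returns None.
#
#   report_difference(10.0, 12.5)   # -> 0.25
#   report_difference(0, 5)         # -> None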
########################
#### CORE FUNCTIONS ####
########################
def get_json_key_value_pairs(json_file: str) -> Optional[dict]:
"""Load JSON measurement dict from a JSON file."""
try:
with open(json_file, "r") as json_file_handle:
json_data: dict = json.load(json_file_handle)
# TODO: Build JSON Validation function
except FileNotFoundError:
logger.error(f"Unable to open {json_file}")
return None
except json.decoder.JSONDecodeError:
logger.error(f"JSON file {json_file} is corrupt.")
return None
if not check_dict_keys(json_data, ["measurements"]):
logger.warning(f'No "measurement" field in {json_file}')
return None
json_named_measurements: dict = dict()
for measurement in json_data["measurements"]:
if not check_dict_keys(measurement, ["name", "expressions"]):
logger.warning(f"{measurement} is missing name and/or expressions")
continue
# replace spaces with underscores - no spaces allowed in excel range names
measurement_name: str = measurement["name"].replace(" ", "_")
for expr in measurement["expressions"]:
if not check_dict_keys(expr, ["name", "type", "value"]):
logger.warning(f"missing name/type/value fields in {expr}")
continue
range_name: str = f"{measurement_name}.{expr['name']}"
if expr["type"] == "Point" or expr["type"] == "Vector":
vector: List[float] = []
for coordinate in ["x", "y", "z"]:
coordinate_name: str = f"{range_name}.{coordinate}"
json_named_measurements[coordinate_name] = expr["value"][coordinate]
vector.append(expr["value"][coordinate])
# TODO: Keep dicts as dicts, don't conver them to vectors.
# Requires update of write_named_range to handle dicts.
# json_named_measurements[range_name] = expr["value"]
json_named_measurements[range_name] = vector
elif expr["type"] == "List":
json_named_measurements[range_name] = expr["value"]
for index in range(3):
range_index = f"{range_name}.{index}"
json_named_measurements[range_index] = expr["value"][index]
else:
json_named_measurements[range_name] = expr["value"]
return json_named_measurements
def preview_named_range_update(
existing_values: dict, new_values: dict, min_diff: float = 0.001
) -> None:
"""Print out list of values that will be overwritten."""
column_widths = [36, 17, 17, 17]
column_headings = ["PARAMETER", "OLD VALUE", "NEW VALUE", "PERCENT CHANGE"]
underlines = ["-" * 20, "-" * 12, "-" * 12, "-" * 15]
print() # newline
print_columns(column_widths, column_headings)
print_columns(column_widths, underlines)
for range_name in new_values.keys():
json_value = new_values[range_name]
excel_value = existing_values[range_name]
if isinstance(json_value, (list, dict)):
for index, json_item in enumerate(json_value):
if isinstance(json_value, dict):
item_name = f"{range_name}[{json_item}]"
json_item = json_value[json_item]
else:
item_name = f"{range_name}[{index}]"
if isinstance(excel_value, list):
excel_item = excel_value[index]
elif index == 0:
excel_item = excel_value
else:
excel_item = None
difference = report_difference(excel_item, json_item)
if isinstance(difference, float) and abs(difference) < min_diff:
continue # @pytest-pass
print_columns(
column_widths, [item_name, excel_item, json_item, difference]
)
else:
difference = report_difference(excel_value, json_value)
if isinstance(difference, float) and abs(difference) < min_diff:
continue
print_columns(
column_widths, [range_name, excel_value, json_value, difference]
)
def print_columns(
widths: list, values: list, decimals: int = 3, na_string: str = "-"
) -> None:
if len(widths) != len(values):
raise IndexError("Mismatch of columns & values.")
if any([not isinstance(item, int) for item in widths]):
raise TypeError("Column widths must be integers")
alignments = ["<", ">", ">", ">"] # align left for first column
for column, value in enumerate(values):
if isinstance(value, float):
# use percentage on last column
if column == 3:
fspec = "%"
else:
fspec = "g"
print(
"{val:{al}{wid}.{prec}{fspec}}".format(
val=value,
al=alignments[column],
wid=widths[column],
prec=decimals,
fspec=fspec,
),
end="",
)
elif isinstance(value, str):
# truncate extra long strings
if len(value) > widths[column]:
num_chars = int(widths[column] / 2) - 3
value = value[:num_chars] + "..." + value[-num_chars:]
print(
"{val:{al}{wid}}".format(
val=value, al=alignments[column], wid=widths[column]
),
end="",
)
elif isinstance(value, datetime.datetime):
datestr = value.strftime("%Y-%m-%d")
print(
"{val:{al}{wid}}".format(val=datestr, al=">", wid=widths[column]),
end="",
)
elif isinstance(value, datetime.timedelta):
date_delta = f"{value.days} days"
print(
"{val:{al}{wid}}".format(val=date_delta, al=">", wid=widths[column]),
end="",
)
elif value is None:
print(
"{val:{al}{wid}}".format(val=na_string, al="^", wid=widths[column]),
end="",
)
print() # newline
def update_named_ranges(
source: Union[str, dict], target: xw.main.Book, backup: bool = False
) -> Optional[dict]:
"""
Open a JSON file and an excel file. Update the named
ranges in the excel file with the corresponding JSON values.
Named ranges may correspond to a particular expression type.
For example, the measurement SURFACE_SPHERICAL has an expression
of type "area", along with other expressions. To populate this in
Excel, we need to name the range "SURFACE_SPHERICAL.area"
"""
# Assume target is open excel worksheet
# TODO: Implement ability to take .xlsx file path as argument
target_data: Optional[dict] = get_workbook_key_value_pairs(target)
if not target_data:
print("No named ranges in Excel file.")
return None
# Check if source is json file
if isinstance(source, str) and source.lower().endswith(".json"):
source_data: Optional[dict] = get_json_key_value_pairs(source)
source_str: str = source
if not source_data:
print("No measurement data found in JSON file.")
return None
elif isinstance(source, dict):
source_data = source
source_str = "UNDO BUFFER"
# find range names that occur both in Excel and JSON
ranges_to_update: list = list(source_data.keys() & target_data.keys())
range_update_buffer: dict = dict()
range_undo_buffer: dict = dict()
for range in ranges_to_update:
range_update_buffer[range] = source_data[range]
range_undo_buffer[range] = target_data[range]
write_named_ranges(
range_undo_buffer, range_update_buffer, target, source_str, backup
)
return range_undo_buffer
def write_named_ranges(
existing_values: dict,
new_values: dict,
workbook: xw.main.Book,
source_str: str,
backup: bool = False,
) -> None:
"""Update named ranges in a workbook from a dictionary."""
preview_named_range_update(existing_values, new_values)
print("The values listed above will be overwritten.")
# TODO: Add argument to function to skip confirmation
overwrite_confirm: str = input("Enter 'y' to continue: ")
if overwrite_confirm == "y":
if backup:
backup_path: Path = backup_workbook(workbook)
logger.debug(f"Backed up to {backup_path}")
logger.debug(
f"Updating named ranges.\n\
Source: {source_str}\n\
Target: {workbook.fullname}"
)
for range in new_values.keys():
write_named_range(workbook, range, new_values[range])
else:
print("Aborted.")
|
the-stack_0_20766 | import matplotlib.pyplot as plt
import numpy as np
# Generate data
x = np.linspace(0, 10, 100)
y1 = np.sin(x)
y2 = np.cos(x)
# Initialize the plot area (Figure, Axes)
plt.figure(figsize=(12, 8))
fig1=plt.subplot(131)
fig2=plt.subplot(132)
fig3=plt.subplot(133)
# Create the bar charts
fig1.bar([1,2,3],[3,4,5])
fig1.set_xlabel("x")
fig1.set_ylabel("y")
fig2.barh([0.5,1.5,2.5],[0.5,1,2])
fig2.set_xlabel("xbar")
fig2.set_ylabel("ybar")
fig2.set_xlim(0,3)
fig2.set_ylim(0,3)
fig3.scatter(y1, y2)
plt.xlabel("sin(x)")
plt.ylabel("cos(x)")
plt.xlim(-1.2,1.2)
plt.ylim(-1.5,1.5)
#fig3.set_xlabel("sin(x)")
#fig3.set_ylabel("cos(x)")
plt.show() |
the-stack_0_20767 | import numpy as np
def MonteCarlo_double(f, g, x0, x1, y0, y1, n):
"""
Monte Carlo integration of f over a domain g>=0, embedded
in a rectangle [x0,x1]x[y0,y1]. n^2 is the number of
random points.
"""
# Draw n**2 random points in the rectangle
x = np.random.uniform(x0, x1, n)
y = np.random.uniform(y0, y1, n)
# Compute sum of f values inside the integration domain
f_mean = 0
num_inside = 0 # number of x,y points inside domain (g>=0)
for i in range(len(x)):
for j in range(len(y)):
if g(x[i], y[j]) >= 0:
num_inside = num_inside + 1
f_mean = f_mean + f(x[i], y[j])
f_mean = f_mean/num_inside
area = num_inside/(n**2)*(x1 - x0)*(y1 - y0)
return area*f_mean
def test_MonteCarlo_double_rectangle_area():
"""Check the area of a rectangle."""
def g(x, y):
return (1 if (0 <= x <= 2 and 3 <= y <= 4.5) else -1)
x0 = 0; x1 = 3; y0 = 2; y1 = 5 # embedded rectangle
n = 1000
np.random.seed(8) # must fix the seed!
I_expected = 3.121092 # computed with this seed
I_computed = MonteCarlo_double(
lambda x, y: 1, g, x0, x1, y0, y1, n)
assert abs(I_expected - I_computed) < 1E-14
def test_MonteCarlo_double_circle_r():
"""Check the integral of r over a circle with radius 2."""
def g(x, y):
xc, yc = 0, 0 # center
R = 2 # radius
return R**2 - ((x-xc)**2 + (y-yc)**2)
# Exact: integral of r*r*dr over circle with radius R becomes
# 2*pi*1/3*R**3
import sympy
r = sympy.symbols('r')
I_exact = sympy.integrate(2*sympy.pi*r*r, (r, 0, 2))
print('Exact integral: {:g}'.format(I_exact.evalf()))
x0 = -2; x1 = 2; y0 = -2; y1 = 2
n = 1000
np.random.seed(6)
I_expected = 16.7970837117376384 # Computed with this seed
I_computed = MonteCarlo_double(
lambda x, y: np.sqrt(x**2 + y**2),
g, x0, x1, y0, y1, n)
print('MC approximation, {:d} samples: {:.16f}'\
.format(n**2, I_computed))
assert abs(I_expected - I_computed) < 1E-15
if __name__ == '__main__':
test_MonteCarlo_double_rectangle_area()
test_MonteCarlo_double_circle_r() |
the-stack_0_20768 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from matplotlib import rcParams
from matplotlib import rcParamsDefault
import numpy as np
def set_figure_settings(Figure_Type,**kwargs):
rcParams.update(rcParamsDefault)
params = {}
if Figure_Type == 'paper':
params = {'lines.linewidth': 2,
'lines.markersize': 5,
'legend.fontsize': 8,
'legend.borderpad': 0.2,
'legend.labelspacing': 0.2,
'legend.handletextpad' : 0.2,
'legend.borderaxespad' : 0.2,
'legend.scatterpoints' :1,
'xtick.labelsize' : 8,
'ytick.labelsize' : 8,
'axes.titlesize' : 8,
'axes.labelsize' : 8,
'figure.autolayout': True,
'font.family': 'Calibri',
'font.size': 8}
elif Figure_Type == 'presentation':
params = {'lines.linewidth' : 3,
'legend.handlelength' : 1.0,
'legend.handleheight' : 1.0,
'legend.fontsize': 16,
'legend.borderpad': 0.2,
'legend.labelspacing': 0.2,
'legend.handletextpad' : 0.2,
'legend.borderaxespad' : 0.2,
'legend.scatterpoints' :1,
'xtick.labelsize' : 16,
'ytick.labelsize' : 16,
'axes.titlesize' : 24,
'axes.labelsize' : 20,
'figure.autolayout': True,
'font.size': 16.0}
rcParams.update(params)
rcParams.update(kwargs) |
the-stack_0_20772 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in propms/__init__.py
from propms import __version__ as version
setup(
name='propms',
version=version,
description='PropMs',
author='Dexciss',
author_email='d',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
|
the-stack_0_20774 | # -*- coding: utf-8 -*-
import sys,os
father_path = os.path.join(os.getcwd())
print(father_path, "==father path==")
def find_bert(father_path):
if father_path.split("/")[-1] == "BERT":
return father_path
output_path = ""
for fi in os.listdir(father_path):
if fi == "BERT":
output_path = os.path.join(father_path, fi)
break
else:
if os.path.isdir(os.path.join(father_path, fi)):
# propagate the result of the recursive search instead of discarding it
output_path = find_bert(os.path.join(father_path, fi)) or output_path
else:
continue
return output_path
bert_path = find_bert(father_path)
t2t_bert_path = os.path.join(bert_path, "t2t_bert")
sys.path.extend([bert_path, t2t_bert_path])
print(sys.path)
import tensorflow as tf
from distributed_single_sentence_classification import hvd_train_eval
from distributed_multitask import hvd_train_eval as multitask_hvd_train_eval
import horovod.tensorflow as hvd
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
flags.DEFINE_string("buckets", "", "oss buckets")
flags.DEFINE_string(
"config_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"init_checkpoint", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"vocab_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"label_id", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"max_length", 128,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"train_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"dev_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"model_output", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"epoch", 5,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"num_classes", 5,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"train_size", 1402171,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"batch_size", 32,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"model_type", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"if_shard", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"eval_size", 1000,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"opt_type", "ps_sync",
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"is_debug", "0",
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"run_type", "0",
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"distribution_strategy", "ParameterServerStrategy",
"distribution strategy"
)
flags.DEFINE_string(
"rule_model", "normal",
"distribution strategy"
)
flags.DEFINE_string(
"parse_type", "parse_single",
"the required num_gpus"
)
flags.DEFINE_string(
"profiler", "normal",
"the required num_gpus"
)
flags.DEFINE_string(
"train_op", "adam_weight_decay_exclude",
"the required num_gpus"
)
flags.DEFINE_integer(
"num_gpus", 4,
"the required num_gpus"
)
flags.DEFINE_string(
"running_type", "train",
"the required num_gpus"
)
flags.DEFINE_string(
"load_pretrained", "no",
"the required num_gpus"
)
flags.DEFINE_string(
"w2v_path", "",
"pretrained w2v"
)
flags.DEFINE_string(
"with_char", "no_char",
"pretrained w2v"
)
flags.DEFINE_string(
"input_target", "",
"the required num_gpus"
)
flags.DEFINE_string(
"decay", "no",
"pretrained w2v"
)
flags.DEFINE_string(
"warmup", "no",
"pretrained w2v"
)
flags.DEFINE_string(
"distillation", "normal",
"if apply distillation"
)
flags.DEFINE_float(
"temperature", 2.0,
"if apply distillation"
)
flags.DEFINE_float(
"distillation_ratio", 1.0,
"if apply distillation"
)
flags.DEFINE_integer(
"num_hidden_layers", 12,
"if apply distillation"
)
flags.DEFINE_string(
    "task_type", "single_sentence_classification",
    "task type, e.g. single_sentence_classification"
)
flags.DEFINE_string(
    "classifier", "order_classifier",
    "classifier head type"
)
flags.DEFINE_string(
    "output_layer", "interaction",
    "output layer type"
)
flags.DEFINE_integer(
    "char_limit", 5,
    "maximum number of characters per token"
)
flags.DEFINE_string(
    "mode", "single_task",
    "single_task or multi_task"
)
flags.DEFINE_string(
    "multi_task_type", "wsdm",
    "multi-task task list"
)
flags.DEFINE_string(
    "multi_task_config", "wsdm",
    "path to the multi-task config"
)
def main(_):
print(FLAGS)
print(tf.__version__, "==tensorflow version==")
hvd.init()
init_checkpoint = os.path.join(FLAGS.buckets, FLAGS.init_checkpoint)
train_file = os.path.join(FLAGS.buckets, FLAGS.train_file)
dev_file = os.path.join(FLAGS.buckets, FLAGS.dev_file)
checkpoint_dir = os.path.join(FLAGS.buckets, FLAGS.model_output)
print(init_checkpoint, train_file, dev_file, checkpoint_dir)
worker_count = hvd.size()
task_index = hvd.local_rank()
is_chief = task_index == 0
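    # NOTE: hvd.local_rank() is the per-node rank, so on multi-node runs every node's
    # first process is treated as chief here; hvd.rank() would give the global rank.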
print("==worker_count==", worker_count, "==local_rank==", task_index, "==is is_chief==", is_chief)
cluster = ""
target = ""
# FLAGS.config_file = os.path.join(FLAGS.buckets, FLAGS.config_file)
FLAGS.label_id = os.path.join(FLAGS.buckets, FLAGS.label_id)
if FLAGS.mode == "single_task":
train_eval_api = hvd_train_eval
elif FLAGS.mode == "multi_task":
train_eval_api = multitask_hvd_train_eval
if FLAGS.run_type == "sess":
train_eval_api.monitored_sess(
FLAGS=FLAGS,
worker_count=worker_count,
task_index=task_index,
cluster=cluster,
is_chief=is_chief,
target=target,
init_checkpoint=init_checkpoint,
train_file=train_file,
dev_file=dev_file,
checkpoint_dir=checkpoint_dir,
distribution_strategy=FLAGS.distribution_strategy,
rule_model=FLAGS.rule_model,
parse_type=FLAGS.parse_type,
train_op=FLAGS.train_op,
running_type=FLAGS.running_type,
input_target=FLAGS.input_target,
decay=FLAGS.decay,
warmup=FLAGS.warmup,
distillation=FLAGS.distillation,
temperature=FLAGS.temperature,
distillation_ratio=FLAGS.distillation_ratio)
elif FLAGS.run_type == "estimator":
train_eval_api.monitored_estimator(
FLAGS=FLAGS,
worker_count=worker_count,
task_index=task_index,
cluster=cluster,
is_chief=is_chief,
target=target,
init_checkpoint=init_checkpoint,
train_file=train_file,
dev_file=dev_file,
checkpoint_dir=checkpoint_dir,
distribution_strategy=FLAGS.distribution_strategy,
rule_model=FLAGS.rule_model,
parse_type=FLAGS.parse_type,
train_op=FLAGS.train_op,
running_type=FLAGS.running_type,
input_target=FLAGS.input_target,
decay=FLAGS.decay,
warmup=FLAGS.warmup,
distillation=FLAGS.distillation,
temperature=FLAGS.temperature,
distillation_ratio=FLAGS.distillation_ratio)
if __name__ == "__main__":
tf.app.run() |
the-stack_0_20780 | """Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from webservice.models import Plugin
from webservice.forms import plugin_forms
@login_required(login_url='/login/')
def view_all_plugins(request):
render_vars = dict()
try:
render_vars['plugins'] = Plugin.objects.all()
except Plugin.DoesNotExist:
render_vars['plugins'] = None
return render_to_response('view_all_plugins.html', render_vars, context_instance=RequestContext(request))
@login_required(login_url='/login/')
def add_new_plugin(request):
if request.method == 'POST':
form = plugin_forms.PluginForm(request.POST)
if form.is_valid():
form.save()
return redirect('/plugins/')
else:
form = plugin_forms.PluginForm()
render_vars = dict()
render_vars['form'] = form
return render_to_response('generic_form.html', render_vars, context_instance=RequestContext(request))
@login_required(login_url='/login/')
def edit_plugin(request, plugin_id):
render_vars = dict()
plugin_object = get_object_or_404(Plugin, pk=plugin_id)
if request.method == "POST":
form = plugin_forms.PluginForm(request.POST)
if form.is_valid():
form.save(commit=False)
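            # the unsaved instance returned by save(commit=False) is discarded;
            # the cleaned form data is copied onto the existing plugin_object below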
plugin_object.name = form.cleaned_data.get('name')
plugin_object.description = form.cleaned_data.get('description')
plugin_object.plugin_path = form.cleaned_data.get('plugin_path')
plugin_object.status = form.cleaned_data.get('status')
plugin_object.accessible_by = form.cleaned_data.get('accessible_by')
plugin_object.save()
return redirect('/plugins/')
else:
form = plugin_forms.PluginForm(instance=plugin_object)
render_vars['form'] = form
return render_to_response('generic_form.html', render_vars, context_instance=RequestContext(request)) |
the-stack_0_20782 | def binary(bn):
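    # bn holds the binary digits written as a decimal integer; peel digits off the
    # right end (least-significant bit first) and accumulate the decimal value.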
i=0
sum=0
while(bn!=0):
ld = bn%10
bn = bn//10
sum=ld*pow(2,i)+sum
i+=1
if(sum%3==0):
return(1)
else:
return(0)
n = int(input())
for i in range(n):
bn= int(input())
result = binary(bn)
print(result)
|
the-stack_0_20783 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.run.default_configuration import get_default_configuration
from nnunet.paths import default_plans_identifier
from nnunet.training.cascade_stuff.predict_next_stage import predict_next_stage
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerCascadeFullRes import nnUNetTrainerCascadeFullRes
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
parser = argparse.ArgumentParser()
parser.add_argument("network")
parser.add_argument("network_trainer")
parser.add_argument("task", help="can be task name or task id")
parser.add_argument("fold", help='0, 1, ..., 5 or \'all\'')
parser.add_argument("-val", "--validation_only", help="use this if you want to only run the validation",
action="store_true")
parser.add_argument("-c", "--continue_training", help="use this if you want to continue a training",
action="store_true")
parser.add_argument("-p", help="plans identifier. Only change this if you created a custom experiment planner",
default=default_plans_identifier, required=False)
parser.add_argument("--use_compressed_data", default=False, action="store_true",
help="If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data "
"is much more CPU and RAM intensive and should only be used if you know what you are "
"doing", required=False)
parser.add_argument("--deterministic",
help="Makes training deterministic, but reduces training speed substantially. I (Fabian) think "
"this is not necessary. Deterministic training will make you overfit to some random seed. "
"Don't use that.",
required=False, default=False, action="store_true")
parser.add_argument("-gpus", help="number of gpus", required=True,type=int)
parser.add_argument("--dbs", required=False, default=False, action="store_true", help="distribute batch size. If "
"True then whatever "
"batch_size is in plans will "
"be distributed over DDP "
"models, if False then each "
"model will have batch_size "
"for a total of "
"GPUs*batch_size")
parser.add_argument("--npz", required=False, default=False, action="store_true", help="if set then nnUNet will "
"export npz files of "
"predicted segmentations "
"in the vlaidation as well. "
"This is needed to run the "
"ensembling step so unless "
"you are developing nnUNet "
"you should enable this")
parser.add_argument("--valbest", required=False, default=False, action="store_true", help="")
parser.add_argument("--find_lr", required=False, default=False, action="store_true", help="")
parser.add_argument("--fp32", required=False, default=False, action="store_true",
help="disable mixed precision training and run old school fp32")
parser.add_argument("--val_folder", required=False, default="validation_raw",
help="name of the validation folder. No need to use this for most people")
parser.add_argument("--disable_saving", required=False, action='store_true',
help="If set nnU-Net will not save any parameter files. Useful for development when you are "
"only interested in the results and want to save some disk space")
# parser.add_argument("--interp_order", required=False, default=3, type=int,
# help="order of interpolation for segmentations. Testing purpose only. Hands off")
# parser.add_argument("--interp_order_z", required=False, default=0, type=int,
# help="order of interpolation along z if z is resampled separately. Testing purpose only. "
# "Hands off")
# parser.add_argument("--force_separate_z", required=False, default="None", type=str,
# help="force_separate_z resampling. Can be None, True or False. Testing purpose only. Hands off")
args = parser.parse_args()
task = args.task
fold = args.fold
network = args.network
network_trainer = args.network_trainer
validation_only = args.validation_only
plans_identifier = args.p
use_compressed_data = args.use_compressed_data
decompress_data = not use_compressed_data
deterministic = args.deterministic
valbest = args.valbest
find_lr = args.find_lr
num_gpus = args.gpus
fp32 = args.fp32
val_folder = args.val_folder
# interp_order = args.interp_order
# interp_order_z = args.interp_order_z
# force_separate_z = args.force_separate_z
if not task.startswith("Task"):
task_id = int(task)
task = convert_id_to_task_name(task_id)
if fold == 'all':
pass
else:
fold = int(fold)
# if force_separate_z == "None":
# force_separate_z = None
# elif force_separate_z == "False":
# force_separate_z = False
# elif force_separate_z == "True":
# force_separate_z = True
# else:
# raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z)
plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
trainer_class = get_default_configuration(network, task, network_trainer, plans_identifier)
if trainer_class is None:
raise RuntimeError("Could not find trainer class")
if network == "3d_cascade_fullres":
assert issubclass(trainer_class, nnUNetTrainerCascadeFullRes), "If running 3d_cascade_fullres then your " \
"trainer class must be derived from " \
"nnUNetTrainerCascadeFullRes"
else:
assert issubclass(trainer_class, nnUNetTrainer), "network_trainer was found but is not derived from " \
"nnUNetTrainer"
trainer = trainer_class(plans_file, fold, output_folder=output_folder_name,
dataset_directory=dataset_directory, batch_dice=batch_dice, stage=stage,
unpack_data=decompress_data, deterministic=deterministic,
distribute_batch_size=args.dbs, num_gpus=num_gpus, fp16=not fp32)
if args.disable_saving:
        trainer.save_latest_only = False  # if False it will not store/overwrite _latest but keep a separate file for each checkpoint
trainer.save_intermediate_checkpoints = False # whether or not to save checkpoint_latest
trainer.save_best_checkpoint = False # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
trainer.save_final_checkpoint = False # whether or not to save the final checkpoint
trainer.initialize(not validation_only)
if find_lr:
trainer.find_lr()
else:
if not validation_only:
if args.continue_training:
trainer.load_latest_checkpoint()
trainer.run_training()
else:
if valbest:
trainer.load_best_checkpoint(train=False)
else:
trainer.load_latest_checkpoint(train=False)
trainer.network.eval()
# predict validation
trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder)
if network == '3d_lowres':
print("predicting segmentations for the next stage of the cascade")
predict_next_stage(trainer, join(dataset_directory, trainer.plans['data_identifier'] + "_stage%d" % 1))
if __name__ == "__main__":
main()
|
the-stack_0_20784 | import torch
import torch.nn as nn
import torch.nn.init as init
from model.SKBlock import SKBlock
def tconv(planes=1, rate=2):
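    # ConvTranspose2d upsampling by `rate` (kernel=2*rate, stride=rate, padding=rate//2);
    # for even rates this scales the spatial size exactly by `rate`.
    # NOTE: both branches below currently build the same layer.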
if rate%2==0:
return nn.ConvTranspose2d(planes, planes, kernel_size=int(4*rate//2), stride=rate, padding=rate//2, bias=False)
else:
return nn.ConvTranspose2d(planes, planes, kernel_size=int(4*rate//2), stride=rate, padding=rate//2, bias=False)
class Net(nn.Module):
def __init__(self, blocks, rate):
super(Net, self).__init__()
# self.convt_I1 = nn.ConvTranspose2d(in_channels=1, out_channels=1, kernel_size=4, stride=2, padding=1, bias=False)
self.convt_I1 = tconv(planes=1, rate=rate)
self.conv_input = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.LeakyReLU()
# self.convt_F1 = self.make_layer(SrSEBlock(64), blocks)
self.convt_F1 = self._make_layer(SKBlock(64), blocks)
# self.Transpose = nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1, bias=False)
self.Transpose = tconv(planes=64, rate=rate)
self.relu_transpose = nn.LeakyReLU(0.2, inplace=True)
self.convt_R1 = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal(m.weight)
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
init.orthogonal(m.weight)
if m.bias is not None:
m.bias.data.zero_()
def _make_layer(self, block, blocks):
layers = []
for _ in range(blocks):
layers.append(block)
return nn.Sequential(*layers)
def forward(self, x):
"""放大LR"""
convt_I1 = self.convt_I1(x)
"""利用SE-ResNet进行特征提取"""
out = self.relu(self.conv_input(x))
convt_F1 = self.convt_F1(out)
"""放大提取到的feature map"""
convt_out = self.relu_transpose(self.Transpose(convt_F1))
convt_R1 = self.convt_R1(convt_out)
HR = convt_I1 + convt_R1
return HR
class L1_Charbonnier_loss(nn.Module):
"""L1 Charbonnierloss."""
def __init__(self):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-6
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps)
loss = torch.sum(error)
# print(loss.data[0])
return loss
|
the-stack_0_20787 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from Minimiza.LeituraEscrita import *
class Par: # represents one pair in the minimization table
    idPar = None
    e1 = None # state 1
    e2 = None # state 2
    valido = None # whether the states of this pair are still considered equal
    dependentes = None # the pairs that depend on this pair
    motivo = None # the reason why they are not equal
def __init__(self, idPar, e1, e2):
self.idPar = idPar
self.e1 = e1
self.e2 = e2
        if(e1.final == e2.final): # checks whether both states are final or both non-final
            self.valido = True
            self.motivo = ""
        else: # otherwise they are marked as different and the reason is recorded
            self.valido = False
            self.motivo = "final/nao-final"
        self.dependentes = []
    def getValidacao(self): # returns the table mark: 0 if the pair is still considered equal, 1 otherwise
        if(self.valido == False): # the bool is not converted to an int automatically, hence the explicit check
return 1
return 0
class Automato:
    estados = None # list of the automaton's states
    estadosDic = None # dictionary that references the states
    alfabeto = None
    inicial = None
    finais = None # dictionary of final states
    # builds the minimized automaton
def novoAutomato(self):
novoAutomato = Automato()
for estado in self.estados:
            if not(estado.usado): # if the state has not been used yet, insert it into the new automaton
                # if there are equivalent states, merge them into a single one
                if(len(estado.equivalentes) >0): # NOTE: equivalents are collected while the table is built
                    novoEstado = Estado()
                    novoEstado.final = estado.final # merging final states yields a final state, otherwise a non-final one
                    novoAutomato.estados.append(novoEstado)
                    estado.estadoNovo = novoEstado # the old state references the new, minimized state
                    nome = "q" + estado.idEstado
                    novoEstado.equivalentes.append(estado) # add the original states to the new one
                    ini = (estado.idEstado == self.inicial) # if any of the old states is initial, the new one is too
                    estado.usado = True # mark the state as used
                    # do the same for every equivalent state
for equivalente in estado.equivalentes:
novoEstado.equivalentes.append(equivalente)
equivalente.usado = True
nome += "q" + equivalente.idEstado
equivalente.estadoNovo = novoEstado
if(equivalente.idEstado == self.inicial):
ini = True
                    novoEstado.idEstado = nome # the new state's name is the concatenation of the old names
if(ini):
novoAutomato.inicial = novoEstado.idEstado
                # if there are no equivalent states, create a new state identical to the old one
else:
novoEstado = Estado()
novoEstado.final = estado.final
novoAutomato.estados.append(novoEstado)
estado.estadoNovo = novoEstado
nome = "q" + estado.idEstado
novoEstado.idEstado = nome
novoEstado.equivalentes.append(estado)
if(estado.idEstado == self.inicial):
novoAutomato.inicial = novoEstado.idEstado
novoAutomato.estadosDic = novoAutomato.criaDicionario(novoAutomato.estados)
        for i in range(len(self.estados)): # mark every state as unused again
            self.estados[i].usado = False
        # copy the transitions of the old states into the new automaton
        for estado in novoAutomato.estados:
            equivalente = estado.equivalentes[0] # take this state's counterpart in the old automaton
            for transicao in equivalente.transicoes:
                if (transicao.origem.estadoNovo == None): # if the state was not minimized, the origin stays the same
                    e1 = "q" + transicao.origem.idEstado
                else: # otherwise the origin becomes the minimized state
                    e1 = transicao.origem.estadoNovo.idEstado
                # the same applies to the transition's destination
if (transicao.destino.estadoNovo == None):
e2 = "q" + transicao.destino.idEstado
else:
e2 = transicao.destino.estadoNovo.idEstado
                # add the transition to the corresponding state of the new automaton
e1 = novoAutomato.estadosDic[e1]
e2 = novoAutomato.estadosDic[e2]
novaTransicao = Transicao(e1, transicao.letra ,e2)
if(novaTransicao not in e1.transicoes):
e1.transicoes.append(novaTransicao)
novoAutomato.alfabeto = self.alfabeto
return novoAutomato
def renomeia(self):
i = 0
for estado in self.estados:
print(estado.idEstado + "+++++++++++++++++++")
if(estado.idEstado.find("qERRO") < 0):
if(estado.idEstado == self.inicial):
self.inicial = "q" + str(i)
estado.idEstado = "q" + str(i)
i += 1
else:
estado.idEstado = "qERRO"
print("EUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUuu")
    # builds a dictionary of states for the automaton
def criaDicionario(self, estados):
estadosDic = {}
for i in range(len(estados)):
estadosDic[estados[i].idEstado] = estados[i]
return estadosDic
def __init__(self, nomeArq = None):
        if(nomeArq == None): # if no file name is given, create an empty automaton
self.estados = []
self.estadosDic = {}
self.alfabeto = []
self.finais = {}
return
        arquivo = Arquivo(nomeArq, 'r') # instantiate an Arquivo reader object (from LeituraEscrita)
        # read the automaton description from the file
self.estados = arquivo.leEstados()
self.estadosDic = self.criaDicionario(self.estados)
self.alfabeto = arquivo.leAlfabeto()
arquivo.leTransicoes(self)
self.inicial = arquivo.leInicial()
self.finais = arquivo.leFinais()
        # mark the final states
for i in range(len(self.estados)):
if(self.estados[i].idEstado in self.finais):
self.estados[i].final = True
    # minimization entry point
    def minimiza(self, arqTabela, arqMin):
        #~ arquivoTabela = open(arqTabela, 'w') # file used to write the table
        tabela = Tabela() # instantiate the object that builds the table
        # create the pairs of the minimization table
idPar = 0
for i in range(len(self.estados)):
for j in range(i+1, len(self.estados)):
par = Par(idPar, self.estados[i], self.estados[j])
idPar += 1
tabela.pares.append(par)
        tabela.minimiza(self) # apply the minimization algorithm to the table
        #~ tabela.imprimeTabela(arqTabela) # write the table to the file
#tabela.imprimeTabela("FODACE")
        escritor = Arquivo(arqMin, 'w') # instantiate the writer for the output automaton
        escritor.escreveMinimizado(self) # write the new automaton to the file
class Tabela:
pares = None
def __init__(self):
self.pares = []
    # recursively propagates the result of a pair of states
    def propaga(self, par, paresPropagados):
        # if there are dependents, propagate the result to each of them
if(len(par.dependentes) > 0):
for dep in par.dependentes:
if(dep not in paresPropagados):
paresPropagados[dep] = dep
dep.valido = False
dep.motivo = "prop[" + par.e1.idEstado + "," + par.e2.idEstado + "]"
                    self.propaga(dep, paresPropagados) # also propagate to this dependent's own dependents
    # applies the minimization algorithm to the table
def minimiza(self, automato):
for par in self.pares:
for transicao1 in par.e1.transicoes:
for transicao2 in par.e2.transicoes:
                    # if the pair is still considered equal, its transitions must be tested
if(par.valido and transicao1.letra == transicao2.letra):
                        # if one transition ends in a final state and the other does not, mark the pair as different and propagate to its dependents
if((transicao1.destino.idEstado in automato.finais) != (transicao2.destino.idEstado in automato.finais)):
par.valido = False
par.motivo = transicao1.letra + "[" + transicao2.destino.idEstado + "," + transicao1.destino.idEstado + "]"
self.propaga(par, {par : par})
                        # otherwise, register this pair as a dependent of the pair of destination states, so a later result can be propagated back to it
else:
if(transicao1.origem != transicao1.destino or transicao2.origem != transicao2.destino):
for p in self.pares:
if((p.e1.idEstado == transicao1.destino.idEstado and p.e2.idEstado == transicao2.destino.idEstado) or
(p.e2.idEstado == transicao1.destino.idEstado and p.e1.idEstado == transicao2.destino.idEstado)):
achou = False
for i in range(len(p.dependentes)):
if(p.dependentes[i].idPar == par.idPar):
achou = True
if(not achou):
p.dependentes.append(par)
#~ else:
#~ if(transicao1.origem == transicao1.destino and transicao2.origem != transicao2.destino):
#~ for p in self.pares:
#~ if(p.e1.idEstado == transicao1.origem.idEstado and p.e2.idEstado == transicao)
#~ if(transicao1.origem != transicao1.destino and transicao2.origem == transicao2.destino):
        # record, on each state, the states that are equal to it
for par in self.pares:
if(par.valido):
par.e1.equivalentes.append(par.e2)
par.e2.equivalentes.append(par.e1)
    # formats the table text and writes it to the file
def imprimeTabela(self, arquivo):
saida = ("INDICE \t \t" + "D[i,j] = \t " + "S[i,j] = \t \t" + "MOTIVO\n")
for par in self.pares:
dep = "{ "
if(par.dependentes != None):
for i in range(len(par.dependentes)):
dep += "[" + par.dependentes[i].e1.idEstado + ", " + par.dependentes[i].e2.idEstado + "]"
if(i < len(par.dependentes) - 1):
dep += ","
dep += " }"
saida += ("[" + str(par.e1.idEstado) + "," + str(par.e2.idEstado)
+ "] \t\t" + str(par.getValidacao()) + "\t \t" + str(dep) + "\t \t \t" + str(par.motivo) + "\n")
print(saida)
#~ arquivo.write(saida)
|
the-stack_0_20788 | #!/usr/bin/env python3
#
# Copyright (c) 2018 Paul Melis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import apsw
KNOWN_ACCOUNTS = {
'xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3': 'Genesis',
'xrb_13ezf4od79h1tgj9aiu4djzcmmguendtjfuhwfukhuucboua8cpoihmh8byo': 'Landing',
'xrb_35jjmmmh81kydepzeuf9oec8hzkay7msr6yxagzxpcht7thwa5bus5tomgz9': 'Faucet',
'xrb_1111111111111111111111111111111111111111111111111111hifc8npp': 'Burn',
'xrb_3wm37qz19zhei7nzscjcopbrbnnachs4p1gnwo5oroi3qonw6inwgoeuufdp': 'Developer Donations',
'xrb_1ipx847tk8o46pwxt5qjdbncjqcbwcc1rrmqnkztrfjy5k7z4imsrata9est': 'Developer Fund',
'xrb_3arg3asgtigae3xckabaaewkx3bzsh7nwz7jkmjos79ihyaxwphhm6qgjps4': 'Official representative #1',
'xrb_1stofnrxuz3cai7ze75o174bpm7scwj9jn3nxsn8ntzg784jf1gzn1jjdkou': 'Official representative #2',
'xrb_1q3hqecaw15cjt7thbtxu3pbzr1eihtzzpzxguoc37bj1wc5ffoh7w74gi6p': 'Official representative #3',
'xrb_3dmtrrws3pocycmbqwawk6xs7446qxa36fcncush4s1pejk16ksbmakis78m': 'Official representative #4',
'xrb_3hd4ezdgsp15iemx7h81in7xz5tpxi43b6b41zn3qmwiuypankocw3awes5k': 'Official representative #5',
'xrb_1awsn43we17c1oshdru4azeqjz9wii41dy8npubm4rg11so7dx3jtqgoeahy': 'Official representative #6',
'xrb_1anrzcuwe64rwxzcco8dkhpyxpi8kd7zsjc1oeimpc3ppca4mrjtwnqposrs': 'Official representative #7',
'xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1': 'Official representative #8',
'xrb_3wu7h5in34ntmbiremyxtszx7ufgkceb3jx8orkuncyytcxwzrawuf3dy3sh': 'NanoWalletBot',
'xrb_16k5pimotz9zehjk795wa4qcx54mtusk8hc5mdsjgy57gnhbj3hj6zaib4ic': 'NanoWalletBot representative',
'xrb_39ymww61tksoddjh1e43mprw5r8uu1318it9z3agm7e6f96kg4ndqg9tuds4': 'BitGrail Representative 1',
'xrb_31a51k53fdzam7bhrgi4b67py9o7wp33rec1hi7k6z1wsgh8oagqs7bui9p1': 'BitGrail Representative 2',
'xrb_3decyj8e1kpzrthikh79x6dwhn8ei81grennibmt43mcm9o8fgxqd8t46whj': 'Mercatox Representative',
'xrb_369dmjiipkuwar1zxxiuixaqq1kfmyp9rwsttksxdbf8zi3qwit1kxiujpdo': 'RaiBlocks Community',
'xrb_1nanexadj9takfo4ja958st8oasuosi9tf8ur4hwkmh6dtxfugmmii5d8uho': 'Nanex.co Representative',
'xrb_1niabkx3gbxit5j5yyqcpas71dkffggbr6zpd3heui8rpoocm5xqbdwq44oh': 'KuCoin Representative',
'xrb_3kab648ixurzeio4ixjowkn89tk3jbwd7sy91i7bnnxynzq13hjrifxpm78c': "Tony's Eliquid Co. Representative",
'xrb_1tig1rio7iskejqgy6ap75rima35f9mexjazdqqquthmyu48118jiewny7zo': 'OkEx Representative',
'xrb_1tpzgiiwb69k1rfmpjqc96neca5rgakdajb4azgm6ks8qe1o4gwu4ea575pd': 'TipBot',
'xrb_3jybgajxebuj9kby3xusmn4sqiomzu15trmkwb1xyrynnc7axss3qp1yn679': 'Nano-Miner',
'xrb_3jwrszth46rk1mu7rmb4rhm54us8yg1gw3ipodftqtikf5yqdyr7471nsg1k': 'Binance Representative'
}
GENESIS_ACCOUNT = 'xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3'
GENESIS_OPEN_BLOCK_HASH = '991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948'
GENESIS_PUBLIC_KEY = 'E89208DD038FBB269987689621D52292AE9C35941A7484756ECCED92A65093BA'
GENESIS_BALANCE_XRB = 340282366.920939
GENESIS_BALANCE_RAW = 2**128 - 1
assert KNOWN_ACCOUNTS[GENESIS_ACCOUNT] == 'Genesis'
class NanoDBException(BaseException):
pass
class BlockNotFound(NanoDBException):
pass
class AccountNotFound(NanoDBException):
pass
class NanoDatabase:
def __init__(self, dbfile, trace=False):
self.sqldb = apsw.Connection(dbfile, flags=apsw.SQLITE_OPEN_READONLY)
if trace:
self.sqldb.setexectrace(self._exectrace)
def _exectrace(self, cursor, sql, bindings):
print('%s [%s]' % (sql, repr(bindings)))
return True
def close(self):
# Mostly for use under Flask
self.sqldb.close()
def account_from_id(self, id):
assert isinstance(id, int)
cur = self.sqldb.cursor()
try:
cur.execute('select address from accounts where id=?', (id,))
row = next(cur)
return Account(self, id, row[0])
except StopIteration:
raise AccountNotFound('Unknown account %d' % id)
def account_from_address(self, addr):
cur = self.sqldb.cursor()
try:
cur.execute('select id from accounts where address=?', (addr,))
row = next(cur)
return Account(self, row[0], addr)
except StopIteration:
raise AccountNotFound('Unknown account %s' % addr)
def account_from_name(self, name):
# XXX we store the names in the DB as well, but never query them in this class, only in Account
for address, accname in KNOWN_ACCOUNTS.items():
if name == accname:
return self.account_from_address(address)
raise AccountNotFound('Account with name "%s" not found' % name)
def accounts(self):
"""Return a list of all accounts"""
res = []
cur = self.sqldb.cursor()
cur.execute('select id, address from accounts')
for id, addr in cur:
res.append(Account(self, id, addr))
return res
def account_tree(self, return_ids=False):
"""
All open blocks reference a send block from another account, which
must have been created before the receiving account was opened.
Therefore, account creation can be represented as a tree, with
the Genesis account as the root.
Returns the account tree as a dictionary:
{
<parent-account>: [<child-account>, ...]
...
}
If return_ids=True instead of Account objects integer IDs
will be used.
"""
# Find account (by open block) and corresponding send block
cur = self.sqldb.cursor()
cur.execute("""
select b.account, i.account
from blocks b, block_info i
where b.type=? and b.source=i.block
""",
('open',))
res = {}
for account, parent_account in cur:
if not return_ids:
account = self.account_from_id(account)
parent_account = self.account_from_id(parent_account)
if parent_account not in res:
res[parent_account] = [ account ]
else:
res[parent_account].append(account)
return res
def account_interactions(self, left_account, right_account):
"""
Return a list of transactions (send blocks) between two accounts,
in global (ascending) order.
[(<direction>, <block>), ...]
Transaction direction is either 'left' (send from right account
to left) or 'right' (send from left to right).
"""
assert isinstance(left_account, Account)
assert isinstance(right_account, Account)
cur = self.sqldb.cursor()
cur.execute("""
select i.account, b.id from blocks b, block_info i
where
b.id = i.block and b.type=? and
((i.account=? and b.destination=?)
or
(i.account=? and b.destination=?))
order by i.global_index asc
""",
('send',
left_account.id, right_account.id,
right_account.id, left_account.id))
res = []
for account, block in cur:
if account == left_account.id:
res.append(('right', block))
else:
res.append(('left', block))
return res
def block_from_id(self, id, type=None):
assert isinstance(id, int)
return Block(self, id, type)
def block_from_hash(self, hash):
cur = self.sqldb.cursor()
try:
cur.execute('select id from blocks where hash=?', (hash,))
row = next(cur)
return Block(self, int(row[0]))
except StopIteration:
raise BlockNotFound('No block with hash %s found' % hash)
# XXX add blocks()?
def check(self):
"""Perform consistency checks, mostly for debugging purposes"""
pass
# Check for missing blocks, e.g. previous id points to non-existent block.
# For genesis open block: source points to non-existent block
# Check for accounts not having an open block
# Check that no forks exist, i.e. two or more blocks with a common previous block
# Number of send blocks >= number of receive blocks + number of open blocks
# Check successor value against previous of successor block
# check block hash length (which are not the same, have leading zeroes?)
def cursor(self):
"""For when you know what you're doing..."""
return self.sqldb.cursor()
def dot_graph(self, fname, blocks):
"""For a selection of blocks write a DOT graph to file"""
pass
def stats(self):
"""Return a dict with some statistics"""
cur = self.sqldb.cursor()
blocks_by_type = {}
cur.execute('select type, count(*) from blocks group by type')
for type, count in cur:
blocks_by_type[type] = count
# XXX compute in raw
# XXX need amount, not balance
#cur.execute('select sum(balance) from blocks where type=?', ('send',))
#total_volume_sent = next(cur)[0]
# XXX need amount, not balance
if False:
cur.execute("""
select sum(b.balance)
from blocks b, block_info i
where b.id=i.sister and b.type=? and i.sister is null
""",
('send',))
volume_unpocketed = next(cur)[0]
return dict(
blocks_by_type=blocks_by_type,
#total_volume_sent=total_volume_sent,
#volume_unpocketed=volume_unpocketed
)
class Account:
def __init__(self, db, id, address=None):
self.db = db
self.sqldb = db.sqldb
self.id = id
if address is None:
cur = self.db.cursor()
cur.execute('select address from accounts where id=?', (id,))
address = next(cur)[0]
self.address = address
self.open_block_ = None
self.last_block_ = None
self.name_ = None
def __repr__(self):
# XXX include name, if set
return '<Account #%d %s>' % (self.id, self.address)
# XXX rename to open_block()
def first_block(self):
"""Return the first block in the chain. Should always return an "open" block"""
if self.open_block_ is not None:
return self.open_block_
cur = self.db.cursor()
cur.execute('select id from blocks where account=? and type=?', (self.id, 'open'))
try:
row = next(cur)
except StopIteration:
return None
self.open_block_ = Block(self.db, row[0])
return self.open_block_
def last_block(self):
"""Return the last (i.e. most recent) block in the chain"""
if self.last_block_ is not None:
return self.last_block_
cur = self.db.cursor()
cur.execute("""
select block from block_info where account=? and chain_index in (
select max(chain_index) from block_info where account=?
)
""", (self.id, self.id))
try:
row = next(cur)
except StopIteration:
return None
self.last_block_ = Block(self.db, row[0])
return self.last_block_
def chain_length(self):
"""Number of blocks in this account's chain"""
cur = self.db.cursor()
cur.execute('select count(*) from block_info where account=?', (self.id,))
return next(cur)[0]
def chain(self, type=None, limit=None, reverse=False):
"""
Return all blocks in the chain, in sequence.
reverse = False: open block first
reverse = True: last block first
If "type" is set, only blocks of the requested type will be returned.
If "limit" is set, at most limit blocks will be returned.
"""
order = 'desc' if reverse else 'asc'
q = 'select block from block_info where account=?'
v = [self.id]
if type is not None:
q += ' and type=?'
v.append(type)
q += ' order by chain_index %s' % order
if limit is not None:
q += ' limit ?'
v.append(limit)
print(q, v)
res = []
cur = self.db.cursor()
cur.execute(q, v)
for row in cur:
b = Block(self.db, row[0])
res.append(b)
return res
def chain2(self, type=None, start=0, limit=None, reverse=False):
"""
Return all blocks in the chain, in sequence.
reverse = False: open block first
reverse = True: last block first
start: chain index of the first block returned
If "type" is set, only blocks of the requested type will be returned.
If "limit" is set, at most limit blocks will be returned.
"""
q = 'select block from block_info where account=?'
v = [self.id]
if type is not None:
q += ' and type=?'
v.append(type)
if reverse:
q += ' and chain_index <= ?'
else:
q += ' and chain_index >= ?'
if start < 0:
start = self.chain_length() + start
v.append(start)
order = 'desc' if reverse else 'asc'
q += ' order by chain_index %s' % order
if limit is not None:
q += ' limit ?'
v.append(limit)
print(q, v)
res = []
cur = self.db.cursor()
cur.execute(q, v)
for row in cur:
b = Block(self.db, row[0])
res.append(b)
return res
def unpocketed(self, limit=None, reverse=False):
"""Return send transactions to this account that are not pocketed yet"""
# Find send blocks to this account with no sister (receive) block
order = 'desc' if reverse else 'asc'
q = """
select block from blocks b, block_info i
where b.id=i.block and b.type=? and b.destination=? and i.sister is null
order by i.global_index %s
""" % order
v = ['send', self.id]
if limit is not None:
q += ' limit ?'
v.append(limit)
cur = self.db.cursor()
cur.execute(q, v)
res = []
for row in cur:
b = Block(self.db, row[0])
res.append(b)
return res
def name(self):
if self.name_ is not None:
return self.name_
cur = self.db.cursor()
cur.execute('select name from accounts where id=?', (self.id,))
name = next(cur)[0]
self.name_ = name
return name
# def balance()
# find last send/receive block
class Block:
def __init__(self, db, id, type=None):
assert isinstance(id, int)
self.db = db
self.sqldb = db.sqldb
self.id = id
if type is None:
cur = self.sqldb.cursor()
cur.execute('select type from blocks where id=?', (self.id,))
type = next(cur)[0]
self.type = type
self.hash_ = None
#self.previous_ = None
#self.next_ = None
self.sister_ = None
self.balance_ = None
self.amount_ = None # Only for send/open/receive blocks
self.account_ = None
self.global_index_ = None
self.chain_index_ = None
self.destination_ = None
def __repr__(self):
return '<Block #%d %s %s>' % (self.id, self.type, self.hash())
def hash(self):
if self.hash_ is not None:
return self.hash_
cur = self.sqldb.cursor()
cur.execute('select hash from blocks where id=?', (self.id,))
self.hash_ = next(cur)[0]
return self.hash_
def previous(self):
"""Return the previous block in the chain. Returns None if there is no previous block"""
cur = self.sqldb.cursor()
try:
cur.execute('select previous from blocks where id=?', (self.id,))
previd = next(cur)[0]
if previd is None:
return None
except StopIteration:
return None
cur.execute('select type from blocks where id=?', (previd,))
prevtype = next(cur)[0]
return Block(self.db, previd, prevtype)
def next(self):
"""Return the next block in the chain. Returns None if there is no next block"""
cur = self.sqldb.cursor()
try:
cur.execute('select next from blocks where id=?', (self.id,))
nextid = next(cur)[0]
if nextid is None:
return None
except StopIteration:
return None
cur.execute('select type from blocks where id=?', (nextid,))
nexttype = next(cur)[0]
return Block(self.db, nextid, nexttype)
def sister(self):
if self.sister_ is not None:
return self.sister_
cur = self.sqldb.cursor()
cur.execute('select sister from block_info where block=?', (self.id,))
sister_id = next(cur)[0]
if sister_id is not None:
self.sister_ = Block(self.db, sister_id)
return self.sister_
if False:
def other(self):
"""
Return the "sister block" for certain types of blocks:
- For a send block return the corresponding receive/open block
- For a receive/open block return the source block
"""
if self.type in ['receive', 'open']:
cur = self.sqldb.cursor()
cur.execute('select source from blocks where id=?', (self.id,))
source = next(cur)[0]
if source is None:
# Genesis block has no source
return None
b = Block(self.db, source)
assert b.type == 'send'
return b
elif self.type == 'send':
cur = self.sqldb.cursor()
try:
cur.execute('select r.id, r.type from blocks s, blocks r where r.source==s.id and s.id=?', (self.id,))
id, type = next(cur)
assert type in ['open', 'receive']
b = Block(self.db, id, type)
return b
except StopIteration:
# No destination block, i.e. not pocketed
return None
elif self.type == 'change':
# XXX Return account changed to?
return None
raise ValueError('Block type should be send, receive or open (got: %s)' % self.type)
def account(self):
if self.account_ is not None:
return self.account_
cur = self.sqldb.cursor()
cur.execute('select account from block_info where block=?', (self.id,))
id = next(cur)[0]
self.account_ = self.db.account_from_id(id)
return self.account_
def chain_index(self):
"""Index of this block in the account chain (0 = open block)"""
if self.chain_index_ is not None:
return self.chain_index_
cur = self.sqldb.cursor()
cur.execute('select chain_index from block_info where block=?', (self.id,))
idx = next(cur)[0]
self.chain_index_ = idx
return idx
def global_index(self):
"""Index of this block in topological sort of all blocks (0 = genesis block)"""
if self.global_index_ is not None:
return self.global_index_
cur = self.sqldb.cursor()
cur.execute('select global_index from block_info where block=?', (self.id,))
idx = next(cur)[0]
self.global_index_ = idx
return idx
def destination(self):
"""For a send block return destination account.
For other block types return None"""
if self.type != 'send':
return None
if self.destination_ is not None:
return self.destination_
cur = self.sqldb.cursor()
cur.execute('select destination from blocks where id=?', (self.id,))
destid = next(cur)[0]
self.destination_ = self.db.account_from_id(destid)
return self.destination_
# XXX this needs more work
def balance(self):
"""
Return the account balance at this block in the chain
"""
if self.balance_ is not None:
return self.balance_
cur = self.sqldb.cursor()
cur.execute('select balance from block_info where block=?', (self.id,))
self.balance_ = int(next(cur)[0])
return self.balance_
if self.type == 'send':
cur = self.sqldb.cursor()
cur.execute('select balance from blocks where id=?', (self.id,))
self.balance_ = next(cur)[0]
elif self.type == 'receive':
prev_balance = self.previous().balance()
other_amount = self.sister().amount()
if prev_balance is not None and other_amount is not None:
return prev_balance + other_amount
elif self.type == 'open':
if self.id == 0:
# Genesis block
                return GENESIS_BALANCE_RAW  # the genesis block holds the full raw supply
sister_block = self.sister()
if sister_block is None:
raise ValueError('Open block %d has no sister block' % self.id)
return sister_block.amount()
elif self.type == 'change':
return self.previous().balance()
return self.balance_
# XXX add balance_raw()
def amount(self):
"""
For a send/receive/open block compute the amount being transfered.
For other block types return None.
"""
# XXX if we retrieve none below we will still perform the query multiple times
if self.amount_ is not None:
return self.amount_
cur = self.sqldb.cursor()
cur.execute('select amount from block_info where block=?', (self.id,))
amount = next(cur)[0]
if amount is not None:
self.amount_ = int(amount)
return self.amount_
if self.type == 'send':
return self.previous().balance() - self.balance()
elif self.type == 'receive':
return self.sister().amount()
elif self.type == 'open':
if self.id == 0:
# Genesis block
            return GENESIS_BALANCE_RAW  # the genesis block holds the full raw supply
other_block = self.sister()
if other_block is not None:
return other_block.amount()
return None
if __name__ == '__main__':
db = NanoDatabase(sys.argv[1])
db.check()
print(db.stats())
|
the-stack_0_20792 | #from cryptomath import *
#import Cryptoalphabet as ca
#alpha = ca.Cryptoalphabet("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
def affine_encode(plaintext, a, b):
process = ""
cipherFinal = ""
modulusValue = len(alphabet)
#removes punctuation from plaintext
for s in plaintext:
if s!= '.' and s!= ',' and s!= ' ' and s!= '!' and s!= '?' and s!= '\'':
process+=s
#converts to uppercase
process = process.upper()
# converts each character using y=ax+b(mod 26)
for letter in process:
ind = alphabet.index(letter)
step1 = ind * a
step2 = step1 + b
step3 = step2 % modulusValue
char = alphabet[step3]
cipherFinal+= char
# returns the ciphertext string
return cipherFinal
def affine_decode(ciphertext, c, d):
stringproc = ""
plainFinal = ""
modulusVal = len(alphabet)
#return plainFinal
# strip punctuation from ciphertext###
#convert to uppercase###
for s in ciphertext:
if s!= '.' and s!= ',' and s!= ' ' and s!= '!' and s!= '?' and s!= '\'':
stringproc+=s
stringproc = stringproc.upper()
# converts each character using x=cy+d (mod 26)
for letters in stringproc:
index = alphabet.index(letters)
stepone = index * c
steptwo = stepone + d
stepthr = steptwo % modulusVal
chars = alphabet[stepthr]
plainFinal += chars
# note the (c,d) pair are the inverse coefficients of
#the(a,b) pair used to encode
# returns the plaintext string
return plainFinal
def affine_crack(c1, p1, c2, p2):
    # c1, p1, c2, p2 are characters; c1/c2 are the encodings of p1/p2
    # solves the linear system p1 = c * c1 + d and p2 = c * c2 + d (mod 26)
    # returns the pair (c, d) to use in affine_decode, or None if no solution can be found
    m = len(alphabet)
    y1, x1 = alphabet.index(c1.upper()), alphabet.index(p1.upper())
    y2, x2 = alphabet.index(c2.upper()), alphabet.index(p2.upper())
    inv = mod_inverse((y1 - y2) % m, m)
    if ((y1 - y2) * inv) % m != 1:
        return None
    c = (inv * (x1 - x2)) % m
    d = (x1 - c * y1) % m
    return (c, d)
def mod_inverse(a,m):
x = 1
    for i in range(m): # try every residue class modulo m
if (a*i) % m == 1:
x = i
break
return x
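# For reference: if text was encoded with y = a*x + b (mod 26), the matching decode
# coefficients for affine_decode are c = mod_inverse(a, 26) and d = (-c * b) % 26.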
#problemset1
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#test the encode function:
decrypted1 = "I KNOW' WHO.!?, PUT THE PUMPKIN ON THE CLOCK TOIQD"
cd1 = 21
dd1 = 8
print(affine_encode(decrypted1, cd1, dd1))
#decryptions:
encrypted1 = "UKVQCCZQLMRRZOLMALKUVQVRZOYFQYKRQUGT"
c1 = 5
d1 = -14
print(affine_decode(encrypted1, c1, d1))
encrypted2 = "lqpfzfaifstqufqqjjtakfnvfqnjisvkk"
c2 = -3
d2 = 15
print(affine_decode(encrypted2, c2, d2))
encrypted3 = "qgxetvepjyleexlkxujyxlksfbrqboxk"
c3 = 9
d3 = -21
print(affine_decode(encrypted3, c3, d3))
encrypted4 = "cpvvkmtsbkmtkgqlcbsepsbueqlvzcll"
c4 = 7
d4 = -14
print(affine_decode(encrypted4, c4, d4))
encrypted5 = "axhugoabuzabrloeusbxalxfubudxorhag"
c5 = 5
d5 = -18
print(affine_decode(encrypted5, c5, d5))
encrypted6 = "lqqlshykibymgsnfskvqlkmdmbmpoxqfma"
c6 = 21
d6 = -10
print(affine_decode(encrypted6, c6, d6))
encrypted7 = "mxfpyxmxyfyyxqykliimxeymfpkrryxyb" #the one letter crib
c7 = 17 #?????
d7 = -14 #????
print(affine_decode(encrypted7, c7, d7))
#test practice
print("TEST PRACTICE")
newalpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
encryptMeString = "TABLE"
theAValue = 15
theBValue = 11
print(affine_encode(encryptMeString,theAValue,theBValue)) #encoded to KLAUT
print()
newalpha2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
decryptMeString = "XMRPQ"
theAValueE = 3
theBValueE = -9
#find C Value
theCValue = 0
#find D value
theDValue = 0
#use those in the method
print(affine_decode(decryptMeString,theCValue,theDValue))
#avals
# for a in range(1,26):
# #bvals
# for b in range(-26,1):
# string = affine_decode(encrypted7,a,b)
# print("A: ", a,"B: ", b, "String: " ,string)
#examples
#i = alpha.getIndex("H")
#c = alpha.charNum(i)
#d = alpha.charNum(100)
#print i + c + d
#print(gcd(124,296))
#print(lcm(148,2560))
#print(mod_inverse(13,142))
#print(mod_inverse(8,17)) #test modulus
|
the-stack_0_20794 | '''Tests about gzippy.files.'''
import os
import subprocess
import unittest
import mock
import gzippy
from . import scratch_file, scratch_dir
class FilesTest(unittest.TestCase):
'''Tests about gzippy'''
def test_round_trip(self):
'''Can round-trip content.'''
content = b'This is some test content.'
with scratch_file('example.gz') as path:
with gzippy.open(path, 'wb') as fout:
fout.write(content)
with gzippy.open(path, 'rb') as fin:
self.assertEqual(fin.read(), content)
def test_incremental_reads(self):
'''Incremental reads'''
content = b'This is some test content.'
with scratch_file('example.gz') as path:
with gzippy.open(path, 'wb') as fout:
fout.write(content)
with gzippy.open(path, 'rb') as fin:
self.assertEqual(fin.read(10), content[:10])
def test_gzip_compatible(self):
'''Output compatible with the gzip command-line utility.'''
content = b'This is some test content.'
with scratch_dir() as path:
zipped = os.path.join(path, 'example.gz')
unzipped = os.path.join(path, 'example')
with gzippy.GzipWriter.open(zipped) as fout:
fout.write(content)
subprocess.check_call(['gunzip', zipped], stderr=subprocess.STDOUT)
with open(unzipped, 'rb') as fin:
self.assertEqual(fin.read(), content)
def test_lines(self):
'''Can read the file line by line.'''
parts = [b'some\n', b'lines\n', b'in\n', b'a\n', b'file']
content = b''.join(parts)
with scratch_file('example.gz') as path:
with gzippy.open(path, 'wb') as fout:
fout.write(content)
with gzippy.open(path) as fin:
self.assertEqual(list(fin), parts)
def test_lines_consolidation(self):
'''Consolidates lines across multiple chunks.'''
parts = [b'some\n', b'lines\n', b'in\n', b'a\n', b'file']
chunks = [b'so', b'm', b'e\nlines\n', b'i', b'n', b'\n', b'a\nfile']
content = b''.join(chunks)
with scratch_file('example.gz') as path:
with gzippy.open(path, 'wb') as fout:
fout.write(content)
with gzippy.open(path) as fin:
self.assertEqual(list(fin), parts)
def test_reader_crc_mismatch(self):
'''Raises an exception when the crc doesn't match.'''
with scratch_file('example.gz') as path:
with gzippy.open(path, 'wb') as fout:
fout.write(b'This is some test content.')
# Rewrite the last eight bytes
with open(path, 'r+b') as fout:
fout.seek(-8, 2)
fout.write(b'\xFF' * 8)
with self.assertRaises(IOError):
with gzippy.open(path) as fin:
fin.read()
def test_reader_size_mismatch(self):
'''Raises an exception when the size doesn't match.'''
with scratch_file('example.gz') as path:
with gzippy.open(path, 'wb') as fout:
fout.write(b'This is some test content.')
# Rewrite the last four bytes
with open(path, 'r+b') as fout:
fout.seek(-4, 2)
fout.write(b'\xFF' * 4)
with self.assertRaises(IOError):
with gzippy.open(path) as fin:
fin.read()
|
the-stack_0_20795 | import cudf
import dask_cudf
import gc
from typing import Type
from ...charts.core.core_chart import BaseChart
def calc_value_counts(
a_gpu, stride, min_value, data_points, custom_binning=False
):
"""
description:
main function to calculate histograms
input:
        - a_gpu: gpu array (cudf/dask_cudf Series) -> 1-column only
        - stride: bin width used when custom_binning is True
        - min_value: offset subtracted before binning
        - custom_binning: if True, values are bucketed as round((x - min_value) / stride)
        (data_points is accepted but not used in this function)
    output:
        ((values_or_bin_ids, counts), number_of_bins)
"""
if isinstance(a_gpu, dask_cudf.core.Series):
if not custom_binning:
val_count = a_gpu.value_counts()
else:
val_count = (
((a_gpu - min_value) / stride)
.round()
.astype(a_gpu.dtype)
.value_counts()
)
val_count = val_count.compute().sort_index()
else:
if not custom_binning:
val_count = a_gpu.value_counts().sort_index()
else:
val_count = (
((a_gpu - min_value) / stride)
.round()
.astype(a_gpu.dtype)
.value_counts()
.sort_index()
)
return ((val_count.index.to_array(), val_count.to_array()), len(val_count))
def calc_groupby(chart: Type[BaseChart], data, agg=None):
"""
description:
        group the data by chart.x and aggregate chart.y (or the columns given in agg)
input:
- chart
- data
output:
        the group-by result as a transposed numpy array (one row per column)
"""
temp_df = data[[chart.x]].dropna(subset=[chart.x])
if agg is None:
temp_df[chart.y] = data.dropna(subset=[chart.x])[chart.y]
if isinstance(temp_df, dask_cudf.core.DataFrame):
groupby_res = getattr(
temp_df.groupby(by=[chart.x], sort=True), chart.aggregate_fn
)()
groupby_res = groupby_res.reset_index().compute().to_pandas()
else:
groupby_res = (
temp_df.groupby(by=[chart.x], sort=True, as_index=False)
.agg({chart.y: chart.aggregate_fn})
.to_pandas()
)
else:
for key, agg_fn in agg.items():
temp_df[key] = data[key]
if isinstance(data, dask_cudf.core.DataFrame):
groupby_res = None
for key, agg_fn in agg.items():
groupby_res_temp = getattr(
temp_df[[chart.x, key]].groupby(chart.x, sort=True), agg_fn
)()
if groupby_res is None:
groupby_res = groupby_res_temp.reset_index().compute()
else:
groupby_res_temp = groupby_res_temp.reset_index().compute()
groupby_res = groupby_res.merge(
groupby_res_temp, on=chart.x
)
del groupby_res_temp
gc.collect()
groupby_res = groupby_res.to_pandas()
else:
groupby_res = (
temp_df.groupby(by=[chart.x], sort=True, as_index=False)
.agg(agg)
.to_pandas()
)
del temp_df
gc.collect()
return groupby_res.to_numpy().transpose()
def aggregated_column_unique(chart: Type[BaseChart], data):
"""
description:
        list the unique (binned) values of the chart.x column
input:
- chart
- data
output:
list_of_unique_values
"""
temp_df = cudf.DataFrame()
temp_df[chart.x] = (data[chart.x] / chart.stride) - chart.min_value
return temp_df[chart.x].unique().to_pandas().tolist()
|
the-stack_0_20798 | """Regular expression tests specific to _sre.py and accumulated during TDD."""
import os
import py
from py.test import raises, skip
from pypy.interpreter.gateway import app2interp_temp
def init_app_test(cls, space):
cls.w_s = space.appexec(
[space.wrap(os.path.realpath(os.path.dirname(__file__)))],
"""(this_dir):
import sys
# Uh-oh, ugly hack
sys.path.insert(0, this_dir)
try:
import support_test_app_sre
return support_test_app_sre
finally:
sys.path.pop(0)
""")
class AppTestSrePy:
def test_magic(self):
import _sre, sre_constants
assert sre_constants.MAGIC == _sre.MAGIC
def test_codesize(self):
import _sre
assert _sre.getcodesize() == _sre.CODESIZE
class AppTestSrePattern:
def setup_class(cls):
# This imports support_test_sre as the global "s"
init_app_test(cls, cls.space)
spaceconfig = {'usemodules': ['itertools']}
def test_copy(self):
# copy support is disabled by default in _sre.c
import re
p = re.compile("b")
raises(TypeError, p.__copy__) # p.__copy__() should raise
raises(TypeError, p.__deepcopy__) # p.__deepcopy__() should raise
def test_creation_attributes(self):
import re
pattern_string = b"(b)l(?P<g>a)"
p = re.compile(pattern_string, re.I | re.M)
assert pattern_string == p.pattern
assert re.I | re.M == p.flags
assert 2 == p.groups
assert {"g": 2} == p.groupindex
raises(TypeError, "p.groupindex['g'] = 3")
def test_repeat_minmax_overflow(self):
import re
string = "x" * 100000
assert re.match(r".{%d}" % (self.s.MAXREPEAT - 1), string) is None
assert re.match(r".{,%d}" % (self.s.MAXREPEAT - 1), string).span() == (0, 100000)
assert re.match(r".{%d,}?" % (self.s.MAXREPEAT - 1), string) is None
import sys
if sys.version_info[:3] <= (3, 2, 3):
# XXX: These are fixed in 3.2.4 or so
return
raises(OverflowError, re.compile, r".{%d}" % self.s.MAXREPEAT)
raises(OverflowError, re.compile, r".{,%d}" % self.s.MAXREPEAT)
raises(OverflowError, re.compile, r".{%d,}?" % self.s.MAXREPEAT)
def test_match_none(self):
import re
p = re.compile("bla")
none_matches = ["b", "bl", "blub", "jupidu"]
for string in none_matches:
assert None == p.match(string)
def test_pos_endpos(self):
import re
# XXX maybe fancier tests here
p = re.compile("bl(a)")
tests = [("abla", 0, 4), ("abla", 1, 4), ("ablaa", 1, 4)]
for string, pos, endpos in tests:
assert p.search(string, pos, endpos)
tests = [("abla", 0, 3), ("abla", 2, 4)]
for string, pos, endpos in tests:
assert not p.search(string, pos, endpos)
def test_findall(self):
import re
assert ["b"] == re.findall("b", "bla")
assert ["a", "u"] == re.findall("b(.)", "abalbus")
assert [("a", "l"), ("u", "s")] == re.findall("b(.)(.)", "abalbus")
assert [("a", ""), ("s", "s")] == re.findall("b(a|(s))", "babs")
assert ['', '', '', ''] == re.findall("X??", "1X4") # changes in 3.7
def test_findall_unicode(self):
import re
assert [u"\u1234"] == re.findall(u"\u1234", u"\u1000\u1234\u2000")
assert ["a", "u"] == re.findall("b(.)", "abalbus")
assert [("a", "l"), ("u", "s")] == re.findall("b(.)(.)", "abalbus")
assert [("a", ""), ("s", "s")] == re.findall("b(a|(s))", "babs")
assert [u"xyz"] == re.findall(u".*yz", u"xyz")
def test_finditer(self):
import re
it = re.finditer("b(.)", "brabbel")
assert "br" == next(it).group(0)
assert "bb" == next(it).group(0)
raises(StopIteration, next, it)
def test_split(self):
import re
assert ["a", "o", "u", ""] == re.split("b", "abobub")
assert ["a", "o", "ub"] == re.split("b", "abobub", 2)
assert ['', 'a', 'l', 'a', 'lla'] == re.split("b(a)", "balballa")
assert ['', 'a', None, 'l', 'u', None, 'lla'] == (
re.split("b([ua]|(s))", "balbulla"))
assert ['Hello \udce2\udc9c\udc93', ''] == re.split(r'\r\n|\r|\n',
'Hello \udce2\udc9c\udc93\n')
def test_weakref(self):
import re, _weakref
_weakref.ref(re.compile(r""))
def test_match_compat(self):
import re
res = re.match(r'(a)|(b)', 'b').start(1)
assert res == -1
def test_pattern_check(self):
import _sre
raises(TypeError, _sre.compile, {}, 0, [])
def test_fullmatch(self):
import re
assert re.compile(r"ab*c").fullmatch("abbcdef") is None
assert re.compile(r"ab*c").fullmatch("abbc") is not None
assert re.fullmatch(r"ab*c", "abbbcdef") is None
assert re.fullmatch(r"ab*c", "abbbc") is not None
def test_repr(self):
import re
r = re.compile(r'f(o"\d)', 0)
assert repr(r) == (
r"""re.compile('f(o"\\d)')""")
r = re.compile(r'f(o"\d)', re.IGNORECASE|re.DOTALL|re.VERBOSE)
assert repr(r) == (
r"""re.compile('f(o"\\d)', re.IGNORECASE|re.DOTALL|re.VERBOSE)""")
def test_pattern_compare(self):
import re
pattern1 = re.compile('abc', re.IGNORECASE)
# equal to itself
assert pattern1 == pattern1
assert not(pattern1 != pattern1)
# equal
re.purge()
pattern2 = re.compile('abc', re.IGNORECASE)
assert hash(pattern2) == hash(pattern1)
assert pattern2 == pattern1
# not equal: different pattern
re.purge()
pattern3 = re.compile('XYZ', re.IGNORECASE)
# warranty that hash values are different
assert pattern3 != pattern1
# not equal: different flag (flags=0)
re.purge()
pattern4 = re.compile('abc')
assert pattern4 != pattern1
# only == and != comparison operators are supported
raises(TypeError, "pattern1 < pattern2")
class AppTestSreMatch:
spaceconfig = dict(usemodules=('array', ))
def test_copy(self):
import re
# copy support is disabled by default in _sre.c
m = re.match("bla", "bla")
raises(TypeError, m.__copy__)
raises(TypeError, m.__deepcopy__)
def test_match_attributes(self):
import re
c = re.compile("bla")
m = c.match("blastring")
assert "blastring" == m.string
assert c == m.re
assert 0 == m.pos
assert 9 == m.endpos
assert None == m.lastindex
assert None == m.lastgroup
assert ((0, 3),) == m.regs
def test_match_attributes_with_groups(self):
import re
m = re.search("a(b)(?P<name>c)", "aabcd")
assert 0 == m.pos
assert 5 == m.endpos
assert 2 == m.lastindex
assert "name" == m.lastgroup
assert ((1, 4), (2, 3), (3, 4)) == m.regs
def test_regs_overlapping_groups(self):
import re
m = re.match("a((b)c)", "abc")
assert ((0, 3), (1, 3), (1, 2)) == m.regs
def test_start_end_span(self):
import re
m = re.search("a((b)c)", "aabcd")
assert (1, 4) == (m.start(), m.end())
assert (1, 4) == m.span()
assert (2, 4) == (m.start(1), m.end(1))
assert (2, 4) == m.span(1)
assert (2, 3) == (m.start(2), m.end(2))
assert (2, 3) == m.span(2)
raises(IndexError, m.start, 3)
raises(IndexError, m.end, 3)
raises(IndexError, m.span, 3)
raises(IndexError, m.start, -1)
def test_groups(self):
import re
m = re.search("a((.).)", "aabcd")
assert ("ab", "a") == m.groups()
assert ("ab", "a") == m.groups(True)
m = re.search("a((\d)|(\s))", "aa1b")
assert ("1", "1", None) == m.groups()
assert ("1", "1", True) == m.groups(True)
m = re.search("a((\d)|(\s))", "a ")
assert (" ", None, " ") == m.groups()
m = re.match("(a)", "a")
assert ("a",) == m.groups()
def test_groupdict(self):
import re
m = re.search("a((.).)", "aabcd")
assert {} == m.groupdict()
m = re.search("a((?P<first>.).)", "aabcd")
assert {"first": "a"} == m.groupdict()
m = re.search("a((?P<first>\d)|(?P<second>\s))", "aa1b")
assert {"first": "1", "second": None} == m.groupdict()
assert {"first": "1", "second": True} == m.groupdict(True)
def test_group(self):
import re
m = re.search("a((?P<first>\d)|(?P<second>\s))", "aa1b")
assert "a1" == m.group()
assert ("1", "1", None) == m.group(1, 2, 3)
assert ("1", None) == m.group("first", "second")
raises(IndexError, m.group, 1, 4)
assert ("1", None) == m.group(1, "second")
raises(IndexError, m.group, 'foobarbaz')
raises(IndexError, m.group, 'first', 'foobarbaz')
def test_group_takes_long(self):
import re
import sys
if sys.version_info < (2, 7, 9):
skip()
assert re.match("(foo)", "foo").group(1) == "foo"
exc = raises(IndexError, re.match("", "").group, sys.maxsize + 1)
assert str(exc.value) == "no such group"
def test_group_takes_index(self):
import re
class Index:
def __init__(self, value):
self.value = value
def __index__(self):
return self.value
assert re.match("(foo)", "foo").group(Index(1)) == "foo"
def test_getitem(self):
import re
assert re.match("(foo)bar", "foobar")[1] == "foo"
def test_expand(self):
import re
m = re.search("a(..)(?P<name>..)", "ab1bc")
assert "b1bcbc" == m.expand(r"\1\g<name>\2")
def test_sub_bytes(self):
import re
assert b"bbbbb" == re.sub(b"a", b"b", b"ababa")
assert (b"bbbbb", 3) == re.subn(b"a", b"b", b"ababa")
assert b"dddd" == re.sub(b"[abc]", b"d", b"abcd")
assert (b"dddd", 3) == re.subn(b"[abc]", b"d", b"abcd")
assert b"rbd\nbr\n" == re.sub(b"a(.)", br"b\1\n", b"radar")
assert (b"rbd\nbr\n", 2) == re.subn(b"a(.)", br"b\1\n", b"radar")
assert (b"bbbba", 2) == re.subn(b"a", b"b", b"ababa", 2)
def test_sub_unicode(self):
import re
assert isinstance(re.sub("a", "b", ""), str)
# the input is returned unmodified if no substitution is performed,
# which (if interpreted literally, as CPython does) gives the
# following strangeish rules:
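# - str pattern and str replacement applied to str input give a str result
# - mixing str and bytes between pattern, replacement and input raises TypeError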
assert isinstance(re.sub("a", "b", "diwoiioamoi"), str)
raises(TypeError, re.sub, "a", "b", b"diwoiiobmoi")
raises(TypeError, re.sub, 'x', b'y', b'x')
def test_sub_callable(self):
import re
def call_me(match):
ret = ""
for char in match.group():
ret += chr(ord(char) + 1)
return ret
assert ("bbbbb", 3) == re.subn("a", call_me, "ababa")
def test_sub_callable_returns_none(self):
import re
def call_me(match):
return None
assert "acd" == re.sub("b", call_me, "abcd")
def test_sub_subclass_of_str(self):
import re
class MyString(str):
pass
class MyBytes(bytes):
pass
s1 = MyString('zz')
s2 = re.sub('aa', 'bb', s1)
assert s2 == s1
assert type(s2) is str # and not MyString
u1 = MyBytes(b'zz')
u2 = re.sub(b'aa', b'bb', u1)
assert u2 == u1
assert type(u2) is bytes # and not MyBytes
def test_sub_bug(self):
import re
assert re.sub('=\w{2}', 'x', '=CA') == 'x'
def test_sub_emptymatch(self):
import re
assert re.sub(r"b*", "*", "abc") == "*a*c*" # changes in 3.7
def test_sub_bytearray(self):
import re
assert re.sub(b'a', bytearray(b'A'), b'axa') == b'AxA'
# this fails on CPython 3.5:
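# the replacement is still parsed for escape sequences when passed as a
# bytearray, so the two characters b"\\n" come out as a single newline byte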
assert re.sub(b'a', bytearray(b'\\n'), b'axa') == b'\nx\n'
def test_match_array(self):
import re, array
a = array.array('b', b'hello')
m = re.match(b'hel+', a)
assert m.end() == 4
def test_match_typeerror(self):
import re
raises(TypeError, re.match, 'hel+', list('hello'))
def test_match_repr(self):
import re
m = re.search("ab+c", "xabbbcd")
assert repr(m) == "<_sre.SRE_Match object; span=(1, 6), match='abbbc'>"
def test_group_bugs(self):
import re
r = re.compile(r"""
\&(?:
(?P<escaped>\&) |
(?P<named>[_a-z][_a-z0-9]*) |
{(?P<braced>[_a-z][_a-z0-9]*)} |
(?P<invalid>)
)
""", re.IGNORECASE | re.VERBOSE)
matches = list(r.finditer('this &gift is for &{who} &&'))
assert len(matches) == 3
assert matches[0].groupdict() == {'escaped': None,
'named': 'gift',
'braced': None,
'invalid': None}
assert matches[1].groupdict() == {'escaped': None,
'named': None,
'braced': 'who',
'invalid': None}
assert matches[2].groupdict() == {'escaped': '&',
'named': None,
'braced': None,
'invalid': None}
matches = list(r.finditer('&who likes &{what)')) # note the ')'
assert len(matches) == 2
assert matches[0].groupdict() == {'escaped': None,
'named': 'who',
'braced': None,
'invalid': None}
assert matches[1].groupdict() == {'escaped': None,
'named': None,
'braced': None,
'invalid': ''}
def test_sub_typecheck(self):
import re
KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
raises(TypeError, KEYCRE.sub, "hello", {"%(": 1})
def test_sub_matches_stay_valid(self):
import re
matches = []
def callback(match):
matches.append(match)
return "x"
result = re.compile(r"[ab]").sub(callback, "acb")
assert result == "xcx"
assert len(matches) == 2
assert matches[0].group() == "a"
assert matches[1].group() == "b"
class AppTestSreScanner:
spaceconfig = {'usemodules': ['itertools']}
def test_scanner_attributes(self):
import re
p = re.compile("bla")
s = p.scanner("blablubla")
assert p == s.pattern
def test_scanner_match(self):
import re
p = re.compile(".").scanner("bla")
assert ("b", "l", "a") == (p.match().group(0),
p.match().group(0), p.match().group(0))
assert None == p.match()
def test_scanner_match_detail(self):
import re
p = re.compile("a").scanner("aaXaa")
assert "a" == p.match().group(0)
assert "a" == p.match().group(0)
assert None == p.match()
# the rest has been changed somewhere between Python 2.6.9
# and Python 2.7.18. PyPy now follows the 2.7.18 behavior
assert None == p.match()
assert None == p.match()
def test_scanner_search(self):
import re
p = re.compile("\d").scanner("bla23c5a")
assert ("2", "3", "5") == (p.search().group(0),
p.search().group(0), p.search().group(0))
assert None == p.search()
def test_scanner_zero_width_match(self):
import re, sys
if sys.version_info[:2] == (2, 3):
skip("2.3 is different here")
p = re.compile(".*").scanner("bla")
assert ("bla", "") == (p.search().group(0), p.search().group(0))
assert None == p.search()
def test_scanner_empty_match(self):
import re, sys
p = re.compile("a??").scanner("bac")
assert ("", "", "", "") == (p.search().group(0), p.search().group(0),
p.search().group(0), p.search().group(0))
assert None == p.search()
def test_no_pattern(self):
import sre_compile, sre_parse
sre_pattern = sre_compile.compile(
sre_parse.SubPattern(sre_parse.Pattern()))
assert sre_pattern.scanner('s') is not None
class AppTestGetlower:
spaceconfig = dict(usemodules=('_locale',))
def setup_class(cls):
# This imports support_test_sre as the global "s"
init_app_test(cls, cls.space)
def setup_method(self, method):
import locale
locale.setlocale(locale.LC_ALL, (None, None))
def teardown_method(self, method):
import locale
locale.setlocale(locale.LC_ALL, (None, None))
def test_getlower_no_flags(self):
s = self.s
UPPER_AE = "\xc4"
s.assert_lower_equal([("a", "a"), ("A", "a"), (UPPER_AE, UPPER_AE),
("\u00c4", "\u00c4"), ("\u4444", "\u4444")], 0)
def test_getlower_locale(self):
s = self.s
import locale, sre_constants
UPPER_AE = "\xc4"
LOWER_AE = "\xe4"
UPPER_PI = "\u03a0"
try:
locale.setlocale(locale.LC_ALL, "de_DE")
s.assert_lower_equal([("a", "a"), ("A", "a"), (UPPER_AE, LOWER_AE),
("\u00c4", "\u00e4"), (UPPER_PI, UPPER_PI)],
sre_constants.SRE_FLAG_LOCALE)
except locale.Error:
# skip test
skip("unsupported locale de_DE")
def test_getlower_unicode(self):
s = self.s
import sre_constants
UPPER_AE = "\xc4"
LOWER_AE = "\xe4"
UPPER_PI = "\u03a0"
LOWER_PI = "\u03c0"
s.assert_lower_equal([("a", "a"), ("A", "a"), (UPPER_AE, LOWER_AE),
("\u00c4", "\u00e4"), (UPPER_PI, LOWER_PI),
("\u4444", "\u4444")], sre_constants.SRE_FLAG_UNICODE)
class AppTestSimpleSearches:
spaceconfig = {'usemodules': ('array', 'itertools')}
def test_search_simple_literal(self):
import re
assert re.search("bla", "bla")
assert re.search("bla", "blab")
assert not re.search("bla", "blu")
def test_search_simple_ats(self):
import re
assert re.search("^bla", "bla")
assert re.search("^bla", "blab")
assert not re.search("^bla", "bbla")
assert re.search("bla$", "abla")
assert re.search("bla$", "bla\n")
assert not re.search("bla$", "blaa")
def test_search_simple_boundaries(self):
import re
UPPER_PI = "\u03a0"
assert re.search(r"bla\b", "bla")
assert re.search(r"bla\b", "bla ja")
assert re.search(r"bla\b", "bla%s" % UPPER_PI, re.ASCII)
assert not re.search(r"bla\b", "blano")
assert not re.search(r"bla\b", "bla%s" % UPPER_PI, re.UNICODE)
def test_search_simple_categories(self):
import re
LOWER_PI = "\u03c0"
INDIAN_DIGIT = "\u0966"
EM_SPACE = "\u2001"
LOWER_AE = "\xe4"
assert re.search(r"bla\d\s\w", "bla3 b")
assert re.search(r"b\d", "b%s" % INDIAN_DIGIT, re.UNICODE)
assert not re.search(r"b\D", "b%s" % INDIAN_DIGIT, re.UNICODE)
assert re.search(r"b\s", "b%s" % EM_SPACE, re.UNICODE)
assert not re.search(r"b\S", "b%s" % EM_SPACE, re.UNICODE)
assert re.search(r"b\w", "b%s" % LOWER_PI, re.UNICODE)
assert not re.search(r"b\W", "b%s" % LOWER_PI, re.UNICODE)
assert re.search(r"b\w", "b%s" % LOWER_AE, re.UNICODE)
def test_search_simple_any(self):
import re
assert re.search(r"b..a", "jboaas")
assert not re.search(r"b..a", "jbo\nas")
assert re.search(r"b..a", "jbo\nas", re.DOTALL)
def test_search_simple_in(self):
import re
UPPER_PI = "\u03a0"
LOWER_PI = "\u03c0"
EM_SPACE = "\u2001"
LINE_SEP = "\u2028"
assert re.search(r"b[\da-z]a", "bb1a")
assert re.search(r"b[\da-z]a", "bbsa")
assert not re.search(r"b[\da-z]a", "bbSa")
assert re.search(r"b[^okd]a", "bsa")
assert not re.search(r"b[^okd]a", "bda")
assert re.search("b[%s%s%s]a" % (LOWER_PI, UPPER_PI, EM_SPACE),
"b%sa" % UPPER_PI) # bigcharset
assert re.search("b[%s%s%s]a" % (LOWER_PI, UPPER_PI, EM_SPACE),
"b%sa" % EM_SPACE)
assert not re.search("b[%s%s%s]a" % (LOWER_PI, UPPER_PI, EM_SPACE),
"b%sa" % LINE_SEP)
def test_search_simple_literal_ignore(self):
import re
UPPER_PI = "\u03a0"
LOWER_PI = "\u03c0"
assert re.search(r"ba", "ba", re.IGNORECASE)
assert re.search(r"ba", "BA", re.IGNORECASE)
assert re.search("b%s" % UPPER_PI, "B%s" % LOWER_PI,
re.IGNORECASE | re.UNICODE)
def test_search_simple_in_ignore(self):
import re
UPPER_PI = "\u03a0"
LOWER_PI = "\u03c0"
assert re.search(r"ba[A-C]", "bac", re.IGNORECASE)
assert re.search(r"ba[a-c]", "baB", re.IGNORECASE)
assert re.search("ba[%s]" % UPPER_PI, "ba%s" % LOWER_PI,
re.IGNORECASE | re.UNICODE)
assert re.search(r"ba[^A-C]", "bar", re.IGNORECASE)
assert not re.search(r"ba[^A-C]", "baA", re.IGNORECASE)
assert not re.search(r"ba[^A-C]", "baa", re.IGNORECASE)
def test_search_simple_branch(self):
import re
assert re.search(r"a(bb|d[ef])b", "adeb")
assert re.search(r"a(bb|d[ef])b", "abbb")
def test_search_simple_repeat_one(self):
import re
assert re.search(r"aa+", "aa") # empty tail
assert re.search(r"aa+ab", "aaaab") # backtracking
assert re.search(r"aa*ab", "aab") # empty match
assert re.search(r"a[bc]+", "abbccb")
assert "abbcb" == re.search(r"a.+b", "abbcb\nb").group()
assert "abbcb\nb" == re.search(r"a.+b", "abbcb\nb", re.DOTALL).group()
assert re.search(r"ab+c", "aBbBbBc", re.IGNORECASE)
assert not re.search(r"aa{2,3}", "aa") # string too short
assert not re.search(r"aa{2,3}b", "aab") # too few repetitions
assert not re.search(r"aa+b", "aaaac") # tail doesn't match
def test_search_simple_min_repeat_one(self):
import re
assert re.search(r"aa+?", "aa") # empty tail
assert re.search(r"aa+?ab", "aaaab") # forward tracking
assert re.search(r"a[bc]+?", "abbccb")
assert "abb" == re.search(r"a.+?b", "abbcb\nb").group()
assert "a\nbb" == re.search(r"a.+b", "a\nbbc", re.DOTALL).group()
assert re.search(r"ab+?c", "aBbBbBc", re.IGNORECASE)
assert not re.search(r"aa+?", "a") # string too short
assert not re.search(r"aa{2,3}?b", "aab") # too few repetitions
assert not re.search(r"aa+?b", "aaaac") # tail doesn't match
assert re.match(".*?cd", "abcabcde").end(0) == 7
def test_search_simple_repeat_maximizing(self):
import re
assert not re.search(r"(ab){3,5}", "abab")
assert not re.search(r"(ab){3,5}", "ababa")
assert re.search(r"(ab){3,5}", "ababab")
assert re.search(r"(ab){3,5}", "abababababab").end(0) == 10
assert "ad" == re.search(r"(a.)*", "abacad").group(1)
assert ("abcg", "cg") == (
re.search(r"(ab(c.)*)+", "ababcecfabcg").groups())
assert ("cg", "cg") == (
re.search(r"(ab|(c.))+", "abcg").groups())
assert ("ab", "cf") == (
re.search(r"((c.)|ab)+", "cfab").groups())
assert re.search(r".*", "")
def test_search_simple_repeat_minimizing(self):
import re
assert not re.search(r"(ab){3,5}?", "abab")
assert re.search(r"(ab){3,5}?", "ababab")
assert re.search(r"b(a){3,5}?b", "baaaaab")
assert not re.search(r"b(a){3,5}?b", "baaaaaab")
assert re.search(r"a(b(.)+?)*", "abdbebb")
def test_search_simple_groupref(self):
import re
UPPER_PI = "\u03a0"
LOWER_PI = "\u03c0"
assert re.match(r"((ab)+)c\1", "ababcabab")
assert not re.match(r"((ab)+)c\1", "ababcab")
assert not re.search(r"(a|(b))\2", "aa")
assert re.match(r"((ab)+)c\1", "aBAbcAbaB", re.IGNORECASE)
assert re.match(r"((a.)+)c\1", "a%sca%s" % (UPPER_PI, LOWER_PI),
re.IGNORECASE | re.UNICODE)
def test_search_simple_groupref_exists(self):
import re, sys
if not sys.version_info[:2] == (2, 3):
assert re.search(r"(<)?bla(?(1)>)", "<bla>")
assert re.search(r"(<)?bla(?(1)>)", "bla")
assert not re.match(r"(<)?bla(?(1)>)", "<bla")
assert re.search(r"(<)?bla(?(1)>|u)", "blau")
def test_search_simple_assert(self):
import re
assert re.search(r"b(?=\d\d).{3,}", "b23a")
assert not re.search(r"b(?=\d\d).{3,}", "b2aa")
assert re.search(r"b(?<=\d.)a", "2ba")
assert not re.search(r"b(?<=\d.)a", "ba")
def test_search_simple_assert_not(self):
import re
assert re.search(r"b(?<!\d.)a", "aba")
assert re.search(r"b(?<!\d.)a", "ba")
assert not re.search(r"b(?<!\d.)a", "11ba")
class AppTestMarksStack:
spaceconfig = {'usemodules': ['itertools']}
def test_mark_stack_branch(self):
import re
m = re.match("b(.)a|b.b", "bob")
assert None == m.group(1)
assert None == m.lastindex
def test_mark_stack_repeat_one(self):
import re
m = re.match("\d+1((2)|(3))4", "2212413")
assert ("2", "2", None) == m.group(1, 2, 3)
assert 1 == m.lastindex
def test_mark_stack_min_repeat_one(self):
import re
m = re.match("\d+?1((2)|(3))44", "221341244")
assert ("2", "2", None) == m.group(1, 2, 3)
assert 1 == m.lastindex
def test_mark_stack_max_until(self):
import re
m = re.match("(\d)+1((2)|(3))4", "2212413")
assert ("2", "2", None) == m.group(2, 3, 4)
assert 2 == m.lastindex
def test_mark_stack_min_until(self):
import re
m = re.match("(\d)+?1((2)|(3))44", "221341244")
assert ("2", "2", None) == m.group(2, 3, 4)
assert 2 == m.lastindex
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
# test copied from CPython test
import re
assert re.match('(a)(?:(?=(b)*)c)*', 'abb').groups() == ('a', None)
assert re.match('(a)((?!(b)*))*', 'abb').groups() == ('a', None, None)
class AppTestOpcodes:
spaceconfig = dict(usemodules=('_locale',))
def setup_class(cls):
if cls.runappdirect:
py.test.skip("can only be run on py.py: _sre opcodes don't match")
# This imports support_test_sre as the global "s"
init_app_test(cls, cls.space)
def test_length_optimization(self):
s = self.s
pattern = "bla"
opcodes = [s.OPCODES["info"], 3, 3, len(pattern)] \
+ s.encode_literal(pattern) + [s.OPCODES["success"]]
s.assert_no_match(opcodes, ["b", "bl", "ab"])
def test_literal(self):
s = self.s
opcodes = s.encode_literal("bla") + [s.OPCODES["success"]]
s.assert_no_match(opcodes, ["bl", "blu"])
s.assert_match(opcodes, ["bla", "blab", "cbla", "bbla"])
def test_not_literal(self):
s = self.s
opcodes = s.encode_literal("b") \
+ [s.OPCODES["not_literal"], ord("a"), s.OPCODES["success"]]
s.assert_match(opcodes, ["bx", "ababy"])
s.assert_no_match(opcodes, ["ba", "jabadu"])
def test_unknown(self):
s = self.s
raises(RuntimeError, s.search, [55555], "b")
def test_at_beginning(self):
s = self.s
for atname in ["at_beginning", "at_beginning_string"]:
opcodes = [s.OPCODES["at"], s.ATCODES[atname]] \
+ s.encode_literal("bla") + [s.OPCODES["success"]]
s.assert_match(opcodes, "bla")
s.assert_no_match(opcodes, "abla")
def test_at_beginning_line(self):
s = self.s
opcodes = [s.OPCODES["at"], s.ATCODES["at_beginning_line"]] \
+ s.encode_literal("bla") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["bla", "x\nbla"])
s.assert_no_match(opcodes, ["abla", "abla\nubla"])
def test_at_end(self):
s = self.s
opcodes = s.encode_literal("bla") \
+ [s.OPCODES["at"], s.ATCODES["at_end"], s.OPCODES["success"]]
s.assert_match(opcodes, ["bla", "bla\n"])
s.assert_no_match(opcodes, ["blau", "abla\nblau"])
def test_at_end_line(self):
s = self.s
opcodes = s.encode_literal("bla") \
+ [s.OPCODES["at"], s.ATCODES["at_end_line"], s.OPCODES["success"]]
s.assert_match(opcodes, ["bla\n", "bla\nx", "bla"])
s.assert_no_match(opcodes, ["blau"])
def test_at_end_string(self):
s = self.s
opcodes = s.encode_literal("bla") \
+ [s.OPCODES["at"], s.ATCODES["at_end_string"], s.OPCODES["success"]]
s.assert_match(opcodes, "bla")
s.assert_no_match(opcodes, ["blau", "bla\n"])
def test_at_boundary(self):
s = self.s
for atname in "at_boundary", "at_loc_boundary", "at_uni_boundary":
opcodes = s.encode_literal("bla") \
+ [s.OPCODES["at"], s.ATCODES[atname], s.OPCODES["success"]]
s.assert_match(opcodes, ["bla", "bla ha", "bla,x"])
s.assert_no_match(opcodes, ["blaja", ""])
opcodes = [s.OPCODES["at"], s.ATCODES[atname]] \
+ s.encode_literal("bla") + [s.OPCODES["success"]]
assert s.search(opcodes, "bla")
s.assert_no_match(opcodes, "")
def test_at_non_boundary(self):
s = self.s
for atname in "at_non_boundary", "at_loc_non_boundary", "at_uni_non_boundary":
opcodes = s.encode_literal("bla") \
+ [s.OPCODES["at"], s.ATCODES[atname], s.OPCODES["success"]]
assert s.search(opcodes, "blan")
s.assert_no_match(opcodes, ["bla ja", "bla"])
def test_at_loc_boundary(self):
s = self.s
import locale
try:
s.void_locale()
opcodes1 = s.encode_literal("bla") \
+ [s.OPCODES["at"], s.ATCODES["at_loc_boundary"], s.OPCODES["success"]]
opcodes2 = s.encode_literal("bla") \
+ [s.OPCODES["at"], s.ATCODES["at_loc_non_boundary"], s.OPCODES["success"]]
assert s.search(opcodes1, "bla\xFC")
s.assert_no_match(opcodes2, "bla\xFC")
oldlocale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, "de_DE")
s.assert_no_match(opcodes1, "bla\xFC")
assert s.search(opcodes2, "bla\xFC")
locale.setlocale(locale.LC_ALL, oldlocale)
except locale.Error:
# skip test
skip("locale error")
def test_at_uni_boundary(self):
s = self.s
UPPER_PI = "\u03a0"
LOWER_PI = "\u03c0"
opcodes = s.encode_literal("bl") + [s.OPCODES["any"], s.OPCODES["at"],
s.ATCODES["at_uni_boundary"], s.OPCODES["success"]]
s.assert_match(opcodes, ["bla ha", "bl%s ja" % UPPER_PI])
s.assert_no_match(opcodes, ["bla%s" % LOWER_PI])
opcodes = s.encode_literal("bl") + [s.OPCODES["any"], s.OPCODES["at"],
s.ATCODES["at_uni_non_boundary"], s.OPCODES["success"]]
s.assert_match(opcodes, ["blaha", "bl%sja" % UPPER_PI])
def test_category_loc_word(self):
s = self.s
import locale
try:
s.void_locale()
opcodes1 = s.encode_literal("b") \
+ [s.OPCODES["category"], s.CHCODES["category_loc_word"], s.OPCODES["success"]]
opcodes2 = s.encode_literal("b") \
+ [s.OPCODES["category"], s.CHCODES["category_loc_not_word"], s.OPCODES["success"]]
assert not s.search(opcodes1, u"b\xFC")
assert s.search(opcodes2, u"b\xFC")
locale.setlocale(locale.LC_ALL, "de_DE")
assert s.search(opcodes1, u"b\xFC")
assert not s.search(opcodes2, u"b\xFC")
s.void_locale()
except locale.Error:
# skip test
skip("locale error")
def test_any(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["any"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["b a", "bla", "bboas"])
s.assert_no_match(opcodes, ["b\na", "oba", "b"])
def test_any_all(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["any_all"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["b a", "bla", "bboas", "b\na"])
s.assert_no_match(opcodes, ["oba", "b"])
def test_in_failure(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["in"], 2, s.OPCODES["failure"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_no_match(opcodes, ["ba", "bla"])
def test_in_literal(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["in"], 7] \
+ s.encode_literal("la") + [s.OPCODES["failure"], s.OPCODES["failure"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["bla", "baa", "blbla"])
s.assert_no_match(opcodes, ["ba", "bja", "blla"])
def test_in_category(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["in"], 6, s.OPCODES["category"],
s.CHCODES["category_digit"], s.OPCODES["category"], s.CHCODES["category_space"],
s.OPCODES["failure"]] + s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["b1a", "b a", "b4b\tas"])
s.assert_no_match(opcodes, ["baa", "b5"])
def test_in_charset_ucs2(self):
import _sre
if _sre.CODESIZE != 2:
return
s = self.s
# charset bitmap for characters "l" and "h"
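# 256 bits packed into sixteen 16-bit code units; "h" (0x68) and "l" (0x6C)
# both fall into unit 6, at bit 8 and bit 12, hence 2**8 + 2**12 == 4352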
bitmap = 6 * [0] + [4352] + 9 * [0]
opcodes = s.encode_literal("b") + [s.OPCODES["in"], 19, s.OPCODES["charset"]] \
+ bitmap + [s.OPCODES["failure"]] + s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["bla", "bha", "blbha"])
s.assert_no_match(opcodes, ["baa", "bl"])
def _test_in_bigcharset_ucs2(self):
# disabled because this actually only works on big-endian machines
if _sre.CODESIZE != 2:
return
s = self.s
# constructing bigcharset for lowercase pi (\u03c0)
UPPER_PI = u"\u03a0"
LOWER_PI = u"\u03c0"
bitmap = 6 * [0] + [4352] + 9 * [0]
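# bigcharset layout (big-endian, hence the note above): a 256-byte table
# mapping each high byte to a block index, followed by one 256-bit bitmap
# per block; block 1 below has bit 0xC0 set, which selects LOWER_PI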
opcodes = s.encode_literal("b") + [s.OPCODES["in"], 164, s.OPCODES["bigcharset"], 2] \
+ [0, 1] + 126 * [0] \
+ 16 * [0] \
+ 12 * [0] + [1] + 3 * [0] \
+ [s.OPCODES["failure"]] + s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, [u"b%sa" % LOWER_PI])
s.assert_no_match(opcodes, [u"b%sa" % UPPER_PI])
# XXX bigcharset test for ucs4 missing here
def test_in_range(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["in"], 5, s.OPCODES["range"],
ord("1"), ord("9"), s.OPCODES["failure"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["b1a", "b56b7aa"])
s.assert_no_match(opcodes, ["baa", "b5"])
def test_in_negate(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["in"], 7, s.OPCODES["negate"]] \
+ s.encode_literal("la") + [s.OPCODES["failure"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["b1a", "bja", "bubua"])
s.assert_no_match(opcodes, ["bla", "baa", "blbla"])
def test_literal_ignore(self):
s = self.s
opcodes = s.encode_literal("b") \
+ [s.OPCODES["literal_ignore"], ord("a"), s.OPCODES["success"]]
s.assert_match(opcodes, ["ba", "bA"])
s.assert_no_match(opcodes, ["bb", "bu"])
def test_not_literal_ignore(self):
s = self.s
UPPER_PI = "\u03a0"
opcodes = s.encode_literal("b") \
+ [s.OPCODES["not_literal_ignore"], ord("a"), s.OPCODES["success"]]
s.assert_match(opcodes, ["bb", "bu", "b%s" % UPPER_PI])
s.assert_no_match(opcodes, ["ba", "bA"])
def test_in_ignore(self):
s = self.s
opcodes = s.encode_literal("b") + [s.OPCODES["in_ignore"], 8] \
+ s.encode_literal("abc") + [s.OPCODES["failure"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["baa", "bAa", "bbbBa"])
s.assert_no_match(opcodes, ["ba", "bja", "blla"])
def test_in_jump_info(self):
s = self.s
for opname in "jump", "info":
opcodes = s.encode_literal("b") \
+ [s.OPCODES[opname], 3, s.OPCODES["failure"], s.OPCODES["failure"]] \
+ s.encode_literal("a") + [s.OPCODES["success"]]
s.assert_match(opcodes, "ba")
def _test_mark(self):
s = self.s
# XXX need to rewrite this implementation-independent
opcodes = s.encode_literal("a") + [s.OPCODES["mark"], 0] \
+ s.encode_literal("b") + [s.OPCODES["mark"], 1, s.OPCODES["success"]]
state = self.create_state("abc")
_sre._sre_search(state, opcodes)
assert 1 == state.lastindex
assert 1 == state.lastmark
# NB: the following are indexes from the start of the match
assert [1, 2] == state.marks
def test_branch(self):
s = self.s
opcodes = [s.OPCODES["branch"], 7] + s.encode_literal("ab") \
+ [s.OPCODES["jump"], 9, 7] + s.encode_literal("cd") \
+ [s.OPCODES["jump"], 2, s.OPCODES["failure"], s.OPCODES["success"]]
s.assert_match(opcodes, ["ab", "cd"])
s.assert_no_match(opcodes, ["aacas", "ac", "bla"])
def test_repeat_one(self):
s = self.s
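# repeat_one operands are <skip> <min> <max>, followed by the repeated
# body ("a") and the literal tail ("ab") that must match after it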
opcodes = [s.OPCODES["repeat_one"], 6, 1, self.s.MAXREPEAT] + s.encode_literal("a") \
+ [s.OPCODES["success"]] + s.encode_literal("ab") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["aab", "aaaab"])
s.assert_no_match(opcodes, ["ab", "a"])
def test_min_repeat_one(self):
s = self.s
opcodes = [s.OPCODES["min_repeat_one"], 5, 1, self.s.MAXREPEAT, s.OPCODES["any"]] \
+ [s.OPCODES["success"]] + s.encode_literal("b") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["aab", "ardb", "bb"])
s.assert_no_match(opcodes, ["b"])
def test_repeat_maximizing(self):
s = self.s
opcodes = [s.OPCODES["repeat"], 5, 1, self.s.MAXREPEAT] + s.encode_literal("a") \
+ [s.OPCODES["max_until"]] + s.encode_literal("b") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["ab", "aaaab", "baabb"])
s.assert_no_match(opcodes, ["aaa", "", "ac"])
def test_max_until_zero_width_match(self):
# re.compile won't compile prospective zero-width matches (all of them?),
# so we can only produce an example by directly constructing bytecodes.
# CPython 2.3 fails with a recursion limit exceeded error here.
import sys
if not sys.version_info[:2] == (2, 3):
s = self.s
opcodes = [s.OPCODES["repeat"], 10, 1, self.s.MAXREPEAT, s.OPCODES["repeat_one"],
6, 0, self.s.MAXREPEAT] + s.encode_literal("a") + [s.OPCODES["success"],
s.OPCODES["max_until"], s.OPCODES["success"]]
s.assert_match(opcodes, ["ab", "bb"])
assert "" == s.search(opcodes, "bb").group(0)
def test_repeat_minimizing(self):
s = self.s
opcodes = [s.OPCODES["repeat"], 4, 1, self.s.MAXREPEAT, s.OPCODES["any"],
s.OPCODES["min_until"]] + s.encode_literal("b") + [s.OPCODES["success"]]
s.assert_match(opcodes, ["ab", "aaaab", "baabb"])
s.assert_no_match(opcodes, ["b"])
assert "aab" == s.search(opcodes, "aabb").group(0)
def test_groupref(self):
s = self.s
opcodes = [s.OPCODES["mark"], 0, s.OPCODES["any"], s.OPCODES["mark"], 1] \
+ s.encode_literal("a") + [s.OPCODES["groupref"], 0, s.OPCODES["success"]]
s.assert_match(opcodes, ["bab", "aaa", "dad"])
s.assert_no_match(opcodes, ["ba", "bad", "baad"])
def test_groupref_ignore(self):
s = self.s
opcodes = [s.OPCODES["mark"], 0, s.OPCODES["any"], s.OPCODES["mark"], 1] \
+ s.encode_literal("a") + [s.OPCODES["groupref_ignore"], 0, s.OPCODES["success"]]
s.assert_match(opcodes, ["bab", "baB", "Dad"])
s.assert_no_match(opcodes, ["ba", "bad", "baad"])
def test_assert(self):
s = self.s
opcodes = s.encode_literal("a") + [s.OPCODES["assert"], 4, 0] \
+ s.encode_literal("b") + [s.OPCODES["success"], s.OPCODES["success"]]
assert "a" == s.search(opcodes, "ab").group(0)
s.assert_no_match(opcodes, ["a", "aa"])
def test_assert_not(self):
s = self.s
opcodes = s.encode_literal("a") + [s.OPCODES["assert_not"], 4, 0] \
+ s.encode_literal("b") + [s.OPCODES["success"], s.OPCODES["success"]]
assert "a" == s.search(opcodes, "ac").group(0)
s.assert_match(opcodes, ["a"])
s.assert_no_match(opcodes, ["ab"])
class AppTestOptimizations:
"""These tests try to trigger optmized edge cases."""
spaceconfig = {'usemodules': ['itertools']}
def test_match_length_optimization(self):
import re
assert None == re.match("bla", "blub")
def test_fast_search(self):
import re
assert None == re.search("bl", "abaub")
assert None == re.search("bl", "b")
assert ["bl", "bl"] == re.findall("bl", "blbl")
assert ["a", "u"] == re.findall("bl(.)", "blablu")
def test_branch_literal_shortcut(self):
import re
assert None == re.search("bl|a|c", "hello")
def test_literal_search(self):
import re
assert re.search("b(\d)", "ababbbab1")
assert None == re.search("b(\d)", "ababbbab")
def test_repeat_one_literal_tail(self):
import re
assert re.search(".+ab", "wowowowawoabwowo")
assert None == re.search(".+ab", "wowowaowowo")
def test_split_nonempty(self):
import re
raises(ValueError, re.split, '', '')
re.split("a*", '') # -> warning
def test_type_names(self):
import re
assert repr(re.Pattern) == "<class 're.Pattern'>"
assert repr(re.Match) == "<class 're.Match'>"
class AppTestUnicodeExtra:
def test_string_attribute(self):
import re
match = re.search(u"\u1234", u"\u1233\u1234\u1235")
assert match.string == u"\u1233\u1234\u1235"
# check ascii version too
match = re.search(u"a", u"bac")
assert match.string == u"bac"
def test_match_start(self):
import re
match = re.search(u"\u1234", u"\u1233\u1234\u1235")
assert match.start() == 1
def test_match_repr_span(self):
import re
match = re.match(u"\u1234", u"\u1234")
assert match.span() == (0, 1)
assert "span=(0, 1), match='\u1234'" in repr(match)
def test_match_repr_truncation(self):
import re
s = "xy" + u"\u1234" * 50
match = re.match(s, s)
# this used to produce invalid utf-8 by truncating repr(s)
# after 50 bytes
assert "span=(0, 52), match=" + repr(s)[:50] + ">" in repr(match)
def test_pattern_repr_truncation(self):
import re
s = "xy" + u"\u1234" * 200
pattern = re.compile(s)
# this used to produce invalid utf-8 by truncating repr(s)
# after 200 bytes
assert repr(pattern) == "re.compile(%s)" % (repr(s)[:200],)
|
the-stack_0_20799 |
import ParaScript as ps
import MyFuncs as mf
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""
main
"""
para = ps.ParaClass()
para.price = [20, 20]
para.travel_time = [20, 10, 20]
para.m = 1
para.g = 0.25
para.fxcost = [50, 50]
para.val_of_time = 1.0
para.opCostPerPas = 1.0
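# NOTE: parameter meanings are inferred from the attribute names (two
# operators' prices and fixed costs, three link travel times, value of
# time, per-passenger operating cost); see ParaScript for the definitions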
with open('TestResults.csv', 'w+') as f:
print("TestId,Price1,Price2,Time1,Time2,Time3,DiscountRatio,m,g,x1,x2,profit1,profit2,opCost1,opCost2", file=f)
mf.test_one_ParaSet(case_id=1, _para=para)
# mf.test_one_share(case_id=2, _para=para)
# mf.test_one_Bertand(case_id=3, _para=para)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.