Dataset schema (per-record columns):

    column                 type      length / range
    max_stars_repo_path    string    length 4-245
    max_stars_repo_name    string    length 7-115
    max_stars_count        int64     101-368k
    id                     string    length 2-8
    content                string    length 6-1.03M
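For orientation, here is a minimal sketch of how records shaped like the rows below could be filtered by repository star count. The record dicts, the `min_stars` threshold, and the `popular_python_files` helper are illustrative assumptions, not part of the dataset itself.

import sys
from typing import Dict, Iterable, List


def popular_python_files(records: Iterable[Dict], min_stars: int = 1000) -> List[str]:
    """Return the repo paths of records whose repository has at least `min_stars` stars."""
    return [
        r["max_stars_repo_path"]
        for r in records
        if r["max_stars_count"] >= min_stars
    ]


# Example with one record shaped like the first row below (content elided):
sample = [{
    "max_stars_repo_path": "parsl/tests/test_checkpointing/test_python_checkpoint_1.py",
    "max_stars_repo_name": "vkhodygo/parsl",
    "max_stars_count": 323,
    "id": "12615504",
    "content": "...",
}]
print(popular_python_files(sample, min_stars=100), file=sys.stdout)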
parsl/tests/test_checkpointing/test_python_checkpoint_1.py
vkhodygo/parsl
323
12615504
<filename>parsl/tests/test_checkpointing/test_python_checkpoint_1.py
import argparse
import os

import pytest

import parsl
from parsl import python_app
from parsl.tests.configs.local_threads import config


@python_app(cache=True)
def random_app(i):
    import random
    return random.randint(i, 100000)


def launch_n_random(n=2):
    """1. Launch a few apps and write the checkpoint once a few have completed
    """
    d = [random_app(i) for i in range(0, n)]
    print("Done launching")

    # Block till done
    return [i.result() for i in d]


@pytest.mark.local
def test_initial_checkpoint_write(n=2):
    """1. Launch a few apps and write the checkpoint once a few have completed
    """
    parsl.load(config)
    results = launch_n_random(n)

    cpt_dir = parsl.dfk().checkpoint()

    cptpath = cpt_dir + '/dfk.pkl'
    print("Path exists : ", os.path.exists(cptpath))
    assert os.path.exists(
        cptpath), "DFK checkpoint missing: {0}".format(cptpath)

    cptpath = cpt_dir + '/tasks.pkl'
    print("Path exists : ", os.path.exists(cptpath))
    assert os.path.exists(
        cptpath), "Tasks checkpoint missing: {0}".format(cptpath)

    run_dir = parsl.dfk().run_dir
    parsl.clear()
    return run_dir, results


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--count", default="10",
                        help="Count of apps to launch")
    parser.add_argument("-d", "--debug", action='store_true',
                        help="Count of apps to launch")
    args = parser.parse_args()

    if args.debug:
        parsl.set_stream_logger()

    x = test_initial_checkpoint_write(n=4)
flexget/plugins/operate/max_reruns.py
Jeremiad/Flexget
1,322
12615507
from loguru import logger

from flexget import plugin
from flexget.event import event
from flexget.task import Task

logger = logger.bind(name='max_reruns')


class MaxReRuns:
    """Overrides the maximum amount of re-runs allowed by a task."""

    schema = {'type': 'integer'}

    def __init__(self):
        self.default = Task.RERUN_DEFAULT

    def reset(self, task):
        task.unlock_reruns()
        task.max_reruns = self.default
        logger.debug('changing max task rerun variable back to: {}', self.default)

    def on_task_start(self, task, config):
        self.default = task.max_reruns
        logger.debug('saving old max task rerun value: {}', self.default)
        task.max_reruns = int(config)
        task.lock_reruns()
        logger.debug('changing max task rerun variable to: {}', config)

    def on_task_exit(self, task, config):
        if task.rerun_count > task.max_reruns:
            self.reset(task)

    def on_task_abort(self, task, config):
        self.reset(task)


@event('plugin.register')
def register_plugin():
    plugin.register(MaxReRuns, 'max_reruns', api_ver=2)
2021_03_17/dojo_test.py
devppjr/dojo
114
12615531
<filename>2021_03_17/dojo_test.py
import unittest

from dojo import separate_names, get_bigger_name, ordenados

entrada = [['Joao', 'NO'], ['Carlos', 'YES'], ['Abner', 'NO'], ['Samuel', 'YES'],
           ['Ricardo', 'NO'], ['Abhay', 'YES'], ['Samuel', 'YES'], ['Andres', 'YES']]


class DojoTest(unittest.TestCase):

    def test_separate_names(self):
        self.assertEqual(separate_names(entrada),
                         (["Carlos", "Samuel", "Abhay", "Samuel", "Andres"],
                          ["Joao", "Abner", "Ricardo"]))

    def test_get_bigger_name(self):
        self.assertEqual(get_bigger_name(["Carlos", "Samuel", "Abhay", "Samuel", "Andres"]),
                         "Carlos")

    def test_ordenados(self):
        self.assertEqual(ordenados(["Carlos", "Samuel", "Abhay", "Samuel", "Andres"]),
                         ["Abhay", "Andres", "Carlos", "Samuel"])


if __name__ == '__main__':
    unittest.main()

# Juan - Ingrid - Lara - Tiago

# [['Joao', 'NO'], ['Carlos', 'YES'], ['Abner', 'NO'], ['Samuel', 'YES'], ['Ricardo', 'NO'],
#  ['Abhay', 'YES'], ['Samuel', 'YES'], ['Andres', 'YES'], ['Roberto', 'NO'], ['Carlos', 'YES'],
#  ['Samuel', 'YES'], ['Samuel', 'YES'], ['Abhay', 'YES'], ['Aline', 'YES'], ['Andres', 'YES']]
# [[]]
# ['Joao', 'Abner', ]

# 1 - Parse the input -> put it into a list of lists
# 2 - Split it into people who answered YES and people who answered NO
#     While adding the YES people to the list:
#     keep a variable holding the name with the largest number of letters;
#     when inserting a new name into the YES list, check whether it is longer;
#     if it is, swap the variable, otherwise leave it as is
# 3 - At the end, sort both lists alphabetically and concatenate the YES list with the NO list.
# 'Carlos', 'Abner', 'Samuel', 'Ricardo', 'Abhay'
3rd_party_software/pyca/Testing/CpuGpuUnitTest.py
ninamiolane/quicksilver
126
12615543
<reponame>ninamiolane/quicksilver<filename>3rd_party_software/pyca/Testing/CpuGpuUnitTest.py # # This file contains testing where PyCA results are compared to # results from numpy. All tests can be run from the command line by: # # > python -m unittest discover -v -p '*UnitTest.py' # # To run an individual test with graphical output from ipython: # # import CpuGpuUnitTest as cgtest # cgtc = cgtest.CpuGpuTestCase() # cgtc.test_Exp(disp=True) # import sys import unittest import PyCATest from PyCA.Core import * import PyCA.Common as common reload(common) import numpy as np try: import matplotlib.pyplot as plt plt.ion() except ImportError: print "Warning: matplotlib.pyplot not found, some functionality disabled" def CheckIm(hIm, dIm, name, disp): hdIm = dIm.copy() hdIm.toType(MEM_HOST) dImArr = np.squeeze(hdIm.asnp()) hImArr = np.squeeze(hIm.asnp()) diff = hImArr-dImArr if disp: title = name plt.figure(title) plt.subplot(1,3,1) plt.imshow(hImArr) plt.colorbar(); plt.title('host') plt.draw() plt.subplot(1,3,2) plt.imshow(dImArr) plt.colorbar(); plt.title('device') plt.draw() plt.subplot(1,3,3) plt.imshow(diff) plt.colorbar(); plt.title('diff') plt.draw() plt.show() diffMax = np.max(np.abs(diff)) diffAv = np.sum(np.abs(diff))/np.prod(diff.shape) return (diffAv, diffMax) def CheckField(hF,dF,name,disp): hdF = dF.copy() hdF.toType(MEM_HOST) dFArr_x, dFArr_y, dFArr_z = hdF.asnp() dFArr_x = np.squeeze(dFArr_x) dFArr_y = np.squeeze(dFArr_y) hFArr_x, hFArr_y, hFArr_z = hF.asnp() hFArr_x = np.squeeze(hFArr_x) hFArr_y = np.squeeze(hFArr_y) diff_x = hFArr_x-dFArr_x diff_y = hFArr_y-dFArr_y if disp: title = name plt.figure(title) plt.subplot(2,3,1) plt.imshow(np.squeeze(hFArr_x)) plt.colorbar(); plt.title('host x') plt.draw() plt.subplot(2,3,2) plt.imshow(np.squeeze(dFArr_x)) plt.colorbar(); plt.title('device x') plt.draw() plt.subplot(2,3,3) plt.imshow(np.squeeze(diff_x)) plt.colorbar(); plt.title('diff x') plt.draw() plt.subplot(2,3,4) plt.imshow(np.squeeze(hFArr_y)) plt.colorbar(); plt.title('host y') plt.draw() plt.subplot(2,3,5) plt.imshow(np.squeeze(dFArr_y)) plt.colorbar(); plt.title('device y') plt.draw() plt.subplot(2,3,6) plt.imshow(np.squeeze(diff_y)) plt.colorbar(); plt.title('diff y') plt.draw() plt.show() diffMax = max(np.max(np.abs(diff_x)), np.max(np.abs(diff_y))) diffAv = (np.sum(np.abs(diff_x)) + np.sum(np.abs(diff_y))) \ / (2*np.prod(diff_x.shape)) return (diffAv, diffMax) # # Test Class # class CpuGpuTestCase(unittest.TestCase): def __init__(self, methodName='runTest'): super(CpuGpuTestCase, self).__init__(methodName) self.cudaEnabled = (GetNumberOfCUDADevices() > 0) if self.cudaEnabled: # allowable average abs. diff self.AvEps = 1e-6 # allowable max abs. 
diff self.MaxEps = 1e-4 # image size self.sz = np.array([127, 119]) # spacing self.sp = np.array([1.5, 2.1]) # fluid parameters self.fluidParams = [1.0, 1.0, 0.0] self.vsz = np.append(self.sz, 2) self.imSz = Vec3Di(int(self.sz[0]), int(self.sz[1]), 1) self.imSp = Vec3Df(float(self.sp[0]), float(self.sp[1]), 1.0) # set up grid self.grid = GridInfo(self.imSz, self.imSp) # set up host / device images self.I0Arr = common.DrawEllipse(self.sz, self.sz/2, self.sz[0]/4, self.sz[1]/3) self.I1Arr = common.DrawEllipse(self.sz, self.sz/2, self.sz[0]/3, self.sz[1]/4) self.I0Arr = common.GaussianBlur(self.I0Arr,1.5) self.I1Arr = common.GaussianBlur(self.I1Arr,1.5) self.hI0Orig = common.ImFromNPArr(self.I0Arr, mType=MEM_HOST, sp=self.imSp) self.hI1Orig = common.ImFromNPArr(self.I1Arr, mType=MEM_HOST, sp=self.imSp) self.dI0Orig = common.ImFromNPArr(self.I0Arr, mType=MEM_DEVICE, sp=self.imSp) self.dI1Orig = common.ImFromNPArr(self.I1Arr, mType=MEM_DEVICE, sp=self.imSp) # automatically called before test functions def setUp(self): if not self.cudaEnabled: self.skipTest('Cannot run test, no CUDA device found or CUDA support not compiled') def TestIm(self, pycaI, npI, name, disp, avEps=None, maxEps=None): if avEps is None: avEps = self.AvEps if maxEps is None: maxEps = self.MaxEps diffAv, diffMax = CheckIm(pycaI, npI, name=name, disp=disp) self.assertLess(diffAv, avEps) self.assertLess(diffAv, self.MaxEps) def TestField(self, pycaF, npF, name, disp, avEps=None, maxEps=None): if avEps is None: avEps = self.AvEps if maxEps is None: maxEps = self.MaxEps diffAv, diffMax = CheckField(pycaF, npF, name=name, disp=disp) self.assertLess(diffAv, avEps) self.assertLess(diffAv, self.MaxEps) def setUpI0(self): self.hI0 = self.hI0Orig.copy() self.dI0 = self.dI0Orig.copy() def tearDownI0(self): self.hI0 = None self.dI0 = None def setUpI1(self): self.hI1 = self.hI0Orig.copy() self.dI1 = self.dI0Orig.copy() def tearDownI1(self): self.hI1 = None self.dI1 = None def setUpDiffOp(self): self.hDiffOp = FluidKernelFFTCPU() self.hDiffOp.setAlpha(self.fluidParams[0]) self.hDiffOp.setBeta(self.fluidParams[1]) self.hDiffOp.setGamma(self.fluidParams[2]) self.hDiffOp.setGrid(self.grid) self.dDiffOp = FluidKernelFFTGPU() self.dDiffOp.setAlpha(self.fluidParams[0]) self.dDiffOp.setBeta(self.fluidParams[1]) self.dDiffOp.setGamma(self.fluidParams[2]) self.dDiffOp.setGrid(self.grid) def tearDownDiffOp(self): self.hDiffOp = None self.dDiffOp = None def setUpGrad(self): self.hGrad = Field3D(self.grid, MEM_HOST) self.dGrad = Field3D(self.grid, MEM_DEVICE) Gradient(self.hGrad, self.hI0Orig) Gradient(self.dGrad, self.dI0Orig) def tearDownGrad(self): self.hGrad = None self.dGrad = None def randImSetUp(self): self.hRandIm = \ common.RandImage(self.sz, nSig=1.0, gSig=0.0, mType = MEM_HOST, sp = self.imSp) self.dRandIm = self.hRandIm.copy() self.dRandIm.toType(MEM_DEVICE) def randImTearDown(self): self.hRandIm = None self.dRandIm = None def randVPair(self): hV = common.RandField(self.sz, nSig=5.0, gSig=4.0, mType = MEM_HOST, sp = self.imSp) dV = hV.copy() dV.toType(MEM_DEVICE) return hV, dV def randVSetUp(self): hV, dV = self.randVPair() self.hRandV = hV self.dRandV = dV def randVTearDown(self): self.hRandV = None self.dRandV = None def randHSetUp(self): self.hRandH = \ common.RandField(self.sz, nSig=5.0, gSig=4.0, mType = MEM_HOST, sp = self.imSp) VtoH_I(self.hRandH) self.dRandH = self.hRandH.copy() self.dRandH.toType(MEM_DEVICE) def randHTearDown(self): self.hRandH = None self.dRandH = None def resultImSetUp(self): self.hIm = Image3D(self.grid, 
MEM_HOST) self.dIm = Image3D(self.grid, MEM_DEVICE) def resultImTearDown(self): self.hIm = None self.dIm = None def resultFieldSetUp(self): self.hField = Field3D(self.grid, MEM_HOST) self.dField = Field3D(self.grid, MEM_DEVICE) def resultFieldTearDown(self): self.hField = None self.dField = None def randMaskSetUp(self): randArr = np.random.rand(self.sz[0], self.sz[1]) maskArr = np.zeros(randArr.shape) maskArr[randArr > 0.5] = 1.0 self.hRandMask = common.ImFromNPArr(maskArr, mType=MEM_HOST, sp=self.imSp) self.dRandMask = self.hRandMask.copy() self.dRandMask.toType(MEM_DEVICE) def randMaskTearDown(self): self.hRandMask = None self.dRandMask = None ################################################################ # # Begin Tests # ################################################################ # # Check image generation # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(setUpI1, tearDownI1) def test_ImageGen(self, disp=False): self.TestIm(self.hI0,self.dI0, name='I0',disp=disp) self.TestIm(self.hI1,self.dI1, name='I1',disp=disp) # # Gradient # @PyCATest.AddSetUp(setUpGrad, tearDownGrad) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_Gradient(self, disp=False): self.TestField(self.hGrad,self.dGrad, name='Gradient',disp=disp) Gradient(self.hField, self.hRandIm) Gradient(self.dField, self.dRandIm) self.TestField(self.hField, self.dField, name='Gradient', disp=disp) # # Gradient2 (note: only CPU version implemented, should probably # just delete this function) # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(setUpGrad, tearDownGrad) def test_Gradient2(self, disp=False): hGrad2 = Field3D(self.grid, MEM_HOST) Gradient2(hGrad2, self.hI0) self.TestField(hGrad2,self.dGrad, name='Grad2 v Grad',disp=disp) # # FluidKernelFFT # @PyCATest.AddSetUp(setUpDiffOp, tearDownDiffOp) @PyCATest.AddSetUp(setUpGrad, tearDownGrad) def test_DiffOp(self, disp=False): hKGrad = Field3D(self.grid, MEM_HOST) dKGrad = Field3D(self.grid, MEM_DEVICE) self.hDiffOp.applyInverseOperator(hKGrad, self.hGrad) self.dDiffOp.applyInverseOperator(dKGrad, self.dGrad) self.TestField(hKGrad, dKGrad, name='Kernel Inverse Op', disp=disp, avEps=2e-6) hLKGrad = Field3D(self.grid, MEM_HOST) dLKGrad = Field3D(self.grid, MEM_DEVICE) self.hDiffOp.applyOperator(hLKGrad, hKGrad) self.dDiffOp.applyOperator(dLKGrad, dKGrad) self.TestField(hLKGrad, dLKGrad, name='Kernel Forward Op', disp=disp, avEps=3e-6) self.TestField(self.hGrad, dLKGrad, name='Diff Op LK = Id', disp=disp, avEps=2e-6) self.sz = np.array([127, 121]) self.TestField(self.hGrad, dLKGrad, name='Diff Op LK = Id', disp=disp, avEps=2e-6) # # FiniteDiff # @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_FiniteDiff(self, disp=False): for dim in [DIM_X, DIM_Y, DIM_Z]: for diffType in [DIFF_FORWARD, DIFF_BACKWARD, DIFF_CENTRAL]: for bc in [BC_CLAMP, BC_WRAP, BC_APPROX]: FiniteDiff(self.hIm, self.hRandIm, dim, diffType, bc) FiniteDiff(self.dIm, self.dRandIm, dim, diffType, bc) pltname = '%s %s %s'%\ (PyCATest.DIMNAMES[dim], PyCATest.DIFFTNAMES[diffType], PyCATest.BCNAMES[bc]) self.TestIm(self.hIm,self.dIm, name=pltname,disp=disp) # # Jacobian Determinant # @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) @PyCATest.AddSetUp(randHSetUp, randHTearDown) def test_JacDet(self, disp=False): hVx = Field3D(self.grid, MEM_HOST) hVy = Field3D(self.grid, MEM_HOST) hVz = Field3D(self.grid, MEM_HOST) dVx = Field3D(self.grid, MEM_DEVICE) dVy = 
Field3D(self.grid, MEM_DEVICE) dVz = Field3D(self.grid, MEM_DEVICE) Jacobian(hVx, hVy, hVz, self.hRandH) JacDetH(self.hIm, hVx, hVy, hVz) Jacobian(dVx, dVy, dVz, self.dRandH) JacDetH(self.dIm, dVx, dVy, dVz) self.TestField(hVx, dVx, name='Jacobian Vx', disp=disp) self.TestField(hVy, dVy, name='Jacobian Vy', disp=disp) self.TestField(hVz, dVz, name='Jacobian Vz', disp=disp) self.TestIm(self.hIm,self.dIm, name='jac. det. (full)', disp=disp) hJD2 = Image3D(self.grid, MEM_HOST) dJD2 = Image3D(self.grid, MEM_DEVICE) JacDetH(hJD2, self.hRandH) JacDetH(dJD2, self.dRandH) self.TestIm(hJD2,dJD2, name='jac. det. (pointwise)', disp=disp) self.TestIm(self.hIm, dJD2, name='jac. det. (full vs. pointwise)', disp=disp) # # UpwindGradMag # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_UpwindGradMag(self, disp=False): UpwindGradMag(self.hIm, self.hI0, self.hI0) UpwindGradMag(self.dIm, self.dI0, self.dI0) self.TestIm(self.hIm,self.dIm, name='UpwindGradMag', disp=disp) # # Splat (splatting) # @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_Splat(self, disp=False): Splat(self.hIm, self.hRandH, self.hRandIm) Splat(self.dIm, self.dRandH, self.dRandIm) self.TestIm(self.hIm,self.dIm, name='Splat', disp=disp, avEps=2e-6) # # SplatLargeRange (splatting images with large intensity values > 2048) # @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_SplatLargeRange(self, disp=False): MulC_I(self.hRandIm, 1000.00) MulC_I(self.dRandIm, 1000.00) Splat(self.hIm, self.hRandH, self.hRandIm) Splat(self.dIm, self.dRandH, self.dRandIm) self.TestIm(self.hIm,self.dIm, name='SplatLargeRange', disp=disp) # # Ad (Adjoint representation operator) # @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_Ad(self, disp=False): Ad(self.hField, self.hRandH, self.hRandV) Ad(self.dField, self.dRandH, self.dRandV) self.TestField(self.hField,self.dField, name='Ad', disp=disp, avEps=2e-6) # # CoAd (CoAdjoint operator) # @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_CoAd(self, disp=False): CoAd(self.hField, self.hRandH, self.hRandV) CoAd(self.dField, self.dRandH, self.dRandV) self.TestField(self.hField,self.dField, name='CoAd', disp=disp) # # UpwindDiff # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_UpwindDiff(self, disp=False): for dim in [DIM_X, DIM_Y]: pltname = 'Upwind Diff %s'%PyCATest.DIMNAMES[dim] UpwindDiff(self.hIm, self.hI0, self.hRandIm, dim) UpwindDiff(self.dIm, self.dI0, self.dRandIm, dim) self.TestIm(self.hIm,self.dIm, name=pltname, disp=disp) # # Convolve # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_Convolve(self, disp=False): # create convolution kernel (laplacian stencil) kArr = np.zeros([3,3]) kArr[1,:] = 1.0 kArr[:,1] = 1.0 kArr[1,1] = -4.0 kArr = np.atleast_3d(kArr) hK = Image3D(MEM_HOST) hK.fromlist(kArr.tolist()) dK = Image3D(MEM_DEVICE) dK.fromlist(kArr.tolist()) Convolve(self.hIm, self.hI0, hK) Convolve(self.dIm, self.dI0, dK) 
self.TestIm(self.hIm,self.dIm, name='Convolve', disp=disp) # # SubVol # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_SubVol(self, disp=False): newSz = (0.66*self.sz).astype('int') newSz = Vec3Di(newSz[0], newSz[1], 1) newGrid = GridInfo(newSz) start = Vec3Di(5,7,0) hNewIm = Image3D(newGrid, MEM_HOST) dNewIm = Image3D(newGrid, MEM_DEVICE) SubVol(hNewIm, self.hRandIm, start) SubVol(dNewIm, self.dRandIm, start) self.TestIm(hNewIm, dNewIm, name='SubVol', disp=disp) # # SetSubVol_I # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_SetSubVol_I(self, disp=False): newSz = (0.66*self.sz).astype('int') newSz = Vec3Di(newSz[0], newSz[1], 1) newGrid = GridInfo(newSz) start = Vec3Di(5,7,0) hNewIm = Image3D(newGrid, MEM_HOST) SetMem(hNewIm, 4.0) dNewIm = Image3D(newGrid, MEM_DEVICE) SetMem(dNewIm, 4.0) SetSubVol_I(self.hRandIm, hNewIm, start) SetSubVol_I(self.dRandIm, dNewIm, start) self.TestIm(self.hRandIm, self.dRandIm, name='SetSubVol_I', disp=disp) # # Resample # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_Resample(self, disp=False): newSz = (0.66*self.sz).astype('int') newSz = Vec3Di(newSz[0], newSz[1], 1) newGrid = GridInfo(newSz) hNewIm = Image3D(newGrid, MEM_HOST) dNewIm = Image3D(newGrid, MEM_DEVICE) Resample(hNewIm, self.hRandIm) Resample(dNewIm, self.dRandIm) self.TestIm(hNewIm, dNewIm, name='Resample', disp=disp, avEps=4e-6, maxEps=4e-6) # # ResampleWorld # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_ResampleWorld(self, disp=False): fac = 0.66 newSz = (fac*self.sz).astype('int') newSp = (1.0/fac)*self.sp newSz = Vec3Di(newSz[0], newSz[1], 1) newSp = Vec3Df(newSp[0], newSp[1], 1) newOr = Vec3Df(1.0, -2.0, 0.0) newGrid = GridInfo(newSz, newSp, newOr) hNewIm = Image3D(newGrid, MEM_HOST) dNewIm = Image3D(newGrid, MEM_DEVICE) ResampleWorld(hNewIm, self.hRandIm) ResampleWorld(dNewIm, self.dRandIm) self.TestIm(hNewIm, dNewIm, name='ResampleWorld', disp=disp, avEps=4e-6, maxEps=4e-6) # # ApplyH # @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_ApplyH(self, disp=False): ApplyH(self.hIm, self.hRandIm, self.hRandH) ApplyH(self.dIm, self.dRandIm, self.dRandH) self.TestIm(self.hIm,self.dIm, name='ApplyH', disp=disp) # # ApplyV # @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_ApplyV(self, disp=False): ApplyV(self.hIm, self.hRandIm, self.hRandV) ApplyV(self.dIm, self.dRandIm, self.dRandV) self.TestIm(self.hIm,self.dIm, name='ApplyV', disp=disp) # # ApplyVInv # @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_ApplyVInv(self, disp=False): ApplyVInv(self.hIm, self.hRandIm, self.hRandV) ApplyVInv(self.dIm, self.dRandIm, self.dRandV) self.TestIm(self.hIm,self.dIm, name='ApplyVInv', disp=disp) # # ComposeHH # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(setUpGrad, tearDownGrad) @PyCATest.AddSetUp(resultFieldSetUp, 
resultFieldTearDown) def test_ComposeHV(self, disp=False): VtoH_I(self.hGrad) VtoH_I(self.dGrad) ComposeHH(self.hField, self.hRandH, self.hGrad) ComposeHH(self.dField, self.dRandH, self.dGrad) self.TestField(self.hField,self.dField, name='ComposeHH', disp=disp) hF2 = Field3D(self.grid, MEM_HOST) dF2 = Field3D(self.grid, MEM_DEVICE) bg = BACKGROUND_STRATEGY_PARTIAL_ID ApplyH(hF2, self.hRandH, self.hGrad, bg) ApplyH(dF2, self.dRandH, self.dGrad, bg) self.TestField(hF2, dF2, name='ApplyH', disp=disp) self.TestField(self.hField, dF2, name='ComposeHH vs. ApplyH', disp=disp) # # ComposeHV # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(setUpGrad, tearDownGrad) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_ComposeHV(self, disp=False): ComposeHV(self.hField, self.hRandH, self.hGrad) ComposeHV(self.dField, self.dRandH, self.dGrad) self.TestField(self.hField,self.dField, name='ComposeHV', disp=disp) hF2 = Field3D(self.grid, MEM_HOST) dF2 = Field3D(self.grid, MEM_DEVICE) bg = BACKGROUND_STRATEGY_PARTIAL_ID ApplyV(hF2, self.hRandH, self.hGrad, bg) ApplyV(dF2, self.dRandH, self.dGrad, bg) self.TestField(hF2, dF2, name='ApplyV', disp=disp) self.TestField(self.hField, dF2, name='ComposeHV vs. ApplyV', disp=disp) # # ComposeHVInv # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(setUpGrad, tearDownGrad) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_ComposeHVInv(self, disp=False): ComposeHVInv(self.hField, self.hRandH, self.hGrad) ComposeHVInv(self.dField, self.dRandH, self.dGrad) self.TestField(self.hField,self.dField, name='ComposeHVInv', disp=disp) hF2 = Field3D(self.grid, MEM_HOST) dF2 = Field3D(self.grid, MEM_DEVICE) bg = BACKGROUND_STRATEGY_PARTIAL_ID ApplyVInv(hF2, self.hRandH, self.hGrad, bg) ApplyVInv(dF2, self.dRandH, self.dGrad, bg) self.TestField(hF2, dF2, name='ApplyVInv', disp=disp) self.TestField(self.hField, dF2, name='ComposeHVInv vs. ApplyVInv', disp=disp) # # ComposeVH # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(setUpGrad, tearDownGrad) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_ComposeVH(self, disp=False): ComposeVH(self.hField, self.hGrad, self.hRandH) ComposeVH(self.dField, self.dGrad, self.dRandH) self.TestField(self.hField,self.dField, name='ComposeVH', disp=disp) hF2 = Field3D(self.grid, MEM_HOST) dF2 = Field3D(self.grid, MEM_DEVICE) bg = BACKGROUND_STRATEGY_PARTIAL_ZERO ApplyH(hF2, self.hGrad, self.hRandH, bg) ApplyH(dF2, self.dGrad, self.dRandH, bg) self.TestField(hF2, dF2, name='ApplyH', disp=disp) invSp = self.imSp.inverse() MulC_I(hF2, invSp) MulC_I(dF2, invSp) Add_I(hF2, self.hRandH) Add_I(dF2, self.dRandH) self.TestField(hF2, self.dField, name='ApplyH vs. ComposeVH', disp=disp) self.TestField(self.hField, dF2, name='ComposeVH vs. 
ApplyH', disp=disp) # # ComposeVInvH # @PyCATest.AddSetUp(setUpI0, tearDownI0) @PyCATest.AddSetUp(randHSetUp, randHTearDown) @PyCATest.AddSetUp(setUpGrad, tearDownGrad) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_ComposeVInvH(self, disp=False): ComposeVInvH(self.hField, self.hGrad, self.hRandH) ComposeVInvH(self.dField, self.dGrad, self.dRandH) self.TestField(self.hField,self.dField, name='ComposeVInvH', disp=disp) hF2 = Field3D(self.grid, MEM_HOST) dF2 = Field3D(self.grid, MEM_DEVICE) bg = BACKGROUND_STRATEGY_PARTIAL_ZERO ApplyH(hF2, self.hGrad, self.hRandH, bg) ApplyH(dF2, self.dGrad, self.dRandH, bg) self.TestField(hF2, dF2, name='ApplyH', disp=disp) invSp = self.imSp.inverse() MulC_I(hF2, invSp) MulC_I(dF2, invSp) Sub(hF2, self.hRandH, hF2) Sub(dF2, self.dRandH, dF2) self.TestField(self.hField, dF2, name='ComposeVInvH vs. ApplyH', disp=disp) @PyCATest.AddSetUp(randMaskSetUp, randMaskTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_CopyMasked(self, disp=False): SetMem(self.hIm, 0.0) SetMem(self.dIm, 0.0) Copy(self.hIm, self.hRandIm, self.hRandMask) Copy(self.dIm, self.dRandIm, self.dRandMask) self.TestIm(self.hIm,self.dIm, name='CopyMasked', disp=disp) @PyCATest.AddSetUp(randMaskSetUp, randMaskTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_AbsMasked(self, disp=False): SetMem(self.hIm, 0.0) SetMem(self.dIm, 0.0) Abs(self.hIm, self.hRandIm, self.hRandMask) Abs(self.dIm, self.dRandIm, self.dRandMask) self.TestIm(self.hIm,self.dIm, name='AbsMasked', disp=disp) @PyCATest.AddSetUp(randMaskSetUp, randMaskTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultImSetUp, resultImTearDown) def test_AddMasked(self, disp=False): SetMem(self.hIm, 0.0) SetMem(self.dIm, 0.0) hIm2 = common.RandImage(self.sz, nSig=1.0, gSig=0.0, mType = MEM_HOST, sp = self.imSp) dIm2 = hIm2.copy() dIm2.toType(MEM_DEVICE) Add(self.hIm, self.hRandIm, hIm2, self.hRandMask) Add(self.dIm, self.dRandIm, dIm2, self.dRandMask) self.TestIm(self.hIm,self.dIm, name='AddMasked', disp=disp) # # ImageField Opers # @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_IF_BinaryOps(self, disp=False): Add(self.hField, self.hRandV, self.hRandIm) Add(self.dField, self.dRandV, self.dRandIm) self.TestField(self.hField, self.dField, name='IF_Add', disp=disp) Sub(self.hField, self.hRandV, self.hRandIm) Sub(self.dField, self.dRandV, self.dRandIm) self.TestField(self.hField, self.dField, name='IF_Sub', disp=disp) Mul(self.hField, self.hRandV, self.hRandIm) Mul(self.dField, self.dRandV, self.dRandIm) self.TestField(self.hField, self.dField, name='IF_Mul', disp=disp) Abs_I(self.hRandIm) Add_I(self.hRandIm, 1.0) Abs_I(self.dRandIm) Add_I(self.dRandIm, 1.0) Div(self.hField, self.hRandV, self.hRandIm) Div(self.dField, self.dRandV, self.dRandIm) self.TestField(self.hField, self.dField, name='IF_Div', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) def test_IF_Add_I(self, disp=False): Add_I(self.hRandV, self.hRandIm) Add_I(self.dRandV, self.dRandIm) self.TestField(self.hRandV, self.dRandV, name='IF_Add_I', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) def test_IF_Sub_I(self, disp=False): Sub_I(self.hRandV, self.hRandIm) Sub_I(self.dRandV, 
self.dRandIm) self.TestField(self.hRandV, self.dRandV, name='IF_Add_I', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) def test_IF_Mul_I(self, disp=False): Mul_I(self.hRandV, self.hRandIm) Mul_I(self.dRandV, self.dRandIm) self.TestField(self.hRandV, self.dRandV, name='IF_Add_I', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) def test_IF_Div_I(self, disp=False): Abs_I(self.hRandIm) Add_I(self.hRandIm, 1.0) Abs_I(self.dRandIm) Add_I(self.dRandIm, 1.0) Div_I(self.hRandV, self.hRandIm) Div_I(self.dRandV, self.dRandIm) self.TestField(self.hRandV, self.dRandV, name='IF_Add_I', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_IF_Add_Mul(self, disp=False): hV1, dV1 = self.randVPair() Add_Mul(self.hField, self.hRandV, hV1, self.hRandIm) Add_Mul(self.dField, self.dRandV, dV1, self.dRandIm) self.TestField(self.hField, self.dField, name='IF_Add_Mul', disp=disp) Add_Mul_I(self.hRandV, hV1, self.hRandIm) Add_Mul_I(self.dRandV, dV1, self.dRandIm) self.TestField(self.hField, self.dRandV, name='IF_Add_Mul_I', disp=disp) self.TestField(self.hRandV, self.dField, name='IF_Add_Mul_I', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_IF_Sub_Mul(self, disp=False): hV1, dV1 = self.randVPair() Sub_Mul(self.hField, self.hRandV, hV1, self.hRandIm) Sub_Mul(self.dField, self.dRandV, dV1, self.dRandIm) self.TestField(self.hField, self.dField, name='IF_Sub_Mul', disp=disp) Sub_Mul_I(self.hRandV, hV1, self.hRandIm) Sub_Mul_I(self.dRandV, dV1, self.dRandIm) self.TestField(self.hField, self.dRandV, name='IF_Sub_Mul_I', disp=disp) self.TestField(self.hRandV, self.dField, name='IF_Sub_Mul_I', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_IF_MulMulC(self, disp=False): randNum = np.random.rand() MulMulC(self.hField, self.hRandV, self.hRandIm, randNum) MulMulC(self.dField, self.dRandV, self.dRandIm, randNum) self.TestField(self.hField, self.dField, name='IF_MulMulC', disp=disp) MulMulC_I(self.hRandV, self.hRandIm, randNum) MulMulC_I(self.dRandV, self.dRandIm, randNum) self.TestField(self.hField, self.dRandV, name='IF_MulMulC_I', disp=disp) self.TestField(self.hRandV, self.dField, name='IF_MulMulC_I', disp=disp) @PyCATest.AddSetUp(randVSetUp, randVTearDown) @PyCATest.AddSetUp(randImSetUp, randImTearDown) @PyCATest.AddSetUp(resultFieldSetUp, resultFieldTearDown) def test_IF_Add_MulMulC(self, disp=False): hV1, dV1 = self.randVPair() randNum = np.random.rand() Add_MulMulC(self.hField, self.hRandV, hV1, self.hRandIm, randNum) Add_MulMulC(self.dField, self.dRandV, dV1, self.dRandIm, randNum) self.TestField(self.hField, self.dField, name='IF_Add_MulMulC', disp=disp) Add_MulMulC_I(self.hRandV, hV1, self.hRandIm, randNum) Add_MulMulC_I(self.dRandV, dV1, self.dRandIm, randNum) self.TestField(self.hField, self.dRandV, name='IF_Add_MulMulC_I', disp=disp) self.TestField(self.hRandV, self.dField, name='IF_Add_MulMulC_I', disp=disp) # runTest is only added so that the class can be instantiated # directly in order to call individual tests def runTest(): print 'No tests to run directly, all are member functions'
dface/prepare_data/assemble_onet_imglist.py
Clock966/Face_recognition
1,161
12615547
<filename>dface/prepare_data/assemble_onet_imglist.py
import os

import dface.config as config
import dface.prepare_data.assemble as assemble

if __name__ == '__main__':

    anno_list = []

    net_landmark_file = os.path.join(config.ANNO_STORE_DIR, config.ONET_LANDMARK_ANNO_FILENAME)
    net_postive_file = os.path.join(config.ANNO_STORE_DIR, config.ONET_POSTIVE_ANNO_FILENAME)
    net_part_file = os.path.join(config.ANNO_STORE_DIR, config.ONET_PART_ANNO_FILENAME)
    net_neg_file = os.path.join(config.ANNO_STORE_DIR, config.ONET_NEGATIVE_ANNO_FILENAME)

    anno_list.append(net_postive_file)
    anno_list.append(net_part_file)
    anno_list.append(net_neg_file)
    anno_list.append(net_landmark_file)

    imglist_filename = config.ONET_TRAIN_IMGLIST_FILENAME
    anno_dir = config.ANNO_STORE_DIR
    imglist_file = os.path.join(anno_dir, imglist_filename)

    chose_count = assemble.assemble_data(imglist_file, anno_list)
    print("PNet train annotation result file path:%s" % imglist_file)
tests/asgi/_asgi_test_app.py
the-bets/falcon
8,217
12615552
import asyncio from collections import Counter import hashlib import platform import sys import time import falcon import falcon.asgi import falcon.errors import falcon.util SSE_TEST_MAX_DELAY_SEC = 1 _WIN32 = sys.platform.startswith('win') _X86_64 = platform.machine() == 'x86_64' class Things: def __init__(self): self._counter = Counter() async def on_get(self, req, resp): await asyncio.sleep(0.01) resp.text = req.remote_addr async def on_post(self, req, resp): resp.data = await req.stream.read(req.content_length or 0) resp.set_header('X-Counter', str(self._counter['backround:things:on_post'])) async def background_job_async(): await asyncio.sleep(0.01) self._counter['backround:things:on_post'] += 1 def background_job_sync(): time.sleep(0.01) self._counter['backround:things:on_post'] += 1000 resp.schedule(background_job_async) resp.schedule_sync(background_job_sync) resp.schedule(background_job_async) resp.schedule_sync(background_job_sync) async def on_put(self, req, resp): # NOTE(kgriffs): Test that reading past the end does # not hang. chunks = [] for i in range(req.content_length + 1): # NOTE(kgriffs): In the ASGI interface, bounded_stream is an # alias for req.stream. We'll use the alias here just as # a sanity check. chunk = await req.bounded_stream.read(1) chunks.append(chunk) # NOTE(kgriffs): body should really be set to a string, but # Falcon is lenient and will allow bytes as well (although # it is slightly less performant). # TODO(kgriffs): Perhaps in Falcon 4.0 be more strict? We would # also have to change the WSGI behavior to match. resp.text = b''.join(chunks) # ================================================================= # NOTE(kgriffs): Test the sync_to_async helpers here to make sure # they work as expected in the context of a real ASGI server. # ================================================================= safely_tasks = [] safely_values = [] def callmesafely(a, b, c=None): # NOTE(kgriffs): Sleep to prove that there isn't another instance # running in parallel that is able to race ahead. time.sleep(0.001) safely_values.append((a, b, c)) cms = falcon.util.wrap_sync_to_async(callmesafely, threadsafe=False) loop = falcon.util.get_running_loop() # NOTE(caselit): on windows it takes more time so create less tasks # NOTE(vytas): Tests on non-x86 platforms are run using software # emulation via single-thread QEMU Docker containers, making them # considerably slower as well. num_cms_tasks = 100 if _WIN32 or not _X86_64 else 1000 for i in range(num_cms_tasks): # NOTE(kgriffs): create_task() is used here, so that the coroutines # are scheduled immediately in the order created; under Python # 3.6, asyncio.gather() does not seem to always schedule # them in order, so we do it this way to make it predictable. 
safely_tasks.append(loop.create_task(cms(i, i + 1, c=i + 2))) await asyncio.gather(*safely_tasks) assert len(safely_values) == num_cms_tasks for i, val in enumerate(safely_values): assert safely_values[i] == (i, i + 1, i + 2) def callmeshirley(a=42, b=None): return (a, b) assert (42, None) == await falcon.util.sync_to_async(callmeshirley) assert (1, 2) == await falcon.util.sync_to_async(callmeshirley, 1, 2) assert (5, None) == await falcon.util.sync_to_async(callmeshirley, 5) assert (3, 4) == await falcon.util.sync_to_async(callmeshirley, 3, b=4) class Bucket: async def on_post(self, req, resp): resp.text = await req.stream.read() class Feed: async def on_websocket(self, req, ws, feed_id): await ws.accept() await ws.send_text(feed_id) class Events: async def on_get(self, req, resp): async def emit(): s = 0 while s <= SSE_TEST_MAX_DELAY_SEC: yield falcon.asgi.SSEvent(text='hello world') await asyncio.sleep(s) s += SSE_TEST_MAX_DELAY_SEC / 4 resp.sse = emit() async def on_websocket(self, req, ws): # noqa: C901 recv_command = req.get_header('X-Command') == 'recv' send_mismatched = req.get_header('X-Mismatch') == 'send' recv_mismatched = req.get_header('X-Mismatch') == 'recv' mismatch_type = req.get_header('X-Mismatch-Type', default='text') raise_error = req.get_header('X-Raise-Error') close = req.get_header('X-Close') close_code = req.get_header('X-Close-Code') if close_code: close_code = int(close_code) accept = req.get_header('X-Accept', default='accept') if accept == 'accept': subprotocol = req.get_header('X-Subprotocol') if subprotocol == '*': subprotocol = ws.subprotocols[0] if subprotocol: await ws.accept(subprotocol) else: await ws.accept() elif accept == 'reject': if close: await ws.close() return if send_mismatched: if mismatch_type == 'text': await ws.send_text(b'fizzbuzz') else: await ws.send_data('fizzbuzz') if recv_mismatched: if mismatch_type == 'text': await ws.receive_text() else: await ws.receive_data() start = time.time() while time.time() - start < 1: try: msg = None if recv_command: msg = await ws.receive_media() else: msg = None await ws.send_text('hello world') print('on_websocket:send_text') if msg and msg['command'] == 'echo': await ws.send_text(msg['echo']) await ws.send_data(b'hello\x00world') await asyncio.sleep(0.2) except falcon.errors.WebSocketDisconnected: print('on_websocket:WebSocketDisconnected') raise if raise_error == 'generic': raise Exception('Test: Generic Unhandled Error') elif raise_error == 'http': raise falcon.HTTPBadRequest() if close: # NOTE(kgriffs): Tests that the default is used # when close_code is None. await ws.close(close_code) class Multipart: async def on_post(self, req, resp): parts = {} form = await req.get_media() async for part in form: # NOTE(vytas): SHA1 is no longer recommended for cryptographic # purposes, but here we are only using it for integrity checking. 
sha1 = hashlib.sha1() async for chunk in part.stream: sha1.update(chunk) parts[part.name] = { 'filename': part.filename, 'sha1': sha1.hexdigest(), } resp.media = parts class LifespanHandler: def __init__(self): self.startup_succeeded = False self.shutdown_succeeded = False async def process_startup(self, scope, event): assert scope['type'] == 'lifespan' assert event['type'] == 'lifespan.startup' self.startup_succeeded = True async def process_shutdown(self, scope, event): assert scope['type'] == 'lifespan' assert event['type'] == 'lifespan.shutdown' self.shutdown_succeeded = True class TestJar: async def on_get(self, req, resp): # NOTE(myusko): In the future we shouldn't change the cookie # a test depends on the input. # NOTE(kgriffs): This is the only test that uses a single # cookie (vs. multiple) as input; if this input ever changes, # a separate test will need to be added to explicitly verify # this use case. resp.set_cookie('has_permission', 'true') async def on_post(self, req, resp): if req.cookies['has_permission'] == 'true': resp.status = falcon.HTTP_200 else: resp.status = falcon.HTTP_403 def create_app(): app = falcon.asgi.App() app.add_route('/', Things()) app.add_route('/bucket', Bucket()) app.add_route('/events', Events()) app.add_route('/forms', Multipart()) app.add_route('/jars', TestJar()) app.add_route('/feeds/{feed_id}', Feed()) lifespan_handler = LifespanHandler() app.add_middleware(lifespan_handler) async def _on_ws_error(req, resp, error, params, ws=None): if not ws: raise if ws.unaccepted: await ws.accept() if not ws.closed: await ws.send_text(error.__class__.__name__) await ws.close() app.add_error_handler(falcon.errors.OperationNotAllowed, _on_ws_error) app.add_error_handler(ValueError, _on_ws_error) return app application = create_app()
webdriver/tests/new_session/platform_name.py
ziransun/wpt
14,668
12615553
<reponame>ziransun/wpt<filename>webdriver/tests/new_session/platform_name.py
import pytest

from tests.support import platform_name
from tests.support.asserts import assert_success


@pytest.mark.skipif(platform_name is None, reason="Unsupported platform {}".format(platform_name))
def test_corresponds_to_local_system(new_session, add_browser_capabilities):
    response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
    value = assert_success(response)

    assert value["capabilities"]["platformName"] == platform_name
examples/pandocfilters/tikz.py
jacobwhall/panflute
361
12615559
<filename>examples/pandocfilters/tikz.py<gh_stars>100-1000
#!/usr/bin/env python

"""
Pandoc filter to process raw latex tikz environments into images.
Assumes that pdflatex is in the path, and that the standalone
package is available. Also assumes that ImageMagick's convert
is in the path. Images are put in the tikz-images directory.
"""

import hashlib
import re
import os
import sys
import shutil
from pandocfilters import toJSONFilter, Para, Image
from subprocess import Popen, PIPE, call
from tempfile import mkdtemp

imagedir = "tikz-images"


def sha1(x):
    return hashlib.sha1(x.encode(sys.getfilesystemencoding())).hexdigest()


def tikz2image(tikz, filetype, outfile):
    tmpdir = mkdtemp()
    olddir = os.getcwd()
    os.chdir(tmpdir)
    f = open('tikz.tex', 'w')
    f.write("""\\documentclass{standalone}
\\usepackage{tikz}
\\begin{document}
""")
    f.write(tikz)
    f.write("\n\\end{document}\n")
    f.close()
    p = call(["pdflatex", 'tikz.tex'], stdout=sys.stderr)
    os.chdir(olddir)
    if filetype == 'pdf':
        shutil.copyfile(tmpdir + '/tikz.pdf', outfile + '.pdf')
    else:
        call(["convert", tmpdir + '/tikz.pdf', outfile + '.' + filetype])
    shutil.rmtree(tmpdir)


def tikz(key, value, format, meta):
    if key == 'RawBlock':
        [fmt, code] = value
        if fmt == "latex" and re.match("\\\\begin{tikzpicture}", code):
            outfile = imagedir + '/' + sha1(code)
            if format == "html":
                filetype = "png"
            elif format == "latex":
                filetype = "pdf"
            else:
                filetype = "png"
            src = outfile + '.' + filetype
            if not os.path.isfile(src):
                try:
                    os.mkdir(imagedir)
                    sys.stderr.write('Created directory ' + imagedir + '\n')
                except OSError:
                    pass
                tikz2image(code, filetype, outfile)
                sys.stderr.write('Created image ' + src + '\n')
            return Para([Image(['', [], []], [], [src, ""])])


if __name__ == "__main__":
    toJSONFilter(tikz)
opytimizer/optimizers/evolutionary/ga.py
anukaal/opytimizer
528
12615575
"""Genetic Algorithm. """ import copy import numpy as np import opytimizer.math.distribution as d import opytimizer.math.general as g import opytimizer.math.random as r import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.logging as l from opytimizer.core import Optimizer logger = l.get_logger(__name__) class GA(Optimizer): """An GA class, inherited from Optimizer. This is the designed class to define GA-related variables and methods. References: <NAME>. An introduction to genetic algorithms. MIT Press (1998). """ def __init__(self, params=None): """Initialization method. Args: params (dict): Contains key-value parameters to the meta-heuristics. """ # Overrides its parent class with the receiving params super(GA, self).__init__() # Probability of selection self.p_selection = 0.75 # Probability of mutation self.p_mutation = 0.25 # Probability of crossover self.p_crossover = 0.5 # Builds the class self.build(params) logger.info('Class overrided.') @property def p_selection(self): """float: Probability of selection. """ return self._p_selection @p_selection.setter def p_selection(self, p_selection): if not isinstance(p_selection, (float, int)): raise e.TypeError('`p_selection` should be a float or integer') if p_selection < 0 or p_selection > 1: raise e.ValueError('`p_selection` should be between 0 and 1') self._p_selection = p_selection @property def p_mutation(self): """float: Probability of mutation. """ return self._p_mutation @p_mutation.setter def p_mutation(self, p_mutation): if not isinstance(p_mutation, (float, int)): raise e.TypeError('`p_mutation` should be a float or integer') if p_mutation < 0 or p_mutation > 1: raise e.ValueError('`p_mutation` should be between 0 and 1') self._p_mutation = p_mutation @property def p_crossover(self): """float: Probability of crossover. """ return self._p_crossover @p_crossover.setter def p_crossover(self, p_crossover): if not isinstance(p_crossover, (float, int)): raise e.TypeError('`p_crossover` should be a float or integer') if p_crossover < 0 or p_crossover > 1: raise e.ValueError('`p_crossover` should be between 0 and 1') self._p_crossover = p_crossover def _roulette_selection(self, n_agents, fitness): """Performs a roulette selection on the population (p. 8). Args: n_agents (int): Number of agents allowed in the space. fitness (list): A fitness list of every agent. Returns: The selected indexes of the population. """ # Calculates the number of selected individuals n_individuals = int(n_agents * self.p_selection) # Checks if `n_individuals` is an odd number if n_individuals % 2 != 0: # If it is, increase it by one n_individuals += 1 # Defines the maximum fitness of current generation max_fitness = np.max(fitness) # Re-arrange the list of fitness by inverting it # Note that we apply a trick due to it being designed for minimization # f'(x) = f_max - f(x) inv_fitness = [max_fitness - fit + c.EPSILON for fit in fitness] # Calculates the total inverted fitness total_fitness = np.sum(inv_fitness) # Calculates the probability of each inverted fitness probs = [fit / total_fitness for fit in inv_fitness] # Performs the selection process selected = d.generate_choice_distribution(n_agents, probs, n_individuals) return selected def _crossover(self, father, mother): """Performs the crossover between a pair of parents (p. 8). Args: father (Agent): Father to produce the offsprings. mother (Agent): Mother to produce the offsprings. Returns: Two generated offsprings based on parents. 
""" # Makes a deep copy of father and mother alpha, beta = copy.deepcopy(father), copy.deepcopy(mother) # Generates a uniform random number r1 = r.generate_uniform_random_number() # If random number is smaller than crossover probability if r1 < self.p_crossover: # Generates another uniform random number r2 = r.generate_uniform_random_number() # Calculates the crossover based on a linear combination between father and mother alpha.position = r2 * father.position + (1 - r2) * mother.position # Calculates the crossover based on a linear combination between father and mother beta.position = r2 * mother.position + (1 - r2) * father.position return alpha, beta def _mutation(self, alpha, beta): """Performs the mutation over offsprings (p. 8). Args: alpha (Agent): First offspring. beta (Agent): Second offspring. Returns: Two mutated offsprings. """ # For every decision variable for j in range(alpha.n_variables): # Generates a uniform random number r1 = r.generate_uniform_random_number() # If random number is smaller than probability of mutation if r1 < self.p_mutation: # Mutates the offspring alpha.position[j] += r.generate_gaussian_random_number() # Generates another uniform random number r2 = r.generate_uniform_random_number() # If random number is smaller than probability of mutation if r2 < self.p_mutation: # Mutates the offspring beta.position[j] += r.generate_gaussian_random_number() return alpha, beta def update(self, space, function): """Wraps Genetic Algorithm over all agents and variables. Args: space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. """ # Creates a list to hold the new population new_agents = [] # Retrieves the number of agents n_agents = len(space.agents) # Calculates a list of fitness from every agent fitness = [agent.fit + c.EPSILON for agent in space.agents] # Selects the parents selected = self._roulette_selection(n_agents, fitness) # For every pair of selected parents for s in g.n_wise(selected): # Performs the crossover and mutation alpha, beta = self._crossover(space.agents[s[0]], space.agents[s[1]]) alpha, beta = self._mutation(alpha, beta) # Checking `alpha` and `beta` limits alpha.clip_by_bound() beta.clip_by_bound() # Calculates new fitness for `alpha` and `beta` alpha.fit = function(alpha.position) beta.fit = function(beta.position) # Appends the mutated agents to the children new_agents.extend([alpha, beta]) # Joins both populations, sort agents and gathers best `n_agents` space.agents += new_agents space.agents.sort(key=lambda x: x.fit) space.agents = space.agents[:n_agents]
homeassistant/components/demo/siren.py
mtarjoianu/core
30,023
12615593
"""Demo platform that offers a fake siren device.""" from __future__ import annotations from typing import Any from homeassistant.components.siren import SirenEntity, SirenEntityFeature from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType SUPPORT_FLAGS = SirenEntityFeature.TURN_OFF | SirenEntityFeature.TURN_ON async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the Demo siren devices.""" async_add_entities( [ DemoSiren(name="Siren"), DemoSiren( name="Siren with all features", available_tones=["fire", "alarm"], support_volume_set=True, support_duration=True, ), ] ) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the Demo siren devices config entry.""" await async_setup_platform(hass, {}, async_add_entities) class DemoSiren(SirenEntity): """Representation of a demo siren device.""" def __init__( self, name: str, available_tones: list[str | int] | None = None, support_volume_set: bool = False, support_duration: bool = False, is_on: bool = True, ) -> None: """Initialize the siren device.""" self._attr_name = name self._attr_should_poll = False self._attr_supported_features = SUPPORT_FLAGS self._attr_is_on = is_on if available_tones is not None: self._attr_supported_features |= SirenEntityFeature.TONES if support_volume_set: self._attr_supported_features |= SirenEntityFeature.VOLUME_SET if support_duration: self._attr_supported_features |= SirenEntityFeature.DURATION self._attr_available_tones = available_tones async def async_turn_on(self, **kwargs: Any) -> None: """Turn the siren on.""" self._attr_is_on = True self.async_write_ha_state() async def async_turn_off(self, **kwargs: Any) -> None: """Turn the siren off.""" self._attr_is_on = False self.async_write_ha_state()
examples/hello-world/scripts/plot-hist.py
graingert/snakemake
1,326
12615596
<reponame>graingert/snakemake
import matplotlib.pyplot as plt
import pandas as pd

cities = pd.read_csv(snakemake.input[0])

plt.hist(cities["Population"], bins=50)
plt.savefig(snakemake.output[0])
whatsapp-bot-venv/Lib/site-packages/twilio/rest/taskrouter/__init__.py
RedaMastouri/ConversationalPythonicChatBot
1,362
12615603
<gh_stars>1000+
# coding=utf-8
r"""
This code was generated by
   \ / _    _  _|   _  _
    | (_)\/(_)(_|\/| |(/_  v1.0.0
         /       /
"""

from twilio.base.domain import Domain
from twilio.rest.taskrouter.v1 import V1


class Taskrouter(Domain):

    def __init__(self, twilio):
        """
        Initialize the Taskrouter Domain

        :returns: Domain for Taskrouter
        :rtype: twilio.rest.taskrouter.Taskrouter
        """
        super(Taskrouter, self).__init__(twilio)

        self.base_url = 'https://taskrouter.twilio.com'

        # Versions
        self._v1 = None

    @property
    def v1(self):
        """
        :returns: Version v1 of taskrouter
        :rtype: twilio.rest.taskrouter.v1.V1
        """
        if self._v1 is None:
            self._v1 = V1(self)
        return self._v1

    @property
    def workspaces(self):
        """
        :rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceList
        """
        return self.v1.workspaces

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Taskrouter>'
test/test_parser_equivalence.py
bookofproofs/TatSu
259
12615614
<gh_stars>100-1000
import importlib
from pathlib import Path

import pytest  # noqa

from tatsu.exceptions import FailedParse
from tatsu.tool import compile, gencode

INPUT = """
    1d3
"""

OUTPUT = {'number_of_dice': '1', 'sides': '3'}

GRAMMAR = """
    start = expression $;

    int = /-?\d+/ ;
    dice = number_of_dice:factor /d|D/ sides:factor;

    expression = addition ;

    addition
        =
        | left:dice_expr op:('+' | '-') ~ right:addition
        | dice_expr
        ;

    dice_expr
        =
        | dice
        | factor
        ;

    factor
        =
        | '(' ~ @:expression ')'
        | int
        ;
"""


def generate_and_load_parser(name, grammar):
    init_filename = Path('./tmp/__init__')
    init_filename.touch(exist_ok=True)

    parser = gencode(name='Test', grammar=grammar)
    parser_filename = Path(f'./tmp/{name}.py')
    with open(parser_filename, 'wt') as f:
        f.write(parser)

    try:
        importlib.invalidate_caches()
        module = importlib.import_module(f'tmp.{name}', 'tmp')
        importlib.reload(module)
        try:
            return module.UnknownParser()  # noqa
        except (AttributeError, ImportError):
            return module.TestParser()  # noqa
    finally:
        pass
        # parser_filename.unlink()


def test_model_parse():
    model = compile(name='Test', grammar=GRAMMAR)
    assert OUTPUT == model.parse(INPUT)


def test_codegen_parse():
    tmp_dir = Path('./tmp')
    tmp_dir.mkdir(parents=True, exist_ok=True)
    init_filename = Path('./tmp/__init__.py')
    init_filename.touch(exist_ok=True)

    parser = generate_and_load_parser('test_codegen_parse', GRAMMAR)
    output = parser.parse(INPUT, parseinfo=False)
    assert output == OUTPUT


# @pytest.mark.skip('work in progress')
def test_error_messages():
    grammar = '''
        @@grammar :: ORDER
        alphabet = a b others $ ;

        a = 'a' ;
        b = 'b' ;
        others = 'c' | 'd' | 'e' | 'f' |'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o';
    '''
    input = 'a b'
    e1 = None

    model = compile(grammar)
    try:
        model.parse(input)
    except FailedParse as e:  # noqa
        e1 = str(e)

    assert "expecting one of: 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o'" in e1


# @pytest.mark.skip('work in progress')
def test_name_checked():
    grammar = '''
        @@grammar :: Test
        @@ignorecase :: True
        @@keyword :: if

        start = rule ;
        rule = @:word if_exp $ ;
        if_exp = 'if' digit ;
        @name
        word = /\w+/ ;
        digit = /\d/ ;
    '''

    def subtest(parser):
        parser.parse('nonIF if 1', trace=False)
        with pytest.raises(FailedParse):
            parser.parse('if if 1', trace=False)
        with pytest.raises(FailedParse):
            parser.parse('IF if 1', trace=False)

    parser = compile(grammar, 'Test')
    subtest(parser)

    parser = generate_and_load_parser('test_name_checked', grammar)
    subtest(parser)
python/src/nnabla/core/modules.py
daniel-falk/nnabla
2,792
12615628
<reponame>daniel-falk/nnabla<gh_stars>1000+
# Copyright 2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.core.module import Module


# TODO
def complete_dims(s, dims):
    if not hasattr(s, '__iter__'):
        return (s,) * dims
    if len(s) == dims:
        return s
    raise ValueError('')


def get_conv_same_pad(k):
    return tuple(kk // 2 for kk in k)


class ConvBn(Module):
    def __init__(self, outmaps, kernel=1, stride=1, act=None):
        self.outmaps = outmaps
        self.kernel = kernel
        self.stride = stride
        self.act = act

    def call(self, x, training=True):
        kernel = complete_dims(self.kernel, 2)
        pad = get_conv_same_pad(kernel)
        stride = complete_dims(self.stride, 2)
        h = PF.convolution(x, self.outmaps, kernel, pad, stride, with_bias=False)
        h = PF.batch_normalization(h, batch_stat=training)
        if self.act is None:
            return h
        return self.act(h)


class ResUnit(Module):
    def __init__(self, channels, stride=1, skip_by_conv=True):
        self.conv1 = ConvBn(channels // 4, 1, 1,
                            act=lambda x: F.relu(x, inplace=True))
        self.conv2 = ConvBn(channels // 4, 3, stride,
                            act=lambda x: F.relu(x, inplace=True))
        self.conv3 = ConvBn(channels, 1)
        self.skip_by_conv = skip_by_conv
        self.skip = ConvBn(channels, 1, stride)

    def call(self, x, training=True):
        h = self.conv1(x)
        h = self.conv2(h)
        h = self.conv3(h)
        s = x
        if self.skip_by_conv:
            s = self.skip(s)
        h = F.relu(F.add2(h, s, inplace=True), inplace=True)
        return h
tools/python/util/convert_onnx_models_to_ort.py
mszhanyi/onnxruntime
669
12615641
#!/usr/bin/env python3 # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import argparse import contextlib import enum import os import pathlib import tempfile import typing import onnxruntime as ort from .file_utils import files_from_file_or_dir, path_match_suffix_ignore_case from .onnx_model_utils import get_optimization_level from .ort_format_model import create_config_from_models class OptimizationStyle(enum.Enum): Fixed = 0 Runtime = 1 def _optimization_suffix(optimization_level_str: str, optimization_style: OptimizationStyle, suffix: str): return "{}{}{}".format( f".{optimization_level_str}" if optimization_level_str != "all" else "", ".with_runtime_opt" if optimization_style == OptimizationStyle.Runtime else "", suffix, ) def _create_config_file_path( model_path_or_dir: pathlib.Path, optimization_level_str: str, optimization_style: OptimizationStyle, enable_type_reduction: bool, ): config_name = "{}{}".format( "required_operators_and_types" if enable_type_reduction else "required_operators", _optimization_suffix(optimization_level_str, optimization_style, ".config"), ) if model_path_or_dir.is_dir(): return model_path_or_dir / config_name return model_path_or_dir.with_suffix(f".{config_name}") def _create_session_options( optimization_level: ort.GraphOptimizationLevel, output_model_path: pathlib.Path, custom_op_library: pathlib.Path, session_options_config_entries: typing.Dict[str, str], ): so = ort.SessionOptions() so.optimized_model_filepath = str(output_model_path) so.graph_optimization_level = optimization_level if custom_op_library: so.register_custom_ops_library(str(custom_op_library)) for key, value in session_options_config_entries.items(): so.add_session_config_entry(key, value) return so def _convert( model_path_or_dir: pathlib.Path, output_dir: typing.Optional[pathlib.Path], optimization_level_str: str, optimization_style: OptimizationStyle, custom_op_library: pathlib.Path, create_optimized_onnx_model: bool, allow_conversion_failures: bool, target_platform: str, session_options_config_entries: typing.Dict[str, str], ) -> typing.List[pathlib.Path]: model_dir = model_path_or_dir if model_path_or_dir.is_dir() else model_path_or_dir.parent output_dir = output_dir or model_dir optimization_level = get_optimization_level(optimization_level_str) def is_model_file_to_convert(file_path: pathlib.Path): if not path_match_suffix_ignore_case(file_path, ".onnx"): return False # ignore any files with an extension of .optimized.onnx which are presumably from previous executions # of this script if path_match_suffix_ignore_case(file_path, ".optimized.onnx"): print(f"Ignoring '{file_path}'") return False return True models = files_from_file_or_dir(model_path_or_dir, is_model_file_to_convert) if len(models) == 0: raise ValueError("No model files were found in '{}'".format(model_path_or_dir)) providers = ["CPUExecutionProvider"] # if the optimization level is 'all' we manually exclude the NCHWc transformer. It's not applicable to ARM # devices, and creates a device specific model which won't run on all hardware. # If someone really really really wants to run it they could manually create an optimized onnx model first, # or they could comment out this code. 
optimizer_filter = None if optimization_level == ort.GraphOptimizationLevel.ORT_ENABLE_ALL and target_platform != "amd64": optimizer_filter = ["NchwcTransformer"] converted_models = [] for model in models: try: relative_model_path = model.relative_to(model_dir) (output_dir / relative_model_path).parent.mkdir(parents=True, exist_ok=True) ort_target_path = (output_dir / relative_model_path).with_suffix( _optimization_suffix(optimization_level_str, optimization_style, ".ort") ) if create_optimized_onnx_model: # Create an ONNX file with the same optimization level that will be used for the ORT format file. # This allows the ONNX equivalent of the ORT format model to be easily viewed in Netron. # If runtime optimizations are saved in the ORT format model, there may be some difference in the # graphs at runtime between the ORT format model and this saved ONNX model. optimized_target_path = (output_dir / relative_model_path).with_suffix( _optimization_suffix(optimization_level_str, optimization_style, ".optimized.onnx") ) so = _create_session_options( optimization_level, optimized_target_path, custom_op_library, session_options_config_entries ) if optimization_style == OptimizationStyle.Runtime: # Limit the optimizations to those that can run in a model with runtime optimizations. so.add_session_config_entry("optimization.minimal_build_optimizations", "apply") print("Saving optimized ONNX model {} to {}".format(model, optimized_target_path)) _ = ort.InferenceSession( str(model), sess_options=so, providers=providers, disabled_optimizers=optimizer_filter ) # Load ONNX model, optimize, and save to ORT format so = _create_session_options( optimization_level, ort_target_path, custom_op_library, session_options_config_entries ) so.add_session_config_entry("session.save_model_format", "ORT") if optimization_style == OptimizationStyle.Runtime: so.add_session_config_entry("optimization.minimal_build_optimizations", "save") print("Converting optimized ONNX model {} to ORT format model {}".format(model, ort_target_path)) _ = ort.InferenceSession( str(model), sess_options=so, providers=providers, disabled_optimizers=optimizer_filter ) converted_models.append(ort_target_path) # orig_size = os.path.getsize(onnx_target_path) # new_size = os.path.getsize(ort_target_path) # print("Serialized {} to {}. Sizes: orig={} new={} diff={} new:old={:.4f}:1.0".format( # onnx_target_path, ort_target_path, orig_size, new_size, new_size - orig_size, new_size / orig_size)) except Exception as e: print("Error converting {}: {}".format(model, e)) if not allow_conversion_failures: raise print("Converted {}/{} models successfully.".format(len(converted_models), len(models))) return converted_models def parse_args(): parser = argparse.ArgumentParser( os.path.basename(__file__), description="""Convert the ONNX format model/s in the provided directory to ORT format models. All files with a `.onnx` extension will be processed. For each one, an ORT format model will be created in the same directory. A configuration file will also be created containing the list of required operators for all converted models. This configuration file should be used as input to the minimal build via the `--include_ops_by_config` parameter. """, ) parser.add_argument( "--optimization_style", nargs="+", default=[OptimizationStyle.Fixed.name, OptimizationStyle.Runtime.name], choices=[e.name for e in OptimizationStyle], help="Style of optimization to perform on the ORT format model. " "Multiple values may be provided. 
The conversion will run once for each value. " "The general guidance is to use models optimized with " f"'{OptimizationStyle.Runtime.name}' style when using NNAPI or CoreML and " f"'{OptimizationStyle.Fixed.name}' style otherwise. " f"'{OptimizationStyle.Fixed.name}': Run optimizations directly before saving the ORT " "format model. This bakes in any platform-specific optimizations. " f"'{OptimizationStyle.Runtime.name}': Run basic optimizations directly and save certain " "other optimizations to be applied at runtime if possible. This is useful when using a " "compiling EP like NNAPI or CoreML that may run an unknown (at model conversion time) " "number of nodes. The saved optimizations can further optimize nodes not assigned to the " "compiling EP at runtime.", ) parser.add_argument( "--enable_type_reduction", action="store_true", help="Add operator specific type information to the configuration file to potentially reduce " "the types supported by individual operator implementations.", ) parser.add_argument( "--custom_op_library", type=pathlib.Path, default=None, help="Provide path to shared library containing custom operator kernels to register.", ) parser.add_argument( "--save_optimized_onnx_model", action="store_true", help="Save the optimized version of each ONNX model. " "This will have the same level of optimizations applied as the ORT format model.", ) parser.add_argument( "--allow_conversion_failures", action="store_true", help="Whether to proceed after encountering model conversion failures.", ) parser.add_argument( "--target_platform", type=str, default=None, choices=["arm", "amd64"], help="Specify the target platform where the exported model will be used. " "This parameter can be used to choose between platform-specific options, " "such as QDQIsInt8Allowed(arm), NCHWc (amd64) and NHWC (arm/amd64) format, different " "optimizer level options, etc.", ) parser.add_argument( "model_path_or_dir", type=pathlib.Path, help="Provide path to ONNX model or directory containing ONNX model/s to convert. 
" "All files with a .onnx extension, including those in subdirectories, will be " "processed.", ) return parser.parse_args() def convert_onnx_models_to_ort(): args = parse_args() optimization_styles = [OptimizationStyle[style_str] for style_str in args.optimization_style] # setting optimization level is not expected to be needed by typical users, but it can be set with this # environment variable optimization_level_str = os.getenv("ORT_CONVERT_ONNX_MODELS_TO_ORT_OPTIMIZATION_LEVEL", "all") model_path_or_dir = args.model_path_or_dir.resolve() custom_op_library = args.custom_op_library.resolve() if args.custom_op_library else None if not model_path_or_dir.is_dir() and not model_path_or_dir.is_file(): raise FileNotFoundError("Model path '{}' is not a file or directory.".format(model_path_or_dir)) if custom_op_library and not custom_op_library.is_file(): raise FileNotFoundError("Unable to find custom operator library '{}'".format(custom_op_library)) session_options_config_entries = {} if args.target_platform == "arm": session_options_config_entries["session.qdqisint8allowed"] = "1" else: session_options_config_entries["session.qdqisint8allowed"] = "0" for optimization_style in optimization_styles: print( "Converting models with optimization style '{}' and level '{}'".format( optimization_style.name, optimization_level_str ) ) converted_models = _convert( model_path_or_dir=model_path_or_dir, output_dir=None, optimization_level_str=optimization_level_str, optimization_style=optimization_style, custom_op_library=custom_op_library, create_optimized_onnx_model=args.save_optimized_onnx_model, allow_conversion_failures=args.allow_conversion_failures, target_platform=args.target_platform, session_options_config_entries=session_options_config_entries, ) with contextlib.ExitStack() as context_stack: if optimization_style == OptimizationStyle.Runtime: # Convert models again without runtime optimizations. # Runtime optimizations may not end up being applied, so we need to use both converted models with and # without runtime optimizations to get a complete set of ops that may be needed for the config file. model_dir = model_path_or_dir if model_path_or_dir.is_dir() else model_path_or_dir.parent temp_output_dir = context_stack.enter_context( tempfile.TemporaryDirectory(dir=model_dir, suffix=".without_runtime_opt") ) session_options_config_entries_for_second_conversion = session_options_config_entries.copy() # Limit the optimizations to those that can run in a model with runtime optimizations. session_options_config_entries_for_second_conversion[ "optimization.minimal_build_optimizations" ] = "apply" print( "Converting models again without runtime optimizations to generate a complete config file. " "These converted models are temporary and will be deleted." 
) converted_models += _convert( model_path_or_dir=model_path_or_dir, output_dir=temp_output_dir, optimization_level_str=optimization_level_str, optimization_style=OptimizationStyle.Fixed, custom_op_library=custom_op_library, create_optimized_onnx_model=False, # not useful as they would be created in a temp directory allow_conversion_failures=args.allow_conversion_failures, target_platform=args.target_platform, session_options_config_entries=session_options_config_entries_for_second_conversion, ) print( "Generating config file from ORT format models with optimization style '{}' and level '{}'".format( optimization_style.name, optimization_level_str ) ) config_file = _create_config_file_path( model_path_or_dir, optimization_level_str, optimization_style, args.enable_type_reduction ) create_config_from_models(converted_models, config_file, args.enable_type_reduction) if __name__ == "__main__": convert_onnx_models_to_ort()
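
# --- Editor's note: hedged usage sketch, not part of the original script. ---
# The script is argparse-driven, so it is normally run from a shell. The exact
# packaged module path and the sample paths below are assumptions by the editor;
# the flags themselves are the ones defined in parse_args() above.
#
#   python -m onnxruntime.tools.convert_onnx_models_to_ort \
#       --optimization_style Runtime \
#       --enable_type_reduction \
#       /path/to/onnx_models/
#
# Per the code above, each model.onnx would gain a model.with_runtime_opt.ort
# sibling, and a required_operators_and_types.with_runtime_opt.config file would
# be written for use with a minimal build (--include_ops_by_config).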
tests/test_0046-histograms-bh-hist.py
eic/uproot4
133
12615668
<filename>tests/test_0046-histograms-bh-hist.py<gh_stars>100-1000 # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE from __future__ import absolute_import import numpy import pytest import skhep_testdata import uproot def test_numpy_1d(): with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f: values, edges = f["hpx"].to_numpy(flow=True) assert values.tolist() == [ 2.0, 2.0, 3.0, 1.0, 1.0, 2.0, 4.0, 6.0, 12.0, 8.0, 9.0, 15.0, 15.0, 31.0, 35.0, 40.0, 64.0, 64.0, 81.0, 108.0, 124.0, 156.0, 165.0, 209.0, 262.0, 297.0, 392.0, 432.0, 466.0, 521.0, 604.0, 657.0, 788.0, 903.0, 1079.0, 1135.0, 1160.0, 1383.0, 1458.0, 1612.0, 1770.0, 1868.0, 1861.0, 1946.0, 2114.0, 2175.0, 2207.0, 2273.0, 2276.0, 2329.0, 2325.0, 2381.0, 2417.0, 2364.0, 2284.0, 2188.0, 2164.0, 2130.0, 1940.0, 1859.0, 1763.0, 1700.0, 1611.0, 1459.0, 1390.0, 1237.0, 1083.0, 1046.0, 888.0, 752.0, 742.0, 673.0, 555.0, 533.0, 366.0, 378.0, 272.0, 256.0, 200.0, 174.0, 132.0, 118.0, 100.0, 89.0, 86.0, 39.0, 37.0, 25.0, 23.0, 20.0, 16.0, 14.0, 9.0, 13.0, 8.0, 2.0, 2.0, 6.0, 1.0, 0.0, 1.0, 4.0, ] assert edges.tolist() == [ -numpy.inf, -4.0, -3.92, -3.84, -3.76, -3.68, -3.6, -3.52, -3.44, -3.36, -3.2800000000000002, -3.2, -3.12, -3.04, -2.96, -2.88, -2.8, -2.7199999999999998, -2.6399999999999997, -2.56, -2.48, -2.4, -2.3200000000000003, -2.24, -2.16, -2.08, -2.0, -1.92, -1.8399999999999999, -1.7599999999999998, -1.6800000000000002, -1.6, -1.52, -1.44, -1.3599999999999999, -1.2799999999999998, -1.1999999999999997, -1.12, -1.04, -0.96, -0.8799999999999999, -0.7999999999999998, -0.7199999999999998, -0.6400000000000001, -0.56, -0.48, -0.3999999999999999, -0.31999999999999984, -0.23999999999999977, -0.16000000000000014, -0.08000000000000007, 0.0, 0.08000000000000007, 0.16000000000000014, 0.2400000000000002, 0.3200000000000003, 0.40000000000000036, 0.4800000000000004, 0.5600000000000005, 0.6399999999999997, 0.7199999999999998, 0.7999999999999998, 0.8799999999999999, 0.96, 1.04, 1.12, 1.2000000000000002, 1.2800000000000002, 1.3600000000000003, 1.4400000000000004, 1.5200000000000005, 1.6000000000000005, 1.6799999999999997, 1.7599999999999998, 1.8399999999999999, 1.92, 2.0, 2.08, 2.16, 2.24, 2.3200000000000003, 2.4000000000000004, 2.4800000000000004, 2.5600000000000005, 2.6400000000000006, 2.7199999999999998, 2.8, 2.88, 2.96, 3.04, 3.12, 3.2, 3.2800000000000002, 3.3600000000000003, 3.4400000000000004, 3.5200000000000005, 3.6000000000000005, 3.6799999999999997, 3.76, 3.84, 3.92, 4.0, numpy.inf, ] f["hpx"].errors() def test_numpy_2d(): with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f: values, xedges, yedges = f["hpxpy"].to_numpy(flow=True) assert values.tolist() == [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 2.0, 4.0, 1.0, 0.0, 2.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 2.0, 0.0, 2.0, 2.0, 0.0, 1.0, 1.0, 2.0, 2.0, 0.0, 1.0, 5.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 2.0, 0.0, 2.0, 1.0, 3.0, 4.0, 3.0, 4.0, 4.0, 3.0, 3.0, 6.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 4.0, 1.0, 4.0, 5.0, 2.0, 7.0, 7.0, 9.0, 13.0, 10.0, 4.0, 3.0, 3.0, 4.0, 6.0, 3.0, 1.0, 1.0, 0.0, 3.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 3.0, 2.0, 9.0, 4.0, 8.0, 7.0, 8.0, 10.0, 17.0, 10.0, 13.0, 17.0, 17.0, 9.0, 12.0, 1.0, 6.0, 7.0, 2.0, 1.0, 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 1.0, 0.0, 2.0, 2.0, 7.0, 7.0, 11.0, 12.0, 13.0, 16.0, 25.0, 16.0, 18.0, 21.0, 22.0, 20.0, 19.0, 9.0, 9.0, 16.0, 7.0, 3.0, 4.0, 6.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 5.0, 4.0, 7.0, 5.0, 12.0, 5.0, 16.0, 23.0, 28.0, 28.0, 25.0, 37.0, 41.0, 41.0, 27.0, 24.0, 21.0, 19.0, 16.0, 15.0, 11.0, 4.0, 4.0, 2.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 4.0, 1.0, 6.0, 6.0, 14.0, 14.0, 21.0, 26.0, 46.0, 42.0, 47.0, 52.0, 44.0, 51.0, 53.0, 41.0, 56.0, 30.0, 24.0, 19.0, 20.0, 21.0, 12.0, 8.0, 1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 3.0, 2.0, 3.0, 3.0, 4.0, 6.0, 11.0, 8.0, 20.0, 36.0, 47.0, 40.0, 49.0, 61.0, 61.0, 70.0, 87.0, 95.0, 90.0, 74.0, 62.0, 66.0, 50.0, 42.0, 24.0, 14.0, 16.0, 7.0, 7.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 4.0, 5.0, 9.0, 10.0, 21.0, 28.0, 31.0, 39.0, 48.0, 88.0, 87.0, 80.0, 102.0, 92.0, 108.0, 100.0, 97.0, 100.0, 71.0, 76.0, 35.0, 32.0, 26.0, 31.0, 12.0, 9.0, 4.0, 4.0, 2.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 6.0, 5.0, 11.0, 9.0, 18.0, 23.0, 32.0, 54.0, 69.0, 81.0, 106.0, 105.0, 126.0, 132.0, 140.0, 148.0, 137.0, 130.0, 121.0, 104.0, 88.0, 68.0, 53.0, 35.0, 30.0, 16.0, 9.0, 6.0, 3.0, 8.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 1.0, 0.0, 1.0, 4.0, 1.0, 5.0, 7.0, 22.0, 20.0, 44.0, 57.0, 60.0, 100.0, 149.0, 148.0, 155.0, 201.0, 198.0, 198.0, 216.0, 207.0, 182.0, 159.0, 153.0, 102.0, 104.0, 66.0, 44.0, 28.0, 21.0, 8.0, 11.0, 4.0, 4.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, ], [ 0.0, 0.0, 0.0, 0.0, 2.0, 2.0, 3.0, 6.0, 8.0, 16.0, 34.0, 53.0, 58.0, 88.0, 106.0, 131.0, 179.0, 215.0, 206.0, 274.0, 236.0, 261.0, 243.0, 240.0, 207.0, 162.0, 138.0, 115.0, 85.0, 65.0, 59.0, 27.0, 22.0, 13.0, 7.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 1.0, 2.0, 0.0, 2.0, 1.0, 5.0, 6.0, 9.0, 13.0, 20.0, 39.0, 60.0, 74.0, 94.0, 145.0, 171.0, 211.0, 253.0, 281.0, 321.0, 311.0, 354.0, 317.0, 289.0, 269.0, 221.0, 199.0, 139.0, 97.0, 73.0, 50.0, 31.0, 29.0, 9.0, 11.0, 4.0, 3.0, 2.0, 0.0, 0.0, 1.0, 0.0, ], [ 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 3.0, 17.0, 17.0, 29.0, 42.0, 73.0, 93.0, 104.0, 169.0, 222.0, 232.0, 250.0, 361.0, 346.0, 375.0, 363.0, 349.0, 333.0, 312.0, 247.0, 195.0, 176.0, 109.0, 92.0, 51.0, 43.0, 26.0, 17.0, 7.0, 6.0, 2.0, 2.0, 2.0, 0.0, 1.0, 0.0, ], [ 0.0, 0.0, 0.0, 2.0, 1.0, 2.0, 6.0, 8.0, 16.0, 33.0, 51.0, 95.0, 93.0, 134.0, 164.0, 231.0, 298.0, 353.0, 341.0, 420.0, 432.0, 425.0, 404.0, 360.0, 326.0, 301.0, 211.0, 175.0, 139.0, 93.0, 62.0, 56.0, 
26.0, 11.0, 11.0, 11.0, 1.0, 0.0, 2.0, 1.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 2.0, 1.0, 1.0, 9.0, 13.0, 28.0, 21.0, 47.0, 82.0, 106.0, 150.0, 199.0, 241.0, 284.0, 334.0, 403.0, 479.0, 445.0, 438.0, 408.0, 386.0, 316.0, 300.0, 218.0, 231.0, 135.0, 111.0, 77.0, 68.0, 27.0, 27.0, 12.0, 3.0, 6.0, 0.0, 1.0, 0.0, 0.0, 1.0, ], [ 0.0, 0.0, 0.0, 0.0, 1.0, 5.0, 6.0, 13.0, 16.0, 35.0, 68.0, 68.0, 95.0, 142.0, 190.0, 260.0, 287.0, 363.0, 403.0, 448.0, 478.0, 446.0, 439.0, 401.0, 396.0, 314.0, 245.0, 226.0, 134.0, 114.0, 66.0, 44.0, 29.0, 23.0, 14.0, 8.0, 12.0, 6.0, 3.0, 0.0, 2.0, 0.0, ], [ 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 9.0, 14.0, 22.0, 34.0, 60.0, 86.0, 129.0, 179.0, 210.0, 270.0, 275.0, 370.0, 416.0, 445.0, 497.0, 449.0, 440.0, 426.0, 385.0, 278.0, 273.0, 210.0, 141.0, 115.0, 77.0, 50.0, 32.0, 25.0, 15.0, 8.0, 5.0, 3.0, 3.0, 0.0, 0.0, 0.0, ], [ 1.0, 0.0, 0.0, 0.0, 1.0, 4.0, 5.0, 11.0, 24.0, 19.0, 41.0, 88.0, 126.0, 120.0, 197.0, 260.0, 281.0, 344.0, 398.0, 411.0, 476.0, 436.0, 488.0, 393.0, 331.0, 302.0, 236.0, 205.0, 171.0, 115.0, 61.0, 65.0, 23.0, 19.0, 11.0, 4.0, 5.0, 2.0, 0.0, 3.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 2.0, 2.0, 4.0, 2.0, 13.0, 22.0, 32.0, 47.0, 72.0, 103.0, 135.0, 209.0, 200.0, 284.0, 341.0, 360.0, 391.0, 412.0, 424.0, 443.0, 370.0, 323.0, 262.0, 221.0, 180.0, 159.0, 91.0, 75.0, 38.0, 28.0, 24.0, 10.0, 6.0, 1.0, 2.0, 0.0, 1.0, 0.0, 0.0, ], [ 1.0, 0.0, 0.0, 0.0, 3.0, 1.0, 4.0, 6.0, 18.0, 30.0, 37.0, 66.0, 98.0, 119.0, 141.0, 203.0, 233.0, 303.0, 345.0, 348.0, 360.0, 367.0, 350.0, 302.0, 280.0, 251.0, 203.0, 155.0, 121.0, 64.0, 49.0, 43.0, 28.0, 21.0, 8.0, 4.0, 2.0, 1.0, 1.0, 1.0, 0.0, 0.0, ], [ 0.0, 1.0, 0.0, 0.0, 0.0, 4.0, 4.0, 10.0, 17.0, 28.0, 43.0, 52.0, 75.0, 108.0, 162.0, 155.0, 211.0, 268.0, 278.0, 339.0, 331.0, 339.0, 305.0, 239.0, 241.0, 223.0, 161.0, 136.0, 93.0, 86.0, 63.0, 32.0, 25.0, 15.0, 10.0, 0.0, 2.0, 1.0, 0.0, 0.0, 0.0, 1.0, ], [ 2.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 5.0, 10.0, 17.0, 27.0, 40.0, 86.0, 91.0, 123.0, 150.0, 172.0, 197.0, 247.0, 237.0, 255.0, 279.0, 271.0, 218.0, 189.0, 194.0, 152.0, 108.0, 92.0, 52.0, 41.0, 32.0, 16.0, 22.0, 5.0, 1.0, 4.0, 1.0, 0.0, 0.0, 0.0, 0.0, ], [ 1.0, 1.0, 0.0, 0.0, 1.0, 2.0, 6.0, 4.0, 6.0, 14.0, 22.0, 28.0, 57.0, 56.0, 87.0, 111.0, 142.0, 169.0, 206.0, 202.0, 211.0, 209.0, 181.0, 174.0, 158.0, 157.0, 105.0, 89.0, 62.0, 44.0, 34.0, 20.0, 15.0, 12.0, 9.0, 7.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 5.0, 4.0, 8.0, 15.0, 27.0, 33.0, 38.0, 64.0, 67.0, 84.0, 119.0, 131.0, 153.0, 165.0, 151.0, 151.0, 129.0, 126.0, 125.0, 92.0, 70.0, 46.0, 33.0, 23.0, 22.0, 10.0, 7.0, 2.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 7.0, 8.0, 11.0, 16.0, 15.0, 35.0, 43.0, 39.0, 61.0, 86.0, 99.0, 83.0, 131.0, 131.0, 107.0, 101.0, 112.0, 86.0, 76.0, 69.0, 57.0, 39.0, 32.0, 17.0, 11.0, 8.0, 1.0, 3.0, 3.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 6.0, 4.0, 11.0, 17.0, 22.0, 20.0, 34.0, 27.0, 46.0, 80.0, 69.0, 71.0, 76.0, 79.0, 66.0, 82.0, 67.0, 58.0, 49.0, 32.0, 21.0, 22.0, 21.0, 9.0, 5.0, 4.0, 5.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 4.0, 8.0, 19.0, 15.0, 16.0, 26.0, 26.0, 49.0, 54.0, 51.0, 45.0, 46.0, 55.0, 39.0, 33.0, 40.0, 24.0, 22.0, 20.0, 15.0, 8.0, 11.0, 4.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 2.0, 1.0, 6.0, 8.0, 12.0, 15.0, 28.0, 24.0, 25.0, 30.0, 39.0, 34.0, 28.0, 27.0, 27.0, 22.0, 18.0, 10.0, 11.0, 6.0, 4.0, 9.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 0.0, 0.0, 
0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 2.0, 0.0, 4.0, 5.0, 5.0, 9.0, 12.0, 13.0, 22.0, 22.0, 19.0, 23.0, 21.0, 20.0, 20.0, 10.0, 20.0, 11.0, 8.0, 5.0, 5.0, 4.0, 0.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 1.0, 1.0, 0.0, 1.0, 3.0, 2.0, 3.0, 1.0, 4.0, 4.0, 10.0, 11.0, 13.0, 16.0, 12.0, 9.0, 18.0, 19.0, 6.0, 8.0, 5.0, 5.0, 1.0, 4.0, 0.0, 2.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 3.0, 5.0, 3.0, 1.0, 5.0, 11.0, 2.0, 5.0, 3.0, 8.0, 4.0, 3.0, 6.0, 4.0, 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 4.0, 0.0, 3.0, 2.0, 3.0, 4.0, 4.0, 8.0, 3.0, 6.0, 2.0, 2.0, 4.0, 1.0, 1.0, 2.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 1.0, 1.0, 1.0, 2.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 3.0, 1.0, 0.0, 2.0, 3.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ], ] assert xedges.tolist() == [ -numpy.inf, -4.0, -3.8, -3.6, -3.4, -3.2, -3.0, -2.8, -2.5999999999999996, -2.4, -2.2, -2.0, -1.7999999999999998, -1.5999999999999996, -1.4, -1.1999999999999997, -1.0, -0.7999999999999998, -0.5999999999999996, -0.3999999999999999, -0.19999999999999973, 0.0, 0.20000000000000018, 0.40000000000000036, 0.6000000000000005, 0.8000000000000007, 1.0, 1.2000000000000002, 1.4000000000000004, 1.6000000000000005, 1.8000000000000007, 2.0, 2.2, 2.4000000000000004, 2.6000000000000005, 2.8000000000000007, 3.0, 3.2, 3.4000000000000004, 3.6000000000000005, 3.8000000000000007, 4.0, numpy.inf, ] assert yedges.tolist() == [ -numpy.inf, -4.0, -3.8, -3.6, -3.4, -3.2, -3.0, -2.8, -2.5999999999999996, -2.4, -2.2, -2.0, -1.7999999999999998, -1.5999999999999996, -1.4, -1.1999999999999997, -1.0, -0.7999999999999998, -0.5999999999999996, -0.3999999999999999, -0.19999999999999973, 0.0, 0.20000000000000018, 0.40000000000000036, 0.6000000000000005, 0.8000000000000007, 1.0, 1.2000000000000002, 1.4000000000000004, 1.6000000000000005, 1.8000000000000007, 2.0, 2.2, 2.4000000000000004, 2.6000000000000005, 2.8000000000000007, 3.0, 3.2, 3.4000000000000004, 3.6000000000000005, 3.8000000000000007, 4.0, numpy.inf, ] f["hpxpy"].errors() def test_numpy_profile(): # python -c 'import ROOT, skhep_testdata; f = ROOT.TFile(skhep_testdata.data_path("uproot-hepdata-example.root")); h = f.Get("hprof"); h.SetErrorOption("g"); print(repr(h.GetErrorOption())); print([h.GetBinError(i) for i in range(102)])' with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) 
as f: obj = f["hprof"] assert obj.axis().edges(flow=True).tolist() == [ -numpy.inf, -4.0, -3.92, -3.84, -3.76, -3.68, -3.6, -3.52, -3.44, -3.36, -3.2800000000000002, -3.2, -3.12, -3.04, -2.96, -2.88, -2.8, -2.7199999999999998, -2.6399999999999997, -2.56, -2.48, -2.4, -2.3200000000000003, -2.24, -2.16, -2.08, -2.0, -1.92, -1.8399999999999999, -1.7599999999999998, -1.6800000000000002, -1.6, -1.52, -1.44, -1.3599999999999999, -1.2799999999999998, -1.1999999999999997, -1.12, -1.04, -0.96, -0.8799999999999999, -0.7999999999999998, -0.7199999999999998, -0.6400000000000001, -0.56, -0.48, -0.3999999999999999, -0.31999999999999984, -0.23999999999999977, -0.16000000000000014, -0.08000000000000007, 0.0, 0.08000000000000007, 0.16000000000000014, 0.2400000000000002, 0.3200000000000003, 0.40000000000000036, 0.4800000000000004, 0.5600000000000005, 0.6399999999999997, 0.7199999999999998, 0.7999999999999998, 0.8799999999999999, 0.96, 1.04, 1.12, 1.2000000000000002, 1.2800000000000002, 1.3600000000000003, 1.4400000000000004, 1.5200000000000005, 1.6000000000000005, 1.6799999999999997, 1.7599999999999998, 1.8399999999999999, 1.92, 2.0, 2.08, 2.16, 2.24, 2.3200000000000003, 2.4000000000000004, 2.4800000000000004, 2.5600000000000005, 2.6400000000000006, 2.7199999999999998, 2.8, 2.88, 2.96, 3.04, 3.12, 3.2, 3.2800000000000002, 3.3600000000000003, 3.4400000000000004, 3.5200000000000005, 3.6000000000000005, 3.6799999999999997, 3.76, 3.84, 3.92, 4.0, numpy.inf, ] assert obj.values(flow=True).tolist() == [ 17.99833583831787, 17.05295467376709, 16.96826426188151, 15.189482688903809, 13.73788833618164, 13.375219821929932, 13.510369300842285, 12.646300633748373, 12.66011929512024, 11.824836373329163, 11.623446782430014, 11.472076733907064, 10.052986780802408, 10.030597317603327, 9.614417321341378, 8.776622557640076, 8.620806604623795, 8.179968640208244, 7.4127079410317505, 7.497226472254153, 6.980819525257234, 6.505285000189756, 6.251851732080633, 5.813575813074431, 5.584403858840011, 5.011047506171846, 4.91228925087014, 4.524659741255972, 4.24002511460382, 4.077462992146468, 3.638793389923525, 3.5221418274773493, 3.255871357954093, 2.961020285108953, 2.706199676046999, 2.5841911697177635, 2.3627997641933374, 2.1493446517490598, 2.0077903614940302, 1.8382392522714865, 1.712551970266353, 1.6131308919867815, 1.449079261311019, 1.3471352570103472, 1.245844892917823, 1.1707659457058741, 1.1247396327430272, 1.1198479739799145, 1.0281285326813325, 1.0417602170529079, 1.0197545518784679, 1.0003131686022901, 1.0794705348466953, 1.02964734215157, 1.0603044479791786, 1.1542847645715888, 1.1745855332784314, 1.317462644113901, 1.2909844154549628, 1.4553258675057892, 1.5839730073833629, 1.7274112791524214, 1.8171250952244693, 1.999616364569922, 2.1976474514968105, 2.332895248766955, 2.573682461088714, 2.7457328102556744, 2.9121971759978718, 3.157701852473807, 3.3310595230272195, 3.685565097902363, 4.011118740219254, 4.3144918141177175, 4.548257073418039, 4.93563452094951, 5.191882547210245, 5.4767660945653915, 5.7347985672950745, 6.18110868574559, 6.4068912520553125, 7.048662836268797, 7.238576850891113, 7.555341683077009, 8.169158785842185, 9.019065893613375, 8.789572896184149, 9.365243797302247, 9.570246945256772, 10.279665088653564, 11.086111783981323, 11.118131773812431, 12.656685405307346, 12.176475048065186, 12.393176078796387, 16.518978118896484, 13.303139686584473, 14.635026613871256, 14.96741771697998, 0.0, 18.32199478149414, 17.8403746287028, ] assert obj.errors(flow=True).tolist() == [ 0.2425426377130359, 
0.7421210342302459, 0.4940066334987832, 0.0, 0.0, 0.2464980351520863, 0.5555373736396868, 0.24357921956140027, 0.224616129931814, 0.34906168361481404, 0.4356334723283742, 0.5128651082538828, 0.2086307384620165, 0.28308077003120913, 0.2891541406820913, 0.16769727425722117, 0.1725773236590863, 0.12765099099147656, 0.10176558165942572, 0.15209837443095275, 0.11509671433352467, 0.10149120489291587, 0.11432069747168126, 0.09759737443630617, 0.0925726825400381, 0.06761852807106097, 0.07883833461255244, 0.06391971743421765, 0.07016808339801081, 0.0679063456384074, 0.05330254783019173, 0.056304893803072076, 0.055238305812566516, 0.047974962128087315, 0.042558147198316985, 0.04422411577185198, 0.0408986879854767, 0.03453675368752007, 0.039438577439864786, 0.03461426584130604, 0.036187944978430614, 0.034085467706933194, 0.03170797279308202, 0.031219377450826796, 0.03011256422687173, 0.02926608780683337, 0.0301281364334744, 0.029773650810830235, 0.029748389712173053, 0.03081957669527989, 0.03132949553456636, 0.02939420318612115, 0.029258470846132534, 0.02930430026995912, 0.02804401796249436, 0.031175984988258274, 0.030108329759273612, 0.03149116682767534, 0.029094905772258012, 0.03256760040302268, 0.034455467521643364, 0.03480207320474039, 0.032712202513451534, 0.03860859020725239, 0.03885261043325975, 0.03856340740992072, 0.04624045482680718, 0.04543317885660241, 0.04864621055120345, 0.05203738725490573, 0.043244016740287015, 0.05850656051444226, 0.059709748394490884, 0.06594229969906718, 0.07220151434675717, 0.08170131663135467, 0.08712811029061408, 0.08092332833341198, 0.09191356506835095, 0.10837656197125221, 0.10509032780349721, 0.1549338147492931, 0.12013956272890565, 0.11435861802671626, 0.18394299511064918, 0.36368702093446753, 0.13346262669376094, 0.18325723104438668, 0.17988975869975438, 0.1926530171606879, 0.352473088726965, 0.18420322865597596, 0.5959353241264886, 0.21540243485684468, 0.11755951260322403, 1.6619844323502102, 0.1352812684763272, 0.4534391377411209, 0.0, 0.0, 0.0, 0.16817919583370047, ] assert obj.errors( flow=True, error_mode=uproot.behaviors.TProfile._kERRORSPREAD ).tolist() == [ 0.34300708770751953, 1.0495176315307617, 0.8556445884959498, 0.0, 0.0, 0.3486008644104004, 1.1110747472793736, 0.5966447998707816, 0.7780930984827886, 0.9872955341457128, 1.3069004169851226, 1.9863180231181519, 0.8080233755703451, 1.5761270231822468, 1.7106589658888625, 1.0606106881094808, 1.3806185892726903, 1.0212079279318125, 0.9158902349348315, 1.5806526735782713, 1.281662768690052, 1.2676247428226026, 1.4684759475789604, 1.4109488746385728, 1.4984197698897908, 1.1653166117127, 1.560919388615718, 1.3285463784181335, 1.5147207420285738, 1.549991160077581, 1.3099853470686935, 1.443207670599461, 1.5506131361772943, 1.4416456163169384, 1.3979557820249364, 1.4898998932597651, 1.39295911912831, 1.284377246895075, 1.5059134195962758, 1.3897530746031688, 1.5224763480325734, 1.473186374916331, 1.367860043067912, 1.377195694990315, 1.3845231787179089, 1.3648794718765778, 1.4153812430343926, 1.419488271301224, 1.419219569870578, 1.4873439583962957, 1.5106535672672314, 1.4343045945107848, 1.4384340328933711, 1.4248038889030987, 1.340257624082002, 1.4582898146438432, 1.4006037738107093, 1.453377907771706, 1.2814976672937608, 1.4041886411676958, 1.446719393622703, 1.4349262381362273, 1.3129783240312063, 1.4747268574003336, 1.4485303652651937, 1.3563140181188076, 1.5217255253773476, 1.4693963839287074, 1.449624425594751, 1.4270014133077806, 1.1779530457556422, 1.517791441678946, 1.406668404280142, 
1.522396207351309, 1.3812963022723197, 1.5884551434189818, 1.4369536067546675, 1.2947732533345917, 1.2998541028572388, 1.429585037043725, 1.2073959432248138, 1.6830120202858494, 1.2013956272890565, 1.0788570447521093, 1.705817161574992, 2.271224717779226, 0.811821464847988, 0.9162861552219334, 0.8627209754934005, 0.8615704848834633, 1.40989235490786, 0.6892253711682418, 1.787805972379466, 0.7461759224922005, 0.3325085142189005, 2.350400924682617, 0.1913166046142578, 1.1106945168733242, 0.0, 0.0, 0.0, 0.29129491196004526, ] assert obj.errors( flow=True, error_mode=uproot.behaviors.TProfile._kERRORSPREADI ).tolist() == [ 0.2425426377130359, 0.7421210342302459, 0.4940066334987832, 0.2886751345948129, 0.2886751345948129, 0.2464980351520863, 0.5555373736396868, 0.24357921956140027, 0.224616129931814, 0.34906168361481404, 0.4356334723283742, 0.5128651082538828, 0.2086307384620165, 0.28308077003120913, 0.2891541406820913, 0.16769727425722117, 0.1725773236590863, 0.12765099099147656, 0.10176558165942572, 0.15209837443095275, 0.11509671433352467, 0.10149120489291587, 0.11432069747168126, 0.09759737443630617, 0.0925726825400381, 0.06761852807106097, 0.07883833461255244, 0.06391971743421765, 0.07016808339801081, 0.0679063456384074, 0.05330254783019173, 0.056304893803072076, 0.055238305812566516, 0.047974962128087315, 0.042558147198316985, 0.04422411577185198, 0.0408986879854767, 0.03453675368752007, 0.039438577439864786, 0.03461426584130604, 0.036187944978430614, 0.034085467706933194, 0.03170797279308202, 0.031219377450826796, 0.03011256422687173, 0.02926608780683337, 0.0301281364334744, 0.029773650810830235, 0.029748389712173053, 0.03081957669527989, 0.03132949553456636, 0.02939420318612115, 0.029258470846132534, 0.02930430026995912, 0.02804401796249436, 0.031175984988258274, 0.030108329759273612, 0.03149116682767534, 0.029094905772258012, 0.03256760040302268, 0.034455467521643364, 0.03480207320474039, 0.032712202513451534, 0.03860859020725239, 0.03885261043325975, 0.03856340740992072, 0.04624045482680718, 0.04543317885660241, 0.04864621055120345, 0.05203738725490573, 0.043244016740287015, 0.05850656051444226, 0.059709748394490884, 0.06594229969906718, 0.07220151434675717, 0.08170131663135467, 0.08712811029061408, 0.08092332833341198, 0.09191356506835095, 0.10837656197125221, 0.10509032780349721, 0.1549338147492931, 0.12013956272890565, 0.11435861802671626, 0.18394299511064918, 0.36368702093446753, 0.13346262669376094, 0.18325723104438668, 0.17988975869975438, 0.1926530171606879, 0.352473088726965, 0.18420322865597596, 0.5959353241264886, 0.21540243485684468, 0.11755951260322403, 1.6619844323502102, 0.1352812684763272, 0.4534391377411209, 0.2886751345948129, 0.0, 0.2886751345948129, 0.16817919583370047, ] assert obj.errors( flow=True, error_mode=uproot.behaviors.TProfile._kERRORSPREADG ).tolist() == [ 0.7071067811865475, 0.7071067811865475, 0.5773502691896258, 1.0, 1.0, 0.7071067811865475, 0.5, 0.4082482904638631, 0.2886751345948129, 0.35355339059327373, 0.3333333333333333, 0.2581988897471611, 0.2581988897471611, 0.1796053020267749, 0.1690308509457033, 0.15811388300841897, 0.125, 0.125, 0.1111111111111111, 0.09622504486493763, 0.08980265101338746, 0.08006407690254357, 0.0778498944161523, 0.06917144638660747, 0.06178020632152154, 0.058025885318565944, 0.050507627227610534, 0.048112522432468816, 0.04632410546120795, 0.04381079543383235, 0.04068942293855797, 0.03901371573204352, 0.035623524993954825, 0.033277916281986085, 0.03044312827739915, 0.02968260885977624, 0.029361010975735173, 
0.026889882837002246, 0.026189140043946204, 0.024906774069335894, 0.023769134427076417, 0.023137240669137377, 0.023180714250535184, 0.022668802672263903, 0.021749411414517784, 0.021442250696755896, 0.021286234067143354, 0.020974918506045256, 0.020961090407515925, 0.020721216851891204, 0.020739033894608506, 0.02049369659597791, 0.020340502363726694, 0.02056725174474318, 0.02092434876593436, 0.02137845624045064, 0.02149667901961739, 0.021667569500871973, 0.022703830459324992, 0.023193180352135665, 0.023816275411477048, 0.024253562503633298, 0.024914503091731197, 0.026180163474687157, 0.026822089039291005, 0.028432506701809173, 0.0303868562731382, 0.030919620705155318, 0.033557802760701215, 0.03646624787447364, 0.036711154910717615, 0.03854716722458499, 0.04244763599780089, 0.043314808182421, 0.05227083734893167, 0.05143444998736397, 0.06063390625908324, 0.0625, 0.07071067811865475, 0.07580980435789034, 0.08703882797784893, 0.09205746178983235, 0.1, 0.105999788000636, 0.10783277320343841, 0.16012815380508713, 0.1643989873053573, 0.2, 0.20851441405707477, 0.22360679774997896, 0.25, 0.2672612419124244, 0.3333333333333333, 0.2886751345948129, 0.35355339059327373, 0.7071067811865475, 0.7071067811865475, 0.4082482904638631, 1.0, 0.0, 1.0, 0.5773502691896258, ] def test_boost_1d(): boost_histogram = pytest.importorskip("boost_histogram") with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f: f["hpx"].to_boost() def test_boost_2d(): boost_histogram = pytest.importorskip("boost_histogram") with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f: f["hpxpy"].to_boost() def test_hist_1d(): hist = pytest.importorskip("hist") with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f: f["hpx"].to_hist() def test_hist_2d(): hist = pytest.importorskip("hist") with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f: f["hpxpy"].to_hist()
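

# --- Editor's note: hedged sketch, added by the editor and not part of the
# original test file. It complements the flow=True tests above by exercising
# the default (flow=False) behaviour; the expected counts are derived from the
# flow=True expectations above (102 bins including under/overflow, +/-inf edges).
# Named without the test_ prefix so it is not collected automatically.
def _sketch_numpy_1d_without_flow():
    with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f:
        values, edges = f["hpx"].to_numpy()       # flow defaults to False
        assert len(edges) == len(values) + 1      # edges always bracket the bins
        assert len(values) == 100                 # under/overflow bins dropped
        assert edges[0] == -4.0 and edges[-1] == 4.0
        assert numpy.isfinite(edges).all()        # the +/-inf flow edges are gone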
akshare/stock_fundamental/stock_register.py
NovelResearchInvestment/akshare
721
12615706
# -*- coding:utf-8 -*- #!/usr/bin/env python """ Date: 2022/1/7 17:19 Desc: 东方财富网-数据中心-新股数据-注册制审核 http://data.eastmoney.com/kcb/?type=nsb """ import pandas as pd import requests def stock_register_kcb() -> pd.DataFrame: """ 东方财富网-数据中心-新股数据-注册制审核-科创板 http://data.eastmoney.com/kcb/?type=nsb :return: 科创板注册制审核结果 :rtype: pandas.DataFrame """ url = "https://datacenter.eastmoney.com/securities/api/data/get" params = { 'st': 'UPDATE_DATE', 'sr': '-1', 'ps': '5000', 'p': '1', 'type': 'RPT_REGISTERED_INFO', 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE', 'token': '<KEY>', 'client': 'WEB', 'filter': '(TOLIST_MARKET="科创板")', } headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36' } r = requests.get(url, params=params, headers=headers) data_json = r.json() page_num = data_json['result']['pages'] big_df = pd.DataFrame() for page in range(1, page_num+1): params = { 'st': 'UPDATE_DATE', 'sr': '-1', 'ps': '5000', 'p': page, 'type': 'RPT_REGISTERED_INFO', 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE', 'token': '<KEY>', 'client': 'WEB', 'filter': '(TOLIST_MARKET="科创板")', } headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36' } r = requests.get(url, params=params, headers=headers) data_json = r.json() temp_df = pd.DataFrame(data_json['result']["data"]) big_df = big_df.append(temp_df, ignore_index=True) big_df.reset_index(inplace=True) big_df['index'] = range(1, len(big_df) + 1) big_df.columns = [ "序号", "_", "_", "发行人全称", "审核状态", "_", "注册地", "证监会行业", "保荐机构", "律师事务所", "会计师事务所", "更新日期", "受理日期", "拟上市地点", "_", ] big_df = big_df[ [ "序号", "发行人全称", "审核状态", "注册地", "证监会行业", "保荐机构", "律师事务所", "会计师事务所", "更新日期", "受理日期", "拟上市地点", ] ] big_df['更新日期'] = pd.to_datetime(big_df['更新日期']).dt.date big_df['受理日期'] = pd.to_datetime(big_df['受理日期']).dt.date return big_df def stock_register_cyb() -> pd.DataFrame: """ 东方财富网-数据中心-新股数据-注册制审核-创业板 http://data.eastmoney.com/xg/cyb/ :return: 创业板注册制审核结果 :rtype: pandas.DataFrame """ url = "https://datacenter.eastmoney.com/securities/api/data/get" params = { 'st': 'UPDATE_DATE', 'sr': '-1', 'ps': '5000', 'p': '1', 'type': 'RPT_REGISTERED_INFO', 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE', 'token': '<KEY>', 'client': 'WEB', 'filter': '(TOLIST_MARKET="创业板")', } headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36' } r = requests.get(url, params=params, headers=headers) data_json = r.json() page_num = data_json['result']['pages'] big_df = pd.DataFrame() for page in range(1, page_num+1): params = { 'st': 'UPDATE_DATE', 'sr': '-1', 'ps': '5000', 'p': page, 'type': 'RPT_REGISTERED_INFO', 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE', 'token': '<KEY>', 'client': 'WEB', 'filter': '(TOLIST_MARKET="创业板")', } headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36' } r = requests.get(url, params=params, headers=headers) data_json = r.json() temp_df = pd.DataFrame(data_json['result']["data"]) big_df = big_df.append(temp_df, ignore_index=True) big_df.reset_index(inplace=True) big_df['index'] = big_df.index + 1 big_df.columns = [ "序号", "_", "_", "发行人全称", "审核状态", "_", "注册地", "证监会行业", "保荐机构", "律师事务所", "会计师事务所", "更新日期", "受理日期", "拟上市地点", "_", ] big_df = big_df[ [ "序号", "发行人全称", "审核状态", "注册地", "证监会行业", "保荐机构", "律师事务所", "会计师事务所", "更新日期", "受理日期", "拟上市地点", ] ] big_df['更新日期'] = pd.to_datetime(big_df['更新日期']).dt.date big_df['受理日期'] = pd.to_datetime(big_df['受理日期']).dt.date return big_df def stock_register_db() -> pd.DataFrame: """ 东方财富网-数据中心-新股数据-注册制审核-达标企业 http://data.eastmoney.com/xg/cyb/ :return: 达标企业 :rtype: pandas.DataFrame """ # TODO url = "https://datacenter-web.eastmoney.com/api/data/v1/get" params = { 'sortColumns': 'NOTICE_DATE,SECURITY_CODE', 'sortTypes': '-1,-1', 'pageSize': '50', 'pageNumber': '1', 'reportName': 'RPT_KCB_IPO', 'columns': 'KCB_LB', 'source': 'WEB', 'client': 'WEB', 'filter': '(ORG_TYPE_CODE="03")', } headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36' } r = requests.get(url, params=params, headers=headers) data_json = r.json() page_num = data_json['result']['pages'] big_df = pd.DataFrame() for page in range(1, page_num+1): params.update({'pageNumber': page}) headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36' } r = requests.get(url, params=params, headers=headers) data_json = r.json() temp_df = pd.DataFrame(data_json['result']['data']) big_df = big_df.append(temp_df, ignore_index=True) big_df.reset_index(inplace=True) big_df['index'] = range(1, len(big_df) + 1) big_df.columns = [ "序号", "_", "_", "_", "企业名称", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "经营范围", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "近三年营业收入-2019", "近三年净利润-2019", "近三年研发费用-2019", "近三年营业收入-2018", "近三年净利润-2018", "近三年研发费用-2018", "近三年营业收入-2017", "近三年净利润-2017", "近三年研发费用-2017", "近两年累计净利润", "_", "_", "_", "_", "_", ] big_df = big_df[ [ "序号", "企业名称", "经营范围", "近三年营业收入-2019", "近三年净利润-2019", "近三年研发费用-2019", "近三年营业收入-2018", "近三年净利润-2018", "近三年研发费用-2018", "近三年营业收入-2017", "近三年净利润-2017", "近三年研发费用-2017", "近两年累计净利润", ] ] return big_df if __name__ == "__main__": stock_register_kcb_df = stock_register_kcb() print(stock_register_kcb_df) stock_register_cyb_df = stock_register_cyb() print(stock_register_cyb_df) stock_register_db_df = stock_register_db() print(stock_register_db_df)
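
# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Each function above returns a pandas.DataFrame with Chinese column names.
# A downstream caller might post-process it as sketched below; the status label
# used in the filter is an assumption by the editor, not a value confirmed by
# the source, and the call performs network requests to Eastmoney.
#
#   df = stock_register_kcb()
#   registered = df[df["审核状态"] == "注册生效"]                 # assumed status label
#   recent = df.sort_values("更新日期", ascending=False).head(20)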
source/tomopy/misc/phantom.py
WilliamJudge94/tomopy
229
12615726
<filename>source/tomopy/misc/phantom.py<gh_stars>100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- # ######################################################################### # Copyright (c) 2015-2019, UChicago Argonne, LLC. All rights reserved. # # # # Copyright 2015-2019. UChicago Argonne, LLC. This software was produced # # under U.S. Government contract DE-AC02-06CH11357 for Argonne National # # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # # U.S. Department of Energy. The U.S. Government has rights to use, # # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is # # modified to produce derivative works, such modified software should # # be clearly marked, so as not to confuse it with the version available # # from ANL. # # # # Additionally, redistribution and use in source and binary forms, with # # or without modification, are permitted provided that the following # # conditions are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice, this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of UChicago Argonne, LLC, Argonne National # # Laboratory, ANL, the U.S. Government, nor the names of its # # contributors may be used to endorse or promote products derived # # from this software without specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago # # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. # # ######################################################################### """ Module for generating synthetic phantoms. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np import skimage import skimage.transform import tifffile import os.path import logging logger = logging.getLogger(__name__) __author__ = "<NAME>" __copyright__ = "Copyright (c) 2015, UChicago Argonne, LLC." __docformat__ = 'restructuredtext en' __all__ = ['baboon', 'barbara', 'cameraman', 'checkerboard', 'lena', 'peppers', 'shepp2d', 'shepp3d', 'phantom'] DATA_PATH = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', 'data')) try: resize_kwargs = {'anti_aliasing': False} ignore = skimage.transform.resize(np.zeros(5), [2], mode='constant', **resize_kwargs) except TypeError: logger.debug("Determined that the anti_aliasing keyword is not needed.") resize_kwargs = dict() def baboon(size=512, dtype='float32'): """ Load test baboon image array. 
Parameters ---------- size : int or tuple of int, optional Size of the output image. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. """ size = _totuple(size, 2) fname = os.path.join(DATA_PATH, 'baboon.tif') im = tifffile.imread(fname) im = skimage.transform.resize(im, size, order=3, preserve_range=True, mode='constant', **resize_kwargs) im = np.expand_dims(im, 0) im = im.astype(dtype) return im def barbara(size=512, dtype='float32'): """ Load test Barbara image array. Parameters ---------- size : int or tuple of int, optional Size of the output image. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. """ size = _totuple(size, 2) fname = os.path.join(DATA_PATH, 'barbara.tif') im = tifffile.imread(fname) im = skimage.transform.resize(im, size, order=3, preserve_range=True, mode='constant', **resize_kwargs) im = np.expand_dims(im, 0) return im.astype(dtype) def cameraman(size=512, dtype='float32'): """ Load test cameraman image array. Parameters ---------- size : int or tuple of int, optional Size of the output image. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. """ size = _totuple(size, 2) fname = os.path.join(DATA_PATH, 'cameraman.tif') im = tifffile.imread(fname) im = skimage.transform.resize(im, size, order=3, preserve_range=True, mode='constant', **resize_kwargs) im = np.expand_dims(im, 0) return im.astype(dtype) def checkerboard(size=512, dtype='float32'): """ Load test checkerboard image array. Parameters ---------- size : int or tuple of int, optional Size of the output image. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. """ size = _totuple(size, 2) fname = os.path.join(DATA_PATH, 'checkerboard.tif') im = tifffile.imread(fname) im = skimage.transform.resize(im, size, order=3, preserve_range=True, mode='constant', **resize_kwargs) im = np.expand_dims(im, 0) return im.astype(dtype) def lena(size=512, dtype='float32'): """ Load test Lena image array. Parameters ---------- size : int or tuple of int, optional Size of the output image. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. """ size = _totuple(size, 2) fname = os.path.join(DATA_PATH, 'lena.tif') im = tifffile.imread(fname) im = skimage.transform.resize(im, size, order=3, preserve_range=True, mode='constant', **resize_kwargs) im = np.expand_dims(im, 0) return im.astype(dtype) def peppers(size=512, dtype='float32'): """ Load test peppers image array. Parameters ---------- size : int or tuple of int, optional Size of the output image. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. """ size = _totuple(size, 2) fname = os.path.join(DATA_PATH, 'peppers.tif') im = tifffile.imread(fname) im = skimage.transform.resize(im, size, order=3, preserve_range=True, mode='constant', **resize_kwargs) im = np.expand_dims(im, 0) return im.astype(dtype) def shepp2d(size=512, dtype='float32'): """ Load test Shepp-Logan image array. Parameters ---------- size : int or tuple of int, optional Size of the output image. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. 
""" size = _totuple(size, 2) fname = os.path.join(DATA_PATH, 'shepp2d.tif') im = tifffile.imread(fname) im = skimage.transform.resize(im, size, order=3, preserve_range=True, mode='constant', **resize_kwargs) im = np.expand_dims(im, 0) return im.astype(dtype) def _totuple(size, dim): """ Converts size to tuple. """ if not isinstance(size, tuple): if dim == 2: size = (size, size) elif dim == 3: size = (size, size, size) return size def shepp3d(size=128, dtype='float32'): """ Load 3D Shepp-Logan image array. Parameters ---------- size : int or tuple, optional Size of the 3D data. dtype : str, optional The desired data-type for the array. Returns ------- ndarray Output 3D test image. """ size = _totuple(size, 3) shepp_params = _array_to_params(_get_shepp_array()) return phantom(size, shepp_params, dtype).clip(0, np.inf) def phantom(size, params, dtype='float32'): """ Generate a cube of given size using a list of ellipsoid parameters. Parameters ---------- size: tuple of int Size of the output cube. params: list of dict List of dictionaries with the parameters defining the ellipsoids to include in the cube. dtype: str, optional Data type of the output ndarray. Returns ------- ndarray 3D object filled with the specified ellipsoids. """ # instantiate ndarray cube obj = np.zeros(size, dtype=dtype) # define coords coords = _define_coords(size) # recursively add ellipsoids to cube for param in params: _ellipsoid(param, out=obj, coords=coords) return obj def _ellipsoid(params, shape=None, out=None, coords=None): """ Generate a cube containing an ellipsoid defined by its parameters. If out is given, fills the given cube instead of creating a new one. """ # handle inputs if shape is None and out is None: raise ValueError("You need to set shape or out") if out is None: out = np.zeros(shape) if shape is None: shape = out.shape if len(shape) == 1: shape = shape, shape, shape elif len(shape) == 2: shape = shape[0], shape[1], 1 elif len(shape) > 3: raise ValueError("input shape must be lower or equal to 3") if coords is None: coords = _define_coords(shape) # rotate coords coords = _transform(coords, params) # recast as ndarray coords = np.asarray(coords) np.square(coords, out=coords) ellip_mask = coords.sum(axis=0) <= 1. ellip_mask.resize(shape) # fill ellipsoid with value out[ ellip_mask ] += params['A'] return out def _rotation_matrix(p): """ Defines an Euler rotation matrix from angles phi, theta and psi. """ cphi = np.cos(np.radians(p['phi'])) sphi = np.sin(np.radians(p['phi'])) ctheta = np.cos(np.radians(p['theta'])) stheta = np.sin(np.radians(p['theta'])) cpsi = np.cos(np.radians(p['psi'])) spsi = np.sin(np.radians(p['psi'])) alpha = [[cpsi * cphi - ctheta * sphi * spsi, cpsi * sphi + ctheta * cphi * spsi, spsi * stheta], [-spsi * cphi - ctheta * sphi * cpsi, -spsi * sphi + ctheta * cphi * cpsi, cpsi * stheta], [stheta * sphi, -stheta * cphi, ctheta]] return np.asarray(alpha) def _define_coords(shape): """ Generate a tuple of coords in 3D with a given shape. """ mgrid = np.lib.index_tricks.nd_grid() cshape = np.asarray(1j) * shape x, y, z = mgrid[-1:1:cshape[0], -1:1:cshape[1], -1:1:cshape[2]] return x, y, z def _transform(coords, p): """ Apply rotation, translation and rescaling to a 3-tuple of coords. 
""" alpha = _rotation_matrix(p) out_coords = np.tensordot(alpha, coords, axes=1) _shape = (3,) + (1,) * ( out_coords.ndim - 1 ) _dt = out_coords.dtype M0 = np.array([p['x0'], p['y0'], p['z0']], dtype=_dt).reshape(_shape) sc = np.array([p['a'], p['b'], p['c']], dtype=_dt).reshape(_shape) out_coords -= M0 out_coords /= sc return out_coords def _get_shepp_array(): """ Returns the parameters for generating modified Shepp-Logan phantom. """ shepp_array = [ [1., .6900, .920, .810, 0., 0., 0., 90., 90., 90.], [-.8, .6624, .874, .780, 0., -.0184, 0., 90., 90., 90.], [-.2, .1100, .310, .220, .22, 0., 0., -108., 90., 100.], [-.2, .1600, .410, .280, -.22, 0., 0., 108., 90., 100.], [.1, .2100, .250, .410, 0., .35, -.15, 90., 90., 90.], [.1, .0460, .046, .050, 0., .1, .25, 90., 90., 90.], [.1, .0460, .046, .050, 0., -.1, .25, 90., 90., 90.], [.1, .0460, .023, .050, -.08, -.605, 0., 90., 90., 90.], [.1, .0230, .023, .020, 0., -.606, 0., 90., 90., 90.], [.1, .0230, .046, .020, .06, -.605, 0., 90., 90., 90.]] return shepp_array def _array_to_params(array): """ Converts list to a dictionary. """ # mandatory parameters to define an ellipsoid params_tuple = [ 'A', 'a', 'b', 'c', 'x0', 'y0', 'z0', 'phi', 'theta', 'psi'] array = np.asarray(array) out = [] for i in range(array.shape[0]): tmp = dict() for k, j in zip(params_tuple, list(range(array.shape[1]))): tmp[k] = array[i, j] out.append(tmp) return out
riddle/feature_importance.py
LaudateCorpus1/RIDDLE-1
110
12615741
"""feature_importance.py Computes feature contribution scores via DeepLIFT (Shrikumar et al., 2016) & determines most important features via paired t-test with adjustment for multiple comparisons (Bonferroni correction) using said scores. Requires: NumPy, SciPy, DeepLIFT (and their dependencies) Author: <NAME>, Rzhetsky Lab Copyright: 2018, all rights reserved """ from __future__ import print_function from collections import OrderedDict from os.path import abspath from os.path import dirname import sys import time import numpy as np from scipy import stats from .models import chunks from .summary import Summary # import deeplift, configure path if not already installed sys.path.append(dirname(dirname(abspath(__file__))) + '/deeplift') from deeplift.conversion import kerasapi_conversion as kc from deeplift.layers import NonlinearMxtsMode # how to handle floating pt errs np.seterr(divide='ignore', over='raise', under='raise') class FeatureImportanceSummary(Summary): """Feature importance summary.""" def __init__(self, sums_D, sums_D2, idx_feat_dict, idx_class_dict, icd9_descript_dict, pairs, num_sample): """Initialize feature importance summary. Arguments: sums_D: np.ndarray, float 2-D array of sums of differences in DeepLIFT contribution scores with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of differences in scores across features sums_D2: np.ndarray, float 2-D array of sums of squared differences in DeepLIFT contribution scores with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of squared differences in scores across features idx_feat_dict: {int: string} dictionary mapping feature indices to features idx_class_dict: {int: string} dictionary mapping class indices to classes icd9_descript_dict: {string: string} dictionary mapping ICD9 codes to description text pairs: [(int, int)] list of pairs of classes which were compared during interpretation num_sample: int number of samples present in the dataset """ num_feature = len(idx_feat_dict) num_pair = len(pairs) unadjusted_t_values, p_values = _paired_ttest_with_diff_sums( sums_D, sums_D2, pairs=pairs, num_sample=num_sample) list_unadjusted_t, list_p = _get_list_signif_scores( unadjusted_t_values, p_values) list_pairs = _get_list_pairs(pairs, idx_class_dict=idx_class_dict, num_feature=num_feature) list_feat_names = _get_list_feat_names(idx_feat_dict, num_pair) list_feat_descripts = _get_list_feat_descripts( list_feat_names, icd9_descript_dict=icd9_descript_dict) super(FeatureImportanceSummary, self).__init__(OrderedDict( [('feat', list_feat_names), ('descript', list_feat_descripts), ('pair', list_pairs), ('unadjusted_t', list_unadjusted_t), ('p', list_p)])) def get_diff_sums(hdf5_path, x_test, process_x_func, num_feature, num_class, batch_size=1024): """Get differences in sums of contribution score values. Performs preparations for determining hich features are important for discriminating between two classes, computing DeepLIFT contribution scores, and sums for differences of these scores between classes (to be used for paired t-tests). 
Arguments: hdf5_path: str path to saved HDF5 Keras Model process_x_func: function function for vectorizing feature data num_feature: int number of features present in the dataset num_class: int number of classes batch_size: int batch size Returns: sums_D: np.ndarray, float 2-D array of sums of differences in DeepLIFT contribution scores with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of differences in scores across features sums_D2: np.ndarray, float 2-D array of sums of squared differences in DeepLIFT contribution scores with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of squared differences in scores across features sums: np.ndarray, float 2-D array of sums of DeepLIFT contribution scores with shape (num_class, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of differences in scores across features pairs: [(int, int)] list of pairs of classes which were compared during interpretation """ dlc_generator = _deeplift_contribs_generator( hdf5_path, x_test, process_x_func, num_feature=num_feature, num_class=num_class, batch_size=batch_size) sums_D, sums_D2, sums_contribs, pairs = _diff_sums_from_generator( dlc_generator, num_feature=num_feature, num_class=num_class) return sums_D, sums_D2, sums_contribs, pairs def _deeplift_contribs_generator(hdf5_path, x_test, process_x_func, num_feature, num_class, batch_size): """Generator which yields DeepLIFT contribution scores. Applies vectorization batch-by-batch to avoid memory overflow. Arguments: hdf5_path: str path to saved HDF5 Keras Model process_x_func: function function for vectorizing feature data num_feature: int number of features present in the dataset num_class: int number of classes batch_size: int batch size """ # convert Keras model, and get relevant function deeplift_model = kc.convert_model_from_saved_files( hdf5_path, nonlinear_mxts_mode=NonlinearMxtsMode.RevealCancel) # input layer is 0, since we have a softmax layer the target layer is -2 get_deeplift_contribs = deeplift_model.get_target_contribs_func( find_scores_layer_idx=0, target_layer_idx=-2) num_batch = int(round(float(len(x_test)) / batch_size)) # yield a 3D array detailing the DeepLIFT contrib scores for batch_idx, x in enumerate(chunks(x_test, batch_size)): start = time.time() x = process_x_func(x) batch_size = len(x) zeros = [0.0] * batch_size # reference data all_batch_contribs = np.zeros((num_class, batch_size, num_feature)) for c in range(num_class): batch_contribs = get_deeplift_contribs( task_idx=c, input_data_list=[x], input_references_list=zeros, batch_size=1024, progress_update=None) all_batch_contribs[c] = batch_contribs if not batch_idx % 10: print('{}/{} in {:.2f} s'.format(batch_idx, num_batch, time.time() - start)) yield all_batch_contribs def _diff_sums_from_generator(generator, num_feature, num_class): """Computes sums of DeepLIFT contribution scores from a generator. Arguments: generator: generator generator which yields DeepLIFT contribution scores. 
num_feature: int number of features present in the dataset num_class: int number of classes Returns: sums_D: np.ndarray, float 2-D array of sums of differences in DeepLIFT contribution scores with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of differences in scores across features sums_D2: np.ndarray, float 2-D array of sums of squared differences in DeepLIFT contribution scores with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of squared differences in scores across features sums: np.ndarray, float 2-D array of sums of DeepLIFT contribution scores with shape (num_class, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of differences in scores across features pairs: [(int, int)] list of pairs of classes which were compared during interpretation """ # find unique pairs pairs = [[(i, j) for j in range(i + 1, num_class)] for i in range(num_class)] pairs = [p for sublist in pairs for p in sublist] # flatten num_pair = len(pairs) # array of running sums of differences (D) and D^2 (D2) # for each pair (row) for each feature (column) running_sums_D = np.zeros((num_pair, num_feature)) running_sums_D2 = np.zeros((num_pair, num_feature)) # array of running sums of contribution scores # for each class (row) for each feature (column) running_sums_contribs = np.zeros((num_class, num_feature)) # compute running sums for each pair of classes and their D, D2 values, # updating these values batch-by-batch for _, batch_contrib_scores in enumerate(generator): for class_idx in range(num_class): contribs = batch_contrib_scores[class_idx] # if only 1 row (e.g., vector), do not sum, will sum all elements if contribs.ndim > 1: contribs = np.sum(contribs, axis=0) running_sums_contribs[class_idx] = np.add( running_sums_contribs[class_idx], contribs) for pair_idx, (i, j) in enumerate(pairs): D = np.subtract(batch_contrib_scores[i], batch_contrib_scores[j]) D2 = np.square(D) # if only 1 row (e.g., vector), do not sum, will sum all elements assert D.ndim == D2.ndim if D.ndim > 1: D = np.sum(D, axis=0) D2 = np.sum(D2, axis=0) assert D.shape == (num_feature, ) assert D2.shape == (num_feature, ) running_sums_D[pair_idx] = np.add(running_sums_D[pair_idx], D) running_sums_D2[pair_idx] = np.add(running_sums_D2[pair_idx], D2) return running_sums_D, running_sums_D2, running_sums_contribs, pairs def _paired_ttest_with_diff_sums(sums_D, sums_D2, pairs, num_sample): """Performs paired t-tests with sums of differences, D and D^2. 
Arguments: sums_D: np.ndarray, float 2-D array of sums of differences with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of differences across features sums_D2: np.ndarray, float 2-D array of sums of squared differences with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the sum of squared differences in scores features pairs: [(int, int)] list of pairs of classes which were compared during interpretation num_sample: int number of samples Returns: unadjusted_t_values: np.ndarray, float 2-D array of unadjusted T values with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the T value across features p_values: np.ndarray, float 2-D array of adjusted p-values with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the adjusted p-value across features """ num_pair = len(pairs) num_feature = len(sums_D[0]) # compute T for each pair of classes unadjusted_t_values = np.empty((num_pair, num_feature)) # placeholder for pair_idx in range(len(pairs)): sum_D = sums_D[pair_idx] sum_D2 = sums_D2[pair_idx] assert np.all(~np.isnan(sum_D)) assert np.all(~np.isnan(sum_D2)) N = float(num_sample) N_minus_1 = float(num_sample - 1) # paired t-test formula from sums of differences t = sum_D / np.sqrt((sum_D2 * N - sum_D * sum_D) / N_minus_1) unadjusted_t_values[pair_idx] = t dof = num_sample - 1 # degrees of freedom # compute two-sided p-value, e.g., Pr(abs(t)> tt) unadjusted_p_values = stats.t.sf(np.abs(unadjusted_t_values), dof) * 2 assert unadjusted_p_values.shape == (num_pair, num_feature) # apply Bonferroni adjustment to p-values (multiply by # comparisons) num_comparison = len(pairs) * num_feature p_values = _bonferroni(unadjusted_p_values, num_comparison=num_comparison) assert p_values.shape == (num_pair, num_feature) return unadjusted_t_values, p_values def _bonferroni(p_values, num_comparison): """Applies Bonferroni adjustment to p-values. Arguments: p_values: np.ndarray, float array of p-values num_comparison: number of comparisons Returns: adjusted_p_values: np.ndarray, float array of adjusted p-values with the same shape as p_values """ adjust = np.vectorize(lambda pv: min(1.0, pv * num_comparison)) adjusted_p_values = adjust(p_values) assert np.all(adjusted_p_values[~np.isnan(adjusted_p_values)] <= 1.0) assert np.all(adjusted_p_values[~np.isnan(adjusted_p_values)] >= 0.0) return adjusted_p_values def _get_list_signif_scores(unadjusted_t_values, p_values): """Creates two flattened lists of unadjusted T and adjusted p-values. Flattens arrays so that scores corresponding to the same pair of compared classes are contiguous, e.g., [f0_p0, f1_p0, f2_p0, f0_p1, f1_p1, ...]. 
Arguments: unadjusted_t_values: np.ndarray, float 2-D array of unadjusted T values with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the T value across features p_values: np.ndarray, float 2-D array of adjusted p-values with shape (num_pair, num_feature); the outer (0) dim represents the pair of compared classes, and the inner dim (1) represents the adjusted p-value across features Returns: list_unadjusted_t [float] list of unadjusted T values with length num_feature * num_pair list_p: [float] list of adjusted p-values with length num_feature * num_pair """ num_pair = unadjusted_t_values.shape[0] num_feature = unadjusted_t_values.shape[1] # flatten nested lists ('C' for row-major, e.g. C style) # e.g., np.array([[1, 2, 3], [4, 5, 6]]) => np.array([1, 2, 3, 4, 5, 6]) # e.g., corresponds to concatenated rows [row0_col0, row1_col0, row2_col0, # row0_col1, row1_col1, row2_col1, row0_col2, row1_col2, row2_col2] flat_utv = unadjusted_t_values.flatten('C') flat_pv = p_values.flatten('C') assert flat_utv.shape == (num_feature * num_pair, ) assert flat_pv.shape == (num_feature * num_pair, ) return flat_utv.tolist(), flat_pv.tolist() def _get_list_pairs(pairs, idx_class_dict, num_feature): """Creates flattened list of (repeated) pairs. The indexing corresponds with the flattened list of T values and the flattened list of p-values obtained from _get_list_signif_scores(). Arguments: pairs: [(int, int)] list of pairs of classes which were compared during interpretation idx_class_dict: {int: string} dictionary mapping class indices to classes num_feature: int number of features Returns: list_pairs: [(string, string)] list of pairs of compared classes with length num_feature * num_pair """ list_pairs = [[p] * num_feature for p in pairs] list_pairs = [p for sublist in list_pairs for p in sublist] # flatten list_pairs = [[idx_class_dict[p[0]], idx_class_dict[p[1]]] for p in list_pairs] # lookup class return list_pairs def _get_list_feat_names(idx_feat_dict, num_pair): """Creates flattened list of (repeated) feature names. The indexing corresponds with the flattened list of T values and the flattened list of p-values obtained from _get_list_signif_scores(). Arguments: idx_feat_dict: {int: string} dictionary mapping feature indices to faetures num_class: int number of classes Returns: list_feat_names: [string] list of feature names with length num_feature * num_pair """ num_feature = len(idx_feat_dict) return [idx_feat_dict[feat_idx] for feat_idx in range(num_feature)] \ * num_pair def _get_list_feat_descripts(list_feat_names, icd9_descript_dict): """Creates flattened list of (repeated) feature descriptions. The indexing corresponds with the flattened list of T values and the flattened list of p-values obtained from _get_list_signif_scores(). 
Arguments: list_feat_names: [string] list of feature names corresponding with length num_feature * num_pair icd9_descript_dict: {string: string} dictionary mapping ICD9 codes to description text Returns: list_feat_descripts: [string] list of feature descriptions with length num_feature * num_pair """ # returns the description for a feature; expects the string feature name def _get_descript(feat, icd9_descript_dict): if feat[:6] == 'gender': return 'gender' elif feat[:3] == 'age': return 'age on record' elif feat in icd9_descript_dict: return icd9_descript_dict[feat] raise ValueError('`{}` not age/gender; not found in icd9_descript_dict' .format(feat)) list_feat_descripts = [ _get_descript(f, icd9_descript_dict=icd9_descript_dict) for f in list_feat_names] return list_feat_descripts
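Editor's note: a minimal sanity check (not part of RIDDLE) of the sums-based paired t-test used in _paired_ttest_with_diff_sums above; computing t from the running sums of D and D^2 gives the same statistic as scipy.stats.ttest_rel.

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
a, b = rng.normal(size=50), rng.normal(size=50)   # two paired score vectors
D = a - b
N = float(len(D))
sum_D, sum_D2 = D.sum(), np.square(D).sum()
# same formula as in _paired_ttest_with_diff_sums
t_from_sums = sum_D / np.sqrt((sum_D2 * N - sum_D * sum_D) / (N - 1.0))
t_scipy, _ = stats.ttest_rel(a, b)
assert np.isclose(t_from_sums, t_scipy)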
calliope/test/common/util.py
FraSanvit/calliope
180
12615743
<reponame>FraSanvit/calliope
import os
import sys
import ast

import pytest
from pyomo.core.expr.current import identify_variables
import pyomo.core as po

import calliope
from calliope import AttrDict


constraint_sets = {
    k: [ast.literal_eval(i) for i in v]
    for k, v in AttrDict.from_yaml(
        os.path.join(os.path.dirname(__file__), "constraint_sets.yaml")
    )
    .as_dict_flat()
    .items()
}

defaults = AttrDict.from_yaml(
    os.path.join(os.path.dirname(calliope.__file__), "config", "defaults.yaml")
)

subsets_config = AttrDict.from_yaml(
    os.path.join(os.path.dirname(calliope.__file__), "config", "subsets.yaml")
)

python36_or_higher = pytest.mark.skipif(
    sys.version_info < (3, 6), reason="Requires ordered dicts from Python >= 3.6"
)


def build_test_model(
    override_dict=None,
    scenario=None,
    model_file="model.yaml",
    timeseries_dataframes=None,
):
    return calliope.Model(
        os.path.join(os.path.dirname(__file__), "test_model", model_file),
        override_dict=override_dict,
        scenario=scenario,
        timeseries_dataframes=timeseries_dataframes,
    )


def check_error_or_warning(error_warning, test_string_or_strings):
    if hasattr(error_warning, "list"):
        output = ",".join(
            str(error_warning.list[i]) for i in range(len(error_warning.list))
        )
    else:
        output = str(error_warning.value)

    if isinstance(test_string_or_strings, list):
        result = all(test_string in output for test_string in test_string_or_strings)
    else:
        result = test_string_or_strings in output

    return result


def check_variable_exists(backend_model, constraint, variable, idx=None):
    """
    Search for existence of a decision variable in a Pyomo constraint.

    Parameters
    ----------
    backend_model : Pyomo ConcreteModel
    constraint : str, name of constraint which could exist in the backend
    variable : str, string to search in the list of variables to check if existing
    """
    if getattr(backend_model, constraint) in backend_model.component_objects(
        ctype=po.Constraint
    ):
        expression_accessor = "body"
    elif getattr(backend_model, constraint) in backend_model.component_objects(
        ctype=po.Expression
    ):
        expression_accessor = "value"
    if idx is not None:
        if idx in getattr(backend_model, constraint)._index:
            variables = identify_variables(
                getattr(getattr(backend_model, constraint)[idx], expression_accessor)
            )
            return any(variable in j.getname() for j in list(variables))
        else:
            return False
    else:
        exists = []
        for v in getattr(backend_model, constraint).values():
            variables = identify_variables(getattr(v, expression_accessor))
            exists.append(any(variable in j.getname() for j in list(variables)))
        return any(exists)
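Illustrative usage sketch (editor addition; assumes pytest and the check_error_or_warning helper above are importable, and the raised ValueError stands in for a real calliope call): the helper accepts a pytest.raises / pytest.warns capture object and checks its message.

import pytest

def test_error_message_mentions_timeseries():
    with pytest.raises(ValueError) as excinfo:
        raise ValueError("Invalid timeseries definition")  # stand-in for a calliope call
    assert check_error_or_warning(excinfo, "timeseries")
    assert check_error_or_warning(excinfo, ["Invalid", "timeseries"])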
leetcode.com/python/1288_Remove_Covered_Intervals.py
vansh-tiwari/coding-interview-gym
713
12615804
<reponame>vansh-tiwari/coding-interview-gym<filename>leetcode.com/python/1288_Remove_Covered_Intervals.py
class Solution(object):
    def removeCoveredIntervals(self, intervals):
        """
        :type intervals: List[List[int]]
        :rtype: int
        """
        # Sort by start point.
        # If two intervals share the same start point
        # put the longer one to be the first.
        intervals.sort(key=lambda x: (x[0], -x[1]))
        count = 0
        prevEnd = 0
        for _, end in intervals:
            # if current interval is not covered
            # by the previous one
            if end > prevEnd:
                count += 1
                prevEnd = end

        return count
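Quick usage check (editor addition, using the standard examples for this problem; assumes the Solution class above is in scope):

sol = Solution()
assert sol.removeCoveredIntervals([[1, 4], [3, 6], [2, 8]]) == 2  # [3, 6] is covered by [2, 8]
assert sol.removeCoveredIntervals([[1, 4], [2, 3]]) == 1          # [2, 3] is covered by [1, 4]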
events/swagger_params.py
jordanm88/Django-CRM
1,334
12615810
<reponame>jordanm88/Django-CRM<gh_stars>1000+
from drf_yasg import openapi

organization_params_in_header = openapi.Parameter(
    "org", openapi.IN_HEADER, required=True, type=openapi.TYPE_INTEGER
)

organization_params = [
    organization_params_in_header,
]

event_list_get_params = [
    organization_params_in_header,
    openapi.Parameter("name", openapi.IN_QUERY, type=openapi.TYPE_STRING),
    openapi.Parameter("created_by", openapi.IN_QUERY, type=openapi.TYPE_STRING),
    openapi.Parameter("assigned_users", openapi.IN_QUERY, type=openapi.TYPE_STRING),
    openapi.Parameter(
        "date_of_meeting",
        openapi.IN_QUERY,
        type=openapi.FORMAT_DATE,
        example="2021-01-01",
    ),
]

event_detail_post_params = [
    organization_params_in_header,
    openapi.Parameter(
        "event_attachment",
        openapi.IN_QUERY,
        type=openapi.TYPE_FILE,
    ),
    openapi.Parameter("comment", openapi.IN_QUERY, type=openapi.TYPE_STRING),
]

event_create_post_params = [
    organization_params_in_header,
    openapi.Parameter(
        "name", openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING
    ),
    openapi.Parameter(
        "event_type",
        openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        required=True,
        enum=["Recurring", "Non-Recurring"],
    ),
    openapi.Parameter(
        "contacts",
        openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        required=True,
    ),
    openapi.Parameter(
        "start_date", openapi.IN_QUERY, type=openapi.FORMAT_DATE, example="2021-01-01"
    ),
    openapi.Parameter(
        "start_time", openapi.IN_QUERY, type=openapi.FORMAT_DATETIME, example="13:01:01"
    ),
    openapi.Parameter(
        "end_date", openapi.IN_QUERY, type=openapi.FORMAT_DATE, example="2021-01-01"
    ),
    openapi.Parameter(
        "end_time", openapi.IN_QUERY, type=openapi.FORMAT_DATETIME, example="13:01:01"
    ),
    openapi.Parameter(
        "teams",
        openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
    ),
    openapi.Parameter("assigned_to", openapi.IN_QUERY, type=openapi.TYPE_STRING),
    openapi.Parameter("description", openapi.IN_QUERY, type=openapi.TYPE_STRING),
    openapi.Parameter("recurring_days", openapi.IN_QUERY, type=openapi.TYPE_STRING),
]
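Editor's sketch of how parameter lists like these are typically consumed (hypothetical view, not part of this repo; assumes event_list_get_params from the module above is importable): drf_yasg's swagger_auto_schema decorator takes them as manual_parameters so they appear in the generated OpenAPI docs.

from drf_yasg.utils import swagger_auto_schema
from rest_framework.views import APIView
from rest_framework.response import Response


class EventListView(APIView):  # hypothetical view, for illustration only
    @swagger_auto_schema(manual_parameters=event_list_get_params)
    def get(self, request, *args, **kwargs):
        # real project code would filter events using the documented query params
        return Response({"events": []})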
pypy/module/_cffi_backend/realize_c_type.py
m4sterchain/mesapy
381
12615820
<reponame>m4sterchain/mesapy import sys from rpython.rlib import jit from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import specialize from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root from pypy.module import _cffi_backend from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend import cffi_opcode, newtype, ctypestruct from pypy.module._cffi_backend import ctypeprim from pypy.module._cffi_backend import parse_c_type @specialize.ll() def getop(op): return rffi.cast(rffi.SIGNED, op) & 0xFF @specialize.ll() def getarg(op): return rffi.cast(rffi.SIGNED, op) >> 8 class RealizeCache: NAMES = [None, "_Bool", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "float", "double", "long double", "wchar_t", "int8_t", "uint8_t", "int16_t", "uint16_t", "int32_t", "uint32_t", "int64_t", "uint64_t", "intptr_t", "uintptr_t", "ptrdiff_t", "size_t", "ssize_t", "int_least8_t", "uint_least8_t", "int_least16_t", "uint_least16_t", "int_least32_t", "uint_least32_t", "int_least64_t", "uint_least64_t", "int_fast8_t", "uint_fast8_t", "int_fast16_t", "uint_fast16_t", "int_fast32_t", "uint_fast32_t", "int_fast64_t", "uint_fast64_t", "intmax_t", "uintmax_t", "float _Complex", "double _Complex", "char16_t", "char32_t", ] assert len(NAMES) == cffi_opcode._NUM_PRIM def __init__(self, space): self.space = space self.all_primitives = [None] * cffi_opcode._NUM_PRIM self.file_struct = None def get_file_struct(self): if self.file_struct is None: self.file_struct = ctypestruct.W_CTypeStruct(self.space, "FILE") return self.file_struct def get_primitive_type(ffi, num): space = ffi.space if not (0 <= num < cffi_opcode._NUM_PRIM): if num == cffi_opcode._UNKNOWN_PRIM: raise oefmt(ffi.w_FFIError, "primitive integer type with an " "unexpected size (or not an integer type at all)") elif num == cffi_opcode._UNKNOWN_FLOAT_PRIM: raise oefmt(ffi.w_FFIError, "primitive floating-point type with an " "unexpected size (or not a float type at all)") elif num == cffi_opcode._UNKNOWN_LONG_DOUBLE: raise oefmt(ffi.w_FFIError, "primitive floating-point type is " "'long double', not supported for now with " "the syntax 'typedef double... 
xxx;'") else: raise oefmt(space.w_NotImplementedError, "prim=%d", num) realize_cache = space.fromcache(RealizeCache) w_ctype = realize_cache.all_primitives[num] if w_ctype is None: if num == cffi_opcode.PRIM_VOID: w_ctype = newtype.new_void_type(space) else: assert RealizeCache.NAMES[num] w_ctype = newtype.new_primitive_type(space, RealizeCache.NAMES[num]) realize_cache.all_primitives[num] = w_ctype return w_ctype def get_array_type(ffi, opcodes, itemindex, length): w_ctitem = realize_c_type(ffi, opcodes, itemindex) w_ctitemptr = newtype.new_pointer_type(ffi.space, w_ctitem) return newtype._new_array_type(ffi.space, w_ctitemptr, length) FUNCPTR_FETCH_CHARP = lltype.Ptr(lltype.FuncType([rffi.CCHARP], lltype.Void)) FUNCPTR_FETCH_LONGLONG = lltype.Ptr(lltype.FuncType( [lltype.Ptr(parse_c_type.GETCONST_S)], rffi.INT)) def realize_global_int(ffi, g, gindex): fetch_fnptr = rffi.cast(FUNCPTR_FETCH_LONGLONG, g.c_address) with lltype.scoped_alloc(parse_c_type.GETCONST_S) as p_value: p_value.c_ctx = ffi.ctxobj.ctx rffi.setintfield(p_value, 'c_gindex', gindex) neg = fetch_fnptr(p_value) value = p_value.c_value neg = rffi.cast(lltype.Signed, neg) if neg == 0: # positive if value <= rffi.cast(rffi.ULONGLONG, sys.maxint): return ffi.space.newint(intmask(value)) else: return ffi.space.newint(value) elif neg == 1: # negative value = rffi.cast(rffi.LONGLONG, value) if value >= -sys.maxint-1: return ffi.space.newint(intmask(value)) else: return ffi.space.newint(value) if neg == 2: got = "%d (0x%x)" % (value, value) else: got = "%d" % (rffi.cast(rffi.LONGLONG, value),) raise oefmt(ffi.w_FFIError, "the C compiler says '%s' is equal to %s, " "but the cdef disagrees", rffi.charp2str(g.c_name), got) class W_RawFuncType(W_Root): """Temporary: represents a C function type (not a function pointer)""" _immutable_fields_ = ['nostruct_ctype', 'nostruct_locs', 'nostruct_nargs'] _ctfuncptr = None nostruct_ctype = None nostruct_locs = None nostruct_nargs = 0 def __init__(self, opcodes, base_index): self.opcodes = opcodes self.base_index = base_index def _unpack(self, ffi): opcodes = self.opcodes base_index = self.base_index assert getop(opcodes[base_index]) == cffi_opcode.OP_FUNCTION fret = realize_c_type(ffi, opcodes, getarg(opcodes[base_index])) base_index += 1 num_args = 0 OP_FUNCTION_END = cffi_opcode.OP_FUNCTION_END while getop(opcodes[base_index + num_args]) != OP_FUNCTION_END: num_args += 1 # ellipsis = (getarg(opcodes[base_index + num_args]) & 0x01) != 0 abi = (getarg(opcodes[base_index + num_args]) & 0xFE) if abi == 0: abi = _cffi_backend.FFI_DEFAULT_ABI elif abi == 2: if _cffi_backend.has_stdcall: abi = _cffi_backend.FFI_STDCALL else: abi = _cffi_backend.FFI_DEFAULT_ABI else: raise oefmt(ffi.w_FFIError, "abi number %d not supported", abi) # fargs = [realize_c_type(ffi, opcodes, base_index + i) for i in range(num_args)] return fargs, fret, ellipsis, abi def unwrap_as_fnptr(self, ffi): if self._ctfuncptr is None: fargs, fret, ellipsis, abi = self._unpack(ffi) self._ctfuncptr = newtype._new_function_type( ffi.space, fargs, fret, ellipsis, abi) return self._ctfuncptr def unwrap_as_fnptr_in_elidable(self): assert self._ctfuncptr is not None return self._ctfuncptr @jit.dont_look_inside def prepare_nostruct_fnptr(self, ffi): # tweaked version: instead of returning the ctfuncptr # corresponding exactly to the OP_FUNCTION ... 
OP_FUNCTION_END # opcodes, this builds in self.nostruct_ctype another one in # which the struct args are replaced with ptr-to- struct, and # a struct return value is replaced with a hidden first arg of # type ptr-to-struct. This is how recompiler.py produces # trampoline functions for PyPy. (Same with complex numbers.) if self.nostruct_ctype is None: fargs, fret, ellipsis, abi = self._unpack(ffi) # 'locs' will be a string of the same length as the final fargs, # containing 'A' where a struct argument was detected, and 'R' # in first position if a struct return value was detected locs = ['\x00'] * len(fargs) for i in range(len(fargs)): farg = fargs[i] if (isinstance(farg, ctypestruct.W_CTypeStructOrUnion) or isinstance(farg, ctypeprim.W_CTypePrimitiveComplex)): farg = newtype.new_pointer_type(ffi.space, farg) fargs[i] = farg locs[i] = 'A' if (isinstance(fret, ctypestruct.W_CTypeStructOrUnion) or isinstance(fret, ctypeprim.W_CTypePrimitiveComplex)): fret = newtype.new_pointer_type(ffi.space, fret) fargs = [fret] + fargs locs = ['R'] + locs fret = newtype.new_void_type(ffi.space) ctfuncptr = newtype._new_function_type( ffi.space, fargs, fret, ellipsis, abi) if locs == ['\x00'] * len(locs): locs = None else: locs = ''.join(locs) self.nostruct_ctype = ctfuncptr self.nostruct_locs = locs self.nostruct_nargs = len(ctfuncptr.fargs) - (locs is not None and locs[0] == 'R') def repr_fn_type(self, ffi, repl=""): fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: argnames.append('...') sargs = ', '.join(argnames) sret1 = fret.name[:fret.name_position] sret2 = fret.name[fret.name_position:] if len(repl) > 0 and not sret1.endswith('*'): repl = " " + repl return '%s%s(%s)%s' % (sret1, repl, sargs, sret2) def unexpected_fn_type(self, ffi): raise oefmt(ffi.w_FFIError, "the type '%s' is a function type, not a " "pointer-to-function type", self.repr_fn_type(ffi)) def realize_c_type(ffi, opcodes, index): """Interpret an opcodes[] array. If opcodes == ffi.ctxobj.ctx.c_types, store all the intermediate types back in the opcodes[]. 
""" x = realize_c_type_or_func(ffi, opcodes, index) if not isinstance(x, W_CType): assert isinstance(x, W_RawFuncType) raise x.unexpected_fn_type(ffi) return x def _realize_name(prefix, charp_src_name): # "xyz" => "struct xyz" # "$xyz" => "xyz" # "$1" => "struct $1" if (charp_src_name[0] == '$' and charp_src_name[1] != '$' and not ('0' <= charp_src_name[1] <= '9')): return rffi.charp2str(rffi.ptradd(charp_src_name, 1)) else: return prefix + rffi.charp2str(charp_src_name) def _realize_c_struct_or_union(ffi, sindex): if sindex == cffi_opcode._IO_FILE_STRUCT: # returns a single global cached opaque type return ffi.space.fromcache(RealizeCache).get_file_struct() s = ffi.ctxobj.ctx.c_struct_unions[sindex] type_index = rffi.getintfield(s, 'c_type_index') if ffi.cached_types[type_index] is not None: return ffi.cached_types[type_index] #found already in the "primary" slot space = ffi.space w_ctype = None c_flags = rffi.getintfield(s, 'c_flags') c_first_field_index = rffi.getintfield(s, 'c_first_field_index') if (c_flags & cffi_opcode.F_EXTERNAL) == 0: if (c_flags & cffi_opcode.F_UNION) != 0: name = _realize_name("union ", s.c_name) x = ctypestruct.W_CTypeUnion(space, name) else: name = _realize_name("struct ", s.c_name) if name == "struct _IO_FILE": x = space.fromcache(RealizeCache).get_file_struct() else: x = ctypestruct.W_CTypeStruct(space, name) if (c_flags & cffi_opcode.F_OPAQUE) == 0: assert c_first_field_index >= 0 w_ctype = x w_ctype.size = rffi.getintfield(s, 'c_size') w_ctype.alignment = rffi.getintfield(s, 'c_alignment') # w_ctype._field_list and other underscore fields are still # None, making it a "lazy" (i.e. "non-forced") kind of struct w_ctype._lazy_ffi = ffi w_ctype._lazy_s = s else: assert c_first_field_index < 0 else: assert c_first_field_index < 0 x = _fetch_external_struct_or_union(s, ffi.included_ffis_libs) if x is None: raise oefmt(ffi.w_FFIError, "'%s %s' should come from ffi.include() but was not found", "union" if c_flags & cffi_opcode.F_UNION else "struct", rffi.charp2str(s.c_name)) assert isinstance(x, ctypestruct.W_CTypeStructOrUnion) if (c_flags & cffi_opcode.F_OPAQUE) == 0 and x.size < 0: prefix = "union" if c_flags & cffi_opcode.F_UNION else "struct" name = rffi.charp2str(s.c_name) raise oefmt(space.w_NotImplementedError, "'%s %s' is opaque in the ffi.include(), but no " "longer in the ffi doing the include (workaround: don't " "use ffi.include() but duplicate the declarations of " "everything using %s %s)", prefix, name, prefix, name) # Update the "primary" OP_STRUCT_UNION slot ffi.cached_types[type_index] = x if w_ctype is not None and rffi.getintfield(s, 'c_size') == -2: # oops, this struct is unnamed and we couldn't generate # a C expression to get its size. We have to rely on # complete_struct_or_union() to compute it now. 
try: do_realize_lazy_struct(w_ctype) except: ffi.cached_types[type_index] = None raise return x def _realize_c_enum(ffi, eindex): e = ffi.ctxobj.ctx.c_enums[eindex] type_index = rffi.getintfield(e, 'c_type_index') if ffi.cached_types[type_index] is not None: return ffi.cached_types[type_index] #found already in the "primary" slot space = ffi.space w_basetd = get_primitive_type(ffi, rffi.getintfield(e, 'c_type_prim')) enumerators_w = [] enumvalues_w = [] p = e.c_enumerators if p[0] != '\x00': while True: j = 0 while p[j] != ',' and p[j] != '\x00': j += 1 enname = rffi.charpsize2str(p, j) enumerators_w.append(space.newtext(enname)) gindex = parse_c_type.search_in_globals(ffi.ctxobj.ctx, enname) assert gindex >= 0 g = ffi.ctxobj.ctx.c_globals[gindex] assert getop(g.c_type_op) == cffi_opcode.OP_ENUM assert getarg(g.c_type_op) == -1 w_integer_value = realize_global_int(ffi, g, gindex) enumvalues_w.append(w_integer_value) p = rffi.ptradd(p, j) if p[0] == '\x00': break p = rffi.ptradd(p, 1) name = _realize_name("enum ", e.c_name) w_ctype = newtype.new_enum_type(space, name, space.newlist(enumerators_w), space.newlist(enumvalues_w), w_basetd) # Update the "primary" OP_ENUM slot ffi.cached_types[type_index] = w_ctype return w_ctype def realize_c_type_or_func(ffi, opcodes, index): op = opcodes[index] from_ffi = (opcodes == ffi.ctxobj.ctx.c_types) if from_ffi and ffi.cached_types[index] is not None: return ffi.cached_types[index] case = getop(op) if case == cffi_opcode.OP_PRIMITIVE: x = get_primitive_type(ffi, getarg(op)) elif case == cffi_opcode.OP_POINTER: y = realize_c_type_or_func(ffi, opcodes, getarg(op)) if isinstance(y, W_CType): x = newtype.new_pointer_type(ffi.space, y) elif isinstance(y, W_RawFuncType): x = y.unwrap_as_fnptr(ffi) else: raise NotImplementedError elif case == cffi_opcode.OP_ARRAY: x = get_array_type(ffi, opcodes, getarg(op), rffi.cast(rffi.SIGNED, opcodes[index + 1])) elif case == cffi_opcode.OP_OPEN_ARRAY: x = get_array_type(ffi, opcodes, getarg(op), -1) elif case == cffi_opcode.OP_STRUCT_UNION: x = _realize_c_struct_or_union(ffi, getarg(op)) elif case == cffi_opcode.OP_ENUM: x = _realize_c_enum(ffi, getarg(op)) elif case == cffi_opcode.OP_FUNCTION: x = W_RawFuncType(opcodes, index) elif case == cffi_opcode.OP_NOOP: x = realize_c_type_or_func(ffi, opcodes, getarg(op)) elif case == cffi_opcode.OP_TYPENAME: # essential: the TYPENAME opcode resolves the type index looked # up in the 'ctx.c_typenames' array, but it does so in 'ctx.c_types' # instead of in 'opcodes'! type_index = rffi.getintfield(ffi.ctxobj.ctx.c_typenames[getarg(op)], 'c_type_index') x = realize_c_type_or_func(ffi, ffi.ctxobj.ctx.c_types, type_index) else: raise oefmt(ffi.space.w_NotImplementedError, "op=%d", case) if from_ffi: assert ffi.cached_types[index] is None or ffi.cached_types[index] is x ffi.cached_types[index] = x return x def do_realize_lazy_struct(w_ctype): """This is called by W_CTypeStructOrUnion.force_lazy_struct(). 
""" assert isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) space = w_ctype.space ffi = w_ctype._lazy_ffi s = w_ctype._lazy_s assert w_ctype.size != -1 # not an opaque (but may be -2) assert ffi is not None # still lazy first_field = rffi.getintfield(s, 'c_first_field_index') num_fields = rffi.getintfield(s, 'c_num_fields') fields_w = [None] * num_fields for i in range(num_fields): fld = ffi.ctxobj.ctx.c_fields[first_field + i] field_name = rffi.charp2str(fld.c_name) field_size = rffi.getintfield(fld, 'c_field_size') field_offset = rffi.getintfield(fld, 'c_field_offset') op = rffi.getintfield(fld, 'c_field_type_op') case = getop(op) if case == cffi_opcode.OP_NOOP: fbitsize = -1 # standard field elif case == cffi_opcode.OP_BITFIELD: assert field_size >= 0 fbitsize = field_size else: raise oefmt(space.w_NotImplementedError, "field op=%d", case) w_ctf = realize_c_type(ffi, ffi.ctxobj.ctx.c_types, getarg(op)) if field_offset == -1: # unnamed struct, with field positions and sizes entirely # determined by complete_struct_or_union() and not checked. # Or, bitfields (field_size >= 0), similarly not checked. assert field_size == -1 or fbitsize >= 0 else: newtype.detect_custom_layout(w_ctype, newtype.SF_STD_FIELD_POS, w_ctf.size, field_size, "wrong size for field '", field_name, "'") fields_w[i] = space.newtuple([ space.newtext(field_name), w_ctf, space.newint(fbitsize), space.newint(field_offset)]) sflags = 0 c_flags = rffi.getintfield(s, 'c_flags') if c_flags & cffi_opcode.F_CHECK_FIELDS: sflags |= newtype.SF_STD_FIELD_POS if c_flags & cffi_opcode.F_PACKED: sflags |= newtype.SF_PACKED assert w_ctype.size == rffi.getintfield(s, 'c_size') assert w_ctype.alignment == rffi.getintfield(s, 'c_alignment') try: w_ctype.size = -1 # make opaque again newtype.complete_struct_or_union( space, w_ctype, space.newlist(fields_w), space.w_None, totalsize = rffi.getintfield(s, 'c_size'), totalalignment = rffi.getintfield(s, 'c_alignment'), sflags = sflags) except: w_ctype.size = rffi.getintfield(s, 'c_size') # restore w_ctype.alignment = rffi.getintfield(s, 'c_alignment') # restore raise if rffi.getintfield(s, 'c_size') >= 0: assert w_ctype.size == rffi.getintfield(s, 'c_size') assert w_ctype.alignment > 0 if rffi.getintfield(s, 'c_alignment') != -1: assert w_ctype.alignment == rffi.getintfield(s, 'c_alignment') assert w_ctype._fields_list is not None # not lazy any more w_ctype._lazy_ffi = None w_ctype._lazy_s = lltype.nullptr(parse_c_type.STRUCT_UNION_S) def _fetch_external_struct_or_union(s, included_ffis_libs): name = rffi.charp2str(s.c_name) # for ffi1, _ in included_ffis_libs: ctx1 = ffi1.ctxobj.ctx sindex = parse_c_type.search_in_struct_unions(ctx1, name) if sindex < 0: # not found at all continue s1 = ctx1.c_struct_unions[sindex] s1_flags = rffi.getintfield(s1, 'c_flags') s_flags = rffi.getintfield(s, 'c_flags') if ((s1_flags & (cffi_opcode.F_EXTERNAL | cffi_opcode.F_UNION)) == (s_flags & cffi_opcode.F_UNION)): # s1 is not external, and the same kind (struct or union) as s return _realize_c_struct_or_union(ffi1, sindex) # not found, look more recursively if len(ffi1.included_ffis_libs) > 0: w_res = _fetch_external_struct_or_union(s, ffi1.included_ffis_libs) if w_res is not None: return w_res return None
plexapi/server.py
phrawzty/python-plexapi
749
12615831
<gh_stars>100-1000 # -*- coding: utf-8 -*- from urllib.parse import urlencode from xml.etree import ElementTree import requests from plexapi import (BASE_HEADERS, CONFIG, TIMEOUT, X_PLEX_CONTAINER_SIZE, log, logfilter) from plexapi import utils from plexapi.alert import AlertListener from plexapi.base import PlexObject from plexapi.client import PlexClient from plexapi.collection import Collection from plexapi.exceptions import BadRequest, NotFound, Unauthorized from plexapi.library import Hub, Library, Path, File from plexapi.media import Conversion, Optimized from plexapi.playlist import Playlist from plexapi.playqueue import PlayQueue from plexapi.settings import Settings from plexapi.utils import deprecated from requests.status_codes import _codes as codes # Need these imports to populate utils.PLEXOBJECTS from plexapi import audio as _audio # noqa: F401 from plexapi import collection as _collection # noqa: F401 from plexapi import media as _media # noqa: F401 from plexapi import photo as _photo # noqa: F401 from plexapi import playlist as _playlist # noqa: F401 from plexapi import video as _video # noqa: F401 class PlexServer(PlexObject): """ This is the main entry point to interacting with a Plex server. It allows you to list connected clients, browse your library sections and perform actions such as emptying trash. If you do not know the auth token required to access your Plex server, or simply want to access your server with your username and password, you can also create an PlexServer instance from :class:`~plexapi.myplex.MyPlexAccount`. Parameters: baseurl (str): Base url for to access the Plex Media Server (default: 'http://localhost:32400'). token (str): Required Plex authentication token to access the server. session (requests.Session, optional): Use your own session object if you want to cache the http responses from the server. timeout (int, optional): Timeout in seconds on initial connection to the server (default config.TIMEOUT). Attributes: allowCameraUpload (bool): True if server allows camera upload. allowChannelAccess (bool): True if server allows channel access (iTunes?). allowMediaDeletion (bool): True is server allows media to be deleted. allowSharing (bool): True is server allows sharing. allowSync (bool): True is server allows sync. backgroundProcessing (bool): Unknown certificate (bool): True if server has an HTTPS certificate. companionProxy (bool): Unknown diagnostics (bool): Unknown eventStream (bool): Unknown friendlyName (str): Human friendly name for this server. hubSearch (bool): True if `Hub Search <https://www.plex.tv/blog /seek-plex-shall-find-leveling-web-app/>`_ is enabled. I believe this is enabled for everyone machineIdentifier (str): Unique ID for this server (looks like an md5). multiuser (bool): True if `multiusers <https://support.plex.tv/hc/en-us/articles /200250367-Multi-User-Support>`_ are enabled. myPlex (bool): Unknown (True if logged into myPlex?). myPlexMappingState (str): Unknown (ex: mapped). myPlexSigninState (str): Unknown (ex: ok). myPlexSubscription (bool): True if you have a myPlex subscription. myPlexUsername (str): Email address if signed into myPlex (<EMAIL>) ownerFeatures (list): List of features allowed by the server owner. This may be based on your PlexPass subscription. Features include: camera_upload, cloudsync, content_filter, dvr, hardware_transcoding, home, lyrics, music_videos, pass, photo_autotags, premium_music_metadata, session_bandwidth_restrictions, sync, trailers, webhooks (and maybe more). 
photoAutoTag (bool): True if photo `auto-tagging <https://support.plex.tv/hc/en-us /articles/234976627-Auto-Tagging-of-Photos>`_ is enabled. platform (str): Platform the server is hosted on (ex: Linux) platformVersion (str): Platform version (ex: '6.1 (Build 7601)', '4.4.0-59-generic'). pluginHost (bool): Unknown readOnlyLibraries (bool): Unknown requestParametersInCookie (bool): Unknown streamingBrainVersion (bool): Current `Streaming Brain <https://www.plex.tv/blog /mcstreamy-brain-take-world-two-easy-steps/>`_ version. sync (bool): True if `syncing to a device <https://support.plex.tv/hc/en-us/articles /201053678-Sync-Media-to-a-Device>`_ is enabled. transcoderActiveVideoSessions (int): Number of active video transcoding sessions. transcoderAudio (bool): True if audio transcoding audio is available. transcoderLyrics (bool): True if audio transcoding lyrics is available. transcoderPhoto (bool): True if audio transcoding photos is available. transcoderSubtitles (bool): True if audio transcoding subtitles is available. transcoderVideo (bool): True if audio transcoding video is available. transcoderVideoBitrates (bool): List of video bitrates. transcoderVideoQualities (bool): List of video qualities. transcoderVideoResolutions (bool): List of video resolutions. updatedAt (int): Datetime the server was updated. updater (bool): Unknown version (str): Current Plex version (ex: 1.3.2.3112-1751929) voiceSearch (bool): True if voice search is enabled. (is this Google Voice search?) _baseurl (str): HTTP address of the client. _token (str): Token used to access this client. _session (obj): Requests session object used to access this client. """ key = '/' def __init__(self, baseurl=None, token=None, session=None, timeout=None): self._baseurl = baseurl or CONFIG.get('auth.server_baseurl', 'http://localhost:32400') self._baseurl = self._baseurl.rstrip('/') self._token = logfilter.add_secret(token or CONFIG.get('auth.server_token')) self._showSecrets = CONFIG.get('log.show_secrets', '').lower() == 'true' self._session = session or requests.Session() self._timeout = timeout self._library = None # cached library self._settings = None # cached settings self._myPlexAccount = None # cached myPlexAccount self._systemAccounts = None # cached list of SystemAccount self._systemDevices = None # cached list of SystemDevice data = self.query(self.key, timeout=self._timeout) super(PlexServer, self).__init__(self, data, self.key) def _loadData(self, data): """ Load attribute values from Plex XML response. 
""" self._data = data self.allowCameraUpload = utils.cast(bool, data.attrib.get('allowCameraUpload')) self.allowChannelAccess = utils.cast(bool, data.attrib.get('allowChannelAccess')) self.allowMediaDeletion = utils.cast(bool, data.attrib.get('allowMediaDeletion')) self.allowSharing = utils.cast(bool, data.attrib.get('allowSharing')) self.allowSync = utils.cast(bool, data.attrib.get('allowSync')) self.backgroundProcessing = utils.cast(bool, data.attrib.get('backgroundProcessing')) self.certificate = utils.cast(bool, data.attrib.get('certificate')) self.companionProxy = utils.cast(bool, data.attrib.get('companionProxy')) self.diagnostics = utils.toList(data.attrib.get('diagnostics')) self.eventStream = utils.cast(bool, data.attrib.get('eventStream')) self.friendlyName = data.attrib.get('friendlyName') self.hubSearch = utils.cast(bool, data.attrib.get('hubSearch')) self.machineIdentifier = data.attrib.get('machineIdentifier') self.multiuser = utils.cast(bool, data.attrib.get('multiuser')) self.myPlex = utils.cast(bool, data.attrib.get('myPlex')) self.myPlexMappingState = data.attrib.get('myPlexMappingState') self.myPlexSigninState = data.attrib.get('myPlexSigninState') self.myPlexSubscription = utils.cast(bool, data.attrib.get('myPlexSubscription')) self.myPlexUsername = data.attrib.get('myPlexUsername') self.ownerFeatures = utils.toList(data.attrib.get('ownerFeatures')) self.photoAutoTag = utils.cast(bool, data.attrib.get('photoAutoTag')) self.platform = data.attrib.get('platform') self.platformVersion = data.attrib.get('platformVersion') self.pluginHost = utils.cast(bool, data.attrib.get('pluginHost')) self.readOnlyLibraries = utils.cast(int, data.attrib.get('readOnlyLibraries')) self.requestParametersInCookie = utils.cast(bool, data.attrib.get('requestParametersInCookie')) self.streamingBrainVersion = data.attrib.get('streamingBrainVersion') self.sync = utils.cast(bool, data.attrib.get('sync')) self.transcoderActiveVideoSessions = int(data.attrib.get('transcoderActiveVideoSessions', 0)) self.transcoderAudio = utils.cast(bool, data.attrib.get('transcoderAudio')) self.transcoderLyrics = utils.cast(bool, data.attrib.get('transcoderLyrics')) self.transcoderPhoto = utils.cast(bool, data.attrib.get('transcoderPhoto')) self.transcoderSubtitles = utils.cast(bool, data.attrib.get('transcoderSubtitles')) self.transcoderVideo = utils.cast(bool, data.attrib.get('transcoderVideo')) self.transcoderVideoBitrates = utils.toList(data.attrib.get('transcoderVideoBitrates')) self.transcoderVideoQualities = utils.toList(data.attrib.get('transcoderVideoQualities')) self.transcoderVideoResolutions = utils.toList(data.attrib.get('transcoderVideoResolutions')) self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt')) self.updater = utils.cast(bool, data.attrib.get('updater')) self.version = data.attrib.get('version') self.voiceSearch = utils.cast(bool, data.attrib.get('voiceSearch')) def _headers(self, **kwargs): """ Returns dict containing base headers for all requests to the server. """ headers = BASE_HEADERS.copy() if self._token: headers['X-Plex-Token'] = self._token headers.update(kwargs) return headers def _uriRoot(self): return 'server://%s/com.plexapp.plugins.library' % self.machineIdentifier @property def library(self): """ Library to browse or search your media. 
""" if not self._library: try: data = self.query(Library.key) self._library = Library(self, data) except BadRequest: data = self.query('/library/sections/') # Only the owner has access to /library # so just return the library without the data. return Library(self, data) return self._library @property def settings(self): """ Returns a list of all server settings. """ if not self._settings: data = self.query(Settings.key) self._settings = Settings(self, data) return self._settings def account(self): """ Returns the :class:`~plexapi.server.Account` object this server belongs to. """ data = self.query(Account.key) return Account(self, data) def claim(self, account): """ Claim the Plex server using a :class:`~plexapi.myplex.MyPlexAccount`. This will only work with an unclaimed server on localhost or the same subnet. Parameters: account (:class:`~plexapi.myplex.MyPlexAccount`): The account used to claim the server. """ key = '/myplex/claim' params = {'token': account.claimToken()} data = self.query(key, method=self._session.post, params=params) return Account(self, data) def unclaim(self): """ Unclaim the Plex server. This will remove the server from your :class:`~plexapi.myplex.MyPlexAccount`. """ data = self.query(Account.key, method=self._session.delete) return Account(self, data) @property def activities(self): """Returns all current PMS activities.""" activities = [] for elem in self.query(Activity.key): activities.append(Activity(self, elem)) return activities def agents(self, mediaType=None): """ Returns the :class:`~plexapi.media.Agent` objects this server has available. """ key = '/system/agents' if mediaType: key += '?mediaType=%s' % mediaType return self.fetchItems(key) def createToken(self, type='delegation', scope='all'): """ Create a temp access token for the server. """ if not self._token: # Handle unclaimed servers return None q = self.query('/security/token?type=%s&scope=%s' % (type, scope)) return q.attrib.get('token') def switchUser(self, username, session=None, timeout=None): """ Returns a new :class:`~plexapi.server.PlexServer` object logged in as the given username. Note: Only the admin account can switch to other users. Parameters: username (str): Username, email or user id of the user to log in to the server. session (requests.Session, optional): Use your own session object if you want to cache the http responses from the server. This will default to the same session as the admin account if no new session is provided. timeout (int, optional): Timeout in seconds on initial connection to the server. This will default to the same timeout as the admin account if no new timeout is provided. Example: .. code-block:: python from plexapi.server import PlexServer # Login to the Plex server using the admin token plex = PlexServer('http://plexserver:32400', token='<KEY>') # Login to the same Plex server using a different account userPlex = plex.switchUser("Username") """ user = self.myPlexAccount().user(username) userToken = user.get_token(self.machineIdentifier) if session is None: session = self._session if timeout is None: timeout = self._timeout return PlexServer(self._baseurl, token=userToken, session=session, timeout=timeout) def systemAccounts(self): """ Returns a list of :class:`~plexapi.server.SystemAccount` objects this server contains. 
""" if self._systemAccounts is None: key = '/accounts' self._systemAccounts = self.fetchItems(key, SystemAccount) return self._systemAccounts def systemAccount(self, accountID): """ Returns the :class:`~plexapi.server.SystemAccount` object for the specified account ID. Parameters: accountID (int): The :class:`~plexapi.server.SystemAccount` ID. """ try: return next(account for account in self.systemAccounts() if account.id == accountID) except StopIteration: raise NotFound('Unknown account with accountID=%s' % accountID) from None def systemDevices(self): """ Returns a list of :class:`~plexapi.server.SystemDevice` objects this server contains. """ if self._systemDevices is None: key = '/devices' self._systemDevices = self.fetchItems(key, SystemDevice) return self._systemDevices def systemDevice(self, deviceID): """ Returns the :class:`~plexapi.server.SystemDevice` object for the specified device ID. Parameters: deviceID (int): The :class:`~plexapi.server.SystemDevice` ID. """ try: return next(device for device in self.systemDevices() if device.id == deviceID) except StopIteration: raise NotFound('Unknown device with deviceID=%s' % deviceID) from None def myPlexAccount(self): """ Returns a :class:`~plexapi.myplex.MyPlexAccount` object using the same token to access this server. If you are not the owner of this PlexServer you're likley to recieve an authentication error calling this. """ if self._myPlexAccount is None: from plexapi.myplex import MyPlexAccount self._myPlexAccount = MyPlexAccount(token=self._token) return self._myPlexAccount def _myPlexClientPorts(self): """ Sometimes the PlexServer does not properly advertise port numbers required to connect. This attemps to look up device port number from plex.tv. See issue #126: Make PlexServer.clients() more user friendly. https://github.com/pkkid/python-plexapi/issues/126 """ try: ports = {} account = self.myPlexAccount() for device in account.devices(): if device.connections and ':' in device.connections[0][6:]: ports[device.clientIdentifier] = device.connections[0].split(':')[-1] return ports except Exception as err: log.warning('Unable to fetch client ports from myPlex: %s', err) return ports def browse(self, path=None, includeFiles=True): """ Browse the system file path using the Plex API. Returns list of :class:`~plexapi.library.Path` and :class:`~plexapi.library.File` objects. Parameters: path (:class:`~plexapi.library.Path` or str, optional): Full path to browse. includeFiles (bool): True to include files when browsing (Default). False to only return folders. """ if isinstance(path, Path): key = path.key elif path is not None: base64path = utils.base64str(path) key = '/services/browse/%s' % base64path else: key = '/services/browse' if includeFiles: key += '?includeFiles=1' return self.fetchItems(key) def walk(self, path=None): """ Walk the system file tree using the Plex API similar to `os.walk`. Yields a 3-tuple `(path, paths, files)` where `path` is a string of the directory path, `paths` is a list of :class:`~plexapi.library.Path` objects, and `files` is a list of :class:`~plexapi.library.File` objects. Parameters: path (:class:`~plexapi.library.Path` or str, optional): Full path to walk. 
""" paths = [] files = [] for item in self.browse(path): if isinstance(item, Path): paths.append(item) elif isinstance(item, File): files.append(item) if isinstance(path, Path): path = path.path yield path or '', paths, files for _path in paths: for path, paths, files in self.walk(_path): yield path, paths, files def clients(self): """ Returns list of all :class:`~plexapi.client.PlexClient` objects connected to server. """ items = [] ports = None for elem in self.query('/clients'): port = elem.attrib.get('port') if not port: log.warning('%s did not advertise a port, checking plex.tv.', elem.attrib.get('name')) ports = self._myPlexClientPorts() if ports is None else ports port = ports.get(elem.attrib.get('machineIdentifier')) baseurl = 'http://%s:%s' % (elem.attrib['host'], port) items.append(PlexClient(baseurl=baseurl, server=self, token=self._token, data=elem, connect=False)) return items def client(self, name): """ Returns the :class:`~plexapi.client.PlexClient` that matches the specified name. Parameters: name (str): Name of the client to return. Raises: :exc:`~plexapi.exceptions.NotFound`: Unknown client name. """ for client in self.clients(): if client and client.title == name: return client raise NotFound('Unknown client name: %s' % name) def createCollection(self, title, section, items=None, smart=False, limit=None, libtype=None, sort=None, filters=None, **kwargs): """ Creates and returns a new :class:`~plexapi.collection.Collection`. Parameters: title (str): Title of the collection. section (:class:`~plexapi.library.LibrarySection`, str): The library section to create the collection in. items (List): Regular collections only, list of :class:`~plexapi.audio.Audio`, :class:`~plexapi.video.Video`, or :class:`~plexapi.photo.Photo` objects to be added to the collection. smart (bool): True to create a smart collection. Default False. limit (int): Smart collections only, limit the number of items in the collection. libtype (str): Smart collections only, the specific type of content to filter (movie, show, season, episode, artist, album, track, photoalbum, photo). sort (str or list, optional): Smart collections only, a string of comma separated sort fields or a list of sort fields in the format ``column:dir``. See :func:`~plexapi.library.LibrarySection.search` for more info. filters (dict): Smart collections only, a dictionary of advanced filters. See :func:`~plexapi.library.LibrarySection.search` for more info. **kwargs (dict): Smart collections only, additional custom filters to apply to the search results. See :func:`~plexapi.library.LibrarySection.search` for more info. Raises: :class:`plexapi.exceptions.BadRequest`: When no items are included to create the collection. :class:`plexapi.exceptions.BadRequest`: When mixing media types in the collection. Returns: :class:`~plexapi.collection.Collection`: A new instance of the created Collection. """ return Collection.create( self, title, section, items=items, smart=smart, limit=limit, libtype=libtype, sort=sort, filters=filters, **kwargs) def createPlaylist(self, title, section=None, items=None, smart=False, limit=None, libtype=None, sort=None, filters=None, **kwargs): """ Creates and returns a new :class:`~plexapi.playlist.Playlist`. Parameters: title (str): Title of the playlist. section (:class:`~plexapi.library.LibrarySection`, str): Smart playlists only, library section to create the playlist in. 
items (List): Regular playlists only, list of :class:`~plexapi.audio.Audio`, :class:`~plexapi.video.Video`, or :class:`~plexapi.photo.Photo` objects to be added to the playlist. smart (bool): True to create a smart playlist. Default False. limit (int): Smart playlists only, limit the number of items in the playlist. libtype (str): Smart playlists only, the specific type of content to filter (movie, show, season, episode, artist, album, track, photoalbum, photo). sort (str or list, optional): Smart playlists only, a string of comma separated sort fields or a list of sort fields in the format ``column:dir``. See :func:`~plexapi.library.LibrarySection.search` for more info. filters (dict): Smart playlists only, a dictionary of advanced filters. See :func:`~plexapi.library.LibrarySection.search` for more info. **kwargs (dict): Smart playlists only, additional custom filters to apply to the search results. See :func:`~plexapi.library.LibrarySection.search` for more info. Raises: :class:`plexapi.exceptions.BadRequest`: When no items are included to create the playlist. :class:`plexapi.exceptions.BadRequest`: When mixing media types in the playlist. Returns: :class:`~plexapi.playlist.Playlist`: A new instance of the created Playlist. """ return Playlist.create( self, title, section=section, items=items, smart=smart, limit=limit, libtype=libtype, sort=sort, filters=filters, **kwargs) def createPlayQueue(self, item, **kwargs): """ Creates and returns a new :class:`~plexapi.playqueue.PlayQueue`. Parameters: item (Media or Playlist): Media or playlist to add to PlayQueue. kwargs (dict): See `~plexapi.playqueue.PlayQueue.create`. """ return PlayQueue.create(self, item, **kwargs) def downloadDatabases(self, savepath=None, unpack=False): """ Download databases. Parameters: savepath (str): Defaults to current working dir. unpack (bool): Unpack the zip file. """ url = self.url('/diagnostics/databases') filepath = utils.download(url, self._token, None, savepath, self._session, unpack=unpack) return filepath def downloadLogs(self, savepath=None, unpack=False): """ Download server logs. Parameters: savepath (str): Defaults to current working dir. unpack (bool): Unpack the zip file. """ url = self.url('/diagnostics/logs') filepath = utils.download(url, self._token, None, savepath, self._session, unpack=unpack) return filepath @deprecated('use "checkForUpdate" instead') def check_for_update(self, force=True, download=False): return self.checkForUpdate() def checkForUpdate(self, force=True, download=False): """ Returns a :class:`~plexapi.base.Release` object containing release info. Parameters: force (bool): Force server to check for new releases download (bool): Download if a update is available. """ part = '/updater/check?download=%s' % (1 if download else 0) if force: self.query(part, method=self._session.put) releases = self.fetchItems('/updater/status') if len(releases): return releases[0] def isLatest(self): """ Check if the installed version of PMS is the latest. """ release = self.checkForUpdate(force=True) return release is None def installUpdate(self): """ Install the newest version of Plex Media Server. """ # We can add this but dunno how useful this is since it sometimes # requires user action using a gui. part = '/updater/apply' release = self.checkForUpdate(force=True, download=True) if release and release.version != self.version: # figure out what method this is.. 
return self.query(part, method=self._session.put) def history(self, maxresults=9999999, mindate=None, ratingKey=None, accountID=None, librarySectionID=None): """ Returns a list of media items from watched history. If there are many results, they will be fetched from the server in batches of X_PLEX_CONTAINER_SIZE amounts. If you're only looking for the first <num> results, it would be wise to set the maxresults option to that amount so this functions doesn't iterate over all results on the server. Parameters: maxresults (int): Only return the specified number of results (optional). mindate (datetime): Min datetime to return results from. This really helps speed up the result listing. For example: datetime.now() - timedelta(days=7) ratingKey (int/str) Request history for a specific ratingKey item. accountID (int/str) Request history for a specific account ID. librarySectionID (int/str) Request history for a specific library section ID. """ results, subresults = [], '_init' args = {'sort': 'viewedAt:desc'} if ratingKey: args['metadataItemID'] = ratingKey if accountID: args['accountID'] = accountID if librarySectionID: args['librarySectionID'] = librarySectionID if mindate: args['viewedAt>'] = int(mindate.timestamp()) args['X-Plex-Container-Start'] = 0 args['X-Plex-Container-Size'] = min(X_PLEX_CONTAINER_SIZE, maxresults) while subresults and maxresults > len(results): key = '/status/sessions/history/all%s' % utils.joinArgs(args) subresults = self.fetchItems(key) results += subresults[:maxresults - len(results)] args['X-Plex-Container-Start'] += args['X-Plex-Container-Size'] return results def playlists(self, playlistType=None, sectionId=None, title=None, sort=None, **kwargs): """ Returns a list of all :class:`~plexapi.playlist.Playlist` objects on the server. Parameters: playlistType (str, optional): The type of playlists to return (audio, video, photo). Default returns all playlists. sectionId (int, optional): The section ID (key) of the library to search within. title (str, optional): General string query to search for. Partial string matches are allowed. sort (str or list, optional): A string of comma separated sort fields in the format ``column:dir``. """ args = {} if playlistType is not None: args['playlistType'] = playlistType if sectionId is not None: args['sectionID'] = sectionId if title is not None: args['title'] = title if sort is not None: # TODO: Automatically retrieve and validate sort field similar to LibrarySection.search() args['sort'] = sort key = '/playlists%s' % utils.joinArgs(args) return self.fetchItems(key, **kwargs) def playlist(self, title): """ Returns the :class:`~plexapi.client.Playlist` that matches the specified title. Parameters: title (str): Title of the playlist to return. Raises: :exc:`~plexapi.exceptions.NotFound`: Unable to find playlist. """ try: return self.playlists(title=title, title__iexact=title)[0] except IndexError: raise NotFound('Unable to find playlist with title "%s".' % title) from None def optimizedItems(self, removeAll=None): """ Returns list of all :class:`~plexapi.media.Optimized` objects connected to server. """ if removeAll is True: key = '/playlists/generators?type=42' self.query(key, method=self._server._session.delete) else: backgroundProcessing = self.fetchItem('/playlists?type=42') return self.fetchItems('%s/items' % backgroundProcessing.key, cls=Optimized) @deprecated('use "plexapi.media.Optimized.items()" instead') def optimizedItem(self, optimizedID): """ Returns single queued optimized item :class:`~plexapi.media.Video` object. 
Allows for using optimized item ID to connect back to source item. """ backgroundProcessing = self.fetchItem('/playlists?type=42') return self.fetchItem('%s/items/%s/items' % (backgroundProcessing.key, optimizedID)) def conversions(self, pause=None): """ Returns list of all :class:`~plexapi.media.Conversion` objects connected to server. """ if pause is True: self.query('/:/prefs?BackgroundQueueIdlePaused=1', method=self._server._session.put) elif pause is False: self.query('/:/prefs?BackgroundQueueIdlePaused=0', method=self._server._session.put) else: return self.fetchItems('/playQueues/1', cls=Conversion) def currentBackgroundProcess(self): """ Returns list of all :class:`~plexapi.media.TranscodeJob` objects running or paused on server. """ return self.fetchItems('/status/sessions/background') def query(self, key, method=None, headers=None, timeout=None, **kwargs): """ Main method used to handle HTTPS requests to the Plex server. This method helps by encoding the response to utf-8 and parsing the returned XML into and ElementTree object. Returns None if no data exists in the response. """ url = self.url(key) method = method or self._session.get timeout = timeout or TIMEOUT log.debug('%s %s', method.__name__.upper(), url) headers = self._headers(**headers or {}) response = method(url, headers=headers, timeout=timeout, **kwargs) if response.status_code not in (200, 201, 204): codename = codes.get(response.status_code)[0] errtext = response.text.replace('\n', ' ') message = '(%s) %s; %s %s' % (response.status_code, codename, response.url, errtext) if response.status_code == 401: raise Unauthorized(message) elif response.status_code == 404: raise NotFound(message) else: raise BadRequest(message) data = response.text.encode('utf8') return ElementTree.fromstring(data) if data.strip() else None def search(self, query, mediatype=None, limit=None, sectionId=None): """ Returns a list of media items or filter categories from the resulting `Hub Search <https://www.plex.tv/blog/seek-plex-shall-find-leveling-web-app/>`_ against all items in your Plex library. This searches genres, actors, directors, playlists, as well as all the obvious media titles. It performs spell-checking against your search terms (because KUROSAWA is hard to spell). It also provides contextual search results. So for example, if you search for 'Pernice', it’ll return '<NAME>' as the artist result, but we’ll also go ahead and return your most-listened to albums and tracks from the artist. If you type 'Arnold' you’ll get a result for the actor, but also the most recently added movies he’s in. Parameters: query (str): Query to use when searching your library. mediatype (str, optional): Limit your search to the specified media type. actor, album, artist, autotag, collection, director, episode, game, genre, movie, photo, photoalbum, place, playlist, shared, show, tag, track limit (int, optional): Limit to the specified number of results per Hub. sectionId (int, optional): The section ID (key) of the library to search within. """ results = [] params = { 'query': query, 'includeCollections': 1, 'includeExternalMedia': 1} if limit: params['limit'] = limit if sectionId: params['sectionId'] = sectionId key = '/hubs/search?%s' % urlencode(params) for hub in self.fetchItems(key, Hub): if mediatype: if hub.type == mediatype: return hub.items else: results += hub.items return results def sessions(self): """ Returns a list of all active session (currently playing) media objects. 
""" return self.fetchItems('/status/sessions') def transcodeSessions(self): """ Returns a list of all active :class:`~plexapi.media.TranscodeSession` objects. """ return self.fetchItems('/transcode/sessions') def startAlertListener(self, callback=None): """ Creates a websocket connection to the Plex Server to optionally recieve notifications. These often include messages from Plex about media scans as well as updates to currently running Transcode Sessions. NOTE: You need websocket-client installed in order to use this feature. >> pip install websocket-client Parameters: callback (func): Callback function to call on recieved messages. Raises: :exc:`~plexapi.exception.Unsupported`: Websocket-client not installed. """ notifier = AlertListener(self, callback) notifier.start() return notifier def transcodeImage(self, imageUrl, height, width, opacity=None, saturation=None, blur=None, background=None, minSize=True, upscale=True, imageFormat=None): """ Returns the URL for a transcoded image. Parameters: imageUrl (str): The URL to the image (eg. returned by :func:`~plexapi.mixins.PosterUrlMixin.thumbUrl` or :func:`~plexapi.mixins.ArtUrlMixin.artUrl`). The URL can be an online image. height (int): Height to transcode the image to. width (int): Width to transcode the image to. opacity (int, optional): Change the opacity of the image (0 to 100) saturation (int, optional): Change the saturation of the image (0 to 100). blur (int, optional): The blur to apply to the image in pixels (e.g. 3). background (str, optional): The background hex colour to apply behind the opacity (e.g. '000000'). minSize (bool, optional): Maintain smallest dimension. Default True. upscale (bool, optional): Upscale the image if required. Default True. imageFormat (str, optional): 'jpeg' (default) or 'png'. """ params = { 'url': imageUrl, 'height': height, 'width': width, 'minSize': int(bool(minSize)), 'upscale': int(bool(upscale)) } if opacity is not None: params['opacity'] = opacity if saturation is not None: params['saturation'] = saturation if blur is not None: params['blur'] = blur if background is not None: params['background'] = str(background).strip('#') if imageFormat is not None: params['format'] = imageFormat.lower() key = '/photo/:/transcode%s' % utils.joinArgs(params) return self.url(key, includeToken=True) def url(self, key, includeToken=None): """ Build a URL string with proper token argument. Token will be appended to the URL if either includeToken is True or CONFIG.log.show_secrets is 'true'. """ if self._token and (includeToken or self._showSecrets): delim = '&' if '?' in key else '?' return '%s%s%sX-Plex-Token=%s' % (self._baseurl, key, delim, self._token) return '%s%s' % (self._baseurl, key) def refreshSynclist(self): """ Force PMS to download new SyncList from Plex.tv. """ return self.query('/sync/refreshSynclists', self._session.put) def refreshContent(self): """ Force PMS to refresh content for known SyncLists. """ return self.query('/sync/refreshContent', self._session.put) def refreshSync(self): """ Calls :func:`~plexapi.server.PlexServer.refreshSynclist` and :func:`~plexapi.server.PlexServer.refreshContent`, just like the Plex Web UI does when you click 'refresh'. """ self.refreshSynclist() self.refreshContent() def _allowMediaDeletion(self, toggle=False): """ Toggle allowMediaDeletion. Parameters: toggle (bool): True enables Media Deletion False or None disable Media Deletion (Default) """ if self.allowMediaDeletion and toggle is False: log.debug('Plex is currently allowed to delete media. 
Toggling off.') elif self.allowMediaDeletion and toggle is True: log.debug('Plex is currently allowed to delete media. Toggle set to allow, exiting.') raise BadRequest('Plex is currently allowed to delete media. Toggle set to allow, exiting.') elif self.allowMediaDeletion is None and toggle is True: log.debug('Plex is currently not allowed to delete media. Toggle set to allow.') else: log.debug('Plex is currently not allowed to delete media. Toggle set to not allow, exiting.') raise BadRequest('Plex is currently not allowed to delete media. Toggle set to not allow, exiting.') value = 1 if toggle is True else 0 return self.query('/:/prefs?allowMediaDeletion=%s' % value, self._session.put) def bandwidth(self, timespan=None, **kwargs): """ Returns a list of :class:`~plexapi.server.StatisticsBandwidth` objects with the Plex server dashboard bandwidth data. Parameters: timespan (str, optional): The timespan to bin the bandwidth data. Default is seconds. Available timespans: seconds, hours, days, weeks, months. **kwargs (dict, optional): Any of the available filters that can be applied to the bandwidth data. The time frame (at) and bytes can also be filtered using less than or greater than (see examples below). * accountID (int): The :class:`~plexapi.server.SystemAccount` ID to filter. * at (datetime): The time frame to filter (inclusive). The time frame can be either: 1. An exact time frame (e.g. Only December 1st 2020 `at=datetime(2020, 12, 1)`). 2. Before a specific time (e.g. Before and including December 2020 `at<=datetime(2020, 12, 1)`). 3. After a specific time (e.g. After and including January 2021 `at>=datetime(2021, 1, 1)`). * bytes (int): The amount of bytes to filter (inclusive). The bytes can be either: 1. An exact number of bytes (not very useful) (e.g. `bytes=1024**3`). 2. Less than or equal number of bytes (e.g. `bytes<=1024**3`). 3. Greater than or equal number of bytes (e.g. `bytes>=1024**3`). * deviceID (int): The :class:`~plexapi.server.SystemDevice` ID to filter. * lan (bool): True to only retrieve local bandwidth, False to only retrieve remote bandwidth. Default returns all local and remote bandwidth. Raises: :exc:`~plexapi.exceptions.BadRequest`: When applying an invalid timespan or unknown filter. Example: .. code-block:: python from plexapi.server import PlexServer plex = PlexServer('http://localhost:32400', token='<KEY>') # Filter bandwidth data for December 2020 and later, and more than 1 GB used. filters = { 'at>': datetime(2020, 12, 1), 'bytes>': 1024**3 } # Retrieve bandwidth data in one day timespans. bandwidthData = plex.bandwidth(timespan='days', **filters) # Print out bandwidth usage for each account and device combination. for bandwidth in sorted(bandwidthData, key=lambda x: x.at): account = bandwidth.account() device = bandwidth.device() gigabytes = round(bandwidth.bytes / 1024**3, 3) local = 'local' if bandwidth.lan else 'remote' date = bandwidth.at.strftime('%Y-%m-%d') print('%s used %s GB of %s bandwidth on %s from %s' % (account.name, gigabytes, local, date, device.name)) """ params = {} if timespan is None: params['timespan'] = 6 # Default to seconds else: timespans = { 'seconds': 6, 'hours': 4, 'days': 3, 'weeks': 2, 'months': 1 } try: params['timespan'] = timespans[timespan] except KeyError: raise BadRequest('Invalid timespan specified: %s. 
' 'Available timespans: %s' % (timespan, ', '.join(timespans.keys()))) filters = {'accountID', 'at', 'at<', 'at>', 'bytes', 'bytes<', 'bytes>', 'deviceID', 'lan'} for key, value in kwargs.items(): if key not in filters: raise BadRequest('Unknown filter: %s=%s' % (key, value)) if key.startswith('at'): try: value = utils.cast(int, value.timestamp()) except AttributeError: raise BadRequest('Time frame filter must be a datetime object: %s=%s' % (key, value)) elif key.startswith('bytes') or key == 'lan': value = utils.cast(int, value) elif key == 'accountID': if value == self.myPlexAccount().id: value = 1 # The admin account is accountID=1 params[key] = value key = '/statistics/bandwidth?%s' % urlencode(params) return self.fetchItems(key, StatisticsBandwidth) def resources(self): """ Returns a list of :class:`~plexapi.server.StatisticsResources` objects with the Plex server dashboard resources data. """ key = '/statistics/resources?timespan=6' return self.fetchItems(key, StatisticsResources) def _buildWebURL(self, base=None, endpoint=None, **kwargs): """ Build the Plex Web URL for the object. Parameters: base (str): The base URL before the fragment (``#!``). Default is https://app.plex.tv/desktop. endpoint (str): The Plex Web URL endpoint. None for server, 'playlist' for playlists, 'details' for all other media types. **kwargs (dict): Dictionary of URL parameters. """ if base is None: base = 'https://app.plex.tv/desktop/' if endpoint: return '%s#!/server/%s/%s%s' % ( base, self.machineIdentifier, endpoint, utils.joinArgs(kwargs) ) else: return '%s#!/media/%s/com.plexapp.plugins.library%s' % ( base, self.machineIdentifier, utils.joinArgs(kwargs) ) def getWebURL(self, base=None, playlistTab=None): """ Returns the Plex Web URL for the server. Parameters: base (str): The base URL before the fragment (``#!``). Default is https://app.plex.tv/desktop. playlistTab (str): The playlist tab (audio, video, photo). Only used for the playlist URL. """ if playlistTab is not None: params = {'source': 'playlists', 'pivot': 'playlists.%s' % playlistTab} else: params = {'key': '/hubs', 'pageType': 'hub'} return self._buildWebURL(base=base, **params) class Account(PlexObject): """ Contains the locally cached MyPlex account information. The properties provided don't match the :class:`~plexapi.myplex.MyPlexAccount` object very well. I believe this exists because access to myplex is not required to get basic plex information. I can't imagine object is terribly useful except unless you were needed this information while offline. Parameters: server (:class:`~plexapi.server.PlexServer`): PlexServer this account is connected to (optional) data (ElementTree): Response from PlexServer used to build this object (optional). Attributes: authToken (str): Plex authentication token to access the server. mappingError (str): Unknown mappingErrorMessage (str): Unknown mappingState (str): Unknown privateAddress (str): Local IP address of the Plex server. privatePort (str): Local port of the Plex server. publicAddress (str): Public IP address of the Plex server. publicPort (str): Public port of the Plex server. signInState (str): Signin state for this account (ex: ok). subscriptionActive (str): True if the account subscription is active. subscriptionFeatures (str): List of features allowed by the server for this account. This may be based on your PlexPass subscription. 
Features include: camera_upload, cloudsync, content_filter, dvr, hardware_transcoding, home, lyrics, music_videos, pass, photo_autotags, premium_music_metadata, session_bandwidth_restrictions, sync, trailers, webhooks' (and maybe more). subscriptionState (str): 'Active' if this subscription is active. username (str): Plex account username (<EMAIL>). """ key = '/myplex/account' def _loadData(self, data): self._data = data self.authToken = data.attrib.get('authToken') self.username = data.attrib.get('username') self.mappingState = data.attrib.get('mappingState') self.mappingError = data.attrib.get('mappingError') self.mappingErrorMessage = data.attrib.get('mappingErrorMessage') self.signInState = data.attrib.get('signInState') self.publicAddress = data.attrib.get('publicAddress') self.publicPort = data.attrib.get('publicPort') self.privateAddress = data.attrib.get('privateAddress') self.privatePort = data.attrib.get('privatePort') self.subscriptionFeatures = utils.toList(data.attrib.get('subscriptionFeatures')) self.subscriptionActive = utils.cast(bool, data.attrib.get('subscriptionActive')) self.subscriptionState = data.attrib.get('subscriptionState') class Activity(PlexObject): """A currently running activity on the PlexServer.""" key = '/activities' def _loadData(self, data): self._data = data self.cancellable = utils.cast(bool, data.attrib.get('cancellable')) self.progress = utils.cast(int, data.attrib.get('progress')) self.title = data.attrib.get('title') self.subtitle = data.attrib.get('subtitle') self.type = data.attrib.get('type') self.uuid = data.attrib.get('uuid') @utils.registerPlexObject class Release(PlexObject): TAG = 'Release' key = '/updater/status' def _loadData(self, data): self.download_key = data.attrib.get('key') self.version = data.attrib.get('version') self.added = data.attrib.get('added') self.fixed = data.attrib.get('fixed') self.downloadURL = data.attrib.get('downloadURL') self.state = data.attrib.get('state') class SystemAccount(PlexObject): """ Represents a single system account. Attributes: TAG (str): 'Account' autoSelectAudio (bool): True or False if the account has automatic audio language enabled. defaultAudioLanguage (str): The default audio language code for the account. defaultSubtitleLanguage (str): The default subtitle language code for the account. id (int): The Plex account ID. key (str): API URL (/accounts/<id>) name (str): The username of the account. subtitleMode (bool): The subtitle mode for the account. thumb (str): URL for the account thumbnail. """ TAG = 'Account' def _loadData(self, data): self._data = data self.autoSelectAudio = utils.cast(bool, data.attrib.get('autoSelectAudio')) self.defaultAudioLanguage = data.attrib.get('defaultAudioLanguage') self.defaultSubtitleLanguage = data.attrib.get('defaultSubtitleLanguage') self.id = utils.cast(int, data.attrib.get('id')) self.key = data.attrib.get('key') self.name = data.attrib.get('name') self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode')) self.thumb = data.attrib.get('thumb') # For backwards compatibility self.accountID = self.id self.accountKey = self.key class SystemDevice(PlexObject): """ Represents a single system device. Attributes: TAG (str): 'Device' clientIdentifier (str): The unique identifier for the device. createdAt (datatime): Datetime the device was created. id (int): The ID of the device (not the same as :class:`~plexapi.myplex.MyPlexDevice` ID). key (str): API URL (/devices/<id>) name (str): The name of the device. 
platform (str): OS the device is running (Linux, Windows, Chrome, etc.)
    """
    TAG = 'Device'

    def _loadData(self, data):
        self._data = data
        self.clientIdentifier = data.attrib.get('clientIdentifier')
        self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
        self.id = utils.cast(int, data.attrib.get('id'))
        self.key = '/devices/%s' % self.id
        self.name = data.attrib.get('name')
        self.platform = data.attrib.get('platform')


class StatisticsBandwidth(PlexObject):
    """ Represents a single bandwidth statistics record.

        Attributes:
            TAG (str): 'StatisticsBandwidth'
            accountID (int): The associated :class:`~plexapi.server.SystemAccount` ID.
            at (datetime): Datetime of the bandwidth data.
            bytes (int): The total number of bytes for the specified timespan.
            deviceID (int): The associated :class:`~plexapi.server.SystemDevice` ID.
            lan (bool): True if the bandwidth is local (LAN), False if it is remote.
            timespan (int): The timespan for the bandwidth data.
                1: months, 2: weeks, 3: days, 4: hours, 6: seconds.
    """
    TAG = 'StatisticsBandwidth'

    def _loadData(self, data):
        self._data = data
        self.accountID = utils.cast(int, data.attrib.get('accountID'))
        self.at = utils.toDatetime(data.attrib.get('at'))
        self.bytes = utils.cast(int, data.attrib.get('bytes'))
        self.deviceID = utils.cast(int, data.attrib.get('deviceID'))
        self.lan = utils.cast(bool, data.attrib.get('lan'))
        self.timespan = utils.cast(int, data.attrib.get('timespan'))

    def __repr__(self):
        return '<%s>' % ':'.join([p for p in [
            self.__class__.__name__,
            self._clean(self.accountID),
            self._clean(self.deviceID),
            self._clean(int(self.at.timestamp()))
        ] if p])

    def account(self):
        """ Returns the :class:`~plexapi.server.SystemAccount` associated with the bandwidth data. """
        return self._server.systemAccount(self.accountID)

    def device(self):
        """ Returns the :class:`~plexapi.server.SystemDevice` associated with the bandwidth data. """
        return self._server.systemDevice(self.deviceID)


class StatisticsResources(PlexObject):
    """ Represents a single resource usage statistics record.

        Attributes:
            TAG (str): 'StatisticsResources'
            at (datetime): Datetime of the resource data.
            hostCpuUtilization (float): The system CPU usage %.
            hostMemoryUtilization (float): The system RAM usage %.
            processCpuUtilization (float): The Plex Media Server CPU usage %.
            processMemoryUtilization (float): The Plex Media Server RAM usage %.
            timespan (int): The timespan for the resource data (6: seconds).
    """
    TAG = 'StatisticsResources'

    def _loadData(self, data):
        self._data = data
        self.at = utils.toDatetime(data.attrib.get('at'))
        self.hostCpuUtilization = utils.cast(float, data.attrib.get('hostCpuUtilization'))
        self.hostMemoryUtilization = utils.cast(float, data.attrib.get('hostMemoryUtilization'))
        self.processCpuUtilization = utils.cast(float, data.attrib.get('processCpuUtilization'))
        self.processMemoryUtilization = utils.cast(float, data.attrib.get('processMemoryUtilization'))
        self.timespan = utils.cast(int, data.attrib.get('timespan'))

    def __repr__(self):
        return '<%s>' % ':'.join([p for p in [
            self.__class__.__name__,
            self._clean(int(self.at.timestamp()))
        ] if p])
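A minimal, hypothetical usage sketch for the dashboard statistics and history helpers defined above; the base URL and token are placeholders, not real credentials.

# Hedged example: assumes a reachable Plex server and a valid token.
from datetime import datetime, timedelta
from plexapi.server import PlexServer

plex = PlexServer('http://localhost:32400', token='<token>')

# Watch history from the last week, capped at 50 entries.
recent = plex.history(maxresults=50, mindate=datetime.now() - timedelta(days=7))

# Bandwidth binned per day; accountID=1 is the admin account per bandwidth() above.
for bw in plex.bandwidth(timespan='days', accountID=1):
    print(bw.at, bw.bytes, 'LAN' if bw.lan else 'WAN')

# Resource usage samples (6-second bins).
for sample in plex.resources():
    print(sample.at, sample.hostCpuUtilization, sample.processMemoryUtilization)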
office365/sharepoint/server_settings.py
vgrem/Office365-REST-Python-Client
544
12615895
<filename>office365/sharepoint/server_settings.py from office365.runtime.client_result import ClientResult from office365.runtime.client_value_collection import ClientValueCollection from office365.runtime.queries.service_operation_query import ServiceOperationQuery from office365.runtime.paths.resource_path import ResourcePath from office365.sharepoint.base_entity import BaseEntity from office365.sharepoint.sites.language_collection import LanguageCollection class ServerSettings(BaseEntity): """Provides methods for obtaining server properties.""" def __init__(self, context): super(ServerSettings, self).__init__(context, ResourcePath("SP.ServerSettings")) @staticmethod def is_sharepoint_online(context): """ :type context: office365.sharepoint.client_context.ClientContext """ binding_type = ServerSettings(context) return_type = ClientResult(context) qry = ServiceOperationQuery(binding_type, "IsSharePointOnline", None, None, None, return_type) qry.static = True context.add_query(qry) return return_type @staticmethod def get_blocked_file_extensions(context): """ :type context: office365.sharepoint.client_context.ClientContext """ binding_type = ServerSettings(context) return_type = ClientResult(context, ClientValueCollection(str)) qry = ServiceOperationQuery(binding_type, "GetBlockedFileExtensions", None, None, None, return_type) qry.static = True context.add_query(qry) return return_type @staticmethod def get_global_installed_languages(context, compatibility_level): """ Gets a list of installed languages that are compatible with a given version of SharePoint. :type context: office365.sharepoint.client_context.ClientContext :param int compatibility_level: The value of the major SharePoint version to query for installed languages. """ binding_type = ServerSettings(context) return_type = LanguageCollection(context) qry = ServiceOperationQuery(binding_type, "GetGlobalInstalledLanguages", [compatibility_level], None, None, return_type) qry.static = True context.add_query(qry) return return_type
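A hypothetical call sequence for the static helpers above, assuming `ctx` is an already authenticated office365.sharepoint.client_context.ClientContext instance (how it is built is outside this file); the compatibility level is only an example value.

from office365.sharepoint.server_settings import ServerSettings

is_online = ServerSettings.is_sharepoint_online(ctx)        # ClientResult holding a bool
blocked = ServerSettings.get_blocked_file_extensions(ctx)   # ClientResult holding a string collection
languages = ServerSettings.get_global_installed_languages(ctx, 15)
ctx.execute_query()  # the queued service operation queries execute here

print(is_online.value)
print(list(blocked.value))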
2-Bring-Your-Own/lambda-code/MLOps-BYO-DeployModel.py
VaibhavSingh98/mlops-amazon-sagemaker-devops-with-ml
108
12615914
import boto3 import os import tempfile import json from boto3.session import Session sagemaker = boto3.client('sagemaker') code_pipeline = boto3.client('codepipeline') def lambda_handler(event, context): try: # Read in information from previous get_status job previousStepEvent = read_job_info(event) jobName = previousStepEvent['TrainingJobName'] jobArn = previousStepEvent['TrainingJobArn'] print("[INFO]TrainingJobName:", jobName) print("[INFO]TrainingJobArn:", jobArn) modelArtifact = previousStepEvent['ModelArtifacts']['S3ModelArtifacts'] print("[INFO]Model Artifacts:", modelArtifact) trainingImage = previousStepEvent['AlgorithmSpecification']['TrainingImage'] print("[INFO]TrainingImage:", trainingImage) print("[INFO]Creating new endpoint configuration") configText = event['CodePipeline.job']['data']['actionConfiguration']['configuration']['UserParameters'] config_param = json.loads(configText) event['stage'] = 'Deployment' event['status'] = 'Creating' endpoint_environment = config_param["EndpointConfigName"] print("[INFO]Endpoint environment:", endpoint_environment) # endpoint_environment can be changed based on specific environment setup # valid values are 'Dev','Test','Prod' # value input below should be representative to the first target environment in your pipeline (typically Dev or Test) if endpoint_environment == 'Dev': print("[INFO]Environment Input is Dev so Creating model resource from training artifact") create_model(jobName, trainingImage , modelArtifact) else: print("[INFO]Environment Input is not equal to Dev meaning model already exists - no need to recreate") endpoint_config_name= jobName+'-'+ endpoint_environment print("[INFO]EndpointConfigName:", endpoint_config_name) event['message'] = 'Creating Endpoint Hosting"{} started."'.format(endpoint_config_name) create_endpoint_config(jobName,endpoint_config_name,config_param) create_endpoint(endpoint_config_name) event['models'] = 'ModelName:"'.format(jobName) event['status'] = 'InService' event['endpoint'] = endpoint_config_name event['endpoint_config'] = endpoint_config_name event['job_name'] = jobName write_job_info_s3(event) put_job_success(event) except Exception as e: print(e) print('Unable to create deployment job.') event['message'] = str(e) put_job_failure(event) return event def create_model(jobName, trainingImage, modelArtifact): """ Create SageMaker model. Args: jobName (string): Name to label model with trainingImage (string): Registry path of the Docker image that contains the model algorithm modelArtifact (string): URL of the model artifacts created during training to download to container Returns: (None) """ # Role to pass to SageMaker training job that has access to training data in S3, etc SageMakerRole = os.environ['SageMakerExecutionRole'] try: response=sagemaker.create_model( ModelName=jobName, PrimaryContainer={ 'Image': trainingImage, 'ModelDataUrl': modelArtifact }, ExecutionRoleArn=SageMakerRole ) except Exception as e: print(e) print("ERROR:", "create_model", response) raise(e) def create_endpoint_config(jobName,endpoint_config_name,config_param): """ Create SageMaker endpoint configuration. Args: jobName (string): Name to label endpoint configuration with. 
For easy identification of model deployed behind endpoint the endpoint name will match the trainingjob Returns: (None) { "InitialInstanceCount": "1", "InitialVariantWeight": "1", "InstanceType": "ml.t2.medium", "EndpointConfigName": "Dev" } """ try: deploy_instance_type = config_param['InstanceType'] initial_variant_weight = config_param['InitialVariantWeight'] initial_instance_count = config_param['InitialInstanceCount'] print('[INFO]DEPLOY_INSTANCE_TYPE:', deploy_instance_type) print('[INFO]INITIAL_VARIANT_WEIGHT:', initial_variant_weight) print('[INFO]INITIAL_INSTANCE_COUNT:', initial_instance_count) response = sagemaker.create_endpoint_config( EndpointConfigName=endpoint_config_name, ProductionVariants=[ { 'VariantName': 'AllTraffic', 'ModelName': jobName, 'InitialInstanceCount': initial_instance_count, 'InitialVariantWeight': initial_variant_weight, 'InstanceType': deploy_instance_type, } ] ) print("[SUCCESS]create_endpoint_config:", response) return response except Exception as e: print(e) print("[ERROR]create_endpoint_config:", response) raise(e) def check_endpoint_exists(endpoint_name): """ Check if SageMaker endpoint for model already exists. Args: endpoint_name (string): Name of endpoint to check if exists. Returns: (boolean) True if endpoint already exists. False otherwise. """ try: response = sagemaker.describe_endpoint( EndpointName=endpoint_name ) print("[SUCCESS]check_endpoint_exists:", response) return True except Exception as e: print("[ERROR]check_endpoint_exists:", response) return False def create_endpoint(endpoint_config_name): print("[INFO]Creating Endpoint") """ Create SageMaker endpoint with input endpoint configuration. Args: jobName (string): Name of endpoint to create. EndpointConfigName (string): Name of endpoint configuration to create endpoint with. Returns: (None) """ try: response = sagemaker.create_endpoint( EndpointName=endpoint_config_name, EndpointConfigName=endpoint_config_name ) print("[SUCCESS]create_endpoint:", response) return response except Exception as e: print(e) print("[ERROR]create_endpoint:", response) raise(e) def update_endpoint(endpoint_name, config_name): """ Update SageMaker endpoint to input endpoint configuration. Args: endpoint_name (string): Name of endpoint to update. config_name (string): Name of endpoint configuration to update endpoint with. 
Returns: (None) """ try: sagemaker.update_endpoint( EndpointName=endpoint_name, EndpointConfigName=config_name ) except Exception as e: print(e) print("[ERROR]update_endpoint:", e) raise(e) def read_job_info(event): tmp_file = tempfile.NamedTemporaryFile() #objectKey = event['CodePipeline.job']['data']['inputArtifacts'][0]['location']['s3Location']['objectKey'] objectKey = event['CodePipeline.job']['data']['inputArtifacts'][0]['location']['s3Location']['objectKey'] print("[INFO]Object:", objectKey) bucketname = event['CodePipeline.job']['data']['inputArtifacts'][0]['location']['s3Location']['bucketName'] print("[INFO]Bucket:", bucketname) artifactCredentials = event['CodePipeline.job']['data']['artifactCredentials'] session = Session(aws_access_key_id=artifactCredentials['accessKeyId'], aws_secret_access_key=artifactCredentials['secretAccessKey'], aws_session_token=artifactCredentials['sessionToken']) s3 = session.resource('s3') obj = s3.Object(bucketname,objectKey) item = json.loads(obj.get()['Body'].read().decode('utf-8')) print("Item:", item) return item def write_job_info_s3(event): print(event) objectKey = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['objectKey'] bucketname = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['bucketName'] artifactCredentials = event['CodePipeline.job']['data']['artifactCredentials'] artifactName = event['CodePipeline.job']['data']['outputArtifacts'][0]['name'] # S3 Managed Key for Encryption S3SSEKey = os.environ['SSEKMSKeyIdIn'] json_data = json.dumps(event) print(json_data) session = Session(aws_access_key_id=artifactCredentials['accessKeyId'], aws_secret_access_key=artifactCredentials['secretAccessKey'], aws_session_token=artifactCredentials['sessionToken']) s3 = session.resource("s3") #object = s3.Object(bucketname, objectKey + '/event.json') object = s3.Object(bucketname, objectKey) print(object) object.put(Body=json_data, ServerSideEncryption='aws:kms', SSEKMSKeyId=S3SSEKey) print('event written to s3') def put_job_success(event): print("[SUCCESS]Endpoint Deployed") print(event['message']) code_pipeline.put_job_success_result(jobId=event['CodePipeline.job']['id']) def put_job_failure(event): print('[ERROR]Putting job failure') print(event['message']) #code_pipeline.put_job_failure_result(jobId=event['CodePipeline.job']['id'], failureDetails={'message': event['message'], 'type': 'JobFailed'}) #temporary very ugly fix - stuck in loop and need to adding logic checking existing - it is creating model, endpoint config and endpoint successfully code_pipeline.put_job_success_result(jobId=event['CodePipeline.job']['id'])
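For reference, a hedged sketch of the CodePipeline UserParameters payload that lambda_handler() parses into config_param; it mirrors the shape documented in create_endpoint_config() above, and the concrete values are examples only.

import json

user_parameters = json.dumps({
    "InitialInstanceCount": 1,      # numeric types assumed here; the docstring example shows strings
    "InitialVariantWeight": 1,
    "InstanceType": "ml.t2.medium",
    "EndpointConfigName": "Dev"
})
config_param = json.loads(user_parameters)  # what the handler reads from the action configuration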
bayespy/utils/tests/test_linalg.py
dungvtdev/upsbayescpm
622
12615929
################################################################################ # Copyright (C) 2013 <NAME> # # This file is licensed under the MIT License. ################################################################################ """ Unit tests for bayespy.utils.linalg module. """ import numpy as np from .. import misc from .. import linalg class TestDot(misc.TestCase): def test_dot(self): """ Test dot product multiple multi-dimensional arrays. """ # If no arrays, return 0 self.assertAllClose(linalg.dot(), 0) # If only one array, return itself self.assertAllClose(linalg.dot([[1,2,3], [4,5,6]]), [[1,2,3], [4,5,6]]) # Basic test of two arrays: (2,3) * (3,2) self.assertAllClose(linalg.dot([[1,2,3], [4,5,6]], [[7,8], [9,1], [2,3]]), [[31,19], [85,55]]) # Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2) self.assertAllClose(linalg.dot([[1,2,3], [4,5,6]], [[7,8], [9,1], [2,3]], [[4], [5]], [[6,7]]), [[1314,1533], [3690,4305]]) # Test broadcasting: (2,2,2) * (2,2,2,2) self.assertAllClose(linalg.dot([[[1,2], [3,4]], [[5,6], [7,8]]], [[[[1,2], [3,4]], [[5,6], [7,8]]], [[[9,1], [2,3]], [[4,5], [6,7]]]]), [[[[ 7, 10], [ 15, 22]], [[ 67, 78], [ 91, 106]]], [[[ 13, 7], [ 35, 15]], [[ 56, 67], [ 76, 91]]]]) # Inconsistent shapes: (2,3) * (2,3) self.assertRaises(ValueError, linalg.dot, [[1,2,3], [4,5,6]], [[1,2,3], [4,5,6]]) # Other axes do not broadcast: (2,2,2) * (3,2,2) self.assertRaises(ValueError, linalg.dot, [[[1,2], [3,4]], [[5,6], [7,8]]], [[[1,2], [3,4]], [[5,6], [7,8]], [[9,1], [2,3]]]) # Do not broadcast matrix axes: (2,1) * (3,2) self.assertRaises(ValueError, linalg.dot, [[1], [2]], [[1,2,3], [4,5,6]]) # Do not accept less than 2-D arrays: (2) * (2,2) self.assertRaises(ValueError, linalg.dot, [1,2], [[1,2,3], [4,5,6]]) class TestBandedSolve(misc.TestCase): def test_block_banded_solve(self): """ Test the Gaussian elimination algorithm for block-banded matrices. """ # # Create a block-banded matrix # # Number of blocks N = 40 # Random sizes of the blocks #D = np.random.randint(5, 10, size=N) # Fixed sizes of the blocks D = 5*np.ones(N, dtype=np.int) # Some helpful variables to create the covariances W = [np.random.randn(D[i], 2*D[i]) for i in range(N)] # The diagonal blocks (covariances) A = [np.dot(W[i], W[i].T) for i in range(N)] # The superdiagonal blocks (cross-covariances) B = [np.dot(W[i][:,-1:], W[i+1][:,:1].T) for i in range(N-1)] C = misc.block_banded(A, B) # Create the system to be solved: y=C*x x_true = np.random.randn(np.sum(D)) y = np.dot(C, x_true) x_true = np.reshape(x_true, (N, -1)) y = np.reshape(y, (N, -1)) # # Run tests # # The correct inverse invC = np.linalg.inv(C) # Inverse from the function that is tested (invA, invB, x, ldet) = linalg.block_banded_solve(np.asarray(A), np.asarray(B), np.asarray(y)) # Check that you get the correct number of blocks self.assertEqual(len(invA), N) self.assertEqual(len(invB), N-1) # Check each block i0 = 0 for i in range(N-1): i1 = i0 + D[i] i2 = i1 + D[i+1] # Check diagonal block self.assertTrue(np.allclose(invA[i], invC[i0:i1, i0:i1])) # Check super-diagonal block self.assertTrue(np.allclose(invB[i], invC[i0:i1, i1:i2])) i0 = i1 # Check last block self.assertTrue(np.allclose(invA[-1], invC[i0:, i0:])) # Check the solution of the system self.assertTrue(np.allclose(x_true, x)) # Check the log determinant self.assertAlmostEqual(ldet/np.linalg.slogdet(C)[1], 1)
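A minimal standalone sketch of calling linalg.block_banded_solve() outside the test class, mirroring the setup above; the block count and block size are arbitrary example values.

import numpy as np
from bayespy.utils import linalg

N, D = 4, 3
W = [np.random.randn(D, 2 * D) for _ in range(N)]
A = [W[i] @ W[i].T for i in range(N)]                                # diagonal (covariance) blocks
B = [np.outer(W[i][:, -1], W[i + 1][:, 0]) for i in range(N - 1)]    # superdiagonal blocks
y = np.random.randn(N, D)

# Returns the diagonal/superdiagonal blocks of the inverse, the solution x, and the log-determinant.
invA, invB, x, logdet = linalg.block_banded_solve(np.asarray(A), np.asarray(B), y)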
recipes/Python/577629_namedtupleabc__abstract_base_class__mixnamed/recipe-577629.py
tdiprima/code
2023
12615930
<filename>recipes/Python/577629_namedtupleabc__abstract_base_class__mixnamed/recipe-577629.py #!/usr/bin/env python # Copyright (c) 2011 <NAME> (zuo). Available under the MIT License. """ namedtuple_with_abc.py: * named tuple mix-in + ABC (abstract base class) recipe, * works under Python 2.6, 2.7 as well as 3.x. Import this module to patch collections.namedtuple() factory function -- enriching it with the 'abc' attribute (an abstract base class + mix-in for named tuples) and decorating it with a wrapper that registers each newly created named tuple as a subclass of namedtuple.abc. How to import: import collections, namedtuple_with_abc or: import namedtuple_with_abc from collections import namedtuple # ^ in this variant you must import namedtuple function # *after* importing namedtuple_with_abc module or simply: from namedtuple_with_abc import namedtuple Simple usage example: class Credentials(namedtuple.abc): _fields = 'username password' def __str__(self): return ('{0.__class__.__name__}' '(username={0.username}, password=...)'.format(self)) print(Credentials("alice", "<PASSWORD>'s password")) For more advanced examples -- see below the "if __name__ == '__main__':". """ import collections from abc import ABCMeta, abstractproperty from functools import wraps from sys import version_info __all__ = ('namedtuple',) _namedtuple = collections.namedtuple class _NamedTupleABCMeta(ABCMeta): '''The metaclass for the abstract base class + mix-in for named tuples.''' def __new__(mcls, name, bases, namespace): fields = namespace.get('_fields') for base in bases: if fields is not None: break fields = getattr(base, '_fields', None) if not isinstance(fields, abstractproperty): basetuple = _namedtuple(name, fields) bases = (basetuple,) + bases namespace.pop('_fields', None) namespace.setdefault('__doc__', basetuple.__doc__) namespace.setdefault('__slots__', ()) return ABCMeta.__new__(mcls, name, bases, namespace) exec( # Python 2.x metaclass declaration syntax """class _NamedTupleABC(object): '''The abstract base class + mix-in for named tuples.''' __metaclass__ = _NamedTupleABCMeta _fields = abstractproperty()""" if version_info[0] < 3 else # Python 3.x metaclass declaration syntax """class _NamedTupleABC(metaclass=_NamedTupleABCMeta): '''The abstract base class + mix-in for named tuples.''' _fields = abstractproperty()""" ) _namedtuple.abc = _NamedTupleABC #_NamedTupleABC.register(type(version_info)) # (and similar, in the future...) @wraps(_namedtuple) def namedtuple(*args, **kwargs): '''Named tuple factory with namedtuple.abc subclass registration.''' cls = _namedtuple(*args, **kwargs) _NamedTupleABC.register(cls) return cls collections.namedtuple = namedtuple if __name__ == '__main__': '''Examples and explanations''' # Simple usage class MyRecord(namedtuple.abc): _fields = 'x y z' # such form will be transformed into ('x', 'y', 'z') def _my_custom_method(self): return list(self._asdict().items()) # (the '_fields' attribute belongs to the named tuple public API anyway) rec = MyRecord(1, 2, 3) print(rec) print(rec._my_custom_method()) print(rec._replace(y=222)) print(rec._replace(y=222)._my_custom_method()) # Custom abstract classes... 
class MyAbstractRecord(namedtuple.abc): def _my_custom_method(self): return list(self._asdict().items()) try: MyAbstractRecord() # (abstract classes cannot be instantiated) except TypeError as exc: print(exc) class AnotherAbstractRecord(MyAbstractRecord): def __str__(self): return '<<<{0}>>>'.format(super(AnotherAbstractRecord, self).__str__()) # ...and their non-abstract subclasses class MyRecord2(MyAbstractRecord): _fields = 'a, b' class MyRecord3(AnotherAbstractRecord): _fields = 'p', 'q', 'r' rec2 = MyRecord2('foo', 'bar') print(rec2) print(rec2._my_custom_method()) print(rec2._replace(b=222)) print(rec2._replace(b=222)._my_custom_method()) rec3 = MyRecord3('foo', 'bar', 'baz') print(rec3) print(rec3._my_custom_method()) print(rec3._replace(q=222)) print(rec3._replace(q=222)._my_custom_method()) # You can also subclass non-abstract ones... class MyRecord33(MyRecord3): def __str__(self): return '< {0!r}, ..., {0!r} >'.format(self.p, self.r) rec33 = MyRecord33('foo', 'bar', 'baz') print(rec33) print(rec33._my_custom_method()) print(rec33._replace(q=222)) print(rec33._replace(q=222)._my_custom_method()) # ...and even override the magic '_fields' attribute again class MyRecord345(MyRecord3): _fields = 'e f g h i j k' rec345 = MyRecord345(1, 2, 3, 4, 3, 2, 1) print(rec345) print(rec345._my_custom_method()) print(rec345._replace(f=222)) print(rec345._replace(f=222)._my_custom_method()) # Mixing-in some other classes is also possible: class MyMixIn(object): def method(self): return "MyMixIn.method() called" def _my_custom_method(self): return "MyMixIn._my_custom_method() called" def count(self, item): return "MyMixIn.count({0}) called".format(item) def _asdict(self): # (cannot override a namedtuple method, see below) return "MyMixIn._asdict() called" class MyRecord4(MyRecord33, MyMixIn): # mix-in on the right _fields = 'j k l x' class MyRecord5(MyMixIn, MyRecord33): # mix-in on the left _fields = 'j k l x y' rec4 = MyRecord4(1, 2, 3, 2) print(rec4) print(rec4.method()) print(rec4._my_custom_method()) # MyRecord33's print(rec4.count(2)) # tuple's print(rec4._replace(k=222)) print(rec4._replace(k=222).method()) print(rec4._replace(k=222)._my_custom_method()) # MyRecord33's print(rec4._replace(k=222).count(8)) # tuple's rec5 = MyRecord5(1, 2, 3, 2, 1) print(rec5) print(rec5.method()) print(rec5._my_custom_method()) # MyMixIn's print(rec5.count(2)) # MyMixIn's print(rec5._replace(k=222)) print(rec5._replace(k=222).method()) print(rec5._replace(k=222)._my_custom_method()) # MyMixIn's print(rec5._replace(k=222).count(2)) # MyMixIn's # None that behavior: the standard namedtuple methods cannot be # overriden by a foreign mix-in -- even if the mix-in is declared # as the leftmost base class (but, obviously, you can override them # in the defined class or its subclasses): print(rec4._asdict()) # (returns a dict, not "MyMixIn._asdict() called") print(rec5._asdict()) # (returns a dict, not "MyMixIn._asdict() called") class MyRecord6(MyRecord33): _fields = 'j k l x y z' def _asdict(self): return "MyRecord6._asdict() called" rec6 = MyRecord6(1, 2, 3, 1, 2, 3) print(rec6._asdict()) # (this returns "MyRecord6._asdict() called") # All that record classes are real subclasses of namedtuple.abc: assert issubclass(MyRecord, namedtuple.abc) assert issubclass(MyAbstractRecord, namedtuple.abc) assert issubclass(AnotherAbstractRecord, namedtuple.abc) assert issubclass(MyRecord2, namedtuple.abc) assert issubclass(MyRecord3, namedtuple.abc) assert issubclass(MyRecord33, namedtuple.abc) assert issubclass(MyRecord345, 
namedtuple.abc) assert issubclass(MyRecord4, namedtuple.abc) assert issubclass(MyRecord5, namedtuple.abc) assert issubclass(MyRecord6, namedtuple.abc) # ...but abstract ones are not subclasses of tuple # (and this is what you probably want): assert not issubclass(MyAbstractRecord, tuple) assert not issubclass(AnotherAbstractRecord, tuple) assert issubclass(MyRecord, tuple) assert issubclass(MyRecord2, tuple) assert issubclass(MyRecord3, tuple) assert issubclass(MyRecord33, tuple) assert issubclass(MyRecord345, tuple) assert issubclass(MyRecord4, tuple) assert issubclass(MyRecord5, tuple) assert issubclass(MyRecord6, tuple) # Named tuple classes created with namedtuple() factory function # (in the "traditional" way) are registered as "virtual" subclasses # of namedtuple.abc: MyTuple = namedtuple('MyTuple', 'a b c') mt = MyTuple(1, 2, 3) assert issubclass(MyTuple, namedtuple.abc) assert isinstance(mt, namedtuple.abc)
examples/ngcf/utils.py
zbmain/PGL
1389
12615938
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle from paddle import nn, optimizer import numpy as np from paddle import log from dataloader import BasicDataset from paddle.io import Dataset from time import time from sklearn.metrics import roc_auc_score import random import os from tqdm import tqdm def UniformSample_original_python(dataset): """ the original impliment of BPR Sampling in LightGCN :return: np.array """ total_start = time() dataset: BasicDataset user_num = dataset.trainDataSize users = np.random.randint(0, dataset.n_users, user_num) allPos = dataset.allPos S = [] sample_time1 = 0. sample_time2 = 0. for i, user in enumerate(tqdm(users)): start = time() posForUser = allPos[user] if len(posForUser) == 0: continue sample_time2 += time() - start posindex = np.random.randint(0, len(posForUser)) positem = posForUser[posindex] while True: negitem = np.random.randint(0, dataset.m_items) if negitem in posForUser: continue else: break S.append([user, positem, negitem]) end = time() sample_time1 += end - start total = time() - total_start return np.array(S) def shuffle(*arrays, **kwargs): require_indices = kwargs.get('indices', False) if len(set(len(x) for x in arrays)) != 1: raise ValueError('All inputs to shuffle must have ' 'the same length.') shuffle_indices = np.arange(len(arrays[0])) np.random.shuffle(shuffle_indices) if len(arrays) == 1: result = arrays[0][shuffle_indices] else: result = tuple(x[shuffle_indices] for x in arrays) if require_indices: return result, shuffle_indices else: return result def minibatch(*tensors, **kwargs): batch_size = kwargs.get('batch_size', 128) if len(tensors) == 1: tensor = tensors[0] for i in range(0, len(tensor), batch_size): yield tensor[i:i + batch_size] else: for i in range(0, len(tensors[0]), batch_size): yield tuple(x[i:i + batch_size] for x in tensors) class timer: """ Time context manager for code block with timer(): do something timer.get() """ from time import time TAPE = [-1] # global time record NAMED_TAPE = {} @staticmethod def get(): if len(timer.TAPE) > 1: return timer.TAPE.pop() else: return -1 @staticmethod def dict(select_keys=None): hint = "|" if select_keys is None: for key, value in timer.NAMED_TAPE.items(): hint = hint + f"{key}:{value:.2f}|" else: for key in select_keys: value = timer.NAMED_TAPE[key] hint = hint + f"{key}:{value:.2f}|" return hint @staticmethod def zero(select_keys=None): if select_keys is None: for key, value in timer.NAMED_TAPE.items(): timer.NAMED_TAPE[key] = 0 else: for key in select_keys: timer.NAMED_TAPE[key] = 0 def __init__(self, tape=None, **kwargs): if kwargs.get('name'): timer.NAMED_TAPE[kwargs['name']] = timer.NAMED_TAPE[kwargs[ 'name']] if timer.NAMED_TAPE.get(kwargs['name']) else 0. 
self.named = kwargs['name'] if kwargs.get("group"): #TODO: add group function pass else: self.named = False self.tape = tape or timer.TAPE def __enter__(self): self.start = timer.time() return self def __exit__(self, exc_type, exc_val, exc_tb): if self.named: timer.NAMED_TAPE[self.named] += timer.time() - self.start else: self.tape.append(timer.time() - self.start) # ====================Metrics============================== # ========================================================= def RecallPrecision_ATk(test_data, r, k): """ test_data should be a list? cause users may have different amount of pos items. shape (test_batch, k) pred_data : shape (test_batch, k) NOTE: pred_data should be pre-sorted k : top-k """ right_pred = r[:, :k].sum(1) precis_n = k recall_n = np.array([len(test_data[i]) for i in range(len(test_data))]) recall = np.sum(right_pred / recall_n) precis = np.sum(right_pred) / precis_n return {'recall': recall, 'precision': precis} def MRRatK_r(r, k): """ Mean Reciprocal Rank """ pred_data = r[:, :k] scores = np.log2(1. / np.arange(1, k + 1)) pred_data = pred_data / scores pred_data = pred_data.sum(1) return np.sum(pred_data) def NDCGatK_r(test_data, r, k): """ Normalized Discounted Cumulative Gain rel_i = 1 or 0, so 2^{rel_i} - 1 = 1 or 0 """ assert len(r) == len(test_data) pred_data = r[:, :k] test_matrix = np.zeros((len(pred_data), k)) for i, items in enumerate(test_data): length = k if k <= len(items) else len(items) test_matrix[i, :length] = 1 max_r = test_matrix idcg = np.sum(max_r * 1. / np.log2(np.arange(2, k + 2)), axis=1) dcg = pred_data * (1. / np.log2(np.arange(2, k + 2))) dcg = np.sum(dcg, axis=1) idcg[idcg == 0.] = 1. ndcg = dcg / idcg ndcg[np.isnan(ndcg)] = 0. return np.sum(ndcg) def AUC(all_item_scores, dataset, test_data): """ design for a single user """ dataset: BasicDataset r_all = np.zeros((dataset.m_items, )) r_all[test_data] = 1 r = r_all[all_item_scores >= 0] test_item_scores = all_item_scores[all_item_scores >= 0] return roc_auc_score(r, test_item_scores) def getLabel(test_data, pred_data): r = [] for i in range(len(test_data)): groundTrue = test_data[i] predictTopK = pred_data[i] pred = list(map(lambda x: x in groundTrue, predictTopK)) pred = np.array(pred).astype("float") r.append(pred) return np.array(r).astype('float')
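A toy, hedged example of wiring the metric helpers above together for a top-k evaluation; the ground-truth and ranked lists are made-up data, and the functions are assumed to be in scope (i.e. run inside this module).

import numpy as np

k = 3
test_data = [[0, 2], [1]]             # ground-truth item ids per user
pred_data = [[2, 5, 0], [4, 1, 7]]    # pre-sorted top-k predictions per user

r = getLabel(test_data, pred_data)    # binary hit matrix of shape (num_users, k)
print(RecallPrecision_ATk(test_data, r, k))
print(NDCGatK_r(test_data, r, k))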
rest-service/manager_rest/test/endpoints/test_deployment_labels_dependencies.py
cloudify-cosmo/cloudify-manager
124
12615954
from mock import patch from cloudify_rest_client.exceptions import CloudifyClientError from cloudify.models_states import DeploymentState from manager_rest.rest.rest_utils import RecursiveDeploymentLabelsDependencies from manager_rest.test import base_test from manager_rest.test.attribute import attr from manager_rest.test.base_test import BaseServerTestCase @attr(client_min_version=3.1, client_max_version=base_test.LATEST_API_VERSION) class DeploymentLabelsDependenciesTest(BaseServerTestCase): def _create_deployment_objects(self, parent_name, deployment_type, size): for service in range(1, size + 1): self.put_deployment_with_labels( [ { 'csys-obj-parent': parent_name }, { 'csys-obj-type': deployment_type, } ], resource_id='{0}_{1}_{2}'.format( deployment_type, service, parent_name) ) def _populate_deployment_labels_dependencies(self): self.put_mock_deployments('dep_0', 'dep_1') self.put_mock_deployments('dep_2', 'dep_3') self.put_mock_deployments('dep_4', 'dep_5') self.client.deployments.update_labels('dep_0', [ { 'csys-obj-parent': 'dep_1' } ] ) self.client.deployments.update_labels('dep_2', [ { 'csys-obj-parent': 'dep_3' } ] ) self.client.deployments.update_labels('dep_4', [ { 'csys-obj-parent': 'dep_5' } ] ) @patch('manager_rest.resource_manager.ResourceManager' '.handle_deployment_labels_graph') @patch('manager_rest.resource_manager.ResourceManager' '.verify_attaching_deployment_to_parents') def test_deployment_with_empty_labels(self, verify_parents_mock, handle_labels_graph_mock): self.put_deployment('deployment_with_no_labels') verify_parents_mock.assert_not_called() handle_labels_graph_mock.assert_not_called() @patch('manager_rest.resource_manager.ResourceManager' '.handle_deployment_labels_graph') @patch('manager_rest.resource_manager.ResourceManager' '.verify_attaching_deployment_to_parents') def test_deployment_with_non_parent_labels(self, verify_parents_mock, handle_labels_graph_mock): self.put_deployment_with_labels([{'env': 'aws'}, {'arch': 'k8s'}]) verify_parents_mock.assert_not_called() handle_labels_graph_mock.assert_not_called() def test_deployment_with_single_parent_label(self): self.put_deployment('parent') self.put_deployment_with_labels([{'csys-obj-parent': 'parent'}]) # deployment response deployment = self.client.deployments.get('parent') self.assertEqual(deployment.sub_services_count, 1) self.assertEqual(deployment.sub_environments_count, 0) def test_deploy_blueprint_with_invalid_parent_id_on_dsl(self): with self.assertRaisesRegex( CloudifyClientError, 'using label `csys-obj-parent` that does not exist'): self.put_deployment( blueprint_id='bp1', blueprint_file_name='blueprint_with_invalid_parent_labels.yaml' ) def test_upload_blueprint_with_valid_parent_id_on_dsl(self): self.put_deployment('valid-id') self.put_blueprint( blueprint_id='bp1', blueprint_file_name='blueprint_with_valid_parent_labels.yaml' ) def test_deployment_with_multiple_parent_labels(self): self.put_deployment(deployment_id='parent_1', blueprint_id='blueprint_1') self.put_deployment(deployment_id='parent_2', blueprint_id='blueprint_2') self.put_deployment_with_labels( [ { 'csys-obj-parent': 'parent_1' }, { 'csys-obj-parent': 'parent_2' } ] ) deployment_1 = self.client.deployments.get('parent_1') deployment_2 = self.client.deployments.get('parent_2') self.assertEqual(deployment_1.sub_services_count, 1) self.assertEqual(deployment_1.sub_environments_count, 0) self.assertEqual(deployment_2.sub_services_count, 1) self.assertEqual(deployment_2.sub_environments_count, 0) def 
test_deployment_with_invalid_parent_label(self): error_message = 'label `csys-obj-parent` that does not exist' with self.assertRaisesRegex(CloudifyClientError, error_message): self.put_deployment_with_labels( [ { 'csys-obj-parent': 'notexist' } ], resource_id='invalid_label_dep' ) def test_deployment_with_valid_and_invalid_parent_labels(self): self.put_deployment(deployment_id='parent_1') error_message = 'label `csys-obj-parent` that does not exist' with self.assertRaisesRegex(CloudifyClientError, error_message): self.put_deployment_with_labels( [ { 'csys-obj-parent': 'parent_1' }, { 'csys-obj-parent': 'notexist' } ], resource_id='invalid_label_dep' ) def test_add_valid_label_parent_to_created_deployment(self): self.put_deployment(deployment_id='parent_1', blueprint_id='blueprint_1') self.put_deployment(deployment_id='parent_2', blueprint_id='blueprint_2') self.put_deployment_with_labels([{'csys-obj-parent': 'parent_1'}], resource_id='label_dep') self.client.deployments.update_labels('label_dep', [ { 'csys-obj-parent': 'parent_1' }, { 'csys-obj-parent': 'parent_2' } ] ) deployment_1 = self.client.deployments.get('parent_1') deployment_2 = self.client.deployments.get('parent_2') self.assertEqual(deployment_1.sub_services_count, 1) self.assertEqual(deployment_1.sub_environments_count, 0) self.assertEqual(deployment_2.sub_services_count, 1) self.assertEqual(deployment_2.sub_environments_count, 0) def test_add_invalid_label_parent_to_created_deployment(self): error_message = 'label `csys-obj-parent` that does not exist' self.put_deployment(deployment_id='parent_1', blueprint_id='blueprint_1') self.put_deployment_with_labels([{'csys-obj-parent': 'parent_1'}], resource_id='invalid_label_dep') with self.assertRaisesRegex(CloudifyClientError, error_message): self.client.deployments.update_labels('invalid_label_dep', [ { 'csys-obj-parent': 'parent_1' }, { 'csys-obj-parent': 'notexist' } ] ) def test_cyclic_dependencies_between_deployments(self): error_message = 'cyclic deployment-labels dependencies.' 
self.put_deployment(deployment_id='deployment_1', blueprint_id='deployment_1') self.put_deployment_with_labels( [ { 'csys-obj-parent': 'deployment_1' } ], resource_id='deployment_2' ) with self.assertRaisesRegex(CloudifyClientError, error_message): self.client.deployments.update_labels('deployment_1', [ { 'csys-obj-parent': 'deployment_2' } ]) deployment_1 = self.client.deployments.get('deployment_1') deployment_2 = self.client.deployments.get('deployment_2') self.assertEqual(deployment_1.sub_services_count, 1) self.assertEqual(deployment_2.sub_services_count, 0) self.assertEqual(len(deployment_1.labels), 0) def test_number_of_direct_services_deployed_inside_environment(self): self.put_deployment(deployment_id='env', blueprint_id='env') self._create_deployment_objects('env', 'service', 2) deployment = self.client.deployments.get( 'env', all_sub_deployments=False) self.assertEqual(deployment.sub_services_count, 2) def test_number_of_total_services_deployed_inside_environment(self): self.put_deployment(deployment_id='env', blueprint_id='env') self._create_deployment_objects('env', 'service', 2) self.put_deployment_with_labels( [ { 'csys-obj-parent': 'env' }, { 'csys-obj-type': 'environment', } ], resource_id='env_1' ) self._create_deployment_objects('env_1', 'service', 2) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_services_count, 4) deployment = self.client.deployments.get('env', all_sub_deployments=False) self.assertEqual(deployment.sub_services_count, 2) def test_number_of_direct_environments_deployed_inside_environment(self): self.put_deployment(deployment_id='env', blueprint_id='env') self._create_deployment_objects('env', 'environment', 2) deployment = self.client.deployments.get( 'env', all_sub_deployments=False) self.assertEqual(deployment.sub_environments_count, 2) def test_number_of_total_environments_deployed_inside_environment(self): self.put_deployment(deployment_id='env', blueprint_id='env') self._create_deployment_objects('env', 'environment', 2) self.put_deployment_with_labels( [ { 'csys-obj-parent': 'env' }, { 'csys-obj-type': 'environment', } ], resource_id='env_1' ) self._create_deployment_objects('env_1', 'environment', 2) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_environments_count, 5) deployment = self.client.deployments.get('env', all_sub_deployments=False) self.assertEqual(deployment.sub_environments_count, 3) def test_add_sub_deployments_after_deployment_update(self): _, _, _, deployment = self.put_deployment( deployment_id='env', blueprint_id='env' ) _, _, _, deployment_1 = self.put_deployment( deployment_id='env_1', blueprint_id='env_1' ) self.assertEqual(deployment.sub_services_count, 0) self.assertEqual(deployment.sub_services_count, 0) self.assertEqual(deployment_1.sub_environments_count, 0) self.assertEqual(deployment_1.sub_environments_count, 0) self.put_deployment(deployment_id='sub_srv', blueprint_id='srv') self.put_deployment_with_labels( [ { 'csys-obj-type': 'environment', } ], resource_id='sub_env' ) self.put_blueprint( blueprint_id='update_sub_srv', blueprint_file_name='blueprint_with_parent_labels.yaml' ) self.put_blueprint( blueprint_id='update_sub_env', blueprint_file_name='blueprint_with_parent_labels.yaml' ) self.client.deployment_updates.update_with_existing_blueprint( 'sub_srv', blueprint_id='update_sub_srv' ) self.client.deployment_updates.update_with_existing_blueprint( 'sub_env', blueprint_id='update_sub_env' ) deployment = self.client.deployments.get('env') deployment_1 
= self.client.deployments.get('env_1') self.assertEqual(deployment.sub_services_count, 1) self.assertEqual(deployment.sub_services_count, 1) self.assertEqual(deployment_1.sub_environments_count, 1) self.assertEqual(deployment_1.sub_environments_count, 1) def test_detach_all_services_from_deployment(self): self.put_deployment( deployment_id='env', blueprint_id='env' ) self._create_deployment_objects('env', 'service', 2) self._create_deployment_objects('env', 'environment', 2) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_services_count, 2) self.assertEqual(deployment.sub_environments_count, 2) self.client.deployments.update_labels( 'service_1_env', [ { 'csys-obj-type': 'service' }, ] ) self.client.deployments.update_labels( 'service_2_env', [ { 'csys-obj-type': 'service' }, ] ) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_services_count, 0) self.assertEqual(deployment.sub_environments_count, 2) def test_detach_all_environments_from_deployment(self): self.put_deployment( deployment_id='env', blueprint_id='env' ) self._create_deployment_objects('env', 'service', 2) self._create_deployment_objects('env', 'environment', 2) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_services_count, 2) self.assertEqual(deployment.sub_environments_count, 2) self.client.deployments.update_labels( 'environment_1_env', [ { 'csys-obj-type': 'environment' } ] ) self.client.deployments.update_labels( 'environment_2_env', [ { 'csys-obj-type': 'environment' } ] ) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_services_count, 2) self.assertEqual(deployment.sub_environments_count, 0) def test_deployment_statuses_after_creation_without_sub_deployments(self): self.put_deployment('dep1') deployment = self.client.deployments.get('dep1') self.assertEqual( deployment.deployment_status, DeploymentState.REQUIRE_ATTENTION ) self.assertIsNone(deployment.sub_services_status) self.assertIsNone(deployment.sub_environments_status) def test_deployment_statuses_after_creation_with_sub_deployments(self): self.put_deployment('parent') self._create_deployment_objects('parent', 'environment', 2) self._create_deployment_objects('parent', 'service', 2) deployment = self.client.deployments.get('parent') self.assertEqual( deployment.deployment_status, DeploymentState.REQUIRE_ATTENTION ) self.assertEqual( deployment.sub_environments_status, DeploymentState.REQUIRE_ATTENTION ) self.assertEqual( deployment.sub_services_status, DeploymentState.REQUIRE_ATTENTION ) def test_delete_deployment_with_sub_deployments(self): self.put_deployment('parent') self._create_deployment_objects('parent', 'service', 2) with self.assertRaisesRegex( CloudifyClientError, 'Can\'t delete deployment'): self.client.deployments.delete('parent') def test_stop_deployment_with_sub_deployments(self): self.put_deployment('parent') self._create_deployment_objects('parent', 'service', 2) with self.assertRaisesRegex( CloudifyClientError, 'Can\'t execute workflow `stop`'): self.client.executions.start('parent', 'stop') def test_uninstall_deployment_with_sub_deployments(self): self.put_deployment('parent') self._create_deployment_objects('parent', 'service', 2) with self.assertRaisesRegex( CloudifyClientError, 'Can\'t execute workflow `uninstall`'): self.client.executions.start('parent', 'uninstall') def test_create_deployment_labels_dependencies_graph(self): self._populate_deployment_labels_dependencies() dep_graph = 
RecursiveDeploymentLabelsDependencies(self.sm) dep_graph.create_dependencies_graph() self.assertEqual(dep_graph.graph['dep_1'], {'dep_0'}) self.assertEqual(dep_graph.graph['dep_3'], {'dep_2'}) self.assertEqual(dep_graph.graph['dep_5'], {'dep_4'}) def test_add_to_deployment_labels_dependencies_graph(self): self._populate_deployment_labels_dependencies() dep_graph = RecursiveDeploymentLabelsDependencies(self.sm) dep_graph.create_dependencies_graph() dep_graph.add_dependency_to_graph('dep_00', 'dep_1') dep_graph.add_dependency_to_graph('dep_1', 'dep_6') self.assertEqual(dep_graph.graph['dep_1'], {'dep_0', 'dep_00'}) self.assertEqual(dep_graph.graph['dep_6'], {'dep_1'}) def test_remove_deployment_labels_dependencies_from_graph(self): self._populate_deployment_labels_dependencies() dep_graph = RecursiveDeploymentLabelsDependencies(self.sm) dep_graph.create_dependencies_graph() dep_graph.remove_dependency_from_graph('dep_0', 'dep_1') self.assertNotIn('dep_1', dep_graph.graph) def test_find_recursive_deployments_from_graph(self): self._populate_deployment_labels_dependencies() self.client.deployments.update_labels('dep_0', [ { 'csys-obj-parent': 'dep_1' } ] ) self.put_deployment(deployment_id='dep_11', blueprint_id='dep_11') self.put_deployment(deployment_id='dep_12', blueprint_id='dep_12') self.put_deployment(deployment_id='dep_13', blueprint_id='dep_13') self.put_deployment(deployment_id='dep_14', blueprint_id='dep_14') self.client.deployments.update_labels('dep_1', [ { 'csys-obj-parent': 'dep_11' } ] ) self.client.deployments.update_labels('dep_11', [ { 'csys-obj-parent': 'dep_12' } ] ) self.client.deployments.update_labels('dep_12', [ { 'csys-obj-parent': 'dep_13' } ] ) self.client.deployments.update_labels('dep_13', [ { 'csys-obj-parent': 'dep_14' } ] ) dep_graph = RecursiveDeploymentLabelsDependencies(self.sm) dep_graph.create_dependencies_graph() targets = dep_graph.find_recursive_deployments(['dep_0']) self.assertEqual(len(targets), 5) self.assertIn('dep_1', targets) self.assertIn('dep_11', targets) self.assertIn('dep_12', targets) self.assertIn('dep_13', targets) self.assertIn('dep_14', targets) def test_sub_deployments_counts_after_convert_to_service(self): self.put_deployment(deployment_id='env', blueprint_id='env') self._create_deployment_objects('env', 'environment', 2) self.put_deployment_with_labels( [ { 'csys-obj-parent': 'env' }, { 'csys-obj-type': 'environment', } ], resource_id='env_1' ) self._create_deployment_objects('env_1', 'environment', 2) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_environments_count, 5) # Remove the csys-obj-type and convert it to service instead self.client.deployments.update_labels('env_1', [ { 'csys-obj-parent': 'env' } ] ) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_environments_count, 4) self.assertEqual(deployment.sub_services_count, 1) def test_sub_deployments_counts_after_convert_to_environment(self): self.put_deployment(deployment_id='env', blueprint_id='env') self._create_deployment_objects('env', 'environment', 2) self.put_deployment_with_labels( [ { 'csys-obj-parent': 'env' } ], resource_id='srv_1' ) self._create_deployment_objects('srv_1', 'service', 2) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_environments_count, 2) self.assertEqual(deployment.sub_services_count, 3) # Add the csys-obj-type and convert it to environment instead self.client.deployments.update_labels('srv_1', [ { 'csys-obj-parent': 'env' }, { 'csys-obj-type': 
'environment' } ] ) deployment = self.client.deployments.get('env') self.assertEqual(deployment.sub_environments_count, 3) self.assertEqual(deployment.sub_services_count, 2) def test_csys_env_type(self): dep1 = self.put_deployment_with_labels([{'key1': 'val1'}, {'key2': 'val3'}, {'key3': 'val3'}], 'dep1') self.assertEqual(dep1.environment_type, '') subcloud = self.put_deployment_with_labels( [{'csys-env-type': 'subcloud'}, {'key1': 'val1'}, {'key2': 'val2'}], 'subcloud') self.assertEqual(subcloud.environment_type, 'subcloud') basic = self.put_deployment_with_labels( [{'csys-env-type': 'basic'}, {'key2': 'val2'}, {'key3': 'val3'}], 'basic') self.assertEqual(basic.environment_type, 'basic') controller = self.put_deployment_with_labels( [{'csys-env-type': 'controller'}, {'key1': 'val1'}, {'key3': 'val3'}], 'controller') self.assertEqual(controller.environment_type, 'controller') deployments = self.client.deployments.list(sort='environment_type') self.assertEqual([dep.id for dep in deployments], ['dep1', 'basic', 'controller', 'subcloud'])
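A minimal usage sketch of the labels API this suite exercises, assuming a reachable Cloudify manager and the cloudify-rest-client package; the host, deployment ids and label values below are placeholders, not fixtures from the tests.

from cloudify_rest_client import CloudifyClient

client = CloudifyClient(host='localhost')  # placeholder manager host
# Attach deployment 'child_dep' to the environment 'env' as a service:
client.deployments.update_labels('child_dep', [
    {'csys-obj-parent': 'env'},
    {'csys-obj-type': 'service'},
])
# The parent's counters then reflect the new sub-deployment:
parent = client.deployments.get('env')
print(parent.sub_services_count, parent.sub_environments_count)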
alipay/aop/api/response/AlipayCommerceEducateTuitioncodeApplySendResponse.py
antopen/alipay-sdk-python-all
213
12615956
<reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.response.AlipayResponse import AlipayResponse


class AlipayCommerceEducateTuitioncodeApplySendResponse(AlipayResponse):

    def __init__(self):
        super(AlipayCommerceEducateTuitioncodeApplySendResponse, self).__init__()
        self._apply_id = None

    @property
    def apply_id(self):
        return self._apply_id

    @apply_id.setter
    def apply_id(self, value):
        self._apply_id = value

    def parse_response_content(self, response_content):
        response = super(AlipayCommerceEducateTuitioncodeApplySendResponse, self).parse_response_content(response_content)
        if 'apply_id' in response:
            self.apply_id = response['apply_id']
observations/r/wood.py
hajime9652/observations
199
12615964
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import numpy as np
import os
import sys

from observations.util import maybe_download_and_extract


def wood(path):
  """Modified Data on Wood Specific Gravity

  The original data are from Draper and Smith (1966) and were used to
  determine the influence of anatomical factors on wood specific gravity,
  with five explanatory variables and an intercept. These data were
  contaminated by replacing a few observations with outliers.

  A data frame with 20 observations on the following 6 variables.

  x1, x2, x3, x4, x5
      explanatory “anatomical” wood variables.
  y
      wood specific gravity, the target variable.

  Draper and Smith (1966, p.227)

  <NAME> and <NAME> (1987) *Robust Regression and Outlier Detection*
  Wiley, p.243, table 8.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will be
      downloaded and extracted there. Filename is `wood.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 20 rows and 6 columns and dictionary
    `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'wood.csv'
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://dustintran.com/data/r/robustbase/wood.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='wood.csv',
                               resume=False)

  data = pd.read_csv(os.path.join(path, filename), index_col=0,
                     parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
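A short usage sketch for the loader above, assuming the observations package is installed, re-exports wood at the top level, and can write to the chosen directory.

from observations import wood

x_train, metadata = wood('~/data')  # downloads wood.csv on first call
print(x_train.shape)                # (20, 6)
print(list(metadata['columns']))    # feature names, e.g. x1..x5 and y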
tests/test_discriminator.py
dustymugs/PynamoDB
1,586
12615977
<reponame>dustymugs/PynamoDB<filename>tests/test_discriminator.py import pytest from pynamodb.attributes import DiscriminatorAttribute from pynamodb.attributes import DynamicMapAttribute from pynamodb.attributes import ListAttribute from pynamodb.attributes import MapAttribute from pynamodb.attributes import NumberAttribute from pynamodb.attributes import UnicodeAttribute from pynamodb.models import Model class_name = lambda cls: cls.__name__ class TypedValue(MapAttribute): _cls = DiscriminatorAttribute(attr_name = 'cls') name = UnicodeAttribute() class NumberValue(TypedValue, discriminator=class_name): value = NumberAttribute() class StringValue(TypedValue, discriminator=class_name): value = UnicodeAttribute() class RenamedValue(TypedValue, discriminator='custom_name'): value = UnicodeAttribute() class DiscriminatorTestModel(Model, discriminator='Parent'): class Meta: host = 'http://localhost:8000' table_name = 'test' hash_key = UnicodeAttribute(hash_key=True) value = TypedValue() values = ListAttribute(of=TypedValue) type = DiscriminatorAttribute() class ChildModel(DiscriminatorTestModel, discriminator='Child'): value = UnicodeAttribute() class DynamicSubclassedMapAttribute(DynamicMapAttribute): string_attr = UnicodeAttribute() class DynamicMapDiscriminatorTestModel(Model, discriminator='Parent'): class Meta: host = 'http://localhost:8000' table_name = 'test' hash_key = UnicodeAttribute(hash_key=True) value = DynamicSubclassedMapAttribute(default=dict) type = DiscriminatorAttribute() class DynamicMapDiscriminatorChildTestModel(DynamicMapDiscriminatorTestModel, discriminator='Child'): value = UnicodeAttribute() class TestDiscriminatorAttribute: def test_serialize(self): dtm = DiscriminatorTestModel() dtm.hash_key = 'foo' dtm.value = StringValue(name='foo', value='Hello') dtm.values = [NumberValue(name='bar', value=5), RenamedValue(name='baz', value='World')] assert dtm.serialize() == { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Parent'}, 'value': {'M': {'cls': {'S': 'StringValue'}, 'name': {'S': 'foo'}, 'value': {'S': 'Hello'}}}, 'values': {'L': [ {'M': {'cls': {'S': 'NumberValue'}, 'name': {'S': 'bar'}, 'value': {'N': '5'}}}, {'M': {'cls': {'S': 'custom_name'}, 'name': {'S': 'baz'}, 'value': {'S': 'World'}}} ]} } def test_deserialize(self): item = { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Parent'}, 'value': {'M': {'cls': {'S': 'StringValue'}, 'name': {'S': 'foo'}, 'value': {'S': 'Hello'}}}, 'values': {'L': [ {'M': {'cls': {'S': 'NumberValue'}, 'name': {'S': 'bar'}, 'value': {'N': '5'}}}, {'M': {'cls': {'S': 'custom_name'}, 'name': {'S': 'baz'}, 'value': {'S': 'World'}}} ]} } dtm = DiscriminatorTestModel.from_raw_data(item) assert dtm.hash_key == 'foo' assert dtm.value.value == 'Hello' assert dtm.values[0].value == 5 assert dtm.values[1].value == 'World' def test_condition_expression(self): condition = DiscriminatorTestModel.value._cls == RenamedValue placeholder_names, expression_attribute_values = {}, {} expression = condition.serialize(placeholder_names, expression_attribute_values) assert expression == "#0.#1 = :0" assert placeholder_names == {'value': '#0', 'cls': '#1'} assert expression_attribute_values == {':0': {'S': 'custom_name'}} def test_multiple_discriminator_values(self): class TestAttribute(MapAttribute, discriminator='new_value'): cls = DiscriminatorAttribute() TestAttribute.cls.register_class(TestAttribute, 'old_value') # ensure the first registered value is used during serialization assert TestAttribute.cls.get_discriminator(TestAttribute) == 'new_value' assert 
TestAttribute.cls.serialize(TestAttribute) == 'new_value' # ensure the second registered value can be used to deserialize assert TestAttribute.cls.deserialize('old_value') == TestAttribute assert TestAttribute.cls.deserialize('new_value') == TestAttribute def test_multiple_discriminator_classes(self): with pytest.raises(ValueError): # fail when attempting to register a class with an existing discriminator value class RenamedValue2(TypedValue, discriminator='custom_name'): pass class TestDiscriminatorModel: def test_serialize(self): cm = ChildModel() cm.hash_key = 'foo' cm.value = 'bar' cm.values = [] assert cm.serialize() == { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Child'}, 'value': {'S': 'bar'}, 'values': {'L': []} } def test_deserialize(self): item = { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Child'}, 'value': {'S': 'bar'}, 'values': {'L': []} } cm = DiscriminatorTestModel.from_raw_data(item) assert isinstance(cm, ChildModel) assert cm.hash_key == 'foo' assert cm.value == 'bar' class TestDynamicDiscriminatorModel: def test_serialize_parent(self): m = DynamicMapDiscriminatorTestModel() m.hash_key = 'foo' m.value.string_attr = 'foostr' m.value.bar_attribute = 3 assert m.serialize() == { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Parent'}, 'value': {'M': {'string_attr': {'S': 'foostr'}, 'bar_attribute': {'N': '3'}}}, } def test_deserialize_parent(self): item = { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Parent'}, 'value': { 'M': {'string_attr': {'S': 'foostr'}, 'bar_attribute': {'N': '3'}} } } m = DynamicMapDiscriminatorTestModel.from_raw_data(item) assert m.hash_key == 'foo' assert m.value assert m.value.string_attr == 'foostr' assert m.value.bar_attribute == 3 def test_serialize_child(self): m = DynamicMapDiscriminatorChildTestModel() m.hash_key = 'foo' m.value = 'string val' assert m.serialize() == { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Child'}, 'value': {'S': 'string val'} } def test_deserialize_child(self): item = { 'hash_key': {'S': 'foo'}, 'type': {'S': 'Child'}, 'value': {'S': 'string val'} } m = DynamicMapDiscriminatorChildTestModel.from_raw_data(item) assert m.hash_key == 'foo' assert m.value == 'string val'
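For context, a write-path sketch of the same discriminator machinery, assuming the local DynamoDB endpoint configured in Meta is running and the table has been created; ids and values are made up.

dtm = DiscriminatorTestModel(hash_key='example')
dtm.value = NumberValue(name='answer', value=42)
dtm.values = [StringValue(name='greeting', value='hello')]
dtm.save()  # persists type='Parent' plus a per-item 'cls' discriminator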
lingua_franca/lang/common_data_cs.py
NeonDaniel/lingua-franca
191
12616033
<reponame>NeonDaniel/lingua-franca # # Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict #_ARTICLES_CS = {} _NUM_STRING_CS = { 0: 'nula', 1: 'jedna', 2: 'dva', 3: 'tři', 4: 'čtyři', 5: 'pět', 6: 'šest', 7: 'sedm', 8: 'osm', 9: 'devět', 10: 'deset', 11: 'jedenáct', 12: 'dvanáct', 13: 'třináct', 14: 'čtrnáct', 15: 'patnáct', 16: 'šestnáct', 17: 'sedmnáct', 18: 'osmnáct', 19: 'devatenáct', 20: 'dvacet', 30: 'třicet', 40: 'čtyřicet', 50: 'padesát', 60: 'šedesát', 70: 'sedmdesát', 80: 'osmdesát', 90: 'devadesát' } _FRACTION_STRING_CS = { 2: 'polovina', 3: 'třetina', 4: 'čtvrtina', 5: 'pětina', 6: 'šestina', 7: 'sedmina', 8: 'osmina', 9: 'devítina', 10: 'desetina', 11: 'jedenáctina', 12: 'dvanáctina', 13: 'třináctina', 14: 'čtrnáctina', 15: 'patnáctina', 16: 'šestnáctina', 17: 'sedmnáctina', 18: 'osmnáctina', 19: 'devatenáctina', 20: 'dvacetina', 30: 'třicetina', 40: 'čtyřicetina', 50: 'padesátina', 60: 'šedesátina', 70: 'sedmdesátina', 80: 'osmdesátina', 90: 'devadesátina', 1e2: 'setina', 1e3: 'tisícina' } _LONG_SCALE_CS = OrderedDict([ (100, 'sto'), (1000, 'tisíc'), (1000000, 'milion'), (1e9, "miliarda"), (1e12, "bilion"), (1e15, "biliarda"), (1e18, "trilion"), (1e21, "triliarda"), (1e24, "kvadrilion"), (1e27, "kvadriliarda"), (1e30, "kvintilion"), (1e33, "kvintiliarda"), (1e36, "sextilion"), (1e39, "sextiliarda"), (1e42, "septilion"), (1e45, "septiliarda"), (1e48, "oktilion"), (1e51, "oktiliarda"), (1e54, "nonilion"), (1e57, "noniliarda"), (1e60, "decilion"), (1e63, "deciliarda"), (1e120, "vigintilion"), (1e180, "trigintilion"), (1e303, "kvinkvagintiliarda"), (1e600, "centilion"), (1e603, "centiliarda") ]) _SHORT_SCALE_CS = OrderedDict([ (100, 'sto'), (1000, 'tisíc'), (1000000, 'million'), (1e9, "billion"), (1e12, 'trillion'), (1e15, "quadrillion"), (1e18, "quintillion"), (1e21, "sextillion"), (1e24, "septillion"), (1e27, "octillion"), (1e30, "nonillion"), (1e33, "decillion"), (1e36, "undecillion"), (1e39, "duodecillion"), (1e42, "tredecillion"), (1e45, "quadrdecillion"), (1e48, "quindecillion"), (1e51, "sexdecillion"), (1e54, "septendecillion"), (1e57, "octodecillion"), (1e60, "novemdecillion"), (1e63, "vigintillion"), (1e66, "unvigintillion"), (1e69, "uuovigintillion"), (1e72, "tresvigintillion"), (1e75, "quattuorvigintillion"), (1e78, "quinquavigintillion"), (1e81, "qesvigintillion"), (1e84, "septemvigintillion"), (1e87, "octovigintillion"), (1e90, "novemvigintillion"), (1e93, "trigintillion"), (1e96, "untrigintillion"), (1e99, "duotrigintillion"), (1e102, "trestrigintillion"), (1e105, "quattuortrigintillion"), (1e108, "quinquatrigintillion"), (1e111, "sestrigintillion"), (1e114, "septentrigintillion"), (1e117, "octotrigintillion"), (1e120, "noventrigintillion"), (1e123, "quadragintillion"), (1e153, "quinquagintillion"), (1e183, "sexagintillion"), (1e213, "septuagintillion"), (1e243, "octogintillion"), (1e273, "nonagintillion"), (1e303, "centillion"), (1e306, "uncentillion"), (1e309, "duocentillion"), (1e312, "trescentillion"), (1e333, 
"decicentillion"), (1e336, "undecicentillion"), (1e363, "viginticentillion"), (1e366, "unviginticentillion"), (1e393, "trigintacentillion"), (1e423, "quadragintacentillion"), (1e453, "quinquagintacentillion"), (1e483, "sexagintacentillion"), (1e513, "septuagintacentillion"), (1e543, "ctogintacentillion"), (1e573, "nonagintacentillion"), (1e603, "ducentillion"), (1e903, "trecentillion"), (1e1203, "quadringentillion"), (1e1503, "quingentillion"), (1e1803, "sescentillion"), (1e2103, "septingentillion"), (1e2403, "octingentillion"), (1e2703, "nongentillion"), (1e3003, "millinillion") ]) _ORDINAL_BASE_CS = { 1: 'první', 2: 'druhý', 3: 'třetí', 4: 'čtvrtý', 5: 'pátý', 6: 'šestý', 7: 'sedmý', 8: 'osmý', 9: 'devátý', 10: 'desátý', 11: 'jedenáctý', 12: 'dvanáctý', 13: 'třináctý', 14: 'čtrnáctý', 15: 'patnáctý', 16: 'šestnáctý', 17: 'sedmnáctý', 18: 'osmnáctý', 19: 'devatenáctý', 20: 'dvacátý', 30: 'třicátý', 40: "čtyřicátý", 50: "padesátý", 60: "šedesátý", 70: "sedmdesátý", 80: "osmdesátý", 90: "devadesátý", 1e2: "stý", 1e3: "tisící" } _SHORT_ORDINAL_CS = { 1e6: "miliontý", 1e9: "billiontý", 1e12: "trilliontý", 1e15: "quadrilliontý", 1e18: "quintilliontý", 1e21: "sextilliontý", 1e24: "septilliontý", 1e27: "oktiliontý", 1e30: "nonilliontý", 1e33: "decilliontý" # TODO > 1e-33 } _SHORT_ORDINAL_CS.update(_ORDINAL_BASE_CS) _LONG_ORDINAL_CS = { 1e6: "miliontý", 1e9: "miliardtý", 1e12: "biliontý", 1e15: "biliardtý", 1e18: "triliontý", 1e21: "triliardtý", 1e24: "kvadriliontý", 1e27: "kvadriliardtý", 1e30: "kvintiliontý", 1e33: "kvintiliardtý", 1e36: "sextiliontý", 1e39: "sextiliardtý", 1e42: "septiliontý", 1e45: "septiliardtý", 1e48: "oktilion", 1e51: "oktiliardtý", 1e54: "noniliontý", 1e57: "noniliardtý", 1e60: "deciliontý" # TODO > 1e60 } _LONG_ORDINAL_CS.update(_ORDINAL_BASE_CS) # Months _MONTHS_CONVERSION = { 0: "january", 1: "february", 2: "march", 3: "april", 4: "may", 5: "june", 6: "july", 7: "august", 8: "september", 9: "october", 10: "november", 11: "december" } _MONTHS_CZECH = ['leden', 'únor', 'březen', 'duben', 'květen', 'červen', 'červenec', 'srpen', 'září', 'říjen', 'listopad', 'prosinec'] # Time _TIME_UNITS_CONVERSION = { 'mikrosekund': 'microseconds', 'milisekund': 'milliseconds', 'sekundu': 'seconds', 'sekundy': 'seconds', 'sekund': 'seconds', 'minutu': 'minutes', 'minuty': 'minutes', 'minut': 'minutes', 'hodin': 'hours', 'den': 'days', # 1 day 'dny': 'days', # 2-4 days 'dnů': 'days', # 5+ days 'dní': 'days', # 5+ days - different inflection 'dne': 'days', # a half day 'týden': 'weeks', 'týdny': 'weeks', 'týdnů': 'weeks' }
tests/test_runner_apps/databases/tests.py
KaushikSathvara/django
61,676
12616041
import unittest


class NoDatabaseTests(unittest.TestCase):
    def test_nothing(self):
        pass


class DefaultDatabaseTests(NoDatabaseTests):
    databases = {'default'}


class DefaultDatabaseSerializedTests(NoDatabaseTests):
    databases = {'default'}
    serialized_rollback = True


class OtherDatabaseTests(NoDatabaseTests):
    databases = {'other'}


class AllDatabasesTests(NoDatabaseTests):
    databases = '__all__'
src/ecdsa/test_ecdsa.py
kianmeng/python-ecdsa
616
12616067
<reponame>kianmeng/python-ecdsa<gh_stars>100-1000 from __future__ import print_function import sys import hypothesis.strategies as st from hypothesis import given, settings, note, example try: import unittest2 as unittest except ImportError: import unittest import pytest from .ecdsa import ( Private_key, Public_key, Signature, generator_192, digest_integer, ellipticcurve, point_is_valid, generator_224, generator_256, generator_384, generator_521, generator_secp256k1, curve_192, InvalidPointError, curve_112r2, generator_112r2, int_to_string, ) HYP_SETTINGS = {} # old hypothesis doesn't have the "deadline" setting if sys.version_info > (2, 7): # pragma: no branch # SEC521p is slow, allow long execution for it HYP_SETTINGS["deadline"] = 5000 class TestP192FromX9_62(unittest.TestCase): """Check test vectors from X9.62""" @classmethod def setUpClass(cls): cls.d = 651056770906015076056810763456358567190100156695615665659 cls.Q = cls.d * generator_192 cls.k = 6140507067065001063065065565667405560006161556565665656654 cls.R = cls.k * generator_192 cls.msg = 968236873715988614170569073515315707566766479517 cls.pubk = Public_key(generator_192, generator_192 * cls.d) cls.privk = Private_key(cls.pubk, cls.d) cls.sig = cls.privk.sign(cls.msg, cls.k) def test_point_multiplication(self): assert self.Q.x() == 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5 def test_point_multiplication_2(self): assert self.R.x() == 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD assert self.R.y() == 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835 def test_mult_and_addition(self): u1 = 2563697409189434185194736134579731015366492496392189760599 u2 = 6266643813348617967186477710235785849136406323338782220568 temp = u1 * generator_192 + u2 * self.Q assert temp.x() == 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD assert temp.y() == 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835 def test_signature(self): r, s = self.sig.r, self.sig.s assert r == 3342403536405981729393488334694600415596881826869351677613 assert s == 5735822328888155254683894997897571951568553642892029982342 def test_verification(self): assert self.pubk.verifies(self.msg, self.sig) def test_rejection(self): assert not self.pubk.verifies(self.msg - 1, self.sig) class TestPublicKey(unittest.TestCase): def test_equality_public_keys(self): gen = generator_192 x = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F point = ellipticcurve.Point(gen.curve(), x, y) pub_key1 = Public_key(gen, point) pub_key2 = Public_key(gen, point) self.assertEqual(pub_key1, pub_key2) def test_inequality_public_key(self): gen = generator_192 x1 = <KEY> y1 = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F point1 = ellipticcurve.Point(gen.curve(), x1, y1) x2 = 0x6A223D00BD22C52833409A163E057E5B5DA1DEF2A197DD15 y2 = 0x7B482604199367F1F303F9EF627F922F97023E90EAE08ABF point2 = ellipticcurve.Point(gen.curve(), x2, y2) pub_key1 = Public_key(gen, point1) pub_key2 = Public_key(gen, point2) self.assertNotEqual(pub_key1, pub_key2) def test_inequality_different_curves(self): gen = generator_192 x1 = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 y1 = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F point1 = ellipticcurve.Point(gen.curve(), x1, y1) x2 = 0x722BA0FB6B8FC8898A4C6AB49E66 y2 = 0x2B7344BB57A7ABC8CA0F1A398C7D point2 = ellipticcurve.Point(generator_112r2.curve(), x2, y2) pub_key1 = Public_key(gen, point1) pub_key2 = Public_key(generator_112r2, point2) self.assertNotEqual(pub_key1, pub_key2) def 
test_inequality_public_key_not_implemented(self): gen = generator_192 x = 0xC<KEY>3BCD4CD0080BCB1B7F811F2FFA41979F6 y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F point = ellipticcurve.Point(gen.curve(), x, y) pub_key = Public_key(gen, point) self.assertNotEqual(pub_key, None) def test_public_key_with_generator_without_order(self): gen = ellipticcurve.PointJacobi( generator_192.curve(), generator_192.x(), generator_192.y(), 1 ) x = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F point = ellipticcurve.Point(gen.curve(), x, y) with self.assertRaises(InvalidPointError) as e: Public_key(gen, point) self.assertIn("Generator point must have order", str(e.exception)) def test_public_point_on_curve_not_scalar_multiple_of_base_point(self): x = 2 y = 0xBE6AA4938EF7CFE6FE29595B6B00 # we need a curve with cofactor != 1 point = ellipticcurve.PointJacobi(curve_112r2, x, y, 1) self.assertTrue(curve_112r2.contains_point(x, y)) with self.assertRaises(InvalidPointError) as e: Public_key(generator_112r2, point) self.assertIn("Generator point order", str(e.exception)) def test_point_is_valid_with_not_scalar_multiple_of_base_point(self): x = 2 y = 0xBE6AA4938EF7CFE6FE29595B6B00 self.assertFalse(point_is_valid(generator_112r2, x, y)) # the tests to verify the extensiveness of tests in ecdsa.ecdsa # if PointJacobi gets modified to calculate the x and y mod p the tests # below will need to use a fake/mock object def test_invalid_point_x_negative(self): pt = ellipticcurve.PointJacobi(curve_192, -1, 0, 1) with self.assertRaises(InvalidPointError) as e: Public_key(generator_192, pt) self.assertIn("The public point has x or y", str(e.exception)) def test_invalid_point_x_equal_p(self): pt = ellipticcurve.PointJacobi(curve_192, curve_192.p(), 0, 1) with self.assertRaises(InvalidPointError) as e: Public_key(generator_192, pt) self.assertIn("The public point has x or y", str(e.exception)) def test_invalid_point_y_negative(self): pt = ellipticcurve.PointJacobi(curve_192, 0, -1, 1) with self.assertRaises(InvalidPointError) as e: Public_key(generator_192, pt) self.assertIn("The public point has x or y", str(e.exception)) def test_invalid_point_y_equal_p(self): pt = ellipticcurve.PointJacobi(curve_192, 0, curve_192.p(), 1) with self.assertRaises(InvalidPointError) as e: Public_key(generator_192, pt) self.assertIn("The public point has x or y", str(e.exception)) class TestPublicKeyVerifies(unittest.TestCase): # test all the different ways that a signature can be publicly invalid @classmethod def setUpClass(cls): gen = generator_192 x = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 y = 0x8804DC7A7C<KEY>7D6DE8A0E11867F point = ellipticcurve.Point(gen.curve(), x, y) cls.pub_key = Public_key(gen, point) def test_sig_with_r_zero(self): sig = Signature(0, 1) self.assertFalse(self.pub_key.verifies(1, sig)) def test_sig_with_r_order(self): sig = Signature(generator_192.order(), 1) self.assertFalse(self.pub_key.verifies(1, sig)) def test_sig_with_s_zero(self): sig = Signature(1, 0) self.assertFalse(self.pub_key.verifies(1, sig)) def test_sig_with_s_order(self): sig = Signature(1, generator_192.order()) self.assertFalse(self.pub_key.verifies(1, sig)) class TestPrivateKey(unittest.TestCase): @classmethod def setUpClass(cls): gen = generator_192 x = 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6 y = 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F point = ellipticcurve.Point(gen.curve(), x, y) cls.pub_key = Public_key(gen, point) def 
test_equality_private_keys(self): pr_key1 = Private_key(self.pub_key, 100) pr_key2 = Private_key(self.pub_key, 100) self.assertEqual(pr_key1, pr_key2) def test_inequality_private_keys(self): pr_key1 = Private_key(self.pub_key, 100) pr_key2 = Private_key(self.pub_key, 200) self.assertNotEqual(pr_key1, pr_key2) def test_inequality_private_keys_not_implemented(self): pr_key = Private_key(self.pub_key, 100) self.assertNotEqual(pr_key, None) # Testing point validity, as per ECDSAVS.pdf B.2.2: P192_POINTS = [ ( generator_192, 0xCD6D0F029A023E9AACA429615B8F577ABEE685D8257CC83A, 0x00019C410987680E9FB6C0B6ECC01D9A2647C8BAE27721BACDFC, False, ), ( generator_192, 0x00017F2FCE203639E9EAF9FB50B81FC32776B30E3B02AF16C73B, 0x95DA95C5E72DD48E229D4748D4EEE658A9A54111B23B2ADB, False, ), ( generator_192, 0x4F77F8BC7FCCBADD5760F4938746D5F253EE2168C1CF2792, 0x000147156FF824D131629739817EDB197717C41AAB5C2A70F0F6, False, ), ( generator_192, 0xC58D61F88D905293BCD4CD0080BCB1B7F811F2FFA41979F6, 0x8804DC7A7C4C7F8B5D437F5156F3312CA7D6DE8A0E11867F, True, ), ( generator_192, 0xCDF56C1AA3D8AFC53C521ADF3FFB96734A6A630A4A5B5A70, 0x97C1C44A5FB229007B5EC5D25F7413D170068FFD023CAA4E, True, ), ( generator_192, 0x89009C0DC361C81E99280C8E91DF578DF88CDF4B0CDEDCED, 0x27BE44A529B7513E727251F128B34262A0FD4D8EC82377B9, True, ), ( generator_192, 0x6A223D00BD22C52833409A163E057E5B5DA1DEF2A197DD15, 0x7B482604199367F1F303F9EF627F922F97023E90EAE08ABF, True, ), ( generator_192, 0x6DCCBDE75C0948C98DAB32EA0BC59FE125CF0FB1A3798EDA, 0x0001171A3E0FA60CF3096F4E116B556198DE430E1FBD330C8835, False, ), ( generator_192, 0xD266B39E1F491FC4ACBBBC7D098430931CFA66D55015AF12, 0x193782EB909E391A3148B7764E6B234AA94E48D30A16DBB2, False, ), ( generator_192, 0x9D6DDBCD439BAA0C6B80A654091680E462A7D1D3F1FFEB43, 0x6AD8EFC4D133CCF167C44EB4691C80ABFFB9F82B932B8CAA, False, ), ( generator_192, 0x146479D944E6BDA87E5B35818AA666A4C998A71F4E95EDBC, 0xA86D6FE62BC8FBD88139693F842635F687F132255858E7F6, False, ), ( generator_192, 0xE594D4A598046F3598243F50FD2C7BD7D380EDB055802253, 0x509014C0C4D6B536E3CA750EC09066AF39B4C8616A53A923, False, ), ] @pytest.mark.parametrize("generator,x,y,expected", P192_POINTS) def test_point_validity(generator, x, y, expected): """ `generator` defines the curve; is `(x, y)` a point on this curve? `expected` is True if the right answer is Yes. 
""" assert point_is_valid(generator, x, y) == expected # Trying signature-verification tests from ECDSAVS.pdf B.2.4: CURVE_192_KATS = [ ( generator_192, int( "0x84ce72aa8699df436059f052ac51b6398d2511e49631bcb7e71f89c499b9ee" "<KEY>" "d2b0fbd8b2c4e102e16d828374bbc47b93852f212d5043c3ea720f086178ff79" "8cc4f63f787b9c2e419efa033e7644ea7936f54462dc21a6c4580725f7f0e7d1" "58", 16, ), 0xD9DBFB332AA8E5FF091E8CE535857C37C73F6250FFB2E7AC, 0x282102E364FEDED3AD15DDF968F88D8321AA268DD483EBC4, 0x64DCA58A20787C488D11D6DD96313F1B766F2D8EFE122916, 0x1ECBA28141E84AB4ECAD92F56720E2CC83EB3D22DEC72479, True, ), ( generator_192, int( "0x94bb5bacd5f8ea765810024db87f4224ad71362a3c28284b2b9f39fab86db1" "2e8beb94aae899768229be8fdb6c4f12f28912bb604703a79ccff769c1607f5a" "<KEY>" "26ab6f5a659113a9034e54be7b041ced9dcf6458d7fb9cbfb2744d999f7dfd63" "f4", 16, ), 0x3E53EF8D3112AF3285C0E74842090712CD324832D4277AE7, 0xCC75F8952D30AEC2CBB719FC6AA9934590B5D0FF5A83ADB7, 0x8285261607283BA18F335026130BAB31840DCFD9C3E555AF, 0x356D89E1B04541AFC9704A45E9C535CE4A50929E33D7E06C, True, ), ( generator_192, int( "0xf6227a8eeb34afed1621dcc89a91d72ea212cb2f476839d9b4243c66877911" "b37b4ad6f4448792a7bbba76c63bdd63414b6facab7dc71c3396a73bd7ee14cd" "d41a659c61c99b779cecf07bc51ab391aa3252386242b9853ea7da67fd768d30" "3f1b9b513d401565b6f1eb722dfdb96b519fe4f9bd5de67ae131e64b40e78c42" "dd", 16, ), 0x16335DBE95F8E8254A4E04575D736BEFB258B8657F773CB7, 0x421B13379C59BC9DCE38A1099CA79BBD06D647C7F6242336, 0x4141BD5D64EA36C5B0BD21EF28C02DA216ED9D04522B1E91, 0x159A6AA852BCC579E821B7BB0994C0861FB08280C38DAA09, False, ), ( generator_192, int( "0x16b5f93afd0d02246f662761ed8e0dd9504681ed02a253006eb36736b56309" "7ba39f81c8e1bce7a16c1339e345efabbc6baa3efb0612948ae51103382a8ee8" "bc448e3ef71e9f6f7a9676694831d7f5dd0db5446f179bcb737d4a526367a447" "bfe2c857521c7f40b6d7d7e01a180d92431fb0bbd29c04a0c420a57b3ed26ccd" "8a", 16, ), 0xFD14CDF1607F5EFB7B1793037B15BDF4BAA6F7C16341AB0B, 0x83FA0795CC6C4795B9016DAC928FD6BAC32F3229A96312C4, 0x8DFDB832951E0167C5D762A473C0416C5C15BC1195667DC1, 0x1720288A2DC13FA1EC78F763F8FE2FF7354A7E6FDDE44520, False, ), ( generator_192, int( "0x08a2024b61b79d260e3bb43ef15659aec89e5b560199bc82cf7c65c77d3919" "2e03b9a895d766655105edd9188242b91fbde4167f7862d4ddd61e5d4ab55196" "683d4f13ceb90d87aea6e07eb50a874e33086c4a7cb0273a8e1c4408f4b846bc" "eae1ebaac1b2b2ea851a9b09de322efe34cebe601653efd6ddc876ce8c2f2072" "fb", 16, ), 0x674F941DC1A1F8B763C9334D726172D527B90CA324DB8828, 0x65ADFA32E8B236CB33A3E84CF59BFB9417AE7E8EDE57A7FF, 0x9508B9FDD7DAF0D8126F9E2BC5A35E4C6D800B5B804D7796, 0x36F2BF6B21B987C77B53BB801B3435A577E3D493744BFAB0, False, ), ( generator_192, int( "0x1843aba74b0789d4ac6b0b8923848023a644a7b70afa23b1191829bbe4397c" "e15b629bf21a8838298653ed0c19222b95fa4f7390d1b4c844d96e645537e0aa" "e98afb5c0ac3bd0e4c37f8daaff25556c64e98c319c52687c904c4de7240a1cc" "55cd9756b7edaef184e6e23b385726e9ffcba8001b8f574987c1a3fedaaa83ca" "6d", 16, ), 0x10ECCA1AAD7220B56A62008B35170BFD5E35885C4014A19F, 0x04EB61984C6C12ADE3BC47F3C629ECE7AA0A033B9948D686, 0x82BFA4E82C0DFE9274169B86694E76CE993FD83B5C60F325, 0xA97685676C59A65DBDE002FE9D613431FB183E8006D05633, False, ), ( generator_192, int( "0x5a478f4084ddd1a7fea038aa9732a822106385797d02311aeef4d0264f824f" "698df7a48cfb6b578cf3da416bc0799425bb491be5b5ecc37995b85b03420a98" "f2c4dc5c31a69a379e9e322fbe706bbcaf0f77175e05cbb4fa162e0da82010a2" "78461e3e974d137bc746d1880d6eb02aa95216014b37480d84b87f717bb13f76" "e1", 16, ), 0x6636653CB5B894CA65C448277B29DA3AD101C4C2300F7C04, 0xFDF1CBB3FC3FD6A4F890B59E554544175FA77DBDBEB656C1, 
0xEAC2DDECDDFB79931A9C3D49C08DE0645C783A24CB365E1C, 0x3549FEE3CFA7E5F93BC47D92D8BA100E881A2A93C22F8D50, False, ), ( generator_192, int( "0xc598774259a058fa65212ac57eaa4f52240e629ef4c310722088292d1d4af6" "c39b49ce06ba77e4247b20637174d0bd67c9723feb57b5ead232b47ea452d5d7" "a089f17c00b8b6767e434a5e16c231ba0efa718a340bf41d67ea2d295812ff1b" "9277daacb8bc27b50ea5e6443bcf95ef4e9f5468fe78485236313d53d1c68f6b" "a2", 16, ), 0xA82BD718D01D354001148CD5F69B9EBF38FF6F21898F8AAA, 0xE67CEEDE07FC2EBFAFD62462A51E4B6C6B3D5B537B7CAF3E, 0x4D292486C620C3DE20856E57D3BB72FCDE4A73AD26376955, 0xA85289591A6081D5728825520E62FF1C64F94235C04C7F95, False, ), ( generator_192, int( "0xca98ed9db081a07b7557f24ced6c7b9891269a95d2026747add9e9eb80638a" "961cf9c71a1b9f2c29744180bd4c3d3db60f2243c5c0b7cc8a8d40a3f9a7fc91" "0250f2187136ee6413ffc67f1a25e1c4c204fa9635312252ac0e0481d89b6d53" "808f0c496ba87631803f6c572c1f61fa049737fdacce4adff757afed4f05beb6" "58", 16, ), 0x7D3B016B57758B160C4FCA73D48DF07AE3B6B30225126C2F, 0x4AF3790D9775742BDE46F8DA876711BE1B65244B2B39E7EC, 0x95F778F5F656511A5AB49A5D69DDD0929563C29CBC3A9E62, 0x75C87FC358C251B4C83D2DD979FAAD496B539F9F2EE7A289, False, ), ( generator_192, int( "0x31dd9a54c8338bea06b87eca813d555ad1850fac9742ef0bbe40dad400e102" "88acc9c11ea7dac79eb16378ebea9490e09536099f1b993e2653cd50240014c9" "0a9c987f64545abc6a536b9bd2435eb5e911fdfde2f13be96ea36ad38df4ae9e" "a387b29cced599af777338af2794820c9cce43b51d2112380a35802ab7e396c9" "7a", 16, ), 0x9362F28C4EF96453D8A2F849F21E881CD7566887DA8BEB4A, 0xE64D26D8D74C48A024AE85D982EE74CD16046F4EE5333905, 0xF3923476A296C88287E8DE914B0B324AD5A963319A4FE73B, 0xF0BAEED7624ED00D15244D8BA2AEDE085517DBDEC8AC65F5, True, ), ( generator_192, int( "0xb2b94e4432267c92f9fdb9dc6040c95ffa477652761290d3c7de312283f645" "0d89cc4aabe748554dfb6056b2d8e99c7aeaad9cdddebdee9dbc099839562d90" "64e68e7bb5f3a6bba0749ca9a538181fc785553a4000785d73cc207922f63e8c" "e1112768cb1de7b673aed83a1e4a74592f1268d8e2a4e9e63d414b5d442bd045" "6d", 16, ), 0xCC6FC032A846AAAC25533EB033522824F94E670FA997ECEF, 0xE25463EF77A029ECCDA8B294FD63DD694E38D223D30862F1, 0x066B1D07F3A40E679B620EDA7F550842A35C18B80C5EBE06, 0xA0B0FB201E8F2DF65E2C4508EF303BDC90D934016F16B2DC, False, ), ( generator_192, int( "0x4366fcadf10d30d086911de30143da6f579527036937007b337f7282460eae" "5678b15cccda853193ea5fc4bc0a6b9d7a31128f27e1214988592827520b214e" "ed5052f7775b750b0c6b15f145453ba3fee24a085d65287e10509eb5d5f602c4" "40341376b95c24e5c4727d4b859bfe1483d20538acdd92c7997fa9c614f0f839" "d7", 16, ), 0x955C908FE900A996F7E2089BEE2F6376830F76A19135E753, 0xBA0C42A91D3847DE4A592A46DC3FDAF45A7CC709B90DE520, 0x1F58AD77FC04C782815A1405B0925E72095D906CBF52A668, 0xF2E93758B3AF75EDF784F05A6761C9B9A6043C66B845B599, False, ), ( generator_192, int( "0x543f8af57d750e33aa8565e0cae92bfa7a1ff78833093421c2942cadf99866" "70a5ff3244c02a8225e790fbf30ea84c74720abf99cfd10d02d34377c3d3b412" "69bea763384f372bb786b5846f58932defa68023136cd571863b304886e95e52" "e7877f445b9364b3f06f3c28da12707673fecb4b8071de06b6e0a3c87da160ce" "f3", 16, ), 0x31F7FA05576D78A949B24812D4383107A9A45BB5FCCDD835, 0x8DC0EB65994A90F02B5E19BD18B32D61150746C09107E76B, 0xBE26D59E4E883DDE7C286614A767B31E49AD88789D3A78FF, 0x8762CA831C1CE42DF77893C9B03119428E7A9B819B619068, False, ), ( generator_192, int( "0xd2e8454143ce281e609a9d748014dcebb9d0bc53adb02443a6aac2ffe6cb009f" "387c346ecb051791404f79e902ee333ad65e5c8cb38dc0d1d39a8dc90add502357" "2720e5b94b190d43dd0d7873397504c0c7aef2727e628eb6a74411f2e400c65670" "716cb4a815dc91cbbfeb7cfe8c929e93184c938af2c078584da045e8f8d1", 16, ), 
0x66AA8EDBBDB5CF8E28CEB51B5BDA891CAE2DF84819FE25C0, 0x0C6BC2F69030A7CE58D4A00E3B3349844784A13B8936F8DA, 0xA4661E69B1734F4A71B788410A464B71E7FFE42334484F23, 0x738421CF5E049159D69C57A915143E226CAC8355E149AFE9, False, ), ( generator_192, int( "0x6660717144040f3e2f95a4e25b08a7079c702a8b29babad5a19a87654bc5c5af" "a261512a11b998a4fb36b5d8fe8bd942792ff0324b108120de86d63f65855e5461" "184fc96a0a8ffd2ce6d5dfb0230cbbdd98f8543e361b3205f5da3d500fdc8bac6d" "b377d75ebef3cb8f4d1ff738071ad0938917889250b41dd1d98896ca06fb", 16, ), 0xBCFACF45139B6F5F690A4C35A5FFFA498794136A2353FC77, 0x6F4A6C906316A6AFC6D98FE1F0399D056F128FE0270B0F22, 0x9DB679A3DAFE48F7CCAD122933ACFE9DA0970B71C94C21C1, 0x984C2DB99827576C0A41A5DA41E07D8CC768BC82F18C9DA9, False, ), ] @pytest.mark.parametrize("gen,msg,qx,qy,r,s,expected", CURVE_192_KATS) def test_signature_validity(gen, msg, qx, qy, r, s, expected): """ `msg` = message, `qx` and `qy` represent the base point on elliptic curve of `gen`, `r` and `s` are the signature, and `expected` is True iff the signature is expected to be valid.""" pubk = Public_key(gen, ellipticcurve.Point(gen.curve(), qx, qy)) assert expected == pubk.verifies(digest_integer(msg), Signature(r, s)) @pytest.mark.parametrize( "gen,msg,qx,qy,r,s,expected", [x for x in CURVE_192_KATS if x[6]] ) def test_pk_recovery(gen, msg, r, s, qx, qy, expected): del expected sign = Signature(r, s) pks = sign.recover_public_keys(digest_integer(msg), gen) assert pks # Test if the signature is valid for all found public keys for pk in pks: q = pk.point test_signature_validity(gen, msg, q.x(), q.y(), r, s, True) # Test if the original public key is in the set of found keys original_q = ellipticcurve.Point(gen.curve(), qx, qy) points = [pk.point for pk in pks] assert original_q in points @st.composite def st_random_gen_key_msg_nonce(draw): """Hypothesis strategy for test_sig_verify().""" name_gen = { "generator_192": generator_192, "generator_224": generator_224, "generator_256": generator_256, "generator_secp256k1": generator_secp256k1, "generator_384": generator_384, "generator_521": generator_521, } name = draw(st.sampled_from(sorted(name_gen.keys()))) note("Generator used: {0}".format(name)) generator = name_gen[name] order = int(generator.order()) key = draw(st.integers(min_value=1, max_value=order)) msg = draw(st.integers(min_value=1, max_value=order)) nonce = draw( st.integers(min_value=1, max_value=order + 1) | st.integers(min_value=order >> 1, max_value=order) ) return generator, key, msg, nonce SIG_VER_SETTINGS = dict(HYP_SETTINGS) SIG_VER_SETTINGS["max_examples"] = 10 @settings(**SIG_VER_SETTINGS) @example((generator_224, 4, 1, 1)) @given(st_random_gen_key_msg_nonce()) def test_sig_verify(args): """ Check if signing and verification works for arbitrary messages and that signatures for other messages are rejected. """ generator, sec_mult, msg, nonce = args pubkey = Public_key(generator, generator * sec_mult) privkey = Private_key(pubkey, sec_mult) signature = privkey.sign(msg, nonce) assert pubkey.verifies(msg, signature) assert not pubkey.verifies(msg - 1, signature) def test_int_to_string_with_zero(): assert int_to_string(0) == b"\x00"
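A compact sign/verify sketch using the same low-level API the suite exercises; the secret, nonce and message hash are arbitrary toy integers, not test vectors.

from ecdsa.ecdsa import Private_key, Public_key, generator_192

secret = 123456789
pubkey = Public_key(generator_192, generator_192 * secret)
privkey = Private_key(pubkey, secret)

msg_hash = 0x1F2E3D4C  # an already-hashed message, as an integer
nonce = 987654321      # per-signature secret k
signature = privkey.sign(msg_hash, nonce)

assert pubkey.verifies(msg_hash, signature)
assert not pubkey.verifies(msg_hash - 1, signature)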
tools/accuracy_checker/openvino/tools/accuracy_checker/representation/nlp_representation.py
TolyaTalamanov/open_model_zoo
2,201
12616128
""" Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np from .base_representation import BaseRepresentation from .classification_representation import ( ClassificationAnnotation, SequenceClassificationAnnotation, MultiLabelClassificationAnnotation ) class MachineTranslationRepresentation(BaseRepresentation): pass class MachineTranslationAnnotation(MachineTranslationRepresentation): def __init__(self, identifier, source='', reference=''): super().__init__(identifier) self.source = source self.reference = reference class MachineTranslationPrediction(MachineTranslationRepresentation): def __init__(self, identifier, translation=''): super().__init__(identifier) self.translation = translation class LanguageModeling(BaseRepresentation): def __init__(self, identifier=''): super().__init__(identifier) class LanguageModelingAnnotation(LanguageModeling): def __init__(self, identifier, unique_id, input_ids, tokens, labels=None): super().__init__(identifier) self.unique_id = unique_id self.tokens = tokens self.input_ids = input_ids self.labels = labels if labels is not None else [] class LanguageModelingPrediction(LanguageModeling): def __init__(self, identifier, logits): super().__init__(identifier) self.logits = logits class QuestionAnswering(BaseRepresentation): def __init__(self, identifier=''): super().__init__(identifier) class QuestionAnsweringAnnotation(QuestionAnswering): def __init__(self, identifier, question_id, unique_id, input_ids, input_mask, segment_ids, position_ids, cls_index, p_mask, orig_answer_text=None, paragraph_text=None, doc_tokens=None, is_impossible=False, paragraph_len=None, tokens=None, token_is_max_context=None, token_to_orig_map=None): super().__init__(identifier) self.orig_answer_text = orig_answer_text if orig_answer_text is not None else '' self.question_id = question_id self.unique_id = unique_id self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.position_ids = position_ids self.cls_index = cls_index self.tokens = tokens self.p_mask = p_mask self.paragraph_text = paragraph_text if paragraph_text is not None else '' self.doc_tokens = doc_tokens if doc_tokens is not None else [] self.is_impossible = is_impossible self.paragraph_len = paragraph_len self.token_is_max_context = token_is_max_context self.token_to_orig_map = token_to_orig_map class QuestionAnsweringPrediction(QuestionAnswering): def __init__(self, identifier, start_logits=None, end_logits=None, start_index=None, end_index=None, tokens=None): super().__init__(identifier) self.start_logits = start_logits if start_logits is not None else [] self.end_logits = end_logits if end_logits is not None else [] self.start_index = start_index if start_index is not None else [] self.end_index = end_index if end_index is not None else [] self.tokens = tokens if tokens is not None else [] class QuestionAnsweringEmbeddingAnnotation(QuestionAnswering): def __init__(self, identifier, input_ids, input_mask, segment_ids, position_ids, context_pos_identifier): 
super().__init__(identifier) self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.position_ids = position_ids self.context_pos_indetifier = context_pos_identifier class QuestionAnsweringEmbeddingPrediction(QuestionAnswering): def __init__(self, identifier, embedding): super().__init__(identifier) self.embedding = embedding class QuestionAnsweringBiDAFAnnotation(QuestionAnswering): def __init__(self, identifier, title, context, query, answers, context_word, context_char, query_word, query_char, question_id, words_idx_in_context): super().__init__(identifier) self.title = title self.context = context self.query = query self.orig_answer_text = answers self.context_word = context_word self.context_char = context_char self.query_word = query_word self.query_char = query_char self.question_id = question_id self.words_idx_in_context = words_idx_in_context class TextClassificationAnnotation(ClassificationAnnotation): def __init__(self, identifier, label, input_ids, input_mask=None, segment_ids=None, tokens=None): super().__init__(identifier, label) self.input_ids = input_ids self.input_mask = input_mask if input_mask is not None else [] self.segment_ids = segment_ids if segment_ids is not None else [] self.tokens = tokens if tokens is not None else [] class BERTNamedEntityRecognitionAnnotation(SequenceClassificationAnnotation): def __init__(self, identifier, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None): super().__init__(identifier, label_id) self.input_ids = input_ids self.input_mask = input_mask if input_mask is not None else [] self.segment_ids = segment_ids if segment_ids is not None else [] self.valid_ids = np.array(valid_ids, dtype=bool) if valid_ids is not None else valid_ids self.label_mask = np.array(label_mask, dtype=bool) if label_mask is not None else label_mask class SentenceSimilarityAnnotation(BaseRepresentation): def __init__( self, identifier, idx, pair_id, similarity_score, input_ids, input_mask, segment_ids ): super().__init__(identifier) self.id = idx self.pair_id = pair_id self.input_ids = input_ids self.similarity_score = similarity_score self.input_mask = input_mask if input_mask is not None else [] self.segment_ids = segment_ids if segment_ids is not None else [] class MultiLabelTextClassification(MultiLabelClassificationAnnotation): def __init__(self, identifier, label, input_ids, input_mask=None, segment_ids=None, tokens=None): super().__init__(identifier, label) self.input_ids = input_ids self.input_mask = input_mask if input_mask is not None else [] self.segment_ids = segment_ids if segment_ids is not None else [] self.tokens = tokens if tokens is not None else []
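An illustrative construction of one of the annotation types defined above; the ids, tokens and label are fabricated placeholder values.

annotation = TextClassificationAnnotation(
    identifier='sample_0',
    label=1,
    input_ids=[101, 7592, 2088, 102],
    input_mask=[1, 1, 1, 1],
    segment_ids=[0, 0, 0, 0],
    tokens=['[CLS]', 'hello', 'world', '[SEP]'],
)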
application/operator/NGSI-LD-Adapter/module/common_utilities/user.py
jason-fox/fogflow
102
12616142
<gh_stars>100-1000
import requests

USERS_URL = 'http://jsonplaceholder.typicode.com/use'


def get_users():
    """Get list of users"""
    response = requests.get(USERS_URL)
    print(response.status_code)
    if response.ok:
        return response
    else:
        return None
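A trivial call sketch for the helper above; it returns the raw requests Response (or None), so the caller decodes the body.

resp = get_users()
if resp is not None:
    print(len(resp.json()), 'users fetched')
else:
    print('request failed')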
baseline/tf/classify/train.py
shar999/mead-baseline
241
12616159
<gh_stars>100-1000 """Train a classifier with TensorFlow This module supports several different ways of training a model 1. eager mode (`default` for eager) 2. distributed eager mode (`distributed`) """ from baseline.tf.classify.training import *
openbb_terminal/cryptocurrency/defi/cryptosaurio_view.py
joshuabuildsthings/GamestonkTerminal
255
12616160
"""Cryptosaurio View""" __docformat__ = "numpy" import logging import os from typing import List, Optional import matplotlib.pyplot as plt from openbb_terminal import config_terminal as cfg from openbb_terminal.config_plot import PLOT_DPI from openbb_terminal.cryptocurrency.defi import cryptosaurio_model from openbb_terminal.decorators import log_start_end from openbb_terminal.helper_funcs import ( export_data, plot_autoscale, print_rich_table, is_valid_axes_count, ) from openbb_terminal.rich_config import console logger = logging.getLogger(__name__) @log_start_end(log=logger) def display_anchor_data( address: str = "", export: str = "", show_transactions: bool = False, external_axes: Optional[List[plt.Axes]] = None, ) -> None: """Displays anchor protocol earnings data of a certain terra address [Source: https://cryptosaurio.com/] Parameters ---------- asset : str Terra asset {ust,luna,sdt} address : str Terra address. Valid terra addresses start with 'terra' show_transactions : bool Flag to show history of transactions in Anchor protocol for address specified. Default False export : str Export dataframe data to csv,json,xlsx file external_axes : Optional[List[plt.Axes]], optional External axes (1 axis is expected in the list), by default None """ df, df_deposits, stats_str = cryptosaurio_model.get_anchor_data(address=address) # This plot has 1 axis if not external_axes: _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI) elif is_valid_axes_count(external_axes, 1): (ax,) = external_axes else: return print("") console.print(stats_str) print("") if show_transactions: print_rich_table( df_deposits, headers=list(df_deposits.columns), show_index=False, title="Transactions history in Anchor Earn", ) ax.plot(df["time"], df["yield"]) ax.set_ylabel("Earnings Value [UST]") ax.set_title("Earnings in Anchor Earn") cfg.theme.style_primary_axis(ax) if not external_axes: cfg.theme.visualize_output() export_data( export, os.path.dirname(os.path.abspath(__file__)), "anchor", df, )
tests/test_feature_conversion.py
vishalbelsare/superintendent
157
12616173
<filename>tests/test_feature_conversion.py<gh_stars>100-1000 import pytest # noqa import numpy as np import pandas as pd from hypothesis import given, settings from hypothesis.strategies import ( booleans, floats, integers, lists, tuples, one_of, sampled_from, text, composite, ) from hypothesis.extra.pandas import data_frames, column, range_indexes from hypothesis.extra.numpy import ( arrays, scalar_dtypes, unsigned_integer_dtypes, datetime64_dtypes, floating_dtypes, integer_dtypes, ) from hypothesis import HealthCheck from superintendent.queueing.utils import _features_to_array guaranteed_dtypes = one_of( scalar_dtypes(), unsigned_integer_dtypes(), datetime64_dtypes(), floating_dtypes(), integer_dtypes(), ) @composite def dataframe(draw): n_cols = draw(integers(min_value=1, max_value=20)) dtypes = draw( lists( sampled_from([float, int, str]), min_size=n_cols, max_size=n_cols ) ) colnames = draw( lists( text() | integers(), min_size=n_cols, max_size=n_cols, unique=True ) ) return draw( data_frames( columns=[ column(name=name, dtype=dtype) for dtype, name in zip(dtypes, colnames) ], index=range_indexes(min_size=1), ) ) def exact_element_match(a, b): if isinstance(a, np.ndarray) and isinstance(b, np.ndarray): try: return ((a == b) | (np.isnan(a) & np.isnan(b))).all() except TypeError: return (a == b).all() elif isinstance(a, pd.DataFrame) and isinstance(b, pd.DataFrame): a = a.reset_index(drop=True) b = b.reset_index(drop=True) return ( ((a == b) | (a.isnull() & b.isnull())).all().all() or a.empty or b.empty ) else: return all( [ a_ == b_ or (np.isnan(a_) and np.isnan(b_)) for a_, b_ in zip(a, b) ] ) @given(inp=lists(floats() | integers() | text() | booleans())) def test_list_round_trip(inp): assert exact_element_match(inp, _features_to_array(inp)) @given( inp=arrays( guaranteed_dtypes, tuples( integers(min_value=1, max_value=50), integers(min_value=1, max_value=50), ), ) ) @settings(suppress_health_check=(HealthCheck.too_slow,)) def test_array_round_trip(inp): inp_list = list(inp) assert exact_element_match(inp, _features_to_array(inp_list)) @given(inp=dataframe()) @settings(suppress_health_check=(HealthCheck.too_slow,)) def test_df_round_trip(inp): inp_list = [row for _, row in inp.iterrows()] if not inp.empty: assert exact_element_match(inp, _features_to_array(inp_list)) @given(inp=dataframe()) @settings(suppress_health_check=(HealthCheck.too_slow,)) def test_dfs_round_trip(inp): inp_list = [row.to_frame().T for _, row in inp.iterrows()] if not inp.empty: assert exact_element_match(inp, _features_to_array(inp_list))
slack_bolt/middleware/url_verification/async_url_verification.py
hirosassa/bolt-python
504
12616218
<filename>slack_bolt/middleware/url_verification/async_url_verification.py<gh_stars>100-1000 from typing import Callable, Awaitable from slack_bolt.logger import get_bolt_logger from .url_verification import UrlVerification from slack_bolt.middleware.async_middleware import AsyncMiddleware from slack_bolt.request.async_request import AsyncBoltRequest from slack_bolt.response import BoltResponse class AsyncUrlVerification(UrlVerification, AsyncMiddleware): def __init__(self): self.logger = get_bolt_logger(AsyncUrlVerification) async def async_process( self, *, req: AsyncBoltRequest, resp: BoltResponse, next: Callable[[], Awaitable[BoltResponse]], ) -> BoltResponse: if self._is_url_verification_request(req.body): return self._build_success_response(req.body) else: return await next()
tests/onnx_test_runner.py
kendryte/nncase
510
12616226
from onnx import version_converter, helper import onnxsim import onnxruntime as ort import onnx import torch import shutil import os import numpy as np from test_runner import * class OnnxTestRunner(TestRunner): def __init__(self, case_name, targets=None, overwrite_configs: dict = None): super().__init__(case_name, targets, overwrite_configs) self.model_type = "onnx" def from_torch(self, module, in_shape, opset_version=11): # export model dummy_input = torch.randn(*in_shape) model_file = os.path.join(self.case_dir, 'test.onnx') torch.onnx.export(module, dummy_input, model_file, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK, opset_version=opset_version) return model_file def from_onnx_helper(self, model_def): try: onnx.checker.check_model(model_def) except onnx.checker.ValidationError as e: print('The model is invalid: %s' % e) else: print('The model is valid!') model_file = os.path.join(self.case_dir, 'test.onnx') onnx.save(model_def, model_file) return model_file def run(self, model_file): if model_file.startswith('examples'): model_file = os.path.join(os.path.dirname(__file__), '..', model_file) elif model_file.startswith('onnx-models'): model_file = os.path.join(os.getenv('ONNX_MODELS_DIR'), model_file[len('onnx-models/'):]) if self.case_dir != os.path.dirname(model_file): new_file = os.path.join(self.case_dir, 'test.onnx') shutil.copy(model_file, new_file) model_file = new_file if not self.inputs: self.parse_model_input_output(model_file) # preprocess model old_onnx_model = onnx.load(model_file) onnx_model = self.preprocess_model(old_onnx_model) onnx_model = onnx_model or self.preprocess_model( old_onnx_model, convert_version=False) onnx_model = onnx_model or self.preprocess_model( old_onnx_model, simplify=False) onnx_model = onnx_model or self.preprocess_model( old_onnx_model, convert_version=False, simplify=False) onnx_model = onnx_model or self.preprocess_model( old_onnx_model, fix_bn=False, convert_version=False, simplify=False) model_file = os.path.join( os.path.dirname(model_file), 'simplified.onnx') onnx.save_model(onnx_model, model_file) super().run(model_file) def preprocess_model(self, onnx_model, fix_bn=True, convert_version=True, simplify=True, import_test=True): args = {'fix_bn': fix_bn, 'convert_version': convert_version, 'simplify': simplify, 'import_test': import_test} try: if fix_bn: # fix https://github.com/onnx/models/issues/242 for node in onnx_model.graph.node: if(node.op_type == "BatchNormalization"): for attr in node.attribute: if (attr.name == "spatial"): attr.i = 1 if convert_version: curret_version = onnx_model.opset_import[0].version for i in range(curret_version, 8): onnx_model = version_converter.convert_version( onnx_model, i + 1) if simplify: onnx_model = onnx.shape_inference.infer_shapes(onnx_model) input_shapes = {} for input in self.inputs: input_shapes[input['name']] = input['shape'] onnx_model, check = onnxsim.simplify(onnx_model, input_shapes=input_shapes) assert check, "Simplified ONNX model could not be validated" print('[info]: preprocess ONNX model success: ', args) return onnx_model except Exception as e: print('[info]: preprocess ONNX model failed: ', args) print(e) # traceback.print_exc() return None def parse_model_input_output(self, model_file: str): onnx_model = onnx.load(model_file) input_all = [node.name for node in onnx_model.graph.input] input_initializer = [node.name for node in onnx_model.graph.initializer] input_names = list(set(input_all) - set(input_initializer)) input_tensors = [node for node in 
onnx_model.graph.input if node.name in input_names] # input for _, e in enumerate(input_tensors): onnx_type = e.type.tensor_type input_dict = {} input_dict['name'] = e.name input_dict['dtype'] = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_type.elem_type] input_dict['shape'] = [(i.dim_value if i.dim_value != 0 else d) for i, d in zip( onnx_type.shape.dim, [1, 3, 224, 224])] input_dict['model_shape'] = [(i.dim_value if i.dim_value != 0 else d) for i, d in zip( onnx_type.shape.dim, [1, 3, 224, 224])] self.inputs.append(input_dict) self.calibs.append(copy.deepcopy(input_dict)) # output for e in onnx_model.graph.output: output_dict = {} onnx_type = e.type.tensor_type output_dict['name'] = e.name output_dict['dtype'] = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_type.elem_type] output_dict['model_shape'] = [i.dim_value for i in onnx_type.shape.dim] print(output_dict) self.outputs.append(output_dict) def cpu_infer(self, case_dir: str, model_file: bytes, type: str): # create session try: print('[onnx]: using simplified model') sess = ort.InferenceSession(model_file) except Exception as e: print(e) try: print('[onnx]: using origin model') model_file = os.path.join(case_dir, 'test.onnx') sess = ort.InferenceSession(model_file) except Exception as e: print(e) print('[onnx]: using converted model') onnx_model = onnx.load(model_file) onnx_model = version_converter.convert_version(onnx_model, 8) model_file = os.path.join(case_dir, 'converted.onnx') onnx.save_model(onnx_model, model_file) sess = ort.InferenceSession(model_file) input_dict = {} for input in self.inputs: input_dict[input['name']] = self.transform_input( self.data_pre_process(input['data']), "float32", "CPU") outputs = sess.run(None, input_dict) i = 0 for output in outputs: bin_file = os.path.join(case_dir, f'cpu_result_{i}.bin') text_file = os.path.join(case_dir, f'cpu_result_{i}.txt') self.output_paths.append((bin_file, text_file)) output.tofile(bin_file) self.totxtfile(text_file, output) i += 1 def import_model(self, compiler, model_content, import_options): compiler.import_onnx(model_content, import_options)
demos/kitchen_sink/libs/baseclass/dialog_change_theme.py
Jonypr-code/KivyMD
1,111
12616241
import os from kivy.properties import StringProperty from kivy.uix.modalview import ModalView from kivy.utils import get_color_from_hex, get_hex_from_color from kivymd.color_definitions import colors, palette from kivymd.theming import ThemableBehavior class KitchenSinkBaseDialog(ThemableBehavior, ModalView): pass class KitchenSinkDialogDev(KitchenSinkBaseDialog): pass class KitchenSinkUsageCode(KitchenSinkBaseDialog): code = StringProperty() title = StringProperty() website = StringProperty() class KitchenSinkDialogLicense(KitchenSinkBaseDialog): def on_open(self): with open( os.path.join(os.environ["KITCHEN_SINK_ROOT"], "LICENSE"), encoding="utf-8", ) as license: self.ids.text_label.text = license.read().format( COLOR=get_hex_from_color(self.theme_cls.primary_color) ) class KitchenSinkDialogChangeTheme(KitchenSinkBaseDialog): def set_list_colors_themes(self): for name_theme in palette: self.ids.rv.data.append( { "viewclass": "KitchenSinkOneLineLeftWidgetItem", "color": get_color_from_hex(colors[name_theme]["500"]), "text": name_theme, } )
testing/kfctl/kfctl_delete_test.py
fyuan1316/kubeflow
2,527
12616264
<gh_stars>1000+ """Run kfctl delete as a pytest. We use this in order to generate a junit_xml file. """ import datetime import logging import os import subprocess import tempfile import uuid from retrying import retry import pytest from kubeflow.testing import util from googleapiclient import discovery from oauth2client.client import GoogleCredentials # TODO(gabrielwen): Move this to a separate test "kfctl_go_check_post_delete" def get_endpoints_list(project): cred = GoogleCredentials.get_application_default() services_mgt = discovery.build('servicemanagement', 'v1', credentials=cred) services = services_mgt.services() next_page_token = None endpoints = [] while True: results = services.list(producerProjectId=project, pageToken=next_page_token).execute() for s in results.get("services", {}): name = s.get("serviceName", "") endpoints.append(name) if not "nextPageToken" in results: break next_page_token = results["nextPageToken"] return endpoints def test_kfctl_delete(kfctl_path, app_path, project): if not kfctl_path: raise ValueError("kfctl_path is required") if not app_path: raise ValueError("app_path is required") logging.info("Using kfctl path %s", kfctl_path) logging.info("Using app path %s", app_path) util.run([kfctl_path, "delete", "all", "--delete_storage", "-V"], cwd=app_path) # Use services.list instead of services.get because error returned is not # 404, it's 403 which is confusing. name = os.path.basename(app_path) endpoint_name = "{deployment}.endpoints.{project}.cloud.goog".format( deployment=name, project=project) logging.info("Verify endpoint service is deleted: " + endpoint_name) if endpoint_name in get_endpoints_list(project): msg = "Endpoint is not deleted: " + endpoint_name logging.error(msg) raise AssertionError(msg) else: logging.info("Verified endpoint service is deleted.") if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format=('%(levelname)s|%(asctime)s' '|%(pathname)s|%(lineno)d| %(message)s'), datefmt='%Y-%m-%dT%H:%M:%S', ) logging.getLogger().setLevel(logging.INFO) pytest.main()
examples/advanced/skeletonize.py
hadivafaii/vedo
836
12616291
<reponame>hadivafaii/vedo<filename>examples/advanced/skeletonize.py """Using 1D Moving Least Squares to skeletonize a surface""" print(__doc__) from vedo import * N = 9 # nr of iterations f = 0.2 # fraction of neighbours pts = Mesh(dataurl+"man.vtk").clean(tol=0.02).points() pc = Points(pts) for i in range(N): pc = pc.clone().smoothMLS1D(f=f).color(i) show(pc, at=i, N=N, elevation=-5) interactive().close()
docs/sphinx_keras2onnx_extension.py
TomWildenhain-Microsoft/keras-onnx
362
12616295
# SPDX-License-Identifier: Apache-2.0 """ Extension for sphinx. """ import sphinx from docutils import nodes from docutils.parsers.rst import Directive import keras2onnx import onnxruntime def kerasonnx_version_role(role, rawtext, text, lineno, inliner, options=None, content=None): """ Defines custom role *keras2onnx-version* which returns *keras2onnx* version. """ if options is None: options = {} if content is None: content = [] if text == 'v': version = 'v' + keras2onnx.__version__ elif text == 'rt': version = 'v' + onnxruntime.__version__ else: raise RuntimeError("keras2onnx_version_role cannot interpret content '{0}'.".format(text)) node = nodes.Text(version) return [node], [] def setup(app): # Placeholder to initialize the folder before # generating the documentation. app.add_role('keras2onnxversion', kerasonnx_version_role) return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
threat_intel/alexaranking.py
Yelp/threat_intel
264
12616296
# -*- coding: utf-8 -*- # # AlexaRankingApi makes calls to the Alexa Ranking API # from threat_intel.util.api_cache import ApiCache from threat_intel.util.http import MultiRequest import xml.etree.ElementTree as ET from xml.etree.ElementTree import ParseError class AlexaRankingApi(object): BASE_URL = u'https://data.alexa.com/data?cli=10' def __init__(self, resources_per_req=10, cache_file_name=None, update_cache=True, req_timeout=None): """Establishes basic HTTP params and loads a cache. Args: resources_per_req: Maximum number of resources (hashes, URLs) to be sent in a single request cache_file_name: String file name of cache. update_cache: Determines whether the cache should be written back out to disk when closing it. Default is `True`. req_timeout: Maximum number of seconds to wait without reading a response byte before deciding an error has occurred. Default is None. """ self._resources_per_req = resources_per_req self._requests = MultiRequest(req_timeout=req_timeout) # Create an ApiCache if instructed to self._cache = ApiCache(cache_file_name, update_cache) if cache_file_name else None @MultiRequest.error_handling def get_alexa_rankings(self, domains): """Retrieves the most recent Alexa ranking info for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the Alexa report as value. """ api_name = 'alexa_rankings' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports(domains) for domain, response in zip(domains, responses): xml_response = self._extract_response_xml(domain, response) if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = xml_response return all_responses def _request_reports(self, domains): """Sends multiple requests for the domains to the Alexa ranking endpoint. Args: domains: list of string domains. Returns: A list of the responses. """ params = [{'url': domain} for domain in domains] responses = self._requests.multi_get( self.BASE_URL, query_params=params, to_json=False) return responses def _extract_response_xml(self, domain, response): """Extract XML content of an HTTP response into dictionary format. Args: domain: string domain that was queried. response: HTTP Response object. Returns: A dictionary: {alexa-ranking key : alexa-ranking value}. """ attributes = {} alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'} try: xml_root = ET.fromstring(response._content) for xml_child in xml_root.findall('SD//'): if xml_child.tag in alexa_keys and \ alexa_keys[xml_child.tag] in xml_child.attrib: attributes[xml_child.tag.lower( )] = xml_child.attrib[alexa_keys[xml_child.tag]] except ParseError: # Skip ill-formatted XML and return no Alexa attributes pass attributes['domain'] = domain return {'attributes': attributes} def _bulk_cache_lookup(self, api_name, keys): """Performs a bulk cache lookup and returns a tuple with the results found and the keys missing in the cache. If the cache is not configured it will return an empty dictionary of found results and the initial list of keys. Args: api_name: a string name of the API. keys: an enumerable of string keys. Returns: A tuple: (responses found, missing keys). """ if self._cache: responses = self._cache.bulk_lookup(api_name, keys) missing_keys = [key for key in keys if key not in responses.keys()] return (responses, missing_keys) return ({}, keys)
src/tests/test_eeil.py
francesco-p/FACIL
243
12616306
from tests import run_main_and_assert FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \ " --network LeNet --num-tasks 3 --seed 1 --batch-size 32" \ " --nepochs 3" \ " --num-workers 0" \ " --approach eeil" def test_eeil_exemplars_with_noise_grad(): args_line = FAST_LOCAL_TEST_ARGS args_line += " --num-exemplars 200" args_line += " --nepochs-finetuning 1" args_line += " --noise-grad" run_main_and_assert(args_line) def test_eeil_exemplars(): args_line = FAST_LOCAL_TEST_ARGS args_line += " --num-exemplars 200" args_line += " --nepochs-finetuning 1" run_main_and_assert(args_line) def test_eeil_with_warmup(): args_line = FAST_LOCAL_TEST_ARGS args_line += " --warmup-nepochs 5" args_line += " --warmup-lr-factor 0.5" args_line += " --num-exemplars 200" args_line += " --nepochs-finetuning 1" run_main_and_assert(args_line)
nltk/corpus/reader/plaintext.py
Bharat123rox/nltk
9,747
12616315
# Natural Language Toolkit: Plaintext Corpus Reader # # Copyright (C) 2001-2021 NLTK Project # Author: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # URL: <https://www.nltk.org/> # For license information, see LICENSE.TXT """ A reader for corpora that consist of plaintext documents. """ import nltk.data from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.tokenize import * class PlaintextCorpusReader(CorpusReader): """ Reader for corpora that consist of plaintext documents. Paragraphs are assumed to be split using blank lines. Sentences and words can be tokenized using the default tokenizers, or by custom tokenizers specified as parameters to the constructor. This corpus reader can be customized (e.g., to skip preface sections of specific document formats) by creating a subclass and overriding the ``CorpusView`` class variable. """ CorpusView = StreamBackedCorpusView """The corpus view class used by this reader. Subclasses of ``PlaintextCorpusReader`` may specify alternative corpus view classes (e.g., to skip the preface sections of documents.)""" def __init__( self, root, fileids, word_tokenizer=WordPunctTokenizer(), sent_tokenizer=nltk.data.LazyLoader("tokenizers/punkt/english.pickle"), para_block_reader=read_blankline_block, encoding="utf8", ): r""" Construct a new plaintext corpus reader for a set of documents located at the given root directory. Example usage: >>> root = '/usr/local/share/nltk_data/corpora/webtext/' >>> reader = PlaintextCorpusReader(root, '.*\.txt') # doctest: +SKIP :param root: The root directory for this corpus. :param fileids: A list or regexp specifying the fileids in this corpus. :param word_tokenizer: Tokenizer for breaking sentences or paragraphs into words. :param sent_tokenizer: Tokenizer for breaking paragraphs into words. :param para_block_reader: The block reader used to divide the corpus into paragraph blocks. """ CorpusReader.__init__(self, root, fileids, encoding) self._word_tokenizer = word_tokenizer self._sent_tokenizer = sent_tokenizer self._para_block_reader = para_block_reader def words(self, fileids=None): """ :return: the given file(s) as a list of words and punctuation symbols. :rtype: list(str) """ return concat( [ self.CorpusView(path, self._read_word_block, encoding=enc) for (path, enc, fileid) in self.abspaths(fileids, True, True) ] ) def sents(self, fileids=None): """ :return: the given file(s) as a list of sentences or utterances, each encoded as a list of word strings. :rtype: list(list(str)) """ if self._sent_tokenizer is None: raise ValueError("No sentence tokenizer for this corpus") return concat( [ self.CorpusView(path, self._read_sent_block, encoding=enc) for (path, enc, fileid) in self.abspaths(fileids, True, True) ] ) def paras(self, fileids=None): """ :return: the given file(s) as a list of paragraphs, each encoded as a list of sentences, which are in turn encoded as lists of word strings. :rtype: list(list(list(str))) """ if self._sent_tokenizer is None: raise ValueError("No sentence tokenizer for this corpus") return concat( [ self.CorpusView(path, self._read_para_block, encoding=enc) for (path, enc, fileid) in self.abspaths(fileids, True, True) ] ) def _read_word_block(self, stream): words = [] for i in range(20): # Read 20 lines at a time. 
words.extend(self._word_tokenizer.tokenize(stream.readline())) return words def _read_sent_block(self, stream): sents = [] for para in self._para_block_reader(stream): sents.extend( [ self._word_tokenizer.tokenize(sent) for sent in self._sent_tokenizer.tokenize(para) ] ) return sents def _read_para_block(self, stream): paras = [] for para in self._para_block_reader(stream): paras.append( [ self._word_tokenizer.tokenize(sent) for sent in self._sent_tokenizer.tokenize(para) ] ) return paras class CategorizedPlaintextCorpusReader(CategorizedCorpusReader, PlaintextCorpusReader): """ A reader for plaintext corpora whose documents are divided into categories based on their file identifiers. """ def __init__(self, *args, **kwargs): """ Initialize the corpus reader. Categorization arguments (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to the ``CategorizedCorpusReader`` constructor. The remaining arguments are passed to the ``PlaintextCorpusReader`` constructor. """ CategorizedCorpusReader.__init__(self, kwargs) PlaintextCorpusReader.__init__(self, *args, **kwargs) # FIXME: Is there a better way? How to not hardcode this? # Possibly, add a language kwargs to CategorizedPlaintextCorpusReader to # override the `sent_tokenizer`. class PortugueseCategorizedPlaintextCorpusReader(CategorizedPlaintextCorpusReader): def __init__(self, *args, **kwargs): CategorizedCorpusReader.__init__(self, kwargs) kwargs["sent_tokenizer"] = nltk.data.LazyLoader( "tokenizers/punkt/portuguese.pickle" ) PlaintextCorpusReader.__init__(self, *args, **kwargs) class EuroparlCorpusReader(PlaintextCorpusReader): """ Reader for Europarl corpora that consist of plaintext documents. Documents are divided into chapters instead of paragraphs as for regular plaintext documents. Chapters are separated using blank lines. Everything is inherited from ``PlaintextCorpusReader`` except that: - Since the corpus is pre-processed and pre-tokenized, the word tokenizer should just split the line at whitespaces. - For the same reason, the sentence tokenizer should just split the paragraph at line breaks. - There is a new 'chapters()' method that returns chapters instead instead of paragraphs. - The 'paras()' method inherited from PlaintextCorpusReader is made non-functional to remove any confusion between chapters and paragraphs for Europarl. """ def _read_word_block(self, stream): words = [] for i in range(20): # Read 20 lines at a time. words.extend(stream.readline().split()) return words def _read_sent_block(self, stream): sents = [] for para in self._para_block_reader(stream): sents.extend([sent.split() for sent in para.splitlines()]) return sents def _read_para_block(self, stream): paras = [] for para in self._para_block_reader(stream): paras.append([sent.split() for sent in para.splitlines()]) return paras def chapters(self, fileids=None): """ :return: the given file(s) as a list of chapters, each encoded as a list of sentences, which are in turn encoded as lists of word strings. :rtype: list(list(list(str))) """ return concat( [ self.CorpusView(fileid, self._read_para_block, encoding=enc) for (fileid, enc) in self.abspaths(fileids, True) ] ) def paras(self, fileids=None): raise NotImplementedError( "The Europarl corpus reader does not support paragraphs. Please use chapters() instead." )
scripts/bed_merge_overlapping.py
lldelisle/bx-python
122
12616326
#!/usr/bin/env python """ Merge any overlapping regions of bed files. Bed files can be provided on the command line or on stdin. Merged regions are always reported on the '+' strand, and any fields beyond chrom/start/stop are lost. usage: %prog bed files ... """ import fileinput import sys from bx.bitset_builders import binned_bitsets_from_bed_file bed_filenames = sys.argv[1:] if bed_filenames: input = fileinput.input(bed_filenames) else: input = sys.stdin bitsets = binned_bitsets_from_bed_file(input) for chrom in bitsets: bits = bitsets[chrom] end = 0 while True: start = bits.next_set(end) if start == bits.size: break end = bits.next_clear(start) print("%s\t%d\t%d" % (chrom, start, end))
vimfiles/bundle/vim-python/submodules/rope/ropetest/contrib/finderrorstest.py
ciskoinch8/vimrc
463
12616327
try: import unittest2 as unittest except ImportError: import unittest from rope.contrib import finderrors from ropetest import testutils class FindErrorsTest(unittest.TestCase): def setUp(self): super(FindErrorsTest, self).setUp() self.project = testutils.sample_project() self.mod = self.project.root.create_file('mod.py') def tearDown(self): testutils.remove_project(self.project) super(FindErrorsTest, self).tearDown() def test_unresolved_variables(self): self.mod.write('print(var)\n') result = finderrors.find_errors(self.project, self.mod) self.assertEqual(1, len(result)) self.assertEqual(1, result[0].lineno) def test_defined_later(self): self.mod.write('print(var)\nvar = 1\n') result = finderrors.find_errors(self.project, self.mod) self.assertEqual(1, len(result)) self.assertEqual(1, result[0].lineno) def test_ignoring_builtins(self): self.mod.write('range(2)\n') result = finderrors.find_errors(self.project, self.mod) self.assertEqual(0, len(result)) def test_ignoring_none(self): self.mod.write('var = None\n') result = finderrors.find_errors(self.project, self.mod) self.assertEqual(0, len(result)) def test_bad_attributes(self): code = 'class C(object):\n' \ ' pass\n' \ 'c = C()\n' \ 'print(c.var)\n' self.mod.write(code) result = finderrors.find_errors(self.project, self.mod) self.assertEqual(1, len(result)) self.assertEqual(4, result[0].lineno) if __name__ == '__main__': unittest.main()
dace/transformation/interstate/multistate_inline.py
jnice-81/dace
227
12616330
<gh_stars>100-1000 # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. """ Inline multi-state SDFGs. """ import ast from collections import defaultdict from copy import deepcopy as dc from dace.frontend.python.ndloop import ndrange import itertools import networkx as nx from typing import Callable, Dict, Iterable, List, Set, Optional, Tuple, Union import warnings from dace import memlet, registry, sdfg as sd, Memlet, symbolic, dtypes, subsets from dace.frontend.python import astutils from dace.sdfg import nodes, propagation from dace.sdfg.graph import MultiConnectorEdge, SubgraphView from dace.sdfg import SDFG, SDFGState from dace.sdfg import utils as sdutil, infer_types, propagation from dace.transformation import transformation, helpers from dace.properties import make_properties, Property from dace import data @registry.autoregister_params(singlestate=True, strict=False) @make_properties class InlineMultistateSDFG(transformation.Transformation): """ Inlines a multi-state nested SDFG into a top-level SDFG. This only happens if the state has the nested SDFG node isolated (i.e., only containing it and input/output access nodes), and thus the state machines can be combined. """ nested_sdfg = transformation.PatternNode(nodes.NestedSDFG) @staticmethod def annotates_memlets(): return True @staticmethod def expressions(): return [sdutil.node_path_graph(InlineMultistateSDFG.nested_sdfg)] @staticmethod def _check_strides(inner_strides: List[symbolic.SymbolicType], outer_strides: List[symbolic.SymbolicType], memlet: Memlet, nested_sdfg: nodes.NestedSDFG) -> bool: """ Returns True if the strides of the inner array can be matched to the strides of the outer array upon inlining. Takes into consideration memlet (un)squeeze and nested SDFG symbol mapping. :param inner_strides: The strides of the array inside the nested SDFG. :param outer_strides: The strides of the array in the external SDFG. :param nested_sdfg: Nested SDFG node with symbol mapping. :return: True if all strides match, False otherwise. """ # Replace all inner symbols based on symbol mapping istrides = list(inner_strides) def replfunc(mapping): for i, s in enumerate(istrides): if symbolic.issymbolic(s): istrides[i] = s.subs(mapping) symbolic.safe_replace(nested_sdfg.symbol_mapping, replfunc) if istrides == list(outer_strides): return True # Take unsqueezing into account dims_to_ignore = [ i for i, s in enumerate(memlet.subset.size()) if s == 1 ] ostrides = [ os for i, os in enumerate(outer_strides) if i not in dims_to_ignore ] if len(ostrides) == 0: ostrides = [1] if len(ostrides) != len(istrides): return False return all(istr == ostr for istr, ostr in zip(istrides, ostrides)) def can_be_applied(self, state: SDFGState, candidate, expr_index, sdfg, strict=False): nested_sdfg = self.nested_sdfg(sdfg) if nested_sdfg.no_inline: return False # Ensure the state only contains a nested SDFG and input/output access # nodes for node in state.nodes(): if isinstance(node, nodes.NestedSDFG): if node is not nested_sdfg: return False elif isinstance(node, nodes.AccessNode): # Must be connected to nested SDFG # if nested_sdfg in state.predecessors(nested_sdfg): # if state.in_degree(node) > 0: # return False found = False for e in state.out_edges(node): if e.dst is not nested_sdfg: return False if state.in_degree(node) > 0: return False # Only accept full ranges for now. TODO(later): Improve if e.data.subset != subsets.Range.from_array( sdfg.arrays[node.data]): return False # Do not accept views. 
TODO(later): Improve outer_desc = sdfg.arrays[node.data] inner_desc = nested_sdfg.sdfg.arrays[e.dst_conn] if (outer_desc.shape != inner_desc.shape or outer_desc.strides != inner_desc.strides): return False found = True for e in state.in_edges(node): if e.src is not nested_sdfg: return False if state.out_degree(node) > 0: return False # Only accept full ranges for now. TODO(later): Improve if e.data.subset != subsets.Range.from_array( sdfg.arrays[node.data]): return False # Do not accept views. TODO(later): Improve outer_desc = sdfg.arrays[node.data] inner_desc = nested_sdfg.sdfg.arrays[e.src_conn] if (outer_desc.shape != inner_desc.shape or outer_desc.strides != inner_desc.strides): return False found = True # elif nested_sdfg in state.successors(nested_sdfg): # if state.out_degree(node) > 0: # return False if not found: return False else: return False return True @staticmethod def match_to_str(graph, candidate): return graph.label def apply(self, sdfg: SDFG): outer_state: SDFGState = sdfg.nodes()[self.state_id] nsdfg_node = self.nested_sdfg(sdfg) nsdfg: SDFG = nsdfg_node.sdfg if nsdfg_node.schedule is not dtypes.ScheduleType.Default: infer_types.set_default_schedule_and_storage_types( nsdfg, nsdfg_node.schedule) ####################################################### # Collect and update top-level SDFG metadata # Global/init/exit code for loc, code in nsdfg.global_code.items(): sdfg.append_global_code(code.code, loc) for loc, code in nsdfg.init_code.items(): sdfg.append_init_code(code.code, loc) for loc, code in nsdfg.exit_code.items(): sdfg.append_exit_code(code.code, loc) # Environments for nstate in nsdfg.nodes(): for node in nstate.nodes(): if isinstance(node, nodes.CodeNode): node.environments |= nsdfg_node.environments # Constants for cstname, cstval in nsdfg.constants.items(): if cstname in sdfg.constants: if cstval != sdfg.constants[cstname]: warnings.warn('Constant value mismatch for "%s" while ' 'inlining SDFG. 
Inner = %s != %s = outer' % (cstname, cstval, sdfg.constants[cstname])) else: sdfg.add_constant(cstname, cstval) # Find original source/destination edges (there is only one edge per # connector, according to match) inputs: Dict[str, MultiConnectorEdge] = {} outputs: Dict[str, MultiConnectorEdge] = {} input_set: Dict[str, str] = {} output_set: Dict[str, str] = {} for e in outer_state.in_edges(nsdfg_node): inputs[e.dst_conn] = e input_set[e.data.data] = e.dst_conn for e in outer_state.out_edges(nsdfg_node): outputs[e.src_conn] = e output_set[e.data.data] = e.src_conn # Replace symbols using invocation symbol mapping # Two-step replacement (N -> __dacesym_N --> map[N]) to avoid clashes symbolic.safe_replace(nsdfg_node.symbol_mapping, nsdfg.replace_dict) # Access nodes that need to be reshaped # reshapes: Set(str) = set() # for aname, array in nsdfg.arrays.items(): # if array.transient: # continue # edge = None # if aname in inputs: # edge = inputs[aname] # if len(array.shape) > len(edge.data.subset): # reshapes.add(aname) # continue # if aname in outputs: # edge = outputs[aname] # if len(array.shape) > len(edge.data.subset): # reshapes.add(aname) # continue # if edge is not None and not InlineMultistateSDFG._check_strides( # array.strides, sdfg.arrays[edge.data.data].strides, # edge.data, nsdfg_node): # reshapes.add(aname) # Mapping from nested transient name to top-level name transients: Dict[str, str] = {} # All transients become transients of the parent (if data already # exists, find new name) for nstate in nsdfg.nodes(): for node in nstate.nodes(): if isinstance(node, nodes.AccessNode): datadesc = nsdfg.arrays[node.data] if node.data not in transients and datadesc.transient: new_name = node.data if (new_name in sdfg.arrays or new_name in sdfg.symbols or new_name in sdfg.constants): new_name = f'{nsdfg.label}_{node.data}' name = sdfg.add_datadesc(new_name, datadesc, find_new_name=True) transients[node.data] = name # All transients of edges between code nodes are also added to parent for edge in nstate.edges(): if (isinstance(edge.src, nodes.CodeNode) and isinstance(edge.dst, nodes.CodeNode)): if edge.data.data is not None: datadesc = nsdfg.arrays[edge.data.data] if edge.data.data not in transients and datadesc.transient: new_name = edge.data.data if (new_name in sdfg.arrays or new_name in sdfg.symbols or new_name in sdfg.constants): new_name = f'{nsdfg.label}_{edge.data.data}' name = sdfg.add_datadesc(new_name, datadesc, find_new_name=True) transients[edge.data.data] = name ####################################################### # Replace data on inlined SDFG nodes/edges # Replace data names with their top-level counterparts repldict = {} repldict.update(transients) repldict.update({ k: v.data.data for k, v in itertools.chain(inputs.items(), outputs.items()) }) symbolic.safe_replace(repldict, nsdfg.replace_dict, value_as_string=True) # Add views whenever reshapes are necessary # for dname in reshapes: # desc = nsdfg.arrays[dname] # # To avoid potential confusion, rename protected __return keyword # if dname.startswith('__return'): # newname = f'{nsdfg.name}_ret{dname[8:]}' # else: # newname = dname # newname, _ = sdfg.add_view(newname, # desc.shape, # desc.dtype, # storage=desc.storage, # strides=desc.strides, # offset=desc.offset, # debuginfo=desc.debuginfo, # allow_conflicts=desc.allow_conflicts, # total_size=desc.total_size, # alignment=desc.alignment, # may_alias=desc.may_alias, # find_new_name=True) # repldict[dname] = newname # Add extra access nodes for out/in view nodes # 
inv_reshapes = {repldict[r]: r for r in reshapes} # for nstate in nsdfg.nodes(): # for node in nstate.nodes(): # if isinstance(node, # nodes.AccessNode) and node.data in inv_reshapes: # if nstate.in_degree(node) > 0 and nstate.out_degree( # node) > 0: # # Such a node has to be in the output set # edge = outputs[inv_reshapes[node.data]] # # Redirect outgoing edges through access node # out_edges = list(nstate.out_edges(node)) # anode = nstate.add_access(edge.data.data) # vnode = nstate.add_access(node.data) # nstate.add_nedge(node, anode, edge.data) # nstate.add_nedge(anode, vnode, edge.data) # for e in out_edges: # nstate.remove_edge(e) # nstate.add_edge(vnode, e.src_conn, e.dst, # e.dst_conn, e.data) # Make unique names for states statenames = set(s.label for s in sdfg.nodes()) for nstate in nsdfg.nodes(): if nstate.label in statenames: newname = data.find_new_name(nstate.label, statenames) statenames.add(newname) nstate.set_label(newname) ####################################################### # Collect and modify interstate edges as necessary outer_assignments = set() for e in sdfg.edges(): outer_assignments |= e.data.assignments.keys() inner_assignments = set() for e in nsdfg.edges(): inner_assignments |= e.data.assignments.keys() assignments_to_replace = inner_assignments & outer_assignments sym_replacements: Dict[str, str] = {} allnames = set(sdfg.symbols.keys()) | set(sdfg.arrays.keys()) for assign in assignments_to_replace: newname = data.find_new_name(assign, allnames) allnames.add(newname) sym_replacements[assign] = newname nsdfg.replace_dict(sym_replacements) ####################################################### # Add nested SDFG states into top-level SDFG sdfg.add_nodes_from(nsdfg.nodes()) for ise in nsdfg.edges(): sdfg.add_edge(ise.src, ise.dst, ise.data) ####################################################### # Reconnect inlined SDFG source = nsdfg.start_state sinks = nsdfg.sink_nodes() # Reconnect state machine for e in sdfg.in_edges(outer_state): sdfg.add_edge(e.src, source, e.data) for e in sdfg.out_edges(outer_state): for sink in sinks: sdfg.add_edge(sink, e.dst, e.data) # Modify start state as necessary if sdfg.start_state is outer_state: sdfg.start_state = sdfg.node_id(source) # TODO: Modify memlets by offsetting # If both source and sink nodes are inputs/outputs, reconnect once # edges_to_ignore = self._modify_access_to_access(new_incoming_edges, # nsdfg, nstate, state, # orig_data) # source_to_outer = {n: e.src for n, e in new_incoming_edges.items()} # sink_to_outer = {n: e.dst for n, e in new_outgoing_edges.items()} # # If a source/sink node is one of the inputs/outputs, reconnect it, # # replacing memlets in outgoing/incoming paths # modified_edges = set() # modified_edges |= self._modify_memlet_path(new_incoming_edges, nstate, # state, sink_to_outer, True, # edges_to_ignore) # modified_edges |= self._modify_memlet_path(new_outgoing_edges, nstate, # state, source_to_outer, # False, edges_to_ignore) # # Reshape: add connections to viewed data # self._modify_reshape_data(reshapes, repldict, inputs, nstate, state, # True) # self._modify_reshape_data(reshapes, repldict, outputs, nstate, state, # False) # Modify all other internal edges pertaining to input/output nodes # for nstate in nsdfg.nodes(): # for node in nstate.nodes(): # if isinstance(node, nodes.AccessNode): # if node.data in input_set or node.data in output_set: # if node.data in input_set: # outer_edge = inputs[input_set[node.data]] # else: # outer_edge = outputs[output_set[node.data]] # for edge in 
state.all_edges(node): # if (edge not in modified_edges # and edge.data.data == node.data): # for e in state.memlet_tree(edge): # if e.data.data == node.data: # e._data = helpers.unsqueeze_memlet( # e.data, outer_edge.data) # Replace nested SDFG parents with new SDFG for nstate in nsdfg.nodes(): nstate.parent = sdfg for node in nstate.nodes(): if isinstance(node, nodes.NestedSDFG): node.sdfg.parent_sdfg = sdfg node.sdfg.parent_nsdfg_node = node ####################################################### # Remove nested SDFG and state sdfg.remove_node(outer_state) # def _modify_access_to_access( # self, # input_edges: Dict[nodes.Node, MultiConnectorEdge], # nsdfg: SDFG, # nstate: SDFGState, # state: SDFGState, # orig_data: Dict[Union[nodes.AccessNode, MultiConnectorEdge], str], # ) -> Set[MultiConnectorEdge]: # """ # Deals with access->access edges where both sides are non-transient. # """ # result = set() # for node, top_edge in input_edges.items(): # for inner_edge in nstate.out_edges(node): # if inner_edge.dst not in orig_data: # continue # inner_data = orig_data[inner_edge.dst] # if (isinstance(inner_edge.dst, nodes.AccessNode) # and not nsdfg.arrays[inner_data].transient): # matching_edge: MultiConnectorEdge = next( # state.out_edges_by_connector(top_edge.dst, inner_data)) # # Create memlet by unsqueezing both w.r.t. src and dst # # subsets # in_memlet = helpers.unsqueeze_memlet( # inner_edge.data, top_edge.data) # out_memlet = helpers.unsqueeze_memlet( # inner_edge.data, matching_edge.data) # new_memlet = in_memlet # new_memlet.other_subset = out_memlet.subset # # Connect with new edge # state.add_edge(top_edge.src, top_edge.src_conn, # matching_edge.dst, matching_edge.dst_conn, # new_memlet) # result.add(inner_edge) # return result # def _modify_memlet_path( # self, # new_edges: Dict[nodes.Node, MultiConnectorEdge], # nstate: SDFGState, # state: SDFGState, # inner_to_outer: Dict[nodes.Node, MultiConnectorEdge], # inputs: bool, # edges_to_ignore: Set[MultiConnectorEdge], # ) -> Set[MultiConnectorEdge]: # """ Modifies memlet paths in an inlined SDFG. Returns set of modified # edges. 
# """ # result = set() # for node, top_edge in new_edges.items(): # inner_edges = (nstate.out_edges(node) # if inputs else nstate.in_edges(node)) # for inner_edge in inner_edges: # if inner_edge in edges_to_ignore: # continue # new_memlet = helpers.unsqueeze_memlet(inner_edge.data, # top_edge.data) # if inputs: # if inner_edge.dst in inner_to_outer: # dst = inner_to_outer[inner_edge.dst] # else: # dst = inner_edge.dst # new_edge = state.add_edge(top_edge.src, top_edge.src_conn, # dst, inner_edge.dst_conn, # new_memlet) # mtree = state.memlet_tree(new_edge) # else: # if inner_edge.src in inner_to_outer: # # don't add edges twice # continue # new_edge = state.add_edge(inner_edge.src, # inner_edge.src_conn, top_edge.dst, # top_edge.dst_conn, new_memlet) # mtree = state.memlet_tree(new_edge) # # Modify all memlets going forward/backward # def traverse(mtree_node): # result.add(mtree_node.edge) # mtree_node.edge._data = helpers.unsqueeze_memlet( # mtree_node.edge.data, top_edge.data) # for child in mtree_node.children: # traverse(child) # for child in mtree.children: # traverse(child) # return result # def _modify_reshape_data(self, reshapes: Set[str], repldict: Dict[str, str], # new_edges: Dict[str, MultiConnectorEdge], # nstate: SDFGState, state: SDFGState, inputs: bool): # anodes = nstate.source_nodes() if inputs else nstate.sink_nodes() # reshp = {repldict[r]: r for r in reshapes} # for node in anodes: # if not isinstance(node, nodes.AccessNode): # continue # if node.data not in reshp: # continue # edge = new_edges[reshp[node.data]] # if inputs: # state.add_edge(edge.src, edge.src_conn, node, None, edge.data) # else: # state.add_edge(node, None, edge.dst, edge.dst_conn, edge.data)
scripts/ci/util.py
haroonf/azure-cli-extensions
207
12616334
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import os import re import json import zipfile # copy from wheel==0.30.0 WHEEL_INFO_RE = re.compile( r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?) ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) \.whl|\.dist-info)$""", re.VERBOSE).match def get_repo_root(): current_dir = os.path.dirname(os.path.abspath(__file__)) while not os.path.exists(os.path.join(current_dir, 'CONTRIBUTING.rst')): current_dir = os.path.dirname(current_dir) return current_dir def _get_extension_modname(ext_dir): # Modification of https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/extension.py#L153 EXTENSIONS_MOD_PREFIX = 'azext_' pos_mods = [n for n in os.listdir(ext_dir) if n.startswith(EXTENSIONS_MOD_PREFIX) and os.path.isdir(os.path.join(ext_dir, n))] if len(pos_mods) != 1: raise AssertionError("Expected 1 module to load starting with " "'{}': got {}".format(EXTENSIONS_MOD_PREFIX, pos_mods)) return pos_mods[0] def _get_azext_metadata(ext_dir): # Modification of https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/extension.py#L109 AZEXT_METADATA_FILENAME = 'azext_metadata.json' azext_metadata = None ext_modname = _get_extension_modname(ext_dir=ext_dir) azext_metadata_filepath = os.path.join(ext_dir, ext_modname, AZEXT_METADATA_FILENAME) if os.path.isfile(azext_metadata_filepath): with open(azext_metadata_filepath) as f: azext_metadata = json.load(f) return azext_metadata def get_ext_metadata(ext_dir, ext_file, ext_name): # Modification of https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/extension.py#L89 WHL_METADATA_FILENAME = 'metadata.json' zip_ref = zipfile.ZipFile(ext_file, 'r') zip_ref.extractall(ext_dir) zip_ref.close() metadata = {} dist_info_dirs = [f for f in os.listdir(ext_dir) if f.endswith('.dist-info')] azext_metadata = _get_azext_metadata(ext_dir) if not azext_metadata: raise ValueError('azext_metadata.json for Extension "{}" Metadata is missing'.format(ext_name)) metadata.update(azext_metadata) for dist_info_dirname in dist_info_dirs: parsed_dist_info_dir = WHEEL_INFO_RE(dist_info_dirname) if parsed_dist_info_dir and parsed_dist_info_dir.groupdict().get('name') == ext_name.replace('-', '_'): whl_metadata_filepath = os.path.join(ext_dir, dist_info_dirname, WHL_METADATA_FILENAME) if os.path.isfile(whl_metadata_filepath): with open(whl_metadata_filepath) as f: metadata.update(json.load(f)) return metadata def get_whl_from_url(url, filename, tmp_dir, whl_cache=None): if not whl_cache: whl_cache = {} if url in whl_cache: return whl_cache[url] import requests TRIES = 3 for try_number in range(TRIES): try: r = requests.get(url, stream=True) assert r.status_code == 200, "Request to {} failed with {}".format(url, r.status_code) break except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as err: import time time.sleep(0.5) continue ext_file = os.path.join(tmp_dir, filename) with open(ext_file, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # ignore keep-alive new chunks f.write(chunk) whl_cache[url] = ext_file return ext_file SRC_PATH = os.path.join(get_repo_root(), 'src') INDEX_PATH = os.path.join(SRC_PATH, 'index.json') def 
_catch_dup_keys(pairs): seen = {} for k, v in pairs: if k in seen: raise ValueError("duplicate key {}".format(k)) seen[k] = v return seen def get_index_data(): try: with open(INDEX_PATH) as f: return json.load(f, object_pairs_hook=_catch_dup_keys) except ValueError as err: raise AssertionError("Invalid JSON in {}: {}".format(INDEX_PATH, err))
app/apiv2/plans/plan.py
Joey-Wondersign/Staffjoy-suite-Joey
890
12616339
<gh_stars>100-1000 from flask_restful import abort, Resource from app.constants import API_ENVELOPE from app.plans import plans from app.constants import PLAN_PUBLIC_KEYS class PlanApi(Resource): # Any authenticated user can access def get(self, plan_id): if plan_id not in plans.keys(): abort(404) plan_data = plans[plan_id] clean_plan = {"id": plan_id} for key in PLAN_PUBLIC_KEYS: clean_plan[key] = plan_data.get(key) return { API_ENVELOPE: clean_plan, }
tests/brevitas/test_brevitas_non_scaled_QuantHardTanh_export.py
AlexMontgomerie/finn
283
12616347
# Copyright (c) 2020, Xilinx # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of FINN nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import onnx # noqa import numpy as np import torch import brevitas.onnx as bo from brevitas.nn import QuantHardTanh from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType import pytest from finn.core.modelwrapper import ModelWrapper import finn.core.onnx_exec as oxe from finn.transformation.infer_shapes import InferShapes from brevitas.core.quant import QuantType export_onnx_path = "test_brevitas_non_scaled_QuantHardTanh_export.onnx" @pytest.mark.parametrize("abits", [1, 2, 4, 8]) @pytest.mark.parametrize("narrow_range", [False, True]) @pytest.mark.parametrize("max_val", [1.0, 1 - 2 ** (-7)]) def test_brevitas_act_export_qhardtanh_nonscaled(abits, narrow_range, max_val): def get_quant_type(bit_width): if bit_width is None: return QuantType.FP elif bit_width == 1: return QuantType.BINARY else: return QuantType.INT act_quant_type = get_quant_type(abits) min_val = -1.0 ishape = (1, 10) b_act = QuantHardTanh( bit_width=abits, quant_type=act_quant_type, max_val=max_val, min_val=min_val, restrict_scaling_type=RestrictValueType.LOG_FP, scaling_impl_type=ScalingImplType.CONST, narrow_range=narrow_range, ) bo.export_finn_onnx(b_act, ishape, export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( np.float32 ) idict = {model.graph.input[0].name: inp_tensor} odict = oxe.execute_onnx(model, idict, True) produced = odict[model.graph.output[0].name] inp_tensor = torch.from_numpy(inp_tensor).float() expected = b_act.forward(inp_tensor).detach().numpy() assert np.isclose(produced, expected, atol=1e-3).all() os.remove(export_onnx_path)
braindecode/models/shallow_fbcsp.py
sylvchev/braindecode
260
12616353
<reponame>sylvchev/braindecode<gh_stars>100-1000 import numpy as np from torch import nn from torch.nn import init from braindecode.models.base import BaseModel from braindecode.torch_ext.modules import Expression from braindecode.torch_ext.functions import safe_log, square from braindecode.torch_ext.util import np_to_var class ShallowFBCSPNet(BaseModel): """ Shallow ConvNet model from [2]_. References ---------- .. [2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. (2017). Deep learning with convolutional neural networks for EEG decoding and visualization. Human Brain Mapping , Aug. 2017. Online: http://dx.doi.org/10.1002/hbm.23730 """ def __init__( self, in_chans, n_classes, input_time_length=None, n_filters_time=40, filter_time_length=25, n_filters_spat=40, pool_time_length=75, pool_time_stride=15, final_conv_length=30, conv_nonlin=square, pool_mode="mean", pool_nonlin=safe_log, split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1, drop_prob=0.5, ): if final_conv_length == "auto": assert input_time_length is not None self.__dict__.update(locals()) del self.self def create_network(self): pool_class = dict(max=nn.MaxPool2d, mean=nn.AvgPool2d)[self.pool_mode] model = nn.Sequential() if self.split_first_layer: model.add_module("dimshuffle", Expression(_transpose_time_to_spat)) model.add_module( "conv_time", nn.Conv2d( 1, self.n_filters_time, (self.filter_time_length, 1), stride=1, ), ) model.add_module( "conv_spat", nn.Conv2d( self.n_filters_time, self.n_filters_spat, (1, self.in_chans), stride=1, bias=not self.batch_norm, ), ) n_filters_conv = self.n_filters_spat else: model.add_module( "conv_time", nn.Conv2d( self.in_chans, self.n_filters_time, (self.filter_time_length, 1), stride=1, bias=not self.batch_norm, ), ) n_filters_conv = self.n_filters_time if self.batch_norm: model.add_module( "bnorm", nn.BatchNorm2d( n_filters_conv, momentum=self.batch_norm_alpha, affine=True ), ) model.add_module("conv_nonlin", Expression(self.conv_nonlin)) model.add_module( "pool", pool_class( kernel_size=(self.pool_time_length, 1), stride=(self.pool_time_stride, 1), ), ) model.add_module("pool_nonlin", Expression(self.pool_nonlin)) model.add_module("drop", nn.Dropout(p=self.drop_prob)) model.eval() if self.final_conv_length == "auto": out = model( np_to_var( np.ones( (1, self.in_chans, self.input_time_length, 1), dtype=np.float32, ) ) ) n_out_time = out.cpu().data.numpy().shape[2] self.final_conv_length = n_out_time model.add_module( "conv_classifier", nn.Conv2d( n_filters_conv, self.n_classes, (self.final_conv_length, 1), bias=True, ), ) model.add_module("softmax", nn.LogSoftmax(dim=1)) model.add_module("squeeze", Expression(_squeeze_final_output)) # Initialization, xavier is same as in paper... 
init.xavier_uniform_(model.conv_time.weight, gain=1) # maybe no bias in case of no split layer and batch norm if self.split_first_layer or (not self.batch_norm): init.constant_(model.conv_time.bias, 0) if self.split_first_layer: init.xavier_uniform_(model.conv_spat.weight, gain=1) if not self.batch_norm: init.constant_(model.conv_spat.bias, 0) if self.batch_norm: init.constant_(model.bnorm.weight, 1) init.constant_(model.bnorm.bias, 0) init.xavier_uniform_(model.conv_classifier.weight, gain=1) init.constant_(model.conv_classifier.bias, 0) return model # remove empty dim at end and potentially remove empty time dim # do not just use squeeze as we never want to remove first dim def _squeeze_final_output(x): assert x.size()[3] == 1 x = x[:, :, :, 0] if x.size()[2] == 1: x = x[:, :, 0] return x def _transpose_time_to_spat(x): return x.permute(0, 3, 2, 1)
locations/spiders/goddard_school.py
davidchiles/alltheplaces
297
12616404
<reponame>davidchiles/alltheplaces # -*- coding: utf-8 -*- import scrapy import re from locations.items import GeojsonPointItem class GoddardSchoolSpider(scrapy.Spider): name = "goddard_school" item_attributes = { 'brand': "Goddard School" } allowed_domains = ["www.goddardschool.com"] start_urls = ( 'https://www.goddardschool.com/LocationsXML.aspx', ) def store_hours(self, hours_string): match = re.match(r'^(\w+) - (\w+): (\d{1,2}):(\d{2}) (am|pm) - (\d{1,2}):(\d{2}) (am|pm)$', hours_string) (f_dow, t_dow, f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups() f_hr = int(f_hr) f_min = int(f_min) t_hr = int(t_hr) t_min = int(t_min) if f_ampm == 'pm': f_hr = int(f_hr) + 12 if t_ampm == 'pm': t_hr = int(t_hr) + 12 return '{}-{} {:02d}:{:02d}-{:02d}:{:02d}'.format( f_dow[:2], t_dow[:2], f_hr, f_min, t_hr, t_min, ) def parse(self, response): for marker_elem in response.xpath('//marker'): yield scrapy.Request( response.urljoin(marker_elem.xpath('@url')[0].extract()), callback=self.parse_location, meta={ 'ref': marker_elem.xpath('@id')[0].extract(), 'name': marker_elem.xpath('@name')[0].extract(), 'addr_full': marker_elem.xpath('@address')[0].extract(), 'city': marker_elem.xpath('@city')[0].extract(), 'state': marker_elem.xpath('@state')[0].extract(), 'postcode': marker_elem.xpath('@zip')[0].extract(), 'phone': marker_elem.xpath('@phone')[0].extract(), 'lat': marker_elem.xpath('@lat')[0].extract(), 'lon': marker_elem.xpath('@lng')[0].extract(), } ) def parse_location(self, response): properties = { 'addr_full': response.meta['addr_full'], 'city': response.meta['city'], 'state': response.meta['state'], 'postcode': response.meta['postcode'], 'ref': response.meta['ref'], 'website': response.url, 'lon': float(response.meta['lon']), 'lat': float(response.meta['lat']), } hours_elem = response.xpath('//span[@itemprop="hours"]/text()') if hours_elem: properties['opening_hours'] = self.store_hours(hours_elem[0].extract()) yield GeojsonPointItem(**properties)
pxr/usd/usdGeom/testenv/testUsdGeomConsts.py
DougRogers-DigitalFish/USD
3,680
12616434
#!/pxrpythonsubst # # Copyright 2017 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. import sys, os, unittest from pxr import Sdf, Usd, UsdGeom class TestUsdGeomConsts(unittest.TestCase): def _WriteConst(self, prim, attrName, typeStr, constVal): """Add a const to the prim""" attr = prim.CreateAttribute(attrName, typeStr) self.assertTrue(attr.IsDefined()) attr.Set(constVal) def _CompareAuthoredToConst(self, prim, attrName, constVal): """Ensure the const that we previously serialized compares equal to the source of the value we authored (i.e. the actual schema const) """ attr = prim.GetAttribute(attrName) self.assertTrue(attr.IsDefined()) val = attr.Get() self.assertEqual(constVal, val, "Round-tripped constant '%s' did not compare " "equal to schema constant. " "(schema: %s, roundtripped: %s)" % (attrName, repr(constVal), repr(val))) def _CompareAuthoredToArchived(self ,prim, attrName, archivePrim): """To catch ourselves if we inadvertently change the value of a const in a schema (which would be really bad to do unwittingly), we compare the value we wrote and read back in to a value from a baseline usda file of the same layout. """ attr = prim.GetAttribute(attrName) self.assertTrue(attr.IsDefined()) newVal = attr.Get() archAttr = archivePrim.GetAttribute(attrName) self.assertTrue(archAttr.IsDefined()) archVal = archAttr.Get() self.assertEqual(archVal, newVal, "Baseline archived constant '%s' did not " "compare equal to schema constant " "(baseline: %s, schema: %s)." 
% (attrName, repr(archVal), repr(newVal))) def test_Basic(self): lyr = Sdf.Layer.CreateNew("testConsts.usda") self.assertTrue(lyr) # To update this file, just grab the result of a run of this test (disabling # the archive comparison, so it will succeed) from the test-run directory archiveFn = 'testConsts.usda' archiveLyr = Sdf.Layer.FindOrOpen(archiveFn) self.assertTrue(archiveLyr) stage = Usd.Stage.Open("testConsts.usda") self.assertTrue(stage) archiveStage = Usd.Stage.Open(archiveFn) self.assertTrue(archiveStage) prim = UsdGeom.Scope.Define(stage, "/ConstRoot").GetPrim() self.assertTrue(prim) archivePrim = archiveStage.GetPrimAtPath("/ConstRoot") self.assertTrue(archivePrim) testConsts = (('SHARPNESS_INFINITE', Sdf.ValueTypeNames.Float, UsdGeom.Mesh.SHARPNESS_INFINITE), ) # # First write out all the consts # for ( name, cType, cVal ) in testConsts: self._WriteConst( prim, name, cType, cVal ) lyr.Save() # # Now, Rebuild the stage from a fresh read of the file # stage = None lyr = None lyr = Sdf.Layer.FindOrOpen("testConsts.usda") self.assertTrue(lyr) stage = Usd.Stage.Open("testConsts.usda") self.assertTrue(stage) prim = stage.GetPrimAtPath("/ConstRoot") self.assertTrue(prim) # # Finally, compare the (from code) const values with both # the just-serialized values (reread), and the archived values # for ( name, cType, cVal ) in testConsts: self._CompareAuthoredToConst(prim, name, cVal ) self._CompareAuthoredToArchived(prim, name, archivePrim) if __name__ == "__main__": unittest.main()
Anaconda-files/Program_14c.py
arvidl/dynamical-systems-with-applications-using-python
106
12616442
<gh_stars>100-1000 # Program 14c: Lyapunov exponents of the logistic map. # See Figure 14.18. import numpy as np import matplotlib.pyplot as plt num_points = 16000 result = [] lambdas = [] maps = [] xmin, xmax = 3, 4 mult = (xmax - xmin) * num_points mu_values = np.arange(xmin, xmax, 20/num_points) for r in mu_values: x = 0.1 result = [] for t in range(100): x = r * x * (1 - x) result.append(np.log(abs(r - 2*r*x))) lambdas.append(np.mean(result)) # Ignore first 100 iterates. for t in range(20): x = r * x * (1 - x) maps.append(x) fig = plt.figure(figsize=(10,7)) ax1 = fig.add_subplot(1,1,1) xticks = np.linspace(xmin, xmax, mult) zero = [0] * mult ax1.plot(xticks, zero, 'k-', linewidth=3) ax1.plot(xticks, maps,'r.', alpha = 0.3, label='Logistic map') ax1.set_xlabel('r') ax1.plot(mu_values, lambdas, 'b-', linewidth=1, label='Lyapunov exponent') ax1.grid('on') ax1.set_ylim(-1, 1) ax1.set_xlabel('$\mu$', fontsize=15) ax1.legend(loc='best') ax1.set_title('Logistic map versus Lyapunov exponent', fontsize=15) plt.show()
pc_software/build_linux.py
joelnb/duckyPad
578
12616462
import os import sys import shutil os.system('rm -rfv ./__pycache__') os.system('rm -rfv ./build') os.system('rm -rfv ./dist') os.system('rm -rfv ./*.zip') THIS_VERSION = None try: mainfile = open('duckypad_config.py') for line in mainfile: if "THIS_VERSION_NUMBER =" in line: THIS_VERSION = line.replace('\n', '').replace('\r', '').split("'")[-2] mainfile.close() except Exception as e: print('build_linux exception:', e) exit() if THIS_VERSION is None: print('could not find version number!') exit() print(THIS_VERSION) py_file_list = [x for x in os.listdir('.') if x.endswith('.py')] py_file_list = [x for x in py_file_list if 'build_' not in x.lower() and 'setup.py' not in x.lower()] print(py_file_list) output_dir_name = "duckypad_config_" + THIS_VERSION + "_source" if os.path.isdir(output_dir_name): shutil.rmtree(output_dir_name) os.mkdir(output_dir_name) output_dir_path = os.path.join('.', output_dir_name) print(output_dir_path) for item in py_file_list: shutil.copy(item, output_dir_path) zip_file_name = "duckypad_config_" + THIS_VERSION + "_source.zip" os.system('7z.exe a ' + zip_file_name + ' -r ' + output_dir_path) if os.path.isdir(output_dir_path): shutil.rmtree(output_dir_path)
finrl_meta/env_future_trading/wt4elegantrl/wtpy/apps/datahelper/DHFactory.py
eitin-infant/FinRL-Meta
214
12616469
<reponame>eitin-infant/FinRL-Meta<filename>finrl_meta/env_future_trading/wt4elegantrl/wtpy/apps/datahelper/DHFactory.py
from wtpy.apps.datahelper.DHDefs import BaseDataHelper
from wtpy.apps.datahelper.DHBaostock import DHBaostock
from wtpy.apps.datahelper.DHTushare import DHTushare
from wtpy.apps.datahelper.DHRqData import DHRqData

class DHFactory:

    @staticmethod
    def createHelper(name:str) -> BaseDataHelper:
        '''
        Create a data helper module\n
        @name  helper name; currently supported: tushare, baostock, rqdata
        '''
        name = name.lower()
        if name == "baostock":
            return DHBaostock()
        elif name == "tushare":
            return DHTushare()
        elif name == "rqdata":
            return DHRqData()
        else:
            raise Exception("Cannot recognize helper with name %s" % (name))
atlas-examples/sample-app/src/main/python/lineage_example.py
mario-renau-alstom/atlas
1,248
12616470
#!/usr/bin/env/python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from apache_atlas.model.enums import LineageDirection LOG = logging.getLogger('lineage-example') class LineageExample: def __init__(self, client): self.client = client def lineage(self, guid): direction = LineageDirection.BOTH.name lineage_info = self.client.lineage.get_lineage_info(guid, direction, 0) if not lineage_info: LOG.info("Not able to find lineage info") return relations = lineage_info.relations guid_entity_map = lineage_info.guidEntityMap for relation in relations: from_entity = guid_entity_map[relation.fromEntityId] to_entity = guid_entity_map[relation.toEntityId] LOG.info("%s (%s) -> %s (%s)", from_entity.displayText, from_entity.typeName, to_entity.displayText, to_entity.typeName)
src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
dctelus/transformers
8,028
12616528
<reponame>dctelus/transformers<gh_stars>1000+ # coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() json_indent = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model best_score_hparams = { # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.15}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names org_names = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: org_names[m] = "facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: org_names[m] = "allenai" def rewrite_dict_keys(d): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()) keep_keys = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del d2[f"{k}</w>"] d2[k] = d[k] # restore return d2 def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path): # prep assert os.path.exists(fsmt_checkpoint_path) os.makedirs(pytorch_dump_folder_path, exist_ok=True) print(f"Writing results to {pytorch_dump_folder_path}") # handle various types of models checkpoint_file = 
basename(fsmt_checkpoint_path) fsmt_folder_path = dirname(fsmt_checkpoint_path) cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel models = cls.hub_models() kwargs = {"bpe": "fastbpe", "tokenizer": "moses"} data_name_or_path = "." # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. # see: upgrade_state_dict(state_dict) in fairseq_model.py print(f"using checkpoint {checkpoint_file}") chkpt = hub_utils.from_pretrained( fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs ) args = vars(chkpt["args"]["model"]) src_lang = args["source_lang"] tgt_lang = args["target_lang"] data_root = dirname(pytorch_dump_folder_path) model_dir = basename(pytorch_dump_folder_path) # dicts src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt") tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt") src_dict = Dictionary.load(src_dict_file) src_vocab = rewrite_dict_keys(src_dict.indices) src_vocab_size = len(src_vocab) src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json") print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records") with open(src_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent)) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab do_lower_case = True for k in src_vocab.keys(): if not k.islower(): do_lower_case = False break tgt_dict = Dictionary.load(tgt_dict_file) tgt_vocab = rewrite_dict_keys(tgt_dict.indices) tgt_vocab_size = len(tgt_vocab) tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json") print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records") with open(tgt_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent)) # merges_file (bpecodes) merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"]) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" fsmt_merges_file = os.path.join(fsmt_folder_path, fn) if os.path.exists(fsmt_merges_file): break with open(fsmt_merges_file, encoding="utf-8") as fin: merges = fin.read() merges = re.sub(r" \d+$", "", merges, 0, re.M) # remove frequency number print(f"Generating {merges_file}") with open(merges_file, "w", encoding="utf-8") as fout: fout.write(merges) # model config fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json") # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}" assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}" model_conf = { "architectures": ["FSMTForConditionalGeneration"], "model_type": "fsmt", "activation_dropout": args["activation_dropout"], "activation_function": "relu", "attention_dropout": args["attention_dropout"], "d_model": args["decoder_embed_dim"], "dropout": args["dropout"], "init_std": 0.02, "max_position_embeddings": args["max_source_positions"], "num_hidden_layers": args["encoder_layers"], "src_vocab_size": src_vocab_size, "tgt_vocab_size": 
tgt_vocab_size, "langs": [src_lang, tgt_lang], "encoder_attention_heads": args["encoder_attention_heads"], "encoder_ffn_dim": args["encoder_ffn_embed_dim"], "encoder_layerdrop": args["encoder_layerdrop"], "encoder_layers": args["encoder_layers"], "decoder_attention_heads": args["decoder_attention_heads"], "decoder_ffn_dim": args["decoder_ffn_embed_dim"], "decoder_layerdrop": args["decoder_layerdrop"], "decoder_layers": args["decoder_layers"], "bos_token_id": 0, "pad_token_id": 1, "eos_token_id": 2, "is_encoder_decoder": True, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_all_embeddings"], } # good hparam defaults to start with model_conf["num_beams"] = 5 model_conf["early_stopping"] = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"] else: model_conf["length_penalty"] = 1.0 print(f"Generating {fsmt_model_config_file}") with open(fsmt_model_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent)) # tokenizer config fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE) tokenizer_conf = { "langs": [src_lang, tgt_lang], "model_max_length": 1024, "do_lower_case": do_lower_case, } print(f"Generating {fsmt_tokenizer_config_file}") with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent)) # model model = chkpt["models"][0] model_state_dict = model.state_dict() # rename keys to start with 'model.' model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items()) # remove unneeded keys ignore_keys = [ "model.model", "model.encoder.version", "model.decoder.version", "model.encoder_embed_tokens.weight", "model.decoder_embed_tokens.weight", "model.encoder.embed_positions._float_tensor", "model.decoder.embed_positions._float_tensor", ] for k in ignore_keys: model_state_dict.pop(k, None) config = FSMTConfig.from_pretrained(pytorch_dump_folder_path) model_new = FSMTForConditionalGeneration(config) # check that it loads ok model_new.load_state_dict(model_state_dict, strict=False) # save pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) print(f"Generating {pytorch_weights_dump_path}") torch.save(model_state_dict, pytorch_weights_dump_path) print("Conversion is done!") print("\nLast step is to upload the files to s3") print(f"cd {data_root}") print(f"transformers-cli upload {model_dir}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts, bpecodes, etc.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
gaiadet/models/utils/__init__.py
zengming16/GAIA-det
149
12616534
<filename>gaiadet/models/utils/__init__.py from .dynamic_res_layer import DynamicResLayer
RecoEgamma/EgammaTools/python/lowPtElectronModifier_cfi.py
Purva-Chaudhari/cmssw
852
12616543
<gh_stars>100-1000 import FWCore.ParameterSet.Config as cms lowPtElectronModifier = cms.PSet( modifierName = cms.string('LowPtElectronModifier'), beamSpot = cms.InputTag('offlineBeamSpot'), conversions = cms.InputTag('gsfTracksOpenConversions:gsfTracksOpenConversions'), addExtraUserVars = cms.bool(True), vertices = cms.InputTag("offlineSlimmedPrimaryVertices"), )
ironic/db/sqlalchemy/alembic/versions/9cbeefa3763f_add_port_is_smartnic.py
yanndegat/ironic
350
12616554
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""add is_smartnic port attribute

Revision ID: 9cbeefa3763f
Revises: dd67b91a1981
Create Date: 2019-01-13 09:31:13.336479

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '9cbeefa3763f'
down_revision = 'dd67b91a1981'


def upgrade():
    op.add_column('ports', sa.Column('is_smartnic', sa.Boolean(), default=False))
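The migration above defines only upgrade(). For reference, a hypothetical companion downgrade() (not part of the original ironic file) would drop the column again using the standard Alembic operation:

from alembic import op


def downgrade():
    # Hypothetical reverse step: remove the column added by upgrade() above.
    op.drop_column('ports', 'is_smartnic')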
examples/hyperloglog_examples.py
yrik/datasketch
1,771
12616568
'''
Some examples for HyperLogLog
'''
from datasketch.hyperloglog import HyperLogLog

data1 = ['hyperloglog', 'is', 'a', 'probabilistic', 'data', 'structure', 'for',
         'estimating', 'the', 'cardinality', 'of', 'dataset', 'dataset', 'a']

data2 = ['hyperloglog', 'is', 'a', 'probabilistic', 'DATA', 'structure', 'for',
         'estimating', 'the', 'number', 'of', 'distinct', 'values', 'of',
         'dataset', 'dataset', 'a']


def eg1():
    h = HyperLogLog()
    for d in data1:
        h.update(d.encode('utf8'))
    print("Estimated cardinality is", h.count())
    s1 = set(data1)
    print("Actual cardinality is", len(s1))


def eg2():
    h1 = HyperLogLog()
    h2 = HyperLogLog()
    for d in data1:
        h1.update(d.encode('utf8'))
    for d in data2:
        h2.update(d.encode('utf8'))
    u = HyperLogLog.union(h1, h2)
    print("Estimated union cardinality is", u.count())
    s1 = set(data1)
    s2 = set(data2)
    su = s1.union(s2)
    print("Actual union cardinality is", len(su))


if __name__ == "__main__":
    eg1()
    eg2()
tests/test_operators_spherical.py
vaishnavtv/neurodiffeq
202
12616575
import pytest import torch import torch.nn as nn from torch import sin, cos import numpy as np from neurodiffeq import diff from neurodiffeq.generators import GeneratorSpherical from neurodiffeq.function_basis import ZonalSphericalHarmonics from neurodiffeq.networks import FCNN from neurodiffeq.operators import spherical_curl from neurodiffeq.operators import spherical_grad from neurodiffeq.operators import spherical_div from neurodiffeq.operators import spherical_laplacian from neurodiffeq.operators import spherical_vector_laplacian from neurodiffeq.operators import spherical_to_cartesian, cartesian_to_spherical @pytest.fixture(autouse=True) def magic(): torch.manual_seed(42) np.random.seed(42) class HarmonicsNN(nn.Module): def __init__(self, degrees, harmonics_fn): super(HarmonicsNN, self).__init__() self.net_r = FCNN(1, n_output_units=len(degrees)) self.harmonics_fn = harmonics_fn def forward(self, r, theta, phi): R = self.net_r(r) Y = self.harmonics_fn(theta, phi) return (R * Y).sum(dim=1, keepdim=True) EPS = 1e-4 degrees = list(range(10)) @pytest.fixture def x(): n_points, r_min, r_max = 1024, 1.0, 10.0 g = GeneratorSpherical(n_points, r_min=r_min, r_max=r_max) return [t.reshape(-1, 1) for t in g.get_examples()] @pytest.fixture def U(x): F = [HarmonicsNN(degrees, ZonalSphericalHarmonics(degrees=degrees)) for _ in range(3)] return tuple(f(*x) for f in F) @pytest.fixture def u(x): return HarmonicsNN(degrees, ZonalSphericalHarmonics(degrees=degrees))(*x) def test_cartesian_to_spherical(): x = torch.rand(1000, 1, requires_grad=True) y = torch.rand(1000, 1, requires_grad=True) z = torch.rand(1000, 1, requires_grad=True) r, theta, phi = cartesian_to_spherical(x, y, z) assert torch.allclose(r * torch.sin(theta) * cos(phi), x) assert torch.allclose(r * torch.sin(theta) * sin(phi), y) assert torch.allclose(r * torch.cos(theta), z) def test_spherical_to_cartesian(): r = torch.rand(1000, 1, requires_grad=True) theta = torch.rand(1000, 1, requires_grad=True) * np.pi phi = torch.rand(1000, 1, requires_grad=True) * np.pi * 2 x, y, z = spherical_to_cartesian(r, theta, phi) assert torch.allclose(r * torch.sin(theta) * cos(phi), x) assert torch.allclose(r * torch.sin(theta) * sin(phi), y) assert torch.allclose(r * torch.cos(theta), z) def test_spherical_div(U, x): out = spherical_div(*U, *x) ur, utheta, uphi = U r, theta, phi = x ans = diff(r ** 2 * ur, r) / r ** 2 + \ diff(utheta * sin(theta), theta) / (r * sin(theta)) + \ diff(uphi, phi) / (r * sin(theta)) assert torch.allclose(out, ans) def test_spherical_grad(u, x): out_r, out_theta, out_phi = spherical_grad(u, *x) r, theta, phi = x assert torch.allclose(out_r, diff(u, r)) assert torch.allclose(out_theta, diff(u, theta) / r) assert torch.allclose(out_phi, diff(u, phi) / (r * sin(theta))) def test_spherical_curl(U, x): out_r, out_theta, out_phi = spherical_curl(*U, *x) ur, utheta, uphi = U r, theta, phi = x assert torch.allclose(out_r, (diff(uphi * sin(theta), theta) - diff(utheta, phi)) / (r * sin(theta))) assert torch.allclose(out_theta, (diff(ur, phi) / sin(theta) - diff(r * uphi, r)) / r) assert torch.allclose(out_phi, (diff(r * utheta, r) - diff(ur, theta)) / r) def test_spherical_laplacian(u, x): out = spherical_laplacian(u, *x) r, theta, phi = x assert torch.allclose( out, diff(r ** 2 * diff(u, r), r) / r ** 2 + diff(sin(theta) * diff(u, theta), theta) / (r ** 2 * sin(theta)) + diff(u, phi, order=2) / (r ** 2 * sin(theta) ** 2) ) def test_spherical_vector_laplacian(U, x): out_r, out_theta, out_phi = spherical_vector_laplacian(*U, *x) ur, 
utheta, uphi = U r, theta, phi = x def scalar_lap(u): return diff(r ** 2 * diff(u, r), r) / r ** 2 \ + diff(sin(theta) * diff(u, theta), theta) / (r ** 2 * sin(theta)) \ + diff(u, phi, order=2) / (r ** 2 * sin(theta) ** 2) assert torch.allclose( out_r, scalar_lap(ur) - 2 * ur / r ** 2 - 2 / (r ** 2 * sin(theta)) * diff(utheta * sin(theta), theta) - 2 / (r ** 2 * sin(theta)) * diff(uphi, phi) ) assert torch.allclose( out_theta, scalar_lap(utheta) - utheta / (r ** 2 * sin(theta) ** 2) + 2 / r ** 2 * diff(ur, theta) - 2 * cos(theta) / (r ** 2 * sin(theta) ** 2) * diff(uphi, phi) ) assert torch.allclose( out_phi, scalar_lap(uphi) - uphi / (r ** 2 * sin(theta) ** 2) + 2 / (r ** 2 * sin(theta)) * diff(ur, phi) + 2 * cos(theta) / (r ** 2 * sin(theta) ** 2) * diff(utheta, phi) )
folium/elements.py
iwpnd/folium
5,451
12616597
<filename>folium/elements.py from branca.element import Figure, Element, JavascriptLink, CssLink class JSCSSMixin(Element): """Render links to external Javascript and CSS resources.""" default_js = [] default_css = [] def render(self, **kwargs): figure = self.get_root() assert isinstance(figure, Figure), ('You cannot render this Element ' 'if it is not in a Figure.') for name, url in self.default_js: figure.header.add_child(JavascriptLink(url), name=name) for name, url in self.default_css: figure.header.add_child(CssLink(url), name=name) super().render(**kwargs)
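A brief usage sketch for JSCSSMixin above (illustrative only; the plugin class, resource names, and URLs are invented, not part of folium): a concrete element subclasses the mixin together with branca's MacroElement and lists its external resources, which render() then injects into the enclosing Figure header.

from branca.element import MacroElement
from folium.elements import JSCSSMixin


class MyPlugin(JSCSSMixin, MacroElement):
    # Each entry is a (name, url) pair; render() adds one script/css link per entry.
    default_js = [('my_lib', 'https://example.com/my_lib.min.js')]
    default_css = [('my_css', 'https://example.com/my_lib.min.css')]

Adding an instance of such a class to a folium Map and rendering the map would then emit the script and stylesheet links once in the page header.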
src/sqlacodegen/utils.py
randallk/sqlacodegen
960
12616611
from typing import List, Set from sqlalchemy import CheckConstraint from sqlalchemy.engine import Connectable from sqlalchemy.sql import ClauseElement from sqlalchemy.sql.schema import ( ColumnCollectionConstraint, Constraint, ForeignKeyConstraint, Table) def get_column_names(constraint: ColumnCollectionConstraint) -> List[str]: return list(constraint.columns.keys()) def get_constraint_sort_key(constraint: Constraint) -> str: if isinstance(constraint, CheckConstraint): return f'C{constraint.sqltext}' elif isinstance(constraint, ColumnCollectionConstraint): return constraint.__class__.__name__[0] + repr(get_column_names(constraint)) else: return str(constraint) def get_compiled_expression(statement: ClauseElement, bind: Connectable) -> str: """Return the statement in a form where any placeholders have been filled in.""" return str(statement.compile(bind, compile_kwargs={"literal_binds": True})) def get_common_fk_constraints(table1: Table, table2: Table) -> Set[ForeignKeyConstraint]: """Return a set of foreign key constraints the two tables have against each other.""" c1 = set(c for c in table1.constraints if isinstance(c, ForeignKeyConstraint) and c.elements[0].column.table == table2) c2 = set(c for c in table2.constraints if isinstance(c, ForeignKeyConstraint) and c.elements[0].column.table == table1) return c1.union(c2)
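A small illustrative example of the helpers above (the table is made up and not taken from sqlacodegen's own tests): get_column_names lists a constraint's column names and get_constraint_sort_key builds the string used to order constraints deterministically.

from sqlalchemy import Column, Integer, MetaData, Table, UniqueConstraint

metadata = MetaData()
widgets = Table(
    'widgets', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', Integer),
    UniqueConstraint('name', name='uq_widgets_name'),
)

unique = next(c for c in widgets.constraints if isinstance(c, UniqueConstraint))
print(get_column_names(unique))         # ['name']
print(get_constraint_sort_key(unique))  # "U['name']": class initial plus column list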
a02_TextCNN/other_experiement/p7_TextCNN_predict_ensemble.py
sunshinenum/text_classification
7,723
12616620
<reponame>sunshinenum/text_classification from p7_TextCNN_predict import get_logits_with_value_by_input from p7_TextCNN_predict_exp import get_logits_with_value_by_input_exp import tensorflow as tf def main(_): for start in range(217360): end=start+1 label_list,p_list=get_logits_with_value_by_input(start,end) label_list_exp, p_list_exp=get_logits_with_value_by_input_exp(start,end) if start<5: print("----------------------------------------------------") print(start,"label_list0:",label_list,"p_list0:",p_list) print(start,"label_list1:", label_list_exp, "p_list1:", p_list_exp) else: break if __name__ == "__main__": tf.app.run()
anuga/file/__init__.py
samcom12/anuga_core
136
12616640
<reponame>samcom12/anuga_core
"""Common file modules.

Here you will find modules to load, save, and extract information in a
variety of different file formats.

This module takes care of reading and writing datafiles such as
topographies, model output, etc.

Formats used within AnuGA:

.sww: Netcdf format for storing model output f(t,x,y)
.tms: Netcdf format for storing time series f(t)

.csv: ASCII format for storing arbitrary points and associated attributes
.pts: NetCDF format for storing arbitrary points and associated attributes

.asc: ASCII format of regular DEMs as output from ArcView
.prj: Associated ArcView file giving more meta data for asc format
.ers: ERMapper header format of regular DEMs for ArcView

.dem: NetCDF representation of regular DEM data

.tsh: ASCII format for storing meshes and associated boundary and region info
.msh: NetCDF format for storing meshes and associated boundary and region info

.nc: Native ferret NetCDF format

A typical dataflow can be described as follows:

Manually created files:
ASC, PRJ: Digital elevation models (gridded)
TSH: Triangular meshes (e.g. created from anuga.pmesh)
NC: Model outputs for use as boundary conditions (e.g. from MOST)

AUTOMATICALLY CREATED FILES:

ASC, PRJ -> DEM -> PTS: Conversion of DEMs to native pts file

NC -> SWW: Conversion of MOST boundary files to boundary sww

PTS + TSH -> TSH with elevation: Least squares fit

TSH -> SWW: Conversion of TSH to sww viewable using Swollen

TSH + Boundary SWW -> SWW: Simulation using abstract_2d_finite_volumes
"""

from numpy.testing import Tester
test = Tester().test
firestore/generate_android_test.py
oliwilkinsonio/firebase-cpp-sdk
193
12616672
#!/usr/grte/v4/bin/python2.7 # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generate JUnit4 tests from gtest files. This script reads a template and fills in test-specific information such as .so library name and Java class name. This script also goes over the gtest files and finds all test methods of the pattern TEST_F(..., ...) and converts each into a @Test-annotated test method. """ # We will be open-source this. So please do not introduce google3 dependency # unless absolutely necessary. import argparse import re GTEST_METHOD_RE = (r'TEST(?:_F)?[(]\s*(?P<test_class>[A-Za-z]+)\s*,\s*' r'(?P<test_method>[A-Za-z]+)\s*[)]') JAVA_TEST_METHOD = r""" @Test public void {test_class}{test_method}() {{ run("{test_class}.{test_method}"); }} """ def generate_fragment(gtests): """Generate @Test-annotated test method code from the provided gtest files.""" fragments = [] gtest_method_pattern = re.compile(GTEST_METHOD_RE) for gtest in gtests: with open(gtest, 'r') as gtest_file: gtest_code = gtest_file.read() for matched in re.finditer(gtest_method_pattern, gtest_code): fragments.append( JAVA_TEST_METHOD.format( test_class=matched.group('test_class'), test_method=matched.group('test_method'))) return ''.join(fragments) def generate_file(template, out, **kwargs): """Generate a Java file from the provided template and parameters.""" with open(template, 'r') as template_file: template_string = template_file.read() java_code = template_string.format(**kwargs) with open(out, 'w') as out_file: out_file.write(java_code) def main(): parser = argparse.ArgumentParser( description='Generates JUnit4 tests from gtest files.') parser.add_argument( '--template', help='the filename of the template to use in the generation', required=True) parser.add_argument( '--java_package', help='which package test Java class belongs to', required=True) parser.add_argument( '--java_class', help='specifies the name of the class to generate', required=True) parser.add_argument( '--so_lib', help=('specifies the name of the native library without prefix lib and ' 'suffix .so. You must compile the C++ test code together with the ' 'firestore_android_test_main.cc as a shared library, say libfoo.so ' 'and pass the name foo here.'), required=True) parser.add_argument('--out', help='the output file path', required=True) parser.add_argument('srcs', nargs='+', help='the input gtest file paths') args = parser.parse_args() fragment = generate_fragment(args.srcs) generate_file( args.template, args.out, package_name=args.java_package, java_class_name=args.java_class, so_lib_name=args.so_lib, tests=fragment) if __name__ == '__main__': main()
recipes/Python/473851_Compose_HTML_Mail_embedded_images_URL_or_local/recipe-473851.py
tdiprima/code
2,023
12616677
# HtmlMail python class # Compose HTML mails from URLs or local files with all images included # # Author: <NAME> <<EMAIL>> import sys, os, urllib2, urlparse from email.MIMEText import MIMEText from email.MIMEImage import MIMEImage from email.MIMEMultipart import MIMEMultipart import email, re class HtmlMail: def __init__(self, location, encoding="iso-8859-1"): self.location=location if location.find("http://")==0: self.is_http=True else: self.is_http=False self.encoding=encoding self.p1=re.compile("(<img.*?src=\")(.*?)(\".*?>)", re.IGNORECASE|re.DOTALL) self.p2=re.compile("(<.*?background=\")(.*?)(\".*?>)", re.IGNORECASE|re.DOTALL) self.p3=re.compile("(<input.*?src=\")(.*?)(\".*?>)", re.IGNORECASE|re.DOTALL) self.img_c=0 def set_log(self,log): self.log=log def _handle_image(self, matchobj): img=matchobj.group(2) if not self.images.has_key(img): self.img_c+=1 self.images[img]="dazoot-img%d" % self.img_c return "%scid:%s%s" % (matchobj.group(1), self.images[img], matchobj.group(3)) def _parse_images(self): self.images={} self.content=self.p1.sub(self._handle_image, self.content) self.content=self.p2.sub(self._handle_image, self.content) self.content=self.p3.sub(self._handle_image, self.content) return self.images def _read_image(self, imglocation): if self.is_http: img_url=urlparse.urljoin(self.location, imglocation) content=urllib2.urlopen(img_url).read() return content else: return file(imglocation, "rb").read() def get_msg(self): if self.is_http: content=urllib2.urlopen(self.location).read() else: content=file(self.location, "r").read() self.content=content msg=MIMEMultipart("related") images=self._parse_images() tmsg=MIMEText(self.content, "html", self.encoding) msg.attach(tmsg) for img in images.keys(): img_content=self._read_image(img) img_msg=MIMEImage(img_content) img_type, img_ext=img_msg["Content-Type"].split("/") del img_msg["MIME-Version"] del img_msg["Content-Type"] del img_msg["Content-Transfer-Encoding"] img_msg.add_header("Content-Type", "%s/%s; name=\"%s.%s\"" % (img_type, img_ext, images[img], img_ext)) img_msg.add_header("Content-Transfer-Encoding", "base64") img_msg.add_header("Content-ID", "<%s>" % images[img]) img_msg.add_header("Content-Disposition", "inline; filename=\"%s.%s\"" % (images[img], img_ext)) msg.attach(img_msg) return msg if __name__=="__main__": # test the class here import smtplib hm=HtmlMail("http://www.egirl.ro/newsletter/december2005_2/") msg=hm.get_msg() msg["Subject"]="Egirl Newsletter" msg["From"]="<NAME> <<EMAIL>>" msg["To"]="<EMAIL>" s=smtplib.SMTP("localhost") s.sendmail("<EMAIL>", msg["To"], msg.as_string()) s.quit()
dca/hyper.py
mjheid/dca
193
12616710
import os import pickle import json import numpy as np from kopt import CompileFN, test_fn from hyperopt import fmin, tpe, hp, Trials import keras.optimizers as opt from . import io from .network import AE_types def hyper(args): adata = io.read_dataset(args.input, transpose=args.transpose, test_split=False) hyper_params = { "data": { "norm_input_log": hp.choice('d_norm_log', (True, False)), "norm_input_zeromean": hp.choice('d_norm_zeromean', (True, False)), "norm_input_sf": hp.choice('d_norm_sf', (True, False)), }, "model": { "lr": hp.loguniform("m_lr", np.log(1e-3), np.log(1e-2)), "ridge": hp.loguniform("m_ridge", np.log(1e-7), np.log(1e-1)), "l1_enc_coef": hp.loguniform("m_l1_enc_coef", np.log(1e-7), np.log(1e-1)), "hidden_size": hp.choice("m_hiddensize", ((64,32,64), (32,16,32), (64,64), (32,32), (16,16), (16,), (32,), (64,), (128,))), "activation": hp.choice("m_activation", ('relu', 'selu', 'elu', 'PReLU', 'linear', 'LeakyReLU')), "aetype": hp.choice("m_aetype", ('zinb', 'zinb-conddisp')), "batchnorm": hp.choice("m_batchnorm", (True, False)), "dropout": hp.uniform("m_do", 0, 0.7), "input_dropout": hp.uniform("m_input_do", 0, 0.8), }, "fit": { "epochs": args.hyperepoch } } def data_fn(norm_input_log, norm_input_zeromean, norm_input_sf): ad = adata.copy() ad = io.normalize(ad, size_factors=norm_input_sf, logtrans_input=norm_input_log, normalize_input=norm_input_zeromean) x_train = {'count': ad.X, 'size_factors': ad.obs.size_factors} y_train = ad.raw.X return (x_train, y_train), def model_fn(train_data, lr, hidden_size, activation, aetype, batchnorm, dropout, input_dropout, ridge, l1_enc_coef): net = AE_types[aetype](train_data[1].shape[1], hidden_size=hidden_size, l2_coef=0.0, l1_coef=0.0, l2_enc_coef=0.0, l1_enc_coef=l1_enc_coef, ridge=ridge, hidden_dropout=dropout, input_dropout=input_dropout, batchnorm=batchnorm, activation=activation, init='glorot_uniform', debug=args.debug) net.build() net.model.summary() optimizer = opt.__dict__['RMSprop'](lr=lr, clipvalue=5.0) net.model.compile(loss=net.loss, optimizer=optimizer) return net.model output_dir = os.path.join(args.outputdir, 'hyperopt_results') objective = CompileFN('autoencoder_hyperpar_db', 'myexp1', data_fn=data_fn, model_fn=model_fn, loss_metric='loss', loss_metric_mode='min', valid_split=.2, save_model=None, save_results=True, use_tensorboard=False, save_dir=output_dir) test_fn(objective, hyper_params, save_model=None) trials = Trials() best = fmin(objective, hyper_params, trials=trials, algo=tpe.suggest, max_evals=args.hypern, catch_eval_exceptions=True) with open(os.path.join(output_dir, 'trials.pickle'), 'wb') as f: pickle.dump(trials, f) #TODO: map indices in "best" back to choice-based hyperpars before saving with open(os.path.join(output_dir, 'best.json'), 'wt') as f: json.dump(best, f, sort_keys=True, indent=4) print(best) #TODO: not just save the best conf but also train the model with these params
demo/json-model/json_parser.py
bclehmann/xgboost
23,866
12616731
'''Demonstration for parsing JSON tree model file generated by XGBoost. The support is experimental, output schema is subject to change in the future. ''' import json import argparse class Tree: '''A tree built by XGBoost.''' # Index into node array _left = 0 _right = 1 _parent = 2 _ind = 3 _cond = 4 _default_left = 5 # Index into stat array _loss_chg = 0 _sum_hess = 1 _base_weight = 2 def __init__(self, tree_id: int, nodes, stats): self.tree_id = tree_id self.nodes = nodes self.stats = stats def loss_change(self, node_id: int): '''Loss gain of a node.''' return self.stats[node_id][self._loss_chg] def sum_hessian(self, node_id: int): '''Sum Hessian of a node.''' return self.stats[node_id][self._sum_hess] def base_weight(self, node_id: int): '''Base weight of a node.''' return self.stats[node_id][self._base_weight] def split_index(self, node_id: int): '''Split feature index of node.''' return self.nodes[node_id][self._ind] def split_condition(self, node_id: int): '''Split value of a node.''' return self.nodes[node_id][self._cond] def parent(self, node_id: int): '''Parent ID of a node.''' return self.nodes[node_id][self._parent] def left_child(self, node_id: int): '''Left child ID of a node.''' return self.nodes[node_id][self._left] def right_child(self, node_id: int): '''Right child ID of a node.''' return self.nodes[node_id][self._right] def is_leaf(self, node_id: int): '''Whether a node is leaf.''' return self.nodes[node_id][self._left] == -1 def is_deleted(self, node_id: int): '''Whether a node is deleted.''' # std::numeric_limits<uint32_t>::max() return self.nodes[node_id][self._ind] == 4294967295 def __str__(self): stacks = [0] nodes = [] while stacks: node = {} nid = stacks.pop() node['node id'] = nid node['gain'] = self.loss_change(nid) node['cover'] = self.sum_hessian(nid) nodes.append(node) if not self.is_leaf(nid) and not self.is_deleted(nid): left = self.left_child(nid) right = self.right_child(nid) stacks.append(left) stacks.append(right) string = '\n'.join(map(lambda x: ' ' + str(x), nodes)) return string class Model: '''Gradient boosted tree model.''' def __init__(self, model: dict): '''Construct the Model from JSON object. 
parameters ---------- m: A dictionary loaded by json ''' # Basic property of a model self.learner_model_shape = model['learner']['learner_model_param'] self.num_output_group = int(self.learner_model_shape['num_class']) self.num_feature = int(self.learner_model_shape['num_feature']) self.base_score = float(self.learner_model_shape['base_score']) # A field encoding which output group a tree belongs self.tree_info = model['learner']['gradient_booster']['model'][ 'tree_info'] model_shape = model['learner']['gradient_booster']['model'][ 'gbtree_model_param'] # JSON representation of trees j_trees = model['learner']['gradient_booster']['model']['trees'] # Load the trees self.num_trees = int(model_shape['num_trees']) self.leaf_size = int(model_shape['size_leaf_vector']) # Right now XGBoost doesn't support vector leaf yet assert self.leaf_size == 0, str(self.leaf_size) trees = [] for i in range(self.num_trees): tree = j_trees[i] tree_id = int(tree['id']) assert tree_id == i, (tree_id, i) # properties left_children = tree['left_children'] right_children = tree['right_children'] parents = tree['parents'] split_conditions = tree['split_conditions'] split_indices = tree['split_indices'] default_left = tree['default_left'] # stats base_weights = tree['base_weights'] loss_changes = tree['loss_changes'] sum_hessian = tree['sum_hessian'] stats = [] nodes = [] # We resemble the structure used inside XGBoost, which is similar # to adjacency list. for node_id in range(len(left_children)): nodes.append([ left_children[node_id], right_children[node_id], parents[node_id], split_indices[node_id], split_conditions[node_id], default_left[node_id] ]) stats.append([ loss_changes[node_id], sum_hessian[node_id], base_weights[node_id] ]) tree = Tree(tree_id, nodes, stats) trees.append(tree) self.trees = trees def print_model(self): for i, tree in enumerate(self.trees): print('tree_id:', i) print(tree) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Demonstration for loading and printing XGBoost model.') parser.add_argument('--model', type=str, required=True, help='Path to JSON model file.') args = parser.parse_args() with open(args.model, 'r') as fd: model = json.load(fd) model = Model(model) model.print_model()
tests/test_issues/test_708.py
leonardt/magma
167
12616750
<gh_stars>100-1000 import magma as m from magma.testing import check_files_equal import os import pytest import tempfile @pytest.mark.parametrize("inline", [False, True]) def test_708(inline): class A(m.Product): x = m.UInt[8] @m.sequential2() class Test: def __init__(self): self.a: A = m.namedtuple(x=m.uint(0, 8)) def __call__(self, c: m.Bit) -> m.AnonProduct[dict(a=A)]: if c: a = m.replace(self.a, dict(x=self.a.x + 1)) else: a = self.a return m.namedtuple(a=a) name = f"test_708_inline_{inline}" path = f"build/{name}" m.compile(path, Test, output="coreir-verilog", inline=inline, disable_width_cast=True, disable_ndarray=inline is False) verilator_path = os.path.join(os.path.dirname(__file__), path) assert not os.system( f"verilator --lint-only --language 1364-2005 {verilator_path}.v" ) assert check_files_equal(__file__, f"{path}.v", f"gold/{name}.v")
tensorflow/python/profiler/profile_context_test.py
abhaikollara/tensorflow
848
12616751
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.client import session from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.profiler import option_builder # pylint: disable=g-bad-import-order from tensorflow.python.profiler import profile_context from tensorflow.python.profiler.internal import model_analyzer_testlib as lib builder = option_builder.ProfileOptionBuilder class ProfilerContextTest(test.TestCase): @test_util.run_deprecated_v1 def testBasics(self): ops.reset_default_graph() outfile = os.path.join(test.get_temp_dir(), "dump") opts = builder(builder.time_and_memory() ).with_file_output(outfile).build() x = lib.BuildFullModel() profile_str = None profile_step100 = os.path.join(test.get_temp_dir(), "profile_100") with profile_context.ProfileContext(test.get_temp_dir()) as pctx: pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100]) with session.Session() as sess: self.evaluate(variables.global_variables_initializer()) total_steps = 101 for i in range(total_steps): self.evaluate(x) if i == 14 or i == 49: self.assertTrue(gfile.Exists(outfile)) gfile.Remove(outfile) if i == 99: self.assertTrue(gfile.Exists(profile_step100)) with gfile.Open(outfile, "r") as f: profile_str = f.read() gfile.Remove(outfile) self.assertEqual(set([15, 50, 100]), set(pctx.get_profiles("op").keys())) with lib.ProfilerFromFile( os.path.join(test.get_temp_dir(), "profile_100")) as profiler: profiler.profile_operations(options=opts) with gfile.Open(outfile, "r") as f: if test.is_built_with_rocm(): # The profiler output for ROCm mode, includes an extra warning # related to the lack of stream tracing in ROCm mode. # Need to skip this warning when doing the diff profile_str = "\n".join(profile_str.split("\n")[7:]) self.assertEqual(profile_str, f.read()) @test_util.run_deprecated_v1 def testAutoTracingInDeubMode(self): ops.reset_default_graph() x = lib.BuildFullModel() with profile_context.ProfileContext(test.get_temp_dir(), debug=True): with session.Session() as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(10): self.evaluate(x) for f in gfile.ListDirectory(test.get_temp_dir()): # Warm up, no tracing. self.assertFalse("run_meta" in f) self.evaluate(x) self.assertTrue( gfile.Exists(os.path.join(test.get_temp_dir(), "run_meta_11"))) gfile.Remove(os.path.join(test.get_temp_dir(), "run_meta_11")) # fetched already. 
self.evaluate(x) for f in gfile.ListDirectory(test.get_temp_dir()): self.assertFalse("run_meta" in f) @test_util.run_deprecated_v1 def testDisabled(self): ops.reset_default_graph() x = lib.BuildFullModel() with profile_context.ProfileContext(test.get_temp_dir(), enabled=False) as pctx: with session.Session() as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(10): self.evaluate(x) self.assertTrue(pctx.profiler is None) self.assertTrue( getattr(session.BaseSession, "profile_context", None) is None) with profile_context.ProfileContext(test.get_temp_dir()) as pctx: with session.Session() as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(10): self.evaluate(x) self.assertFalse(pctx.profiler is None) self.assertFalse( getattr(session.BaseSession, "profile_context", None) is None) if __name__ == "__main__": test.main()
pyquante2/utils.py
berquist/pyquante2
107
12616753
<reponame>berquist/pyquante2 """ utils.py - Simple utilility funtions used in pyquante2. """ import numpy as np from math import factorial,lgamma from itertools import combinations_with_replacement,combinations from functools import reduce def pairs(it): return combinations_with_replacement(it,2) def upairs(it): return combinations(it,2) def fact2(n): """ fact2(n) - n!!, double factorial of n >>> fact2(0) 1 >>> fact2(1) 1 >>> fact2(3) 3 >>> fact2(8) 384 >>> fact2(-1) 1 """ return reduce(int.__mul__,range(n,0,-2),1) def norm2(a): return np.dot(a,a) def binomial(n,k): """ Binomial coefficient >>> binomial(5,2) 10 >>> binomial(10,5) 252 """ if n==k: return 1 assert n>k, "Attempting to call binomial(%d,%d)" % (n,k) return factorial(n)//(factorial(k)*factorial(n-k)) def Fgamma(m,x): """ Incomplete gamma function >>> np.isclose(Fgamma(0,0),1.0) True """ SMALL=1e-12 x = max(x,SMALL) return 0.5*pow(x,-m-0.5)*gamm_inc(m+0.5,x) # def gamm_inc_scipy(a,x): # """ # Demonstration on how to replace the gamma calls with scipy.special functions. # By default, pyquante only requires numpy, but this may change as scipy # builds become more stable. # >>> np.isclose(gamm_inc_scipy(0.5,1),1.49365) # True # >>> np.isclose(gamm_inc_scipy(1.5,2),0.6545103) # True # >>> np.isclose(gamm_inc_scipy(2.5,1e-12),0) # True # """ # from scipy.special import gamma,gammainc # return gamma(a)*gammainc(a,x) def gamm_inc(a,x): """ Incomple gamma function \gamma; computed from NumRec routine gammp. >>> np.isclose(gamm_inc(0.5,1),1.49365) True >>> np.isclose(gamm_inc(1.5,2),0.6545103) True >>> np.isclose(gamm_inc(2.5,1e-12),0) True """ assert (x > 0 and a >= 0), "Invalid arguments in routine gamm_inc: %s,%s" % (x,a) if x < (a+1.0): #Use the series representation gam,gln = _gser(a,x) else: #Use continued fractions gamc,gln = _gcf(a,x) gam = 1-gamc return np.exp(gln)*gam def _gser(a,x): "Series representation of Gamma. NumRec sect 6.1." ITMAX=100 EPS=3.e-7 gln=lgamma(a) assert(x>=0),'x < 0 in gser' if x == 0 : return 0,gln ap = a delt = sum = 1./a for i in range(ITMAX): ap=ap+1. delt=delt*x/ap sum=sum+delt if abs(delt) < abs(sum)*EPS: break else: print('a too large, ITMAX too small in gser') gamser=sum*np.exp(-x+a*np.log(x)-gln) return gamser,gln def _gcf(a,x): "Continued fraction representation of Gamma. NumRec sect 6.1" ITMAX=100 EPS=3.e-7 FPMIN=1.e-30 gln=lgamma(a) b=x+1.-a c=1./FPMIN d=1./b h=d for i in range(1,ITMAX+1): an=-i*(i-a) b=b+2. d=an*d+b if abs(d) < FPMIN: d=FPMIN c=b+an/c if abs(c) < FPMIN: c=FPMIN d=1./d delt=d*c h=h*delt if abs(delt-1.) < EPS: break else: print('a too large, ITMAX too small in gcf') gammcf=np.exp(-x+a*np.log(x)-gln)*h return gammcf,gln def trace2(A,B): "Return trace(AB) of matrices A and B" return np.sum(A*B) def dmat(c,nclosed,nopen=0): """Form the density matrix from the first nclosed orbitals of c. If nopen != 0, add in half the density matrix from the next nopen orbitals. 
""" d = np.dot(c[:,:nclosed],c[:,:nclosed].T) if nopen > 0: d += 0.5*np.dot(c[:,nclosed:(nclosed+nopen)],c[:,nclosed:(nclosed+nopen)].T) return d def symorth(S): "Symmetric orthogonalization" E,U = np.linalg.eigh(S) n = len(E) Shalf = np.identity(n,'d') for i in range(n): Shalf[i,i] /= np.sqrt(E[i]) return simx(Shalf,U,True) def canorth(S): "Canonical orthogonalization U/sqrt(lambda)" E,U = np.linalg.eigh(S) for i in range(len(E)): U[:,i] = U[:,i] / np.sqrt(E[i]) return U def cholorth(S): "Cholesky orthogonalization" return np.linalg.inv(np.linalg.cholesky(S)).T def simx(A,B,transpose=False): "Similarity transform B^T(AB) or B(AB^T) (if transpose)" if transpose: return np.dot(B,np.dot(A,B.T)) return np.dot(B.T,np.dot(A,B)) def ao2mo(H,C): return simx(H,C) def mo2ao(H,C,S): return simx(H,np.dot(S,C),transpose=True) def geigh(H,S): "Solve the generalized eigensystem Hc = ESc" A = cholorth(S) E,U = np.linalg.eigh(simx(H,A)) return E,np.dot(A,U) def parseline(line,format): """\ Given a line (a string actually) and a short string telling how to format it, return a list of python objects that result. The format string maps words (as split by line.split()) into python code: x -> Nothing; skip this word s -> Return this word as a string i -> Return this word as an int d -> Return this word as an int f -> Return this word as a float Basic parsing of strings: >>> parseline('Hello, World','ss') ['Hello,', 'World'] You can use 'x' to skip a record; you also don't have to parse every record: >>> parseline('1 2 3 4','xdd') [2, 3] >>> parseline('C1 0.0 0.0 0.0','sfff') ['C1', 0.0, 0.0, 0.0] Should this return an empty list? >>> parseline('This line wont be parsed','xx') """ xlat = {'x':None,'s':str,'f':float,'d':int,'i':int} result = [] words = line.split() for i in range(len(format)): f = format[i] trans = xlat.get(f,None) if trans: result.append(trans(words[i])) if len(result) == 0: return None if len(result) == 1: return result[0] return result def colorscale(mag, cmin, cmax): """ Return a tuple of floats between 0 and 1 for R, G, and B. From Python Cookbook (9.11?) """ # Normalize to 0-1 try: x = float(mag-cmin)/(cmax-cmin) except ZeroDivisionError: x = 0.5 # cmax == cmin blue = min((max((4*(0.75-x), 0.)), 1.)) red = min((max((4*(x-0.25), 0.)), 1.)) green = min((max((4*abs(x-0.5)-1., 0.)), 1.)) return red, green, blue #Todo: replace with np.isclose #def isnear(a,b,tol=1e-6): return abs(a-b) < tol if __name__ == '__main__': import doctest doctest.testmod()
examples/upload_file_test.py
mdmintz/seleniumspot
2,745
12616755
""" Testing the self.choose_file() and self.assert_attribute() methods. """ import os from seleniumbase import BaseCase class FileUploadButtonTests(BaseCase): def test_file_upload_button(self): self.open("https://seleniumbase.io/w3schools/file_upload") self.switch_to_frame("iframeResult") zoom_in = 'input[type="file"]{zoom: 1.6;-moz-transform: scale(1.6);}' self.add_css_style(zoom_in) self.highlight('input[type="file"]') dir_name = os.path.dirname(os.path.abspath(__file__)) my_file = "screenshot.png" file_path = os.path.join(dir_name, "example_logs/%s" % my_file) self.assert_attribute("#myFile", "value", "") self.choose_file('input[type="file"]', file_path) self.assert_attribute("#myFile", "value", "C:\\fakepath\\%s" % my_file) self.highlight('input[type="file"]')
tests/unit/tuner/pytorch/test_dist.py
rizwandel/finetuner
270
12616761
import numpy as np import pytest import torch from scipy.spatial.distance import pdist, squareform from finetuner.tuner.pytorch.losses import get_distance N_BATCH = 10 N_DIM = 128 @pytest.mark.parametrize('distance', ['cosine', 'euclidean', 'sqeuclidean']) def test_dist(distance): embeddings = np.random.rand(N_BATCH, N_DIM) real_dists = squareform(pdist(embeddings, metric=distance)) dists = get_distance(torch.tensor(embeddings), distance) np.testing.assert_almost_equal(real_dists, dists.numpy())
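For reference, a minimal stand-alone call mirroring the test above (shapes and values are arbitrary): get_distance returns the full pairwise distance matrix for a batch of embeddings.

import torch
from finetuner.tuner.pytorch.losses import get_distance

embeddings = torch.rand(4, 16)
dists = get_distance(embeddings, 'cosine')  # (4, 4) pairwise cosine distances
print(dists.shape)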
tests/console/commands/test_search.py
zEdS15B3GCwq/poetry
7,258
12616771
from __future__ import annotations from pathlib import Path from typing import TYPE_CHECKING import pytest if TYPE_CHECKING: import httpretty from cleo.testers.command_tester import CommandTester from tests.types import CommandTesterFactory TESTS_DIRECTORY = Path(__file__).parent.parent.parent FIXTURES_DIRECTORY = ( TESTS_DIRECTORY / "repositories" / "fixtures" / "pypi.org" / "search" ) @pytest.fixture(autouse=True) def mock_search_http_response(http: type[httpretty.httpretty]) -> None: with FIXTURES_DIRECTORY.joinpath("search.html").open(encoding="utf-8") as f: http.register_uri("GET", "https://pypi.org/search", f.read()) @pytest.fixture def tester(command_tester_factory: CommandTesterFactory) -> CommandTester: return command_tester_factory("search") def test_search(tester: CommandTester, http: type[httpretty.httpretty]): tester.execute("sqlalchemy") expected = """ sqlalchemy (1.3.10) Database Abstraction Library sqlalchemy-dao (1.3.1) Simple wrapper for sqlalchemy. graphene-sqlalchemy (2.2.2) Graphene SQLAlchemy integration sqlalchemy-utcdatetime (1.0.4) Convert to/from timezone aware datetimes when storing in a DBMS paginate-sqlalchemy (0.3.0) Extension to paginate.Page that supports SQLAlchemy queries sqlalchemy-audit (0.1.0) sqlalchemy-audit provides an easy way to set up revision tracking for your data. transmogrify-sqlalchemy (1.0.2) Feed data from SQLAlchemy into a transmogrifier pipeline sqlalchemy-schemadisplay (1.3) Turn SQLAlchemy DB Model into a graph sqlalchemy-traversal (0.5.2) UNKNOWN sqlalchemy-filters (0.10.0) A library to filter SQLAlchemy queries. sqlalchemy-wrap (2.1.7) Python wrapper for the CircleCI API sqlalchemy-nav (0.0.2) SQLAlchemy-Nav provides SQLAlchemy Mixins for creating navigation bars compatible with\ Bootstrap sqlalchemy-repr (0.0.1) Automatically generates pretty repr of a SQLAlchemy model. sqlalchemy-diff (0.1.3) Compare two database schemas using sqlalchemy. sqlalchemy-equivalence (0.1.1) Provides natural equivalence support for SQLAlchemy declarative models. broadway-sqlalchemy (0.0.1) A broadway extension wrapping Flask-SQLAlchemy jsonql-sqlalchemy (1.0.1) Simple JSON-Based CRUD Query Language for SQLAlchemy sqlalchemy-plus (0.2.0) Create Views and Materialized Views with SqlAlchemy cherrypy-sqlalchemy (0.5.3) Use SQLAlchemy with CherryPy sqlalchemy-sqlany (1.0.3) SAP Sybase SQL Anywhere dialect for SQLAlchemy """ output = tester.io.fetch_output() assert output == expected
bounter/tests/cms/test_cms_quality.py
BenLangmead/bounter
987
12616793
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Author: <NAME> <<EMAIL>> # Copyright (C) 2017 Rare Technologies # # This code is distributed under the terms and conditions # from the MIT License (MIT). import unittest from bounter import CountMinSketch class CountMinSketchQualityCommonTest(unittest.TestCase): def __init__(self, methodName='runTest', log_counting=None): self.log_counting = log_counting super(CountMinSketchQualityCommonTest, self).__init__(methodName=methodName) """ Functional tests for CountMinSketch.quality method, which returns quality rating of the structure """ def setUp(self): self.cms = CountMinSketch(1, log_counting=self.log_counting) def test_quality_default(self): """ Uses the default structure """ self.assertEqual(self.cms.quality(), 0) three_quarters = int((self.cms.width * 3) / 4) for i in range(three_quarters): self.cms.increment(str(i), 1 + (i % 13)) self.assertGreaterEqual(self.cms.quality(), 0.5) self.assertLessEqual(self.cms.quality(), 1.0) for i in range(three_quarters * 7): self.cms.increment(str(i), 1 + (i % 13)) self.assertGreaterEqual(self.cms.quality(), 4.0) self.assertLessEqual(self.cms.quality(), 6.0) class CountMinSketchQualityConservativeTest(CountMinSketchQualityCommonTest): def __init__(self, methodName='runTest'): super(CountMinSketchQualityConservativeTest, self).__init__(methodName=methodName, log_counting=None) class CountMinSketchQualityLog1024Test(CountMinSketchQualityCommonTest): def __init__(self, methodName='runTest'): super(CountMinSketchQualityLog1024Test, self).__init__(methodName=methodName, log_counting=1024) class CountMinSketchQualityLog8Test(CountMinSketchQualityCommonTest): def __init__(self, methodName='runTest'): super(CountMinSketchQualityLog8Test, self).__init__(methodName=methodName, log_counting=8) def load_tests(loader, tests, pattern): test_cases = unittest.TestSuite() test_cases.addTests(loader.loadTestsFromTestCase(CountMinSketchQualityConservativeTest)) test_cases.addTests(loader.loadTestsFromTestCase(CountMinSketchQualityLog1024Test)) test_cases.addTests(loader.loadTestsFromTestCase(CountMinSketchQualityLog8Test)) return test_cases if __name__ == '__main__': unittest.main()
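A short usage sketch based only on the CountMinSketch API exercised by the tests above (the keys and counts are illustrative): fill a small sketch and read back its quality rating.

from bounter import CountMinSketch

cms = CountMinSketch(1, log_counting=8)   # same constructor arguments as the tests above
for i in range(1000):
    cms.increment(str(i % 100), 1)        # 100 distinct keys, incremented repeatedly
print(cms.quality())                      # far below 1.0 for such a lightly loaded sketch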
byt5/sigmorphon.py
onlyrico/byt5
377
12616796
# Copyright 2021 The ByT5 Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Add Tasks to registry.""" import functools import random from byt5.tasks import DEFAULT_BYTE_OUTPUT_FEATURES from byt5.tasks import DEFAULT_MT5_OUTPUT_FEATURES from byt5.tasks import DEFAULT_PREPROCESSORS import numpy import seqio import t5.data from t5.data import preprocessors # Place downloaded data from https://sigmorphon.github.io/sharedtasks/2020 in # the following directory. SIGMORPHON_DIR = None FEATURE_MAP = { 'byt5': DEFAULT_BYTE_OUTPUT_FEATURES, 'mt5': DEFAULT_MT5_OUTPUT_FEATURES } # ====================== SIGMORPHON-2020 TASK-1 ==================== # Task 1: Multilingual Grapheme-to-Phoneme Conversion # Please see website https://sigmorphon.github.io/sharedtasks/2020/task1/ # for details. def get_2020_task1_preprocessor(language): return [ functools.partial( preprocessors.preprocess_tsv, inputs_format=f' {language} ' + '{0}', targets_format='{1}', num_fields=2), ] def metrics_task1_2020(targets, predictions): """Computes word error rate and edit distance metrics.""" def edit_distance(x, y) -> int: # Implementation from # https://github.com/sigmorphon/2020/blob/master/task1/evaluation/evallib.py idim = len(x) + 1 jdim = len(y) + 1 table = numpy.zeros((idim, jdim), dtype=numpy.uint8) table[1:, 0] = 1 table[0, 1:] = 1 for i in range(1, idim): for j in range(1, jdim): if x[i - 1] == y[j - 1]: table[i][j] = table[i - 1][j - 1] else: c1 = table[i - 1][j] c2 = table[i][j - 1] c3 = table[i - 1][j - 1] table[i][j] = min(c1, c2, c3) + 1 return int(table[-1][-1]) # Word-level measures. correct = 0 incorrect = 0 # Label-level measures. total_edits = 0 total_length = 0 for gold, hypo in zip(targets, predictions): edits = edit_distance(gold, hypo) length = len(gold) if edits == 0: correct += 1 else: incorrect += 1 total_edits += edits total_length += length wer = incorrect / (correct + incorrect) ler = 100 * total_edits / total_length return {'wer': wer, 'ler': ler} langs = [ 'arm', 'bul', 'fre', 'geo', 'hin', 'hun', 'ice', 'kor', 'lit', 'gre', 'ady', 'dut', 'jpn', 'rum', 'vie' ] year = '2020' task = 'task1' data_dir = f'{SIGMORPHON_DIR}/{year}/{task}/data/' for lang in langs: for prefix, output_features in FEATURE_MAP.items(): seqio.TaskRegistry.add( f'{prefix}_sigmorphon_{year}_{task}.{lang}', source=seqio.TextLineDataSource( split_to_filepattern={ 'train': f'{data_dir}/train/{lang}_train.tsv', 'validation': f'{data_dir}/dev/{lang}_dev.tsv', 'test': f'{data_dir}/test/{lang}_test.tsv'}), preprocessors=get_2020_task1_preprocessor(lang) + DEFAULT_PREPROCESSORS, output_features=output_features, metric_fns=[metrics_task1_2020]) for prefix in ['mt5', 'byt5']: t5.data.MixtureRegistry.add( f'{prefix}_sigmorphon_{year}_{task}', [f'{prefix}_sigmorphon_{year}_{task}.{lang}' for lang in langs], default_rate=1.) # ====================== SIGMORPHON-2020 TASK-0 ==================== # Task 0: Typologically Diverse Morphological Inflection # Please see website https://sigmorphon.github.io/sharedtasks/2020/task0/ # for details. 
def get_2020_task0_preprocessor(language): return [ functools.partial( preprocessors.preprocess_tsv, inputs_format=f'{language}' + ' {0} ' + 'form={2}', targets_format='{1}', num_fields=3), ] def metrics_task0_2020(targets, predictions): """Calculates exact match and edit distance based metrics.""" def distance(str1, str2): """Levenshtein distance.""" # Implementation from # https://github.com/sigmorphon2020/task0-data/blob/master/evaluate.py m = numpy.zeros([len(str2) + 1, len(str1) + 1]) for x in range(1, len(str2) + 1): m[x][0] = m[x - 1][0] + 1 for y in range(1, len(str1) + 1): m[0][y] = m[0][y - 1] + 1 for x in range(1, len(str2) + 1): for y in range(1, len(str1) + 1): if str1[y - 1] == str2[x - 1]: dg = 0 else: dg = 1 m[x][y] = min(m[x - 1][y] + 1, m[x][y - 1] + 1, m[x - 1][y - 1] + dg) return int(m[len(str2)][len(str1)]) correct, dist, total = 0., 0., 0. for target, prediction in zip(targets, predictions): if target == prediction: correct += 1 dist += distance(target, prediction) total += 1 return { 'accuracy': round(correct / total * 100, 2), 'distance': round(dist / total, 2) } surprise_lang_path_prefix = [ 'SURPRISE-LANGUAGES/Afro-Asiatic/mlt', 'SURPRISE-LANGUAGES/Germanic/gsw', 'SURPRISE-LANGUAGES/Nilo-Sahan/dje', 'SURPRISE-LANGUAGES/Romance/frm', 'SURPRISE-LANGUAGES/Indo-Aryan/urd', 'SURPRISE-LANGUAGES/Uralic/kpv', 'SURPRISE-LANGUAGES/Sino-Tibetan/bod', 'SURPRISE-LANGUAGES/Germanic/nno', 'SURPRISE-LANGUAGES/Uralic/olo', 'SURPRISE-LANGUAGES/Romance/fur', 'SURPRISE-LANGUAGES/Romance/cat', 'SURPRISE-LANGUAGES/Afro-Asiatic/syc', 'SURPRISE-LANGUAGES/Algic/cre', 'SURPRISE-LANGUAGES/Turkic/kir', 'SURPRISE-LANGUAGES/Uralic/lud', 'SURPRISE-LANGUAGES/Uralic/udm', 'SURPRISE-LANGUAGES/Iranian/pus', 'SURPRISE-LANGUAGES/Romance/ast', 'SURPRISE-LANGUAGES/Germanic/gml', 'SURPRISE-LANGUAGES/Turkic/bak', 'SURPRISE-LANGUAGES/Indo-Aryan/hin', 'SURPRISE-LANGUAGES/Iranian/fas', 'SURPRISE-LANGUAGES/Niger-Congo/sna', 'SURPRISE-LANGUAGES/Romance/xno', 'SURPRISE-LANGUAGES/Romance/vec', 'SURPRISE-LANGUAGES/Dravidian/kan', 'SURPRISE-LANGUAGES/Afro-Asiatic/orm', 'SURPRISE-LANGUAGES/Turkic/uzb', 'SURPRISE-LANGUAGES/Uto-Aztecan/ood', 'SURPRISE-LANGUAGES/Turkic/tuk', 'SURPRISE-LANGUAGES/Iranian/tgk', 'SURPRISE-LANGUAGES/Romance/lld', 'SURPRISE-LANGUAGES/Turkic/kaz', 'SURPRISE-LANGUAGES/Indo-Aryan/ben', 'SURPRISE-LANGUAGES/Siouan/dak', 'SURPRISE-LANGUAGES/Romance/glg', 'SURPRISE-LANGUAGES/Turkic/kjh', 'SURPRISE-LANGUAGES/Turkic/crh', 'SURPRISE-LANGUAGES/Indo-Aryan/san', 'SURPRISE-LANGUAGES/Dravidian/tel', 'SURPRISE-LANGUAGES/Tungusic/evn', 'SURPRISE-LANGUAGES/Turkic/aze', 'SURPRISE-LANGUAGES/Uralic/vro', 'SURPRISE-LANGUAGES/Turkic/uig', 'SURPRISE-LANGUAGES/Australian/mwf' ] development_lang_path_prefix = [ 'DEVELOPMENT-LANGUAGES/germanic/swe', 'DEVELOPMENT-LANGUAGES/germanic/ang', 'DEVELOPMENT-LANGUAGES/oto-manguean/azg', 'DEVELOPMENT-LANGUAGES/uralic/vep', 'DEVELOPMENT-LANGUAGES/niger-congo/lin', 'DEVELOPMENT-LANGUAGES/niger-congo/nya', 'DEVELOPMENT-LANGUAGES/germanic/frr', 'DEVELOPMENT-LANGUAGES/uralic/vot', 'DEVELOPMENT-LANGUAGES/austronesian/mlg', 'DEVELOPMENT-LANGUAGES/oto-manguean/ctp', 'DEVELOPMENT-LANGUAGES/oto-manguean/otm', 'DEVELOPMENT-LANGUAGES/oto-manguean/ote', 'DEVELOPMENT-LANGUAGES/uralic/fin', 'DEVELOPMENT-LANGUAGES/oto-manguean/cpa', 'DEVELOPMENT-LANGUAGES/austronesian/mao', 'DEVELOPMENT-LANGUAGES/uralic/mdf', 'DEVELOPMENT-LANGUAGES/germanic/dan', 'DEVELOPMENT-LANGUAGES/niger-congo/gaa', 'DEVELOPMENT-LANGUAGES/oto-manguean/cly', 'DEVELOPMENT-LANGUAGES/uralic/mhr', 
'DEVELOPMENT-LANGUAGES/niger-congo/zul', 'DEVELOPMENT-LANGUAGES/uralic/krl', 'DEVELOPMENT-LANGUAGES/niger-congo/kon', 'DEVELOPMENT-LANGUAGES/oto-manguean/czn', 'DEVELOPMENT-LANGUAGES/germanic/gmh', 'DEVELOPMENT-LANGUAGES/uralic/izh', 'DEVELOPMENT-LANGUAGES/austronesian/ceb', 'DEVELOPMENT-LANGUAGES/germanic/nob', 'DEVELOPMENT-LANGUAGES/austronesian/tgl', 'DEVELOPMENT-LANGUAGES/austronesian/hil', 'DEVELOPMENT-LANGUAGES/niger-congo/lug', 'DEVELOPMENT-LANGUAGES/niger-congo/sot', 'DEVELOPMENT-LANGUAGES/niger-congo/swa', 'DEVELOPMENT-LANGUAGES/germanic/isl', 'DEVELOPMENT-LANGUAGES/oto-manguean/pei', 'DEVELOPMENT-LANGUAGES/uralic/sme', 'DEVELOPMENT-LANGUAGES/germanic/nld', 'DEVELOPMENT-LANGUAGES/niger-congo/aka', 'DEVELOPMENT-LANGUAGES/germanic/eng', 'DEVELOPMENT-LANGUAGES/oto-manguean/zpv', 'DEVELOPMENT-LANGUAGES/uralic/est', 'DEVELOPMENT-LANGUAGES/uralic/liv', 'DEVELOPMENT-LANGUAGES/oto-manguean/xty', 'DEVELOPMENT-LANGUAGES/germanic/deu', 'DEVELOPMENT-LANGUAGES/uralic/myv' ] year = '2020' task = 'task0' data_dir = f'{SIGMORPHON_DIR}/{year}/task0-data/' langs = [ path_prefix.split('/')[-1] for path_prefix in surprise_lang_path_prefix + development_lang_path_prefix ] random.shuffle(langs) path_prefixes = surprise_lang_path_prefix + development_lang_path_prefix for prefix, output_features in FEATURE_MAP.items(): for path_prefix in path_prefixes: lang = path_prefix.split('/')[-1] split_to_filepattern = { 'train': f'{data_dir}/{path_prefix}.trn', 'validation': f'{data_dir}/{path_prefix}.dev', 'test': f'{data_dir}/GOLD-TEST/{lang}.tst', } seqio.TaskRegistry.add( f'{prefix}_sigmorphon_{year}_{task}.{lang}', source=seqio.TextLineDataSource( split_to_filepattern=split_to_filepattern), preprocessors=get_2020_task0_preprocessor(lang) + DEFAULT_PREPROCESSORS, output_features=output_features, metric_fns=[metrics_task0_2020]) seqio.TaskRegistry.add( f'{prefix}_sigmorphon_{year}_{task}.all', source=seqio.TextLineDataSource( split_to_filepattern={ 'test': f'{data_dir}/test.tsv', 'validation': f'{data_dir}/validation.tsv',}), preprocessors=[preprocessors.preprocess_tsv, *DEFAULT_PREPROCESSORS,], output_features=output_features, metric_fns=[metrics_task0_2020]) for prefix in ['mt5', 'byt5']: t5.data.MixtureRegistry.add( f'{prefix}_sigmorphon_{year}_{task}', [f'{prefix}_sigmorphon_{year}_{task}.{lang}' for lang in langs], default_rate=1.)
examples/applications/mercury_solarsystem3.py
rknop/amuse
131
12616797
# import numpy
from amuse.community.mercury.interface import MercuryWayWard
from amuse.community.sse.interface import SSE
from amuse.ext.solarsystem import new_solar_system_for_mercury
from amuse.units import units
from amuse.units.quantities import VectorQuantity
from amuse.plot import (
    plot,
    native_plot,
)

try:
    from matplotlib import pyplot
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False


def planetplot():
    sun, planets = new_solar_system_for_mercury()

    initial = 12.2138 | units.Gyr
    final = 12.3300 | units.Gyr
    step = 10000.0 | units.yr

    timerange = VectorQuantity.arange(initial, final, step)
    gd = MercuryWayWard()
    gd.initialize_code()
    # gd.stopping_conditions.timeout_detection.disable()
    gd.central_particle.add_particles(sun)
    gd.orbiters.add_particles(planets)
    gd.commit_particles()

    se = SSE()
    # se.initialize_code()
    se.commit_parameters()
    se.particles.add_particles(sun)
    se.commit_particles()
    channelp = gd.orbiters.new_channel_to(planets)
    channels = se.particles.new_channel_to(sun)

    for time in timerange:
        err = gd.evolve_model(time-initial)
        channelp.copy()
        # planets.savepoint(time)
        err = se.evolve_model(time)
        channels.copy()
        gd.central_particle.mass = sun.mass
        print(
            sun[0].mass.value_in(units.MSun),
            time.value_in(units.Myr),
            planets[4].x.value_in(units.AU),
            planets[4].y.value_in(units.AU),
            planets[4].z.value_in(units.AU)
        )

    gd.stop()
    se.stop()

    for planet in planets:
        t, x = planet.get_timeline_of_attribute_as_vector("x")
        t, y = planet.get_timeline_of_attribute_as_vector("y")
        plot(x, y, '.')

    native_plot.gca().set_aspect('equal')
    native_plot.show()


if __name__ == "__main__":
    planetplot()
app/src/thirdparty/telemetry/value/histogram_unittest.py
ta2edchimp/big-rig
925
12616800
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest

from telemetry import story
from telemetry import page as page_module
from telemetry import value
from telemetry.value import histogram as histogram_module


class TestBase(unittest.TestCase):
  def setUp(self):
    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
    story_set.AddStory(
        page_module.Page("http://www.bar.com/", story_set, story_set.base_dir))
    story_set.AddStory(
        page_module.Page("http://www.baz.com/", story_set, story_set.base_dir))
    story_set.AddStory(
        page_module.Page("http://www.foo.com/", story_set, story_set.base_dir))
    self.story_set = story_set

  @property
  def pages(self):
    return self.story_set.stories


class ValueTest(TestBase):
  def testHistogramBasic(self):
    page0 = self.pages[0]
    histogram = histogram_module.HistogramValue(
        page0, 'x', 'counts',
        raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
        important=False)
    self.assertEquals(
        ['{"buckets": [{"low": 1, "high": 2, "count": 1}]}'],
        histogram.GetBuildbotValue())
    self.assertEquals(1.5, histogram.GetRepresentativeNumber())
    self.assertEquals(
        ['{"buckets": [{"low": 1, "high": 2, "count": 1}]}'],
        histogram.GetBuildbotValue())

    self.assertEquals(
        'unimportant-histogram',
        histogram.GetBuildbotDataType(value.SUMMARY_RESULT_OUTPUT_CONTEXT))
    histogram.important = True
    self.assertEquals(
        'histogram',
        histogram.GetBuildbotDataType(value.SUMMARY_RESULT_OUTPUT_CONTEXT))

  def testBucketAsDict(self):
    bucket = histogram_module.HistogramValueBucket(33, 45, 78)
    d = bucket.AsDict()

    self.assertEquals(d, {
        'low': 33,
        'high': 45,
        'count': 78
    })

  def testAsDict(self):
    histogram = histogram_module.HistogramValue(
        None, 'x', 'counts',
        raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
        important=False)
    d = histogram.AsDictWithoutBaseClassEntries()

    self.assertEquals(['buckets'], d.keys())
    self.assertTrue(isinstance(d['buckets'], list))
    self.assertEquals(len(d['buckets']), 1)

  def testFromDict(self):
    d = {
      'type': 'histogram',
      'name': 'x',
      'units': 'counts',
      'buckets': [{'low': 1, 'high': 2, 'count': 1}]
    }

    v = value.Value.FromDict(d, {})

    self.assertTrue(isinstance(v, histogram_module.HistogramValue))
    self.assertEquals(
        ['{"buckets": [{"low": 1, "high": 2, "count": 1}]}'],
        v.GetBuildbotValue())
test-framework/test-suites/integration/tests/remove/test_remove_environment_route.py
knutsonchris/stacki
123
12616807
<gh_stars>100-1000 import json from textwrap import dedent class TestRemoveEnvironmentRoute: def test_no_args(self, host): result = host.run('stack remove environment route') assert result.rc == 255 assert result.stderr == dedent('''\ error - "environment" argument is required {environment ...} {address=string} ''') def test_invalid(self, host): result = host.run('stack remove environment route test address=127.0.0.1') assert result.rc == 255 assert result.stderr == dedent('''\ error - "test" argument is not a valid environment {environment ...} {address=string} ''') def test_no_address(self, host, add_environment): result = host.run('stack remove environment route test') assert result.rc == 255 assert result.stderr == dedent('''\ error - "address" parameter is required {environment ...} {address=string} ''') def test_one_arg(self, host, add_environment): # Add a couple environment routes result = host.run('stack add environment route test address=192.168.0.2 gateway=private') assert result.rc == 0 result = host.run('stack add environment route test address=192.168.0.3 gateway=private') assert result.rc == 0 # Confirm they are there result = host.run('stack list environment route test output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == [ { "environment": "test", "network": "192.168.0.2", "netmask": "255.255.255.255", "gateway": None, "subnet": "private", "interface": None }, { "environment": "test", "network": "192.168.0.3", "netmask": "255.255.255.255", "gateway": None, "subnet": "private", "interface": None } ] # Now remove the first environment route added result = host.run('stack remove environment route test address=192.168.0.2') assert result.rc == 0 # Make sure only one route was removed result = host.run('stack list environment route test output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == [ { "environment": "test", "network": "192.168.0.3", "netmask": "255.255.255.255", "gateway": None, "subnet": "private", "interface": None } ] def test_multiple_args(self, host, add_environment): # Add a couple environment routes to the "test" environment result = host.run('stack add environment route test address=192.168.0.2 gateway=private') assert result.rc == 0 result = host.run('stack add environment route test address=192.168.0.3 gateway=private') assert result.rc == 0 # Add a second test environment add_environment('foo') # Add a couple environment routes to the "foo" environment result = host.run('stack add environment route foo address=192.168.0.4 gateway=private') assert result.rc == 0 result = host.run('stack add environment route foo address=192.168.0.5 gateway=private') assert result.rc == 0 # Confirm all our routes are there result = host.run('stack list environment route test foo output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == [ { 'environment': 'foo', 'gateway': None, 'interface': None, 'netmask': '255.255.255.255', 'network': '192.168.0.4', 'subnet': 'private' }, { 'environment': 'foo', 'gateway': None, 'interface': None, 'netmask': '255.255.255.255', 'network': '192.168.0.5', 'subnet': 'private' }, { 'environment': 'test', 'gateway': None, 'interface': None, 'netmask': '255.255.255.255', 'network': '192.168.0.2', 'subnet': 'private' }, { 'environment': 'test', 'gateway': None, 'interface': None, 'netmask': '255.255.255.255', 'network': '192.168.0.3', 'subnet': 'private' } ] # Now remove the second route added to "test" environment result = host.run('stack remove environment route test 
address=192.168.0.3') assert result.rc == 0 # And the first added to "foo" environment result = host.run('stack remove environment route foo address=192.168.0.4') assert result.rc == 0 # Make sure only the expected routes were removed result = host.run('stack list environment route test foo output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == [ { 'environment': 'foo', 'gateway': None, 'interface': None, 'netmask': '255.255.255.255', 'network': '192.168.0.5', 'subnet': 'private' }, { 'environment': 'test', 'gateway': None, 'interface': None, 'netmask': '255.255.255.255', 'network': '192.168.0.2', 'subnet': 'private' } ]
src/visions/typesets/typeset.py
bhumikapahariapuresoftware/visions
142
12616811
<filename>src/visions/typesets/typeset.py import warnings from functools import singledispatch from pathlib import Path from typing import ( Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Type, TypeVar, Union, ) import networkx as nx import pandas as pd from visions.types.generic import Generic from visions.types.type import VisionsBaseType TypeOrTypeset = TypeVar("TypeOrTypeset", Type[VisionsBaseType], "VisionsTypeset") pathTypes = TypeVar( "pathTypes", Type[VisionsBaseType], Dict[str, Type[VisionsBaseType]] ) pdT = TypeVar("pdT", pd.Series, pd.DataFrame) T = Type[VisionsBaseType] def build_graph(nodes: Set[Type[VisionsBaseType]]) -> Tuple[nx.DiGraph, nx.DiGraph]: """Constructs a traversable relation graph between visions types Builds a type relation graph from a collection of :class:`visions.types.type.VisionsBaseType` where each node corresponds to a type and each edge is a relation defined on the type. Args: nodes: An Sequence of :class:`visions.types.type.VisionsBaseType` Returns: A directed graph of type relations for the provided nodes. """ style_map = {True: "dashed", False: "solid"} relation_graph = nx.DiGraph() relation_graph.add_nodes_from(nodes) noninferential_edges = [] for node in nodes: for relation in node.relations: if relation.related_type not in nodes: warnings.warn( f"Provided relations included mapping from {relation.related_type} to {relation.type} " f"but {relation.related_type} was not included in the provided list of nodes" ) else: relation_graph.add_edge( relation.related_type, relation.type, relationship=relation, style=style_map[relation.inferential], ) if not relation.inferential: noninferential_edges.append((relation.related_type, relation.type)) check_graph_constraints(relation_graph) base_graph = relation_graph.edge_subgraph(noninferential_edges) return relation_graph, base_graph def check_graph_constraints(relation_graph: nx.DiGraph) -> None: """Validates a relation_graph is appropriately constructed Args: relation_graph: A directed graph representing the set of relations between type nodes. """ check_isolates(relation_graph) check_cycles(relation_graph) def check_isolates(graph: nx.DiGraph) -> None: """Check for orphaned nodes. Args: graph: the graph to check """ nodes = set(graph.nodes) root_node = next(nx.topological_sort(graph)) isolates = list(set(nx.isolates(graph)) - {root_node}) # root can be isolate graph.remove_nodes_from(isolates) orphaned_nodes = nodes - set(graph.nodes) if orphaned_nodes: message = f"{orphaned_nodes} were isolates in the type relation map and consequently orphaned. " message += "Please add some mapping to the orphaned nodes." warnings.warn(message) def check_cycles(graph: nx.DiGraph) -> None: """Check for cycles and warn if one is found Args: graph: the graph to check """ cycles = list(nx.simple_cycles(graph)) if len(cycles) > 0: warnings.warn(f"Cyclical relations between types {cycles} detected") def traverse_graph_with_series( base_type: T, series: Sequence, graph: nx.DiGraph, path: List[T] = None, state: Optional[dict] = None, ) -> Tuple[Sequence, List[T], dict]: """Depth First Search traversal. There should be at most one successor that contains the series. Args: base_type: Entry-point for graph to start traversal series: the Series to check graph: the Graph to traverse path: the path so far state: traversal state Returns: The most uniquely specified node matching the series. 
""" if state is None: state = dict() if path is None: path = [] path.append(base_type) for vision_type in graph.successors(base_type): relation = graph[base_type][vision_type]["relationship"] if relation.is_relation(series, state): series = relation.transform(series, state) return traverse_graph_with_series(vision_type, series, graph, path, state) return series, path, state def traverse_graph_with_sampled_series( base_type: T, series: pd.Series, graph: nx.DiGraph, sample_size: int = 10, state: dict = dict(), ) -> Tuple[Sequence, List[T], dict]: """Depth First Search traversal with sampling. There should be at most one successor that contains the series. Args: base_type: Entry-point for graph to start traversal series: the Series to check graph: the Graph to traverse sample_size: number of items used in heuristic traversal state: traversal state Returns: The most uniquely specified node matching the series. """ if (series.shape[0] < 1000) or (sample_size > series.shape[0]): return traverse_graph_with_series(base_type, series, graph, state=state) series_sample = series.sample(sample_size) _, path, _ = traverse_graph_with_series( base_type, series_sample, graph, state=state ) if len(path) == 1: return series, path, state # Cast the full series from_type = path[0] for i, to_type in enumerate(path[1:]): relation = graph[from_type][to_type]["relationship"] if not relation.is_relation(series, state): break series = relation.transform(series, state) from_type = to_type return series, path[0 : (i + 2)], state @singledispatch def traverse_graph( data: Sequence, root_node: T, graph: nx.DiGraph ) -> Tuple[Sequence, Union[List[T], Dict[str, List[T]]], Dict[str, dict]]: return traverse_graph_with_series(root_node, data, graph) @singledispatch def get_type_from_path( path_data: Union[Sequence[T], Dict[str, Sequence[T]]] ) -> Union[T, Dict[str, T]]: raise TypeError(f"Can't get types from path object of type {type(path_data)}") @get_type_from_path.register(list) @get_type_from_path.register(tuple) def _get_type_from_path_builtin(path_list: Sequence[T]) -> T: return path_list[-1] @get_type_from_path.register(dict) def _get_type_from_path_dict(path_dict: Dict[str, Sequence[T]]) -> Dict[str, T]: return {k: v[-1] for k, v in path_dict.items()} class VisionsTypeset: """ A collection of :class:`visions.types.type.VisionsBaseType` with associated relationship map between them. Attributes: types: The collection of Visions Types derived from :class:`visions.types.type.VisionsBaseType` base_graph: The graph of relations composed exclusively of :class:`visions.relations.relations.IdentityRelation` relation_graph: The full relation graph including both :class:`visions.relations.relations.IdentityRelation` and :class:`visions.relations.relations.InferenceRelation` """ def __init__(self, types: Set[Type[VisionsBaseType]]) -> None: """ Args: types: a set of types """ self._root_node: Optional[T] = None if not isinstance(types, Iterable): raise ValueError("types should be Sequence") self.relation_graph, self.base_graph = build_graph(set(types)) if not issubclass(self.root_node, Generic): raise ValueError("`root_node` should be a subclass of Generic") self.types = set(self.relation_graph.nodes) @property def root_node(self) -> T: """Returns a cached copy of the relation_graphs root node Args: Returns: A cached copy of the relation_graphs root node. 
""" if self._root_node is None: self._root_node = next(nx.topological_sort(self.relation_graph)) return self._root_node def detect(self, data: Any) -> Tuple[Sequence, Any, dict]: """The results found after only considering IdentityRelations. Notes: This is an advanced feature, consider using `detect_type` in case the type is what is needed. Args: data: a DataFrame or Series to determine types over Returns: A tuple of the coerced sequence, visited nodes and state """ return traverse_graph(data, self.root_node, self.base_graph) def detect_type(self, data: Sequence) -> Union[T, Dict[str, T]]: """The inferred type found only considering IdentityRelations. Args: data: a DataFrame or Series to determine types over Returns: A dictionary of {name: type} pairs in the case of DataFrame input or a type """ _, paths, _ = self.detect(data) return get_type_from_path(paths) def infer(self, data: Sequence) -> Tuple[Sequence, Any, dict]: """The results found after considering all relations. Notes: This is an advanced feature, consider using `infer_type` in case the type is what is needed. Args: data: a DataFrame or Series to determine types over Returns: A tuple of the coerced sequence, visited nodes and state """ return traverse_graph(data, self.root_node, self.relation_graph) def infer_type(self, data: Sequence) -> Union[T, Dict[str, T]]: """The inferred type found using all type relations. Args: data: a DataFrame or Series to determine types over Returns: A dictionary of {name: type} pairs in the case of DataFrame input or a type """ _, paths, _ = self.infer(data) return get_type_from_path(paths) def cast_to_detected(self, data: Sequence) -> Sequence: """Transforms input data into a canonical representation using only IdentityRelations Args: data: a DataFrame or Series to determine types over Returns: new_data: The transformed DataFrame or Series. """ data, _, _ = self.detect(data) return data def cast_to_inferred(self, data: Sequence) -> Sequence: """Transforms input data and returns it's corresponding new type relation using all relations. Args: data: a DataFrame or Series to determine types over Returns: new_data: The transformed DataFrame or Series. types: A dictionary of {name: type} pairs in the case of DataFrame input or a type. """ data, _, _ = self.infer(data) return data def output_graph( self, file_name: Union[str, Path], base_only: bool = False, dpi: Optional[int] = None, ) -> None: """Write the type graph to a file. Args: file_name: the file to save the output to base_only: if True, plot the graph without relation mapping edges dpi: set the dpi of the output image """ from visions.utils.graph import output_graph if base_only: graph = self.base_graph.copy() else: graph = self.relation_graph.copy() graph.graph["node"] = {"shape": "box", "color": "red"} if dpi is not None: graph.graph["graph"] = {"dpi": dpi} output_graph(graph, file_name) def plot_graph( self, dpi: int = 800, base_only: bool = False, figsize: Optional[Tuple[int, int]] = None, ): """ Args: dpi: dpi of the matplotlib figure. 
figsize: figure size base_only: Only display the typesets base_graph Returns: Displays the image """ import os import tempfile from matplotlib import image as mpimg from matplotlib import pyplot as plt with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file: self.output_graph(temp_file.name, dpi=dpi, base_only=base_only) img = mpimg.imread(temp_file.name) plt.figure(dpi=dpi, figsize=figsize) plt.axis("off") plt.imshow(img) os.unlink(temp_file.name) def _get_other_type(self, other: TypeOrTypeset) -> Set[T]: """Converts input into a set of :class:`visions.types.type.VisionsBaseType` Args: other: A :class:`visions.types.type.VisionsBaseType` or :class:`visions.typesets.typeset.VisionsTypeset` Raises: NotImplementedError: Returns: Set[Type[VisionsBaseType]]: """ if isinstance(other, VisionsTypeset): other_types = set(other.types) elif issubclass(other, VisionsBaseType): other_types = {other} else: raise NotImplementedError( f"Typeset operation not implemented for type {type(other)}" ) return other_types def replace(self, old: T, new: T) -> "VisionsTypeset": """Create a new typeset having replace one type with another. Args: old: Visions type to replace. new: Replacement visions type. Returns A VisionsTypeset """ types = self.types.copy() types.add(new) types.remove(old) return VisionsTypeset(types) def __add__(self, other: TypeOrTypeset) -> "VisionsTypeset": """Adds a type or typeset into the current typeset. Args: other: Type or typeset to be added Returns A VisionsTypeset """ other_types = self._get_other_type(other) return VisionsTypeset(self.types | other_types) def __iadd__(self, other: TypeOrTypeset) -> "VisionsTypeset": """Adds a type or typeset into the current typeset. Args: other: Type or typeset to be added Returns A VisionsTypeset """ return self.__add__(other) def __sub__(self, other: TypeOrTypeset) -> "VisionsTypeset": """Subtracts a type or typeset from the current typeset. Args: other: Type or typeset to be removed Returns A VisionsTypeset """ other_types = self._get_other_type(other) return VisionsTypeset(self.types - other_types) def __isub__(self, other: TypeOrTypeset) -> "VisionsTypeset": """Subtracts a type or typeset from the current typeset. Args: other: Type or typeset to be removed Returns A VisionsTypeset """ return self.__sub__(other) def __repr__(self) -> str: """Pretty representation of the typeset. Returns A :class:`visions.typesets.typeset.VisionsTypeset` """ return self.__class__.__name__
docs/core/examples/pbecho.py
Khymeira/twisted
4,612
12616834
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

if __name__ == "__main__":
    # Avoid using any names defined in the "__main__" module.
    from pbecho import main

    raise SystemExit(main())

from zope.interface import implementer

from twisted.cred.portal import IRealm
from twisted.spread import pb


class DefinedError(pb.Error):
    pass


class SimplePerspective(pb.Avatar):
    def perspective_echo(self, text):
        print("echoing", text)
        return text

    def perspective_error(self):
        raise DefinedError("exception!")

    def logout(self):
        print(self, "logged out")


@implementer(IRealm)
class SimpleRealm:
    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective in interfaces:
            avatar = SimplePerspective()
            return pb.IPerspective, avatar, avatar.logout
        else:
            raise NotImplementedError("no interface")


def main():
    from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
    from twisted.cred.portal import Portal
    from twisted.internet import reactor

    portal = Portal(SimpleRealm())
    checker = InMemoryUsernamePasswordDatabaseDontUse()
    checker.addUser("guest", "guest")
    portal.registerChecker(checker)
    reactor.listenTCP(pb.portno, pb.PBServerFactory(portal))
    reactor.run()
proxy/python/proxy.py
kuchaguangjie/go-micro-examples
132
12616846
import uuid
import requests
import json

registry_uri = "http://localhost:8081/registry"
call_uri = "http://localhost:8081"
headers = {'content-type': 'application/json'}


def register(service):
    return requests.post(registry_uri, data=json.dumps(service), headers=headers)


def deregister(service):
    return requests.delete(registry_uri, data=json.dumps(service), headers=headers)


def rpc_call(path, request):
    return requests.post(call_uri + path, data=json.dumps(request), headers=headers).json()


def http_call(path, request):
    return requests.post(call_uri + path, data=request)
authlib/common/encoding.py
minddistrict/authlib
3,172
12616854
import json
import base64
import struct


def to_bytes(x, charset='utf-8', errors='strict'):
    if x is None:
        return None
    if isinstance(x, bytes):
        return x
    if isinstance(x, str):
        return x.encode(charset, errors)
    if isinstance(x, (int, float)):
        return str(x).encode(charset, errors)
    return bytes(x)


def to_unicode(x, charset='utf-8', errors='strict'):
    if x is None or isinstance(x, str):
        return x
    if isinstance(x, bytes):
        return x.decode(charset, errors)
    return str(x)


def to_native(x, encoding='ascii'):
    if isinstance(x, str):
        return x
    return x.decode(encoding)


def json_loads(s):
    return json.loads(s)


def json_dumps(data, ensure_ascii=False):
    return json.dumps(data, ensure_ascii=ensure_ascii, separators=(',', ':'))


def urlsafe_b64decode(s):
    s += b'=' * (-len(s) % 4)
    return base64.urlsafe_b64decode(s)


def urlsafe_b64encode(s):
    return base64.urlsafe_b64encode(s).rstrip(b'=')


def base64_to_int(s):
    data = urlsafe_b64decode(to_bytes(s, charset='ascii'))
    buf = struct.unpack('%sB' % len(data), data)
    return int(''.join(["%02x" % byte for byte in buf]), 16)


def int_to_base64(num):
    if num < 0:
        raise ValueError('Must be a positive integer')
    s = num.to_bytes((num.bit_length() + 7) // 8, 'big', signed=False)
    return to_unicode(urlsafe_b64encode(s))


def json_b64encode(text):
    if isinstance(text, dict):
        text = json_dumps(text)
    return urlsafe_b64encode(to_bytes(text))
example/run_sac.py
krish-dx/machina
302
12616863
<filename>example/run_sac.py """ An example of Soft Actor Critic. """ import argparse import copy import json import os from pprint import pprint import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import gym import machina as mc from machina.pols import GaussianPol from machina.algos import sac from machina.vfuncs import DeterministicSAVfunc from machina.envs import GymEnv from machina.traj import Traj from machina.traj import epi_functional as ef from machina.samplers import EpiSampler from machina import logger from machina.utils import set_device, measure from simple_net import PolNet, QNet, VNet parser = argparse.ArgumentParser() parser.add_argument('--log', type=str, default='garbage', help='Directory name of log.') parser.add_argument('--env_name', type=str, default='Pendulum-v0', help='Name of environment.') parser.add_argument('--c2d', action='store_true', default=False, help='If True, action is discretized.') parser.add_argument('--record', action='store_true', default=False, help='If True, movie is saved.') parser.add_argument('--seed', type=int, default=256) parser.add_argument('--max_epis', type=int, default=1000000, help='Number of episodes to run.') parser.add_argument('--max_steps_off', type=int, default=1000000000000, help='Number of episodes stored in off traj.') parser.add_argument('--num_parallel', type=int, default=4, help='Number of processes to sample.') parser.add_argument('--cuda', type=int, default=-1, help='cuda device number.') parser.add_argument('--max_steps_per_iter', type=int, default=10000, help='Number of steps to use in an iteration.') parser.add_argument('--batch_size', type=int, default=256) parser.add_argument('--sampling', type=int, default=1, help='Number of sampling in calculation of expectation.') parser.add_argument('--no_reparam', action='store_true', default=False) parser.add_argument('--pol_lr', type=float, default=1e-4, help='Policy learning rate') parser.add_argument('--qf_lr', type=float, default=3e-4, help='Q function learning rate') parser.add_argument('--ent_alpha', type=float, default=1, help='Entropy coefficient.') parser.add_argument('--tau', type=float, default=5e-3, help='Coefficient of target function.') parser.add_argument('--gamma', type=float, default=0.99, help='Discount factor.') args = parser.parse_args() if not os.path.exists(args.log): os.makedirs(args.log) with open(os.path.join(args.log, 'args.json'), 'w') as f: json.dump(vars(args), f) pprint(vars(args)) if not os.path.exists(os.path.join(args.log, 'models')): os.makedirs(os.path.join(args.log, 'models')) np.random.seed(args.seed) torch.manual_seed(args.seed) device_name = 'cpu' if args.cuda < 0 else "cuda:{}".format(args.cuda) device = torch.device(device_name) set_device(device) score_file = os.path.join(args.log, 'progress.csv') logger.add_tabular_output(score_file) logger.add_tensorboard_output(args.log) env = GymEnv(args.env_name, log_dir=os.path.join( args.log, 'movie'), record_video=args.record) env.env.seed(args.seed) observation_space = env.observation_space action_space = env.action_space pol_net = PolNet(observation_space, action_space) pol = GaussianPol(observation_space, action_space, pol_net) qf_net1 = QNet(observation_space, action_space) qf1 = DeterministicSAVfunc(observation_space, action_space, qf_net1) targ_qf_net1 = QNet(observation_space, action_space) targ_qf_net1.load_state_dict(qf_net1.state_dict()) targ_qf1 = DeterministicSAVfunc(observation_space, action_space, targ_qf_net1) qf_net2 = QNet(observation_space, 
action_space) qf2 = DeterministicSAVfunc(observation_space, action_space, qf_net2) targ_qf_net2 = QNet(observation_space, action_space) targ_qf_net2.load_state_dict(qf_net2.state_dict()) targ_qf2 = DeterministicSAVfunc(observation_space, action_space, targ_qf_net2) qfs = [qf1, qf2] targ_qfs = [targ_qf1, targ_qf2] log_alpha = nn.Parameter(torch.zeros((), device=device)) sampler = EpiSampler(env, pol, args.num_parallel, seed=args.seed) optim_pol = torch.optim.Adam(pol_net.parameters(), args.pol_lr) optim_qf1 = torch.optim.Adam(qf_net1.parameters(), args.qf_lr) optim_qf2 = torch.optim.Adam(qf_net2.parameters(), args.qf_lr) optim_qfs = [optim_qf1, optim_qf2] optim_alpha = torch.optim.Adam([log_alpha], args.pol_lr) off_traj = Traj(args.max_steps_off, traj_device='cpu') total_epi = 0 total_step = 0 max_rew = -1e6 while args.max_epis > total_epi: with measure('sample'): epis = sampler.sample(pol, max_steps=args.max_steps_per_iter) with measure('train'): on_traj = Traj(traj_device='cpu') on_traj.add_epis(epis) on_traj = ef.add_next_obs(on_traj) on_traj.register_epis() off_traj.add_traj(on_traj) total_epi += on_traj.num_epi step = on_traj.num_step total_step += step result_dict = sac.train( off_traj, pol, qfs, targ_qfs, log_alpha, optim_pol, optim_qfs, optim_alpha, step, args.batch_size, args.tau, args.gamma, args.sampling, not args.no_reparam ) rewards = [np.sum(epi['rews']) for epi in epis] mean_rew = np.mean(rewards) logger.record_results(args.log, result_dict, score_file, total_epi, step, total_step, rewards, plot_title=args.env_name) if mean_rew > max_rew: torch.save(pol.state_dict(), os.path.join( args.log, 'models', 'pol_max.pkl')) torch.save(qf1.state_dict(), os.path.join( args.log, 'models', 'qf1_max.pkl')) torch.save(qf2.state_dict(), os.path.join( args.log, 'models', 'qf2_max.pkl')) torch.save(optim_pol.state_dict(), os.path.join( args.log, 'models', 'optim_pol_max.pkl')) torch.save(optim_qf1.state_dict(), os.path.join( args.log, 'models', 'optim_qf1_max.pkl')) torch.save(optim_qf2.state_dict(), os.path.join( args.log, 'models', 'optim_qf2_max.pkl')) max_rew = mean_rew torch.save(pol.state_dict(), os.path.join( args.log, 'models', 'pol_last.pkl')) torch.save(qf1.state_dict(), os.path.join( args.log, 'models', 'qf1_last.pkl')) torch.save(qf2.state_dict(), os.path.join( args.log, 'models', 'qf2_last.pkl')) torch.save(optim_pol.state_dict(), os.path.join( args.log, 'models', 'optim_pol_last.pkl')) torch.save(optim_qf1.state_dict(), os.path.join( args.log, 'models', 'optim_qf1_last.pkl')) torch.save(optim_qf2.state_dict(), os.path.join( args.log, 'models', 'optim_qf2_last.pkl')) del on_traj del sampler
scitbx/lbfgs/__init__.py
dperl-sol/cctbx_project
155
12616874
<reponame>dperl-sol/cctbx_project<filename>scitbx/lbfgs/__init__.py from __future__ import absolute_import, division, print_function from scitbx.array_family import flex import boost_adaptbx.boost.python as bp ext = bp.import_ext("scitbx_lbfgs_ext") from scitbx_lbfgs_ext import * from libtbx import adopt_init_args raw = raw_lbfgs() class core_parameters(object): def __init__(self, m=5, maxfev=20, gtol=0.9, xtol=1.e-16, stpmin=1.e-20, stpmax=1.e20): adopt_init_args(self, locals()) class termination_parameters(object): def __init__(self, traditional_convergence_test=True, traditional_convergence_test_eps=1.e-5, drop_convergence_test_n_test_points=5, drop_convergence_test_max_drop_eps=1.e-5, drop_convergence_test_iteration_coefficient=2, min_iterations=0, max_iterations=None, max_calls=None): drop_convergence_test_n_test_points = max( drop_convergence_test_n_test_points, min_iterations) adopt_init_args(self, locals()) class exception_handling_parameters(object): def __init__(self, ignore_line_search_failed_rounding_errors=True, ignore_line_search_failed_step_at_lower_bound=False, ignore_line_search_failed_step_at_upper_bound=False, ignore_line_search_failed_maxfev=False, ignore_line_search_failed_xtol=False, ignore_search_direction_not_descent=False): adopt_init_args(self, locals()) def filter(self, msg, n, x, g): if (not msg.startswith("lbfgs error")): return 1 if (msg.find("Rounding errors prevent further progress.") >= 0): if (self.ignore_line_search_failed_rounding_errors): return 0 elif (msg.find("The step is at the lower bound stpmin().") >= 0): if (x is not None and g is not None and ext.traditional_convergence_test(n)(x, g)): return 0 if (self.ignore_line_search_failed_step_at_lower_bound): return -1 elif (msg.find("The step is at the upper bound stpmax().") >= 0): if (self.ignore_line_search_failed_step_at_upper_bound): return -1 elif (msg.find("Number of function evaluations has reached" + " maxfev().") >= 0): if (self.ignore_line_search_failed_maxfev): return -1 elif (msg.find("Relative width of the interval of uncertainty" + " is at most xtol().") >= 0): if (self.ignore_line_search_failed_xtol): return -1 elif (msg.find("The search direction is not a descent direction.") >= 0): if (x is not None and g is not None and ext.traditional_convergence_test(n)(x, g)): return 0 if (self.ignore_search_direction_not_descent): return -1 return 1 def run_c_plus_plus(target_evaluator, termination_params=None, core_params=None, exception_handling_params=None, log=None, #---> Insertion starts gradient_only=False, line_search=True): #<--- Insertion ends if (termination_params is None): termination_params = termination_parameters() if (core_params is None): core_params = core_parameters() if (exception_handling_params is None): exception_handling_params = exception_handling_parameters() x = target_evaluator.x if (log is not None): print("lbfgs minimizer():", file=log) print(" x.size():", x.size(), file=log) print(" m:", core_params.m, file=log) print(" maxfev:", core_params.maxfev, file=log) print(" gtol:", core_params.gtol, file=log) print(" xtol:", core_params.xtol, file=log) print(" stpmin:", core_params.stpmin, file=log) print(" stpmax:", core_params.stpmax, file=log) print("lbfgs traditional_convergence_test:", \ termination_params.traditional_convergence_test, file=log) minimizer = ext.minimizer( x.size(), core_params.m, core_params.maxfev, core_params.gtol, core_params.xtol, core_params.stpmin, core_params.stpmax) if (termination_params.traditional_convergence_test): is_converged = 
ext.traditional_convergence_test( x.size(), termination_params.traditional_convergence_test_eps) else: is_converged = ext.drop_convergence_test( n_test_points=termination_params.drop_convergence_test_n_test_points, max_drop_eps=termination_params.drop_convergence_test_max_drop_eps, iteration_coefficient =termination_params.drop_convergence_test_iteration_coefficient) callback_after_step = getattr(target_evaluator, "callback_after_step", None) diag_mode = getattr(target_evaluator, "diag_mode", None) if (diag_mode is not None): assert diag_mode in ["once", "always"] f_min, x_min = None, None f, g = None, None try: while 1: if (diag_mode is None): f, g = target_evaluator.compute_functional_and_gradients() d = None else: f, g, d = target_evaluator.compute_functional_gradients_diag() if (diag_mode == "once"): diag_mode = None if (f_min is None): if (not termination_params.traditional_convergence_test): is_converged(f) f_min, x_min = f, x.deep_copy() elif (f_min > f): f_min, x_min = f, x.deep_copy() if (log is not None): print("lbfgs minimizer.run():" \ " f=%.6g, |g|=%.6g, x_min=%.6g, x_mean=%.6g, x_max=%.6g" % ( f, g.norm(), flex.min(x), flex.mean(x), flex.max(x)), file=log) if (d is None): #---> Insertion starts if (minimizer.run(x, f, g, gradient_only,line_search)): continue #<--- Insertion ends else: #---> Insertion starts if (minimizer.run(x, f, g, d, gradient_only,line_search)): continue #<--- Insertion ends if (log is not None): print("lbfgs minimizer step", file=log) if (callback_after_step is not None): if (callback_after_step(minimizer) is True): if (log is not None): print("lbfgs minimizer stop: callback_after_step is True", file=log) break if (termination_params.traditional_convergence_test): if ( minimizer.iter() >= termination_params.min_iterations and is_converged(x, g)): if (log is not None): print("lbfgs minimizer stop: traditional_convergence_test", file=log) break else: if (is_converged(f)): if (log is not None): print("lbfgs minimizer stop: drop_convergence_test", file=log) break if ( termination_params.max_iterations is not None and minimizer.iter() >= termination_params.max_iterations): if (log is not None): print("lbfgs minimizer stop: max_iterations", file=log) break if ( termination_params.max_calls is not None and minimizer.nfun() > termination_params.max_calls): if (log is not None): print("lbfgs minimizer stop: max_calls", file=log) break if (d is None): #---> Insertion starts if (not minimizer.run(x, f, g, gradient_only,line_search)): break #<--- Insertion ends else: #---> Insertion starts if (not minimizer.run(x, f, g, d, gradient_only,line_search)): break #<--- Insertion ends except RuntimeError as e: minimizer.error = str(e) if (log is not None): print("lbfgs minimizer exception:", str(e), file=log) if (x_min is not None): x.clear() x.extend(x_min) error_classification = exception_handling_params.filter( minimizer.error, x.size(), x, g) if (error_classification > 0): raise elif (error_classification < 0): minimizer.is_unusual_error = True else: minimizer.is_unusual_error = False else: minimizer.error = None minimizer.is_unusual_error = None if (log is not None): print("lbfgs minimizer done.", file=log) return minimizer def run_fortran(target_evaluator, termination_params=None, core_params=None): "For debugging only!" 
from scitbx.python_utils.misc import store from fortran_lbfgs import lbfgs as fortran_lbfgs import Numeric if (termination_params is None): termination_params = termination_parameters() if (core_params is None): core_params = core_parameters() assert termination_params.traditional_convergence_test assert core_params.maxfev == 20 x = target_evaluator.x n = x.size() m = core_params.m gtol = core_params.gtol xtol = core_params.xtol stpmin = core_params.stpmin stpmax = core_params.stpmax eps = termination_params.traditional_convergence_test_eps x_numeric = Numeric.array(Numeric.arange(n), Numeric.Float64) g_numeric = Numeric.array(Numeric.arange(n), Numeric.Float64) size_w = n*(2*m+1)+2*m w = Numeric.array(Numeric.arange(size_w), Numeric.Float64) diag = Numeric.array(Numeric.arange(n), Numeric.Float64) iprint = [1, 0] diagco = 0 iflag = Numeric.array([0], Numeric.Int32) minimizer = store(error=None) while 1: f, g = target_evaluator.compute_functional_and_gradients() for i,xi in enumerate(x): x_numeric[i] = xi for i,gi in enumerate(g): g_numeric[i] = gi fortran_lbfgs(n, m, x_numeric, f, g_numeric, diagco, diag, iprint, eps, xtol, w, iflag) for i,xi in enumerate(x_numeric): x[i] = xi if (iflag[0] == 0): break if (iflag[0] < 0): minimizer.error = "fortran lbfgs error" break return minimizer def run(target_evaluator, termination_params=None, core_params=None, exception_handling_params=None, use_fortran=False, log=None, #---> Insertion starts gradient_only=False, line_search=True): #<--- Insertion ends if (use_fortran): return run_fortran(target_evaluator, termination_params, core_params) else: return run_c_plus_plus( target_evaluator, termination_params, core_params, exception_handling_params, log, #---> Insertion starts gradient_only, line_search) #<--- Insertion ends
data/transcoder_evaluation_gfg/python/MAXIMUM_PRODUCT_INCREASING_SUBSEQUENCE.py
mxl1n/CodeGen
241
12616875
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # def f_gold ( arr , n ) : mpis = [ 0 ] * ( n ) for i in range ( n ) : mpis [ i ] = arr [ i ] for i in range ( 1 , n ) : for j in range ( i ) : if ( arr [ i ] > arr [ j ] and mpis [ i ] < ( mpis [ j ] * arr [ i ] ) ) : mpis [ i ] = mpis [ j ] * arr [ i ] return max ( mpis ) #TOFILL if __name__ == '__main__': param = [ ([1, 1, 4, 7, 7, 9, 12, 20, 45, 53, 58, 63, 65, 65, 86, 98, 98],12,), ([46, -58, 70, 60, 74, 42, 6, -26, 78, 32, 14, -56, -48, 86, -2, 94, -44, -62, -50, -8, -4, -36, -62, -98, -98, -78, 56, 92, 88],27,), ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],14,), ([13, 71, 93, 68, 43, 75, 44, 15, 1, 91, 7, 9, 65, 85, 46, 87, 37, 74, 19, 30, 87, 27, 82, 92, 12, 36, 6, 27, 76, 80, 30, 83, 67, 83, 65, 28, 81, 59, 63, 11, 70],20,), ([-96, -94, -92, -88, -84, -80, -74, -70, -62, -56, -48, -46, -40, -34, -32, -26, -22, -22, -12, -10, -8, -6, -2, 0, 2, 4, 6, 18, 18, 30, 34, 34, 38, 38, 40, 48, 54, 56, 60, 84, 88, 88, 90, 94, 96],30,), ([1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],22,), ([1, 1, 5, 5, 6, 7, 18, 35, 39, 51, 64, 73, 87, 90, 91, 92],11,), ([-54, 8, -92, -28, 72, 54, -74, 36, -10, 54, -30, -16, -72, -32, -92, 38, -76, -76, -50, -92, 48],19,), ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],26,), ([47, 57, 72, 40, 53, 46, 62, 51, 42, 89, 9, 91, 58, 67, 20, 91, 63, 50, 32, 6, 63, 49, 3, 89, 87, 54, 65, 72, 72, 62, 31, 6, 48, 87, 17, 95, 59, 57],30,) ] n_success = 0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print("#Results: %i, %i" % (n_success, len(param)))
pyscf/agf2/_agf2.py
QuESt-Calculator/pyscf
501
12616882
<filename>pyscf/agf2/_agf2.py<gh_stars>100-1000
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#         <NAME> <<EMAIL>>
#         <NAME> <<EMAIL>>
#

import numpy as np
import ctypes
from pyscf import lib
from pyscf.agf2 import mpi_helper

libagf2 = lib.load_library('libagf2')


def cholesky_build(vv, vev, eps=1e-16):
    ''' Constructs the truncated auxiliaries from :attr:`vv` and :attr:`vev`.
        Performs a Cholesky decomposition via :func:`numpy.linalg.cholesky`,
        for a positive-definite or positive-semidefinite matrix. For the
        latter, the null space is removed.

        The :attr:`vv` matrix of :func:`build_se_part` can be
        positive-semidefinite when :attr:`gf_occ.naux` < :attr:`gf_occ.nphys`
        for the occupied self-energy, or :attr:`gf_vir.naux` <
        :attr:`gf_vir.nphys` for the virtual self-energy.
    '''

    nmo = vv.shape[0]

    try:
        b = np.linalg.cholesky(vv).T
    except np.linalg.LinAlgError:
        w, v = np.linalg.eigh(vv)
        w[w < eps] = eps
        vv_posdef = np.dot(np.dot(v, np.diag(w)), v.T.conj())
        b = np.linalg.cholesky(vv_posdef).T

    b_inv = np.linalg.inv(b)

    m = np.dot(np.dot(b_inv.T, vev), b_inv)
    e, c = np.linalg.eigh(m)
    c = np.dot(b.T, c[:nmo])

    return e, c


def build_mats_ragf2_incore(qeri, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Wraps AGF2ee_vv_vev_islice
    '''

    fdrv = getattr(libagf2, 'AGF2ee_vv_vev_islice')

    assert qeri.ndim == 4

    nmo = qeri.shape[0]
    nocc = e_occ.size
    nvir = e_vir.size

    qeri = np.asarray(qeri, order='C')
    e_i = np.asarray(e_occ, order='C')
    e_a = np.asarray(e_vir, order='C')

    vv = np.zeros((nmo*nmo))
    vev = np.zeros((nmo*nmo))

    rank, size = mpi_helper.rank, mpi_helper.size
    istart = rank * nocc // size
    iend = nocc if rank == (size-1) else (rank+1) * nocc // size

    fdrv(qeri.ctypes.data_as(ctypes.c_void_p),
         e_i.ctypes.data_as(ctypes.c_void_p),
         e_a.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_double(os_factor), ctypes.c_double(ss_factor),
         ctypes.c_int(nmo), ctypes.c_int(nocc), ctypes.c_int(nvir),
         ctypes.c_int(istart), ctypes.c_int(iend),
         vv.ctypes.data_as(ctypes.c_void_p),
         vev.ctypes.data_as(ctypes.c_void_p))

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_ragf2_outcore(qeri, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Python version of AGF2ee_vv_vev_islice to support outcore
    '''

    assert qeri.ndim == 4

    nmo = qeri.shape[0]
    nocc = e_occ.size

    vv = np.zeros((nmo, nmo))
    vev = np.zeros((nmo, nmo))

    fpos = os_factor + ss_factor
    fneg = -ss_factor

    eja = lib.direct_sum('j,a->ja', e_occ, -e_vir)
    eja = eja.ravel()

    for i in mpi_helper.nrange(nocc):
        xija = qeri[:,i].reshape(nmo, -1)
        xjia = qeri[:,:,i].reshape(nmo, -1)
        eija = eja + e_occ[i]

        vv = lib.dot(xija, xija.T, alpha=fpos, beta=1, c=vv)
        vv = lib.dot(xija, xjia.T, alpha=fneg, beta=1, c=vv)

        exija = xija * eija[None]

        vev = lib.dot(exija, xija.T, alpha=fpos, beta=1, c=vev)
        vev = lib.dot(exija, xjia.T, alpha=fneg, beta=1, c=vev)

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_dfragf2_incore(qxi, qja, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Wrapper for AGF2df_vv_vev_islice
    '''

    fdrv = getattr(libagf2, 'AGF2df_vv_vev_islice')

    naux = qxi.shape[0]
    nocc = e_occ.size
    nvir = e_vir.size
    nmo = qxi.size // (naux*nocc)
    assert qxi.size == (naux * nmo * nocc)
    assert qja.size == (naux * nocc * nvir)

    qxi = np.asarray(qxi, order='C')
    qja = np.asarray(qja, order='C')
    e_i = np.asarray(e_occ, order='C')
    e_a = np.asarray(e_vir, order='C')

    rank, size = mpi_helper.rank, mpi_helper.size

    vv = np.zeros((nmo*nmo))
    vev = np.zeros((nmo*nmo))

    start = rank * nocc // size
    end = nocc if rank == (size-1) else (rank+1) * nocc // size

    fdrv(qxi.ctypes.data_as(ctypes.c_void_p),
         qja.ctypes.data_as(ctypes.c_void_p),
         e_i.ctypes.data_as(ctypes.c_void_p),
         e_a.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_double(os_factor), ctypes.c_double(ss_factor),
         ctypes.c_int(nmo), ctypes.c_int(nocc), ctypes.c_int(nvir),
         ctypes.c_int(naux), ctypes.c_int(start), ctypes.c_int(end),
         vv.ctypes.data_as(ctypes.c_void_p),
         vev.ctypes.data_as(ctypes.c_void_p))

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_dfragf2_lowmem(qxi, qja, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Wrapper for AGF2df_vv_vev_islice_lowmem
    '''

    fdrv = getattr(libagf2, 'AGF2df_vv_vev_islice_lowmem')

    naux = qxi.shape[0]
    nocc = e_occ.size
    nvir = e_vir.size
    nmo = qxi.size // (naux*nocc)
    assert qxi.size == (naux * nmo * nocc)
    assert qja.size == (naux * nocc * nvir)

    qxi = np.asarray(qxi, order='C')
    qja = np.asarray(qja, order='C')
    e_i = np.asarray(e_occ, order='C')
    e_a = np.asarray(e_vir, order='C')

    rank, size = mpi_helper.rank, mpi_helper.size

    vv = np.zeros((nmo*nmo))
    vev = np.zeros((nmo*nmo))

    start = rank * (nocc * nocc) // size
    end = nocc*nocc if rank == (size-1) else (rank+1) * (nocc*nocc) // size

    fdrv(qxi.ctypes.data_as(ctypes.c_void_p),
         qja.ctypes.data_as(ctypes.c_void_p),
         e_i.ctypes.data_as(ctypes.c_void_p),
         e_a.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_double(os_factor), ctypes.c_double(ss_factor),
         ctypes.c_int(nmo), ctypes.c_int(nocc), ctypes.c_int(nvir),
         ctypes.c_int(naux), ctypes.c_int(start), ctypes.c_int(end),
         vv.ctypes.data_as(ctypes.c_void_p),
         vev.ctypes.data_as(ctypes.c_void_p))

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_dfragf2_outcore(qxi, qja, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Python version of AGF2df_vv_vev_islice to support outcore
    '''

    naux = qxi.shape[0]
    nocc = e_occ.size
    nvir = e_vir.size
    nmo = qxi.size // (naux*nocc)
    assert qxi.size == (naux * nmo * nocc)
    assert qja.size == (naux * nocc * nvir)

    vv = np.zeros((nmo, nmo))
    vev = np.zeros((nmo, nmo))

    fpos = os_factor + ss_factor
    fneg = -ss_factor

    eja = lib.direct_sum('j,a->ja', e_occ, -e_vir)
    eja = eja.ravel()

    buf = (np.zeros((nmo, nocc*nvir)), np.zeros((nmo*nocc, nvir)))

    for i in mpi_helper.nrange(nocc):
        qx = qxi.reshape(naux, nmo, nocc)[:,:,i]
        xija = lib.dot(qx.T, qja, c=buf[0])
        xjia = lib.dot(qxi.T, qja[:,i*nvir:(i+1)*nvir], c=buf[1])
        xjia = xjia.reshape(nmo, nocc*nvir)
        eija = eja + e_occ[i]

        vv = lib.dot(xija, xija.T, alpha=fpos, beta=1, c=vv)
        vv = lib.dot(xija, xjia.T, alpha=fneg, beta=1, c=vv)

        exija = xija * eija[None]

        vev = lib.dot(exija, xija.T, alpha=fpos, beta=1, c=vev)
        vev = lib.dot(exija, xjia.T, alpha=fneg, beta=1, c=vev)

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_uagf2_incore(qeri, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Wraps AGF2uee_vv_vev_islice
    '''

    fdrv = getattr(libagf2, 'AGF2uee_vv_vev_islice')

    assert qeri[0].ndim == qeri[1].ndim == 4

    nmo = qeri[0].shape[0]
    noa, nob = e_occ[0].size, e_occ[1].size
    nva, nvb = e_vir[0].size, e_vir[1].size

    qeri_a = np.asarray(qeri[0], order='C')
    qeri_b = np.asarray(qeri[1], order='C')
    e_i = np.asarray(e_occ[0], order='C')
    e_I = np.asarray(e_occ[1], order='C')
    e_a = np.asarray(e_vir[0], order='C')
    e_A = np.asarray(e_vir[1], order='C')

    vv = np.zeros((nmo*nmo))
    vev = np.zeros((nmo*nmo))

    rank, size = mpi_helper.rank, mpi_helper.size
    istart = rank * noa // size
    iend = noa if rank == (size-1) else (rank+1) * noa // size

    fdrv(qeri_a.ctypes.data_as(ctypes.c_void_p),
         qeri_b.ctypes.data_as(ctypes.c_void_p),
         e_i.ctypes.data_as(ctypes.c_void_p),
         e_I.ctypes.data_as(ctypes.c_void_p),
         e_a.ctypes.data_as(ctypes.c_void_p),
         e_A.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_double(os_factor), ctypes.c_double(ss_factor),
         ctypes.c_int(nmo), ctypes.c_int(noa), ctypes.c_int(nob),
         ctypes.c_int(nva), ctypes.c_int(nvb),
         ctypes.c_int(istart), ctypes.c_int(iend),
         vv.ctypes.data_as(ctypes.c_void_p),
         vev.ctypes.data_as(ctypes.c_void_p))

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_uagf2_outcore(qeri, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Python version of AGF2uee_vv_vev_islice to support outcore
    '''

    assert qeri[0].ndim == qeri[1].ndim == 4

    nmo = qeri[0].shape[0]
    # noa, nob = e_occ[0].size, e_occ[1].size
    # nva, nvb = e_vir[0].size, e_vir[1].size
    noa = e_occ[0].size

    vv = np.zeros((nmo, nmo))
    vev = np.zeros((nmo, nmo))

    fposa = ss_factor
    fnega = -ss_factor
    fposb = os_factor

    eja_a = lib.direct_sum('j,a->ja', e_occ[0], -e_vir[0]).ravel()
    eja_b = lib.direct_sum('j,a->ja', e_occ[1], -e_vir[1]).ravel()

    for i in mpi_helper.nrange(noa):
        xija_aa = qeri[0][:,i].reshape(nmo, -1)
        xija_ab = qeri[1][:,i].reshape(nmo, -1)
        xjia_aa = qeri[0][:,:,i].reshape(nmo, -1)

        eija_aa = eja_a + e_occ[0][i]
        eija_ab = eja_b + e_occ[0][i]

        vv = lib.dot(xija_aa, xija_aa.T, alpha=fposa, beta=1, c=vv)
        vv = lib.dot(xija_aa, xjia_aa.T, alpha=fnega, beta=1, c=vv)
        vv = lib.dot(xija_ab, xija_ab.T, alpha=fposb, beta=1, c=vv)

        exija_aa = xija_aa * eija_aa[None]
        exija_ab = xija_ab * eija_ab[None]

        vev = lib.dot(exija_aa, xija_aa.T, alpha=fposa, beta=1, c=vev)
        vev = lib.dot(exija_aa, xjia_aa.T, alpha=fnega, beta=1, c=vev)
        vev = lib.dot(exija_ab, xija_ab.T, alpha=fposb, beta=1, c=vev)

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_dfuagf2_incore(qxi, qja, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Wrapper for AGF2udf_vv_vev_islice
    '''

    fdrv = getattr(libagf2, 'AGF2udf_vv_vev_islice')

    naux = qxi[0].shape[0]
    noa, nob = e_occ[0].size, e_occ[1].size
    nva, nvb = e_vir[0].size, e_vir[1].size
    nmo = qxi[0].size // (naux*noa)
    assert qxi[0].size == (naux * nmo * noa)
    assert qja[0].size == (naux * noa * nva)
    assert qja[1].size == (naux * nob * nvb)

    qxi_a, qxi_b = qxi
    qja_a, qja_b = qja

    qxi = np.asarray(qxi_a, order='C')
    qja = np.asarray(qja_a, order='C')
    qJA = np.asarray(qja_b, order='C')
    e_i = np.asarray(e_occ[0], order='C')
    e_I = np.asarray(e_occ[1], order='C')
    e_a = np.asarray(e_vir[0], order='C')
    e_A = np.asarray(e_vir[1], order='C')

    vv = np.zeros((nmo*nmo))
    vev = np.zeros((nmo*nmo))

    rank, size = mpi_helper.rank, mpi_helper.size
    istart = rank * noa // size
    iend = noa if rank == (size-1) else (rank+1) * noa // size

    fdrv(qxi.ctypes.data_as(ctypes.c_void_p),
         qja.ctypes.data_as(ctypes.c_void_p),
         qJA.ctypes.data_as(ctypes.c_void_p),
         e_i.ctypes.data_as(ctypes.c_void_p),
         e_I.ctypes.data_as(ctypes.c_void_p),
         e_a.ctypes.data_as(ctypes.c_void_p),
         e_A.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_double(os_factor), ctypes.c_double(ss_factor),
         ctypes.c_int(nmo), ctypes.c_int(noa), ctypes.c_int(nob),
         ctypes.c_int(nva), ctypes.c_int(nvb), ctypes.c_int(naux),
         ctypes.c_int(istart), ctypes.c_int(iend),
         vv.ctypes.data_as(ctypes.c_void_p),
         vev.ctypes.data_as(ctypes.c_void_p))

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_dfuagf2_lowmem(qxi, qja, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Wrapper for AGF2udf_vv_vev_islice_lowmem
    '''

    fdrv = getattr(libagf2, 'AGF2udf_vv_vev_islice_lowmem')

    naux = qxi[0].shape[0]
    noa, nob = e_occ[0].size, e_occ[1].size
    nva, nvb = e_vir[0].size, e_vir[1].size
    nmo = qxi[0].size // (naux*noa)
    assert qxi[0].size == (naux * nmo * noa)
    assert qja[0].size == (naux * noa * nva)
    assert qja[1].size == (naux * nob * nvb)

    qxi_a, qxi_b = qxi
    qja_a, qja_b = qja

    qxi = np.asarray(qxi_a, order='C')
    qja = np.asarray(qja_a, order='C')
    qJA = np.asarray(qja_b, order='C')
    e_i = np.asarray(e_occ[0], order='C')
    e_I = np.asarray(e_occ[1], order='C')
    e_a = np.asarray(e_vir[0], order='C')
    e_A = np.asarray(e_vir[1], order='C')

    vv = np.zeros((nmo*nmo))
    vev = np.zeros((nmo*nmo))

    rank, size = mpi_helper.rank, mpi_helper.size
    nomax = max(noa, nob)
    start = rank * (noa*nomax) // size
    end = (noa*nomax) if rank == (size-1) else (rank+1) * (noa*nomax) // size

    fdrv(qxi.ctypes.data_as(ctypes.c_void_p),
         qja.ctypes.data_as(ctypes.c_void_p),
         qJA.ctypes.data_as(ctypes.c_void_p),
         e_i.ctypes.data_as(ctypes.c_void_p),
         e_I.ctypes.data_as(ctypes.c_void_p),
         e_a.ctypes.data_as(ctypes.c_void_p),
         e_A.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_double(os_factor), ctypes.c_double(ss_factor),
         ctypes.c_int(nmo), ctypes.c_int(noa), ctypes.c_int(nob),
         ctypes.c_int(nva), ctypes.c_int(nvb), ctypes.c_int(naux),
         ctypes.c_int(start), ctypes.c_int(end),
         vv.ctypes.data_as(ctypes.c_void_p),
         vev.ctypes.data_as(ctypes.c_void_p))

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def build_mats_dfuagf2_outcore(qxi, qja, e_occ, e_vir, os_factor=1.0, ss_factor=1.0):
    ''' Python version of AGF2udf_vv_vev_islice to support outcore
    '''

    naux = qxi[0].shape[0]
    noa, nob = e_occ[0].size, e_occ[1].size
    nva, nvb = e_vir[0].size, e_vir[1].size
    nmo = qxi[0].size // (naux*noa)
    assert qxi[0].size == (naux * nmo * noa)
    assert qja[0].size == (naux * noa * nva)
    assert qja[1].size == (naux * nob * nvb)

    qxi_a, qxi_b = qxi
    qja_a, qja_b = qja

    vv = np.zeros((nmo, nmo))
    vev = np.zeros((nmo, nmo))

    fposa = ss_factor
    fnega = -ss_factor
    fposb = os_factor

    eja_a = lib.direct_sum('j,a->ja', e_occ[0], -e_vir[0]).ravel()
    eja_b = lib.direct_sum('j,a->ja', e_occ[1], -e_vir[1]).ravel()

    buf = (np.zeros((nmo, noa*nva)),
           np.zeros((nmo, nob*nvb)),
           np.zeros((nmo*noa, nva)))

    for i in mpi_helper.nrange(noa):
        qx_a = qxi_a.reshape(naux, nmo, noa)[:,:,i]
        xija_aa = lib.dot(qx_a.T, qja_a, c=buf[0])
        xija_ab = lib.dot(qx_a.T, qja_b, c=buf[1])
        xjia_aa = lib.dot(qxi_a.T, qja_a[:,i*nva:(i+1)*nva], c=buf[2])
        xjia_aa = xjia_aa.reshape(nmo, -1)

        eija_aa = eja_a + e_occ[0][i]
        eija_ab = eja_b + e_occ[0][i]

        vv = lib.dot(xija_aa, xija_aa.T, alpha=fposa, beta=1, c=vv)
        vv = lib.dot(xija_aa, xjia_aa.T, alpha=fnega, beta=1, c=vv)
        vv = lib.dot(xija_ab, xija_ab.T, alpha=fposb, beta=1, c=vv)

        exija_aa = xija_aa * eija_aa[None]
        exija_ab = xija_ab * eija_ab[None]

        vev = lib.dot(exija_aa, xija_aa.T, alpha=fposa, beta=1, c=vev)
        vev = lib.dot(exija_aa, xjia_aa.T, alpha=fnega, beta=1, c=vev)
        vev = lib.dot(exija_ab, xija_ab.T, alpha=fposb, beta=1, c=vev)

    vv = vv.reshape(nmo, nmo)
    vev = vev.reshape(nmo, nmo)

    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(vv)
    mpi_helper.allreduce_safe_inplace(vev)

    return vv, vev


def get_blksize(max_memory_total, *sizes):
    ''' Gets a block size such that the sum of the product of :attr:`sizes`
        with :attr:`blksize` is less than avail memory. If multiple tuples
        are provided, the maximum is used.
    '''

    if isinstance(sizes[0], tuple):
        sum_of_sizes = max([sum(x) for x in sizes])
    else:
        sum_of_sizes = sum(sizes)

    mem_avail = max_memory_total - lib.current_memory()[0]
    mem_avail *= 8e6  # MB -> bits
    sum_of_sizes *= 64  # 64 bits -> bits

    return int(mem_avail / sum_of_sizes)
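
# --- Editor's hedged usage sketch (not part of the original module) ---
# Minimal, self-contained illustration of cholesky_build: build a random
# positive-semidefinite moment pair (vv, vev) from arbitrary couplings and
# energies, then recover truncated auxiliary energies and couplings. All
# shapes and values below are assumptions chosen only for demonstration.
if __name__ == '__main__':
    np.random.seed(0)
    nmo, naux = 5, 8
    v = np.random.rand(nmo, naux)        # hypothetical couplings
    e = np.random.rand(naux)             # hypothetical auxiliary energies
    vv = np.dot(v, v.T)                  # zeroth moment, PSD by construction
    vev = np.dot(v * e[None], v.T)       # first moment
    e_trunc, c_trunc = cholesky_build(vv, vev)
    print('truncated energies shape:', e_trunc.shape,
          'couplings shape:', c_trunc.shape)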
datadog_checks_dev/tests/tooling/manifest_validator/input_constants.py
abraham-leal/integrations-core
663
12616883
<reponame>abraham-leal/integrations-core
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.dev.tooling.datastructures import JSONDict

# This file contains the constants used for V2 manifest validator testing
ORACLE_METADATA_CSV_EXAMPLE = [(0, {"metric_name": "oracle.session_count"})]

V2_VALID_MANIFEST = {
    "app_id": "datadog-oracle",
    "classifier_tags": [
        "Category::Marketplace",
        "Category::Cloud",
        "Category::Log Collection",
        "Supported OS::Windows",
        "Supported OS::Mac OS",
        "Offering::Integration",
        "Offering::UI Extension",
    ],
    "assets": {
        "dashboards": {"oracle": "assets/dashboards/example.json"},
        "integration": {
            "configuration": {"spec": "assets/configuration/spec.yaml"},
            "events": {"creates_events": True},
            "id": "oracle",
            "metrics": {
                "auto_install": True,
                "check": "oracle.session_count",
                "metadata_path": "metrics_metadata.csv",
                "prefix": "oracle.",
            },
            "service_checks": {"metadata_path": "assets/service_checks.json"},
            "source_type_name": "Oracle Database",
        },
    },
    "author": {
        "homepage": "https://www.datadoghq.com",
        "name": "Datadog",
        "sales_email": "<EMAIL>",
        "support_email": "<EMAIL>",
    },
    "display_on_public_website": True,
    "legal_terms": {},
    "manifest_version": "2.0.0",
    "oauth": {},
    "pricing": [{"billing_type": "free"}],
    "tile": {
        "changelog": "CHANGELOG.md",
        "configuration": "README.md#Setup",
        "description": "Oracle relational database system designed for enterprise grid computing",
        "media": [],
        "overview": "README.md#Overview",
        "title": "Oracle",
    },
}

VALID_MEDIA_MANIFEST = JSONDict(
    {
        "tile": {
            "media": [
                {
                    "media_type": "video",
                    "caption": "This is an example video caption!",
                    "image_url": "images/video_thumbnail.png",
                    "vimeo_id": 123456789,
                },
                {
                    "media_type": "image",
                    "caption": "This is an example image caption!",
                    "image_url": "images/acme_before.png",
                },
                {
                    "media_type": "image",
                    "caption": "This is an example image caption!",
                    "image_url": "images/acme_after.png",
                },
            ]
        }
    }
)

INVALID_MEDIA_MANIFEST_TOO_MANY_VIDEOS = JSONDict(
    {
        "tile": {
            "media": [
                {
                    "media_type": "video",
                    "caption": "This is an example video caption!",
                    "image_url": "images/video_thumbnail.png",
                    "vimeo_id": 123456789,
                },
                {
                    "media_type": "video",
                    "caption": "This is an example video caption!",
                    "image_url": "images/acme_before.png",
                    "vimeo_id": 123456789,
                },
            ]
        }
    }
)

# Note: the misspelled keys below ("cation", "imageurl", "meda_type", "captin")
# are intentional test fixtures exercising the bad-structure validation path.
INVALID_MEDIA_MANIFEST_BAD_STRUCTURE = JSONDict(
    {
        "tile": {
            "media": [
                {
                    "media_type": "video",
                    "cation": "This is an example video caption!",
                    "imageurl": "images/video_thumbnail.png",
                    "vimeo_id": 123456789,
                },
                {
                    "meda_type": "image",
                    "captin": "This is an example image caption!",
                    "image_url": "images/acme_before.png",
                },
            ]
        }
    }
)

INVALID_MEDIA_MANIFEST_INCORRECT_VIMEO_ID_TYPE = JSONDict(
    {
        "tile": {
            "media": [
                {
                    "media_type": "video",
                    "cation": "This is an example video caption!",
                    "image_url": "images/video_thumbnail.png",
                    "vimeo_id": "123456789",
                },
                {
                    "media_type": "image",
                    "caption": "This is an example image caption!",
                    "image_url": "images/acme_before.png",
                },
            ]
        }
    }
)

IMMUTABLE_ATTRIBUTES_V1_MANIFEST = {"manifest_version": "1.0.0"}
IMMUTABLE_ATTRIBUTES_V2_MANIFEST = JSONDict({"manifest_version": "2.0.0"})


class MockedResponseInvalid:
    status_code = 400

    def raise_for_status(self):
        raise AssertionError()

    def json(self):
        return "Invalid response for test!"


class MockedResponseValid:
    status_code = 200

    def raise_for_status(self):
        return


class MockedContextObj:
    obj = {
        'org': 'my-org',
        'orgs': {
            'my-org': {
                'api_key': '123abc',
                'app_key': 'app123',
                'dd_url': 'foo.com',
            }
        },
    }
model/cls/loss/lifted_structure_loss.py
qrsforever/torchcv
171
12616907
<filename>model/cls/loss/lifted_structure_loss.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Loss function for Image Classification.


import torch
import torch.nn as nn
import torch.nn.functional as F


class LiftedStructureLoss(nn.Module):
    def __init__(self, configer):
        super(LiftedStructureLoss, self).__init__()
        self.params_dict = dict()
        if 'lifted_structure_loss' in configer.get('loss', 'params'):
            self.params_dict = configer.get('loss', 'params')['lifted_structure_loss']

    @staticmethod
    def pdist(A, squared=False):
        # Pairwise (squared) Euclidean distances from the Gram matrix.
        prod = torch.mm(A, A.t())
        norm = prod.diag().unsqueeze(1).expand_as(prod)
        res = (norm + norm.t() - 2 * prod).clamp(min=1e-12)
        return res if squared else res.sqrt()

    def forward(self, embeddings, labels):
        d = self.pdist(embeddings, squared=False)
        pos = torch.eq(*[labels.unsqueeze(dim).expand_as(d) for dim in [0, 1]]).type_as(d)
        neg_i = torch.mul((self.params_dict['margin'] - d).exp(), 1 - pos).sum(1).expand_as(d).clamp(min=1e-12)
        return torch.sum(F.relu(pos.triu(1) * ((neg_i + neg_i.t()).log() + d)).pow(2)) / (pos.sum() - len(d) + 1e-8)
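
# --- Editor's hedged usage sketch (not part of the original module) ---
# Minimal illustration of LiftedStructureLoss with a stub configer. The nested
# {'loss': {'params': {'lifted_structure_loss': {'margin': 1.0}}}} layout and
# the stub's get() signature are assumptions made only so this runs standalone;
# the real project supplies its own Configer class.
if __name__ == '__main__':
    class _StubConfiger:
        def __init__(self, cfg):
            self._cfg = cfg

        def get(self, *keys):
            node = self._cfg
            for key in keys:
                node = node[key]
            return node

    configer = _StubConfiger({'loss': {'params': {'lifted_structure_loss': {'margin': 1.0}}}})
    criterion = LiftedStructureLoss(configer)
    embeddings = torch.randn(8, 16)          # 8 samples, 16-dim embeddings
    labels = torch.randint(0, 3, (8,))       # 3 arbitrary classes
    print('lifted structure loss:', criterion(embeddings, labels).item())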
helpers_cntk.py
cubean/ImageSimilarityUsingCntk
130
12616921
<filename>helpers_cntk.py
# -*- coding: utf-8 -*-
import os, random, pdb
import matplotlib.pyplot as plt
from cntk import constant, use_default_device, cross_entropy_with_softmax, classification_error
from cntk import load_model, Trainer, UnitType
from cntk.io import MinibatchSource, ImageDeserializer, StreamDefs, StreamDef
import cntk.io.transforms as xforms
from cntk.layers import placeholder, GlobalAveragePooling, Dropout, Dense
from cntk.learners import momentum_sgd, learning_rate_schedule, momentum_schedule
from cntk.logging import log_number_of_parameters, ProgressPrinter, graph
from cntk.logging.graph import find_by_name
from cntk.ops import input_variable, combine
from cntk.ops.functions import CloneMethod
from helpers import *
random.seed(0)

# NOTE: the functionality in this file is adapted and extended from CNTK's transfer learning tutorial:
#       https://github.com/Microsoft/CNTK/wiki/Build-your-own-image-classifier-using-Transfer-Learning


################################
# helper functions - cntk
################################
def printDeviceType(boGpuRequired = False):
    if use_default_device().type() != 0:
        print("Using GPU for CNTK training/scoring.")
    else:
        print("WARNING: using CPU for CNTK training/scoring.")
        if boGpuRequired:
            raise Exception("Cannot find GPU or GPU is already locked.")


# Creates a minibatch source for training or testing
def create_mb_source(map_file, image_width, image_height, num_channels, num_classes, boTrain):
    transforms = []
    if boTrain:
        # Scale to square-sized image. Without this the cropping transform would chop the larger
        # dimension of an image to make it squared, and then take 0.9 crops from within the squared image.
        transforms += [xforms.scale(width=2*image_width, height=2*image_height, channels=num_channels,
                                    interpolations='linear', scale_mode='pad', pad_value=114)]
        transforms += [xforms.crop(crop_type='randomside', side_ratio=0.9, jitter_type='uniratio')]  # Randomly crop square area
    transforms += [xforms.scale(width=image_width, height=image_height, channels=num_channels,       # Scale down and pad
                                interpolations='linear', scale_mode='pad', pad_value=114)]
    if boTrain:
        transforms += [xforms.color(brightness_radius=0.2, contrast_radius=0.2, saturation_radius=0.2)]

    return MinibatchSource(ImageDeserializer(map_file, StreamDefs(
            features = StreamDef(field='image', transforms=transforms),
            labels   = StreamDef(field='label', shape=num_classes))),
            randomize = boTrain,
            multithreaded_deserializer=True)


# Creates the network model for transfer learning
def create_model(base_model_file, input_features, num_classes, dropout_rate = 0.5, freeze_weights = False):
    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    #graph.plot(base_model, filename="base_model.pdf")  # Write graph visualization

    # Clone model until right before the pooling layer, ie. until including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name = "poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="prediction")(avgPoolDrop)
    return finalModel


# Trains a transfer learning model
def train_model(base_model_file, train_map_file, test_map_file, input_resolution,
                num_epochs, mb_size, max_train_images, lr_per_mb, momentum_per_mb,
                l2_reg_weight, dropout_rate, freeze_weights, num_channels = 3):
    # init
    image_width  = input_resolution
    image_height = input_resolution
    epoch_size_test  = len(readTable(test_map_file))
    epoch_size_train = len(readTable(train_map_file))
    epoch_size_train = min(epoch_size_train, max_train_images)
    num_classes = max(ToIntegers(getColumn(readTable(train_map_file), 1)) + ToIntegers(getColumn(readTable(test_map_file), 1))) + 1

    # Create the minibatch source
    minibatch_source_train = create_mb_source(train_map_file, image_width, image_height, num_channels, num_classes, True)
    minibatch_source_test  = create_mb_source(test_map_file,  image_width, image_height, num_channels, num_classes, False)

    # Define mapping from reader streams to network inputs
    label_input = input_variable(num_classes)
    image_input = input_variable((num_channels, image_height, image_width), name = "input")
    input_map = {
        image_input: minibatch_source_train['features'],
        label_input: minibatch_source_train['labels']
    }

    # Instantiate the transfer learning model and loss function
    cntkModel = create_model(base_model_file, image_input, num_classes, dropout_rate, freeze_weights)
    ce = cross_entropy_with_softmax(cntkModel, label_input)
    pe = classification_error(cntkModel, label_input)

    # Instantiate the trainer object
    lr_schedule = learning_rate_schedule(lr_per_mb, unit=UnitType.minibatch)
    mm_schedule = momentum_schedule(momentum_per_mb)
    learner = momentum_sgd(cntkModel.parameters, lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight)
    progress_writers = [ProgressPrinter(tag='Training', num_epochs=num_epochs)]
    trainer = Trainer(cntkModel, (ce, pe), learner, progress_writers)

    # Run training epochs
    print("Training transfer learning model for {0} epochs (epoch_size_train = {1}).".format(num_epochs, epoch_size_train))
    errsTest  = []
    errsTrain = []
    log_number_of_parameters(cntkModel)

    for epoch in range(num_epochs):
        # Train model
        err_numer = 0
        sample_counts = 0
        while sample_counts < epoch_size_train:  # Loop over minibatches in the epoch
            sample_count = min(mb_size, epoch_size_train - sample_counts)
            data = minibatch_source_train.next_minibatch(sample_count, input_map = input_map)
            trainer.train_minibatch(data)       # Update model with it
            sample_counts += sample_count       # Count samples processed so far
            err_numer += trainer.previous_minibatch_evaluation_average * sample_count

            if sample_counts % (100 * mb_size) == 0:
                print("Training: processed {0} samples".format(sample_counts))

            # Visualize training images
            # img_data = data[image_input].asarray()
            # for i in range(len(img_data)):
            #     debugImg = img_data[i].squeeze().swapaxes(0, 1).swapaxes(1, 2) / 255.0
            #     imshow(debugImg)

        # Compute accuracy on training and test sets
        errsTrain.append(err_numer / float(sample_counts))
        trainer.summarize_training_progress()
        errsTest.append(cntkComputeTestError(trainer, minibatch_source_test, mb_size,
                                             epoch_size_test, input_map))
        trainer.summarize_test_progress()

    # Plot training progress
    plt.plot(errsTrain, 'b-', errsTest, 'g-')
    plt.xlabel('Epoch number')
    plt.ylabel('Error')
    plt.title('Training error (blue), test error (green)')
    plt.draw()
    return cntkModel


# Evaluate model accuracy
def cntkComputeTestError(trainer, minibatch_source_test, mb_size, epoch_size, input_map):
    acc_numer = 0
    sample_counts = 0
    while sample_counts < epoch_size:  # Loop over minibatches in the epoch
        sample_count = min(mb_size, epoch_size - sample_counts)
        data = minibatch_source_test.next_minibatch(sample_count, input_map = input_map)
        acc_numer += trainer.test_minibatch(data) * sample_count
        sample_counts += sample_count
    return acc_numer / float(sample_counts)


def runCntkModel(model, map_file, node_name = [], mb_size = 1):
    # Get minibatch source
    num_classes = model.shape[0]
    (image_width, image_height) = find_by_name(model, "input").shape[1:]
    minibatch_source = create_mb_source(map_file, image_width, image_height, 3, num_classes, False)
    features_si = minibatch_source['features']

    # Set output node
    if node_name == []:
        output_node = model
    else:
        node_in_graph = model.find_by_name(node_name)
        output_node = combine([node_in_graph.owner])

    # Evaluate DNN for all images
    data = []
    sample_counts = 0
    imgPaths = getColumn(readTable(map_file), 0)
    while sample_counts < len(imgPaths):
        sample_count = min(mb_size, len(imgPaths) - sample_counts)
        mb = minibatch_source.next_minibatch(sample_count)
        output = output_node.eval(mb[features_si])
        data += [o.flatten() for o in output]
        sample_counts += sample_count
        if sample_counts % 100 < mb_size:
            print("Evaluating DNN (output dimension = {}) for image {} of {}: {}".format(
                len(data[-1]), sample_counts, len(imgPaths), imgPaths[sample_counts - 1]))
    data = [[imgPath, feat] for imgPath, feat in zip(imgPaths, data)]
    return data


def featurizeImages(model, imgFilenamesPath, imgDir, map_file, node_name = [], mb_size = 1):
    # Get image paths
    imgFilenames = loadFromPickle(imgFilenamesPath)
    imgLabelMap = getImgLabelMap(imgFilenames, imgDir)
    imgLabelMap = zip(getColumn(imgLabelMap, 0), [0] * len(imgLabelMap))  # Set labels to all 0's since not used anyway

    # Run CNTK model for each image
    # Note: CNTK's MinibatchSource/ImageReader currently does not support in-memory
    #       calls, hence need to save input map to disk.
    writeTable(map_file, imgLabelMap)
    cntkOutput = runCntkModel(model, map_file, node_name)

    # Store all features in a dictionary
    features = dict()
    for imgPath, feat in cntkOutput:
        imgFilename = os.path.basename(imgPath)
        imgSubdir = os.path.split(os.path.split(imgPath)[0])[1]
        key = imgSubdir + "/" + imgFilename
        features[key] = feat
    return features
geosnap/tests/test_dynamics.py
WawNun/geosnap
148
12616930
<gh_stars>100-1000
import numpy as np
import os

from geosnap import Community
from geosnap.analyze import sequence, transition

RTOL = 0.00001

import pytest

try:
    LTDB = os.environ["LTDB_SAMPLE"]
except:
    LTDB = None


@pytest.mark.skipif(not LTDB, reason="unable to locate LTDB data")
def test_transition():
    """
    Testing transition modeling.
    """
    columbus = Community.from_ltdb(msa_fips="18140")
    columns = [
        "median_household_income",
        "p_poverty_rate",
        "p_edu_college_greater",
        "p_unemployment_rate",
    ]
    columbus1 = columbus.cluster(
        columns=[
            "median_household_income",
            "p_poverty_rate",
            "p_edu_college_greater",
            "p_unemployment_rate",
        ],
        method="ward",
    )

    # 1. Markov modeling
    m = transition(columbus1.gdf, cluster_col="ward")
    mp = np.array(
        [
            [0.79189189, 0.00540541, 0.0027027, 0.13243243, 0.06216216, 0.00540541],
            [0.0203252, 0.75609756, 0.10569106, 0.11382114, 0.0, 0.00406504],
            [0.00917431, 0.20183486, 0.75229358, 0.01834862, 0.0, 0.01834862],
            [0.1959799, 0.18341709, 0.00251256, 0.61809045, 0.0, 0.0],
            [0.32307692, 0.0, 0.0, 0.0, 0.66153846, 0.01538462],
            [0.09375, 0.0625, 0.0, 0.0, 0.0, 0.84375],
        ]
    )
    np.testing.assert_allclose(m.p, mp, RTOL)

    # 2. Spatial Markov modeling
    np.random.seed(5)
    sm = transition(columbus1.gdf, cluster_col="ward", w_type="queen")
    smp = np.array(
        [
            [0.82413793, 0.0, 0.0, 0.10689655, 0.06896552, 0.0],
            [0.25, 0.5, 0.125, 0.125, 0.0, 0.0],
            [0.5, 0.0, 0.5, 0.0, 0.0, 0.0],
            [0.23809524, 0.0952381, 0.0, 0.66666667, 0.0, 0.0],
            [0.21621622, 0.0, 0.0, 0.0, 0.75675676, 0.02702703],
            [0.16666667, 0.0, 0.0, 0.0, 0.0, 0.83333333],
        ]
    )
    np.testing.assert_allclose(sm.P[0], smp, RTOL)


@pytest.mark.skipif(not LTDB, reason="unable to locate LTDB data")
def test_sequence():
    """
    Testing sequence modeling.
    """
    columbus = Community.from_ltdb(msa_fips="18140")
    columns = [
        "median_household_income",
        "p_poverty_rate",
        "p_edu_college_greater",
        "p_unemployment_rate",
    ]
    columbus1 = columbus.cluster(
        columns=[
            "median_household_income",
            "p_poverty_rate",
            "p_edu_college_greater",
            "p_unemployment_rate",
        ],
        method="ward",
    )

    # 1. Transition-oriented optimal matching
    output = sequence(
        columbus1.gdf, seq_clusters=5, dist_type="tran", cluster_col="ward"
    )
    values = np.array([3, 3, 0, 2, 3, 1])
    np.testing.assert_allclose(output[1].values[0], values, RTOL)

    # 2. Hamming distance
    output = sequence(
        columbus1.gdf, seq_clusters=5, dist_type="hamming", cluster_col="ward"
    )
    values = np.array([3, 3, 0, 2, 3, 2])
    np.testing.assert_allclose(output[1].values[0], values, RTOL)
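
# --- Editor's hedged illustration (not part of the original test module) ---
# The transition() calls above assert against a row-normalised Markov matrix
# estimated from each tract's cluster label in consecutive periods. A plain
# numpy sketch of that row normalisation, using made-up label sequences (not
# geosnap's actual implementation), looks like this:
def _toy_transition_matrix(label_sequences, k):
    counts = np.zeros((k, k))
    for seq in label_sequences:
        for a, b in zip(seq[:-1], seq[1:]):
            counts[a, b] += 1          # count observed state-to-state moves
    rowsum = counts.sum(axis=1, keepdims=True)
    rowsum[rowsum == 0] = 1.0          # avoid division by zero for unvisited states
    return counts / rowsum


if __name__ == '__main__':
    toy = [[0, 0, 1, 2], [2, 1, 1, 0], [0, 2, 2, 2]]
    print(_toy_transition_matrix(toy, k=3))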