# generator/modules/__module__.py | vv111y/deepo
# -*- coding: utf-8 -*-
def parametrized(dec):
    """Meta-decorator that lets a plain decorator accept arguments."""
    def layer(*args, **kwargs):
        def repl(f):
            return dec(f, *args, **kwargs)
        return repl
    return layer
@parametrized
def dependency(module, *_deps):
module.deps = _deps
return module
@parametrized
def source(module, _source):
module.source = _source
return module
@parametrized
def version(module, _ver):
module.version = _ver
return module
@dependency()
@source('unknown')
@version('latest')
class Module(object):
def __init__(self, composer):
self.composer = composer
def __repr__(self):
return '%-13s %-6s (%s)' % (
self.name(),
self.version,
self.source)
def build(self):
pass
def expose(self):
return []
def name(self):
return self.__class__.__name__.lower()
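
# A minimal usage sketch: a hypothetical subclass showing how the
# decorators above attach metadata to a module class. The name
# `Tensorflow`, the 'pip' source, and the version string are assumptions
# for illustration, not taken from the generator's real module set.
@dependency(Module)
@source('pip')
@version('1.15')
class Tensorflow(Module):
    def build(self):
        return 'RUN pip install tensorflow==%s' % self.version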

# Validation/HGCalValidation/python/HGVHistoProducerAlgoBlock_cfi.py | Purva-Chaudhari/cmssw
import FWCore.ParameterSet.Config as cms
HGVHistoProducerAlgoBlock = cms.PSet(
minEta = cms.double(-4.5),
maxEta = cms.double(4.5),
nintEta = cms.int32(100),
useFabsEta = cms.bool(False),
#parameters for energy
minEne = cms.double(0.),
maxEne = cms.double(500.),
nintEne = cms.int32(250),
#parameters for pt
minPt = cms.double(0.),
maxPt = cms.double(100.),
nintPt = cms.int32(100),
#parameters for phi
minPhi = cms.double(-3.2),
maxPhi = cms.double(3.2),
nintPhi = cms.int32(80),
    #parameters for counting mixed hits simclusters
minMixedHitsSimCluster = cms.double(0.),
maxMixedHitsSimCluster = cms.double(800.),
nintMixedHitsSimCluster = cms.int32(100),
    #parameters for counting mixed hits layer clusters
minMixedHitsCluster = cms.double(0.),
maxMixedHitsCluster = cms.double(800.),
nintMixedHitsCluster = cms.int32(100),
#parameters for the total amount of energy clustered by all layer clusters (fraction over caloparticles)
minEneCl = cms.double(0.),
maxEneCl = cms.double(110.),
nintEneCl = cms.int32(110),
#parameters for the longitudinal depth barycenter
minLongDepBary = cms.double(0.),
maxLongDepBary = cms.double(110.),
nintLongDepBary = cms.int32(110),
#z position of vertex
minZpos = cms.double(-550.),
maxZpos = cms.double(550.),
nintZpos = cms.int32(1100),
#Parameters for the total number of simclusters per layer
minTotNsimClsperlay = cms.double(0.),
maxTotNsimClsperlay = cms.double(50.),
nintTotNsimClsperlay = cms.int32(50),
#Parameters for the total number of layer clusters per layer
minTotNClsperlay = cms.double(0.),
maxTotNClsperlay = cms.double(50.),
nintTotNClsperlay = cms.int32(50),
#Parameters for the energy clustered by layer clusters per layer (fraction)
minEneClperlay = cms.double(0.),
maxEneClperlay = cms.double(110.),
nintEneClperlay = cms.int32(110),
#Parameters for the score both for:
#1. calo particle to layer clusters association per layer
#2. layer cluster to calo particles association per layer
minScore = cms.double(0.),
maxScore = cms.double(1.02),
nintScore = cms.int32(51),
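    # (51 uniform bins over [0, 1.02] give a bin width of exactly 0.02,
    # so a perfect score of 1.0 falls inside a bin rather than on the
    # histogram's upper edge)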
#Parameters for shared energy fraction. That is:
#1. Fraction of each of the layer clusters energy related to a
# calo particle over that calo particle's energy.
#2. Fraction of each of the calo particles energy
# related to a layer cluster over that layer cluster's energy.
minSharedEneFrac = cms.double(0.),
maxSharedEneFrac = cms.double(1.),
nintSharedEneFrac = cms.int32(100),
minTSTSharedEneFracEfficiency = cms.double(0.5),
#Same as above for tracksters
minTSTSharedEneFrac = cms.double(0.),
maxTSTSharedEneFrac = cms.double(1.0),
nintTSTSharedEneFrac = cms.int32(100),
#Parameters for the total number of simclusters per thickness
minTotNsimClsperthick = cms.double(0.),
maxTotNsimClsperthick = cms.double(800.),
nintTotNsimClsperthick = cms.int32(100),
#Parameters for the total number of layer clusters per thickness
minTotNClsperthick = cms.double(0.),
maxTotNClsperthick = cms.double(800.),
nintTotNClsperthick = cms.int32(100),
    #Parameters for the total number of cells per thickness per layer
minTotNcellsperthickperlayer = cms.double(0.),
maxTotNcellsperthickperlayer = cms.double(500.),
nintTotNcellsperthickperlayer = cms.int32(100),
#Parameters for the distance of cluster cells to seed cell per thickness per layer
minDisToSeedperthickperlayer = cms.double(0.),
maxDisToSeedperthickperlayer = cms.double(300.),
nintDisToSeedperthickperlayer = cms.int32(100),
#Parameters for the energy weighted distance of cluster cells to seed cell per thickness per layer
minDisToSeedperthickperlayerenewei = cms.double(0.),
maxDisToSeedperthickperlayerenewei = cms.double(10.),
nintDisToSeedperthickperlayerenewei = cms.int32(50),
#Parameters for the distance of cluster cells to max cell per thickness per layer
minDisToMaxperthickperlayer = cms.double(0.),
maxDisToMaxperthickperlayer = cms.double(300.),
nintDisToMaxperthickperlayer = cms.int32(100),
#Parameters for the energy weighted distance of cluster cells to max cell per thickness per layer
minDisToMaxperthickperlayerenewei = cms.double(0.),
maxDisToMaxperthickperlayerenewei = cms.double(50.),
nintDisToMaxperthickperlayerenewei = cms.int32(50),
    #Parameters for the distance from the seed cell to the max cell per thickness per layer
minDisSeedToMaxperthickperlayer = cms.double(0.),
maxDisSeedToMaxperthickperlayer = cms.double(300.),
nintDisSeedToMaxperthickperlayer = cms.int32(100),
#Parameters for the energy of a cluster per thickness per layer
minClEneperthickperlayer = cms.double(0.),
maxClEneperthickperlayer = cms.double(10.),
nintClEneperthickperlayer = cms.int32(100),
#Parameters for the energy density of cluster cells per thickness
minCellsEneDensperthick = cms.double(0.),
maxCellsEneDensperthick = cms.double(100.),
nintCellsEneDensperthick = cms.int32(200),
#Parameters for the total number of tracksters per event
    #We always treat one event as two events, one in +z and one in -z
minTotNTSTs = cms.double(0.),
maxTotNTSTs = cms.double(50.),
nintTotNTSTs = cms.int32(50),
#Parameters for the total number of layer clusters in trackster
minTotNClsinTSTs = cms.double(0.),
maxTotNClsinTSTs = cms.double(400.),
nintTotNClsinTSTs = cms.int32(100),
#Parameters for the total number of layer clusters in trackster per layer
minTotNClsinTSTsperlayer = cms.double(0.),
maxTotNClsinTSTsperlayer = cms.double(50.),
nintTotNClsinTSTsperlayer = cms.int32(50),
#Parameters for the multiplicity of layer clusters in trackster
minMplofLCs = cms.double(0.),
maxMplofLCs = cms.double(20.),
nintMplofLCs = cms.int32(20),
#Parameters for cluster size
minSizeCLsinTSTs = cms.double(0.),
maxSizeCLsinTSTs = cms.double(50.),
nintSizeCLsinTSTs = cms.int32(50),
#Parameters for the energy of a cluster per multiplicity
minClEnepermultiplicity = cms.double(0.),
maxClEnepermultiplicity = cms.double(10.),
nintClEnepermultiplicity = cms.int32(10),
#parameters for X
minX = cms.double(-300.),
maxX = cms.double(300.),
nintX = cms.int32(100),
#parameters for Y
minY = cms.double(-300.),
maxY = cms.double(300.),
nintY = cms.int32(100),
#parameters for Z
minZ = cms.double(-550.),
maxZ = cms.double(550.),
nintZ = cms.int32(1100)
)

# alexa/listService/listService.py | eifinger/appdaemon-scripts
import appdaemon.plugins.hass.hassapi as hass
#
# Provide the list of HA entities for Alexa Apps
#
#
# Args:
#
# switchable: dict of switchable entities
# temperature: dict of temperature sensors
# door: dict of reed sensors showing if the door is completely open
# door_tilted: dict of reed sensors showing if a door is partially/leaning open
# window: dict of reed sensors showing if a window is open
#
#
# Release Notes
#
# Version 1.0:
# Initial Version
class ListService(hass.Hass):
def initialize(self):
return
def getSwitchable(self):
return self.args["switchable"]
def getTemperature(self):
return self.args["temperature"]
def getDoor(self):
return self.args["door"]
def getWindow(self):
return self.args["window"]
def getDoorTilted(self):
return self.args["door_tilted"]

# src/rayoptics/elem/elements.py | diplodocuslongus/ray-optics
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 <NAME>
""" Module for element modeling
.. Created on Sun Jan 28 16:27:01 2018
.. codeauthor: <NAME>
"""
from collections import namedtuple
import itertools
from packaging import version
import numpy as np
from anytree import Node
import rayoptics
import rayoptics.util.rgbtable as rgbt
import rayoptics.oprops.thinlens as thinlens
from rayoptics.elem import parttree
from rayoptics.elem.profiles import Spherical, Conic
from rayoptics.elem.surface import Surface
from rayoptics.seq.gap import Gap
from rayoptics.seq.medium import Glass, glass_decode
import rayoptics.gui.appcmds as cmds
from rayoptics.gui.actions import (Action, AttrAction, SagAction, BendAction,
ReplaceGlassAction)
import opticalglass.glassfactory as gfact
import opticalglass.glasspolygons as gp
GraphicsHandle = namedtuple('GraphicsHandle', ['polydata', 'tfrm', 'polytype',
'color'], defaults=(None,))
GraphicsHandle.polydata.__doc__ = "poly data in local coordinates"
GraphicsHandle.tfrm.__doc__ = "global transformation for polydata"
GraphicsHandle.polytype.__doc__ = "'polygon' (for filled) or 'polyline'"
GraphicsHandle.color.__doc__ = "RGBA for the polydata or None for default"
""" tuple grouping together graphics rendering data
Attributes:
polydata: poly data in local coordinates
tfrm: global transformation for polydata
polytype: 'polygon' (for filled) or 'polyline'
"""
# --- Factory functions
def create_thinlens(power=0., indx=1.5, sd=None, **kwargs):
tl = thinlens.ThinLens(power=power, ref_index=indx, max_ap=sd, **kwargs)
tle = ThinElement(tl)
tree = tle.tree()
return [[tl, None, None, 1, +1]], [tle], tree
def create_mirror(c=0.0, r=None, cc=0.0, ec=None,
power=None, profile=None, sd=None, **kwargs):
'''Create a sequence and element for a mirror.
Args:
c: vertex curvature
r: vertex radius of curvature
cc: conic constant
ec: 1 + cc
power: optical power of the mirror
sd: semi-diameter
profile: Spherical or Conic type, or a profile instance
'''
delta_n = kwargs['delta_n'] if 'delta_n' in kwargs else -2
if power:
cv = power/delta_n
elif r:
cv = 1.0/r
else:
cv = c
if ec:
k = ec - 1.0
else:
k = cc
if profile is Spherical:
prf = Spherical(c=cv)
elif profile is Conic:
prf = Conic(c=cv, cc=k)
elif profile is not None:
prf = profile
else:
if k == 0.0:
prf = Spherical(c=cv)
else:
prf = Conic(c=cv, cc=k)
m = Surface(profile=prf, interact_mode='reflect', max_ap=sd,
delta_n=delta_n, **kwargs)
ele_kwargs = {'label': kwargs['label']} if 'label' in kwargs else {}
me = Mirror(m, sd=sd, **ele_kwargs)
tree = me.tree()
return [[m, None, None, 1, -1]], [me], tree
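# A minimal usage sketch for the factory above (the radius and
# semi-diameter values are assumptions for illustration): a concave
# parabolic mirror.
#
#     seq, eles, tree = create_mirror(r=-200., cc=-1., sd=25.)
#
# cc=-1 selects a Conic profile here; with cc=0 (and no explicit
# profile) a Spherical profile is used instead.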
def lens_from_power(power=0., bending=0., th=None, sd=1.,
med=None, nom_wvl='d'):
if med is None:
med = Glass()
rndx = med.rindex(nom_wvl)
if th is None:
th = sd/5
if bending == -1:
cv2 = -power/(rndx - 1)
cv1 = 0
else:
B = (bending - 1)/(bending + 1)
a = (rndx - 1)*(th/rndx)*B
b = 1 - B
c = -power/(rndx - 1)
cv1 = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
cv2 = cv1*B
return cv1, cv2, th, rndx, sd
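# Sanity check (assumed input): with bending=0 the shape factor is
# B = (0 - 1)/(0 + 1) = -1, and since cv2 = cv1*B the two curvatures come
# out equal and opposite, i.e. an equiconvex form for positive power.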
def create_lens(power=0., bending=0., th=None, sd=1., med=None, **kwargs):
if med is None:
med = Glass()
lens = lens_from_power(power=power, bending=bending, th=th, sd=sd,
med=med)
cv1, cv2, th, rndx, sd = lens
s1 = Surface(profile=Spherical(c=cv1), max_ap=sd, delta_n=(rndx - 1))
s2 = Surface(profile=Spherical(c=cv2), max_ap=sd, delta_n=(1 - rndx))
g = Gap(t=th, med=med)
le = Element(s1, s2, g, sd=sd)
tree = le.tree()
return [[s1, g, None, rndx, 1], [s2, None, None, 1, 1]], [le], tree
def achromat(power, Va, Vb):
"""Compute lens powers for a thin doublet achromat, given their V-numbers."""
power_a = (Va/(Va - Vb))*power
power_b = (Vb/(Vb - Va))*power
return power_a, power_b
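# Worked check with assumed V-numbers typical of a crown/flint pair
# (Va=60, Vb=36) and a total power of 1.0:
#   power_a = (60/(60 - 36))*1.0 = +2.5
#   power_b = (36/(36 - 60))*1.0 = -1.5
# The two element powers sum back to the requested total of 1.0.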
def create_cemented_doublet(power=0., bending=0., th=None, sd=1.,
glasses=('N-BK7,Schott', 'N-F2,Schott'),
**kwargs):
from opticalglass.spectral_lines import get_wavelength
from opticalglass import glass
wvls = np.array([get_wavelength(w) for w in ['d', 'F', 'C']])
gla_a = gfact.create_glass(glasses[0])
rndx_a = gla_a.calc_rindex(wvls)
Va, PcDa = glass.calc_glass_constants(*rndx_a)
gla_b = gfact.create_glass(glasses[1])
rndx_b = gla_b.calc_rindex(wvls)
Vb, PcDb = glass.calc_glass_constants(*rndx_b)
power_a, power_b = achromat(power, Va, Vb)
if th is None:
th = sd/4
t1 = 3*th/4
t2 = th/4
if power_a < 0:
t1, t2 = t2, t1
lens_a = lens_from_power(power=power_a, bending=bending, th=t1, sd=sd,
med=gla_a)
cv1, cv2, t1, indx_a, sd = lens_a
# cv1 = power_a/(rndx_a[0] - 1)
# delta_cv = -cv1/2
# cv1 += delta_cv
# cv2 = delta_cv
# cv3 = power_b/(1 - rndx_b[0]) + delta_cv
indx_b = rndx_b[0]
cv3 = (power_b/(indx_b-1) - cv2)/((t2*cv2*(indx_b-1)/indx_b) - 1)
s1 = Surface(profile=Spherical(c=cv1), max_ap=sd,
delta_n=(rndx_a[0] - 1))
s2 = Surface(profile=Spherical(c=cv2), max_ap=sd,
delta_n=(rndx_b[0] - rndx_a[0]))
s3 = Surface(profile=Spherical(c=cv3), max_ap=sd,
delta_n=(1 - rndx_b[0]))
g1 = Gap(t=t1, med=gla_a)
g2 = Gap(t=t2, med=gla_b)
g_tfrm = np.identity(3), np.array([0., 0., 0.])
ifc_list = []
ifc_list.append([0, s1, g1, 1, g_tfrm])
ifc_list.append([1, s2, g2, 1, g_tfrm])
ifc_list.append([2, s3, None, 1, g_tfrm])
ce = CementedElement(ifc_list)
tree = ce.tree()
return [[s1, g1, None, rndx_a, 1],
[s2, g2, None, rndx_b, 1],
[s3, None, None, 1, 1]], [ce], tree
def create_dummy_plane(sd=1., **kwargs):
s = Surface(**kwargs)
se = DummyInterface(s, sd=sd)
tree = se.tree()
return [[s, None, None, 1, +1]], [se], tree
def create_air_gap(t=0., **kwargs):
g = Gap(t=t)
ag = AirGap(g, **kwargs)
tree = ag.tree()
return g, ag, tree
def create_from_file(filename, **kwargs):
opm = cmds.open_model(filename)
sm = opm['seq_model']
osp = opm['optical_spec']
em = opm['ele_model']
pt = opm['part_tree']
if len(pt.nodes_with_tag(tag='#element')) == 0:
parttree.elements_from_sequence(em, sm, pt)
if 'power' in kwargs:
desired_power = kwargs['power']
cur_power = osp.parax_data.fod.power
# scale_factor is linear, power is 1/linear
# so use reciprocal of power to compute scale_factor
scale_factor = cur_power/desired_power
sm.apply_scale_factor(scale_factor)
seq = [list(node) for node in sm.path(start=1, stop=-1)]
seq[-1][2] = None
sys_nodes = pt.nodes_with_tag(tag='#element#airgap',
not_tag='#object#image')
eles = [node.id for node in sys_nodes]
root = Node('file', id=None, tag='#group', children=sys_nodes)
return seq, eles, root
def calc_render_color_for_material(matl):
""" get element color based on V-number of glass"""
try:
gc = float(matl.glass_code())
except AttributeError:
return (255, 255, 255, 64) # white
else:
# set element color based on V-number
indx, vnbr = glass_decode(gc)
dsg, rgb = gp.find_glass_designation(indx, vnbr)
if rgb is None:
return [228, 237, 243, 64] # ED designation
# rgb = Element.clut.get_color(vnbr)
return rgb
# --- Element definitions
class Element():
"""Lens element domain model. Manage rendering and selection/editing.
An Element consists of 2 Surfaces, 1 Gap, and edge_extent information.
Attributes:
parent: the :class:`ElementModel`
label: string identifier
s1: first/origin :class:`~rayoptics.seq.interface.Interface`
s2: second/last :class:`~rayoptics.seq.interface.Interface`
gap: element thickness and material :class:`~rayoptics.seq.gap.Gap`
tfrm: global transform to element origin, (Rot3, trans3)
medium_name: the material filling the gap
flat1, flat2: semi-diameter of flat or None. Setting to None will result in
re-evaluation of flat ID
do_flat1, do_flat2: 'if concave', 'always', 'never', 'if convex'
handles: dict of graphical entities
actions: dict of actions associated with the graphical handles
"""
clut = rgbt.RGBTable(filename='red_blue64.csv',
data_range=[10.0, 100.])
label_format = 'E{}'
serial_number = 0
def __init__(self, s1, s2, g, tfrm=None, idx=0, idx2=1, sd=1.,
label=None):
if label is None:
Element.serial_number += 1
self.label = Element.label_format.format(Element.serial_number)
else:
self.label = label
if tfrm is not None:
self.tfrm = tfrm
else:
self.tfrm = (np.identity(3), np.array([0., 0., 0.]))
self.s1 = s1
self.s1_indx = idx
self.s2 = s2
self.s2_indx = idx2
self.gap = g
self.medium_name = self.gap.medium.name()
self._sd = sd
self.flat1 = None
self.flat2 = None
self.do_flat1 = 'if concave' # alternatives are 'never', 'always',
self.do_flat2 = 'if concave' # or 'if convex'
self.handles = {}
self.actions = {}
@property
def sd(self):
"""Semi-diameter """
return self._sd
@sd.setter
def sd(self, semidiam):
self._sd = semidiam
self.edge_extent = (-semidiam, semidiam)
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['parent']
del attrs['tfrm']
del attrs['s1']
del attrs['s2']
del attrs['gap']
del attrs['handles']
del attrs['actions']
return attrs
def __str__(self):
fmt = 'Element: {!r}, {!r}, t={:.4f}, sd={:.4f}, glass: {}'
return fmt.format(self.s1.profile, self.s2.profile, self.gap.thi,
self.sd, self.gap.medium.name())
def sync_to_restore(self, ele_model, surfs, gaps, tfrms):
# when restoring, we want to use the stored indices to look up the
# new object instances
self.parent = ele_model
self.tfrm = tfrms[self.s1_indx]
self.s1 = surfs[self.s1_indx]
self.gap = gaps[self.s1_indx]
self.s2 = surfs[self.s2_indx]
if not hasattr(self, 'medium_name'):
self.medium_name = self.gap.medium.name()
if not hasattr(self, 'do_flat1'):
self.do_flat1 = 'if concave'
if not hasattr(self, 'do_flat2'):
self.do_flat2 = 'if concave'
def sync_to_update(self, seq_model):
# when updating, we want to use the stored object instances to get the
# current indices into the interface list (e.g. to handle insertion and
# deletion of interfaces)
self.s1_indx = seq_model.ifcs.index(self.s1)
self.s2_indx = seq_model.ifcs.index(self.s2)
self.medium_name = self.gap.medium.name()
def tree(self, **kwargs):
"""Build tree linking sequence to element model. """
default_tag = '#element#lens'
tag = default_tag + kwargs.get('tag', '')
zdir = kwargs.get('z_dir', 1)
# Interface branch 1
e = Node('E', id=self, tag=tag)
p1 = Node('p1', id=self.s1.profile, tag='#profile', parent=e)
Node(f'i{self.s1_indx}', id=self.s1, tag='#ifc', parent=p1)
# Gap branch
t = Node('t', id=self.gap, tag='#thic', parent=e)
Node(f'g{self.s1_indx}', id=(self.gap, zdir), tag='#gap', parent=t)
# Interface branch 2
p2 = Node('p2', id=self.s2.profile, tag='#profile', parent=e)
Node(f'i{self.s2_indx}', id=self.s2, tag='#ifc', parent=p2)
return e
def reference_interface(self):
return self.s1
def reference_idx(self):
return self.s1_indx
def interface_list(self):
return [self.s1, self.s2]
def gap_list(self):
return [self.gap]
def get_bending(self):
cv1 = self.s1.profile_cv
cv2 = self.s2.profile_cv
delta_cv = cv1 - cv2
bending = 0.
if delta_cv != 0.0:
bending = (cv1 + cv2)/delta_cv
return bending
def set_bending(self, bending):
cv1 = self.s1.profile_cv
cv2 = self.s2.profile_cv
delta_cv = cv1 - cv2
cv2_new = 0.5*(bending - 1.)*delta_cv
cv1_new = bending*delta_cv - cv2_new
self.s1.profile_cv = cv1_new
self.s2.profile_cv = cv2_new
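        # Round-trip check: with delta_cv = cv1 - cv2 held fixed,
        # cv1_new + cv2_new = bending*delta_cv and cv1_new - cv2_new =
        # delta_cv, so get_bending() recovers the bending value set here.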
def update_size(self):
extents = np.union1d(self.s1.get_y_aperture_extent(),
self.s2.get_y_aperture_extent())
self.edge_extent = (extents[0], extents[-1])
self.sd = max(self.s1.surface_od(), self.s2.surface_od())
return self.sd
def compute_flat(self, s):
ca = s.surface_od()
if (1.0 - ca/self.sd) >= 0.05:
flat = ca
else:
flat = None
return flat
def extent(self):
if hasattr(self, 'edge_extent'):
return self.edge_extent
else:
return (-self.sd, self.sd)
def render_shape(self):
        def use_flat(do_flat, is_concave):
            if do_flat == 'always':
                return True
            elif do_flat == 'if concave' and is_concave:
                return True
            elif do_flat == 'if convex' and not is_concave:
                return True
            return False
is_concave_s1 = self.s1.profile_cv < 0.0
is_concave_s2 = self.s2.profile_cv > 0.0
if use_flat(self.do_flat1, is_concave_s1):
if self.flat1 is None:
flat1 = self.flat1 = self.compute_flat(self.s1)
else:
flat1 = self.flat1
else:
flat1 = None
poly = self.s1.full_profile(self.extent(), flat1)
if use_flat(self.do_flat2, is_concave_s2):
if self.flat2 is None:
flat2 = self.flat2 = self.compute_flat(self.s2)
else:
flat2 = self.flat2
else:
flat2 = None
poly2 = self.s2.full_profile(self.extent(), flat2, -1)
for p in poly2:
p[0] += self.gap.thi
poly += poly2
poly.append(poly[0])
return poly
def render_handles(self, opt_model):
self.handles = {}
ifcs_gbl_tfrms = opt_model.seq_model.gbl_tfrms
shape = self.render_shape()
color = calc_render_color_for_material(self.gap.medium)
self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon',
color)
extent = self.extent()
if self.flat1 is not None:
extent_s1 = self.flat1,
else:
extent_s1 = extent
poly_s1 = self.s1.full_profile(extent_s1, None)
gh1 = GraphicsHandle(poly_s1, ifcs_gbl_tfrms[self.s1_indx], 'polyline')
self.handles['s1_profile'] = gh1
if self.flat2 is not None:
extent_s2 = self.flat2,
else:
extent_s2 = extent
poly_s2 = self.s2.full_profile(extent_s2, None, -1)
gh2 = GraphicsHandle(poly_s2, ifcs_gbl_tfrms[self.s2_indx], 'polyline')
self.handles['s2_profile'] = gh2
poly_sd_upr = []
poly_sd_upr.append([poly_s1[-1][0], extent[1]])
poly_sd_upr.append([poly_s2[0][0]+self.gap.thi, extent[1]])
self.handles['sd_upr'] = GraphicsHandle(poly_sd_upr, self.tfrm,
'polyline')
poly_sd_lwr = []
poly_sd_lwr.append([poly_s2[-1][0]+self.gap.thi, extent[0]])
poly_sd_lwr.append([poly_s1[0][0], extent[0]])
self.handles['sd_lwr'] = GraphicsHandle(poly_sd_lwr, self.tfrm,
'polyline')
poly_ct = []
poly_ct.append([0., 0.])
poly_ct.append([self.gap.thi, 0.])
self.handles['ct'] = GraphicsHandle(poly_ct, self.tfrm, 'polyline')
return self.handles
def handle_actions(self):
self.actions = {}
shape_actions = {}
shape_actions['pt'] = BendAction(self)
shape_actions['y'] = AttrAction(self, 'sd')
shape_actions['glass'] = ReplaceGlassAction(self.gap)
self.actions['shape'] = shape_actions
s1_prof_actions = {}
s1_prof_actions['pt'] = SagAction(self.s1)
self.actions['s1_profile'] = s1_prof_actions
s2_prof_actions = {}
s2_prof_actions['pt'] = SagAction(self.s2)
self.actions['s2_profile'] = s2_prof_actions
sd_upr_action = {}
sd_upr_action['y'] = AttrAction(self, 'sd')
self.actions['sd_upr'] = sd_upr_action
sd_lwr_action = {}
sd_lwr_action['y'] = AttrAction(self, 'sd')
self.actions['sd_lwr'] = sd_lwr_action
ct_action = {}
ct_action['x'] = AttrAction(self.gap, 'thi')
self.actions['ct'] = ct_action
return self.actions
class Mirror():
label_format = 'M{}'
serial_number = 0
def __init__(self, ifc, tfrm=None, idx=0, sd=1., thi=None, z_dir=1.0,
label=None):
if label is None:
Mirror.serial_number += 1
self.label = Mirror.label_format.format(Mirror.serial_number)
else:
self.label = label
self.render_color = (158, 158, 158, 64)
if tfrm is not None:
self.tfrm = tfrm
else:
self.tfrm = (np.identity(3), np.array([0., 0., 0.]))
self.s = ifc
self.s_indx = idx
self.z_dir = z_dir
self.sd = sd
self.flat = None
self.thi = thi
self.medium_name = 'Mirror'
self.handles = {}
self.actions = {}
def get_thi(self):
thi = self.thi
if self.thi is None:
thi = 0.05*self.sd
return thi
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['parent']
del attrs['tfrm']
del attrs['s']
del attrs['handles']
del attrs['actions']
return attrs
def __str__(self):
thi = self.get_thi()
fmt = 'Mirror: {!r}, t={:.4f}, sd={:.4f}'
return fmt.format(self.s.profile, thi, self.sd)
def sync_to_restore(self, ele_model, surfs, gaps, tfrms):
self.parent = ele_model
self.tfrm = tfrms[self.s_indx]
self.s = surfs[self.s_indx]
if not hasattr(self, 'medium_name'):
self.medium_name = 'Mirror'
def sync_to_update(self, seq_model):
self.s_indx = seq_model.ifcs.index(self.s)
def tree(self, **kwargs):
default_tag = '#element#mirror'
tag = default_tag + kwargs.get('tag', '')
# Interface branch
m = Node('M', id=self, tag=tag)
p = Node('p', id=self.s.profile, tag='#profile', parent=m)
Node(f'i{self.s_indx}', id=self.s, tag='#ifc', parent=p)
# Gap branch = None
return m
def reference_interface(self):
return self.s
def reference_idx(self):
return self.s_indx
def interface_list(self):
return [self.s]
def gap_list(self):
return []
def update_size(self):
self.edge_extent = self.s.get_y_aperture_extent()
self.sd = self.s.surface_od()
return self.sd
def extent(self):
if hasattr(self, 'edge_extent'):
return self.edge_extent
else:
self.edge_extent = self.s.get_y_aperture_extent()
return self.edge_extent
def substrate_offset(self):
thi = self.get_thi()
# We want to extend the mirror substrate along the same direction
# of the incoming ray. The mirror's z_dir is following reflection so
# flip the sign to get the preceding direction.
offset = -self.z_dir*thi
return offset
def render_shape(self):
poly = self.s.full_profile(self.extent(), self.flat)
poly2 = self.s.full_profile(self.extent(), self.flat, -1)
offset = self.substrate_offset()
for p in poly2:
p[0] += offset
poly += poly2
poly.append(poly[0])
return poly
def render_handles(self, opt_model):
self.handles = {}
ifcs_gbl_tfrms = opt_model.seq_model.gbl_tfrms
self.handles['shape'] = GraphicsHandle(self.render_shape(), self.tfrm,
'polygon', self.render_color)
poly = self.s.full_profile(self.extent(), None)
self.handles['s_profile'] = GraphicsHandle(poly,
ifcs_gbl_tfrms[self.s_indx],
'polyline')
offset = self.substrate_offset()
poly_sd_upr = []
poly_sd_upr.append(poly[-1])
poly_sd_upr.append([poly[-1][0]+offset, poly[-1][1]])
self.handles['sd_upr'] = GraphicsHandle(poly_sd_upr, self.tfrm,
'polyline')
poly_sd_lwr = []
poly_sd_lwr.append(poly[0])
poly_sd_lwr.append([poly[0][0]+offset, poly[0][1]])
self.handles['sd_lwr'] = GraphicsHandle(poly_sd_lwr, self.tfrm,
'polyline')
return self.handles
def handle_actions(self):
self.actions = {}
shape_actions = {}
shape_actions['pt'] = SagAction(self.s)
self.actions['shape'] = shape_actions
s_prof_actions = {}
s_prof_actions['pt'] = SagAction(self.s)
self.actions['s_profile'] = s_prof_actions
sd_upr_action = {}
sd_upr_action['y'] = AttrAction(self, 'edge_extent[1]')
self.actions['sd_upr'] = sd_upr_action
sd_lwr_action = {}
sd_lwr_action['y'] = AttrAction(self, 'edge_extent[0]')
self.actions['sd_lwr'] = sd_lwr_action
return self.actions
class CementedElement():
"""Cemented element domain model. Manage rendering and selection/editing.
A CementedElement consists of 3 or more Surfaces, 2 or more Gaps, and
edge_extent information.
Attributes:
parent: the :class:`ElementModel`
label: string identifier
idxs: list of seq_model interface indices
ifcs: list of :class:`~rayoptics.seq.interface.Interface`
gaps: list of thickness and material :class:`~rayoptics.seq.gap.Gap`
tfrm: global transform to element origin, (Rot3, trans3)
medium_name: the material filling the gap
flats: semi-diameter of flat if ifc is concave, or None
handles: dict of graphical entities
actions: dict of actions associated with the graphical handles
"""
clut = rgbt.RGBTable(filename='red_blue64.csv',
data_range=[10.0, 100.])
label_format = 'CE{}'
serial_number = 0
def __init__(self, ifc_list, label=None):
if label is None:
CementedElement.serial_number += 1
self.label = CementedElement.label_format.format(
CementedElement.serial_number)
else:
self.label = label
g_tfrm = ifc_list[0][4]
if g_tfrm is not None:
self.tfrm = g_tfrm
else:
self.tfrm = (np.identity(3), np.array([0., 0., 0.]))
self.idxs = []
self.ifcs = []
self.gaps = []
self.medium_name = ''
for interface in ifc_list:
i, ifc, g, z_dir, g_tfrm = interface
self.idxs.append(i)
self.ifcs.append(ifc)
if g is not None:
self.gaps.append(g)
if self.medium_name != '':
self.medium_name += ', '
self.medium_name += g.medium.name()
if len(self.gaps) == len(self.ifcs):
self.gaps.pop()
self.medium_name = self.medium_name.rpartition(',')[0]
self._sd = self.update_size()
self.flats = [None]*len(self.ifcs)
self.handles = {}
self.actions = {}
@property
def sd(self):
"""Semi-diameter """
return self._sd
@sd.setter
def sd(self, semidiam):
self._sd = semidiam
self.edge_extent = (-semidiam, semidiam)
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['parent']
del attrs['tfrm']
del attrs['ifcs']
del attrs['gaps']
del attrs['flats']
del attrs['handles']
del attrs['actions']
return attrs
def __str__(self):
fmt = 'CementedElement: {}'
return fmt.format(self.idxs)
def sync_to_restore(self, ele_model, surfs, gaps, tfrms):
# when restoring, we want to use the stored indices to look up the
# new object instances
self.parent = ele_model
self.ifcs = [surfs[i] for i in self.idxs]
self.gaps = [gaps[i] for i in self.idxs[:-1]]
self.tfrm = tfrms[self.idxs[0]]
self.flats = [None]*len(self.ifcs)
if not hasattr(self, 'medium_name'):
self.medium_name = self.gap.medium.name()
def sync_to_update(self, seq_model):
# when updating, we want to use the stored object instances to get the
# current indices into the interface list (e.g. to handle insertion and
# deletion of interfaces)
self.idxs = [seq_model.ifcs.index(ifc) for ifc in self.ifcs]
    def element_list(self):
        idxs = self.idxs
        ifcs = self.ifcs
        gaps = self.gaps
        e_list = []
        for i in range(len(gaps)):
            e = Element(ifcs[i], ifcs[i+1], gaps[i],
                        sd=self.sd, tfrm=self.tfrm,
                        idx=idxs[i], idx2=idxs[i+1])
            e_list.append(e)
        return e_list
def tree(self, **kwargs):
default_tag = '#element#cemented'
tag = default_tag + kwargs.get('tag', '')
zdir = kwargs.get('z_dir', 1)
ce = Node('CE', id=self, tag=tag)
for i, sg in enumerate(itertools.zip_longest(self.ifcs, self.gaps),
start=1):
ifc, gap = sg
            pid = f'p{i}'
p = Node(pid, id=ifc.profile, tag='#profile', parent=ce)
Node(f'i{self.idxs[i-1]}', id=ifc, tag='#ifc', parent=p)
# Gap branch
if gap is not None:
t = Node(f't{i}', id=gap, tag='#thic', parent=ce)
Node(f'g{self.idxs[i-1]}', id=(gap, zdir),
tag='#gap', parent=t)
return ce
def reference_interface(self):
return self.ifcs[0]
def reference_idx(self):
return self.idxs[0]
def interface_list(self):
return self.ifcs
def gap_list(self):
return self.gaps
def update_size(self):
extents = np.union1d(self.ifcs[0].get_y_aperture_extent(),
self.ifcs[-1].get_y_aperture_extent())
self.edge_extent = (extents[0], extents[-1])
self.sd = max([ifc.surface_od() for ifc in self.ifcs])
return self.sd
def compute_flat(self, s):
ca = s.surface_od()
if (1.0 - ca/self.sd) >= 0.05:
flat = ca
else:
flat = None
return flat
def extent(self):
if hasattr(self, 'edge_extent'):
return self.edge_extent
else:
return (-self.sd, self.sd)
def render_shape(self):
if self.ifcs[0].profile_cv < 0.0:
self.flats[0] = self.compute_flat(self.ifcs[0])
else:
self.flats[0] = None
if self.ifcs[-1].profile_cv > 0.0:
self.flats[-1] = self.compute_flat(self.ifcs[-1])
else:
self.flats[-1] = None
# generate the profile polylines
self.profiles = []
sense = 1
for ifc, flat in zip(self.ifcs, self.flats):
poly = ifc.full_profile(self.extent(), flat, sense)
self.profiles.append(poly)
sense = -sense
# offset the profiles wrt the element origin
thi = 0
for i, poly_profile in enumerate(self.profiles[1:]):
thi += self.gaps[i].thi
for p in poly_profile:
p[0] += thi
# just return outline
poly_shape = []
poly_shape += self.profiles[0]
poly_shape += self.profiles[-1]
poly_shape.append(poly_shape[0])
return poly_shape
def render_handles(self, opt_model):
self.handles = {}
shape = self.render_shape()
# self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon')
for i, gap in enumerate(self.gaps):
poly = []
poly += self.profiles[i]
poly += self.profiles[i+1]
poly.append(self.profiles[i][0])
color = calc_render_color_for_material(gap.medium)
self.handles['shape'+str(i+1)] = GraphicsHandle(poly, self.tfrm,
'polygon', color)
return self.handles
def handle_actions(self):
self.actions = {}
return self.actions
class ThinElement():
label_format = 'TL{}'
serial_number = 0
def __init__(self, ifc, tfrm=None, idx=0, sd=None, label=None):
if label is None:
ThinElement.serial_number += 1
self.label = ThinElement.label_format.format(
ThinElement.serial_number)
else:
self.label = label
self.render_color = (192, 192, 192)
if tfrm is not None:
self.tfrm = tfrm
else:
self.tfrm = (np.identity(3), np.array([0., 0., 0.]))
self.intrfc = ifc
self.intrfc_indx = idx
self.medium_name = 'Thin Element'
if sd is not None:
self.sd = sd
else:
self.sd = ifc.max_aperture
self.handles = {}
self.actions = {}
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['parent']
del attrs['tfrm']
del attrs['intrfc']
del attrs['handles']
del attrs['actions']
return attrs
def __str__(self):
return str(self.intrfc)
def tree(self, **kwargs):
default_tag = '#element#thinlens'
tag = default_tag + kwargs.get('tag', '')
tle = Node('TL', id=self, tag=tag)
Node('tl', id=self.intrfc, tag='#ifc', parent=tle)
return tle
def sync_to_restore(self, ele_model, surfs, gaps, tfrms):
self.parent = ele_model
self.tfrm = tfrms[self.intrfc_indx]
self.intrfc = surfs[self.intrfc_indx]
if not hasattr(self, 'medium_name'):
self.medium_name = 'Thin Element'
ro_version = ele_model.opt_model.ro_version
if version.parse(ro_version) < version.parse("0.7.0a"):
ThinElement.serial_number += 1
self.label = ThinElement.label_format.format(ThinElement.serial_number)
def sync_to_update(self, seq_model):
self.intrfc_indx = seq_model.ifcs.index(self.intrfc)
def reference_interface(self):
return self.intrfc
def reference_idx(self):
return self.intrfc_indx
def interface_list(self):
return [self.intrfc]
def gap_list(self):
return []
def update_size(self):
self.sd = self.intrfc.surface_od()
return self.sd
def render_shape(self):
poly = self.intrfc.full_profile((-self.sd, self.sd))
return poly
def render_handles(self, opt_model):
self.handles = {}
shape = self.render_shape()
self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon',
self.render_color)
return self.handles
def handle_actions(self):
self.actions = {}
return self.actions
class DummyInterface():
label_format = 'D{}'
serial_number = 0
def __init__(self, ifc, idx=0, sd=None, tfrm=None, label=None):
if label is None:
DummyInterface.serial_number += 1
self.label = DummyInterface.label_format.format(
DummyInterface.serial_number)
else:
self.label = label
self.render_color = (192, 192, 192)
if tfrm is not None:
self.tfrm = tfrm
else:
self.tfrm = (np.identity(3), np.array([0., 0., 0.]))
self.ref_ifc = ifc
self.idx = idx
self.medium_name = 'Interface'
if sd is not None:
self.sd = sd
else:
self.sd = ifc.max_aperture
self.handles = {}
self.actions = {}
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['parent']
del attrs['tfrm']
del attrs['ref_ifc']
del attrs['handles']
del attrs['actions']
return attrs
def __str__(self):
return str(self.ref_ifc)
def sync_to_restore(self, ele_model, surfs, gaps, tfrms):
self.parent = ele_model
self.tfrm = tfrms[self.idx]
self.ref_ifc = surfs[self.idx]
if not hasattr(self, 'medium_name'):
self.medium_name = 'Interface'
def sync_to_update(self, seq_model):
self.idx = seq_model.ifcs.index(self.ref_ifc)
def tree(self, **kwargs):
default_tag = '#dummyifc'
tag = default_tag + kwargs.get('tag', '')
di = Node('DI', id=self, tag=tag)
p = Node('p', id=self.ref_ifc.profile, tag='#profile', parent=di)
Node(f'i{self.idx}', id=self.ref_ifc, tag='#ifc', parent=p)
return di
def reference_interface(self):
return self.ref_ifc
def reference_idx(self):
return self.idx
def interface_list(self):
return [self.ref_ifc]
def gap_list(self):
return []
def update_size(self):
self.sd = self.ref_ifc.surface_od()
return self.sd
def render_shape(self):
poly = self.ref_ifc.full_profile((-self.sd, self.sd))
return poly
def render_handles(self, opt_model):
self.handles = {}
self.handles['shape'] = GraphicsHandle(self.render_shape(), self.tfrm,
'polyline')
return self.handles
def handle_actions(self):
self.actions = {}
def get_adj_spaces():
seq_model = self.parent.opt_model.seq_model
if self.idx > 0:
before = seq_model.gaps[self.idx-1].thi
else:
before = None
if self.idx < seq_model.get_num_surfaces() - 1:
after = seq_model.gaps[self.idx].thi
else:
after = None
return (before, after)
def set_adj_spaces(cur_value, change):
seq_model = self.parent.opt_model.seq_model
if cur_value[0] is not None:
seq_model.gaps[self.idx-1].thi = cur_value[0] + change
if cur_value[1] is not None:
seq_model.gaps[self.idx].thi = cur_value[1] - change
slide_action = {}
slide_action['x'] = Action(get_adj_spaces, set_adj_spaces)
self.actions['shape'] = slide_action
return self.actions
class AirGap():
label_format = 'AG{}'
serial_number = 0
def __init__(self, g, idx=0, tfrm=None, label=None):
if label is None:
AirGap.serial_number += 1
self.label = AirGap.label_format.format(AirGap.serial_number)
else:
self.label = label
if tfrm is not None:
self.tfrm = tfrm
else:
self.tfrm = (np.identity(3), np.array([0., 0., 0.]))
self.render_color = (237, 243, 254, 64) # light blue
self.gap = g
self.medium_name = self.gap.medium.name()
self.idx = idx
self.handles = {}
self.actions = {}
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['parent']
del attrs['tfrm']
del attrs['gap']
del attrs['handles']
del attrs['actions']
return attrs
def __str__(self):
return str(self.gap)
def sync_to_restore(self, ele_model, surfs, gaps, tfrms):
self.parent = ele_model
self.gap = gaps[self.idx]
self.tfrm = tfrms[self.idx]
if not hasattr(self, 'render_color'):
self.render_color = (237, 243, 254, 64) # light blue
if not hasattr(self, 'medium_name'):
self.medium_name = self.gap.medium.name()
ro_version = ele_model.opt_model.ro_version
if version.parse(ro_version) < version.parse("0.7.0a"):
AirGap.serial_number += 1
self.label = AirGap.label_format.format(AirGap.serial_number)
def sync_to_update(self, seq_model):
self.idx = seq_model.gaps.index(self.gap)
def tree(self, **kwargs):
default_tag = '#airgap'
tag = default_tag + kwargs.get('tag', '')
ag = Node('AG', id=self, tag=tag)
t = Node('t', id=self.gap, tag='#thic', parent=ag)
zdir = kwargs.get('z_dir', 1)
Node(f'g{self.idx}', id=(self.gap, zdir), tag='#gap', parent=t)
return ag
def reference_interface(self):
return None
def reference_idx(self):
return self.idx
def interface_list(self):
return []
def gap_list(self):
return [self.gap]
def update_size(self):
pass
def render_handles(self, opt_model):
self.handles = {}
poly_ct = []
poly_ct.append([0., 0.])
poly_ct.append([self.gap.thi, 0.])
# Modify the tfrm to account for any decenters following
# the reference ifc.
tfrm = self.tfrm
decenter = opt_model.seq_model.ifcs[self.idx].decenter
if decenter is not None:
r_global, t_global = tfrm
r_after_ifc, t_after_ifc = decenter.tform_after_surf()
t = r_global.dot(t_after_ifc) + t_global
r = r_global if r_after_ifc is None else r_global.dot(r_after_ifc)
tfrm = r, t
self.handles['ct'] = GraphicsHandle(poly_ct, tfrm, 'polyline')
return self.handles
def handle_actions(self):
self.actions = {}
ct_action = {}
ct_action['x'] = AttrAction(self.gap, 'thi')
self.actions['ct'] = ct_action
return self.actions
# --- Element model
class ElementModel:
"""Maintain the element based representation of the optical model
Attributes:
opt_model: the :class:`~rayoptics.optical.opticalmodel.OpticalModel`
elements: list of element type things
"""
def __init__(self, opt_model, **kwargs):
self.opt_model = opt_model
self.elements = []
def reset(self):
self.__init__(self.opt_model)
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['opt_model']
return attrs
def sync_to_restore(self, opt_model):
self.opt_model = opt_model
seq_model = opt_model.seq_model
surfs = seq_model.ifcs
gaps = seq_model.gaps
tfrms = seq_model.compute_global_coords(1)
# special processing for older models
# self.airgaps_from_sequence(seq_model, tfrms)
# self.add_dummy_interface_at_image(seq_model, tfrms)
self.reset_serial_numbers()
for i, e in enumerate(self.elements, start=1):
e.sync_to_restore(self, surfs, gaps, tfrms)
if not hasattr(e, 'label'):
e.label = e.label_format.format(i)
self.sequence_elements()
# self.relabel_airgaps()
def reset_serial_numbers(self):
Element.serial_number = 0
Mirror.serial_number = 0
CementedElement.serial_number = 0
ThinElement.serial_number = 0
DummyInterface.serial_number = 0
AirGap.serial_number = 0
def airgaps_from_sequence(self, seq_model, tfrms):
""" add airgaps and dummy interfaces to an older version model """
for e in self.elements:
if isinstance(e, AirGap):
return # found an AirGap, model probably OK
num_elements = 0
seq_model = self.opt_model.seq_model
for i, g in enumerate(seq_model.gaps):
if g.medium.name().lower() == 'air':
if i > 0:
s = seq_model.ifcs[i]
tfrm = tfrms[i]
num_elements = self.process_airgap(
seq_model, i, g, s, tfrm,
num_elements, add_ele=False)
def add_dummy_interface_at_image(self, seq_model, tfrms):
if len(self.elements) and self.elements[-1].label == 'Image':
return
s = seq_model.ifcs[-1]
idx = seq_model.get_num_surfaces() - 1
di = DummyInterface(s, sd=s.surface_od(), tfrm=tfrms[-1], idx=idx,
label='Image')
self.opt_model.part_tree.add_element_to_tree(di, tag='#image')
self.add_element(di)
def update_model(self, **kwargs):
seq_model = self.opt_model['seq_model']
tfrms = seq_model.compute_global_coords(1)
# dynamically build element list from part_tree
part_tree = self.opt_model['part_tree']
nodes = part_tree.nodes_with_tag(tag='#element#airgap#dummyifc')
elements = [n.id for n in nodes]
# hook or unhook elements from ele_model
cur_set = set(self.elements)
new_set = set(elements)
added_ele = list(new_set.difference(cur_set))
for e in added_ele:
e.parent = self
removed_ele = list(cur_set.difference(new_set))
for e in removed_ele:
e.parent = None
# update the elements
for e in elements:
e.update_size()
e.sync_to_update(seq_model)
e.tfrm = tfrms[e.reference_idx()]
self.elements = elements
self.sequence_elements()
def sequence_elements(self):
""" Sort elements in order of reference interfaces in seq_model """
seq_model = self.opt_model.seq_model
# sort by element reference interface sequential index
self.elements.sort(key=lambda e: e.reference_idx())
# Make sure z_dir matches the sequential model. Used to get
# the correct substrate offset.
if hasattr(seq_model, 'z_dir'):
for e in self.elements:
if hasattr(e, 'z_dir'):
e.z_dir = seq_model.z_dir[e.reference_idx()]
def relabel_airgaps(self):
for i, e in enumerate(self.elements):
if isinstance(e, AirGap):
eb = self.elements[i-1].label
ea = self.elements[i+1].label
e.label = AirGap.label_format.format(eb + '-' + ea)
def add_element(self, e):
e.parent = self
self.elements.append(e)
def remove_element(self, e):
e.parent = None
self.elements.remove(e)
def remove_node(self, e_node):
part_tree = self.opt_model.part_tree
nodes = part_tree.nodes_with_tag(tag='#element#airgap#dummyifc',
root=e_node)
eles = [n.id for n in nodes]
for e in eles:
self.remove_element(e)
def get_num_elements(self):
return len(self.elements)
def list_model(self, tag='#element#dummyifc'):
nodes = self.opt_model.part_tree.nodes_with_tag(tag=tag)
elements = [n.id for n in nodes]
for i, ele in enumerate(elements):
print("%d: %s (%s): %s" %
(i, ele.label, type(ele).__name__, ele))
def list_elements(self):
for i, ele in enumerate(self.elements):
print("%d: %s (%s): %s" %
(i, ele.label, type(ele).__name__, ele))
def element_type(self, i):
return type(self.elements[i]).__name__

# fireworks/user_objects/firetasks/tests/test_filepad_tasks.py | jmmshn/fireworks
__author__ = "<NAME>, <NAME>"
import os
import unittest
from ruamel.yaml import YAML
from fireworks.user_objects.firetasks.filepad_tasks import (
AddFilesTask,
DeleteFilesTask,
GetFilesByQueryTask,
GetFilesTask,
)
from fireworks.utilities.filepad import FilePad
module_dir = os.path.abspath(os.path.dirname(__file__))
class FilePadTasksTest(unittest.TestCase):
def setUp(self):
self.paths = [os.path.join(module_dir, "write.yaml"), os.path.join(module_dir, "delete.yaml")]
self.identifiers = ["write", "delete"]
self.fp = FilePad.auto_load()
def test_addfilestask_run(self):
t = AddFilesTask(paths=self.paths, identifiers=self.identifiers)
t.run_task({})
write_file_contents, _ = self.fp.get_file("write")
with open(self.paths[0]) as f:
self.assertEqual(write_file_contents, f.read().encode())
del_file_contents, _ = self.fp.get_file("delete")
with open(self.paths[1]) as f:
self.assertEqual(del_file_contents, f.read().encode())
def test_deletefilestask_run(self):
t = DeleteFilesTask(identifiers=self.identifiers)
t.run_task({})
file_contents, doc = self.fp.get_file("write")
self.assertIsNone(file_contents)
self.assertIsNone(doc)
file_contents, doc = self.fp.get_file("delete")
self.assertIsNone(file_contents)
self.assertIsNone(doc)
def test_getfilestask_run(self):
t = AddFilesTask(paths=self.paths, identifiers=self.identifiers)
t.run_task({})
dest_dir = os.path.abspath(".")
identifiers = ["write"]
new_file_names = ["write_2.yaml"]
t = GetFilesTask(identifiers=identifiers, dest_dir=dest_dir, new_file_names=new_file_names)
t.run_task({})
write_file_contents, _ = self.fp.get_file("write")
with open(os.path.join(dest_dir, new_file_names[0])) as f:
self.assertEqual(write_file_contents, f.read().encode())
os.remove(os.path.join(dest_dir, new_file_names[0]))
    def test_getfilesbyquerytask_identifiers_run(self):
        """Tests querying objects added with explicit identifiers from FilePad by metadata"""
        t = AddFilesTask(paths=self.paths, identifiers=self.identifiers, metadata={"key": "value"})
        t.run_task({})
        dest_dir = os.path.abspath(".")
        new_file_names = ["test_file.yaml"]
        t = GetFilesByQueryTask(query={"metadata->key": "value"}, dest_dir=dest_dir, new_file_names=new_file_names)
        t.run_task({})
        test_file_contents, _ = self.fp.get_file("write")
        self.assertEqual(test_file_contents, open(os.path.join(dest_dir, new_file_names[0])).read().encode())
        os.remove(os.path.join(dest_dir, new_file_names[0]))
def test_getfilesbyquerytask_run(self):
"""Tests querying objects from FilePad by metadata"""
with open("original_test_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["original_test_file.txt"], identifiers=["some_identifier"], metadata={"key": "value"})
t.run_task({})
os.remove("original_test_file.txt")
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"}, dest_dir=dest_dir, new_file_names=["queried_test_file.txt"]
)
t.run_task({})
test_file_contents, _ = self.fp.get_file("some_identifier")
with open(os.path.join(dest_dir, "queried_test_file.txt")) as f:
self.assertEqual(test_file_contents, f.read().encode())
os.remove(os.path.join(dest_dir, "queried_test_file.txt"))
def test_getfilesbyquerytask_metafile_run(self):
"""Tests writing metadata to a yaml file"""
with open("original_test_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["original_test_file.txt"], identifiers=["test_identifier"], metadata={"key": "value"})
t.run_task({})
os.remove("original_test_file.txt")
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
meta_file=True,
meta_file_suffix=".meta.yaml",
dest_dir=dest_dir,
new_file_names=["queried_test_file.txt"],
)
t.run_task({})
with open("queried_test_file.txt.meta.yaml") as f:
yaml = YAML(typ="safe")
metadata = yaml.load(f)
self.assertEqual(metadata["key"], "value")
os.remove(os.path.join(dest_dir, "queried_test_file.txt"))
os.remove(os.path.join(dest_dir, "queried_test_file.txt.meta.yaml"))
def test_getfilesbyquerytask_ignore_empty_result_run(self):
"""Tests on ignoring empty results from FilePad query"""
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
fizzle_empty_result=False,
dest_dir=dest_dir,
new_file_names=["queried_test_file.txt"],
)
t.run_task({})
# test successful if no exception raised
def test_getfilesbyquerytask_raise_empty_result_run(self):
"""Tests on raising exception on empty results from FilePad query"""
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
fizzle_empty_result=True,
dest_dir=dest_dir,
new_file_names=["queried_test_file.txt"],
)
with self.assertRaises(ValueError):
t.run_task({})
# test successful if exception raised
def test_getfilesbyquerytask_ignore_degenerate_file_name(self):
"""Tests on ignoring degenerate file name in result from FilePad query"""
with open("degenerate_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value"})
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write("Some other file with some other content BUT same file name")
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_other_identifier"], metadata={"key": "value"}
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(query={"metadata->key": "value"}, fizzle_degenerate_file_name=False)
t.run_task({})
# test successful if no exception raised
def test_getfilesbyquerytask_raise_degenerate_file_name(self):
"""Tests on raising exception on degenerate file name from FilePad query"""
with open("degenerate_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value"})
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write("Some other file with some other content BUT same file name")
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_other_identifier"], metadata={"key": "value"}
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(query={"metadata->key": "value"}, fizzle_degenerate_file_name=True)
with self.assertRaises(ValueError):
t.run_task({})
# test successful if exception raised
def test_getfilesbyquerytask_sort_ascending_name_run(self):
"""Tests on sorting queried files in ascending order"""
file_contents = ["Some file with some content", "Some other file with some other content"]
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[0])
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value", "sort_key": 0}
)
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[-1])
t = AddFilesTask(
paths=["degenerate_file.txt"],
identifiers=["some_other_identifier"],
metadata={"key": "value", "sort_key": 1},
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(
query={"metadata->key": "value"}, fizzle_degenerate_file_name=False, sort_key="sort_key", sort_direction=1
)
t.run_task({})
with open("degenerate_file.txt") as f:
self.assertEqual(file_contents[-1], f.read())
def test_getfilesbyquerytask_sort_descending_name_run(self):
"""Tests on sorting queried files in descending order"""
file_contents = ["Some file with some content", "Some other file with some other content"]
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[0])
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value", "sort_key": 10}
)
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[-1])
t = AddFilesTask(
paths=["degenerate_file.txt"],
identifiers=["some_other_identifier"],
metadata={"key": "value", "sort_key": 20},
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
fizzle_degenerate_file_name=False,
sort_key="metadata.sort_key",
sort_direction=-1,
)
t.run_task({})
with open("degenerate_file.txt") as f:
self.assertEqual(file_contents[0], f.read())
os.remove("degenerate_file.txt")
def test_addfilesfrompatterntask_run(self):
t = AddFilesTask(paths="*.yaml", directory=module_dir)
t.run_task({})
write_file_contents, _ = self.fp.get_file(self.paths[0])
with open(self.paths[0]) as f:
self.assertEqual(write_file_contents, f.read().encode())
del_file_contents, wdoc = self.fp.get_file(self.paths[1])
with open(self.paths[1]) as f:
self.assertEqual(del_file_contents, f.read().encode())
def tearDown(self):
self.fp.reset()
if __name__ == "__main__":
unittest.main()

# var/spack/repos/builtin/packages/r-insight/package.py | LiamBindle/spack
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RInsight(RPackage):
"""Easy Access to Model Information for Various Model Objects
A tool to provide an easy, intuitive and consistent access to information
contained in various R models, like model formulas, model terms,
information about random effects, data that was used to fit the model or
data from response variables. 'insight' mainly revolves around two types of
functions: Functions that find (the names of) information, starting with
'find_', and functions that get the underlying data, starting with 'get_'.
The package has a consistent syntax and works with many different model
objects, where otherwise functions to access these information are
missing."""
homepage = "https://easystats.github.io/insight/"
cran = "insight"
version('0.14.1', sha256='0e7761997a46ee33039cdeff1779dbc210de3644e4444c6e893e4ef2f12cc129')
depends_on('[email protected]:', type=('build', 'run'))

# config_files/sthv1/tsm_baseline.py | shreyas-bk/TPN
model = dict(
type='TSN2D',
backbone=dict(
type='ResNet',
pretrained='modelzoo://resnet50',
nsegments=8,
depth=50,
out_indices=(3,),
tsm=True,
bn_eval=False,
partial_bn=False),
spatial_temporal_module=dict(
type='SimpleSpatialModule',
spatial_type='avg',
spatial_size=7),
segmental_consensus=dict(
type='SimpleConsensus',
consensus_type='avg'),
cls_head=dict(
type='ClsHead',
with_avg_pool=False,
temporal_feature_size=1,
spatial_feature_size=1,
dropout_ratio=0.5,
in_channels=2048,
num_classes=174))
train_cfg = None
test_cfg = None
# dataset settings
dataset_type = 'RawFramesDataset'
data_root = ''
data_root_val = ''
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
videos_per_gpu=8,
workers_per_gpu=8,
train=dict(
type=dataset_type,
ann_file='data/sthv1/train_videofolder.txt',
img_prefix=data_root,
img_norm_cfg=img_norm_cfg,
num_segments=8,
new_length=1,
new_step=1,
random_shift=True,
modality='RGB',
image_tmpl='{:05d}.jpg',
img_scale=256,
input_size=224,
flip_ratio=0.5,
resize_keep_ratio=True,
resize_crop=True,
color_jitter=True,
color_space_aug=True,
oversample=None,
max_distort=1,
test_mode=False),
val=dict(
type=dataset_type,
ann_file='data/sthv1/val_videofolder.txt',
img_prefix=data_root_val,
img_norm_cfg=img_norm_cfg,
num_segments=8,
new_length=1,
new_step=1,
random_shift=False,
modality='RGB',
image_tmpl='{:05d}.jpg',
img_scale=256,
input_size=224,
flip_ratio=0,
resize_keep_ratio=True,
oversample=None,
test_mode=False),
test=dict(
type=dataset_type,
ann_file='data/sthv1/val_videofolder.txt',
img_prefix=data_root_val,
img_norm_cfg=img_norm_cfg,
num_segments=16,
new_length=1,
new_step=1,
random_shift=False,
modality='RGB',
image_tmpl='{:05d}.jpg',
img_scale=256,
input_size=256,
flip_ratio=0,
resize_keep_ratio=True,
oversample="three_crop",
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[75, 125])
checkpoint_config = dict(interval=1)
workflow = [('train', 1)]
# yapf:disable
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 150
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None

# app/main/routes_picoferm_api.py | zuiko42/picobrew_pico
import json
from datetime import datetime
from flask import current_app, send_from_directory
from webargs import fields
from webargs.flaskparser import use_args, FlaskParser
from . import main
from .. import socketio
from .config import ferm_active_sessions_path, picoferm_firmware_path, MachineType
from .firmware import firmware_filename, minimum_firmware, firmware_upgrade_required
from .model import PicoFermSession
from .session_parser import active_ferm_sessions
arg_parser = FlaskParser()
# Register: /API/PicoFerm/isRegistered?uid={uid}&token={token}
# Response: '#{0}#' where {0} : 1 = Registered, 0 = Not Registered
ferm_registered_args = {
'uid': fields.Str(required=True), # 12 character alpha-numeric serial number
    'token': fields.Str(required=True),  # 8 character alpha-numeric number
}
@main.route('/API/PicoFerm/isRegistered')
@use_args(ferm_registered_args, location='querystring')
def process_ferm_registered(args):
uid = args['uid']
if uid not in active_ferm_sessions:
active_ferm_sessions[uid] = PicoFermSession()
return '#1#'
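# Example exchange (hypothetical serial and token values):
#   GET /API/PicoFerm/isRegistered?uid=ABCDEF123456&token=A1B2C3D4
#   -> '#1#'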
# Check Firmware: /API/PicoFerm/checkFirmware?uid={UID}&version={VERSION}
# Response: '#{0}#' where {0} : 1 = Update Available, 0 = No Updates
check_ferm_firmware_args = {
'uid': fields.Str(required=True), # 12 character alpha-numeric serial number
'version': fields.Str(required=True), # Current firmware version - i.e. 0.1.11
}
@main.route('/API/PicoFerm/checkFirmware')
@use_args(check_ferm_firmware_args, location='querystring')
def process_check_ferm_firmware(args):
if firmware_upgrade_required(MachineType.PICOFERM, args['version']):
return '#1#'
return '#0#'
# Get Firmware Address: /API/PicoFerm/getFirmwareAddress?uid={UID}
# Response: '#{0}#' where {0} : URL of the firmware binary
get_firmware_args = {
'uid': fields.Str(required=True), # 12 character alpha-numeric serial number
}
@main.route('/API/PicoFerm/getFirmwareAddress')
@use_args(get_firmware_args, location='querystring')
def process_get_firmware_address(args):
filename = firmware_filename(MachineType.PICOFERM, minimum_firmware(MachineType.PICOFERM))
return '#http://picobrew.com/firmware/picoferm/{}#'.format(filename)
# Get Firmware: /firmware/picoferm/<file>
# Response: RAW Bin File
@main.route('/firmware/picoferm/<file>', methods=['GET'])
def process_picoferm_firmware(file):
current_app.logger.debug('DEBUG: PicoFerm fetch firmware file={}'.format(file))
return send_from_directory(picoferm_firmware_path(), file)
# Get State: /API/PicoFerm/getState?uid={UID}
# Response: '#{0}#' where {0} : 2,4 = nothing to do, 10,0 = in progress/send data, 10,16 = in progress/error, 2,16 = complete/stop sending data
get_ferm_state_args = {
'uid': fields.Str(required=True), # 12 character alpha-numeric serial number
}
@main.route('/API/PicoFerm/getState')
@use_args(get_ferm_state_args, location='querystring')
def process_get_ferm_state(args):
uid = args['uid']
if uid not in active_ferm_sessions:
active_ferm_sessions[uid] = PicoFermSession()
session = active_ferm_sessions[uid]
    if session.active:
        return '#10,0#'
    elif session.uninit or session.file is None:
        return '#2,4#'
    # assumption: any other state also means "nothing to do"; return it explicitly
    # so the Flask view never returns None
    return '#2,4#'
# LogDataSet: /API/PicoFerm/logDataSet?uid={UID}&rate={RATE}&voltage={VOLTAGE}&data={DATA}
# Response: '#{0}#' where {0} : 10,0 = in progress/send data, ?????
log_ferm_dataset_args = {
'uid': fields.Str(required=True), # 12 character alpha-numeric serial number
'rate': fields.Float(required=True), # Rate between samples (minutes)
'voltage': fields.Float(required=True), # %0.2f Voltage
'data': fields.Str(required=True), # List of dictionary (Temperature (S1), Pressure (S2)): [{"s1":%0.2f,"s2":%0.2f},]
}
@main.route('/API/PicoFerm/logDataSet')
@use_args(log_ferm_dataset_args, location='querystring')
def process_log_ferm_dataset(args):
uid = args['uid']
if uid not in active_ferm_sessions or active_ferm_sessions[uid].uninit:
create_new_session(uid)
data = json.loads(args['data'])
time_delta = args['rate'] * 60 * 1000
time = ((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds() * 1000) - (time_delta * (len(data) - 1))
session_data = []
log_data = ''
for d in data:
point = {'time': time,
'temp': d['s1'],
'pres': d['s2'],
}
session_data.append(point)
time = time + time_delta
log_data += '\n\t{},'.format(json.dumps(point))
active_ferm_sessions[uid].data.extend(session_data)
active_ferm_sessions[uid].voltage = str(args['voltage']) + 'V'
graph_update = json.dumps({'voltage': args['voltage'], 'data': session_data})
socketio.emit('ferm_session_update|{}'.format(args['uid']), graph_update)
# end fermentation only when user specifies fermentation is complete
if active_ferm_sessions[uid].uninit == False and active_ferm_sessions[uid].active == False:
active_ferm_sessions[uid].file.write('{}\n\n]'.format(log_data[:-2]))
active_ferm_sessions[uid].cleanup()
# The server makes a determination when fermenting is done based on the datalog after it sends '2,4'
return '#2,4#'
else:
active_ferm_sessions[uid].active = True
active_ferm_sessions[uid].file.write(log_data)
active_ferm_sessions[uid].file.flush()
# Errors like '10,16' send data but mark data error.
# '10,0' tells the PicoFerm to continue to send data.
return '#10,0#'
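# Example query (a sketch with hypothetical values; the device sends these as querystring args):
#     /API/PicoFerm/logDataSet?uid=FERM000001&rate=1.0&voltage=3.98&data=[{"s1":20.5,"s2":1.2}]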
# -------- Utility --------
def create_new_session(uid):
if uid not in active_ferm_sessions:
active_ferm_sessions[uid] = PicoFermSession()
active_ferm_sessions[uid].uninit = False
active_ferm_sessions[uid].start_time = datetime.now() # Not now, but X interval seconds ago
active_ferm_sessions[uid].filepath = ferm_active_sessions_path().joinpath('{0}#{1}.json'.format(active_ferm_sessions[uid].start_time.strftime('%Y%m%d_%H%M%S'), uid))
active_ferm_sessions[uid].file = open(active_ferm_sessions[uid].filepath, 'w')
active_ferm_sessions[uid].file.write('[')
|
vtrace/notifiers.py | rnui2k/vivisect | 716 | 12674143 | <filename>vtrace/notifiers.py
"""
Vtrace notifiers base classes and examples
Vtrace supports the idea of callback notifiers which
get called whenever particular events occur in the target
process. Notifiers may be registered to receive a callback
on any of the vtrace.NOTIFY_FOO events from vtrace. One notifier
*may* be registered with more than one trace, as the "notify"
method is passed a reference to the trace for which an event
has occurred...
"""
# Copyright (C) 2007 Invisigoth - See LICENSE file for details
import logging
import traceback
import vtrace
logger = logging.getLogger(__name__)
class Notifier(object):
"""
The top level example notifier... Anything which registers
itself for trace events or tracegroup events should implement
the notify method as shown here.
"""
def __init__(self):
"""
        All extenders *must* call this. Mostly because of all the
        goop necessary for the remote debugging stuff...
        (if the notifier is instantiated on the server, all is well; if it's
        on the client it needs a proxy...)
"""
pass
def handleEvent(self, event, trace):
"""
An "internal" handler so if we need to do something
from an API perspective before calling the notify method
we can have a good "all at once" hook
"""
self.notify(event, trace)
def notify(self, event, trace):
logger.info("Got event: %d from pid %d", event, trace.getPid())
class VerboseNotifier(Notifier):
def notify(self, event, trace):
logger.info("PID %d - ThreadID (%d) got", trace.getPid(), trace.getMeta("ThreadId"))
if event == vtrace.NOTIFY_ALL:
("WTF, how did we get a vtrace.NOTIFY_ALL event?!?!")
elif event == vtrace.NOTIFY_SIGNAL:
signo = trace.getCurrentSignal()
("vtrace.NOTIFY_SIGNAL %d (0x%08x)" % (signo, signo))
if trace.getMeta("Platform") == "windows":
logger.info(repr(trace.getMeta("Win32Event")))
elif event == vtrace.NOTIFY_BREAK:
logger.info("vtrace.NOTIFY_BREAK")
logger.info("\tIP: 0x%08x", trace.getProgramCounter())
elif event == vtrace.NOTIFY_SYSCALL:
logger.info("vtrace.NOTIFY_SYSCALL")
elif event == vtrace.NOTIFY_CONTINUE:
logger.info("vtrace.NOTIFY_CONTINUE")
elif event == vtrace.NOTIFY_EXIT:
logger.info("vtrace.NOTIFY_EXIT")
logger.info("\tExitCode: %d", trace.getMeta("ExitCode"))
elif event == vtrace.NOTIFY_ATTACH:
logger.info("vtrace.NOTIFY_ATTACH")
elif event == vtrace.NOTIFY_DETACH:
logger.info("vtrace.NOTIFY_DETACH")
elif event == vtrace.NOTIFY_LOAD_LIBRARY:
logger.info("vtrace.NOTIFY_LOAD_LIBRARY")
logger.info("\tLoaded library %s", trace.getMeta('LatestLibrary'))
elif event == vtrace.NOTIFY_UNLOAD_LIBRARY:
logger.info("vtrace.NOTIFY_UNLOAD_LIBRARY")
elif event == vtrace.NOTIFY_CREATE_THREAD:
logger.info("vtrace.NOTIFY_CREATE_THREAD")
logger.info("\tNew thread - ThreadID: %d", trace.getMeta("ThreadId"))
elif event == vtrace.NOTIFY_EXIT_THREAD:
logger.info("vtrace.NOTIFY_EXIT_THREAD")
logger.info("Thread exited - ThreadID: %d", trace.getMeta("ExitThread", -1))
elif event == vtrace.NOTIFY_STEP:
logger.info("vtrace.NOTIFY_STEP")
else:
logger.warning("Unhandled vtrace event type of: %d", event)
class DistributedNotifier(Notifier):
"""
    A notifier which will distribute notifications out to
    locally registered notifiers so that a remote tracer's notifier
    callbacks only require one trip across the wire.
"""
# NOTE: once you turn on vtrace.NOTIFY_ALL it can't be turned back off yet.
def __init__(self):
Notifier.__init__(self)
self.shared = False
self.events = []
self.notifiers = {}
for i in range(vtrace.NOTIFY_MAX):
self.notifiers[i] = []
def notify(self, event, trace):
self.fireNotifiers(event, trace)
def fireNotifiers(self, event, trace):
"""
        Fire all of our registered local notifiers
"""
nlist = self.notifiers.get(vtrace.NOTIFY_ALL, [])
for notifier in nlist:
try:
notifier.handleEvent(event, trace)
except Exception:
logger.error("Exception in notifier:\n%s", traceback.format_exc())
nlist = self.notifiers.get(event, [])
for notifier in nlist:
try:
notifier.handleEvent(event, trace)
except Exception:
logger.error("Exception in notifier:\n%s", traceback.format_exc())
def registerNotifier(self, event, notif):
"""
Register a sub-notifier to get the remote callback's via
our local delivery.
"""
nlist = self.notifiers.get(event)
nlist.append(notif)
def deregisterNotifier(self, event, notif):
nlist = self.notifiers.get(event)
nlist.remove(notif)
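# A minimal usage sketch (assuming the usual vtrace Trace API --
# getTrace()/registerNotifier()/attach() -- verify against your vivisect version):
#
#     import vtrace
#     from vtrace.notifiers import VerboseNotifier
#
#     trace = vtrace.getTrace()
#     trace.registerNotifier(vtrace.NOTIFY_ALL, VerboseNotifier())
#     trace.attach(1234)  # hypothetical pid
#     trace.run()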
|
venv/lib/python3.8/site-packages/pylint/extensions/_check_docs_utils.py | DiegoSilvaHoffmann/Small-Ecommerce | 463 | 12674161 | <reponame>DiegoSilvaHoffmann/Small-Ecommerce
# Copyright (c) 2016-2020 <NAME> <<EMAIL>>
# Copyright (c) 2016-2019 <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME> <<EMAIL>>
# Copyright (c) 2017, 2020 hippo91 <<EMAIL>>
# Copyright (c) 2017 Mitar <<EMAIL>>
# Copyright (c) 2018, 2020 <NAME> <<EMAIL>>
# Copyright (c) 2018 <NAME> <<EMAIL>>
# Copyright (c) 2018 ssolanki <<EMAIL>>
# Copyright (c) 2018 <NAME> <<EMAIL>>
# Copyright (c) 2018 <NAME> <<EMAIL>>
# Copyright (c) 2019 <NAME> <<EMAIL>>
# Copyright (c) 2019 <NAME> <<EMAIL>>
# Copyright (c) 2019 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""Utility methods for docstring checking."""
import re
from typing import List
import astroid
from pylint.checkers import utils
def space_indentation(s):
"""The number of leading spaces in a string
:param str s: input string
:rtype: int
:return: number of leading spaces
"""
return len(s) - len(s.lstrip(" "))
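# Example (illustrative): space_indentation("    code") == 4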
def get_setters_property_name(node):
"""Get the name of the property that the given node is a setter for.
:param node: The node to get the property name for.
    :type node: astroid.FunctionDef
:rtype: str or None
:returns: The name of the property that the node is a setter for,
or None if one could not be found.
"""
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
if (
isinstance(decorator, astroid.Attribute)
and decorator.attrname == "setter"
and isinstance(decorator.expr, astroid.Name)
):
return decorator.expr.name
return None
def get_setters_property(node):
"""Get the property node for the given setter node.
:param node: The node to get the property for.
:type node: astroid.FunctionDef
:rtype: astroid.FunctionDef or None
:returns: The node relating to the property of the given setter node,
or None if one could not be found.
"""
property_ = None
property_name = get_setters_property_name(node)
class_node = utils.node_frame_class(node)
if property_name and class_node:
class_attrs = class_node.getattr(node.name)
for attr in class_attrs:
if utils.decorated_with_property(attr):
property_ = attr
break
return property_
def returns_something(return_node):
"""Check if a return node returns a value other than None.
:param return_node: The return node to check.
:type return_node: astroid.Return
:rtype: bool
:return: True if the return node returns a value other than None,
False otherwise.
"""
returns = return_node.value
if returns is None:
return False
return not (isinstance(returns, astroid.Const) and returns.value is None)
def _get_raise_target(node):
if isinstance(node.exc, astroid.Call):
func = node.exc.func
if isinstance(func, (astroid.Name, astroid.Attribute)):
return utils.safe_infer(func)
return None
def _split_multiple_exc_types(target: str) -> List[str]:
delimiters = r"(\s*,(?:\s*or\s)?\s*|\s+or\s+)"
return re.split(delimiters, target)
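# Note (a sketch of the observed behavior): because the delimiter pattern uses a
# capturing group, re.split() returns the separators as well, e.g.
#     _split_multiple_exc_types("ValueError, TypeError") -> ["ValueError", ", ", "TypeError"]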
def possible_exc_types(node):
"""
Gets all of the possible raised exception types for the given raise node.
.. note::
Caught exception types are ignored.
:param node: The raise node to find exception types for.
:type node: astroid.node_classes.NodeNG
:returns: A list of exception types possibly raised by :param:`node`.
:rtype: set(str)
"""
excs = []
if isinstance(node.exc, astroid.Name):
inferred = utils.safe_infer(node.exc)
if inferred:
excs = [inferred.name]
elif node.exc is None:
handler = node.parent
while handler and not isinstance(handler, astroid.ExceptHandler):
handler = handler.parent
if handler and handler.type:
inferred_excs = astroid.unpack_infer(handler.type)
excs = (exc.name for exc in inferred_excs if exc is not astroid.Uninferable)
else:
target = _get_raise_target(node)
if isinstance(target, astroid.ClassDef):
excs = [target.name]
elif isinstance(target, astroid.FunctionDef):
for ret in target.nodes_of_class(astroid.Return):
if ret.frame() != target:
# return from inner function - ignore it
continue
val = utils.safe_infer(ret.value)
if (
val
and isinstance(val, (astroid.Instance, astroid.ClassDef))
and utils.inherit_from_std_ex(val)
):
excs.append(val.name)
try:
return {exc for exc in excs if not utils.node_ignores_exception(node, exc)}
except astroid.InferenceError:
return set()
def docstringify(docstring, default_type="default"):
for docstring_type in [
SphinxDocstring,
EpytextDocstring,
GoogleDocstring,
NumpyDocstring,
]:
instance = docstring_type(docstring)
if instance.is_valid():
return instance
docstring_type = DOCSTRING_TYPES.get(default_type, Docstring)
return docstring_type(docstring)
class Docstring:
re_for_parameters_see = re.compile(
r"""
For\s+the\s+(other)?\s*parameters\s*,\s+see
""",
re.X | re.S,
)
supports_yields: bool = False
"""True if the docstring supports a "yield" section.
False if the docstring uses the returns section to document generators.
"""
# These methods are designed to be overridden
# pylint: disable=no-self-use
def __init__(self, doc):
doc = doc or ""
self.doc = doc.expandtabs()
def is_valid(self):
return False
def exceptions(self):
return set()
def has_params(self):
return False
def has_returns(self):
return False
def has_rtype(self):
return False
def has_property_returns(self):
return False
def has_property_type(self):
return False
def has_yields(self):
return False
def has_yields_type(self):
return False
def match_param_docs(self):
return set(), set()
def params_documented_elsewhere(self):
return self.re_for_parameters_see.search(self.doc) is not None
class SphinxDocstring(Docstring):
re_type = r"""
[~!.]? # Optional link style prefix
\w(?:\w|\.[^\.])* # Valid python name
"""
re_simple_container_type = r"""
{type} # a container type
[\(\[] [^\n\s]+ [\)\]] # with the contents of the container
""".format(
type=re_type
)
re_multiple_simple_type = r"""
(?:{container_type}|{type})
(?:(?:\s+(?:of|or)\s+|\s*,\s*)(?:{container_type}|{type}))*
""".format(
type=re_type, container_type=re_simple_container_type
)
re_xref = r"""
(?::\w+:)? # optional tag
`{}` # what to reference
""".format(
re_type
)
re_param_raw = r"""
: # initial colon
(?: # Sphinx keywords
param|parameter|
arg|argument|
key|keyword
)
\s+ # whitespace
(?: # optional type declaration
({type}|{container_type})
\s+
)?
(\w+) # Parameter name
\s* # whitespace
: # final colon
""".format(
type=re_type, container_type=re_simple_container_type
)
re_param_in_docstring = re.compile(re_param_raw, re.X | re.S)
re_type_raw = r"""
:type # Sphinx keyword
\s+ # whitespace
({type}) # Parameter name
\s* # whitespace
: # final colon
""".format(
type=re_multiple_simple_type
)
re_type_in_docstring = re.compile(re_type_raw, re.X | re.S)
re_property_type_raw = r"""
:type: # Sphinx keyword
\s+ # whitespace
{type} # type declaration
""".format(
type=re_multiple_simple_type
)
re_property_type_in_docstring = re.compile(re_property_type_raw, re.X | re.S)
re_raise_raw = r"""
: # initial colon
(?: # Sphinx keyword
raises?|
except|exception
)
\s+ # whitespace
({type}) # exception type
\s* # whitespace
: # final colon
""".format(
type=re_multiple_simple_type
)
re_raise_in_docstring = re.compile(re_raise_raw, re.X | re.S)
re_rtype_in_docstring = re.compile(r":rtype:")
re_returns_in_docstring = re.compile(r":returns?:")
supports_yields = False
def is_valid(self):
return bool(
self.re_param_in_docstring.search(self.doc)
or self.re_raise_in_docstring.search(self.doc)
or self.re_rtype_in_docstring.search(self.doc)
or self.re_returns_in_docstring.search(self.doc)
or self.re_property_type_in_docstring.search(self.doc)
)
def exceptions(self):
types = set()
for match in re.finditer(self.re_raise_in_docstring, self.doc):
raise_type = match.group(1)
types.update(_split_multiple_exc_types(raise_type))
return types
def has_params(self):
if not self.doc:
return False
return self.re_param_in_docstring.search(self.doc) is not None
def has_returns(self):
if not self.doc:
return False
return bool(self.re_returns_in_docstring.search(self.doc))
def has_rtype(self):
if not self.doc:
return False
return bool(self.re_rtype_in_docstring.search(self.doc))
def has_property_returns(self):
if not self.doc:
return False
# The summary line is the return doc,
# so the first line must not be a known directive.
return not self.doc.lstrip().startswith(":")
def has_property_type(self):
if not self.doc:
return False
return bool(self.re_property_type_in_docstring.search(self.doc))
def match_param_docs(self):
params_with_doc = set()
params_with_type = set()
for match in re.finditer(self.re_param_in_docstring, self.doc):
name = match.group(2)
params_with_doc.add(name)
param_type = match.group(1)
if param_type is not None:
params_with_type.add(name)
params_with_type.update(re.findall(self.re_type_in_docstring, self.doc))
return params_with_doc, params_with_type
class EpytextDocstring(SphinxDocstring):
"""
Epytext is similar to Sphinx. See the docs:
http://epydoc.sourceforge.net/epytext.html
http://epydoc.sourceforge.net/fields.html#fields
It's used in PyCharm:
https://www.jetbrains.com/help/pycharm/2016.1/creating-documentation-comments.html#d848203e314
https://www.jetbrains.com/help/pycharm/2016.1/using-docstrings-to-specify-types.html
"""
re_param_in_docstring = re.compile(
SphinxDocstring.re_param_raw.replace(":", "@", 1), re.X | re.S
)
re_type_in_docstring = re.compile(
SphinxDocstring.re_type_raw.replace(":", "@", 1), re.X | re.S
)
re_property_type_in_docstring = re.compile(
SphinxDocstring.re_property_type_raw.replace(":", "@", 1), re.X | re.S
)
re_raise_in_docstring = re.compile(
SphinxDocstring.re_raise_raw.replace(":", "@", 1), re.X | re.S
)
re_rtype_in_docstring = re.compile(
r"""
@ # initial "at" symbol
(?: # Epytext keyword
rtype|returntype
)
: # final colon
""",
re.X | re.S,
)
re_returns_in_docstring = re.compile(r"@returns?:")
def has_property_returns(self):
if not self.doc:
return False
# If this is a property docstring, the summary is the return doc.
if self.has_property_type():
# The summary line is the return doc,
# so the first line must not be a known directive.
return not self.doc.lstrip().startswith("@")
return False
class GoogleDocstring(Docstring):
re_type = SphinxDocstring.re_type
re_xref = SphinxDocstring.re_xref
re_container_type = r"""
(?:{type}|{xref}) # a container type
[\(\[] [^\n]+ [\)\]] # with the contents of the container
""".format(
type=re_type, xref=re_xref
)
re_multiple_type = r"""
(?:{container_type}|{type}|{xref})
(?:(?:\s+(?:of|or)\s+|\s*,\s*)(?:{container_type}|{type}|{xref}))*
""".format(
type=re_type, xref=re_xref, container_type=re_container_type
)
_re_section_template = r"""
^([ ]*) {0} \s*: \s*$ # Google parameter header
( .* ) # section
"""
re_param_section = re.compile(
_re_section_template.format(r"(?:Args|Arguments|Parameters)"),
re.X | re.S | re.M,
)
re_keyword_param_section = re.compile(
_re_section_template.format(r"Keyword\s(?:Args|Arguments|Parameters)"),
re.X | re.S | re.M,
)
re_param_line = re.compile(
r"""
\s* \*{{0,2}}(\w+) # identifier potentially with asterisks
\s* ( [(]
{type}
(?:,\s+optional)?
[)] )? \s* : # optional type declaration
\s* (.*) # beginning of optional description
""".format(
type=re_multiple_type
),
re.X | re.S | re.M,
)
re_raise_section = re.compile(
_re_section_template.format(r"Raises"), re.X | re.S | re.M
)
re_raise_line = re.compile(
r"""
\s* ({type}) \s* : # identifier
\s* (.*) # beginning of optional description
""".format(
type=re_multiple_type
),
re.X | re.S | re.M,
)
re_returns_section = re.compile(
_re_section_template.format(r"Returns?"), re.X | re.S | re.M
)
re_returns_line = re.compile(
r"""
\s* ({type}:)? # identifier
\s* (.*) # beginning of description
""".format(
type=re_multiple_type
),
re.X | re.S | re.M,
)
re_property_returns_line = re.compile(
r"""
        ^{type}: # identifier
\s* (.*) # Summary line / description
""".format(
type=re_multiple_type
),
re.X | re.S | re.M,
)
re_yields_section = re.compile(
_re_section_template.format(r"Yields?"), re.X | re.S | re.M
)
re_yields_line = re_returns_line
supports_yields = True
def is_valid(self):
return bool(
self.re_param_section.search(self.doc)
or self.re_raise_section.search(self.doc)
or self.re_returns_section.search(self.doc)
or self.re_yields_section.search(self.doc)
or self.re_property_returns_line.search(self._first_line())
)
def has_params(self):
if not self.doc:
return False
return self.re_param_section.search(self.doc) is not None
def has_returns(self):
if not self.doc:
return False
entries = self._parse_section(self.re_returns_section)
for entry in entries:
match = self.re_returns_line.match(entry)
if not match:
continue
return_desc = match.group(2)
if return_desc:
return True
return False
def has_rtype(self):
if not self.doc:
return False
entries = self._parse_section(self.re_returns_section)
for entry in entries:
match = self.re_returns_line.match(entry)
if not match:
continue
return_type = match.group(1)
if return_type:
return True
return False
def has_property_returns(self):
# The summary line is the return doc,
# so the first line must not be a known directive.
first_line = self._first_line()
return not bool(
self.re_param_section.search(first_line)
or self.re_raise_section.search(first_line)
or self.re_returns_section.search(first_line)
or self.re_yields_section.search(first_line)
)
def has_property_type(self):
if not self.doc:
return False
return bool(self.re_property_returns_line.match(self._first_line()))
def has_yields(self):
if not self.doc:
return False
entries = self._parse_section(self.re_yields_section)
for entry in entries:
match = self.re_yields_line.match(entry)
if not match:
continue
yield_desc = match.group(2)
if yield_desc:
return True
return False
def has_yields_type(self):
if not self.doc:
return False
entries = self._parse_section(self.re_yields_section)
for entry in entries:
match = self.re_yields_line.match(entry)
if not match:
continue
yield_type = match.group(1)
if yield_type:
return True
return False
def exceptions(self):
types = set()
entries = self._parse_section(self.re_raise_section)
for entry in entries:
match = self.re_raise_line.match(entry)
if not match:
continue
exc_type = match.group(1)
exc_desc = match.group(2)
if exc_desc:
types.update(_split_multiple_exc_types(exc_type))
return types
def match_param_docs(self):
params_with_doc = set()
params_with_type = set()
entries = self._parse_section(self.re_param_section)
entries.extend(self._parse_section(self.re_keyword_param_section))
for entry in entries:
match = self.re_param_line.match(entry)
if not match:
continue
param_name = match.group(1)
param_type = match.group(2)
param_desc = match.group(3)
if param_type:
params_with_type.add(param_name)
if param_desc:
params_with_doc.add(param_name)
return params_with_doc, params_with_type
def _first_line(self):
return self.doc.lstrip().split("\n", 1)[0]
@staticmethod
def min_section_indent(section_match):
return len(section_match.group(1)) + 1
@staticmethod
def _is_section_header(_):
# Google parsing does not need to detect section headers,
# because it works off of indentation level only
return False
def _parse_section(self, section_re):
section_match = section_re.search(self.doc)
if section_match is None:
return []
min_indentation = self.min_section_indent(section_match)
entries = []
entry = []
is_first = True
for line in section_match.group(2).splitlines():
if not line.strip():
continue
indentation = space_indentation(line)
if indentation < min_indentation:
break
# The first line after the header defines the minimum
# indentation.
if is_first:
min_indentation = indentation
is_first = False
if indentation == min_indentation:
if self._is_section_header(line):
break
# Lines with minimum indentation must contain the beginning
# of a new parameter documentation.
if entry:
entries.append("\n".join(entry))
entry = []
entry.append(line)
if entry:
entries.append("\n".join(entry))
return entries
class NumpyDocstring(GoogleDocstring):
_re_section_template = r"""
^([ ]*) {0} \s*?$ # Numpy parameters header
\s* [-=]+ \s*?$ # underline
( .* ) # section
"""
re_param_section = re.compile(
_re_section_template.format(r"(?:Args|Arguments|Parameters)"),
re.X | re.S | re.M,
)
re_param_line = re.compile(
r"""
\s* (\w+) # identifier
\s* :
\s* (?:({type})(?:,\s+optional)?)? # optional type declaration
\n # description starts on a new line
\s* (.*) # description
""".format(
type=GoogleDocstring.re_multiple_type
),
re.X | re.S,
)
re_raise_section = re.compile(
_re_section_template.format(r"Raises"), re.X | re.S | re.M
)
re_raise_line = re.compile(
r"""
\s* ({type})$ # type declaration
\s* (.*) # optional description
""".format(
type=GoogleDocstring.re_type
),
re.X | re.S | re.M,
)
re_returns_section = re.compile(
_re_section_template.format(r"Returns?"), re.X | re.S | re.M
)
re_returns_line = re.compile(
r"""
\s* (?:\w+\s+:\s+)? # optional name
({type})$ # type declaration
\s* (.*) # optional description
""".format(
type=GoogleDocstring.re_multiple_type
),
re.X | re.S | re.M,
)
re_yields_section = re.compile(
_re_section_template.format(r"Yields?"), re.X | re.S | re.M
)
re_yields_line = re_returns_line
supports_yields = True
@staticmethod
def min_section_indent(section_match):
return len(section_match.group(1))
@staticmethod
def _is_section_header(line):
return bool(re.match(r"\s*-+$", line))
DOCSTRING_TYPES = {
"sphinx": SphinxDocstring,
"epytext": EpytextDocstring,
"google": GoogleDocstring,
"numpy": NumpyDocstring,
"default": Docstring,
}
"""A map of the name of the docstring type to its class.
:type: dict(str, type)
"""
|
esrally/utils/net.py | Kua-Fu/rally | 1,577 | 12674182 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import logging
import os
import socket
import urllib.error
from urllib.parse import parse_qs, quote, urlencode, urlparse, urlunparse
import certifi
import urllib3
from esrally import exceptions
from esrally.utils import console, convert
__HTTP = None
def init():
logger = logging.getLogger(__name__)
global __HTTP
proxy_url = os.getenv("http_proxy")
if proxy_url and len(proxy_url) > 0:
parsed_url = urllib3.util.parse_url(proxy_url)
logger.info("Connecting via proxy URL [%s] to the Internet (picked up from the env variable [http_proxy]).", proxy_url)
__HTTP = urllib3.ProxyManager(
proxy_url,
cert_reqs="CERT_REQUIRED",
ca_certs=certifi.where(),
# appropriate headers will only be set if there is auth info
proxy_headers=urllib3.make_headers(proxy_basic_auth=parsed_url.auth),
)
else:
logger.info("Connecting directly to the Internet (no proxy support).")
__HTTP = urllib3.PoolManager(cert_reqs="CERT_REQUIRED", ca_certs=certifi.where())
class Progress:
def __init__(self, msg, accuracy=0):
self.p = console.progress()
        # if we don't show a decimal sign, the maximum width is 3 (max value is 100 (%)).
        # Otherwise it is 3 + 1 (for the decimal point) + the accuracy that the user requested.
total_width = 3 if accuracy == 0 else 4 + accuracy
# sample formatting string: [%5.1f%%] for an accuracy of 1
self.percent_format = "[%%%d.%df%%%%]" % (total_width, accuracy)
self.msg = msg
def __call__(self, bytes_read, bytes_total):
if bytes_total:
completed = bytes_read / bytes_total
total_as_mb = convert.bytes_to_human_string(bytes_total)
self.p.print("%s (%s total size)" % (self.msg, total_as_mb), self.percent_format % (completed * 100))
else:
self.p.print(self.msg, ".")
def finish(self):
self.p.finish()
def _fake_import_boto3():
# This function only exists to be mocked in tests to raise an ImportError, in
# order to simulate the absence of boto3
pass
def _download_from_s3_bucket(bucket_name, bucket_path, local_path, expected_size_in_bytes=None, progress_indicator=None):
# pylint: disable=import-outside-toplevel
# lazily initialize S3 support - it might not be available
try:
_fake_import_boto3()
import boto3.s3.transfer
except ImportError:
console.error("S3 support is optional. Install it with `python -m pip install esrally[s3]`")
raise
class S3ProgressAdapter:
def __init__(self, size, progress):
self._expected_size_in_bytes = size
self._progress = progress
self._bytes_read = 0
def __call__(self, bytes_amount):
self._bytes_read += bytes_amount
self._progress(self._bytes_read, self._expected_size_in_bytes)
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucket_name)
if expected_size_in_bytes is None:
expected_size_in_bytes = bucket.Object(bucket_path).content_length
progress_callback = S3ProgressAdapter(expected_size_in_bytes, progress_indicator) if progress_indicator else None
bucket.download_file(bucket_path, local_path, Callback=progress_callback, Config=boto3.s3.transfer.TransferConfig(use_threads=False))
def _build_gcs_object_url(bucket_name, bucket_path):
# / and other special characters must be urlencoded in bucket and object names
# ref: https://cloud.google.com/storage/docs/request-endpoints#encoding
return functools.reduce(
urllib.parse.urljoin,
[
"https://storage.googleapis.com/storage/v1/b/",
f"{quote(bucket_name.strip('/'), safe='')}/",
"o/",
f"{quote(bucket_path.strip('/'), safe='')}",
"?alt=media",
],
)
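# Example (illustrative):
#     _build_gcs_object_url("my-bucket", "path/to/file")
#     -> "https://storage.googleapis.com/storage/v1/b/my-bucket/o/path%2Fto%2Ffile?alt=media"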
def _download_from_gcs_bucket(bucket_name, bucket_path, local_path, expected_size_in_bytes=None, progress_indicator=None):
# pylint: disable=import-outside-toplevel
# lazily initialize Google Cloud Storage support - we might not need it
import google.auth
import google.auth.transport.requests as tr_requests
import google.oauth2.credentials
# Using Google Resumable Media as the standard storage library doesn't support progress
# (https://github.com/googleapis/python-storage/issues/27)
from google.resumable_media.requests import ChunkedDownload
ro_scope = "https://www.googleapis.com/auth/devstorage.read_only"
access_token = os.environ.get("GOOGLE_AUTH_TOKEN")
if access_token:
credentials = google.oauth2.credentials.Credentials(token=access_token, scopes=(ro_scope,))
else:
# https://google-auth.readthedocs.io/en/latest/user-guide.html
credentials, _ = google.auth.default(scopes=(ro_scope,))
transport = tr_requests.AuthorizedSession(credentials)
chunk_size = 50 * 1024 * 1024 # 50MB
with open(local_path, "wb") as local_fp:
media_url = _build_gcs_object_url(bucket_name, bucket_path)
download = ChunkedDownload(media_url, chunk_size, local_fp)
# allow us to calculate the total bytes
download.consume_next_chunk(transport)
if not expected_size_in_bytes:
expected_size_in_bytes = download.total_bytes
while not download.finished:
if progress_indicator and download.bytes_downloaded and download.total_bytes:
progress_indicator(download.bytes_downloaded, expected_size_in_bytes)
download.consume_next_chunk(transport)
def download_from_bucket(blobstore, url, local_path, expected_size_in_bytes=None, progress_indicator=None):
blob_downloader = {"s3": _download_from_s3_bucket, "gs": _download_from_gcs_bucket}
logger = logging.getLogger(__name__)
bucket_and_path = url[5:] # s3:// or gs:// prefix for now
bucket_end_index = bucket_and_path.find("/")
bucket = bucket_and_path[:bucket_end_index]
# we need to remove the leading "/"
bucket_path = bucket_and_path[bucket_end_index + 1 :]
logger.info("Downloading from [%s] bucket [%s] and path [%s] to [%s].", blobstore, bucket, bucket_path, local_path)
blob_downloader[blobstore](bucket, bucket_path, local_path, expected_size_in_bytes, progress_indicator)
return expected_size_in_bytes
def download_http(url, local_path, expected_size_in_bytes=None, progress_indicator=None):
with __http().request("GET", url, preload_content=False, retries=10, timeout=urllib3.Timeout(connect=45, read=240)) as r, open(
local_path, "wb"
) as out_file:
if r.status > 299:
raise urllib.error.HTTPError(url, r.status, "", None, None)
# noinspection PyBroadException
try:
size_from_content_header = int(r.getheader("Content-Length"))
if expected_size_in_bytes is None:
expected_size_in_bytes = size_from_content_header
except BaseException:
size_from_content_header = None
chunk_size = 2 ** 16
bytes_read = 0
for chunk in r.stream(chunk_size):
out_file.write(chunk)
bytes_read += len(chunk)
if progress_indicator and size_from_content_header:
progress_indicator(bytes_read, size_from_content_header)
return expected_size_in_bytes
def add_url_param_elastic_no_kpi(url):
scheme = urllib3.util.parse_url(url).scheme
if scheme.startswith("http"):
return _add_url_param(url, {"x-elastic-no-kpi": "true"})
else:
return url
def _add_url_param(url, params):
url_parsed = urlparse(url)
query = parse_qs(url_parsed.query)
query.update(params)
return urlunparse(
(url_parsed.scheme, url_parsed.netloc, url_parsed.path, url_parsed.params, urlencode(query, doseq=True), url_parsed.fragment)
)
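# Example (illustrative):
#     _add_url_param("https://example.com/search?q=rally", {"x-elastic-no-kpi": "true"})
#     -> "https://example.com/search?q=rally&x-elastic-no-kpi=true"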
def download(url, local_path, expected_size_in_bytes=None, progress_indicator=None):
"""
Downloads a single file from a URL to the provided local path.
:param url: The remote URL specifying one file that should be downloaded. May be either a HTTP, HTTPS, S3 or GS URL.
:param local_path: The local file name of the file that should be downloaded.
:param expected_size_in_bytes: The expected file size in bytes if known. It will be used to verify that all data have been downloaded.
    :param progress_indicator: A callable that can be used to report progress to the user. It is expected to take two parameters
``bytes_read`` and ``total_bytes``. If not provided, no progress is shown. Note that ``total_bytes`` is derived from
the ``Content-Length`` header and not from the parameter ``expected_size_in_bytes`` for downloads via HTTP(S).
"""
tmp_data_set_path = local_path + ".tmp"
try:
scheme = urllib3.util.parse_url(url).scheme
if scheme in ["s3", "gs"]:
expected_size_in_bytes = download_from_bucket(scheme, url, tmp_data_set_path, expected_size_in_bytes, progress_indicator)
else:
expected_size_in_bytes = download_http(url, tmp_data_set_path, expected_size_in_bytes, progress_indicator)
except BaseException:
if os.path.isfile(tmp_data_set_path):
os.remove(tmp_data_set_path)
raise
else:
download_size = os.path.getsize(tmp_data_set_path)
if expected_size_in_bytes is not None and download_size != expected_size_in_bytes:
if os.path.isfile(tmp_data_set_path):
os.remove(tmp_data_set_path)
raise exceptions.DataError(
"Download of [%s] is corrupt. Downloaded [%d] bytes but [%d] bytes are expected. Please retry."
% (local_path, download_size, expected_size_in_bytes)
)
os.rename(tmp_data_set_path, local_path)
def retrieve_content_as_string(url):
with __http().request("GET", url, timeout=urllib3.Timeout(connect=45, read=240)) as response:
return response.read().decode("utf-8")
def has_internet_connection(probing_url):
logger = logging.getLogger(__name__)
try:
        # We try to connect to GitHub by default. We use that to avoid touching too many different remote endpoints.
logger.debug("Checking for internet connection against [%s]", probing_url)
# We do a HTTP request here to respect the HTTP proxy setting. If we'd open a plain socket connection we circumvent the
# proxy and erroneously conclude we don't have an Internet connection.
response = __http().request("GET", probing_url, timeout=2.0)
status = response.status
logger.debug("Probing result is HTTP status [%s]", str(status))
return status == 200
except KeyboardInterrupt:
raise
except BaseException:
logger.debug("Could not detect a working Internet connection", exc_info=True)
return False
def __http():
if not __HTTP:
init()
return __HTTP
def resolve(hostname_or_ip):
if hostname_or_ip and hostname_or_ip.startswith("127"):
return hostname_or_ip
addrinfo = socket.getaddrinfo(hostname_or_ip, 22, 0, 0, socket.IPPROTO_TCP)
for family, _, _, _, sockaddr in addrinfo:
# we're interested in the IPv4 address
if family == socket.AddressFamily.AF_INET:
ip, _ = sockaddr
if ip[:3] != "127":
return ip
return None
|
python3-alpha/extra_modules/bs4/builder/_htmlparser.py | stormtheh4ck3r/python-for-android | 267 | 12674194 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from html.parser import HTMLParser
import sys
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
self.soup.handle_starttag(name, None, None, dict(attrs))
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
else:
real_name = int(name)
try:
data = chr(real_name)
except (ValueError, OverflowError) as e:
data = "\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, str):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
parser.feed(markup)
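# A minimal usage sketch (this builder backs the "html.parser" feature name):
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup("<p>Hello <b>world</b></p>", "html.parser")
#     print(soup.b.string)  # -> "world"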
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
|
Examples/Tests/particle_pusher/analysis_pusher.py | ruohai0925/artemis | 131 | 12674215 | <gh_stars>100-1000
#! /usr/bin/env python
# Copyright 2019 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script tests the particle pusher (HC)
# using a force-free field,
# in which position x should remain 0.
# An initial velocity Vy corresponding to
# Lorentz factor = 20 is used.
# Bz is fixed at 1 T.
# Ex = -Vy*Bz.
# Possible errors:
# Boris: 2321.3958529
# Vay: 0.00010467
# HC: 0.00011403
# tolerance: 0.001
# Possible running time: ~ 4.0 s
import sys
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
tolerance = 0.001
filename = sys.argv[1]
ds = yt.load( filename )
ad = ds.all_data()
x = ad['particle_position_x'].to_ndarray()
print('error = ', abs(x))
print('tolerance = ', tolerance)
assert(abs(x) < tolerance)
test_name = filename[:-9] # Could also be os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, filename)
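# Usage sketch (illustrative plotfile name; the last 9 characters, e.g. "_plt00100",
# are stripped from the filename to recover the test name):
#     python analysis_pusher.py Python_particle_pusher_plt00100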
|
skiing-huabei/utils/email_warning.py | OneCodeMonkey/spider-suites | 3,649 | 12674216 | <reponame>OneCodeMonkey/spider-suites
from traceback import format_tb
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.utils import (
parseaddr, formataddr)
from config import get_email_args
from logger import other
email_args = get_email_args()
smtp_server = email_args['server']
smtp_port = email_args['port']
from_addr = email_args['from']
from_password = email_args['password']
to_addr = email_args['to']
email_subject = email_args['subject']
warning_info = email_args['warning_info']
def _format_addr(nick_addr):
name, addr = parseaddr(nick_addr)
return formataddr((Header(name, 'utf-8').encode(), addr))
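# Example (illustrative): _format_addr('Spider <bot@example.com>') returns an
# RFC 2047 encoded "nickname <address>" string suitable for an email header.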
def gen_msg(content, subject, email_from, email_to, from_nick=None, to_nick=None):
if from_nick is None:
from_nick = email_from
if to_nick is None:
to_nick = email_to
msg = MIMEText(content, 'plain', 'utf-8')
msg['From'] = _format_addr('{} <{}>'.format(from_nick, email_from))
msg['To'] = _format_addr('{} <{}>'.format(to_nick, email_to))
msg['Subject'] = Header(subject).encode()
return msg
def send_email(email_from=from_addr, email_pass=from_password, to_addrs=None, server=smtp_server, port=smtp_port,
from_nick='weibospider', to_nick='SpiderUser'):
if to_addrs is None or isinstance(to_addrs, str):
to_addrs = [to_addrs]
msg = gen_msg(warning_info, email_subject, email_from, to_addrs[0], from_nick, to_nick)
server = smtplib.SMTP(server, port)
try:
server.starttls()
server.login(email_from, email_pass)
rs = server.sendmail(email_from, to_addrs, msg.as_string())
except Exception as e:
        other.error('Failed to send email: {} was raised, details: {}'.format(e, format_tb(e.__traceback__)[0]))
else:
return rs
finally:
server.quit()
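# A minimal usage sketch (assumes the email section of the config provides
# server/credentials; the recipient below is hypothetical):
#     send_email(to_addrs=['ops@example.com'])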
|
meter/daemon/master_v1.py | anbo225/docklet | 273 | 12674237 | import subprocess, os
def http_client_post(ip, port, url, entries = {}):
import urllib.request, urllib.parse, json
url = url if not url.startswith('/') else url[1:]
response = urllib.request.urlopen('http://%s:%d/%s' % (ip, port, url), urllib.parse.urlencode(entries).encode())
obj = json.loads(response.read().decode().strip())
response.close()
return obj
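# A minimal usage sketch (hypothetical master address/port):
#     obj = http_client_post('127.0.0.1', 1728, '/v1/minions/list')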
class case_handler:
# [Order-by] lexicographic order
# curl -L -X POST http://0.0.0.0:1728/v1/minions/list
def minions_list(form, args):
minions = []
for item in args.conn:
minions.append(args.conn[item][1])
return {"minions": minions}
# curl -L -X POST -F mem=4096 -F cpu=2 http://0.0.0.0:1728/v1/resource/allocation
def resource_allocation(form, args):
mem = int(form['mem'])
cpu = int(form['cpu'])
candidates = {}
from daemon.http import minion_http_handler
for item in args.conn:
addr = args.conn[item][1]
obj = http_client_post(addr, minion_http_handler.http_port, '/v1/system/memsw/available')
if obj['success'] and obj['data']['Mbytes'] >= mem:
candidates[addr] = obj['data']
if len(candidates) <= 0:
raise Exception("no minions")
else:
from policy.allocate import candidates_selector
one = candidates_selector.select(candidates)
return {"recommend": one}
# curl -L -X POST -F user=docklet http://0.0.0.0:1728/v1/user/live/add
def user_live_add(form, args):
if not os.path.exists('/var/lib/docklet/global/users/%s' % form['user']):
return False
subprocess.getoutput('echo live > /var/lib/docklet/global/users/%s/status' % form['user'])
return True
# curl -L -X POST -F user=docklet http://0.0.0.0:1728/v1/user/live/remove
def user_live_remove(form, args):
subprocess.getoutput('rm -f /var/lib/docklet/global/users/%s/status' % form['user'])
return True
# curl -L -X POST http://0.0.0.0:1728/v1/user/live/list
def user_live_list(form, args):
return subprocess.getoutput('ls -1 /var/lib/docklet/global/users/*/status 2>/dev/null | awk -F\/ \'{print $(NF-1)\'}').split()
|
apps/__init__.py | PyCN/BlogBackendProject | 335 | 12674244 | <filename>apps/__init__.py
# _*_ coding: utf-8 _*_
__author__ = 'LennonChin'
__date__ = '2017/11/27 23:38' |
terrascript/provider/NetApp/netapp_elementsw.py | mjuenema/python-terrascript | 507 | 12674275 | <filename>terrascript/provider/NetApp/netapp_elementsw.py
# terrascript/provider/NetApp/netapp_elementsw.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:22:13 UTC)
import terrascript
class netapp_elementsw(terrascript.Provider):
"""Support for Volume, Initiator, Account, and Volume Access Group resources."""
__description__ = (
"Support for Volume, Initiator, Account, and Volume Access Group resources."
)
__namespace__ = "NetApp"
__name__ = "netapp-elementsw"
__source__ = "https://github.com/NetApp/terraform-provider-netapp-elementsw"
__version__ = "20.11.0"
__published__ = "2020-11-24T20:02:40Z"
__tier__ = "partner"
__all__ = ["netapp_elementsw"]
|
fuzzymatcher/scorer_default.py | nikoAguilar/fuzzymatcher_edit | 218 | 12674277 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import pandas as pd
from collections import Counter
from itertools import chain, product
from operator import mul
from functools import reduce
from math import log10
from functools import lru_cache
import logging
log = logging.getLogger(__name__)
from fuzzymatcher.utils import add_dmetaphones_to_col, is_mispelling, convert_series_to_dmetaphones, tokens_to_dmetaphones
class Scorer:
"""
A DataPreprocessor is responsible for ingesting df_left (the dataframe containing the records we
want to find matches for) and df_right (the dataframe we want to search for potential matches)
and applying preprocessing stages like normalisation to make matching easier.
"""
def add_data(self, matcher):
self.matcher = matcher
self._generate_probs()
def get_prob(self, token, field, left_right, misspelling=False):
"""
Get probability given field and token
"""
try:
if not misspelling and left_right == "left":
return self.left_field_token_probs_dict[field][token]
if not misspelling and left_right == "right":
return self.right_field_token_probs_dict[field][token]
if misspelling and left_right == "left":
return self.left_field_misspelling_probs_dict[field][token]
if misspelling and left_right == "right":
return self.right_field_misspelling_probs_dict[field][token]
except KeyError:
return None
@lru_cache(maxsize=int(1e6))
def score_match(self, record_left_id, record_right_id):
record_left = self.matcher.left_records[record_left_id]
record_right = self.matcher.right_records[record_right_id]
# Need to find common tokens, and get their probabilities
fields_left = record_left.fields
prob = 1
for f_left in fields_left:
p = self._field_to_prob(f_left, record_left, record_right)
prob = p * prob
match_score = self.prob_to_score(prob)
return {"match_prob" : prob, "match_score": match_score, "record_right": record_right}
def _field_to_prob(self, field_left, record_left, record_right):
field_right = self.matcher.left_to_right_lookup[field_left]
tokens_left = set(record_left.clean_token_dict[field_left])
tokens_right = set(record_right.clean_token_dict[field_right])
matching_tokens = tokens_left.intersection(tokens_right)
unmatching_tokens_left = tokens_left.difference(tokens_right)
unmatching_tokens_right = tokens_right.difference(tokens_left)
prob_matching = self._get_prob_matching(matching_tokens, field_right)
prob_unmatching1 = self._get_prob_unmatching(unmatching_tokens_left, tokens_right, field_right, field_left)
prob_unmatching2 = self._get_prob_unmatching(unmatching_tokens_right, tokens_left, field_right, field_left)
tokens_alt_left = set(record_left.token_misspelling_dict[field_left])
tokens_alt_right = set(record_right.token_misspelling_dict[field_right])
matching_tokens_alt = tokens_alt_left.intersection(tokens_alt_right)
prob_matching_alt = self._get_prob_matching(matching_tokens_alt, field_right, misspelling=True)
prob = prob_matching * prob_unmatching1 * prob_unmatching2 * prob_matching_alt
return prob
def _get_prob_matching(self, tokens, f_right, misspelling=False):
prob = 1
for t in tokens:
p = self.get_prob(t,f_right,"right", misspelling)
prob = p * prob
return prob
def _get_prob_unmatching(self, unmatching_tokens, record_tokens, field_right, field_left):
# If the unmatching token is not a misspelling, then undo its probability
prob = 1
for umt in unmatching_tokens:
if not self._is_misspelling_of_one(umt, record_tokens):
p = self.get_prob(umt,field_right,"right")
if p is None: # If this token never appears on the right, how often does it appear on the left
p = self.get_prob(umt,field_left,"left")
prob = p * prob
prob = Scorer._adjust_prob_towards_one(prob)
return 1/prob
def _is_misspelling_of_one(self, token, token_list):
for t in token_list:
if self.matcher.token_comparison.is_mispelling(token, t):
return True
return False
def get_token_lists_by_field(self, recordsdict, attribute):
token_lists_by_field = {}
key = next(iter(recordsdict))
fields = recordsdict[key].fields
for f in fields:
token_lists_by_field[f] = []
for key, this_record in recordsdict.items():
for f in fields:
tokens = getattr(this_record, attribute)[f]
token_lists_by_field[f].extend(tokens)
return token_lists_by_field
def field_tokens_to_prob(self, field_tokens):
ft = field_tokens
for key, value in ft.items():
counts = Counter(value)
count_sum = sum(counts.values())
counts = {k: v/count_sum for k,v in counts.items()}
ft[key] = counts
return ft
def _generate_probs(self):
left_field_tokens = self.get_token_lists_by_field(self.matcher.left_records, "clean_token_dict")
self.left_field_token_probs_dict = self.field_tokens_to_prob(left_field_tokens)
right_field_tokens = self.get_token_lists_by_field(self.matcher.right_records, "clean_token_dict")
self.right_field_token_probs_dict = self.field_tokens_to_prob(right_field_tokens)
left_field_tokens = self.get_token_lists_by_field(self.matcher.left_records, "token_misspelling_dict")
self.left_field_misspelling_probs_dict = self.field_tokens_to_prob(left_field_tokens)
right_field_tokens = self.get_token_lists_by_field(self.matcher.right_records, "token_misspelling_dict")
self.right_field_misspelling_probs_dict = self.field_tokens_to_prob(right_field_tokens)
@staticmethod
def prob_to_score(prob):
return -(log10(prob))/30
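    # Examples (illustrative): prob_to_score(1.0) == 0.0 and prob_to_score(1e-30) == 1.0;
    # smaller match probabilities map to larger scores on a -log10 scale.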
@staticmethod
def _adjust_prob_towards_one(initial_prob, amount = 2):
return initial_prob
|
faker/providers/internet/sk_SK/__init__.py | StabbarN/faker | 12,077 | 12674283 | from .. import Provider as InternetProvider
class Provider(InternetProvider):
user_name_formats = (
'{{last_name_female}}.{{first_name_female}}',
'{{last_name_female}}.{{first_name_female}}',
'{{last_name_male}}.{{first_name_male}}',
'{{last_name_male}}.{{first_name_male}}',
'{{first_name_female}}.{{last_name_female}}',
'{{first_name_male}}.{{last_name_male}}',
'{{first_name}}##',
'?{{last_name}}',
'?{{last_name}}',
'?{{last_name}}',
)
email_formats = ('{{user_name}}@{{free_email_domain}}', )
free_email_domains = (
'zoznam.sk',
'gmail.com',
'centrum.sk',
'post.sk',
'chello.sk',
'pobox.sk',
'szm.sk',
'atlas.sk',
'azet.sk',
'inmail.sk',
)
tlds = ('sk', 'com')
|
src/config.py | eco999/VALORANT-rank-yoinker | 107 | 12674294 | import json
from io import TextIOWrapper
from json import JSONDecodeError
import requests
import os
from src.logs import Logging
class Config:
def __init__(self, log):
self.log = log
if not os.path.exists("config.json"):
self.log("config.json not found, creating new one")
with open("config.json", "w") as file:
config = self.config_dialog(file)
try:
with open("config.json", "r") as file:
self.log("config opened")
config = json.load(file)
if config.get("cooldown") is None:
self.log("some config values are None, getting new config")
config = self.config_dialog(file)
if config.get("weapon") == "" or config.get("weapon") == None:
weapon = input("Enter the name of the weapon you use the most (This is for tracking the skins): ").capitalize().strip()
self.log(f"User inputted {weapon} as the weapon")
with open("config.json", "w") as f:
if not self.weapon_check(weapon):
print(weapon + " is not known valorant weapon you can edit directly " + os.getcwd() + "\config.json\n")
config["weapon"] = "vandal"
json.dump(config, f, indent=4)
self.log("vandal weapon has been added to the config file by default")
else:
config["weapon"] = weapon
json.dump(config, f, indent=4)
self.log(f"{weapon} weapon has been added to the config file by user")
except (JSONDecodeError):
self.log("invalid file")
with open("config.json", "w") as file:
config = self.config_dialog(file)
finally:
self.cooldown = config["cooldown"]
self.log(f"got cooldown with value '{self.cooldown}'")
if not self.weapon_check(config["weapon"]):
self.weapon = "vandal" # if the user manually entered a wrong name into the config file, this will be the default until changed by the user.
else:
self.weapon = config["weapon"]
def config_dialog(self, fileToWrite: TextIOWrapper):
self.log("color config prompt called")
jsonToWrite = {"cooldown": 1, "weapon":""}
json.dump(jsonToWrite, fileToWrite)
return jsonToWrite
def weapon_check(self, name):
if name in [weapon["displayName"] for weapon in requests.get("https://valorant-api.com/v1/weapons").json()["data"]]:
return True
else:
return False |
exercises/fr/exc_01_08_02.py | Jette16/spacy-course | 2,085 | 12674298 | import spacy
nlp = spacy.load("fr_core_news_sm")
text = "Apple a été créée en 1976 par <NAME>, <NAME> et <NAME>."
# Process the text
doc = ____
# Iterate over the predicted entities
for ent in ____.____:
# Print the entity text and its label
print(ent.____, ____.____)
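# Hedged completion of the blanks above (illustrative; it follows the standard
# spaCy API and may differ from the course's official solution):
#
#     doc = nlp(text)
#     for ent in doc.ents:
#         print(ent.text, ent.label_)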
|
test/element_test.py | screamingskulls/sofi | 402 | 12674304 | from sofi.ui import Element
def test_attrs_to_string_shortcut():
attributes = [
('cl', 'class'),
('ident', 'id'),
]
e = Element(cl='container', ident='foo')
should_be = 'class="container" id="foo"'
assert e._attrs_to_string(attributes) == should_be
def test_attrs_to_string_empty_shortcut():
attributes = [
]
e = Element()
should_be = ''
assert e._attrs_to_string(attributes) == should_be
def test_attrs_to_string_default_arguments():
e = Element(cl='container', ident='foo')
should_be = 'class="container" id="foo"'
assert e._attrs_to_string() == should_be
def test_attrs_to_string_attribute_with_no_value():
attributes = [
('cl', 'class'),
('ident', 'id'),
('disabled', None),
]
e = Element(cl='container', ident='foo')
should_be = 'class="container" id="foo" disabled'
assert e._attrs_to_string(attributes) == should_be
def test_attrs_to_string_with_nonexistent_attribute():
attributes = [
('cl', 'class'),
('ident', 'id'),
('this_doesnt_exist', 'foo'),
]
e = Element(cl='container', ident='foo')
should_be = 'class="container" id="foo"'
assert e._attrs_to_string(attributes) == should_be
|
indra/tests/test_cyjs_assembler.py | zebulon2/indra | 136 | 12674312 | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.assemblers.cyjs import CyJSAssembler
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'TEXT': 'mek1'})
erk = Agent('MAPK1', db_refs={'UP': 'P28482'})
dusp = Agent('DUSP4')
st_phos = Phosphorylation(mek, erk)
st_phos_Y = Phosphorylation(mek, erk, residue='Y')
st_phos_T = Phosphorylation(mek, erk, residue='T')
st_dephos = Dephosphorylation(dusp, erk)
st_complex = Complex([mek, erk, dusp])
st_act = Activation(mek, erk)
st_gef = Gef(Agent('SOS1'), Agent('HRAS'))
st_gap = Gap(Agent('RASA1'), Agent('HRAS'))
st_incamount = IncreaseAmount(Agent('TP53'), Agent('MDM2'))
st_decamount = DecreaseAmount(Agent('MDM2'), Agent('TP53'))
st_act2 = Inhibition(dusp, erk)
st_cited = Phosphorylation(mek, erk, evidence=Evidence(pmid='12345',
text='MEK phosphorylates ERK'))
st_cited2 = Phosphorylation(mek, erk, evidence=Evidence(pmid='api35',
text='MEK phosphorylates ERK'))
st_selfmod = Autophosphorylation(Agent('AKT1'), 'S', '473')
def test_act():
cja = CyJSAssembler()
cja.add_statements([st_act, st_act2])
cja.make_model()
assert len(cja._nodes) == 3
assert len(cja._edges) == 2
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==2
assert 'positive' in polarities
assert 'negative' in polarities
db_refs = [node['data']['db_refs'] for node in cja._nodes]
for node, refs in zip(cja._nodes, db_refs):
if node['data']['name'] == 'MAP2K1':
assert refs.get('HGNC') == \
'https://identifiers.org/hgnc:6840', refs
assert refs.get('TEXT') == 'mek1', refs
if node['data']['name'] == 'MAPK1':
assert refs.get('UniProt')
if node['data']['name'] == 'DUSP4':
assert not refs
def test_regamount():
cja = CyJSAssembler()
cja.add_statements([st_incamount, st_decamount])
cja.make_model()
assert len(cja._nodes) == 2
assert len(cja._edges) == 2
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==2
assert 'positive' in polarities
assert 'negative' in polarities
def test_ras():
cja = CyJSAssembler()
cja.add_statements([st_gef, st_gap])
cja.make_model()
assert len(cja._nodes) == 3
assert len(cja._edges) == 2
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==2
assert 'positive' in polarities
assert 'negative' in polarities
def test_selfmod():
cja = CyJSAssembler()
cja.add_statements([st_selfmod])
cja.make_model()
assert len(cja._nodes) == 1
assert len(cja._edges) == 1
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(polarities) == 1
assert polarities[0] == 'positive'
def test_complex():
cja = CyJSAssembler()
cja.add_statements([st_complex])
cja.make_model()
assert len(cja._nodes) == 3
assert len(cja._edges) == 3
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==1
assert 'none' in polarities
def test_print_cyjs_graph():
cja = CyJSAssembler()
cja.add_statements([st_act, st_act2])
cja.make_model()
cyjs_str = cja.print_cyjs_graph()
# assert output is not empty
assert len(cyjs_str) > len('{\n "edges": [],\n "nodes": []\n}')
def test_no_grouping():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
st3 = Phosphorylation(Agent('C'), Agent('B'))
cja = CyJSAssembler()
cja.add_statements([st1, st2, st3])
cja.make_model(grouping=True)
parents = [node['data']['parent'] for node in cja._nodes]
for parent in parents:
assert parent == ''
def test_grouping_block_targeting_node():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('C'), Agent('B'))
cja = CyJSAssembler()
cja.add_statements([st1, st2])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
assert parent_b == ''
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
assert_element_properties(cja)
assert parent_a == parent_c
parent_a_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_a][0]
assert parent_a_name.startswith('Group')
assert len(cja._edges) == 3
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 2
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 1
def test_grouping_node_targeting_block():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
cja = CyJSAssembler()
cja.add_statements([st1, st2])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
assert parent_a == ''
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
assert_element_properties(cja)
assert parent_b == parent_c
parent_b_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_b][0]
assert parent_b_name.startswith('Group')
assert len(cja._edges) == 3
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 2
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 1
def test_grouping_node_targeting_block_targeting_node():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
st3 = Phosphorylation(Agent('B'), Agent('D'))
st4 = Phosphorylation(Agent('C'), Agent('D'))
cja = CyJSAssembler()
cja.add_statements([st1, st2, st3, st4])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
assert parent_a == ''
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
if node['data']['name'] == 'D':
parent_d = node['data']['parent']
assert parent_d == ''
assert_element_properties(cja)
assert parent_b == parent_c
parent_b_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_b][0]
assert parent_b_name.startswith('Group')
assert len(cja._edges) == 6
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 4
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 2
def test_grouping_block_targeting_block():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
st3 = Phosphorylation(Agent('D'), Agent('B'))
st4 = Phosphorylation(Agent('D'), Agent('C'))
cja = CyJSAssembler()
cja.add_statements([st1, st2, st3, st4])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
if node['data']['name'] == 'D':
parent_d = node['data']['parent']
assert_element_properties(cja)
assert parent_b == parent_c
assert parent_a == parent_d
parent_b_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_b][0]
parent_a_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_a][0]
assert parent_b_name.startswith('Group')
assert parent_a_name.startswith('Group')
assert len(cja._edges) == 5
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 4
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 1
def test_edge_aggregation_between_nongroup_nodes():
cja = CyJSAssembler()
cja.add_statements([st_phos_Y, st_phos_T])
cja.make_model(grouping=False)
assert len(cja._nodes) == 2
assert len(cja._edges) == 1
for edge in cja._edges:
assert len(edge['data']['uuid_list']) == 2
for node in cja._nodes:
assert len(node['data']['uuid_list']) == 2
cja = CyJSAssembler()
cja.add_statements([st_phos_Y, st_phos_T])
cja.make_model(grouping=True)
assert len(cja._nodes) == 2
assert len(cja._edges) == 1
for edge in cja._edges:
assert len(edge['data']['uuid_list']) == 2
for node in cja._nodes:
assert len(node['data']['uuid_list']) == 2
def assert_element_properties(cja):
# each element needs an id
elements = ([n for n in cja._nodes] + [e for e in cja._edges])
for element in elements:
assert element['data']['id'] is not None, "Element ID is none"
assert element['data']['id'] != '', "Element ID is blank string!"
# each element should also have a list of uuids with at least one uuid
assert element['data']['uuid_list'] is not None, "uuid_list is None"
assert len(element['data']['uuid_list']) >= 1, "uuid_list is empty!"
for uuid in element['data']['uuid_list']:
assert type(uuid) == type('abc'), str(uuid) + ' is not a string'
|
tools/version_bump.py | tirkarthi/python-plexapi | 749 | 12674323 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper script to bump the current version."""
import argparse
import re
import subprocess
from packaging.version import Version
from plexapi import const
SUPPORTED_BUMP_TYPES = ["patch", "minor", "major"]
def _bump_release(release, bump_type):
"""Return a bumped release tuple consisting of 3 numbers."""
major, minor, patch = release
if bump_type == "patch":
patch += 1
elif bump_type == "minor":
minor += 1
patch = 0
elif bump_type == "major":
major += 1
minor = 0
patch = 0
return major, minor, patch
def bump_version(version, bump_type):
"""Return a new version given a current version and action."""
new_release = _bump_release(version.release, bump_type)
temp = Version("0")
temp._version = version._version._replace(release=new_release)
return Version(str(temp))
def write_version(version):
"""Update plexapi constant file with new version."""
with open("plexapi/const.py") as f:
content = f.read()
version_names = ["MAJOR", "MINOR", "PATCH"]
version_values = str(version).split(".", 2)
for n, v in zip(version_names, version_values):
version_line = f"{n}_VERSION = "
content = re.sub(f"{version_line}.*\n", f"{version_line}{v}\n", content)
with open("plexapi/const.py", "wt") as f:
        f.write(content)
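# Hedged illustration (added): write_version assumes plexapi/const.py contains
# lines shaped like the following, which the regex above rewrites in place
# (values invented for the example):
#
#     MAJOR_VERSION = 4
#     MINOR_VERSION = 7
#     PATCH_VERSION = 0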
def main():
"""Execute script."""
parser = argparse.ArgumentParser(description="Bump version of plexapi")
parser.add_argument(
"bump_type",
help="The type of version bump to perform",
choices=SUPPORTED_BUMP_TYPES,
)
parser.add_argument(
"--commit", action="store_true", help="Create a version bump commit"
)
parser.add_argument(
"--tag", action="store_true", help="Tag the commit with the release version"
)
arguments = parser.parse_args()
if arguments.tag and not arguments.commit:
parser.error("--tag requires use of --commit")
if arguments.commit and subprocess.run(["git", "diff", "--quiet"]).returncode == 1:
print("Cannot use --commit because git is dirty")
return
current = Version(const.__version__)
bumped = bump_version(current, arguments.bump_type)
assert bumped > current, "Bumped version is not newer than old version"
write_version(bumped)
if not arguments.commit:
return
subprocess.run(["git", "commit", "-nam", f"Release {bumped}"])
if arguments.tag:
subprocess.run(["git", "tag", str(bumped), "-m", f"Release {bumped}"])
def test_bump_version():
"""Make sure it all works."""
import pytest
assert bump_version(Version("4.7.0"), "patch") == Version("4.7.1")
assert bump_version(Version("4.7.0"), "minor") == Version("4.8.0")
assert bump_version(Version("4.7.3"), "minor") == Version("4.8.0")
assert bump_version(Version("4.7.0"), "major") == Version("5.0.0")
assert bump_version(Version("4.7.3"), "major") == Version("5.0.0")
assert bump_version(Version("5.0.0"), "major") == Version("6.0.0")
if __name__ == "__main__":
main()
|
third_party/gsutil/third_party/pyasn1/tests/codec/cer/__main__.py | tingshao/catapult | 2,151 | 12674325 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pyasn1/license.html
#
try:
import unittest2 as unittest
except ImportError:
import unittest
suite = unittest.TestLoader().loadTestsFromNames(
['tests.codec.cer.test_encoder.suite',
'tests.codec.cer.test_decoder.suite']
)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
macro_benchmark/SegLink/seglink/config.py | songhappy/ai-matrix | 180 | 12674329 | import tensorflow as tf
# basic
tf.app.flags.DEFINE_string('action', 'train', 'Action to take')
tf.app.flags.DEFINE_string('working_dir', '', 'Directory for saving checkpoints and log files')
tf.app.flags.DEFINE_string('log_file_prefix', 'fctd_', 'Prefix of logging file name')
# FCTD model
tf.app.flags.DEFINE_integer('pos_label', 1, 'Label for the positive (text) class')
tf.app.flags.DEFINE_integer('neg_label', 0, 'Label for the background class')
tf.app.flags.DEFINE_float('fctd_min_scale', 0.1, 'Minimum region size')
tf.app.flags.DEFINE_float('fctd_max_scale', 0.95, 'Maximum region size')
tf.app.flags.DEFINE_float('pos_scale_diff_threshold', 1.7, '')
tf.app.flags.DEFINE_float('neg_scale_diff_threshold', 2.0, '')
tf.app.flags.DEFINE_integer('fctd_n_scale', 6, 'Number of region scales')
tf.app.flags.DEFINE_integer('n_local_links', 8, 'Number of links of a grid node')
tf.app.flags.DEFINE_integer('n_cross_links', 4, 'Number of cross-layer links on each node')
tf.app.flags.DEFINE_string('link_clf_mode', 'softmax', 'Mode of classifying local links. Can be softmax or sigmoid')
# testing
tf.app.flags.DEFINE_integer('test_period', 5000, 'Period of on-the-fly testing')
tf.app.flags.DEFINE_string('test_model_path', '', 'Test model path')
tf.app.flags.DEFINE_string('test_record_path', '', 'Test tf-records path')
tf.app.flags.DEFINE_integer('num_test', 500, 'Number of test images')
tf.app.flags.DEFINE_float('node_threshold', 0.5, 'Confidence threshold for nodes')
tf.app.flags.DEFINE_float('link_threshold', 0.5, 'Confidence threshold for links')
tf.app.flags.DEFINE_integer('nms_top_k', 400, 'Apply NMS only to examples with top-k scores on each class')
tf.app.flags.DEFINE_integer('keep_top_k', 200, 'Keep examples with top-k scores after NMS')
tf.app.flags.DEFINE_integer('save_visualization', 0, 'Save visualization results')
tf.app.flags.DEFINE_string('result_format', 'icdar_2015_inc', 'Result file format')
tf.app.flags.DEFINE_float('bbox_scale_factor', 0, 'Bounding box scale trick')
# summaries and checkpoints
tf.app.flags.DEFINE_integer('brief_summary_period', 10, 'Period for brief summaries')
tf.app.flags.DEFINE_integer('detailed_summary_period', 500, 'Period for detailed summaries')
tf.app.flags.DEFINE_integer('checkpoint_period', 5000, 'Period for saving checkpoints')
|
filprofiler/__init__.py | whalesalad/filprofiler | 521 | 12674344 | """The Fil memory profiler."""
__all__ = ["__version__"]
# If we're running with Fil preloaded, after forks make sure Fil is no longer
# enabled, since we don't yet support child processes. This is also done in C
# code; doing it only in Python or only C doesn't seem to work.
import sys
import os
if sys.version_info[:2] > (3, 6):
# register_at_fork only works in Python 3.6 or later.
if os.getenv("__FIL_STATUS") in ("api", "program"):
def unset(_os=os):
_os.environ["__FIL_STATUS"] = "subprocess"
os.register_at_fork(after_in_child=unset)
del unset
# Fallback mechanism for detecting forks, for Python 3.6 or if someone isn't
# doing fork()-without-exec() right (i.e. not calling the C API postfork()):
_original_pid = os.getpid()
del sys, os
try:
from ._version import version as __version__
except ImportError:
# package is not installed
try:
from importlib.metadata import version, PackageNotFoundError
try:
__version__ = version(__name__)
except PackageNotFoundError:
__version__ = "unknown"
except ImportError:
        # importlib.metadata is only available on Python 3.8+:
__version__ = "unknown"
def load_ipython_extension(ipython):
"""Load our IPython magic."""
from IPython.core.error import UsageError
import os
if os.environ.get("__FIL_STATUS") != "api":
raise UsageError(
"In order to use Fil, you need to run your notebook with the Fil kernel.\n\n"
"You can change the kernel via the 'Change Kernel' option at the bottom of "
"the Kernel menu in Jupyter."
)
from ._ipython import FilMagics
ipython.register_magics(FilMagics)
|
pipeline/Serverless/02_pipeline/deploy.py | Rkauff/Klayers | 1,096 | 12674353 | import os
import time
from datetime import datetime
import boto3
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key
from aws_lambda_powertools.logging import Logger
logger = Logger()
import common.get_config as get_config
def check_regions_to_deploy(package, requirements_hash, regions):
"""
Args:
package: Name of package to deploy
requirements_hash: Hash of requirements.txt file
        regions: All regions configured for deployment
    Returns:
        regions_to_deploy (list): Regions where the latest deployed package doesn't match the provided requirements_hash
"""
table_name = os.environ["DB_NAME"]
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(table_name)
response = table.query(
IndexName="package_global",
KeyConditionExpression=Key("pckg").eq(package) & Key("dplySts").eq("latest"),
)
# check if there are any region in regions that aren't deployed
regions_deployed = [item["rgn"] for item in response["Items"]]
regions_to_deploy = [region for region in regions if region not in regions_deployed]
logger.info(
{
"message": f"Deploying to {len(regions_to_deploy)} new regions",
"new_regions": regions_to_deploy,
"regions_deployed": regions_deployed,
}
)
# for all deployed regions, check if it has the latest version
for item in response["Items"]:
if item["rqrmntsHsh"] != requirements_hash:
if item["rgn"] in regions:
regions_to_deploy.append(item["rgn"])
# deduplicate
regions_to_deploy = list(set(regions_to_deploy))
logger.info({"regions_to_deploy": regions_to_deploy})
return regions_to_deploy
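# Hedged illustration (added, not in the original file): shape of one item the
# "package_global" index query above is expected to return. Attribute names
# (pckg, dplySts, rgn, rqrmntsHsh) come from the code; the values are invented.
_EXAMPLE_DEPLOYED_ITEM = {
    "pckg": "requests",
    "dplySts": "latest",
    "rgn": "us-east-1",
    "rqrmntsHsh": "0123abcd",
}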
def download_artifact(package_artifact):
"""
Downloads s3://bucket_name/package_artifact to /tmp directory
"""
bucket_name = os.environ["BUCKET_NAME"]
s3 = boto3.resource("s3")
s3.meta.client.download_file(
bucket_name, package_artifact, f"/tmp/{package_artifact}"
)
with open(f"/tmp/{package_artifact}", "rb") as zip_file:
zip_binary = zip_file.read()
logger.info(f"Package {package_artifact} downloaded")
return zip_binary
def get_requirements_txt(package):
"""
Args:
package: Name of package to query for
    Returns:
        requirements_txt (str): requirements.txt of the package, or "null" if not present
"""
build_v0 = "bldVrsn0#"
sk = f"pckg#{package}"
client = boto3.client("dynamodb")
table_name = os.environ["DB_NAME"]
response = client.get_item(
TableName=table_name, Key={"pk": {"S": build_v0}, "sk": {"S": sk}},
)
logger.info({"query_requirements": response})
requirements_txt = response.get("Item", {}).get("rqrmntsTxt", {}).get("S", "null")
return requirements_txt
@logger.inject_lambda_context
def main(event, context):
regions = get_config.get_aws_regions()
package = event["package"]
version = event["version"]
build_flag = event["build_flag"]
package_artifact = event["zip_file"]
requirements_hash = event["requirements_hash"]
license_info = event["license_info"]
table_name = os.environ["DB_NAME"]
expiry_days = int(os.environ["EXPIRY_DAYS"])
dynamo_client = boto3.client("dynamodb")
deployed_flag = False
# Check if need to deploy
regions_to_deploy = check_regions_to_deploy(package, requirements_hash, regions)
if len(regions_to_deploy) == 0:
logger.info({"message": "No new regions to deploy to, terminating!"})
return {
"deployed_flag": deployed_flag,
"build_flag": build_flag,
"package": package,
"version": version,
"requirements_hash": requirements_hash,
}
logger.info(
{"message": "Regions to deploy", "regions_to_deploy": regions_to_deploy}
)
# Download Lambda Artifact
layer_name = f"{os.environ['LAMBDA_PREFIX']}{package}"
zip_binary = download_artifact(package_artifact)
# Get requirements txt
requirements_txt = get_requirements_txt(package)
for region in regions_to_deploy:
# Publish Layer Version
logger.info({"message": "Deploying", "region": region, "package": package})
lambda_client = boto3.client("lambda", region_name=region)
response = lambda_client.publish_layer_version(
LayerName=layer_name,
Description=f"{package}=={version} | {requirements_hash}",
Content={"ZipFile": zip_binary},
CompatibleRuntimes=["python3.6", "python3.7", "python3.8"],
LicenseInfo=license_info,
)
layer_version_arn = response["LayerVersionArn"]
layer_version_created_date = datetime.utcnow().isoformat()
layer_version = int(layer_version_arn.split(":")[-1])
# Make Layer Publicly accessible
logger.info(
{
"message": "Making Public",
"region": region,
"package": package,
"arn": layer_version_arn,
"created_date": layer_version_created_date,
}
)
lambda_client.add_layer_version_permission(
LayerName=layer_name,
VersionNumber=layer_version,
StatementId="make_public",
Action="lambda:GetLayerVersion",
Principal="*",
)
# Insert new entry into DynamoDB
logger.info(
{
"message": "Inserting to table",
"region": region,
"package": package,
"arn": layer_version_arn,
}
)
pk = f"lyr#{region}.{package}"
sk_v0 = "lyrVrsn0#"
sk = f"lyrVrsn#v{layer_version}"
sk_previous = f"lyrVrsn#v{layer_version-1}"
dynamo_client.transact_write_items(
TransactItems=[
{
"Update": {
"TableName": table_name,
"Key": {"pk": {"S": pk}, "sk": {"S": sk_v0},},
"UpdateExpression": "set "
"rqrmntsTxt = :rqrmntsTxt, "
"pckgVrsn = :pckgVrsn, "
"rqrmntsHsh = :rqrmntsHsh,"
"arn = :arn,"
"crtdDt = :crtdDt,"
"lyrVrsn = :lyrVrsn",
"ExpressionAttributeValues": {
":rqrmntsTxt": {"S": requirements_txt},
":crtdDt": {"S": layer_version_created_date},
":pckgVrsn": {"S": version},
":rqrmntsHsh": {"S": requirements_hash},
":arn": {"S": layer_version_arn},
":lyrVrsn": {"N": str(layer_version)},
},
# Allow update only if
# Current lyrVrsn is less than updated value
# or lyrVrsn doesn't exists
"ConditionExpression": "lyrVrsn <= :lyrVrsn OR attribute_not_exists(lyrVrsn)",
}
},
{
"Put": {
"TableName": table_name,
"Item": {
"pk": {"S": pk},
"sk": {"S": sk},
"pckgVrsn": {"S": version},
"crtdDt": {"S": layer_version_created_date},
"rqrmntsTxt": {"S": requirements_txt},
"rqrmntsHsh": {"S": requirements_hash},
"arn": {"S": layer_version_arn},
"pckg": {"S": package},
"rgn": {"S": region},
"dplySts": {"S": "latest"},
"lyrVrsn": {"N": str(layer_version)},
},
}
},
]
)
if layer_version > 1:
logger.info(
{
"message": "Updating Previous Version",
"region": region,
"package": package,
"arn": layer_version_arn,
}
)
try:
dynamo_client.update_item(
TableName=table_name,
Key={"pk": {"S": pk}, "sk": {"S": sk_previous}},
UpdateExpression="set " "dplySts = :dplySts, " "exDt = :exDt",
ExpressionAttributeValues={
":dplySts": {"S": "deprecated"},
":exDt": {"N": str(int(time.time() + 24 * 3600 * expiry_days))},
},
ConditionExpression="attribute_exists(sk)",
)
except ClientError as e:
if e.response["Error"]["Code"] == "ConditionalCheckFailedException":
logger.warning(
{
"message": "Conditional Check failed",
"layer_version": layer_version,
"sk": sk_previous,
}
)
deployed_flag = True
return {
"deployed_to": regions_to_deploy,
"deployed_flag": deployed_flag,
"build_flag": build_flag,
"package": package,
"version": version,
"requirements_hash": requirements_hash,
}
|
lib/gcn/sparse/torch_nn.py | ChenFengYe/relightable-nr | 105 | 12674371 | from torch import nn
from torch.nn import Sequential as Seq, Linear as Lin
##############################
# Basic layers
##############################
def act_layer(act_type, inplace=False, neg_slope=0.2, n_prelu=1):
"""
helper selecting activation
:param act_type:
:param inplace:
:param neg_slope:
:param n_prelu:
:return:
"""
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'leakyrelu':
layer = nn.LeakyReLU(neg_slope, inplace)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [%s] is not found' % act_type)
return layer
def norm_layer(norm_type, nc):
# helper selecting normalization layer
norm_type = norm_type.lower()
if norm_type == 'batch':
layer = nn.BatchNorm1d(nc, affine=True)
elif norm_type == 'instance':
layer = nn.InstanceNorm1d(nc, affine=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return layer
class MultiSeq(Seq):
def __init__(self, *args):
super(MultiSeq, self).__init__(*args)
def forward(self, *inputs):
for module in self._modules.values():
if type(inputs) == tuple:
inputs = module(*inputs)
else:
inputs = module(inputs)
return inputs
class MLP(Seq):
def __init__(self, channels, act_type='relu', norm_type=None, bias=True):
m = []
for i in range(1, len(channels)):
m.append(Lin(channels[i - 1], channels[i], bias))
if act_type:
m.append(act_layer(act_type))
if norm_type:
                m.append(norm_layer(norm_type, channels[i]))
super(MLP, self).__init__(*m)
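# Hedged usage sketch (added for illustration, not part of the original file):
# builds a two-layer MLP with the helpers above and runs a dummy batch through
# it; the channel sizes and activation choice are arbitrary.
def _demo_mlp():
    import torch
    mlp = MLP([16, 32, 8], act_type='leakyrelu', norm_type=None)
    out = mlp(torch.randn(4, 16))  # (batch=4, in=16) -> (batch=4, out=8)
    return out.shape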
|
scraper/src/tests/default_strategy/tags_test.py | erichiggins0/docsearch-scraper | 242 | 12674377 | # coding: utf-8
import lxml.html
from .abstract import get_strategy
class TestTags:
def test_adding_tags_for_page(self):
# Given
strategy = get_strategy({
'start_urls': [{
'url': 'http://foo.bar/api',
'tags': ["test"]
}]
})
strategy.dom = lxml.html.fromstring("""
<html><body>
<h1>Foo</h1>
</body></html>
""")
# When
actual = strategy.get_records_from_dom("http://foo.bar/api")
# Then
assert actual[0]['tags'] == ["test"]
def test_adding_tags_for_subpage(self):
# Given
strategy = get_strategy({
'start_urls': [{
'url': 'http://foo.bar/api',
'tags': ["test"]
}]
})
strategy.dom = lxml.html.fromstring("""
<html><body>
<h1>Foo</h1>
</body></html>
""")
# When
actual = strategy.get_records_from_dom("http://foo.bar/api/test")
# Then
assert actual[0]['tags'] == ["test"]
def test_regex_start_urls(self):
# Given
# Stub ENV variables read by ConfigLoader
strategy = get_strategy({
'start_urls': [
{
'url': 'http://foo.bar/.*',
'tags': ["test"]
}
]
})
strategy.dom = lxml.html.fromstring("""
<html><body>
<h1>Foo</h1>
</body></html>
""")
# When
actual = strategy.get_records_from_dom("http://foo.bar/api/test")
# Then
assert actual[0]['tags'] == ["test"]
|
tools/dev/parrot_fuzzer.py | winnit-myself/Wifie | 312 | 12674394 | #!/usr/bin/env python
# Copyright (C) 2009-2011, Parrot Foundation.
from fusil.application import Application
from fusil.process.watch import WatchProcess
from fusil.process.create import CreateProcess
from fusil.process.stdout import WatchStdout
from fusil.project_agent import ProjectAgent
from fusil.process.tools import locateProgram
from fusil.write_code import WriteCode
from optparse import OptionGroup
import re
import string
import random
'''
=head1 NAME
parrot_fuzzer.py - opcode fuzzer
=head1 DESCRIPTION
This is a fuzzer for Parrot, written in Python using the Fusil library. It
attempts to break Parrot by generating calls to random PIR opcodes.
=head1 DEPENDENCIES
This script requires Python 2.5+ to run. The Fusil
L<http://fusil.hachoir.org/trac> and python-ptrace
L<http://python-ptrace.hachoir.org/trac> libraries are also required.
=head1 USAGE
Short version: C<sudo python tools/dev/parrot_fuzzer.py>
C<parrot_fuzzer.py> is run like any other Fusil-based fuzzer. Fusil likes to be
run as the root user so that the child process in which Parrot runs can be put
in a more restricted environment, limiting potential damage.
Fusil assumes the existence of a C<fusil> user and group. Parrot runs as this
user/group as part of its restricted environment. Passing C<--unsafe> allows
it to run as the current user. Although it is not likely that this will cause
any damage to your system, it is possible.
C<parrot_fuzzer.py> needs access to Parrot's source code in order to figure out
which PMCs and ops are available. It assumes that it's running in the root
directory of Parrot's source code. You can specify a different directory using
the C<--parrot-root> switch.
=head1 OPTIONS
=over 4
=item C<--parrot-root=/path/to/parrot>
Represents the path to the Parrot root directory. By default, this is the
current directory.
=item C<--runcore=--some-runcore>
Specifies which runcore to use when running Parrot. The default is the I<slow>
core. This option corresponds directly to Parrot's C<--runcore> option. Other
runcores include I<fast>.
Run C<parrot --help> for more details.
=item C<--ignore-blacklist>
Some PMC's and opcodes are known to cause false positives or results of limited
value. These are blacklisted by default. Using C<--ignore-blacklist> causes
the fuzzer to use all available PMC's and opcodes, even those known to behave
badly during testing.
=item C<--instructions=10>
Represents the number of instructions during the test run. Note that a larger
number such as 20 does not necessarily result in more failures. Defaults to 3.
=back
=head1 LICENSE
This program is distributed under the same license as Parrot itself.
=cut
'''
class ParrotFuzzer(Application):
# Base name of the dir where temp files and successful results will be stored
NAME="parrot_fuzz"
def createFuzzerOptions(self, parser):
options = OptionGroup(parser, "Parrot fuzzer")
options.add_option("--parrot-root",
help="Parrot program path (default: .)",
type="str",
default=".")
options.add_option("--runcore",
help="Run Parrot with the specified runcore (default: --slow-core)",
type="str",
default="--slow-core")
options.add_option("--instructions",
help="Generate this many instructions per test run (default: 3)",
type="int",
default="3")
options.add_option("--ignore-blacklist",
help="Use opcodes and PMCs known to cause bad or questionable results (default: use blacklists)",
action="store_true",
default=False)
return options
def setupProject(self):
parrot_root = self.options.parrot_root
runcore = self.options.runcore
parrot = locateProgram(parrot_root + "/parrot")
process = ParrotProcess(self.project, [parrot, runcore, "<fuzzy.pir>"])
pirgen = PirGenerator(self.project, self.options)
WatchProcess(process)
WatchStdout(process)
class PirGenerator(ProjectAgent, WriteCode):
def __init__(self, project, options):
self.parrot_root = options.parrot_root
self.instruction_count = options.instructions
self.ignore_blacklist = options.ignore_blacklist
self.opfunc_gen = OpfuncGenerator()
self.arg_gen = ArgGenerator(self.parrot_root, self.ignore_blacklist)
self.opfunc_gen.populateOpfuncList(self.parrot_root, self.ignore_blacklist)
ProjectAgent.__init__(self, project, "pir_source")
WriteCode.__init__(self)
def generatePir(self, filename):
self.pir_body = ''
self.pir_preamble = """
.sub main
$P0 = new ['ExceptionHandler']
set_addr $P0, catchall
push_eh $P0 #pokemon: gotta catch 'em all
"""
self.pir_postamble = """
catchall:
# Don't do anything with exceptions: we're hoping for a segfault or similar.
.end
"""
# How many instructions to generate
# Strangely, a low number like 3 seems to generate slightly more faults
# than a high number like 20
opfunc_count = self.instruction_count
self.pir_body += " # generating "+str(opfunc_count)+" instructions\n"
arg_types = ['s', 'p', 'i', 'n', 'sc', 'ic', 'nc']
opfuncs = []
arg_counts = dict()
self.createFile(filename)
arg_gen = self.arg_gen
# Pick some opfuncs
for i in range(opfunc_count):
opfuncs.append(OpfuncCall(*self.opfunc_gen.getOpfunc()))
# Calculate how many of each type of arg will be needed
for arg_type in arg_types:
arg_counts[arg_type] = 0
for opfunc in opfuncs:
arg_counts[arg_type] += opfunc.getArgCount(arg_type)
for arg_type in arg_types:
arg_gen.setArgCount(arg_type, arg_counts[arg_type])
# Generate the args, adding any supporting code to the preamble
self.pir_preamble += arg_gen.generateStringArgs()
self.pir_preamble += arg_gen.generatePMCArgs()
self.pir_preamble += arg_gen.generateIntArgs()
self.pir_preamble += arg_gen.generateNumArgs()
self.pir_preamble += arg_gen.generateStringConstArgs()
self.pir_preamble += arg_gen.generateIntConstArgs()
self.pir_preamble += arg_gen.generateNumConstArgs()
# Put the args into the opfunc calls
for opfunc in opfuncs:
for arg_num in range(opfunc.getTotalArgCount()):
arg_type = opfunc.getArgType(arg_num)
opfunc.setArgVal(arg_num, arg_gen.getArgVal(arg_type))
self.pir_body += opfunc.getOpfuncCall()
# Write the code
self.write(0, self.pir_preamble)
self.write(0, self.pir_body)
self.write(0, self.pir_postamble)
self.close()
def on_session_start(self):
filename = self.session().createFilename('fuzzy.pir')
self.generatePir(filename)
self.send('pir_source', filename)
# Representation of a call to an opfunc, including values of arguments
# Note that argumens are literal, e.g. '$P0', '"foo"', etc
class OpfuncCall:
def __init__(self, name, sig):
self.arg_types = []
self.arg_vals = []
self.name = name
if sig == '':
self.long_name = name
else:
self.long_name = name + '_' + sig
self.total_arg_count = 0
if sig != '':
for arg in string.split(sig, "_"):
self.arg_types.append(arg)
self.arg_vals.append('')
self.total_arg_count += 1
def getLongName(self):
return self.long_name
def getArgCount(self, arg):
return self.arg_types.count(arg)
def getTotalArgCount(self):
return self.total_arg_count
def getArgType(self, n):
return self.arg_types[n]
def setArgVal(self, n, arg_val):
self.arg_vals[n] = arg_val
def getOpfuncCall(self):
opfunc_call = '\n # '+self.long_name+'\n ' + self.name
for arg_val in self.arg_vals:
opfunc_call += ' ' + arg_val + ','
opfunc_call = string.rstrip(opfunc_call, ",")
opfunc_call += "\n"
return opfunc_call
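# Hedged illustration (added): for OpfuncCall('add', 'i_i_ic') whose three
# argument values were set to '$I0', '$I1' and '7' via setArgVal,
# getOpfuncCall() emits roughly:
#
#     # add_i_i_ic
#     add $I0, $I1, 7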
class ArgGenerator:
arg_counts = {}
args = {}
def __init__(self, parrot_root, ignore_blacklist):
self.pmc_gen = PMCTypeGenerator()
self.pmc_gen.populatePMCList(parrot_root, ignore_blacklist)
def setArgCount(self, arg_type, count):
self.arg_counts[arg_type] = count
def getArgVal(self, arg_type):
return random.choice(self.args[arg_type])
def generateStringArgs(self):
pir_preamble = ""
self.args['s'] = []
for n in range(self.arg_counts['s']):
str_val = self.getString()
pir_preamble += " $S" + str(n) + " = \"" + str_val + "\"\n"
self.args['s'].append('$S' + str(n))
return pir_preamble
def generatePMCArgs(self):
pir_preamble = ""
self.args['p'] = []
for n in range(self.arg_counts['p']):
pir_preamble += " $P" + str(n) + " = new ['" + self.pmc_gen.getPMCType() + "']\n"
self.args['p'].append('$P' + str(n))
return pir_preamble
def generateIntArgs(self):
pir_preamble = ""
self.args['i'] = []
for n in range(self.arg_counts['i']):
num = random.choice(['neg_many','neg_one','zero','pos_one','pos_many'])
if num == 'neg_many':
num_val = random.randint(-999999,-2)
if num == 'neg_one':
num_val = -1
if num == 'zero':
num_val = 0
if num == 'pos_one':
num_val = 1
if num == 'pos_many':
num_val = random.randint(2, 999999)
pir_preamble += " $I" + str(n) + " = "+str(num_val)+"\n"
self.args['i'].append('$I' + str(n))
return pir_preamble
def generateNumArgs(self):
pir_preamble = ""
self.args['n'] = []
for n in range(self.arg_counts['n']):
num = random.choice(['neg_many','neg_one','zero','pos_one','pos_many'])
if num == 'neg_many':
num_val = (random.random() * -999999) - 1
if num == 'neg_one':
num_val = -1.0
if num == 'zero':
num_val = 0.0
if num == 'pos_one':
num_val = 1.0
if num == 'pos_many':
num_val = (random.random() * 999999) + 1
pir_preamble += " $N" + str(n) + " = "+str(num_val)+"\n"
self.args['n'].append('$N' + str(n))
return pir_preamble
def generateStringConstArgs(self):
pir_preamble = ""
self.args['sc'] = []
for n in range(self.arg_counts['sc']):
self.args['sc'].append('"'+self.getString()+'"')
return pir_preamble
def generateIntConstArgs(self):
pir_preamble = ""
self.args['ic'] = []
for n in range(self.arg_counts['ic']):
# Negative numbers and zero mess up control flow-related ops
#num = random.choice(['neg_many','neg_one','zero','pos_one','pos_many'])
num = random.choice(['pos_one','pos_many'])
if num == 'neg_many':
num_val = random.randint(-999999,-2)
if num == 'neg_one':
num_val = -1
if num == 'zero':
num_val = 0
if num == 'pos_one':
num_val = 1
if num == 'pos_many':
num_val = random.randint(2, 999999)
self.args['ic'].append(str(num_val))
return pir_preamble
def generateNumConstArgs(self):
pir_preamble = ""
self.args['nc'] = []
for n in range(self.arg_counts['nc']):
num = random.choice(['neg_many','neg_one','zero','pos_one','pos_many'])
if num == 'neg_many':
num_val = (random.random() * -999999) - 1
if num == 'neg_one':
num_val = -1.0
if num == 'zero':
num_val = 0.0
if num == 'pos_one':
num_val = 1.0
if num == 'pos_many':
num_val = (random.random() * 999999) + 1
self.args['nc'].append(str(num_val))
return pir_preamble
def getString(self):
str_val = ''
chars = string.printable + string.punctuation + string.whitespace
str_len = random.randint(0,10)
for m in range(str_len):
char = chars[random.randint(0, len(chars)-1)]
if char == '"':
char = '\\"'
if char == '\\':
char = '\\\\'
if char == '\n' or char == '\r':
char = ''
str_val += char
return str_val
class PMCTypeGenerator:
pmc_list = []
pmc_blacklist = [
'Packfile',
'PackfileAnnotation',
'PackfileAnnotationKeys',
'PackfileAnnotations',
'PackfileConstantTable',
'PackfileDirectory',
'PackfileFixupEntry',
'PackfileFixupTable',
'PackfileRawSegment',
'PackfileSegment',
]
def populatePMCList(self, parrot_root, ignore_blacklist):
pmc_pm = parrot_root + "/lib/Parrot/PMC.pm"
pmc_f = open(pmc_pm, 'r')
for line in pmc_f:
if re.search('\t[a-zA-Z]+ => [0-9]+,', line):
line = re.sub('\t', '', line)
line = re.sub(' =>.*\n', '', line)
if ignore_blacklist or line not in self.pmc_blacklist:
self.pmc_list.append(line)
def getPMCType(self):
return random.choice(self.pmc_list)
class OpfuncGenerator:
opfunc_list = []
opfunc_blacklist = [
'check_events', # Only for testing
'check_events__', # Not for direct use
'clears', # Clearing all [SPIN] registers isn't useful
'clearp',
'cleari',
'clearn',
'cpu_ret',
'debug',
'debug_break',
'debug_init',
'debug_load',
'debug_print',
'die',
'exit',
'gc_debug',
'if',
'pic_callr__',
'pic_get_params__',
'pic_infix__',
'pic_inline_sub__',
'pic_set_returns__',
'pin',
'pop_eh',
'prederef__',
'profile',
'push_eh',
'returncc',
'rethrow',
'runinterp',
'setn_ind',
'sets_ind',
'seti_ind',
'setp_ind',
'sleep',
'tailcall',
'trace',
'trap',
'unless',
'unpin',
'yield',
]
def populateOpfuncList(self, parrot_root, ignore_blacklist):
ops_h = parrot_root + "/src/ops/core_ops.c"
ops_f = open(ops_h, 'r')
# This is a moderately fragile hack that relies on the specific
# format of some generated code, expect breakage
for line in ops_f:
if line.find('PARROT_INLINE_OP') > -1 or line.find('PARROT_FUNCTION_OP') > -1:
line = ops_f.next()
short_name = line
line = ops_f.next()
long_name = line
# Strip leading space and opening double-quote
short_name = re.sub('[ ]+"', '', short_name)
long_name = re.sub('[ ]+"', '', long_name)
# Strip everything after closing double-quote
short_name = re.sub('".*\n', '', short_name)
long_name = re.sub('".*\n', '', long_name)
if long_name == short_name:
sig = ''
else:
sig = string.replace(long_name, short_name + '_', '')
#XXX: Don't know how to handle these args
if (not re.search('(pc|k|ki|kc|kic)', sig)):
if ignore_blacklist or short_name not in self.opfunc_blacklist:
self.opfunc_list.append([short_name, sig])
# print "accepted "+long_name+"("+sig+")"
#else:
# print "REJECTED "+long_name+"("+sig+")"
def getOpfunc(self):
return random.choice(self.opfunc_list)
class ParrotProcess(CreateProcess):
def on_pir_source(self, filename):
self.cmdline.arguments[1] = filename
self.createProcess()
if __name__ == "__main__":
ParrotFuzzer().main()
|
tools/perf/page_sets/media_cns_cases.py | google-ar/chromium | 777 | 12674398 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class BasicPlayPage(page_module.Page):
def __init__(self, url, page_set, name=''):
super(BasicPlayPage, self).__init__(url=url, page_set=page_set, name=name)
self.add_browser_metrics = True
def PlayAction(self, action_runner):
action_runner.PlayMedia(playing_event_timeout_in_seconds=60,
ended_event_timeout_in_seconds=60)
def RunPageInteractions(self, action_runner):
self.PlayAction(action_runner)
def SeekBeforeAndAfterPlayhead(self, action_runner):
action_runner.PlayMedia(playing_event_timeout_in_seconds=60)
# Wait for 1 second so that we know the play-head is at ~1s.
action_runner.Wait(1)
# Seek to before the play-head location.
action_runner.SeekMedia(seconds=0.5, timeout_in_seconds=60,
label='seek_warm')
# Seek to after the play-head location.
action_runner.SeekMedia(seconds=15, timeout_in_seconds=60,
label='seek_cold')
class SeekBeforeAndAfterPlayheadPage(BasicPlayPage):
def __init__(self, url, page_set, name):
super(SeekBeforeAndAfterPlayheadPage, self).__init__(url=url,
page_set=page_set,
name=name)
self.add_browser_metrics = False
def RunPageInteractions(self, action_runner):
self.SeekBeforeAndAfterPlayhead(action_runner)
class MediaCnsCasesPageSet(story.StorySet):
""" Media benchmark on network constrained conditions. """
def __init__(self):
super(MediaCnsCasesPageSet, self).__init__()
urls_list = [
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=no_constraints_webm&src=tulip2.webm&net=none',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=cable_webm&src=tulip2.webm&net=cable',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_webm&src=tulip2.webm&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=no_constraints_ogv&src=tulip2.ogv&net=none',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=cable_ogv&src=tulip2.ogv&net=cable',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_ogv&src=tulip2.ogv&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=no_constraints_mp4&src=tulip2.mp4&net=none',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=cable_mp4&src=tulip2.mp4&net=cable',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_mp4&src=tulip2.mp4&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=no_constraints_wav&src=tulip2.wav&type=audio&net=none',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=cable_wav&src=tulip2.wav&type=audio&net=cable',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_wav&src=tulip2.wav&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=no_constraints_ogg&src=tulip2.ogg&type=audio&net=none',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=cable_ogg&src=tulip2.ogg&type=audio&net=cable',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_ogg&src=tulip2.ogg&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=no_constraints_mp3&src=tulip2.mp3&type=audio&net=none',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=cable_mp3&src=tulip2.mp3&type=audio&net=cable',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_mp3&src=tulip2.mp3&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=no_constraints_m4a&src=tulip2.m4a&type=audio&net=none',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=cable_m4a&src=tulip2.m4a&type=audio&net=cable',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_m4a&src=tulip2.m4a&type=audio&net=wifi'
]
for url in urls_list:
self.AddStory(BasicPlayPage(url, self))
urls_list2 = [
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_mp3&src=tulip2.mp3&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_m4a&src=tulip2.m4a&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_ogg&src=tulip2.ogg&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_wav&src=tulip2.wav&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_mp4&src=tulip2.mp4&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_ogv&src=tulip2.ogv&type=audio&net=wifi',
# pylint: disable=line-too-long
'file://tough_video_cases/video.html?id=wifi_webm&src=tulip2.webm&type=audio&net=wifi'
]
for url in urls_list2:
if url in urls_list:
name = 'seek_' + url
else:
name = ''
self.AddStory(SeekBeforeAndAfterPlayheadPage(url, self, name=name))
|
src/backbones/efficientnet/effnetv2_configs.py | HY-Vision-Lab/metrabs | 208 | 12674423 | # Copyright 2021 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""EfficientNet V1 and V2 model configs."""
import functools
import re
import backbones.efficientnet.effnetv2_hparams as hparams
cfg_register = functools.partial(hparams.register, prefix='cfg:')
class BlockDecoder(object):
"""Block Decoder for readability."""
def _decode_block_string(self, block_string):
"""Gets a block through a string notation of arguments."""
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
return hparams.Config(
kernel_size=int(options['k']),
num_repeat=int(options['r']),
input_filters=int(options['i']),
output_filters=int(options['o']),
expand_ratio=int(options['e']),
se_ratio=float(options['se']) if 'se' in options else None,
bottomright_stride='br' in options,
strides=int(options['s']),
conv_type=int(options['c']) if 'c' in options else 0,
)
def _encode_block_string(self, block):
"""Encodes a block to a string."""
args = [
'r%d' % block.num_repeat,
'k%d' % block.kernel_size,
's%d' % block.strides,
'e%s' % block.expand_ratio,
'i%d' % block.input_filters,
'o%d' % block.output_filters,
'c%d' % block.conv_type,
'f%d' % block.fused_conv,
]
if block.se_ratio > 0 and block.se_ratio <= 1:
args.append('se%s' % block.se_ratio)
        if block.bottomright_stride:
args.append('br')
return '_'.join(args)
def decode(self, string_list):
"""Decodes a list of string notations to specify blocks inside the network.
Args:
string_list: a list of strings, each string is a notation of block.
Returns:
A list of namedtuples to represent blocks arguments.
"""
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(self._decode_block_string(block_string))
return blocks_args
def encode(self, blocks_args):
"""Encodes a list of Blocks to a list of strings.
Args:
blocks_args: A list of namedtuples to represent blocks arguments.
Returns:
a list of strings, each string is a notation of block.
"""
block_strings = []
for block in blocks_args:
block_strings.append(self._encode_block_string(block))
return block_strings
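# Hedged usage sketch (added for illustration, not in the original file):
# decodes one v1-style block string from the notation above and reads back
# its fields.
def _demo_block_decoding():
    block = BlockDecoder().decode(['r2_k3_s2_e6_i16_o24_se0.25'])[0]
    # r2 -> num_repeat=2, k3 -> kernel_size=3, s2 -> strides=2,
    # e6 -> expand_ratio=6, i16/o24 -> input/output filters, se0.25 -> se_ratio
    return block.num_repeat, block.kernel_size, block.se_ratio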
#################### EfficientNet V1 configs ####################
v1_b0_block_str = [
'r1_k3_s1_e1_i32_o16_se0.25',
'r2_k3_s2_e6_i16_o24_se0.25',
'r2_k5_s2_e6_i24_o40_se0.25',
'r3_k3_s2_e6_i40_o80_se0.25',
'r3_k5_s1_e6_i80_o112_se0.25',
'r4_k5_s2_e6_i112_o192_se0.25_br',
'r1_k3_s1_e6_i192_o320_se0.25',
]
efficientnetv1_params = {
# (width_coefficient, depth_coefficient, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
}
def efficientnetv1_config(model_name='efficientnet-b0'):
"""EfficientNetV1 model config."""
width_coefficient, depth_coefficient, isize, dropout_rate = (
efficientnetv1_params[model_name])
cfg = hparams.Config(
model=dict(
model_name=model_name,
blocks_args=BlockDecoder().decode(v1_b0_block_str),
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
dropout_rate=dropout_rate,
),
eval=dict(isize=isize),
train=dict(isize=0.8), # 80% of eval size
data=dict(augname='effnetv1_autoaug'),
)
return cfg
#################### EfficientNet V2 configs ####################
v2_base_block = [ # The baseline config for v2 models.
'r1_k3_s1_e1_i32_o16_c1',
'r2_k3_s2_e4_i16_o32_c1',
'r2_k3_s2_e4_i32_o48_c1',
'r3_k3_s2_e4_i48_o96_se0.25',
'r5_k3_s1_e6_i96_o112_se0.25',
'r8_k3_s2_e6_i112_o192_se0.25_br',
]
v2_s_block = [ # about base.yaml * (width1.4, depth1.8)
'r2_k3_s1_e1_i24_o24_c1',
'r4_k3_s2_e4_i24_o48_c1',
'r4_k3_s2_e4_i48_o64_c1',
'r6_k3_s2_e4_i64_o128_se0.25',
'r9_k3_s1_e6_i128_o160_se0.25',
'r15_k3_s2_e6_i160_o256_se0.25_br',
]
v2_m_block = [ # about base.yaml * (width1.6, depth2.2)
'r3_k3_s1_e1_i24_o24_c1',
'r5_k3_s2_e4_i24_o48_c1',
'r5_k3_s2_e4_i48_o80_c1',
'r7_k3_s2_e4_i80_o160_se0.25',
'r14_k3_s1_e6_i160_o176_se0.25',
'r18_k3_s2_e6_i176_o304_se0.25_br',
'r5_k3_s1_e6_i304_o512_se0.25',
]
v2_l_block = [ # about base.yaml * (width2.0, depth3.1)
'r4_k3_s1_e1_i32_o32_c1',
'r7_k3_s2_e4_i32_o64_c1',
'r7_k3_s2_e4_i64_o96_c1',
'r10_k3_s2_e4_i96_o192_se0.25',
'r19_k3_s1_e6_i192_o224_se0.25',
'r25_k3_s2_e6_i224_o384_se0.25_br',
'r7_k3_s1_e6_i384_o640_se0.25',
]
v2_xl_block = [ # only for 21k pretraining.
'r4_k3_s1_e1_i32_o32_c1',
'r8_k3_s2_e4_i32_o64_c1',
'r8_k3_s2_e4_i64_o96_c1',
'r16_k3_s2_e4_i96_o192_se0.25',
'r24_k3_s1_e6_i192_o256_se0.25',
'r32_k3_s2_e6_i256_o512_se0.25_br',
'r8_k3_s1_e6_i512_o640_se0.25',
]
efficientnetv2_params = {
# (block, width, depth, train_size, eval_size, dropout, randaug, mixup, aug)
'efficientnetv2-s': # 83.9% @ 22M
(v2_s_block, 1.0, 1.0, 300, 384, 0.2, 10, 0, 'randaug'),
'efficientnetv2-m': # 85.2% @ 54M
(v2_m_block, 1.0, 1.0, 384, 480, 0.3, 15, 0.2, 'randaug'),
'efficientnetv2-l': # 85.7% @ 120M
(v2_l_block, 1.0, 1.0, 384, 480, 0.4, 20, 0.5, 'randaug'),
'efficientnetv2-xl':
(v2_xl_block, 1.0, 1.0, 384, 512, 0.4, 20, 0.5, 'randaug'),
# For fair comparison to EfficientNetV1, using the same scaling and autoaug.
'efficientnetv2-b0': # 78.7% @ 7M params
(v2_base_block, 1.0, 1.0, 192, 224, 0.2, 0, 0, 'effnetv1_autoaug'),
'efficientnetv2-b1': # 79.8% @ 8M params
(v2_base_block, 1.0, 1.1, 192, 240, 0.2, 0, 0, 'effnetv1_autoaug'),
'efficientnetv2-b2': # 80.5% @ 10M params
(v2_base_block, 1.1, 1.2, 208, 260, 0.3, 0, 0, 'effnetv1_autoaug'),
'efficientnetv2-b3': # 82.1% @ 14M params
(v2_base_block, 1.2, 1.4, 240, 300, 0.3, 0, 0, 'effnetv1_autoaug'),
}
def efficientnetv2_config(model_name='efficientnetv2-s'):
"""EfficientNetV2 model config."""
block, width, depth, train_size, eval_size, dropout, randaug, mix, aug = (
efficientnetv2_params[model_name])
cfg = hparams.Config(
model=dict(
model_name=model_name,
blocks_args=BlockDecoder().decode(block),
width_coefficient=width,
depth_coefficient=depth,
dropout_rate=dropout,
),
train=dict(isize=train_size, stages=4, sched=True),
eval=dict(isize=eval_size),
data=dict(augname=aug, ram=randaug, mixup_alpha=mix, cutmix_alpha=mix),
)
return cfg
################################################################################
def get_model_config(model_name: str):
"""Main entry for model name to config."""
if model_name.startswith('efficientnet-'):
return efficientnetv1_config(model_name)
if model_name.startswith('efficientnetv2-'):
return efficientnetv2_config(model_name)
raise ValueError(f'Unknown model_name {model_name}')
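# Hedged usage sketch (added for illustration, not in the original file):
# resolves a config by model name and reads the train/eval image sizes
# declared in efficientnetv2_params above.
def _demo_model_config():
    cfg = get_model_config('efficientnetv2-s')
    return cfg.train.isize, cfg.eval.isize  # 300 and 384 per the table above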
|
locations/spiders/thebigbiscuit.py | nbeecher/alltheplaces | 297 | 12674426 | # -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
class TheBigBiscuitSpider(scrapy.Spider):
name = 'thebigbiscuit'
item_attributes = {'brand': 'The Big Biscuit'}
allowed_domains = ['www.bigbiscuit.com']
start_urls = ['https://bigbiscuit.com/locations/']
def parse(self, response):
for each in response.xpath('//*[contains(@class, "section--location")]'):
name = each.xpath('./*[@class="location-title"]/text()').extract_first()
if name:
street = each.xpath('.//*[@class="info-block"]//*[@class="text-small"][1]/text()').extract_first()
postcode = each.xpath('./@data-zip').extract_first()
city = each.xpath('./@data-city').extract_first()
state = each.xpath('./@data-state').extract_first()
phone = each.xpath('.//*[@class="phone"]/text()').extract_first()
ref = each.xpath('.//*[@data-location]/@data-location').extract_first()
hours = each.xpath('.//*[@class="info-block"]//*[@class="text-small"][3]/text()').extract_first()
website = each.xpath(f'//*[@class="title h6" and text()="{name}"]/parent::div/following-sibling::a[1]/@href').extract_first()
properties = {
'name': name,
'ref': ref,
'street': street,
'city': city,
'postcode': postcode,
'state': state,
'phone': phone,
'website': website
}
yield GeojsonPointItem(**properties)
|
setup.py | vilhub/DeBERTa | 916 | 12674450 | import subprocess
import datetime
import sys
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
install('setuptools')
install('gitpython')
import setuptools
import git
repo = git.Repo(search_parent_directories=True)
date=datetime.datetime.utcnow()
with open("README.md", "r") as fh:
long_description = fh.read() + f'\n git version: {repo.head.object.hexsha}' + \
f'\n date: {date}'
with open('VERSION') as fs:
version = fs.readline().strip()
# version += f".dev{date.strftime('%y%m%d%H%M%S')}"
with open('requirements.txt') as fs:
requirements = [l.strip() for l in fs if not l.strip().startswith('#')]
extras = {}
extras["docs"] = ["recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme"]
setuptools.setup(
name="DeBERTa",
version=version,
author="penhe",
author_email="<EMAIL>",
description="Decoding enhanced BERT with Disentangled Attention",
keywords="NLP deep learning transformer pytorch Attention BERT RoBERTa DeBERTa",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/microsoft/DeBERTa",
packages=setuptools.find_packages(exclude=['__pycache__']),
package_dir = {'DeBERTa':'DeBERTa'},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
extras_require=extras,
install_requires=requirements)
|
tests/test_s3-acl.py | kaiboma/hammer | 385 | 12674455 | import pytest
from . import mock_s3
from library.aws.s3 import S3BucketsAclChecker
from library.aws.utility import Account
buckets = {
"Bucket1": {
"Description": "Private bucket",
"CheckShouldPass": True,
"ACL": "private",
},
"Bucket2": {
"Description": "Public read bucket",
"CheckShouldPass": False,
"ACL": "public-read",
},
"Bucket3": {
"Description": "Public read-write bucket",
"CheckShouldPass": False,
"ACL": "public-read-write",
},
"Bucket4": {
"Description": "Authenticated read bucket",
"CheckShouldPass": False,
"ACL": "authenticated-read",
}
}
def find_rule_prop(bucket, prop, default):
try:
return buckets[bucket.name][prop]
except KeyError:
return default
def ident_test(arg):
"""
Used to build identification string for each autogenerated test (for easy recognition of failed tests).
:param bucket: dict with information about rules
:return: .
"""
# print(jsonDumps(s3acl_details))
if isinstance(arg, bool):
return "remediated" if arg else "original"
else:
descr = find_rule_prop(arg, "Description", "default description")
return f"params: {arg.name} ({descr})"
def pytest_generate_tests(metafunc):
"""
Entrypoint for tests (built-in pytest function for dynamic generation of test cases).
"""
    # Launch S3 mocking and env preparation
mock_s3.start()
mock_s3.create_env(buckets)
account = Account()
checker = S3BucketsAclChecker(account)
checker.check()
for s3_bucket in checker.buckets:
s3_bucket.restrict_acl()
checker_remediated = S3BucketsAclChecker(account)
checker_remediated.check()
s3_buckets = [(bucket, False) for bucket in checker.buckets]
s3_buckets += [(bucket, True) for bucket in checker_remediated.buckets]
metafunc.parametrize("bucket,remediated", s3_buckets, ids=ident_test)
@pytest.mark.s3acl
def test_s3acl(bucket, remediated):
"""
Actual testing function.
:param bucket:
:param remediated
:return: nothing, raises AssertionError if actual test result is not matched with expected
"""
# print(f"{jsonDumps(s3acl_details)}")
expected = True if remediated else find_rule_prop(bucket, "CheckShouldPass", True)
assert expected == (not bucket.public_by_acl)
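# A minimal run sketch: the s3acl marker applied above allows a filtered run, e.g.
#   pytest -m s3acl tests/test_s3-acl.py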
|
tests/gpflow/utilities/test_bijectors.py | HarrySpearing/GPflow | 1,724 | 12674469 | import numpy as np
import pytest
import tensorflow_probability as tfp
from gpflow.config import Config, as_context
from gpflow.utilities import positive, triangular
@pytest.mark.parametrize(
"env_lower, override_lower",
[
(0.1, None), # ensure default from config is applied
(0.0, 0.2), # ensure override is applied
(0.3, 0.4), # ensure local overrides config
],
)
def test_positive_lower(env_lower, override_lower):
expected_lower = override_lower or env_lower
with as_context(Config(positive_bijector="softplus", positive_minimum=env_lower)):
bijector = positive(lower=override_lower)
assert isinstance(bijector, tfp.bijectors.Chain)
assert np.isclose(bijector.bijectors[0].shift, expected_lower)
@pytest.mark.parametrize(
"env_bijector, override_bijector, expected_class",
[
("softplus", None, tfp.bijectors.Softplus),
("softplus", "Exp", tfp.bijectors.Exp),
("exp", None, tfp.bijectors.Exp),
("exp", "Softplus", tfp.bijectors.Softplus),
],
)
def test_positive_bijector(env_bijector, override_bijector, expected_class):
with as_context(Config(positive_bijector=env_bijector, positive_minimum=0.0)):
bijector = positive(base=override_bijector)
assert isinstance(bijector, expected_class)
def test_positive_calculation_order():
value, lower = -10.0, 10.0
expected = np.exp(value) + lower
with as_context(Config(positive_bijector="exp", positive_minimum=lower)):
result = positive()(value).numpy()
assert np.isclose(result, expected)
assert result >= lower
def test_triangular():
assert isinstance(triangular(), tfp.bijectors.FillTriangular)
|
venv/lib/python3.7/site-packages/pyasn1_modules/rfc8619.py | nicholasadamou/StockBird | 9,953 | 12674491 | #
# This file is part of pyasn1-modules software.
#
# Created by <NAME>.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Algorithm Identifiers for HKDF
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc8619.txt
#
from pyasn1.type import univ
from pyasn1_modules import rfc5280
# Object Identifiers
id_alg_hkdf_with_sha256 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.28')
id_alg_hkdf_with_sha384 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.29')
id_alg_hkdf_with_sha512 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.30')
# Key Derivation Algorithm Identifiers
kda_hkdf_with_sha256 = rfc5280.AlgorithmIdentifier()
kda_hkdf_with_sha256['algorithm'] = id_alg_hkdf_with_sha256
# kda_hkdf_with_sha256['parameters'] are absent
kda_hkdf_with_sha384 = rfc5280.AlgorithmIdentifier()
kda_hkdf_with_sha384['algorithm'] = id_alg_hkdf_with_sha384
# kda_hkdf_with_sha384['parameters'] are absent
kda_hkdf_with_sha512 = rfc5280.AlgorithmIdentifier()
kda_hkdf_with_sha512['algorithm'] = id_alg_hkdf_with_sha512
# kda_hkdf_with_sha512['parameters'] are absent
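# A minimal usage sketch: the identifiers above are plain pyasn1 objects, so they
# can be DER-encoded with pyasn1's standard codec:
#   from pyasn1.codec.der.encoder import encode
#   der_bytes = encode(kda_hkdf_with_sha256)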
|
deps/memkind/test/python_framework/cmd_helper.py | kimleeju/Heterogeneous_Redis | 107 | 12674498 | #
# Copyright (C) 2016 Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice(s),
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice(s),
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pytest
import os
import tempfile
import subprocess
class CMD_helper(object):
def execute_cmd(self, command, sudo=False):
if sudo:
command = "sudo {0}".format(command)
#Initialize temp file for stdout. Will be removed when closed.
outfile = tempfile.SpooledTemporaryFile()
try:
#Invoke process
p = subprocess.Popen(command, stdout=outfile, stderr=subprocess.STDOUT, shell=True)
p.communicate()
#Read stdout from file
outfile.seek(0)
stdout = outfile.read()
except:
raise
finally:
#Make sure the file is closed
outfile.close()
retcode = p.returncode
return stdout, retcode
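    # A minimal usage sketch (assumes a POSIX shell on the test host):
    #   stdout, retcode = CMD_helper().execute_cmd("ls /tmp")
    #   stdout, retcode = CMD_helper().execute_cmd("dmesg", sudo=True)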
def get_command_path(self, binary):
"""Get the path to the binary."""
path = os.path.dirname(os.path.abspath(__file__))
return os.path.join(path, binary) |
nodemcu_uploader/luacode.py | bazooka07/nodemcu-uploader | 324 | 12674565 | # -*- coding: utf-8 -*-
"""This module contains all the LUA code that needs to be on the device
to perform whats needed. They will be uploaded if they doesn't exist"""
# Copyright (C) 2015-2019 <NAME> <<EMAIL>>
# pylint: disable=C0301
# flake8: noqa
LUA_FUNCTIONS = ['recv_block', 'recv_name', 'recv', 'shafile', 'send_block', 'send_file', 'send']
DOWNLOAD_FILE = "file.open('{filename}') print(file.seek('end', 0)) file.seek('set', {bytes_read}) uart.write(0, file.read({chunk_size}))file.close()"
PRINT_FILE = "file.open('{filename}') print('---{filename}---') print(file.read()) file.close() print('---')"
INFO_GROUP = "for key,value in pairs(node.info('{group}')) do k=tostring(key) print(k .. string.rep(' ', 20 - #k), tostring(value)) end"
LIST_FILES = 'for key,value in pairs(file.list()) do print(key,value) end'
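# Illustrative formatting of the command templates above (the values are assumptions):
#   DOWNLOAD_FILE.format(filename='init.lua', bytes_read=0, chunk_size=256)
#   PRINT_FILE.format(filename='init.lua')
#   INFO_GROUP.format(group='hw')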
# NUL = \000, ACK = \006
RECV_LUA = \
r"""
function recv()
local on,w,ack,nack=uart.on,uart.write,'\6','\21'
local fd
local function recv_block(d)
local t,l = d:byte(1,2)
if t ~= 1 then w(0, nack); fd:close(); return on('data') end
if l >= 0 then fd:write(d:sub(3, l+2)); end
if l == 0 then fd:close(); w(0, ack); return on('data') else w(0, ack) end
end
local function recv_name(d) d = d:gsub('%z.*', '') d:sub(1,-2) file.remove(d) fd=file.open(d, 'w') on('data', 130, recv_block, 0) w(0, ack) end
on('data', '\0', recv_name, 0)
w(0, 'C')
end
function shafile(f) print(crypto.toHex(crypto.fhash('sha1', f))) end
""" # noqa: E122
SEND_LUA = \
r"""
function send(f) uart.on('data', 1, function (data)
local on,w=uart.on,uart.write
local fd
local function send_block(d) l = string.len(d) w(0, '\001' .. string.char(l) .. d .. string.rep('\0', 128 - l)) return l end
local function send_file(f)
local s, p
fd=file.open(f) s=fd:seek('end', 0) p=0
on('data', 1, function(data)
if data == '\006' and p<s then
fd:seek('set',p) p=p+send_block(fd:read(128))
else
send_block('') fd:close() on('data') print('interrupted')
end
end, 0)
w(0, f .. '\000')
end
uart.on('data') if data == 'C' then send_file(f) else print('transfer interrupted') end end, 0)
end
"""
UART_SETUP = 'uart.setup(0,{baud},8,0,1,1)'
REMOVE_ALL_FILES = r"""
for key,value in pairs(file.list()) do file.remove(key) end
"""
|
tests/views/test_current.py | whywaita/gitfs | 921 | 12674583 | # Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from threading import Event
import pytest
from mock import patch, MagicMock, call
from fuse import FuseOSError
from gitfs.views.current import CurrentView
from gitfs.cache.gitignore import CachedIgnore
class TestCurrentView(object):
def test_rename(self):
mocked_re = MagicMock()
mocked_index = MagicMock()
mocked_os = MagicMock()
mocked_result = MagicMock()
mocked_result.rename.return_value = True
mocked_re.sub.return_value = "new"
mocked_os.path.split.return_value = [1, 1]
with patch.multiple("gitfs.views.current", re=mocked_re, os=mocked_os):
from gitfs.views import current as current_view
old_rename = current_view.PassthroughView.rename
current_view.PassthroughView.rename = lambda self, old, new: True
current = CurrentView(
regex="regex", repo="repo", repo_path="repo_path", ignore=CachedIgnore()
)
current._stage = mocked_index
result = current.rename("old", "new")
assert result is True
mocked_index.assert_called_once_with(
**{"remove": 1, "add": "new", "message": "Rename old to new"}
)
mocked_os.path.split.assert_called_once_with("old")
current_view.PassthroughView.rename = old_rename
def test_rename_in_git_dir(self):
current = CurrentView(repo="repo", repo_path="repo_path", ignore=CachedIgnore())
with pytest.raises(FuseOSError):
current.rename(".git/", ".git/")
def test_symlink(self):
mocked_index = MagicMock()
mocked_repo = MagicMock()
mocked_full_path = MagicMock()
mocked_full_path.return_value = "full_path"
mocked_repo._full_path = mocked_full_path
with patch("gitfs.views.current.os") as mocked_os:
mocked_os.symlink.return_value = "done"
current = CurrentView(
repo=mocked_repo, repo_path="repo_path", ignore=CachedIgnore()
)
current._stage = mocked_index
assert current.symlink("name", "target") == "done"
mocked_os.symlink.assert_called_once_with("target", "full_path")
mocked_full_path.assert_called_once_with("name")
message = "Create symlink to target for name"
mocked_index.assert_called_once_with(add="name", message=message)
def test_readlink(self):
mocked_repo = MagicMock()
mocked_full_path = MagicMock()
mocked_full_path.return_value = "full path"
mocked_repo._full_path = mocked_full_path
with patch("gitfs.views.current.os") as mocked_os:
mocked_os.readlink.return_value = "done"
current = CurrentView(
repo=mocked_repo, repo_path="repo_path", ignore=CachedIgnore()
)
current._full_path = mocked_full_path
assert current.readlink("path") == "done"
def test_getattr(self):
mocked_full = MagicMock()
mocked_os = MagicMock()
mocked_stat = MagicMock()
mocked_repo = MagicMock()
mocked_stat.simple = "stat"
mocked_os.lstat.return_value = mocked_stat
mocked_full.return_value = "full_path"
mocked_repo._full_path = mocked_full
with patch.multiple("gitfs.views.current", os=mocked_os, STATS=["simple"]):
current = CurrentView(
repo=mocked_repo,
uid=1,
gid=1,
repo_path="repo_path",
ignore=CachedIgnore(),
)
current._full_path = mocked_full
result = current.getattr("path")
asserted_result = {"st_uid": 1, "st_gid": 1, "simple": "stat"}
assert result == asserted_result
mocked_os.lstat.assert_called_once_with("full_path")
mocked_full.assert_called_once_with("path")
def test_write_in_git_dir(self):
with pytest.raises(FuseOSError):
current = CurrentView(
repo="repo",
uid=1,
gid=1,
repo_path="repo_path",
read_only=Event(),
ignore=CachedIgnore(),
)
current.write(".git/index", "buf", "offset", 1)
def test_write_in_modules_dir(self):
with pytest.raises(FuseOSError):
current = CurrentView(
repo="repo",
uid=1,
gid=1,
repo_path="repo_path",
read_only=Event(),
ignore=CachedIgnore(),
)
current.write(".gitmodules", "buf", "offset", 1)
def test_write_to_large_file(self):
current = CurrentView(
repo="repo",
uid=1,
gid=1,
repo_path="repo_path",
read_only=Event(),
ignore=CachedIgnore(),
)
current.max_size = 10
current.dirty = {"/path": {"size": 5}}
with pytest.raises(FuseOSError):
current.write("/path", "bufffffert", 11, 1)
def test_write(self):
from gitfs.views import current as current_view
        mocked_write = lambda self, path, buf, offset, fh: "done"
old_write = current_view.PassthroughView.write
current_view.PassthroughView.write = mocked_write
current = CurrentView(
repo="repo",
uid=1,
gid=1,
repo_path="repo_path",
read_only=Event(),
ignore=CachedIgnore(),
)
current.max_offset = 20
current.max_size = 20
current.dirty = {1: {}}
assert current.write("/path", "buf", 3, 1) == "done"
assert current.dirty == {1: {"message": "Update /path", "stage": True}}
current_view.PassthroughView.write = old_write
def test_mkdir(self):
from gitfs.views import current as current_view
old_mkdir = current_view.PassthroughView.mkdir
old_chmod = current_view.PassthroughView.chmod
mocked_mkdir = lambda self, path, mode: "done"
mocked_chmod = MagicMock()
mocked_chmod.return_value = None
current_view.PassthroughView.mkdir = mocked_mkdir
current_view.PassthroughView.chmod = mocked_chmod
mocked_release = MagicMock()
mocked_full_path = MagicMock()
mocked_repo = MagicMock()
mocked_full_path.return_value = "full_path"
mocked_repo._full_path = mocked_full_path
keep_path = "/path/.keep"
mode = os.O_WRONLY | os.O_CREAT
with patch("gitfs.views.current.os") as mocked_os:
mocked_os.path.exists.return_value = False
mocked_os.open.return_value = 10
mocked_os.O_WRONLY = os.O_WRONLY
mocked_os.O_CREAT = os.O_CREAT
current = CurrentView(
repo=mocked_repo,
uid=1,
gid=1,
repo_path="repo_path",
ignore=CachedIgnore(),
)
current.release = mocked_release
assert current.mkdir("/path", "mode") == "done"
mocked_full_path.assert_called_once_with(keep_path)
mocked_os.path.exists.assert_called_once_with(keep_path)
mocked_os.open.assert_called_once_with("full_path", mode)
mocked_chmod.assert_called_once_with(keep_path, 0o644)
assert current.dirty == {
10: {"message": "Create the /path directory", "stage": True}
}
mocked_release.assert_called_once_with(keep_path, 10)
current_view.PassthroughView.mkdir = old_mkdir
current_view.PassthroughView.chmod = old_chmod
def test_mkdir_in_git_dir(self):
current = CurrentView(
repo="repo", uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
with pytest.raises(FuseOSError):
current.mkdir(".git/", "mode")
def test_create_in_git_dir(self):
current = CurrentView(
repo="repo", uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
with pytest.raises(FuseOSError):
current.create(".git/", "mode")
def test_create(self):
from gitfs.views import current as current_view
old_chmod = current_view.PassthroughView.chmod
mock_chmod = lambda self, path, mode: "done"
current_view.PassthroughView.chmod = mock_chmod
mocked_open = MagicMock()
mocked_open.return_value = "done"
current = CurrentView(
repo="repo", uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
current.dirty = {"/path": {"content": "here"}}
current.open_for_write = mocked_open
assert current.create("/path", "mode") == "done"
current_view.PassthroughView.chmod = old_chmod
def test_chmod_in_git_dir(self):
current = CurrentView(
repo="repo", uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
with pytest.raises(FuseOSError):
current.chmod(".git/", "mode")
def test_chmod(self):
from gitfs.views import current as current_view
old_chmod = current_view.PassthroughView.chmod
current_view.PassthroughView.chmod = lambda self, path, mode: "done"
mocked_index = MagicMock()
mocked_full = MagicMock(return_value="/path")
mocked_repo = MagicMock(_full_path=mocked_full)
current = CurrentView(
repo=mocked_repo, uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
current._stage = mocked_index
assert current.chmod("/path", 0o100644) == "done"
message = "Chmod to 0%o on %s" % (0o644, "/path")
mocked_index.assert_called_once_with(add="/path", message=message)
current_view.PassthroughView.chmod = old_chmod
def test_chmod_on_dir(self):
from gitfs.views import current as current_view
old_chmod = current_view.PassthroughView.chmod
current_view.PassthroughView.chmod = lambda self, path, mode: "done"
mocked_full = MagicMock(return_value="repo/path/to/dir")
mocked_repo = MagicMock(_full_path=mocked_full)
with patch("gitfs.views.current.os") as mocked_os:
mocked_os.path.isdir.return_value = True
current = CurrentView(
repo=mocked_repo,
uid=1,
gid=1,
repo_path="repo_path",
ignore=CachedIgnore(),
)
assert current.chmod("/path/to/dir", 0o040755) == "done"
mocked_os.path.isdir.assert_called_once_with("repo/path/to/dir")
current_view.PassthroughView.chmod = old_chmod
def test_fsync_a_file_from_git_dir(self):
current = CurrentView(
repo="repo", uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
with pytest.raises(FuseOSError):
current.fsync(".git/", "data", 0)
def test_fsync(self):
from gitfs.views import current as current_view
old_fsync = current_view.PassthroughView.fsync
current_view.PassthroughView.fsync = lambda me, path, data, fh: "done"
mocked_index = MagicMock()
current = CurrentView(
repo="repo", uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
current._stage = mocked_index
assert current.fsync("/path", "data", 1) == "done"
message = "Fsync /path"
mocked_index.assert_called_once_with(add="/path", message=message)
current_view.PassthroughView.fsync = old_fsync
def test_unlink_from_git_dir(self):
current = CurrentView(repo="repo", repo_path="repo_path", ignore=CachedIgnore())
with pytest.raises(FuseOSError):
current.unlink(".git/")
def test_unlink(self):
from gitfs.views import current as current_view
old_unlink = current_view.PassthroughView.unlink
current_view.PassthroughView.unlink = lambda me, path: "done"
mocked_index = MagicMock()
current = CurrentView(
repo="repo", uid=1, gid=1, repo_path="repo_path", ignore=CachedIgnore()
)
current._stage = mocked_index
assert current.unlink("/path") == "done"
message = "Deleted /path"
mocked_index.assert_called_once_with(remove="/path", message=message)
current_view.PassthroughView.unlink = old_unlink
def test_stage(self):
mocked_repo = MagicMock()
mocked_sanitize = MagicMock()
mocked_queue = MagicMock()
mocked_files = MagicMock(return_value=None)
mocked_sanitize.return_value = ["to-stage"]
current = CurrentView(
repo=mocked_repo,
repo_path="repo_path",
queue=mocked_queue,
ignore=CachedIgnore(),
)
current._sanitize = mocked_sanitize
current._get_files_from_path = mocked_files
current._stage("message", ["add"], ["remove"])
mocked_queue.commit.assert_called_once_with(
add=["to-stage"], remove=["to-stage"], message="message"
)
mocked_repo.index.add.assert_called_once_with(["to-stage"])
mocked_repo.index.remove.assert_called_once_with(["to-stage"])
mocked_files.has_calls([call(["add"])])
mocked_sanitize.has_calls([call(["add"]), call(["remove"])])
def test_sanitize(self):
current = CurrentView(repo="repo", repo_path="repo_path")
assert current._sanitize("/path") == "path"
def test_open(self):
mocked_full = MagicMock(return_value="full_path")
mocked_repo = MagicMock(_full_path=mocked_full)
mocked_os = MagicMock()
mocked_os.open.return_value = 1
with patch.multiple("gitfs.views.current", os=mocked_os):
current = CurrentView(
repo=mocked_repo, repo_path="repo_path", ignore=CachedIgnore()
)
current._full_path = mocked_full
current.writing = set([])
assert current.open("path/", os.O_WRONLY) == 1
mocked_os.open.assert_called_once_with("full_path", os.O_WRONLY)
def test_release_with_stage(self):
message = "I need to stage this"
mocked_os = MagicMock()
mocked_stage = MagicMock()
mocked_os.close.return_value = 0
with patch.multiple("gitfs.views.current", os=mocked_os):
current = CurrentView(
repo="repo", repo_path="repo_path", ignore=CachedIgnore()
)
current._stage = mocked_stage
current.dirty = {4: {"message": message, "stage": True}}
assert current.release("/path", 4) == 0
mocked_os.close.assert_called_once_with(4)
mocked_stage.assert_called_once_with(add="/path", message=message)
def test_release_without_stage(self):
message = "No need to stage this"
mocked_os = MagicMock()
mocked_stage = MagicMock()
mocked_os.close.return_value = 0
with patch.multiple("gitfs.views.current", os=mocked_os):
current = CurrentView(
repo="repo", repo_path="repo_path", ignore=CachedIgnore()
)
current._stage = mocked_stage
current.dirty = {4: {"message": message, "stage": False}}
assert current.release("/path", 4) == 0
mocked_os.close.assert_called_once_with(4)
assert mocked_stage.call_count == 0
|
var/spack/repos/builtin/packages/node-js/package.py | LiamBindle/spack | 2,360 | 12674598 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import subprocess
import sys
from spack import *
class NodeJs(Package):
"""Node.js is a JavaScript runtime built on Chrome's V8 JavaScript
engine."""
homepage = "https://nodejs.org/"
url = "https://nodejs.org/dist/v13.5.0/node-v13.5.0.tar.gz"
list_url = "https://nodejs.org/dist/"
list_depth = 1
# Current (latest features)
version('15.3.0', sha256='cadfa384a5f14591b84ce07a1afe529f28deb0d43366fb0ae4e78afba96bfaf2')
version('14.16.1', sha256='5f5080427abddde7f22fd2ba77cd2b8a1f86253277a1eec54bc98a202728ce80')
version('14.13.0', sha256='8538b2e76aa06ee0e6eb1c118426c3c5ca53b2e49d66591738eacf76e89edd61')
version('14.10.0', sha256='7e0d7a1aa23697415e3588a1ca4f1c47496e6c88b9cf37c66be90353d3e4ac3e')
version('13.8.0', sha256='815b5e1b18114f35da89e4d98febeaba97555d51ef593bd5175db2b05f2e8be6')
version('13.5.0', sha256='4b8078d896a7550d7ed399c1b4ac9043e9f883be404d9b337185c8d8479f2db8')
# LTS (recommended for most users)
version('14.15.1', sha256='a1120472bf55aea745287693a6651e16973e1008c9d6107df350126adf9716fe', preferred=True)
version('12.18.4', sha256='a802d87e579e46fc52771ed6f2667048320caca867be3276f4c4f1bbb41389c3')
version('12.18.3', sha256='6ea85f80e01b007cc9b566b8836513bc5102667d833bad4c1092be60fa60c2d4')
version('12.16.0', sha256='ae2dfe74485d821d4fef7cf1802acd2322cd994c853a2327c4306952f4453441')
version('12.14.0', sha256='5c1939867228f3845c808ef84a89c8ee93cc35f857bf7587ecee1b5a6d9da67b')
version('11.1.0', sha256='3f53b5ac25b2d36ad538267083c0e603d9236867a936c22a9116d95fa10c60d5')
version('10.13.0', sha256='aa06825fff375ece7c0d881ae0de5d402a857e8cabff9b4a50f2f0b7b44906be')
version('8.11.4', sha256='459144e361d64ca7362c37cc9717c044ef909d348cb5aa3f2b62538560a6085a')
version('8.9.1', sha256='32491b7fcc4696b2cdead45c47e52ad16bbed8f78885d32e873952fee0f971e1')
version('7.1.0', sha256='595e7e2a37d1e0573044a90077bb12c0f750e5d8851899ffa74038238da9a983')
version('6.3.0', sha256='4ed7a99985f8afee337cc22d5fef61b495ab4238dfff3750ac9019e87fc6aae6')
version('6.2.2', sha256='b6baee57a0ede496c7c7765001f7495ad74c8dfe8c34f1a6fb2cd5d8d526ffce')
variant('debug', default=False, description='Include debugger support')
variant('doc', default=False, description='Compile with documentation')
variant('icu4c', default=False, description='Build with support for all locales instead of just English')
variant('openssl', default=True, description='Build with Spacks OpenSSL instead of the bundled version')
variant('zlib', default=True, description='Build with Spacks zlib instead of the bundled version')
# https://github.com/nodejs/node/blob/master/BUILDING.md#unix-and-macos
depends_on('[email protected]:', type='build')
depends_on('libtool', type='build', when=sys.platform != 'darwin')
depends_on('pkgconfig', type='build')
depends_on('[email protected]:2.8,3.5:', when='@12:', type='build')
depends_on('[email protected]:2.8', when='@:11', type='build')
# depends_on('bash-completion', when="+bash-completion")
depends_on('icu4c', when='+icu4c')
depends_on('[email protected]:1.0', when='@:9+openssl')
depends_on('[email protected]:', when='@10:+openssl')
depends_on('zlib', when='+zlib')
phases = ['configure', 'build', 'install']
# https://github.com/spack/spack/issues/19310
    conflicts('%gcc@:4.8',
              msg="fails to build with gcc 4.8 "
                  "(see https://github.com/spack/spack/issues/19310)")
def setup_build_environment(self, env):
# Force use of experimental Python 3 support
env.set('PYTHON', self.spec['python'].command.path)
env.set('NODE_GYP_FORCE_PYTHON', self.spec['python'].command.path)
def configure_args(self):
# On OSX, the system libtool must be used
# So, we ensure that this is the case by...
if sys.platform == 'darwin':
process_pipe = subprocess.Popen(["which", "libtool"],
stdout=subprocess.PIPE)
result_which = process_pipe.communicate()[0]
process_pipe = subprocess.Popen(["whereis", "libtool"],
stdout=subprocess.PIPE)
result_whereis = process_pipe.communicate()[0]
            assert result_which == result_whereis, (
                'On OSX the system libtool must be used. Please '
                '(temporarily) remove\n%s or its link to libtool from '
                'the path' % result_which)
args = [
'--prefix={0}'.format(self.prefix),
# Note: npm is updated more regularly than node.js, so we build
# the package instead of using the bundled version
'--without-npm'
]
if '+debug' in self.spec:
args.append('--debug')
if '+openssl' in self.spec:
args.extend([
'--shared-openssl',
'--shared-openssl-includes={0}'.format(
self.spec['openssl'].prefix.include),
'--shared-openssl-libpath={0}'.format(
self.spec['openssl'].prefix.lib),
])
if '+zlib' in self.spec:
args.extend([
'--shared-zlib',
'--shared-zlib-includes={0}'.format(
self.spec['zlib'].prefix.include),
'--shared-zlib-libpath={0}'.format(
self.spec['zlib'].prefix.lib),
])
if '+icu4c' in self.spec:
args.append('--with-intl=full-icu')
return args
def configure(self, spec, prefix):
if self.version >= Version('10.11.0'):
python('configure.py', *self.configure_args())
else:
python('configure', *self.configure_args())
def build(self, spec, prefix):
make()
if '+doc' in spec:
make('doc')
@run_after('build')
@on_package_attributes(run_tests=True)
def build_test(self):
make('test')
make('test-addons')
def install(self, spec, prefix):
make('install')
|
python_toolbox/wx_tools/widgets/cute_html_window.py | hboshnak/python_toolbox | 119 | 12674599 | # Copyright 2009-2011 <NAME>.
# This program is distributed under the LGPL2.1 license.
import webbrowser
import wx.html
from python_toolbox.wx_tools.widgets.cute_window import CuteWindow
class CuteHtmlWindow(wx.html.HtmlWindow, CuteWindow):
event_modules = wx.html
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.html.HW_DEFAULT_STYLE,
name=wx.html.HtmlWindowNameStr):
wx.html.HtmlWindow.__init__(self, parent=parent, id=id, pos=pos,
size=size, style=style, name=name)
self.bind_event_handlers(CuteHtmlWindow)
def _on_html_link_clicked(self, event):
webbrowser.open_new_tab(
event.GetLinkInfo().GetHref()
) |
pandapower/test/networks/test_cigre_networks.py | yougnen/pandapower | 104 | 12674600 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
import pytest
import pandapower as pp
import pandapower.networks as pn
def test_cigre_hv():
net = pn.create_cigre_network_hv() # length_km_6a_6b=0.1
pp.runpp(net)
all_vn_kv = pd.Series([22, 220, 380])
assert net.bus.vn_kv.isin(all_vn_kv).all()
all_length_km = pd.Series([100, 300, 600, 0.1])
assert net.line.length_km.isin(all_length_km).all()
assert len(net.bus) == 13
assert len(net.line) == 9
assert len(net.gen) == 3
assert len(net.sgen) == 0
assert len(net.shunt) == 3
assert len(net.trafo) == 6
assert len(net.load) == 5
assert len(net.ext_grid) == 1
assert net.converged
net = pn.create_cigre_network_hv(length_km_6a_6b=80)
assert net.line.length_km[8] == 80
def test_cigre_mv():
net = pn.create_cigre_network_mv() # with_der=False
pp.runpp(net)
all_vn_kv = pd.Series([110, 20])
assert net.bus.vn_kv.isin(all_vn_kv).all()
assert len(net.bus) == 15
assert len(net.line) == 15
assert len(net.gen) == 0
assert len(net.sgen) == 0
assert len(net.shunt) == 0
assert len(net.trafo) == 2
assert len(net.load) == 18
assert len(net.ext_grid) == 1
assert len(net.switch) == 8
assert net.converged
net = pn.create_cigre_network_mv(with_der="pv_wind")
pp.runpp(net)
all_vn_kv = pd.Series([110, 20])
assert net.bus.vn_kv.isin(all_vn_kv).all()
assert len(net.bus) == 15
assert len(net.line) == 15
assert len(net.gen) == 0
assert len(net.sgen) == 9
assert len(net.shunt) == 0
assert len(net.trafo) == 2
assert len(net.load) == 18
assert len(net.ext_grid) == 1
assert len(net.switch) == 8
assert net.converged
net = pn.create_cigre_network_mv(with_der="all")
pp.runpp(net)
all_vn_kv = pd.Series([110, 20])
assert net.bus.vn_kv.isin(all_vn_kv).all()
assert len(net.bus) == 15
assert len(net.line) == 15
assert len(net.gen) == 0
assert len(net.sgen) == 13
assert len(net.storage) == 2
assert len(net.shunt) == 0
assert len(net.trafo) == 2
assert len(net.load) == 18
assert len(net.ext_grid) == 1
assert len(net.switch) == 8
assert net.converged
def test_cigre_lv():
net = pn.create_cigre_network_lv()
pp.runpp(net)
all_vn_kv = pd.Series([20, 0.4])
assert net.bus.vn_kv.isin(all_vn_kv).all()
assert len(net.bus) == 44
assert len(net.line) == 37
assert len(net.gen) == 0
assert len(net.sgen) == 0
assert len(net.shunt) == 0
assert len(net.trafo) == 3
assert len(net.load) == 15
assert len(net.ext_grid) == 1
assert len(net.switch) == 3
assert net.converged
if __name__ == '__main__':
pytest.main(['-x', "test_cigre_networks.py"])
|
package_control/commands/advanced_install_package_command.py | zjzh/package_control | 3,373 | 12674612 | import threading
import re
import time
import functools
import sublime
import sublime_plugin
from ..show_error import show_error
from ..package_manager import PackageManager
from ..package_disabler import PackageDisabler
from ..thread_progress import ThreadProgress
try:
str_cls = unicode
bytes_cls = str
except (NameError):
str_cls = str
bytes_cls = bytes
class AdvancedInstallPackageCommand(sublime_plugin.WindowCommand):
"""
A command that accepts a comma-separated list of packages to install, or
prompts the user to paste a comma-separated list
"""
def run(self, packages=None):
is_str = isinstance(packages, str_cls)
is_bytes = isinstance(packages, bytes_cls)
if packages and (is_str or is_bytes):
packages = self.split(packages)
if packages and isinstance(packages, list):
return self.start(packages)
self.window.show_input_panel(
'Packages to Install (Comma-separated)',
'',
self.on_done,
None,
None
)
def split(self, packages):
if isinstance(packages, bytes_cls):
packages = packages.decode('utf-8')
return re.split(r'\s*,\s*', packages)
def on_done(self, input):
"""
Input panel handler - adds the provided URL as a repository
:param input:
A string of the URL to the new repository
"""
input = input.strip()
if not input:
show_error(
u'''
No package names were entered
'''
)
return
self.start(self.split(input))
def start(self, packages):
thread = AdvancedInstallPackageThread(packages)
thread.start()
message = 'Installing package'
if len(packages) > 1:
message += 's'
ThreadProgress(thread, message, '')
class AdvancedInstallPackageThread(threading.Thread, PackageDisabler):
"""
    A thread in which to run the installation of one or more packages
"""
def __init__(self, packages):
"""
        :param packages:
            A list of the names of the packages to install or upgrade
"""
self.manager = PackageManager()
self.packages = packages
self.installed = self.manager.list_packages()
self.disabled = []
for package_name in packages:
operation_type = 'install' if package_name not in self.installed else 'upgrade'
self.disabled.extend(self.disable_packages(package_name, operation_type))
threading.Thread.__init__(self)
def run(self):
# Allow packages to properly disable
time.sleep(0.7)
def do_reenable_package(package_name):
operation_type = 'install' if package_name not in self.installed else 'upgrade'
self.reenable_package(package_name, operation_type)
for package in self.packages:
result = self.manager.install_package(package)
# Do not reenable if installation deferred until next restart
if result is not None and package in self.disabled:
# We use a functools.partial to generate the on-complete callback in
# order to bind the current value of the parameters, unlike lambdas.
sublime.set_timeout(functools.partial(do_reenable_package, package), 700)
|
Lib/bsddb/__init__.py | arvindm95/unladen-swallow | 2,293 | 12674619 | #----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# o Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the disclaimer that follows.
#
# o Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# o Neither the name of Digital Creations nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------
"""Support for Berkeley DB 4.0 through 4.7 with a simple interface.
For the full featured object oriented interface use the bsddb.db module
instead. It mirrors the Oracle Berkeley DB C API.
"""
import sys
absolute_import = (sys.version_info[0] >= 3)
if sys.py3kwarning:
import warnings
warnings.warnpy3k("in 3.x, bsddb has been removed; "
"please use the pybsddb project instead",
DeprecationWarning, 2)
try:
if __name__ == 'bsddb3':
# import _pybsddb binary as it should be the more recent version from
# a standalone pybsddb addon package than the version included with
# python as bsddb._bsddb.
if absolute_import :
            # Because this syntax is not valid before Python 2.5
exec("from . import _pybsddb")
else :
import _pybsddb
_bsddb = _pybsddb
from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
else:
import _bsddb
from bsddb.dbutils import DeadlockWrap as _DeadlockWrap
except ImportError:
# Remove ourselves from sys.modules
import sys
del sys.modules[__name__]
raise
# bsddb3 calls it db, but provide _db for backwards compatibility
db = _db = _bsddb
__version__ = db.__version__
error = db.DBError # So bsddb.error will mean something...
#----------------------------------------------------------------------
import sys, os
from weakref import ref
if sys.version_info[0:2] <= (2, 5) :
import UserDict
MutableMapping = UserDict.DictMixin
else :
import collections
MutableMapping = collections.MutableMapping
class _iter_mixin(MutableMapping):
def _make_iter_cursor(self):
cur = _DeadlockWrap(self.db.cursor)
key = id(cur)
self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
return cur
def _gen_cref_cleaner(self, key):
        # generate the function for the weakref callback here
        # to ensure that we do not hold a strong reference to cur
        # in the callback.
return lambda ref: self._cursor_refs.pop(key, None)
def __iter__(self):
self._kill_iteration = False
self._in_iter += 1
try:
try:
cur = self._make_iter_cursor()
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
# since we're only returning keys, we call the cursor
# methods with flags=0, dlen=0, dofs=0
key = _DeadlockWrap(cur.first, 0,0,0)[0]
yield key
next = getattr(cur, "next")
while 1:
try:
key = _DeadlockWrap(next, 0,0,0)[0]
yield key
except _bsddb.DBCursorClosedError:
if self._kill_iteration:
raise RuntimeError('Database changed size '
'during iteration.')
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = getattr(cur, "next")
except _bsddb.DBNotFoundError:
pass
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
        # When Python 2.3 is no longer supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
self._in_iter -= 1
def iteritems(self):
if not self.db:
return
self._kill_iteration = False
self._in_iter += 1
try:
try:
cur = self._make_iter_cursor()
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
kv = _DeadlockWrap(cur.first)
key = kv[0]
yield kv
next = getattr(cur, "next")
while 1:
try:
kv = _DeadlockWrap(next)
key = kv[0]
yield kv
except _bsddb.DBCursorClosedError:
if self._kill_iteration:
raise RuntimeError('Database changed size '
'during iteration.')
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = getattr(cur, "next")
except _bsddb.DBNotFoundError:
pass
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
        # When Python 2.3 is no longer supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
self._in_iter -= 1
class _DBWithCursor(_iter_mixin):
"""
A simple wrapper around DB that makes it look like the bsddbobject in
the old module. It uses a cursor as needed to provide DB traversal.
"""
def __init__(self, db):
self.db = db
self.db.set_get_returns_none(0)
# FIXME-20031101-greg: I believe there is still the potential
# for deadlocks in a multithreaded environment if someone
# attempts to use the any of the cursor interfaces in one
# thread while doing a put or delete in another thread. The
# reason is that _checkCursor and _closeCursors are not atomic
# operations. Doing our own locking around self.dbc,
# self.saved_dbc_key and self._cursor_refs could prevent this.
# TODO: A test case demonstrating the problem needs to be written.
# self.dbc is a DBCursor object used to implement the
# first/next/previous/last/set_location methods.
self.dbc = None
self.saved_dbc_key = None
# a collection of all DBCursor objects currently allocated
# by the _iter_mixin interface.
self._cursor_refs = {}
self._in_iter = 0
self._kill_iteration = False
def __del__(self):
self.close()
def _checkCursor(self):
if self.dbc is None:
self.dbc = _DeadlockWrap(self.db.cursor)
if self.saved_dbc_key is not None:
_DeadlockWrap(self.dbc.set, self.saved_dbc_key)
self.saved_dbc_key = None
# This method is needed for all non-cursor DB calls to avoid
# Berkeley DB deadlocks (due to being opened with DB_INIT_LOCK
# and DB_THREAD to be thread safe) when intermixing database
# operations that use the cursor internally with those that don't.
def _closeCursors(self, save=1):
if self.dbc:
c = self.dbc
self.dbc = None
if save:
try:
self.saved_dbc_key = _DeadlockWrap(c.current, 0,0,0)[0]
except db.DBError:
pass
_DeadlockWrap(c.close)
del c
for cref in self._cursor_refs.values():
c = cref()
if c is not None:
_DeadlockWrap(c.close)
def _checkOpen(self):
if self.db is None:
raise error, "BSDDB object has already been closed"
def isOpen(self):
return self.db is not None
def __len__(self):
self._checkOpen()
return _DeadlockWrap(lambda: len(self.db)) # len(self.db)
if sys.version_info[0:2] >= (2, 6) :
def __repr__(self) :
if self.isOpen() :
return repr(dict(_DeadlockWrap(self.db.items)))
return repr(dict())
def __getitem__(self, key):
self._checkOpen()
return _DeadlockWrap(lambda: self.db[key]) # self.db[key]
def __setitem__(self, key, value):
self._checkOpen()
self._closeCursors()
if self._in_iter and key not in self:
self._kill_iteration = True
def wrapF():
self.db[key] = value
_DeadlockWrap(wrapF) # self.db[key] = value
def __delitem__(self, key):
self._checkOpen()
self._closeCursors()
if self._in_iter and key in self:
self._kill_iteration = True
def wrapF():
del self.db[key]
_DeadlockWrap(wrapF) # del self.db[key]
def close(self):
self._closeCursors(save=0)
if self.dbc is not None:
_DeadlockWrap(self.dbc.close)
v = 0
if self.db is not None:
v = _DeadlockWrap(self.db.close)
self.dbc = None
self.db = None
return v
def keys(self):
self._checkOpen()
return _DeadlockWrap(self.db.keys)
def has_key(self, key):
self._checkOpen()
return _DeadlockWrap(self.db.has_key, key)
def set_location(self, key):
self._checkOpen()
self._checkCursor()
return _DeadlockWrap(self.dbc.set_range, key)
def next(self): # Renamed by "2to3"
self._checkOpen()
self._checkCursor()
rv = _DeadlockWrap(getattr(self.dbc, "next"))
return rv
if sys.version_info[0] >= 3 : # For "2to3" conversion
next = __next__
def previous(self):
self._checkOpen()
self._checkCursor()
rv = _DeadlockWrap(self.dbc.prev)
return rv
def first(self):
self._checkOpen()
# fix 1725856: don't needlessly try to restore our cursor position
self.saved_dbc_key = None
self._checkCursor()
rv = _DeadlockWrap(self.dbc.first)
return rv
def last(self):
self._checkOpen()
# fix 1725856: don't needlessly try to restore our cursor position
self.saved_dbc_key = None
self._checkCursor()
rv = _DeadlockWrap(self.dbc.last)
return rv
def sync(self):
self._checkOpen()
return _DeadlockWrap(self.db.sync)
#----------------------------------------------------------------------
# Compatibility object factory functions
def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
cachesize=None, lorder=None, hflags=0):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
d.set_flags(hflags)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
if ffactor is not None: d.set_h_ffactor(ffactor)
if nelem is not None: d.set_h_nelem(nelem)
d.open(file, db.DB_HASH, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def btopen(file, flag='c', mode=0666,
btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
pgsize=None, lorder=None):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(btflags)
if minkeypage is not None: d.set_bt_minkey(minkeypage)
if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
d.open(file, db.DB_BTREE, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def rnopen(file, flag='c', mode=0666,
rnflags=0, cachesize=None, pgsize=None, lorder=None,
rlen=None, delim=None, source=None, pad=None):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(rnflags)
if delim is not None: d.set_re_delim(delim)
if rlen is not None: d.set_re_len(rlen)
if source is not None: d.set_re_source(source)
if pad is not None: d.set_re_pad(pad)
d.open(file, db.DB_RECNO, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def _openDBEnv(cachesize):
e = db.DBEnv()
if cachesize is not None:
if cachesize >= 20480:
e.set_cachesize(0, cachesize)
else:
raise error, "cachesize must be >= 20480"
e.set_lk_detect(db.DB_LOCK_DEFAULT)
e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
return e
def _checkflag(flag, file):
if flag == 'r':
flags = db.DB_RDONLY
elif flag == 'rw':
flags = 0
elif flag == 'w':
flags = db.DB_CREATE
elif flag == 'c':
flags = db.DB_CREATE
elif flag == 'n':
flags = db.DB_CREATE
#flags = db.DB_CREATE | db.DB_TRUNCATE
# we used db.DB_TRUNCATE flag for this before but Berkeley DB
# 4.2.52 changed to disallowed truncate with txn environments.
if file is not None and os.path.isfile(file):
os.unlink(file)
else:
raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
return flags | db.DB_THREAD
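# Illustrative mapping performed by _checkflag above (DB_THREAD is always OR-ed in):
#   'r' -> DB_RDONLY    'rw' -> 0    'w'/'c' -> DB_CREATE    'n' -> DB_CREATE,
#   with 'n' unlinking an existing file first.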
#----------------------------------------------------------------------
# This is a silly little hack that allows apps to continue to use the
# DB_THREAD flag even on systems without threads without freaking out
# Berkeley DB.
#
# This assumes that if Python was built with thread support then
# Berkeley DB was too.
try:
import thread
del thread
except ImportError:
db.DB_THREAD = 0
#----------------------------------------------------------------------
|
reviewboard/oauth/models.py | amalik2/reviewboard | 921 | 12674620 | """Models for OAuth2 applications."""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import JSONField
from oauth2_provider.models import AbstractApplication
from reviewboard.site.models import LocalSite
class Application(AbstractApplication):
"""An OAuth2 application.
This model is specialized so that it can be limited to a
:py:class:`~reviewboard.site.models.LocalSite`.
"""
enabled = models.BooleanField(
verbose_name=_('Enabled'),
help_text=_('Whether or not this application can be used to '
'authenticate with Review Board.'),
default=True,
)
original_user = models.ForeignKey(
verbose_name=_('Original User'),
to=User,
blank=True,
null=True,
help_text=_('The original owner of this application.')
)
local_site = models.ForeignKey(
verbose_name=_('Local Site'),
to=LocalSite,
related_name='oauth_applications',
blank=True,
null=True,
help_text=_('An optional Local Site to limit this application to.<br>'
'If specified, only users with access to the Local Site '
'will be able to use the application.'),
)
extra_data = JSONField(
_('Extra Data'),
null=True,
default=dict,
)
@property
def is_disabled_for_security(self):
"""Whether or not this application is disabled for security reasons.
        This will be ``True`` when the :py:attr:`original_user` no longer
has access to the :py:attr:`local_site` this application is associated
with.
"""
return not self.enabled and self.original_user_id is not None
def clean(self):
"""Validate the application.
We do the validation for this in :py:meth:`ApplicationForm.clean()
<reviewboard.oauth.forms.ApplicationForm.clean` so that we can have
errors for ``authorization_grant_type`` and ``redirect_uris`` conflicts
show up on the appropriate field. The parent class does the same
validation, but as a result it will have form-wide errors instead of
per-field errors for the above two fields when they are in conflict.
Therefore we avoid that validation by making this a no-op.
"""
pass
def is_accessible_by(self, user, local_site=None):
"""Return whether or not the user has access to this Application.
A user has access if one of the following conditions is met:
* The user owns the Application.
* The user is an administrator.
* The user is a Local Site administrator on the Local Site the
Application is assigned to.
Args:
user (django.contrib.auth.models.User):
The user in question.
local_site (reviewboard.site.models.LocalSite):
The Local Site the user would access this Application under.
Returns:
bool:
Whether or not the given user has access to information about
this Application.
"""
return (user.is_authenticated() and
(self.user_id == user.pk or
user.is_superuser or
(self.local_site_id is not None and
local_site is not None and
self.local_site_id == local_site.pk and
local_site.is_mutable_by(user))))
def is_mutable_by(self, user, local_site=None):
"""Return whether or not the user can modify this Application.
A user has access if one of the following conditions is met:
* The user owns the Application.
* The user is an administrator.
* The user is a Local Site administrator on the Local Site the
Application is assigned to.
Args:
user (django.contrib.auth.models.User):
The user in question.
local_site (reviewboard.site.models.LocalSite):
The Local Site the user would modify this Application under.
Returns:
bool:
Whether or not the given user can modify this Application.
"""
return self.is_accessible_by(user, local_site=local_site)
class Meta:
db_table = 'reviewboard_oauth_application'
verbose_name = _('OAuth Application')
verbose_name_plural = _('OAuth Applications')
|
AppServer/lib/django-1.2/tests/modeltests/serializers/tests.py | loftwah/appscale | 790 | 12674629 | # -*- coding: utf-8 -*-
from datetime import datetime
from StringIO import StringIO
import unittest
from xml.dom import minidom
from django.conf import settings
from django.core import serializers
from django.db import transaction
from django.test import TestCase, TransactionTestCase, Approximate
from django.utils import simplejson
from models import Category, Author, Article, AuthorProfile, Actor, \
Movie, Score, Player, Team
class SerializerRegistrationTests(unittest.TestCase):
def setUp(self):
self.old_SERIALIZATION_MODULES = getattr(settings, 'SERIALIZATION_MODULES', None)
self.old_serializers = serializers._serializers
serializers._serializers = {}
settings.SERIALIZATION_MODULES = {
"json2" : "django.core.serializers.json",
}
def tearDown(self):
serializers._serializers = self.old_serializers
if self.old_SERIALIZATION_MODULES:
settings.SERIALIZATION_MODULES = self.old_SERIALIZATION_MODULES
else:
delattr(settings, 'SERIALIZATION_MODULES')
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertTrue('json3' in public_formats)
self.assertTrue('json2' in public_formats)
self.assertTrue('xml' in public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertFalse('xml' in public_formats)
self.assertTrue('json3' in public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
        self.assertTrue('xml' in all_formats)
self.assertTrue('xml' in public_formats)
self.assertTrue('json2' in all_formats)
self.assertTrue('json2' in public_formats)
self.assertTrue('python' in all_formats)
self.assertFalse('python' in public_formats)
class SerializersTestBase(object):
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = "Poker has no place on ESPN"
new_headline = "Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970,1,1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline','pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = u"Za\u017c\u00f3\u0142\u0107"
movie_title = u'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "<NAME>"
player = Player()
player.name = "<NAME>"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author = self.jane,
headline = "Nobody remembers the early years",
pub_date = datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
        self.assertEqual(date_values[0], "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 4)
class SerializersTransactionTestBase(object):
def test_forward_refs(self):
"""
Tests that objects ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to be contained
# within a transaction in order to test forward reference
# handling.
transaction.enter_transaction_management()
transaction.managed(True)
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
for obj in objs:
obj.save()
transaction.commit()
transaction.leave_transaction_management()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
</django-objects>"""
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return unicode(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16 15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[{"pk": null, "model": "serializers.category", "fields": {"name": "Reference"}}]"""
@staticmethod
def _validate_output(serial_str):
try:
simplejson.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = simplejson.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = simplejson.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16 15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
try:
import yaml
except ImportError:
pass
else:
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, basestring):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
|
rllib/env/dm_env_wrapper.py | firebolt55439/ray | 21,382 | 12674643 | <gh_stars>1000+
import gym
from gym import spaces
import numpy as np
try:
from dm_env import specs
except ImportError:
specs = None
def _convert_spec_to_space(spec):
if isinstance(spec, dict):
return spaces.Dict(
{k: _convert_spec_to_space(v)
for k, v in spec.items()})
if isinstance(spec, specs.DiscreteArray):
return spaces.Discrete(spec.num_values)
elif isinstance(spec, specs.BoundedArray):
        return spaces.Box(
            low=np.asarray(spec.minimum).item(),  # np.asscalar is deprecated/removed in newer NumPy
            high=np.asarray(spec.maximum).item(),
            shape=spec.shape,
            dtype=spec.dtype)
elif isinstance(spec, specs.Array):
return spaces.Box(
low=-float("inf"),
high=float("inf"),
shape=spec.shape,
dtype=spec.dtype)
raise NotImplementedError(
("Could not convert `Array` spec of type {} to Gym space. "
"Attempted to convert: {}").format(type(spec), spec))
class DMEnv(gym.Env):
"""A `gym.Env` wrapper for the `dm_env` API.
"""
metadata = {"render.modes": ["rgb_array"]}
def __init__(self, dm_env):
super(DMEnv, self).__init__()
self._env = dm_env
self._prev_obs = None
if specs is None:
raise RuntimeError((
"The `specs` module from `dm_env` was not imported. Make sure "
"`dm_env` is installed and visible in the current python "
"environment."))
    def step(self, action):
        ts = self._env.step(action)
        reward = ts.reward
        if reward is None:
            reward = 0.
        self._prev_obs = ts.observation  # keep last observation for render()
        return ts.observation, reward, ts.last(), {"discount": ts.discount}
    def reset(self):
        ts = self._env.reset()
        self._prev_obs = ts.observation  # keep last observation for render()
        return ts.observation
def render(self, mode="rgb_array"):
if self._prev_obs is None:
raise ValueError(
"Environment not started. Make sure to reset before rendering."
)
if mode == "rgb_array":
return self._prev_obs
else:
raise NotImplementedError(
"Render mode '{}' is not supported.".format(mode))
@property
def action_space(self):
spec = self._env.action_spec()
return _convert_spec_to_space(spec)
@property
def observation_space(self):
spec = self._env.observation_spec()
return _convert_spec_to_space(spec)
@property
def reward_range(self):
spec = self._env.reward_spec()
if isinstance(spec, specs.BoundedArray):
return spec.minimum, spec.maximum
return -float("inf"), float("inf")
|
TL-ERC/setup.py | NouamaneTazi/conv-emotion | 488 | 12674644 | <reponame>NouamaneTazi/conv-emotion
import os
DATASET_DIRECTORY_PATH = "./datasets/"
GENERATIVE_WEIGHTS_DIRECTORY_PATH = "./generative_weights/"
if not os.path.exists(DATASET_DIRECTORY_PATH):
os.makedirs(DATASET_DIRECTORY_PATH)
if not os.path.exists(GENERATIVE_WEIGHTS_DIRECTORY_PATH):
os.makedirs(GENERATIVE_WEIGHTS_DIRECTORY_PATH) |
pyscf/mcscf/__init__.py | QuESt-Calculator/pyscf | 501 | 12674646 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''CASCI and CASSCF
When using results of this code for publications, please cite the following paper:
"A general second order complete active space self-consistent-field solver for large-scale systems", <NAME>, <NAME>, and <NAME>, Chem. Phys. Lett. 683, 291 (2017).
Simple usage::
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.RHF(mol).run()
>>> mc = mcscf.CASCI(mf, 6, 6)
>>> mc.kernel()[0]
-108.980200816243354
>>> mc = mcscf.CASSCF(mf, 6, 6)
>>> mc.kernel()[0]
-109.044401882238134
>>> mc = mcscf.CASSCF(mf, 4, 4)
>>> cas_list = [5,6,8,9] # pick orbitals for CAS space, 1-based indices
>>> mo = mcscf.sort_mo(mc, mf.mo_coeff, cas_list)
>>> mc.kernel(mo)[0]
-109.007378939813691
:func:`mcscf.CASSCF` or :func:`mcscf.CASCI` returns a proper instance of CASSCF/CASCI class.
There are some parameters to control the CASSCF/CASCI method.
verbose : int
Print level. Default value equals to :class:`Mole.verbose`.
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`.
ncas : int
Active space size.
nelecas : tuple of int
Active (nelec_alpha, nelec_beta)
ncore : int or tuple of int
        Core electron number. In UHF-CASSCF, it's a tuple to indicate the different core electron numbers.
natorb : bool
        Whether to restore natural orbitals during the CASSCF optimization. Default is False.
canonicalization : bool
Whether to canonicalize orbitals. Default is True.
fcisolver : an instance of :class:`FCISolver`
        The pyscf.fci module provides several FCISolvers for different scenarios. Generally,
        fci.direct_spin1.FCISolver can be used for all RHF-CASSCF. However, a proper FCISolver
        can provide better performance and better numerical stability. One can either use
        the :func:`fci.solver` function to let the program pick the FCISolver, or manually assign
        the FCISolver to this attribute, e.g.
>>> from pyscf import fci
>>> mc = mcscf.CASSCF(mf, 4, 4)
>>> mc.fcisolver = fci.solver(mol, singlet=True)
>>> mc.fcisolver = fci.direct_spin1.FCISolver(mol)
You can control FCISolver by setting e.g.::
>>> mc.fcisolver.max_cycle = 30
>>> mc.fcisolver.conv_tol = 1e-7
For more details of the parameter for FCISolver, See :mod:`fci`.
By replacing this fcisolver, you can easily use the CASCI/CASSCF solver
with other FCI replacements, such as DMRG, QMC. See :mod:`dmrgscf` and
:mod:`fciqmcscf`.
The Following attributes are used for CASSCF
conv_tol : float
        Convergence threshold. Default is 1e-7.
    conv_tol_grad : float
        Convergence threshold for CI gradients and orbital rotation gradients.
        Default is 1e-4.
max_stepsize : float
        The step size for orbital rotation. A small step size is preferred.
Default is 0.03.
        (NOTE: although the default step size is small enough for many systems,
        the orbital optimizer sometimes crosses the barrier of a local
        minimum and converges to a neighbouring solution, e.g. the CAS(4,4) for
        C2H4 in the test files. In these systems, adjusting max_stepsize,
max_ci_stepsize and max_cycle_micro, max_cycle_micro_inner and
ah_start_tol may be helpful)
>>> mc = mcscf.CASSCF(mf, 6, 6)
>>> mc.max_stepsize = .01
>>> mc.max_cycle_micro = 1
>>> mc.max_cycle_macro = 100
>>> mc.max_cycle_micro_inner = 1
>>> mc.ah_start_tol = 1e-6
max_ci_stepsize : float
        The max size for approximate CI updates. The approximate updates are
        used in the 1-step algorithm to estimate the change of the CI wavefunction
        wrt the orbital rotation. A small step size is preferred. Default is 0.01.
max_cycle_macro : int
Max number of macro iterations. Default is 50.
max_cycle_micro : int
Max number of micro iterations in each macro iteration. Depending on
systems, increasing this value might reduce the total macro
iterations. Generally, 2 - 3 steps should be enough. Default is 2.
max_cycle_micro_inner : int
Max number of steps for the orbital rotations allowed for the augmented
hessian solver. It can affect the actual size of orbital rotation.
        Even with a small max_stepsize, a few max_cycle_micro_inner steps can
        accumulate the rotation and lead to a significant change of the CAS
        space. Depending on the system, increasing this value might reduce the
        total number of macro iterations. A value between 2 and 8 is preferred.
Default is 4.
frozen : int or list
        If an integer is given, the inner-most orbitals are excluded from optimization.
Given the orbital indices (0-based) in a list, any doubly occupied core
orbitals, active orbitals and external orbitals can be frozen.
ah_level_shift : float, for AH solver.
Level shift for the Davidson diagonalization in AH solver. Default is 0.
ah_conv_tol : float, for AH solver.
        Convergence threshold for the Davidson diagonalization in the AH solver. Default is 1e-8.
ah_max_cycle : float, for AH solver.
        Max number of iterations allowed in the AH solver. Default is 20.
ah_lindep : float, for AH solver.
Linear dependence threshold for AH solver. Default is 1e-16.
    ah_start_tol : float, for AH solver.
In AH solver, the orbital rotation is started without completely solving the AH problem.
This value is to control the start point. Default is 1e-4.
ah_start_cycle : int, for AH solver.
In AH solver, the orbital rotation is started without completely solving the AH problem.
This value is to control the start point. Default is 3.
``ah_conv_tol``, ``ah_max_cycle``, ``ah_lindep``, ``ah_start_tol`` and ``ah_start_cycle``
can affect the accuracy and performance of CASSCF solver. Lower
``ah_conv_tol`` and ``ah_lindep`` can improve the accuracy of CASSCF
optimization, but slow down the performance.
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.UHF(mol)
>>> mf.scf()
>>> mc = mcscf.CASSCF(mf, 6, 6)
>>> mc.conv_tol = 1e-10
>>> mc.ah_conv_tol = 1e-5
>>> mc.kernel()
-109.044401898486001
>>> mc.ah_conv_tol = 1e-10
>>> mc.kernel()
-109.044401887945668
chkfile : str
Checkpoint file to save the intermediate orbitals during the CASSCF optimization.
Default is the checkpoint file of mean field object.
Saved results
e_tot : float
Total MCSCF energy (electronic energy plus nuclear repulsion)
ci : ndarray
CAS space FCI coefficients
converged : bool, for CASSCF only
It indicates CASSCF optimization converged or not.
mo_energy: ndarray,
Diagonal elements of general Fock matrix
mo_coeff : ndarray, for CASSCF only
Optimized CASSCF orbitals coefficients
Note the orbitals are NOT natural orbitals by default. There are two
inbuilt methods to convert the mo_coeff to natural orbitals.
    1. Set the .natorb attribute. It can be used before the calculation.
    2. Call the .cas_natorb_ method after the calculation to convert the orbitals in place.
'''
from pyscf.mcscf import mc1step
from pyscf.mcscf import mc1step_symm
from pyscf.mcscf import casci
from pyscf.mcscf import casci_symm
from pyscf.mcscf import addons
from pyscf.mcscf import ucasci
casci_uhf = ucasci # for backward compatibility
from pyscf.mcscf import umc1step
mc1step_uhf = umc1step # for backward compatibility
from pyscf.mcscf.addons import *
from pyscf.mcscf import chkfile
def CASSCF(mf_or_mol, ncas, nelecas, ncore=None, frozen=None):
from pyscf import gto
from pyscf import scf
if isinstance(mf_or_mol, gto.Mole):
mf = scf.RHF(mf_or_mol)
else:
mf = mf_or_mol
if isinstance(mf, scf.uhf.UHF):
mf = scf.addons.convert_to_rhf(mf)
if getattr(mf, 'with_df', None):
return DFCASSCF(mf, ncas, nelecas, ncore, frozen)
if mf.mol.symmetry:
mc = mc1step_symm.CASSCF(mf, ncas, nelecas, ncore, frozen)
else:
mc = mc1step.CASSCF(mf, ncas, nelecas, ncore, frozen)
return mc
RCASSCF = CASSCF
def CASCI(mf_or_mol, ncas, nelecas, ncore=None):
from pyscf import gto
from pyscf import scf
if isinstance(mf_or_mol, gto.Mole):
mf = scf.RHF(mf_or_mol)
else:
mf = mf_or_mol
if isinstance(mf, scf.uhf.UHF):
mf = scf.addons.convert_to_rhf(mf)
if getattr(mf, 'with_df', None):
return DFCASCI(mf, ncas, nelecas, ncore)
if mf.mol.symmetry:
mc = casci_symm.CASCI(mf, ncas, nelecas, ncore)
else:
mc = casci.CASCI(mf, ncas, nelecas, ncore)
return mc
RCASCI = CASCI
def UCASCI(mf_or_mol, ncas, nelecas, ncore=None):
from pyscf import gto
from pyscf import scf
if isinstance(mf_or_mol, gto.Mole):
mf = scf.UHF(mf_or_mol)
else:
mf = mf_or_mol
if not isinstance(mf, scf.uhf.UHF):
mf = scf.addons.convert_to_uhf(mf, remove_df=True)
mc = ucasci.UCASCI(mf, ncas, nelecas, ncore)
return mc
def UCASSCF(mf_or_mol, ncas, nelecas, ncore=None, frozen=None):
from pyscf import gto
from pyscf import scf
if isinstance(mf_or_mol, gto.Mole):
mf = scf.UHF(mf_or_mol)
else:
mf = mf_or_mol
if not isinstance(mf, scf.uhf.UHF):
mf = scf.addons.convert_to_uhf(mf, remove_df=True)
mc = umc1step.UCASSCF(mf, ncas, nelecas, ncore, frozen)
return mc
def newton(mc):
return mc.newton()
from pyscf.mcscf import df
def DFCASSCF(mf_or_mol, ncas, nelecas, auxbasis=None, ncore=None,
frozen=None):
from pyscf import gto
from pyscf import scf
if isinstance(mf_or_mol, gto.Mole):
mf = scf.RHF(mf_or_mol).density_fit()
else:
mf = mf_or_mol
if isinstance(mf, scf.uhf.UHF):
mf = scf.addons.convert_to_rhf(mf, remove_df=False)
if mf.mol.symmetry:
mc = mc1step_symm.CASSCF(mf, ncas, nelecas, ncore, frozen)
else:
mc = mc1step.CASSCF(mf, ncas, nelecas, ncore, frozen)
return df.density_fit(mc, auxbasis)
def DFCASCI(mf_or_mol, ncas, nelecas, auxbasis=None, ncore=None):
from pyscf import gto
from pyscf import scf
if isinstance(mf_or_mol, gto.Mole):
mf = scf.RHF(mf_or_mol).density_fit()
else:
mf = mf_or_mol
if isinstance(mf, scf.uhf.UHF):
mf = scf.addons.convert_to_rhf(mf, remove_df=False)
if mf.mol.symmetry:
mc = casci_symm.CASCI(mf, ncas, nelecas, ncore)
else:
mc = casci.CASCI(mf, ncas, nelecas, ncore)
return df.density_fit(mc, auxbasis)
approx_hessian = df.approx_hessian
def density_fit(mc, auxbasis=None, with_df=None):
return mc.density_fit(auxbasis, with_df)
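# --- Hedged usage sketch (editor addition, not part of the original module).
# A minimal density-fitted CASSCF run; the geometry and basis are illustrative.
if __name__ == '__main__':
    from pyscf import gto, scf
    mol = gto.M(atom='N 0 0 0; N 0 0 1.1', basis='ccpvdz', verbose=0)
    mf = scf.RHF(mol).density_fit().run()
    mc = DFCASSCF(mf, 6, 6)
    print('DF-CASSCF energy:', mc.kernel()[0])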
|
tests/cpydiff/types_bytearray_sliceassign.py | learnforpractice/micropython-cpp | 13,648 | 12674648 | <gh_stars>1000+
"""
categories: Types,bytearray
description: Array slice assignment with unsupported RHS
cause: Unknown
workaround: Unknown
"""
b = bytearray(4)
b[0:1] = [1, 2]
print(b)
|
sfa/config/train_config.py | FinalCold/SFA3D | 409 | 12674653 | """
# -*- coding: utf-8 -*-
-----------------------------------------------------------------------------------
# Author: <NAME>
# DoC: 2020.08.17
# email: <EMAIL>
-----------------------------------------------------------------------------------
# Description: The configurations of the project will be defined here
"""
import os
import argparse
import torch
from easydict import EasyDict as edict
def parse_train_configs():
parser = argparse.ArgumentParser(description='The Implementation using PyTorch')
parser.add_argument('--seed', type=int, default=2020,
help='re-produce the results with seed random')
parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',
help='The name using for saving logs, models,...')
parser.add_argument('--root-dir', type=str, default='../', metavar='PATH',
help='The ROOT working directory')
####################################################################
############## Model configs ########################
####################################################################
parser.add_argument('--arch', type=str, default='fpn_resnet_18', metavar='ARCH',
help='The name of the model architecture')
parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',
help='the path of the pretrained checkpoint')
####################################################################
############## Dataloader and Running configs #######
####################################################################
parser.add_argument('--hflip_prob', type=float, default=0.5,
help='The probability of horizontal flip')
parser.add_argument('--no-val', action='store_true',
help='If true, dont evaluate the model on the val set')
parser.add_argument('--num_samples', type=int, default=None,
help='Take a subset of the dataset to run and debug')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of threads for loading data')
parser.add_argument('--batch_size', type=int, default=16,
                        help='mini-batch size (default: 16), this is the total '
                             'batch size of all GPUs on the current node when using '
                             'Data Parallel or Distributed Data Parallel')
parser.add_argument('--print_freq', type=int, default=50, metavar='N',
help='print frequency (default: 50)')
parser.add_argument('--tensorboard_freq', type=int, default=50, metavar='N',
help='frequency of saving tensorboard (default: 50)')
    parser.add_argument('--checkpoint_freq', type=int, default=2, metavar='N',
                        help='frequency of saving checkpoints (default: 2)')
####################################################################
############## Training strategy ####################
####################################################################
parser.add_argument('--start_epoch', type=int, default=1, metavar='N',
help='the starting epoch')
parser.add_argument('--num_epochs', type=int, default=300, metavar='N',
help='number of total epochs to run')
parser.add_argument('--lr_type', type=str, default='cosin',
help='the type of learning rate scheduler (cosin or multi_step or one_cycle)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='initial learning rate')
parser.add_argument('--minimum_lr', type=float, default=1e-7, metavar='MIN_LR',
help='minimum learning rate during training')
parser.add_argument('--momentum', type=float, default=0.949, metavar='M',
help='momentum')
parser.add_argument('-wd', '--weight_decay', type=float, default=0., metavar='WD',
help='weight decay (default: 0.)')
parser.add_argument('--optimizer_type', type=str, default='adam', metavar='OPTIMIZER',
help='the type of optimizer, it can be sgd or adam')
parser.add_argument('--steps', nargs='*', default=[150, 180],
help='number of burn in step')
####################################################################
############## Loss weight ##########################
####################################################################
####################################################################
############## Distributed Data Parallel ############
####################################################################
parser.add_argument('--world-size', default=-1, type=int, metavar='N',
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int, metavar='N',
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--gpu_idx', default=None, type=int,
help='GPU index to use.')
parser.add_argument('--no_cuda', action='store_true',
help='If true, cuda is not used.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
####################################################################
############## Evaluation configurations ###################
####################################################################
parser.add_argument('--evaluate', action='store_true',
help='only evaluate the model, not training')
parser.add_argument('--resume_path', type=str, default=None, metavar='PATH',
help='the path of the resumed checkpoint')
parser.add_argument('--K', type=int, default=50,
help='the number of top K')
configs = edict(vars(parser.parse_args()))
####################################################################
############## Hardware configurations #############################
####################################################################
configs.device = torch.device('cpu' if configs.no_cuda else 'cuda')
configs.ngpus_per_node = torch.cuda.device_count()
configs.pin_memory = True
configs.input_size = (608, 608)
configs.hm_size = (152, 152)
configs.down_ratio = 4
configs.max_objects = 50
configs.imagenet_pretrained = True
configs.head_conv = 64
configs.num_classes = 3
configs.num_center_offset = 2
configs.num_z = 1
configs.num_dim = 3
configs.num_direction = 2 # sin, cos
configs.heads = {
'hm_cen': configs.num_classes,
'cen_offset': configs.num_center_offset,
'direction': configs.num_direction,
'z_coor': configs.num_z,
'dim': configs.num_dim
}
configs.num_input_features = 4
####################################################################
############## Dataset, logs, Checkpoints dir ######################
####################################################################
configs.dataset_dir = os.path.join(configs.root_dir, 'dataset', 'kitti')
configs.checkpoints_dir = os.path.join(configs.root_dir, 'checkpoints', configs.saved_fn)
configs.logs_dir = os.path.join(configs.root_dir, 'logs', configs.saved_fn)
if not os.path.isdir(configs.checkpoints_dir):
os.makedirs(configs.checkpoints_dir)
if not os.path.isdir(configs.logs_dir):
os.makedirs(configs.logs_dir)
return configs
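if __name__ == '__main__':
    # Hedged usage sketch (editor addition, not part of the original file):
    # parse CLI arguments and show a couple of resolved settings. Note that
    # parse_train_configs() also creates the checkpoint/log directories.
    configs = parse_train_configs()
    print(configs.device, configs.checkpoints_dir)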
|
colour/notation/datasets/__init__.py | rift-labs-developer/colour | 1,380 | 12674674 | # -*- coding: utf-8 -*-
from .munsell import * # noqa
from . import munsell
__all__ = []
__all__ += munsell.__all__
|
Tools/Scenarios/strip_code.py | ErQing/Nova | 212 | 12674687 | #!/usr/bin/env python3
from nova_script_parser import normalize_dialogue, parse_chapters
in_filename = 'scenario.txt'
out_filename = 'scenario_no_code.txt'
with open(in_filename, 'r', encoding='utf-8') as f:
chapters = parse_chapters(f)
with open(out_filename, 'w', encoding='utf-8', newline='\n') as f:
for chapter_name, entries, _, _ in chapters:
print(chapter_name)
f.write(chapter_name + '\n\n')
for _, chara_name, dialogue in entries:
dialogue = normalize_dialogue(dialogue, remove_todo=False)
if dialogue:
if chara_name:
f.write(f'{chara_name}:{dialogue}\n\n')
else:
f.write(dialogue + '\n\n')
|
official/vision/detection/modeling/learning_rates.py | akshit-protonn/models | 82,518 | 12674689 | <filename>official/vision/detection/modeling/learning_rates.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learning rate schedule."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
from official.modeling.hyperparams import params_dict
class StepLearningRateWithLinearWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Class to generate learning rate tensor."""
def __init__(self, total_steps, params):
"""Creates the step learning rate tensor with linear warmup."""
super(StepLearningRateWithLinearWarmup, self).__init__()
self._total_steps = total_steps
assert isinstance(params, (dict, params_dict.ParamsDict))
if isinstance(params, dict):
params = params_dict.ParamsDict(params)
self._params = params
def __call__(self, global_step):
warmup_lr = self._params.warmup_learning_rate
warmup_steps = self._params.warmup_steps
init_lr = self._params.init_learning_rate
lr_levels = self._params.learning_rate_levels
lr_steps = self._params.learning_rate_steps
linear_warmup = (
warmup_lr + tf.cast(global_step, dtype=tf.float32) / warmup_steps *
(init_lr - warmup_lr))
learning_rate = tf.where(global_step < warmup_steps, linear_warmup, init_lr)
for next_learning_rate, start_step in zip(lr_levels, lr_steps):
learning_rate = tf.where(global_step >= start_step, next_learning_rate,
learning_rate)
return learning_rate
def get_config(self):
return {'_params': self._params.as_dict()}
class CosineLearningRateWithLinearWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Class to generate learning rate tensor."""
def __init__(self, total_steps, params):
"""Creates the consine learning rate tensor with linear warmup."""
super(CosineLearningRateWithLinearWarmup, self).__init__()
self._total_steps = total_steps
assert isinstance(params, (dict, params_dict.ParamsDict))
if isinstance(params, dict):
params = params_dict.ParamsDict(params)
self._params = params
def __call__(self, global_step):
global_step = tf.cast(global_step, dtype=tf.float32)
warmup_lr = self._params.warmup_learning_rate
warmup_steps = self._params.warmup_steps
init_lr = self._params.init_learning_rate
total_steps = self._total_steps
linear_warmup = (
warmup_lr + global_step / warmup_steps * (init_lr - warmup_lr))
cosine_learning_rate = (
init_lr * (tf.cos(np.pi * (global_step - warmup_steps) /
(total_steps - warmup_steps)) + 1.0) / 2.0)
learning_rate = tf.where(global_step < warmup_steps, linear_warmup,
cosine_learning_rate)
return learning_rate
def get_config(self):
return {'_params': self._params.as_dict()}
def learning_rate_generator(total_steps, params):
"""The learning rate function generator."""
if params.type == 'step':
return StepLearningRateWithLinearWarmup(total_steps, params)
elif params.type == 'cosine':
return CosineLearningRateWithLinearWarmup(total_steps, params)
else:
raise ValueError('Unsupported learning rate type: {}.'.format(params.type))
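# --- Hedged usage sketch (editor addition, not part of the original module).
# Builds a cosine schedule from a plain dict (wrapped in ParamsDict, which the
# classes above expect) and evaluates it at a few representative steps; the
# parameter values are illustrative only.
if __name__ == '__main__':
  demo_params = params_dict.ParamsDict({
      'type': 'cosine',
      'warmup_learning_rate': 0.0067,
      'warmup_steps': 500,
      'init_learning_rate': 0.08,
  })
  lr_fn = learning_rate_generator(total_steps=22500, params=demo_params)
  for step in (0, 500, 11250, 22500):
    print(step, float(lr_fn(tf.constant(step))))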
|
examples/titanic/multiple_model_training.py | yarenty/ludwig | 970 | 12674694 | <reponame>yarenty/ludwig
#!/usr/bin/env python
# # Multiple Model Training Example
#
# This example trains multiple models and extracts training statistics
import logging
import shutil
# ## Import required libraries
from ludwig.api import LudwigModel
from ludwig.datasets import titanic
from ludwig.visualize import learning_curves
# clean out old results
shutil.rmtree("./results", ignore_errors=True)
shutil.rmtree("./visualizations", ignore_errors=True)
# list models to train
list_of_model_ids = ["model1", "model2"]
list_of_train_stats = []
training_set, _, _ = titanic.load(split=True)
# ## Train models
for model_id in list_of_model_ids:
print(">>>> training: ", model_id)
# Define Ludwig model object that drive model training
model = LudwigModel(config="./" + model_id + "_config.yaml", logging_level=logging.WARN)
# initiate model training
train_stats, _, _ = model.train(dataset=training_set, experiment_name="multiple_experiment", model_name=model_id)
# save training stats for later use
list_of_train_stats.append(train_stats)
print(">>>>>>> completed: ", model_id, "\n")
# generating learning curves from training
learning_curves(
list_of_train_stats,
"Survived",
model_names=list_of_model_ids,
output_directory="./visualizations",
file_format="png",
)
|
trove/common/cache.py | hlesesne/trove | 244 | 12674706 | # Copyright 2021 Catalyst Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The code related to integration between oslo.cache module and trove."""
from oslo_cache import core
from oslo_config import cfg
def register_cache_configurations(conf):
"""Register all configurations required for oslo.cache.
The procedure registers all configurations required for oslo.cache.
    It should be called before the cache region is configured.
"""
core.configure(conf)
ports_cache_group = cfg.OptGroup('instance_ports_cache')
ports_cache_opts = [
cfg.IntOpt('expiration_time', default=86400,
help='TTL, in seconds, for any cached item in the '
'dogpile.cache region used for caching of the '
'instance ports.'),
cfg.BoolOpt("caching", default=True,
help='Toggle to enable/disable caching when getting trove '
'instance ports. Please note that the global toggle '
'for oslo.cache(enabled=True in [cache] group) '
'must be enabled to use this feature.')
]
conf.register_group(ports_cache_group)
conf.register_opts(ports_cache_opts, group=ports_cache_group)
return conf
# variable that stores an initialized cache region for trove
_REGION = None
def get_cache_region():
global _REGION
if not _REGION:
_REGION = core.configure_cache_region(
conf=register_cache_configurations(cfg.CONF),
region=core.create_region())
return _REGION
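# --- Hedged usage sketch (editor addition, not part of the original module).
# oslo.cache exposes `core.get_memoization_decorator(conf, region, group)`;
# the function below only illustrates how a caller might wire it up, and the
# inner lookup is a placeholder.
def _example_memoized_ports_lookup():
    memoize = core.get_memoization_decorator(
        cfg.CONF, get_cache_region(), 'instance_ports_cache')
    @memoize
    def get_instance_ports(instance_id):
        # Placeholder for an expensive Neutron port lookup.
        return []
    return get_instance_ports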
|
easytransfer/losses/classification_regression_loss.py | johnson7788/EasyTransfer | 806 | 12674707 | # coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def softmax_cross_entropy(labels, depth, logits):
labels = tf.squeeze(labels)
one_hot_labels = tf.one_hot(labels, depth=depth, dtype=tf.float32)
loss = tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels,
logits=logits)
return loss
def mean_square_error(labels, logits):
return tf.losses.mean_squared_error(labels, logits)
def multi_label_sigmoid_cross_entropy(labels, depth, logits):
one_hots = tf.one_hot(labels, depth)
multi_hots = tf.reduce_max(one_hots, axis=1)
multi_hots = tf.cast(multi_hots, logits.dtype)
    return tf.losses.sigmoid_cross_entropy(multi_class_labels=multi_hots, logits=logits)
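# --- Hedged usage sketch (editor addition, not part of the original module).
# Assumes the TF1.x-style `tf.losses` API that this module itself uses; the
# shapes are illustrative: a batch of 4 examples over 3 classes.
if __name__ == '__main__':
    demo_labels = tf.constant([[0], [2], [1], [2]], dtype=tf.int32)
    demo_logits = tf.constant([[2.0, 0.1, 0.3],
                               [0.2, 0.4, 1.9],
                               [0.1, 1.7, 0.2],
                               [0.3, 0.2, 2.2]])
    print(softmax_cross_entropy(demo_labels, depth=3, logits=demo_logits))
|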
python_modules/libraries/dagster-pagerduty/dagster_pagerduty_tests/test_resources.py | rpatil524/dagster | 4,606 | 12674718 | <gh_stars>1000+
import responses
from dagster import build_op_context, op
from dagster_pagerduty import pagerduty_resource
@responses.activate
def test_pagerduty_resource():
@op(required_resource_keys={"pagerduty"})
def pagerduty_op(context):
assert context.resources.pagerduty
with responses.RequestsMock() as rsps:
rsps.add(
rsps.POST,
"https://events.pagerduty.com/v2/enqueue/",
status=202,
json={"status": "success", "message": "Event processed", "dedup_key": "foobar"},
)
context.resources.pagerduty.EventV2_create(
summary="PING OK - Packet loss = 0%, RTA = 1.41 ms Host 'acme-andromeda-sv1-c40"
":: 172.16.31.10' is DOWN",
source="prod05.theseus.acme-widgets.com",
severity="error",
event_action="trigger",
dedup_key="foobar",
timestamp="2015-07-17T08:42:58.315+0000",
component="mysql",
group="prod-datapipe",
event_class="High CPU",
custom_details={"ping time": "1500ms", "load avg": 0.75},
)
return True
with build_op_context(
resources={
"pagerduty": pagerduty_resource.configured(
{"routing_key": "<KEY>"}
)
}
) as context:
assert pagerduty_op(context)
|
experimental/language_structure/psl/psl_model_multiwoz_test_util.py | y0ast/uncertainty-baselines | 794 | 12674720 | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Util file for psl rules test."""
from typing import List
import tensorflow as tf
LOGITS = [[[0.0, 0.0, 0.4, 0.4, 0.0, 0.2, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.2, 0.6, 0.0, 0.2, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0],
[0.0, 0.8, 0.1, 0.1, 0.2, 0.0, 0.0, 0.0, 0.2],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.8, 0.0, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.4, 0.0, 0.0, 0.0, 0.1, 0.0],
[0.0, 0.0, 0.8, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]]
FEATURES = [[[-1, -2, -2, -2, -1, -2, -2, -2], [-1, -2, -2, -2, -1, -2, -2, -2],
[-1, -2, -2, -2, -2, -2, -2, -2], [-1, -2, -2, -2, -2, -1, -2, -1],
[-1, -2, -2, -1, -1, -2, -2, -2], [-2, -1, -2, -1, -1, -2, -1, -2],
[-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2],
[-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2,
-2]],
[[-1, -2, -2, -2, -2, -2, -2, -2], [-1, -2, -2, -2, -2, -2, -2, -2],
[-1, -2, -2, -1, -1, -2, -2, -2], [-1, -2, -2, -2, -1, -2, -2, -1],
[-1, -2, -1, -2, -1, -2, -2, -2], [-2, -2, -2, -1, -1, -2, -2, -2],
[-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2],
[-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2,
-2]]]
DATA = {
'train_data': [
[[[
1109, 1616, 41, 800, 740, 1743, 557, 981, 886, 1616, 1658, 909,
1380, 1256, 1565, 482, 1304
], [1109, 1304]],
[[1109, 1023, 38, 893, 1037, 1664, 886, 1304],
[
1109,
218, 751, 1616, 812, 1406, 1152, 981, 65, 778, 688, 886, 427, 641,
611, 742, 321, 557, 354, 1471, 161, 182, 767, 1304
]],
[[1109, 1162, 145, 557, 981, 740, 734, 776, 1037, 755, 886, 1304],
[
1109, 1616, 812, 1406, 1152, 981, 79, 886, 766, 1616, 558, 165,
1471, 161, 182, 4, 1304
]],
[[1109, 1738, 145, 893, 532, 1304],
[
1109, 1616, 1658, 218, 1616, 812, 1406, 1152, 981, 79, 886, 1023,
38, 557, 354, 182, 731, 161, 182, 1304
]],
[[1109, 1738, 145, 1215, 1047, 1274, 1304],
[
1109, 1616, 812, 1406, 1152, 981, 740, 65, 778, 688, 886, 427,
641, 611, 742, 321, 557, 354, 1017, 161, 731, 1304
]],
[[1109, 1162, 641, 631, 145, 1738, 1499, 740, 1743, 557, 981, 1304],
[
1109, 1616, 1658, 218, 145, 1162, 1499, 981, 740, 263, 173, 62,
886, 766, 1616, 558, 165, 1471, 161, 1017, 4, 1304
]]],
[[[
1109, 1616, 1658, 1450, 1743, 800, 1430, 79, 886, 1616, 1658, 1496,
1565, 1448, 929, 1489, 742, 1662, 1565, 1662, 1304
], [1109, 1304]]],
[[[
1109, 1616, 1658, 1276, 1450, 1743, 800, 1430, 79, 751, 1616, 1133,
1431, 1496, 742, 1062, 1415, 1565, 818, 1304
], [1109, 1304]]],
[[[
1109, 1616, 41, 800, 981, 886, 1616, 1077, 742, 1145, 1565, 83,
1037, 923, 1304
], [1109, 1304]],
[[1109, 1738, 145, 557, 740, 1743, 557, 981, 909, 256, 680, 187, 1304],
[
1109, 218, 1616, 812, 1406, 1152, 981, 740, 886, 1023, 38, 557,
354, 182, 767, 161, 1017, 4, 1304
]],
[[1109, 525, 641, 751, 1498, 1133, 1431, 1085, 1743, 610, 1304],
[1109, 427, 641, 611, 742, 865, 641, 557, 574, 1304]],
[[1109, 525, 641, 751, 1498, 1133, 1431, 1085, 886, 1304],
[1109, 1185, 641, 1077, 1762, 512, 4, 1304]]],
[[[
1109, 764, 1178, 1616, 1658, 1450, 1743, 557, 981, 79, 886, 1616,
1133, 1431, 1496, 742, 821, 1565, 83, 1304
], [1109, 1304]]]
],
'test_data': [
[[[
1109, 1616, 1658, 1450, 1743, 891, 38, 800, 1430, 886, 1616, 1658,
909, 742, 499, 1565, 1159, 1472, 886, 1304
], [1109, 1304]]],
[[[
1109, 1616, 427, 611, 564, 112, 801, 1412, 742, 446, 248, 800, 1001,
194, 886, 1616, 1077, 742, 1514, 1743, 142, 886, 1304
], [1109, 1304]],
[[1109, 1738, 1573, 557, 1510, 1561, 1301, 1301, 1412, 4, 1304],
[
1109, 1616, 323, 800, 1409, 1177, 886, 1573, 1738, 557, 1412, 742,
1621, 248, 800, 1001, 194, 886, 1304
]],
[[1109, 1499, 1718, 37, 1738, 1337, 1616, 1077, 886, 1304],
[
1109, 800, 1176, 72, 1506, 1738, 1374, 751, 427, 641, 611, 742,
1514, 1573, 1304
]]],
[[[
1109, 1228, 1616, 1658, 1450, 1743, 800, 981, 886, 1616, 1077, 742,
1145, 283, 1669, 1565, 482, 1250, 551, 886, 1304
], [1109, 1304]],
[[1109, 1228, 766, 641, 1406, 1762, 742, 849, 1304],
[
1109, 1616, 812, 1406, 1152, 981, 740, 886, 427, 641, 611, 742,
321, 557, 354, 182, 731, 4, 1304
]],
[[1109, 1718, 37, 1738, 1337, 1616, 1077, 1304],
[1109, 427, 641, 611, 742, 865, 641, 557, 574, 1304]],
[[1109, 525, 641, 37, 1738, 1337, 1616, 1077, 886, 1304],
[1109, 1738, 145, 1762, 512, 1616, 766, 814, 641, 4, 1304]]],
[[[
1109, 1228, 1616, 1658, 1450, 1743, 662, 226, 557, 981, 79, 886,
1616, 1658, 1496, 742, 1187, 1493, 1136, 1565, 1690, 886, 1304
], [1109, 1304]]],
],
'vocab_mapping': {
'address': 53,
'thank': 525,
'sure': 631,
'yes': 758,
'hello': 764,
'pricey': 1012,
'hi': 1228,
'great': 1490,
'no': 1499,
'phone': 1596,
'thanks': 1718,
},
'train_labels': [[
'init_request', 'second_request', 'second_request', 'second_request',
'second_request', 'insist'
], ['init_request'], ['init_request'],
['init_request', 'second_request', 'cancel', 'end'],
['init_request']],
'test_labels': [['init_request'],
['init_request', 'slot_question', 'cancel'],
['init_request', 'second_request', 'cancel', 'end'],
['init_request']]
}
TEST_MULTIWOZ_CONFIG = {
'default_seed': 4,
'batch_size': 128,
'max_dialog_size': 10,
'max_utterance_size': 40,
'class_map': {
'accept': 0,
'cancel': 1,
'end': 2,
'greet': 3,
'info_question': 4,
'init_request': 5,
'insist': 6,
'second_request': 7,
'slot_question': 8,
},
'accept_words': ['yes', 'great'],
'cancel_words': ['no'],
'end_words': ['thank', 'thanks'],
'greet_words': ['hello', 'hi'],
'info_question_words': ['address', 'phone'],
'insist_words': ['sure', 'no'],
'slot_question_words': ['pricey'],
'includes_word': -1,
'excludes_word': -2,
'mask_index': 0,
'accept_index': 1,
'cancel_index': 2,
'end_index': 3,
'greet_index': 4,
'info_question_index': 5,
'insist_index': 6,
'slot_question_index': 7,
'utterance_mask': -1,
'last_utterance_mask': -2,
'pad_utterance_mask': -3,
'shuffle_train': True,
'shuffle_test': False,
'train_epochs': 5,
}
def build_constrained_model(input_size: List[int]) -> tf.keras.Model:
"""Build simple neural model for class prediction."""
input_layer = tf.keras.layers.Input(input_size)
hidden_layer_1 = tf.keras.layers.Dense(1024)(input_layer)
hidden_layer_2 = tf.keras.layers.Dense(
512, activation='sigmoid')(
hidden_layer_1)
output = tf.keras.layers.Dense(
9, activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(1.0))(
hidden_layer_2)
model = tf.keras.Model(input_layer, output)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
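# --- Hedged usage sketch (editor addition, not part of the original module).
# Builds the constrained model for the utterance-level feature shape used by
# FEATURES above (10 utterances x 8 features) and runs a forward pass.
if __name__ == '__main__':
  demo_model = build_constrained_model([10, 8])
  demo_batch = tf.constant(FEATURES, dtype=tf.float32)  # shape (2, 10, 8)
  print(demo_model(demo_batch).shape)  # -> (2, 10, 9)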
|
test_twowheel.py | rossning92/rpi-robot | 106 | 12674722 | from device import Motor
from time import sleep
from twowheel import TwoWheelController
def test_controller(controller):
for _ in range(2):
controller.set_axis(x=-0.3)
sleep(0.25)
controller.set_axis(x=0.3)
sleep(0.25)
controller.set_axis()
if __name__ == "__main__":
controller = TwoWheelController()
test_controller(controller)
|
src/oci/golden_gate/models/create_deployment_backup_details.py | Manny27nyc/oci-python-sdk | 249 | 12674736 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateDeploymentBackupDetails(object):
"""
The information about a new DeploymentBackup.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateDeploymentBackupDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this CreateDeploymentBackupDetails.
:type display_name: str
:param compartment_id:
The value to assign to the compartment_id property of this CreateDeploymentBackupDetails.
:type compartment_id: str
:param deployment_id:
The value to assign to the deployment_id property of this CreateDeploymentBackupDetails.
:type deployment_id: str
:param namespace_name:
The value to assign to the namespace_name property of this CreateDeploymentBackupDetails.
:type namespace_name: str
:param bucket_name:
The value to assign to the bucket_name property of this CreateDeploymentBackupDetails.
:type bucket_name: str
:param object_name:
The value to assign to the object_name property of this CreateDeploymentBackupDetails.
:type object_name: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateDeploymentBackupDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateDeploymentBackupDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'display_name': 'str',
'compartment_id': 'str',
'deployment_id': 'str',
'namespace_name': 'str',
'bucket_name': 'str',
'object_name': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'display_name': 'displayName',
'compartment_id': 'compartmentId',
'deployment_id': 'deploymentId',
'namespace_name': 'namespaceName',
'bucket_name': 'bucketName',
'object_name': 'objectName',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._display_name = None
self._compartment_id = None
self._deployment_id = None
self._namespace_name = None
self._bucket_name = None
self._object_name = None
self._freeform_tags = None
self._defined_tags = None
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this CreateDeploymentBackupDetails.
An object's Display Name.
:return: The display_name of this CreateDeploymentBackupDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateDeploymentBackupDetails.
An object's Display Name.
:param display_name: The display_name of this CreateDeploymentBackupDetails.
:type: str
"""
self._display_name = display_name
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this CreateDeploymentBackupDetails.
The `OCID`__ of the compartment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this CreateDeploymentBackupDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this CreateDeploymentBackupDetails.
The `OCID`__ of the compartment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this CreateDeploymentBackupDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def deployment_id(self):
"""
**[Required]** Gets the deployment_id of this CreateDeploymentBackupDetails.
The `OCID`__ of the deployment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The deployment_id of this CreateDeploymentBackupDetails.
:rtype: str
"""
return self._deployment_id
@deployment_id.setter
def deployment_id(self, deployment_id):
"""
Sets the deployment_id of this CreateDeploymentBackupDetails.
The `OCID`__ of the deployment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param deployment_id: The deployment_id of this CreateDeploymentBackupDetails.
:type: str
"""
self._deployment_id = deployment_id
@property
def namespace_name(self):
"""
**[Required]** Gets the namespace_name of this CreateDeploymentBackupDetails.
Name of namespace that serves as a container for all of your buckets
:return: The namespace_name of this CreateDeploymentBackupDetails.
:rtype: str
"""
return self._namespace_name
@namespace_name.setter
def namespace_name(self, namespace_name):
"""
Sets the namespace_name of this CreateDeploymentBackupDetails.
Name of namespace that serves as a container for all of your buckets
:param namespace_name: The namespace_name of this CreateDeploymentBackupDetails.
:type: str
"""
self._namespace_name = namespace_name
@property
def bucket_name(self):
"""
**[Required]** Gets the bucket_name of this CreateDeploymentBackupDetails.
Name of the bucket where the object is to be uploaded in the object storage
:return: The bucket_name of this CreateDeploymentBackupDetails.
:rtype: str
"""
return self._bucket_name
@bucket_name.setter
def bucket_name(self, bucket_name):
"""
Sets the bucket_name of this CreateDeploymentBackupDetails.
Name of the bucket where the object is to be uploaded in the object storage
:param bucket_name: The bucket_name of this CreateDeploymentBackupDetails.
:type: str
"""
self._bucket_name = bucket_name
@property
def object_name(self):
"""
**[Required]** Gets the object_name of this CreateDeploymentBackupDetails.
Name of the object to be uploaded to object storage
:return: The object_name of this CreateDeploymentBackupDetails.
:rtype: str
"""
return self._object_name
@object_name.setter
def object_name(self, object_name):
"""
Sets the object_name of this CreateDeploymentBackupDetails.
Name of the object to be uploaded to object storage
:param object_name: The object_name of this CreateDeploymentBackupDetails.
:type: str
"""
self._object_name = object_name
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateDeploymentBackupDetails.
A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this CreateDeploymentBackupDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateDeploymentBackupDetails.
A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this CreateDeploymentBackupDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateDeploymentBackupDetails.
Tags defined for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this CreateDeploymentBackupDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateDeploymentBackupDetails.
Tags defined for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this CreateDeploymentBackupDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
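# --- Hedged usage sketch (editor addition, not part of the original module).
# The OCIDs and names below are placeholders, not real identifiers.
if __name__ == '__main__':
    details = CreateDeploymentBackupDetails(
        display_name='nightly-backup',
        compartment_id='ocid1.compartment.oc1..example',
        deployment_id='ocid1.goldengatedeployment.oc1..example',
        namespace_name='example-namespace',
        bucket_name='backups',
        object_name='nightly-backup.bak',
        freeform_tags={'env': 'dev'})
    print(details)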
|
python/ffi_navigator/pattern.py | csullivan/ffi-navigator | 148 | 12674747 | """FFI related code patterns and utility function to detect them."""
from typing import Optional
import re
import attr
import numpy as np
from bisect import bisect
from .lsp import Range, Position, Location
class Pattern:
"""Base class of all interesting code patterns."""
pass
@attr.s
class Def(Pattern):
"""Definition of an FFI resource.
Parameters
----------
key : str
Global unique id to locate the FFI resource.
path : str
The file path.
range: Range
        The location range of the definition.
"""
key : str = attr.ib()
path : str = attr.ib()
range: Range = attr.ib()
@attr.s
class Ref(Pattern):
"""Reference of an FFI resource.
Parameters
----------
key : str
Global unique id to locate the FFI resource.
path : str
The file path.
range: Range
        The location range of the reference.
"""
key : str = attr.ib()
path : str = attr.ib()
range : Range = attr.ib()
@attr.s
class Export(Pattern):
"""Export a collection of keys with the same prefix.
Parameters
----------
key_prefix : str
The prefix of keys to be exported.
path : str
The file path to be exported.
fkey2var : Function
        The function that takes a key and maps it to a local var
        in the path which corresponds to the FFI resource.
    fvar2key : Function
        The function that takes a local var name and maps it
        to the corresponding key; can return None.
"""
key_prefix : str = attr.ib()
path : str = attr.ib()
fkey2var = attr.ib()
fvar2key = attr.ib()
@attr.s
class Symbol(Pattern):
"""A symbol in python expression, can contain dot.
"""
value : str = attr.ib()
def re_matcher(rexpr, fcreate, use_search=False):
"""
Parameters
----------
rexpr : str
A regexp pattern to match.
fcreate : Function (match, path, range) -> result.
use_search: bool
        Whether to use re.search instead of re.match.
"""
rexpr = re.compile(rexpr)
def _matcher(path, source, begin_line=0, end_line=None):
source = source.split("\n") if isinstance(source, str) else source
results = []
end_line = min(end_line, len(source)) if end_line else len(source)
for line in range(begin_line, end_line):
content = source[line]
match = rexpr.search(content) if use_search else rexpr.match(content)
if match:
start, end = match.span()
start_pos = Position(line, start)
end_pos = Position(line, end)
item = fcreate(match, path,
Range(start_pos, end_pos))
if item:
results.append(item)
return results
return _matcher
def re_multi_line_matcher(rexpr, fcreate):
""" Matches a pattern spanning multiple lines
Parameters
----------
rexpr : str
A regexp pattern to match.
fcreate : Function (match, path, range) -> result.
"""
rexpr = re.compile(rexpr)
def _matcher(path, lines):
source = "".join(lines)
matches = list(rexpr.finditer(source))
if matches == []:
return []
        line_counts = map(len, lines)
cumsum = np.cumsum(list(line_counts))
# find line num, start and end pos for each match
next_begin = 0
result = []
for match in matches:
line_num_start = bisect(cumsum[next_begin:], match.start()) + next_begin
next_begin = line_num_start
line_num_end = line_num_start + match.group().count("\n")
            # No cumulative offset exists before the first line, so guard index 0.
            start_offset = int(cumsum[line_num_start - 1]) if line_num_start > 0 else 0
            end_offset = int(cumsum[line_num_end - 1]) if line_num_end > 0 else 0
            pos_start = match.start() - start_offset
            pos_end = match.end() - end_offset
            assert pos_start >= 0 and pos_end >= 0
rg = Range(Position(line_num_start, pos_start), Position(line_num_end, pos_end))
result.append(fcreate(match, path, rg))
return result
return _matcher
def re_match_pybind_class():
return re_multi_line_matcher(r"py::class_\<[A-Za-z0-9|_|::|<|>]+(\,\s*[A-Za-z0-9|_|::|<|>]+)*\>"
r"\s*\(\s*m,\s*\"(?P<key>[A-Za-z0-9|_]+)\""
r"(,\s*[A-Za-z0-9|_|::|<|>|(|)]+)*\)",
lambda match, path, rg: \
Def(key=match.group("key"), path=path, range=rg))
def re_match_pybind_method():
return re_multi_line_matcher(r"\.def\(\s*\"(?P<key>[a-z0-9|_]+)\"",
lambda match, path, rg: \
Def(key=match.group("key"), path=path, range=rg))
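# Illustrative sketch (not part of the original module): the multi-line matcher
# applied to a toy pybind11 binding. "MyClass" and the path are made-up
# examples; note that each element of `lines` keeps its trailing newline.
def _example_pybind_class_match():
    lines = ['// toy binding\n', 'py::class_<MyClass>(m, "MyClass");\n']
    # Returns [Def(key="MyClass", path="bindings.cc", range=...)].
    return re_match_pybind_class()("bindings.cc", lines)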
def macro_matcher(macro_names, fcreate=None):
"""Match pattern <macro_name>("<skey>"
Parameters
----------
macro_names : list
List of macro names to match.
fcreate : Function (skey, path, range, macro_name) -> result.
"""
rexpr = r"(?P<macro_name>("
rexpr += "|".join(re.escape(x) for x in macro_names)
rexpr += r"))\(\"(?P<skey>[^\"]+)\"\)?"
def _fcreate(match, path, rg):
return fcreate(match.group("skey"), path,
rg,
match.group("macro_name"))
return re_matcher(rexpr, _fcreate)
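# Illustrative sketch (not part of the original module): matching C++ macro
# registrations such as TVM's TVM_REGISTER_GLOBAL. The key below is a made-up
# example.
def _example_macro_matcher():
    matcher = macro_matcher(
        ["TVM_REGISTER_GLOBAL"],
        lambda key, path, rg, macro: Def(key=key, path=path, range=rg))
    source = 'TVM_REGISTER_GLOBAL("relay.op._make.add")\n'
    return matcher("registry.cc", source)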
def func_get_searcher(func_names, fcreate=None):
"""Search pattern <func_name>("<skey>")
Parameters
----------
func_names : list
        List of function names to match.
fcreate : Function (skey, path, range, func_name) -> result.
"""
rexpr = r"(?P<func_name>("
rexpr += "|".join(re.escape(x) for x in func_names)
rexpr += r"))\(\"(?P<skey>[^\"]+)\"\)"
rexpr = re.compile(rexpr)
def _matcher(path, source, begin_line=0, end_line=None):
source = source.split("\n") if isinstance(source, str) else source
results = []
end_line = min(end_line, len(source)) if end_line else len(source)
for line in range(begin_line, end_line):
content = source[line]
str_pos = 0
while True:
match = rexpr.search(content, str_pos)
if not match:
break
start, end = match.span("skey")
start_pos = Position(line, start)
end_pos = Position(line, end)
item = fcreate(match.group("skey"), path,
Range(start_pos, end_pos),
match.group("func_name"))
if item:
results.append(item)
str_pos = match.end()
return results
return _matcher
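# Illustrative sketch (not part of the original module): searching anywhere in
# a line for calls like tvm.get_global_func("..."). The key below is a made-up
# example.
def _example_func_searcher():
    searcher = func_get_searcher(
        ["tvm.get_global_func"],
        lambda key, path, rg, fname: Ref(key=key, path=path, range=rg))
    source = 'f = tvm.get_global_func("relay.op._make.add")\n'
    return searcher("example.py", source)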
def decorator_matcher(func_names, keyword, fcreate=None):
"""Search pattern @[namespace]<func_name>("<skey>")
Parameters
----------
func_names : list
        List of decorator names to match.
    keyword : str
        Keyword ("def" or "class") expected to start the next line; used to
        extract the name when the decorator carries no string argument.
    fcreate : Function (skey, path, range, decorator) -> result.
"""
decorator = r"@?(?P<decorator>([a-zA-Z_]?[a-zA-Z_0-9.]*.)?("
decorator += "|".join(re.escape(x) for x in func_names)
    # raw string avoids invalid-escape warnings for \( \s \Z
    decorator += r"))((\(\"(?P<skey>[^\"]+)\")|(\s*\Z))"
nextline = keyword + r"\s+(?P<skey>[a-zA-Z_0-9]+)\("
decorator = re.compile(decorator)
nextline = re.compile(nextline)
def _matcher(path, source, begin_line=0, end_line=None):
source = source.split("\n") if isinstance(source, str) else source
results = []
end_line = min(end_line, len(source)) if end_line else len(source)
for line in range(begin_line, end_line):
content = source[line]
match = decorator.match(content)
if match:
skey = match.group("skey")
if skey:
start, end = match.span("skey")
lineno = line
if not skey and line + 1 < len(source):
match_name = nextline.match(source[line + 1])
if match_name:
skey = match_name.group("skey")
start, end = match_name.span("skey")
lineno = line + 1
if skey:
start_pos = Position(lineno, start)
end_pos = Position(lineno, end)
item = fcreate(skey, path,
Range(start_pos, end_pos),
match.group("decorator"))
if item:
results.append(item)
return results
return _matcher
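# Illustrative sketch (not part of the original module): a bare decorator with
# no string key falls through to the next "def" line for the name. The
# decorator and function names are made-up examples.
def _example_decorator_matcher():
    matcher = decorator_matcher(
        ["register_func"], "def",
        lambda key, path, rg, decorator: Def(key=key, path=path, range=rg))
    source = '@register_func\ndef my_packed_func(x):\n    return x\n'
    # Returns one Def with key "my_packed_func" located on the def line.
    return matcher("example.py", source)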
@attr.s
class PyImport:
"""Python import syntax."""
from_mod : Optional[str] = attr.ib()
import_name : str = attr.ib()
alias : Optional[str] = attr.ib()
RE_PY_IMPORT_PREFIX = re.compile(r"\s*from\s+(?P<mod>[^\s]+)\s+import")
RE_PY_IMPORT_ITEM = re.compile(r"\s+(?P<name>[^\s]+)(\s+as\s+(?P<alias>[^\s]+))?")
def find_py_imports(source):
"""Discover python import information."""
source = source.split("\n") if isinstance(source, str) else source
results = []
for line, content in enumerate(source):
prefix = RE_PY_IMPORT_PREFIX.match(content)
if prefix:
from_mod = prefix.group("mod")
for item in content[prefix.end():].split(","):
match = RE_PY_IMPORT_ITEM.match(item)
if match:
results.append(PyImport(from_mod=from_mod,
import_name=match.group("name"),
alias=match.group("alias")))
return results
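# Illustrative sketch (not part of the original module): find_py_imports on a
# single aliased import line.
def _example_find_py_imports():
    source = "from tvm import relay as r, te\n"
    # Returns [PyImport(from_mod='tvm', import_name='relay', alias='r'),
    #          PyImport(from_mod='tvm', import_name='te', alias=None)]
    return find_py_imports(source)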
RE_PY_DELIM = r"(\s|[.,\(\)\[\]{}:=\+\-\*]|\Z)+"
def search_symbol(source, symbols):
"""Search symbols within a source, return matched positions."""
source = source.split("\n") if isinstance(source, str) else source
rexpr = RE_PY_DELIM + "(?P<name>("
rexpr += "|".join([re.escape(sym) for sym in symbols]) + "))"
rexpr += RE_PY_DELIM
rexpr = re.compile(rexpr)
results = []
for line, content in enumerate(source):
match = rexpr.search(content)
if match:
start, end = match.span("name")
start_pos = Position(line, start)
end_pos = Position(line, end)
results.append(Range(start_pos, end_pos))
return results
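# Illustrative sketch (not part of the original module): the delimiter pattern
# keeps "relay.add" from matching inside longer identifiers.
def _example_search_symbol():
    source = "x = relay.add(a, b)\n"
    # Returns one Range covering the "relay.add" occurrence on line 0.
    return search_symbol(source, ["relay.add"])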
RE_PY_NAMESPACE_PREFIX = re.compile(r"[a-zA-Z_][a-zA-Z0-9_.]+\Z")
RE_PY_VAR_NAME = re.compile(r"[a-zA-Z0-9_.]+")
def extract_symbol(source, pos: Position):
"""Find the complete expression, include namespace prefix"""
source = source.split("\n") if isinstance(source, str) else source
content = source[pos.line]
mprefix = RE_PY_NAMESPACE_PREFIX.search(content, 0, pos.character)
start = mprefix.start() if mprefix else pos.character
mvar = RE_PY_VAR_NAME.match(content, pos.character)
end = mvar.end() if mvar else pos.character
value = content[start:end]
if end < len(content) and content[end] == "\"":
return None
return Symbol(value)
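# Illustrative sketch (not part of the original module): recovering the full
# dotted expression under a cursor position.
def _example_extract_symbol():
    source = "y = tvm.relay.add(a, b)\n"
    # Cursor on the "r" of "relay" (line 0, character 8) yields
    # Symbol("tvm.relay.add").
    return extract_symbol(source, Position(0, 8))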
|
ceilometer/keystone_client.py | ionutbiru/ceilometer | 239 | 12674754 |
#
# Copyright 2015 eNovance <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from keystoneauth1 import loading as ka_loading
from keystoneclient.v3 import client as ks_client_v3
from oslo_config import cfg
DEFAULT_GROUP = "service_credentials"
# List of groups that can set auth_section to use a different
# credentials section
OVERRIDABLE_GROUPS = ['gnocchi', 'zaqar', 'monasca']
def get_session(conf, requests_session=None, group=None, timeout=None):
"""Get a ceilometer service credentials auth session."""
group = group or DEFAULT_GROUP
auth_plugin = ka_loading.load_auth_from_conf_options(conf, group)
kwargs = {'auth': auth_plugin, 'session': requests_session}
if timeout is not None:
kwargs['timeout'] = timeout
session = ka_loading.load_session_from_conf_options(conf, group, **kwargs)
return session
def get_client(conf, trust_id=None, requests_session=None,
group=DEFAULT_GROUP):
"""Return a client for keystone v3 endpoint, optionally using a trust."""
session = get_session(conf, requests_session=requests_session, group=group)
return ks_client_v3.Client(session=session, trust_id=trust_id,
region_name=conf[group].region_name)
def get_service_catalog(client):
return client.session.auth.get_access(client.session).service_catalog
def get_auth_token(client):
return client.session.auth.get_access(client.session).auth_token
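# Illustrative sketch (not part of the original module): typical wiring of the
# helpers above. Assumes `conf` is an oslo.config ConfigOpts with the
# service_credentials options already registered and populated.
def _example_keystone_usage(conf):
    client = get_client(conf)
    catalog = get_service_catalog(client)
    token = get_auth_token(client)
    return catalog, token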
CLI_OPTS = [
cfg.StrOpt('region-name',
deprecated_group="DEFAULT",
deprecated_name="os-region-name",
default=os.environ.get('OS_REGION_NAME'),
help='Region name to use for OpenStack service endpoints.'),
cfg.StrOpt('interface',
default=os.environ.get(
'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE',
'public')),
deprecated_name="os-endpoint-type",
choices=('public', 'internal', 'admin', 'auth', 'publicURL',
'internalURL', 'adminURL'),
help='Type of endpoint in Identity service catalog to use for '
'communication with OpenStack services.'),
]
def register_keystoneauth_opts(conf):
_register_keystoneauth_group(conf, DEFAULT_GROUP)
for group in OVERRIDABLE_GROUPS:
_register_keystoneauth_group(conf, group)
conf.set_default('auth_section', DEFAULT_GROUP, group=group)
def _register_keystoneauth_group(conf, group):
ka_loading.register_auth_conf_options(conf, group)
ka_loading.register_session_conf_options(
conf, group,
deprecated_opts={'cacert': [
cfg.DeprecatedOpt('os-cacert', group=group),
cfg.DeprecatedOpt('os-cacert', group="DEFAULT")]
})
conf.register_opts(CLI_OPTS, group=group)
def post_register_keystoneauth_opts(conf):
for group in OVERRIDABLE_GROUPS:
if conf[group].auth_section != DEFAULT_GROUP:
# NOTE(sileht): We register this again after the auth_section have
# been read from the configuration file
_register_keystoneauth_group(conf, conf[group].auth_section)
|
04-distillation/model.py | PeiqinSun/tf-tutorials | 184 | 12674784 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.contrib as tf_contrib
from common import config
from model_utils import conv2d_quantize
class Model():
def __init__(self):
# set the initializer of conv_weight and conv_bias
self.weight_init = tf_contrib.layers.variance_scaling_initializer(factor=1.0,
mode='FAN_IN', uniform=False)
self.bias_init = tf.zeros_initializer()
self.reg = tf_contrib.layers.l2_regularizer(config.weight_decay)
    def _conv_layer(self, name, inp, kernel_shape, stride, padding='SAME',
                    f_num_bits=None, w_num_bits=None, if_num_bits=None,
                    is_training=False):
with tf.variable_scope(name) as scope:
            conv_filter = tf.get_variable(name=name + '_filter', shape=kernel_shape,
                                          initializer=self.weight_init, regularizer=self.reg)
            conv_bias = tf.get_variable(name=name + '_bias', shape=kernel_shape[-1],
                                        initializer=self.bias_init)
            x = conv2d_quantize(name + '_quantize', inp, stride, padding,
                                data_format='NHWC', kernel_shape=kernel_shape,
                                conv_filter=conv_filter, conv_bias=conv_bias,
                                f_num_bits=f_num_bits, w_num_bits=w_num_bits,
                                if_num_bits=if_num_bits, is_training=is_training)
return x
def _pool_layer(self, name, inp, ksize, stride, padding='SAME', mode='MAX'):
assert mode in ['MAX', 'AVG'], 'the mode of pool must be MAX or AVG'
if mode == 'MAX':
x = tf.nn.max_pool(inp, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1],
padding=padding, name=name, data_format='NHWC')
elif mode == 'AVG':
x = tf.nn.avg_pool(inp, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1],
padding=padding, name=name, data_format='NHWC')
return x
def _fc_layer(self, name, inp, units, dropout=0.5):
with tf.variable_scope(name) as scope:
shape = inp.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
x = tf.reshape(inp, [-1, dim]) # flatten
if dropout > 0:
x = tf.nn.dropout(x, keep_prob=dropout, name='dropout')
x = tf.layers.dense(x, units, kernel_initializer=self.weight_init,
bias_initializer=self.bias_init, kernel_regularizer=self.reg)
return x
def build(self):
w_bits = 2
f_bits = 1
data = tf.placeholder(tf.float32, shape=(None,)+config.image_shape+(config.nr_channel,),
name='data')
label = tf.placeholder(tf.int32, shape=(None,), name='label')
# convert the format of label to one-hot
label_onehot = tf.one_hot(label, config.nr_class, dtype=tf.int32)
# a setting for bn
is_training = tf.placeholder(tf.bool, name='is_training')
# conv1
x = self._conv_layer(name='quantized_conv1', inp=data,
                             kernel_shape=[3, 3, config.nr_channel, 16], stride=1, f_num_bits=f_bits, w_num_bits=w_bits, is_training=is_training)  # Nx32x32x16
x = self._pool_layer(name='quantized_pool1', inp=x, ksize=2, stride=2, mode='MAX') # Nx16x16x16
# conv2
x = self._conv_layer(name='quantized_conv21', inp=x, kernel_shape=[3, 3, 16, 32],
stride=1, f_num_bits = f_bits, w_num_bits = w_bits,is_training=is_training)
x = self._conv_layer(name='quantized_conv22', inp=x, kernel_shape=[3, 3, 32, 32],
stride=1, f_num_bits = f_bits, w_num_bits = w_bits, is_training=is_training)
x = self._pool_layer(name='quantized_pool2', inp=x, ksize=2, stride=2, mode='MAX') # Nx8x8x32
# conv3
x = self._conv_layer(name='quantized_conv31', inp=x, kernel_shape=[3, 3, 32, 64],
stride=1, f_num_bits = f_bits, w_num_bits = w_bits,is_training=is_training)
x = self._conv_layer(name='quantized_conv32', inp=x, kernel_shape=[3, 3, 64, 64],
stride=1, f_num_bits = f_bits, w_num_bits = w_bits,is_training=is_training)
x = self._pool_layer(name='quantized_pool3', inp=x, ksize=2, stride=2, mode='MAX') # Nx4x4x64
# conv4
x = self._conv_layer(name='quantized_conv41', inp=x, kernel_shape=[3, 3, 64, 128],
stride=1, f_num_bits = f_bits, w_num_bits = w_bits,is_training=is_training)
x = self._conv_layer(name='quantized_conv42', inp=x, kernel_shape=[3, 3, 128, 128],
stride=1, f_num_bits = f_bits, w_num_bits = w_bits,is_training=is_training)
x = self._pool_layer(name='quantized_pool4', inp=x, ksize=4, stride=4, mode='AVG') # Nx1x1x128
# fc1
logits = self._fc_layer(name='quantized_fc1', inp=x, units=config.nr_class, dropout=0)
placeholders = {
'data': data,
'label': label,
'is_training': is_training,
}
return placeholders, label_onehot, logits
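# Illustrative sketch (not part of the original file): building the graph and
# running one forward pass on a random batch. The batch size of 8 is a made-up
# example; shapes follow config.image_shape and config.nr_channel.
def _example_forward_pass():
    import numpy as np
    model = Model()
    placeholders, label_onehot, logits = model.build()
    batch_shape = (8,) + config.image_shape + (config.nr_channel,)
    batch = np.random.rand(*batch_shape).astype('float32')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(logits, feed_dict={
            placeholders['data']: batch,
            placeholders['is_training']: False,
        })
    return out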
|
zipfly/__init__.py | beckedorf/zipfly | 522 | 12674807 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, with_statement
__version__ = '6.0.4'
__author__ = '<NAME> - Buzon, Inc.'
__license__ = 'MIT'
from .zipfly import ZipFly
from .zipfly import LargePredictionSize
from .response import FileResponse
from .api import Buffer
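# Illustrative usage sketch (not part of the original module). ZipFly takes a
# list of {'fs': filesystem_path, 'n': name_inside_zip} dicts and streams the
# archive; the paths below are made-up examples.
#
#     import zipfly
#     paths = [{'fs': '/tmp/report.pdf', 'n': 'report.pdf'}]
#     zfly = zipfly.ZipFly(paths=paths)
#     with open('/tmp/archive.zip', 'wb') as f:
#         for chunk in zfly.generator():
#             f.write(chunk)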
|
explainerdashboard/dashboard_components/composites.py | Simon-Free/explainerdashboard | 1,178 | 12674824 | __all__ = [
'ImportancesComposite',
'ClassifierModelStatsComposite',
'RegressionModelStatsComposite',
'IndividualPredictionsComposite',
'ShapDependenceComposite',
'ShapInteractionsComposite',
'DecisionTreesComposite',
'WhatIfComposite',
'SimplifiedClassifierComposite',
'SimplifiedRegressionComposite',
]
import dash_bootstrap_components as dbc
import dash_html_components as html
from ..explainers import RandomForestExplainer, XGBExplainer
from ..dashboard_methods import *
from .classifier_components import *
from .regression_components import *
from .overview_components import *
from .connectors import *
from .shap_components import *
from .decisiontree_components import *
from .. import to_html
class ImportancesComposite(ExplainerComponent):
def __init__(self, explainer, title="Feature Importances", name=None,
hide_title=False, hide_importances=False, hide_descriptions=False,
hide_selector=True, **kwargs):
"""Overview tab of feature importances
Can show both permutation importances and mean absolute shap values.
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Importances".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide the title
hide_importances (bool, optional): hide the ImportancesComponent
hide_descriptions (bool, optional): hide the FeatureDescriptionsComponent
hide_selector (bool, optional): hide the post label selector.
Defaults to True.
"""
super().__init__(explainer, title, name)
self.importances = ImportancesComponent(
explainer, name=self.name+"0", hide_selector=hide_selector, **kwargs)
self.feature_descriptions = FeatureDescriptionsComponent(explainer, **kwargs)
if not self.explainer.descriptions:
            self.hide_descriptions = True
def layout(self):
return html.Div([
dbc.Row([
make_hideable(
dbc.Col([
html.H2(self.title)]), hide=self.hide_title),
]),
dbc.Row([
make_hideable(
dbc.Col([
self.importances.layout(),
]), hide=self.hide_importances),
], style=dict(margin=25)),
dbc.Row([
make_hideable(
dbc.Col([
self.feature_descriptions.layout(),
]), hide=self.hide_descriptions),
], style=dict(margin=25))
])
def to_html(self, state_dict=None, add_header=True):
html = to_html.hide(to_html.title(self.title), hide=self.hide_title)
html += to_html.card_rows(
[to_html.hide(self.importances.to_html(state_dict, add_header=False), self.hide_importances)],
[to_html.hide(self.feature_descriptions.to_html(state_dict, add_header=False), self.hide_descriptions)],
)
if add_header:
return to_html.add_header(html)
return html
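# Illustrative sketch (not part of the original module): composites are
# ExplainerComponents, so a class like ImportancesComposite can be handed
# straight to ExplainerDashboard. A typical (hypothetical) invocation:
#
#     from explainerdashboard import ClassifierExplainer, ExplainerDashboard
#     explainer = ClassifierExplainer(model, X_test, y_test)
#     ExplainerDashboard(explainer, ImportancesComposite).run()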
class ClassifierModelStatsComposite(ExplainerComponent):
def __init__(self, explainer, title="Classification Stats", name=None,
hide_title=True, hide_selector=True,
hide_globalcutoff=False,
hide_modelsummary=False, hide_confusionmatrix=False,
hide_precision=False, hide_classification=False,
hide_rocauc=False, hide_prauc=False,
hide_liftcurve=False, hide_cumprecision=False,
pos_label=None,
bin_size=0.1, quantiles=10, cutoff=0.5, **kwargs):
"""Composite of multiple classifier related components:
- precision graph
- confusion matrix
- lift curve
- classification graph
- roc auc graph
- pr auc graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Decision Trees".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide title. Defaults to True.
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
hide_globalcutoff (bool, optional): hide CutoffPercentileComponent
hide_modelsummary (bool, optional): hide ClassifierModelSummaryComponent
hide_confusionmatrix (bool, optional): hide ConfusionMatrixComponent
hide_precision (bool, optional): hide PrecisionComponent
hide_classification (bool, optional): hide ClassificationComponent
hide_rocauc (bool, optional): hide RocAucComponent
hide_prauc (bool, optional): hide PrAucComponent
hide_liftcurve (bool, optional): hide LiftCurveComponent
hide_cumprecision (bool, optional): hide CumulativePrecisionComponent
pos_label ({int, str}, optional): initial pos label. Defaults to explainer.pos_label
bin_size (float, optional): bin_size for precision plot. Defaults to 0.1.
quantiles (int, optional): number of quantiles for precision plot. Defaults to 10.
cutoff (float, optional): initial cutoff. Defaults to 0.5.
"""
super().__init__(explainer, title, name)
self.summary = ClassifierModelSummaryComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.precision = PrecisionComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.confusionmatrix = ConfusionMatrixComponent(explainer, name=self.name+"2",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.cumulative_precision = CumulativePrecisionComponent(explainer, name=self.name+"3",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.liftcurve = LiftCurveComponent(explainer, name=self.name+"4",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.classification = ClassificationComponent(explainer, name=self.name+"5",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.rocauc = RocAucComponent(explainer, name=self.name+"6",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.prauc = PrAucComponent(explainer, name=self.name+"7",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.cutoffpercentile = CutoffPercentileComponent(explainer, name=self.name+"8",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.cutoffconnector = CutoffConnector(self.cutoffpercentile,
[self.summary, self.precision, self.confusionmatrix, self.liftcurve,
self.cumulative_precision, self.classification, self.rocauc, self.prauc])
def layout(self):
return html.Div([
dbc.Row([
make_hideable(
dbc.Col([
html.H2('Model Performance:')]), hide=self.hide_title),
]),
dbc.Row([
make_hideable(
dbc.Col([
self.cutoffpercentile.layout(),
]), hide=self.hide_globalcutoff),
], style=dict(marginTop=25, marginBottom=25)),
dbc.CardDeck([
make_hideable(self.summary.layout(), hide=self.hide_modelsummary),
make_hideable(self.confusionmatrix.layout(), hide=self.hide_confusionmatrix),
], style=dict(marginBottom=25)),
dbc.CardDeck([
make_hideable(self.precision.layout(), hide=self.hide_precision),
make_hideable(self.classification.layout(), hide=self.hide_classification)
], style=dict(marginBottom=25)),
dbc.CardDeck([
make_hideable(self.rocauc.layout(), hide=self.hide_rocauc),
make_hideable(self.prauc.layout(), hide=self.hide_prauc),
], style=dict(marginBottom=25)),
dbc.CardDeck([
make_hideable(self.liftcurve.layout(), self.hide_liftcurve),
make_hideable(self.cumulative_precision.layout(), self.hide_cumprecision),
], style=dict(marginBottom=25)),
])
def to_html(self, state_dict=None, add_header=True):
html = to_html.hide(to_html.title(self.title), hide=self.hide_title)
html += to_html.card_rows(
[to_html.hide(self.summary.to_html(state_dict, add_header=False), hide=self.hide_modelsummary),
to_html.hide(self.confusionmatrix.to_html(state_dict, add_header=False), hide=self.hide_confusionmatrix)],
[to_html.hide(self.precision.to_html(state_dict, add_header=False), hide=self.hide_precision),
to_html.hide(self.classification.to_html(state_dict, add_header=False), hide=self.hide_classification)],
[to_html.hide(self.rocauc.to_html(state_dict, add_header=False), hide=self.hide_rocauc),
to_html.hide(self.prauc.to_html(state_dict, add_header=False), hide=self.hide_prauc)],
[to_html.hide(self.liftcurve.to_html(state_dict, add_header=False), hide=self.hide_liftcurve),
to_html.hide(self.cumulative_precision.to_html(state_dict, add_header=False), hide=self.hide_cumprecision)]
)
if add_header:
return to_html.add_header(html)
return html
class RegressionModelStatsComposite(ExplainerComponent):
def __init__(self, explainer, title="Regression Stats", name=None,
hide_title=True, hide_modelsummary=False,
hide_predsvsactual=False, hide_residuals=False,
hide_regvscol=False,
logs=False, pred_or_actual="vs_pred", residuals='difference',
col=None, **kwargs):
"""Composite for displaying multiple regression related graphs:
- predictions vs actual plot
- residual plot
- residuals vs feature
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Regression Stats".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide title. Defaults to True.
hide_modelsummary (bool, optional): hide RegressionModelSummaryComponent
hide_predsvsactual (bool, optional): hide PredictedVsActualComponent
hide_residuals (bool, optional): hide ResidualsComponent
hide_regvscol (bool, optional): hide RegressionVsColComponent
logs (bool, optional): Use log axis. Defaults to False.
pred_or_actual (str, optional): plot residuals vs predictions
or vs y (actual). Defaults to "vs_pred".
            residuals (str, {'difference', 'ratio', 'log-ratio'}, optional):
                How to calculate residuals. Defaults to 'difference'.
col ({str, int}, optional): Feature to use for residuals plot. Defaults to None.
"""
super().__init__(explainer, title, name)
assert pred_or_actual in ['vs_actual', 'vs_pred'], \
"pred_or_actual should be 'vs_actual' or 'vs_pred'!"
self.modelsummary = RegressionModelSummaryComponent(explainer,
name=self.name+"0",**kwargs)
self.preds_vs_actual = PredictedVsActualComponent(explainer, name=self.name+"0",
logs=logs, **kwargs)
self.residuals = ResidualsComponent(explainer, name=self.name+"1",
pred_or_actual=pred_or_actual, residuals=residuals, **kwargs)
self.reg_vs_col = RegressionVsColComponent(explainer, name=self.name+"2",
logs=logs, **kwargs)
def layout(self):
return html.Div([
dbc.Row([
make_hideable(
dbc.Col([
html.H2('Model Performance:')]), hide=self.hide_title)
]),
dbc.CardDeck([
make_hideable(self.modelsummary.layout(), hide=self.hide_modelsummary),
make_hideable(self.preds_vs_actual.layout(), hide=self.hide_predsvsactual),
], style=dict(margin=25)),
dbc.CardDeck([
make_hideable(self.residuals.layout(), hide=self.hide_residuals),
make_hideable(self.reg_vs_col.layout(), hide=self.hide_regvscol),
], style=dict(margin=25))
])
def to_html(self, state_dict=None, add_header=True):
html = to_html.hide(to_html.title(self.title), hide=self.hide_title)
html += to_html.card_rows(
[to_html.hide(self.modelsummary.to_html(state_dict, add_header=False), hide=self.hide_modelsummary),
to_html.hide(self.preds_vs_actual.to_html(state_dict, add_header=False), hide=self.hide_predsvsactual)],
[to_html.hide(self.residuals.to_html(state_dict, add_header=False), hide=self.hide_residuals),
to_html.hide(self.reg_vs_col.to_html(state_dict, add_header=False), hide=self.hide_regvscol)],
)
if add_header:
return to_html.add_header(html)
return html
class IndividualPredictionsComposite(ExplainerComponent):
def __init__(self, explainer, title="Individual Predictions", name=None,
hide_predindexselector=False, hide_predictionsummary=False,
hide_contributiongraph=False, hide_pdp=False,
hide_contributiontable=False,
hide_title=False, hide_selector=True, index_check=True,
**kwargs):
"""Composite for a number of component that deal with individual predictions:
- random index selector
- prediction summary
- shap contributions graph
- shap contribution table
- pdp graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Individual Predictions".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_predindexselector (bool, optional): hide ClassifierRandomIndexComponent
or RegressionRandomIndexComponent
hide_predictionsummary (bool, optional): hide ClassifierPredictionSummaryComponent
or RegressionPredictionSummaryComponent
hide_contributiongraph (bool, optional): hide ShapContributionsGraphComponent
hide_pdp (bool, optional): hide PdpComponent
hide_contributiontable (bool, optional): hide ShapContributionsTableComponent
hide_title (bool, optional): hide title. Defaults to False.
index_check (bool, optional): only pass valid indexes from random index
selector to feature input. Defaults to True.
hide_selector(bool, optional): hide all pos label selectors. Defaults to True.
"""
super().__init__(explainer, title, name)
if self.explainer.is_classifier:
self.index = ClassifierRandomIndexComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, **kwargs)
self.summary = ClassifierPredictionSummaryComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
elif self.explainer.is_regression:
self.index = RegressionRandomIndexComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, **kwargs)
self.summary = RegressionPredictionSummaryComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
self.contributions = ShapContributionsGraphComponent(explainer, name=self.name+"2",
hide_selector=hide_selector, **kwargs)
self.pdp = PdpComponent(explainer, name=self.name+"3",
hide_selector=hide_selector, **kwargs)
self.contributions_list = ShapContributionsTableComponent(explainer, name=self.name+"4",
hide_selector=hide_selector, **kwargs)
self.index_connector = IndexConnector(self.index,
[self.summary, self.contributions, self.pdp, self.contributions_list],
explainer=explainer if index_check else None)
def layout(self):
return dbc.Container([
dbc.CardDeck([
make_hideable(self.index.layout(), hide=self.hide_predindexselector),
make_hideable(self.summary.layout(), hide=self.hide_predictionsummary),
], style=dict(marginBottom=25, marginTop=25)),
dbc.CardDeck([
make_hideable(self.contributions.layout(), hide=self.hide_contributiongraph),
make_hideable(self.pdp.layout(), hide=self.hide_pdp),
], style=dict(marginBottom=25, marginTop=25)),
dbc.Row([
dbc.Col([
make_hideable(self.contributions_list.layout(), hide=self.hide_contributiontable),
], md=6),
dbc.Col([
html.Div([]),
], md=6),
])
], fluid=True)
def to_html(self, state_dict=None, add_header=True):
html = to_html.title(self.title)
html += to_html.card_rows(
[to_html.hide(self.index.to_html(state_dict, add_header=False), self.hide_predindexselector),
to_html.hide(self.summary.to_html(state_dict, add_header=False), self.hide_predictionsummary)],
[to_html.hide(self.contributions.to_html(state_dict, add_header=False), self.hide_contributiongraph),
to_html.hide(self.pdp.to_html(state_dict, add_header=False), self.hide_pdp)],
[to_html.hide(self.contributions_list.to_html(state_dict, add_header=False), self.hide_contributiontable)]
)
if add_header:
return to_html.add_header(html)
return html
class WhatIfComposite(ExplainerComponent):
def __init__(self, explainer, title="What if...", name=None,
hide_whatifindexselector=False, hide_inputeditor=False,
hide_whatifprediction=False, hide_whatifcontributiongraph=False,
hide_whatifpdp=False, hide_whatifcontributiontable=False,
hide_title=True, hide_selector=True, index_check=True,
n_input_cols=4, sort='importance', **kwargs):
"""Composite for the whatif component:
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Individual Predictions".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide title. Defaults to True.
hide_selector(bool, optional): hide all pos label selectors. Defaults to True.
hide_whatifindexselector (bool, optional): hide ClassifierRandomIndexComponent
or RegressionRandomIndexComponent
hide_inputeditor (bool, optional): hide FeatureInputComponent
hide_whatifprediction (bool, optional): hide PredictionSummaryComponent
hide_whatifcontributiongraph (bool, optional): hide ShapContributionsGraphComponent
hide_whatifcontributiontable (bool, optional): hide ShapContributionsTableComponent
hide_whatifpdp (bool, optional): hide PdpComponent
index_check (bool, optional): only pass valid indexes from random index
selector to feature input. Defaults to True.
n_input_cols (int, optional): number of columns to divide the feature inputs into.
Defaults to 4.
sort ({'abs', 'high-to-low', 'low-to-high', 'importance'}, optional): sorting of shap values.
Defaults to 'importance'.
"""
super().__init__(explainer, title, name)
if 'hide_whatifcontribution' in kwargs:
print("Warning: hide_whatifcontribution will be deprecated, use hide_whatifcontributiongraph instead!")
self.hide_whatifcontributiongraph = kwargs['hide_whatifcontribution']
self.input = FeatureInputComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, n_input_cols=self.n_input_cols,
**update_params(kwargs, hide_index=True))
if self.explainer.is_classifier:
self.index = ClassifierRandomIndexComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
self.prediction = ClassifierPredictionSummaryComponent(explainer, name=self.name+"2",
feature_input_component=self.input,
hide_star_explanation=True,
hide_selector=hide_selector, **kwargs)
elif self.explainer.is_regression:
self.index = RegressionRandomIndexComponent(explainer, name=self.name+"1", **kwargs)
self.prediction = RegressionPredictionSummaryComponent(explainer, name=self.name+"2",
feature_input_component=self.input, **kwargs)
self.contribgraph = ShapContributionsGraphComponent(explainer, name=self.name+"3",
feature_input_component=self.input,
hide_selector=hide_selector, sort=sort, **kwargs)
self.contribtable = ShapContributionsTableComponent(explainer, name=self.name+"4",
feature_input_component=self.input,
hide_selector=hide_selector, sort=sort, **kwargs)
self.pdp = PdpComponent(explainer, name=self.name+"5",
feature_input_component=self.input,
hide_selector=hide_selector, **kwargs)
self.index_connector = IndexConnector(self.index, self.input,
explainer=explainer if index_check else None)
def layout(self):
return dbc.Container([
dbc.Row([
make_hideable(
dbc.Col([
html.H1(self.title)
]), hide=self.hide_title),
]),
dbc.Row([
make_hideable(
dbc.Col([
self.index.layout(),
], md=7), hide=self.hide_whatifindexselector),
make_hideable(
dbc.Col([
self.prediction.layout(),
], md=5), hide=self.hide_whatifprediction),
], style=dict(marginBottom=15, marginTop=15)),
dbc.CardDeck([
make_hideable(self.input.layout(), hide=self.hide_inputeditor),
], style=dict(marginBottom=15, marginTop=15)),
dbc.CardDeck([
make_hideable(self.contribgraph.layout(), hide=self.hide_whatifcontributiongraph),
make_hideable(self.pdp.layout(), hide=self.hide_whatifpdp),
], style=dict(marginBottom=15, marginTop=15)),
dbc.Row([
make_hideable(
dbc.Col([
self.contribtable.layout()
], md=6), hide=self.hide_whatifcontributiontable),
dbc.Col([], md=6),
])
], fluid=True)
def to_html(self, state_dict=None, add_header=True):
html = to_html.title(self.title)
html += to_html.card_rows(
[to_html.hide(self.index.to_html(state_dict, add_header=False), self.hide_whatifindexselector),
to_html.hide(self.prediction.to_html(state_dict, add_header=False), self.hide_whatifprediction)],
[to_html.hide(self.input.to_html(state_dict, add_header=False), self.hide_inputeditor)],
[to_html.hide(self.contribgraph.to_html(state_dict, add_header=False), self.hide_whatifcontributiongraph),
to_html.hide(self.pdp.to_html(state_dict, add_header=False), self.hide_whatifpdp)],
[to_html.hide(self.contribtable.to_html(state_dict, add_header=False), self.hide_whatifcontributiontable)]
)
if add_header:
return to_html.add_header(html)
return html
class ShapDependenceComposite(ExplainerComponent):
def __init__(self, explainer, title='Feature Dependence', name=None,
hide_selector=True,
hide_shapsummary=False, hide_shapdependence=False,
depth=None, **kwargs):
"""Composite of ShapSummary and ShapDependence component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Dependence".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
hide_shapsummary (bool, optional): hide ShapSummaryComponent
hide_shapdependence (bool, optional): ShapDependenceComponent
depth (int, optional): Number of features to display. Defaults to None.
"""
super().__init__(explainer, title, name)
self.shap_summary = ShapSummaryComponent(
self.explainer, name=self.name+"0",
**update_params(kwargs, hide_selector=hide_selector, depth=depth))
self.shap_dependence = ShapDependenceComponent(
self.explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
self.connector = ShapSummaryDependenceConnector(
self.shap_summary, self.shap_dependence)
def layout(self):
return dbc.Container([
dbc.CardDeck([
make_hideable(self.shap_summary.layout(), hide=self.hide_shapsummary),
make_hideable(self.shap_dependence.layout(), hide=self.hide_shapdependence),
], style=dict(marginTop=25)),
], fluid=True)
def to_html(self, state_dict=None, add_header=True):
html = to_html.title(self.title)
html += to_html.card_rows(
[to_html.hide(self.shap_summary.to_html(state_dict, add_header=False), self.hide_shapsummary),
to_html.hide(self.shap_dependence.to_html(state_dict, add_header=False), self.hide_shapdependence)],
)
if add_header:
return to_html.add_header(html)
return html
class ShapInteractionsComposite(ExplainerComponent):
def __init__(self, explainer, title='Feature Interactions', name=None,
hide_selector=True,
hide_interactionsummary=False, hide_interactiondependence=False,
depth=None, **kwargs):
"""Composite of InteractionSummaryComponent and InteractionDependenceComponent
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Interactions".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
hide_interactionsummary (bool, optional): hide InteractionSummaryComponent
hide_interactiondependence (bool, optional): hide InteractionDependenceComponent
depth (int, optional): Initial number of features to display. Defaults to None.
"""
super().__init__(explainer, title, name)
self.interaction_summary = InteractionSummaryComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, depth=depth, **kwargs)
self.interaction_dependence = InteractionDependenceComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
self.connector = InteractionSummaryDependenceConnector(
self.interaction_summary, self.interaction_dependence)
def layout(self):
return dbc.Container([
dbc.CardDeck([
make_hideable(self.interaction_summary.layout(), hide=self.hide_interactionsummary),
make_hideable(self.interaction_dependence.layout(), hide=self.hide_interactiondependence),
], style=dict(marginTop=25))
], fluid=True)
def to_html(self, state_dict=None, add_header=True):
html = to_html.title(self.title)
html += to_html.card_rows(
[to_html.hide(self.interaction_summary.to_html(state_dict, add_header=False), self.hide_interactionsummary),
to_html.hide(self.interaction_dependence.to_html(state_dict, add_header=False), self.hide_interactiondependence)],
)
if add_header:
return to_html.add_header(html)
return html
class DecisionTreesComposite(ExplainerComponent):
def __init__(self, explainer, title="Decision Trees", name=None,
hide_treeindexselector=False, hide_treesgraph=False,
hide_treepathtable=False, hide_treepathgraph=False,
hide_selector=True, index_check=True, **kwargs):
"""Composite of decision tree related components:
- index selector
- individual decision trees barchart
- decision path table
        - decision path graph
Args:
explainer (Explainer): explainer object constructed with either
RandomForestClassifierExplainer() or RandomForestRegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Decision Trees".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_treeindexselector (bool, optional): hide ClassifierRandomIndexComponent
or RegressionRandomIndexComponent
hide_treesgraph (bool, optional): hide DecisionTreesComponent
hide_treepathtable (bool, optional): hide DecisionPathTableComponent
hide_treepathgraph (bool, optional): DecisionPathGraphComponent
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
index_check (bool, optional): only pass valid indexes from random index
selector to feature input. Defaults to True.
"""
super().__init__(explainer, title, name)
self.trees = DecisionTreesComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, **kwargs)
self.decisionpath_table = DecisionPathTableComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
if explainer.is_classifier:
self.index = ClassifierRandomIndexComponent(explainer, name=self.name+"2",
hide_selector=hide_selector, **kwargs)
elif explainer.is_regression:
self.index = RegressionRandomIndexComponent(explainer, name=self.name+"2",
**kwargs)
self.decisionpath_graph = DecisionPathGraphComponent(explainer, name=self.name+"3",
hide_selector=hide_selector, **kwargs)
self.index_connector = IndexConnector(self.index,
[self.trees, self.decisionpath_table, self.decisionpath_graph],
explainer=explainer if index_check else None)
self.highlight_connector = HighlightConnector(self.trees,
[self.decisionpath_table, self.decisionpath_graph])
def layout(self):
return html.Div([
dbc.Row([
make_hideable(
dbc.Col([
self.index.layout(),
]), hide=self.hide_treeindexselector),
], style=dict(margin=25)),
dbc.Row([
make_hideable(
dbc.Col([
self.trees.layout(),
]), hide=self.hide_treesgraph),
], style=dict(margin=25)),
dbc.Row([
make_hideable(
dbc.Col([
self.decisionpath_table.layout(),
]), hide=self.hide_treepathtable),
], style=dict(margin=25)),
dbc.Row([
make_hideable(
dbc.Col([
self.decisionpath_graph.layout()
]), hide=self.hide_treepathgraph),
], style=dict(margin=25)),
])
def to_html(self, state_dict=None, add_header=True):
html = to_html.title(self.title)
html += to_html.card_rows(
[to_html.hide(self.index.to_html(state_dict, add_header=False), self.hide_treeindexselector)],
[to_html.hide(self.trees.to_html(state_dict, add_header=False), self.hide_treesgraph)],
[to_html.hide(self.decisionpath_table.to_html(state_dict, add_header=False), self.hide_treepathtable)],
)
if add_header:
return to_html.add_header(html)
return html
class SimplifiedClassifierComposite(ExplainerComponent):
def __init__(self, explainer, title="Simple Classifier Explainer", name=None,
hide_title=False, classifier_custom_component='roc_auc',
hide_confusionmatrix=False, hide_classifier_custom_component=False,
hide_shapsummary=False, hide_shapdependence=False,
hide_predindexselector=False, hide_predictionsummary=False,
hide_contributiongraph=False, **kwargs):
"""Composite of multiple classifier related components, on a single tab:
- confusion matrix
- one other model quality indicator: choose from pr auc graph, precision graph,
lift curve, classification graph, or roc auc graph
- shap importance
- shap dependence
- index selector
- index prediction summary
- index shap contribution graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
            title (str, optional): Title of tab or page. Defaults to
                "Simple Classifier Explainer".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide the title. Defaults to False.
classifier_custom_component (str, optional): custom classifier quality indicator
supported by the ClassifierExplainer object. Valid values are:
                'roc_auc', 'metrics', 'pr_auc', 'precision_graph', 'lift_curve',
'classification'. Defaults to 'roc_auc'.
hide_confusionmatrix (bool, optional): hide ConfusionMatrixComponent
hide_classifier_custom_component (bool, optional): hide the chosen classifier_custom_component
hide_shapsummary (bool, optional): hide ShapSummaryComponent
hide_shapdependence (bool, optional): hide ShapDependenceComponent
hide_predindexselector (bool, optional): hide ClassifierRandomIndexComponent
or RegressionRandomIndexComponent
hide_predictionsummary (bool, optional): hide ClassifierPredictionSummaryComponent
or RegressionPredictionSummaryComponent
hide_contributiongraph (bool, optional): hide ShapContributionsGraphComponent
"""
super().__init__(explainer, title=title, name=name)
self.confusionmatrix = ConfusionMatrixComponent(explainer,
**update_params(kwargs, hide_percentage=True, hide_selector=True, hide_normalize=True))
# select custom classifier report metric
if classifier_custom_component == 'metrics':
self.classifier_custom_component = ClassifierModelSummaryComponent(explainer,
**update_params(kwargs, hide_selector=True))
elif classifier_custom_component == 'pr_auc':
self.classifier_custom_component = PrAucComponent(explainer,
**update_params(kwargs, hide_selector=True))
elif classifier_custom_component == 'precision_graph':
self.classifier_custom_component = PrecisionComponent(explainer,
**update_params(kwargs, hide_selector=True))
elif classifier_custom_component == 'lift_curve':
self.classifier_custom_component = LiftCurveComponent(explainer,
**update_params(kwargs, hide_selector=True))
        elif classifier_custom_component == 'classification':
self.classifier_custom_component = ClassificationComponent(explainer,
**update_params(kwargs, hide_selector=True))
elif classifier_custom_component == 'roc_auc':
self.classifier_custom_component = RocAucComponent(explainer,
**update_params(kwargs, hide_selector=True))
        else:
            raise ValueError(
                "ERROR: SimplifiedClassifierDashboard parameter classifier_custom_component "
                "should be in {'metrics', 'roc_auc', 'pr_auc', 'precision_graph', 'lift_curve', 'classification'} "
                f"but you passed {classifier_custom_component}!")
# SHAP summary & dependence
self.shap_summary = ShapSummaryComponent(
explainer, **update_params(kwargs, title="Shap Feature Importances",
hide_index=True, hide_selector=True, depth=None, hide_depth=True))
self.shap_dependence = ShapDependenceComponent(
explainer, **update_params(kwargs, hide_selector=True, hide_index=True, color_col="no_color_col"))
# SHAP contribution, along with prediction summary
self.index = ClassifierRandomIndexComponent(explainer,
hide_selector=True, **kwargs)
self.summary = ClassifierPredictionSummaryComponent(explainer, hide_index=True,
hide_selector=True, **kwargs)
self.contributions = ShapContributionsGraphComponent(explainer, hide_index=True, hide_depth=True,
hide_selector=True, **kwargs)
self.cutoffconnector = CutoffConnector(self.confusionmatrix, self.classifier_custom_component)
self.connector = ShapSummaryDependenceConnector(self.shap_summary, self.shap_dependence)
self.index_connector = IndexConnector(self.index, [self.summary, self.contributions])
def layout(self):
return dbc.Container([
dbc.Row([
make_hideable(
dbc.Col([
html.H1(self.title, id='simple-classifier-composite-title')
]), hide=self.hide_title),
]),
dbc.Row([
dbc.Col([
html.H2("Model performance"),
dbc.CardDeck([
make_hideable(self.confusionmatrix.layout(),
hide=self.hide_confusionmatrix),
make_hideable(self.classifier_custom_component.layout(),
hide=self.hide_classifier_custom_component),
], style=dict(marginTop=25, marginBottom=25)),
])
]),
dbc.Row([
dbc.Col([
html.H3("SHAP values"),
dbc.CardDeck([
make_hideable(self.shap_summary.layout(),
hide=self.hide_shapsummary),
make_hideable(self.shap_dependence.layout(),
hide=self.hide_shapdependence),
], style=dict(marginTop=25, marginBottom=25)),
])
]),
dbc.Row([
dbc.Col([
html.H2("Individual predictions"),
dbc.CardDeck([
make_hideable(self.index.layout(),
hide=self.hide_predindexselector),
make_hideable(self.summary.layout(),
hide=self.hide_predictionsummary),
], style=dict(marginBottom=25, marginTop=25)),
dbc.CardDeck([
make_hideable(self.contributions.layout(),
hide=self.hide_contributiongraph),
], style=dict(marginBottom=25, marginTop=25))
])
]),
], fluid=False)
def to_html(self, state_dict=None, add_header=True):
html = to_html.hide(to_html.title(self.title), hide=self.hide_title)
html += to_html.card_rows(
[to_html.hide(self.confusionmatrix.to_html(state_dict, add_header=False), hide=self.hide_confusionmatrix),
to_html.hide(self.classifier_custom_component.to_html(state_dict, add_header=False), hide=self.hide_classifier_custom_component)],
[to_html.hide(self.shap_summary.to_html(state_dict, add_header=False), hide=self.hide_shapsummary),
to_html.hide(self.shap_dependence.to_html(state_dict, add_header=False), hide=self.hide_shapdependence)],
[to_html.hide(self.index.to_html(state_dict, add_header=False), hide=self.hide_predindexselector),
to_html.hide(self.summary.to_html(state_dict, add_header=False), hide=self.hide_predictionsummary)],
[to_html.hide(self.contributions.to_html(state_dict, add_header=False), hide=self.hide_contributiongraph)]
)
if add_header:
return to_html.add_header(html)
return html
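# Illustrative sketch (not part of the original module): the simplified
# composite as a one-tab dashboard, with the custom quality component swapped
# for the PR AUC plot. A typical (hypothetical) invocation:
#
#     ExplainerDashboard(explainer, SimplifiedClassifierComposite,
#                        classifier_custom_component='pr_auc').run()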
class SimplifiedRegressionComposite(ExplainerComponent):
def __init__(self, explainer, title="Simple Regression Explainer", name=None,
hide_title=False, regression_custom_component='vs_col',
hide_goodness_of_fit=False, hide_regression_custom_component=False,
hide_shapsummary=False, hide_shapdependence=False,
hide_predindexselector=False, hide_predictionsummary=False,
hide_contributiongraph=False, **kwargs):
"""Composite of multiple classifier related components, on a single tab:
- goodness of fit component
        - one other model quality indicator: 'metrics', 'residuals', or 'vs_col'
- shap importance
- shap dependence
- index selector
- index prediction summary
- index shap contribution graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
            title (str, optional): Title of tab or page. Defaults to
                "Simple Regression Explainer".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide the title. Defaults to False.
            regression_custom_component (str, optional): custom regression quality
                indicator supported by the RegressionExplainer object. Valid values
                are: 'metrics', 'residuals', or 'vs_col'. Defaults to 'vs_col'.
hide_goodness_of_fit (bool, optional): hide goodness of fit component
hide_regression_custom_component (bool, optional): hide the chosen
regression_custom_component
hide_shapsummary (bool, optional): hide ShapSummaryComponent
hide_shapdependence (bool, optional): hide ShapDependenceComponent
            hide_predindexselector (bool, optional): hide RegressionRandomIndexComponent
            hide_predictionsummary (bool, optional): hide RegressionPredictionSummaryComponent
hide_contributiongraph (bool, optional): hide ShapContributionsGraphComponent
"""
super().__init__(explainer, title, name)
self.goodness_of_fit = PredictedVsActualComponent(explainer, **kwargs)
# select custom classifier report metric
if regression_custom_component == 'metrics':
self.regression_custom_component = RegressionModelSummaryComponent(explainer, **kwargs)
elif regression_custom_component == 'residuals':
self.regression_custom_component = ResidualsComponent(explainer, **kwargs)
elif regression_custom_component == 'vs_col':
self.regression_custom_component = RegressionVsColComponent(explainer,
**update_params(kwargs, display='predicted'))
else:
raise ValueError(
"ERROR: SimplifiedRegressionDashboard parameter "
"regression_custom_component should be in {'metrics', 'residuals', 'vs_col'}"
f" but you passed {regression_custom_component}!")
# SHAP summary & dependence
self.shap_summary = ShapSummaryComponent(
explainer, **update_params(kwargs, title="Shap Feature Importances",
hide_index=True, depth=None, hide_depth=True))
self.shap_dependence = ShapDependenceComponent(
explainer, **update_params(kwargs, hide_index=True))
# SHAP contribution, along with prediction summary
self.index = RegressionRandomIndexComponent(explainer, **kwargs)
self.summary = RegressionPredictionSummaryComponent(explainer, hide_index=True, **kwargs)
self.contributions = ShapContributionsGraphComponent(explainer,
**update_params(kwargs, hide_index=True, hide_depth=True))
self.connector = ShapSummaryDependenceConnector(self.shap_summary, self.shap_dependence)
self.index_connector = IndexConnector(self.index, [self.summary, self.contributions])
def layout(self):
return dbc.Container([
dbc.Row([
make_hideable(
dbc.Col([
html.H1(self.title, id='simple-regression-composite-title'),
]), hide=self.hide_title),
]),
dbc.Row([
dbc.Col([
html.H2("Model performance"),
dbc.CardDeck([
make_hideable(self.goodness_of_fit.layout(),
hide=self.hide_goodness_of_fit),
make_hideable(self.regression_custom_component.layout(),
hide=self.hide_regression_custom_component),
], style=dict(marginTop=25, marginBottom=25)),
])
]),
dbc.Row([
dbc.Col([
html.H3("SHAP values"),
dbc.CardDeck([
make_hideable(self.shap_summary.layout(),
hide=self.hide_shapsummary),
make_hideable(self.shap_dependence.layout(),
hide=self.hide_shapdependence),
], style=dict(marginTop=25, marginBottom=25)),
])
]),
dbc.Row([
dbc.Col([
html.H2("Individual predictions"),
dbc.CardDeck([
make_hideable(self.index.layout(),
hide=self.hide_predindexselector),
make_hideable(self.summary.layout(),
hide=self.hide_predictionsummary),
], style=dict(marginBottom=25, marginTop=25)),
dbc.CardDeck([
make_hideable(self.contributions.layout(),
hide=self.hide_contributiongraph),
], style=dict(marginBottom=25, marginTop=25))
])
])
], fluid=False)
def to_html(self, state_dict=None, add_header=True):
html = to_html.hide(to_html.title(self.title), hide=self.hide_title)
html += to_html.card_rows(
[to_html.hide(self.goodness_of_fit.to_html(state_dict, add_header=False), hide=self.hide_goodness_of_fit),
to_html.hide(self.regression_custom_component.to_html(state_dict, add_header=False), hide=self.hide_regression_custom_component)],
[to_html.hide(self.shap_summary.to_html(state_dict, add_header=False), hide=self.hide_shapsummary),
to_html.hide(self.shap_dependence.to_html(state_dict, add_header=False), hide=self.hide_shapdependence)],
[to_html.hide(self.index.to_html(state_dict, add_header=False), hide=self.hide_predindexselector),
to_html.hide(self.summary.to_html(state_dict, add_header=False), hide=self.hide_predictionsummary)],
[to_html.hide(self.contributions.to_html(state_dict, add_header=False), hide=self.hide_contributiongraph)]
)
if add_header:
return to_html.add_header(html)
return html |
third_party/chromite/cli/cros/lint_unittest.py | zipated/src | 2,151 | 12674833 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the lint module."""
from __future__ import print_function
import collections
import StringIO
from chromite.cli.cros import lint
from chromite.lib import cros_test_lib
# pylint: disable=protected-access
class TestNode(object):
"""Object good enough to stand in for lint funcs"""
Args = collections.namedtuple('Args', ('args', 'vararg', 'kwarg'))
Arg = collections.namedtuple('Arg', ('name',))
def __init__(self, doc='', fromlineno=0, path='foo.py', args=(), vararg='',
kwarg='', names=None, lineno=0, name='module',
display_type='Module', col_offset=None):
if names is None:
names = [('name', None)]
self.doc = doc
self.lines = doc.split('\n')
self.fromlineno = fromlineno
self.lineno = lineno
self.file = path
self.args = self.Args(args=[self.Arg(name=x) for x in args],
vararg=vararg, kwarg=kwarg)
self.names = names
self.name = name
self._display_type = display_type
self.col_offset = col_offset
def argnames(self):
return [arg.name for arg in self.args.args]
def display_type(self):
return self._display_type
class StatStub(object):
"""Dummy object to stand in for stat checks."""
def __init__(self, size=0, mode=0o644):
self.st_size = size
self.st_mode = mode
class CheckerTestCase(cros_test_lib.TestCase):
"""Helpers for Checker modules"""
def add_message(self, msg_id, node=None, line=None, args=None):
"""Capture lint checks"""
# We include node.doc here explicitly so the pretty assert message
    # includes it in the output automatically.
doc = node.doc if node else ''
self.results.append((msg_id, doc, line, args))
def setUp(self):
assert hasattr(self, 'CHECKER'), 'TestCase must set CHECKER'
self.results = []
self.checker = self.CHECKER()
self.checker.add_message = self.add_message
class DocStringCheckerTest(CheckerTestCase):
"""Tests for DocStringChecker module"""
GOOD_FUNC_DOCSTRINGS = (
'Some string',
"""Short summary
Body of text.
""",
"""line o text
Body and comments on
more than one line.
Args:
moo: cow
Returns:
some value
Raises:
something else
""",
"""Short summary.
Args:
fat: cat
Yields:
a spoon
""",
"""Don't flag args variables as sections.
Args:
return: Foo!
""",
)
BAD_FUNC_DOCSTRINGS = (
"""
bad first line
""",
"""The first line is good
but the second one isn't
""",
""" whitespace is wrong""",
"""whitespace is wrong """,
""" whitespace is wrong
Multiline tickles differently.
""",
"""Should be no trailing blank lines
Returns:
a value
""",
"""ok line
cuddled end""",
"""we want Args, not Arguments
Arguments:
some: arg
""",
"""section order is wrong here
Raises:
It raised.
Returns:
It returned
""",
"""sections are duplicated
Returns:
True
Returns:
or was it false
""",
"""sections lack whitespace between them
Args:
foo: bar
Returns:
yeah
""",
"""yields is misspelled
Yield:
a car
""",
"""Section name has bad spacing
Args:\x20\x20\x20
key: here
""",
"""too many blank lines
Returns:
None
""",
"""wrongly uses javadoc
@returns None
""",
"""the indentation is incorrect
Args:
some: day
""",
"""the final indentation is incorrect
Blah.
""",
)
# The current linter isn't good enough yet to detect these.
TODO_BAD_FUNC_DOCSTRINGS = (
"""The returns section isn't a proper section
Args:
bloop: de
returns something
""",
)
CHECKER = lint.DocStringChecker
def testGood_visit_function(self):
"""Allow known good docstrings"""
for dc in self.GOOD_FUNC_DOCSTRINGS:
self.results = []
node = TestNode(doc=dc, display_type=None, col_offset=4)
self.checker.visit_function(node)
self.assertEqual(self.results, [],
msg='docstring was not accepted:\n"""%s"""' % dc)
def testBad_visit_function(self):
"""Reject known bad docstrings"""
for dc in self.BAD_FUNC_DOCSTRINGS:
self.results = []
node = TestNode(doc=dc, display_type=None, col_offset=4)
self.checker.visit_function(node)
self.assertNotEqual(self.results, [],
msg='docstring was not rejected:\n"""%s"""' % dc)
def testSmoke_visit_module(self):
"""Smoke test for modules"""
self.checker.visit_module(TestNode(doc='foo'))
self.assertEqual(self.results, [])
self.checker.visit_module(TestNode(doc='', path='/foo/__init__.py'))
self.assertEqual(self.results, [])
def testSmoke_visit_class(self):
"""Smoke test for classes"""
self.checker.visit_class(TestNode(doc='bar'))
def testGood_check_first_line(self):
"""Verify _check_first_line accepts good inputs"""
docstrings = (
'Some string',
)
for dc in docstrings:
self.results = []
node = TestNode(doc=dc)
self.checker._check_first_line(node, node.lines)
self.assertEqual(self.results, [],
msg='docstring was not accepted:\n"""%s"""' % dc)
def testBad_check_first_line(self):
"""Verify _check_first_line rejects bad inputs"""
docstrings = (
'\nSome string\n',
)
for dc in docstrings:
self.results = []
node = TestNode(doc=dc)
self.checker._check_first_line(node, node.lines)
self.assertEqual(len(self.results), 1)
def testGood_check_second_line_blank(self):
"""Verify _check_second_line_blank accepts good inputs"""
docstrings = (
'Some string\n\nThis is the third line',
'Some string',
)
for dc in docstrings:
self.results = []
node = TestNode(doc=dc)
self.checker._check_second_line_blank(node, node.lines)
self.assertEqual(self.results, [],
msg='docstring was not accepted:\n"""%s"""' % dc)
def testBad_check_second_line_blank(self):
"""Verify _check_second_line_blank rejects bad inputs"""
docstrings = (
'Some string\nnonempty secondline',
)
for dc in docstrings:
self.results = []
node = TestNode(doc=dc)
self.checker._check_second_line_blank(node, node.lines)
self.assertEqual(len(self.results), 1)
def testGoodFuncVarKwArg(self):
"""Check valid inputs for *args and **kwargs"""
for vararg in (None, 'args', '_args'):
for kwarg in (None, 'kwargs', '_kwargs'):
self.results = []
node = TestNode(vararg=vararg, kwarg=kwarg)
self.checker._check_func_signature(node)
self.assertEqual(len(self.results), 0)
def testMisnamedFuncVarKwArg(self):
"""Reject anything but *args and **kwargs"""
for vararg in ('arg', 'params', 'kwargs', '_moo'):
self.results = []
node = TestNode(vararg=vararg)
self.checker._check_func_signature(node)
self.assertEqual(len(self.results), 1)
for kwarg in ('kwds', '_kwds', 'args', '_moo'):
self.results = []
node = TestNode(kwarg=kwarg)
self.checker._check_func_signature(node)
self.assertEqual(len(self.results), 1)
def testGoodFuncArgs(self):
"""Verify normal args in Args are allowed"""
datasets = (
("""args are correct, and cls is ignored
Args:
moo: cow
""",
('cls', 'moo',), None, None,
),
("""args are correct, and self is ignored
Args:
moo: cow
*args: here
""",
('self', 'moo',), 'args', 'kwargs',
),
("""args are allowed to wrap
Args:
moo:
a big fat cow
that takes many lines
to describe its fatness
""",
('moo',), None, 'kwargs',
),
)
for dc, args, vararg, kwarg in datasets:
self.results = []
node = TestNode(doc=dc, args=args, vararg=vararg, kwarg=kwarg)
self.checker._check_all_args_in_doc(node, node.lines)
self.assertEqual(len(self.results), 0)
def testBadFuncArgs(self):
"""Verify bad/missing args in Args are caught"""
datasets = (
("""missing 'bar'
Args:
moo: cow
""",
('moo', 'bar',),
),
("""missing 'cow' but has 'bloop'
Args:
moo: cow
""",
('bloop',),
),
("""too much space after colon
Args:
moo: cow
""",
('moo',),
),
("""not enough space after colon
Args:
moo:cow
""",
('moo',),
),
)
for dc, args in datasets:
self.results = []
node = TestNode(doc=dc, args=args)
self.checker._check_all_args_in_doc(node, node.lines)
self.assertEqual(len(self.results), 1)
class ChromiteLoggingCheckerTest(CheckerTestCase):
"""Tests for ChromiteLoggingChecker module"""
CHECKER = lint.ChromiteLoggingChecker
def testLoggingImported(self):
"""Test that import logging is flagged."""
node = TestNode(names=[('logging', None)], lineno=15)
self.checker.visit_import(node)
self.assertEqual(self.results, [('R9301', '', 15, None)])
def testLoggingNotImported(self):
"""Test that importing something else (not logging) is not flagged."""
node = TestNode(names=[('myModule', None)], lineno=15)
self.checker.visit_import(node)
self.assertEqual(self.results, [])
class SourceCheckerTest(CheckerTestCase):
"""Tests for SourceChecker module"""
CHECKER = lint.SourceChecker
def _testShebang(self, shebangs, exp, mode):
"""Helper for shebang tests"""
for shebang in shebangs:
self.results = []
node = TestNode()
stream = StringIO.StringIO(shebang)
st = StatStub(size=len(shebang), mode=mode)
self.checker._check_shebang(node, stream, st)
self.assertEqual(len(self.results), exp,
msg='processing shebang failed: %r' % shebang)
def testBadShebang(self):
"""Verify _check_shebang rejects bad shebangs"""
shebangs = (
'#!/usr/bin/python\n',
'#! /usr/bin/python2 \n',
'#!/usr/bin/env python\n',
'#! /usr/bin/env python2 \n',
'#!/usr/bin/python2\n',
)
self._testShebang(shebangs, 1, 0o755)
def testGoodShebangNoExec(self):
"""Verify _check_shebang rejects shebangs on non-exec files"""
shebangs = (
'#!/usr/bin/env python2\n',
'#!/usr/bin/env python3\n',
)
self._testShebang(shebangs, 1, 0o644)
def testGoodShebang(self):
"""Verify _check_shebang accepts good shebangs"""
shebangs = (
'#!/usr/bin/env python2\n',
'#!/usr/bin/env python3\n',
'#!/usr/bin/env python2\t\n',
)
self._testShebang(shebangs, 0, 0o755)
def testGoodUnittestName(self):
"""Verify _check_module_name accepts good unittest names"""
module_names = (
'lint_unittest',
)
for name in module_names:
node = TestNode(name=name)
self.results = []
self.checker._check_module_name(node)
self.assertEqual(len(self.results), 0)
def testBadUnittestName(self):
"""Verify _check_module_name accepts good unittest names"""
module_names = (
'lint_unittests',
)
for name in module_names:
node = TestNode(name=name)
self.results = []
self.checker._check_module_name(node)
self.assertEqual(len(self.results), 1)
|
old_jobs/guess_sklearn_ma_daily_job.py | welldo/stock | 5,309 | 12674847 | <reponame>welldo/stock<gh_stars>1000+
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import libs.common as common
import pandas as pd
import numpy as np
import math
import datetime
import sklearn as skl
from sklearn import datasets, linear_model
# https://github.com/udacity/machine-learning/issues/202
# The sklearn.cross_validation package is deprecated; use sklearn.model_selection instead.
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
# Name of the database table to operate on.
table_name = "guess_sklearn_ma_daily"
# Process the data in batches.
def stat_all_batch(tmp_datetime):
datetime_str = (tmp_datetime).strftime("%Y-%m-%d")
datetime_int = (tmp_datetime).strftime("%Y%m%d")
print("datetime_str:", datetime_str)
print("datetime_int:", datetime_int)
try:
# Delete old data.
del_sql = " DELETE FROM `stock_data`.`%s` WHERE `date`= %s " % (table_name, datetime_int)
print("del_sql:", del_sql)
common.insert(del_sql)
except Exception as e:
print("error :", e)
sql_count = """
SELECT count(1) FROM stock_data.ts_today_all WHERE `date` = %s and `trade` > 0 and `open` > 0 and trade <= 20
and `code` not like %s and `name` not like %s
"""
# Logic changed to include SME-board stocks. SME board: 002, ChiNext: 300. and `code` not like %s and `code` not like %s and `name` not like %s
# count = common.select_count(sql_count, params=[datetime_int, '002%', '300%', '%st%'])
count = common.select_count(sql_count, params=[datetime_int, '300%', '%st%'])
print("count :", count)
batch_size = 100
end = int(math.ceil(float(count) / batch_size) * batch_size)
print(end)
# for i in range(0, end, batch_size):
for i in range(0, end, batch_size):
print("loop :", i)
# Query today's qualifying stock data. Excluded: ChiNext stocks, SME-board stocks and all ST stocks
# #`code` not like '002%' and `code` not like '300%' and `name` not like '%st%'
sql_1 = """
SELECT `date`, `code`, `name`, `changepercent`, `trade`, `open`, `high`, `low`,
`settlement`, `volume`, `turnoverratio`, `amount`, `per`, `pb`, `mktcap`, `nmc`
FROM stock_data.ts_today_all WHERE `date` = %s and `trade` > 0 and `open` > 0 and trade <= 20
and `code` not like %s and `name` not like %s limit %s , %s
"""
print(sql_1)
# data = pd.read_sql(sql=sql_1, con=common.engine(), params=[datetime_int, '002%', '300%', '%st%', i, batch_size])
data = pd.read_sql(sql=sql_1, con=common.engine(), params=[datetime_int, '300%', '%st%', i, batch_size])
data = data.drop_duplicates(subset="code", keep="last")
print("########data[trade]########:", len(data))
# Fill the new columns using trade as a placeholder value
stock_sklearn = pd.DataFrame({
"date": data["date"], "code": data["code"], "next_close": data["trade"],
"sklearn_score": data["trade"]}, index=data.index.values)
print(stock_sklearn.head())
stock_sklearn_apply = stock_sklearn.apply(apply_sklearn, axis=1) # , axis=1)
# Column cleanup.
del stock_sklearn_apply["date"]  # Delete the date column before merging.
# Merge the data.
data_new = pd.merge(data, stock_sklearn_apply, on=['code'], how='left')
# for index, row in data.iterrows():
# next_stock, score = stat_index_all(row, i)
# print(next_stock, score)
data_new["next_close"] = data_new["next_close"].round(2) # 数据保留4位小数
data_new["sklearn_score"] = data_new["sklearn_score"].round(2) # 数据保留2位小数
data_new["trade_float32"] = data["trade"].astype('float32', copy=False)
data_new["up_rate"] = (data_new["next_close"] - data_new["trade_float32"]) * 100 / data_new["trade_float32"]
data_new["up_rate"] = data_new["up_rate"].round(2) # 数据保留2位小数
del data_new["trade_float32"]
try:
common.insert_db(data_new, table_name, False, "`date`,`code`")
print("insert_db")
except Exception as e:
print("error :", e)
# Drop the name column.
del data_new["name"]
print(data_new)
# code date next_close sklearn_score
def apply_sklearn(data):
# Name of the database table to operate on.
print("########stat_index_all########:", len(data))
date = data["date"]
code = data["code"]
print(date, code)
date_end = datetime.datetime.strptime(date, "%Y%m%d")
date_start = (date_end + datetime.timedelta(days=-300)).strftime("%Y-%m-%d")
date_end = date_end.strftime("%Y-%m-%d")
print(code, date_start, date_end)
# open high close low volume price_change p_change ma5 ma10 ma20 v_ma5 v_ma10 v_ma20 turnover
stock_X = common.get_hist_data_cache(code, date_start, date_end)
# Guard against empty data: return zeros if nothing was fetched.
if stock_X is None:
return list([code, date, 0.0, 0.0])
stock_X = stock_X.sort_index(0)  # Sort the data by date.
stock_y = pd.Series(stock_X["close"].values)  # Labels.
stock_X_next = stock_X.iloc[len(stock_X) - 1]
print("########################### stock_X_next date:", stock_X_next)
# Use today's trading price and 13 indicators to predict tomorrow's price.
# Shift the data so today's features are paired with tomorrow's closing price.
stock_X = stock_X.drop(stock_X.index[len(stock_X) - 1])  # Drop the last row.
stock_y = stock_y.drop(stock_y.index[0])  # Drop the first row.
# print("########################### stock_X date:", stock_X)
# Drop close, i.e. the closing price.
del stock_X["close"]
del stock_X_next["close"]
model = linear_model.LinearRegression()
# model = KNeighborsClassifier()
model.fit(stock_X.values, stock_y)
# print("############## test_akshare & target #############")
# print("############## coef_ & intercept_ #############")
# print(model.coef_)  # Coefficients.
# print(model.intercept_)  # Intercept.
next_close = model.predict([stock_X_next.values])
if len(next_close) == 1:
next_close = next_close[0]
sklearn_score = model.score(stock_X.values, stock_y)
print("score:", sklearn_score) # 评分
return list([code, date, next_close, sklearn_score * 100])
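# Illustration of the one-step shift performed in apply_sklearn above
# (a minimal sketch with made-up values, not part of the original job):
#   stock_X row (day t indicators)   ->   stock_y value (day t+1 close)
#   day 1 indicators                 ->   day 2 close
#   day 2 indicators                 ->   day 3 close
# stock_X_next holds the newest day's indicators; its target close is
# unknown and is exactly what model.predict() estimates.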
# Main entry point.
if __name__ == '__main__':
# Pass the batch function through as a callback.
tmp_datetime = common.run_with_args(stat_all_batch)
|
todo/parser/subparsers/todo.py | tomasdanjonsson/td-cli | 154 | 12674872 | <reponame>tomasdanjonsson/td-cli
import argparse
from todo.constants import COMMANDS
from todo.parser.base import BaseParser
class TodoParser(BaseParser):
"""
usage: td [id] {get,delete,uncomplete,complete,edit} ...
manage todo
positional arguments:
id the id of the todo
{...} commands
get (g) show todo's details
delete (d) delete todo
uncomplete (u) uncomplete todo
complete (c) complete todo
edit (e) edit todo
optional arguments:
-h, --help show this help message and exit
`td [id]` defaults to `td [id] get`
You don't have to specify the whole `id`, a substring will do
"""
command = COMMANDS.GET_TODO
def _add_arguments(self):
self.parser.add_argument("id", action="store", help="the id of the todo")
subparser = self.parser.add_subparsers(dest="command", help="commands")
get_parser = self._add_parser(subparser, "get", aliases=["g"], help="get todo")
get_parser.set_defaults(command=COMMANDS.GET_TODO)
get_parser.usage = "td [id]\n td [id] get\n td [id] g"
get_parser.epilog = "`td [id]` is the shortcut to `td [id] get`"
get_parser.description = "show todo's details"
delete_parser = self._add_parser(subparser, "delete", aliases=["d"], help="delete todo")
delete_parser.add_argument(
"--yes",
"-y",
dest="skip_prompt",
action="store_true",
help="skip yes/no prompt when deleting todo",
)
delete_parser.set_defaults(command=COMMANDS.DELETE_TODO)
delete_parser.usage = "td [id] delete [-yes]\n td [id] d [-y]"
delete_parser.description = "delete todo"
uncomplete_parser = self._add_parser(
subparser, "uncomplete", aliases=["u"], help="uncomplete todo"
)
uncomplete_parser.set_defaults(command=COMMANDS.UNCOMPLETE_TODO)
uncomplete_parser.usage = "td [id] uncomplete\n td [id] u"
uncomplete_parser.description = "uncomplete todo"
complete_parser = self._add_parser(
subparser, "complete", aliases=["c"], help="complete todo"
)
complete_parser.set_defaults(command=COMMANDS.COMPLETE_TODO)
complete_parser.usage = "td [id] complete\n td [id] c"
complete_parser.description = "complete todo"
edit_parser = self._add_parser(
subparser,
"edit",
aliases=["e"],
help="edit todo",
formatter_class=argparse.RawTextHelpFormatter,
epilog="""If no optional arguments are provided, the todo will be
opened in your editor where you can edit the todo's details.
The editor defaults to `vi`, but you can choose your preferred one by setting:
```
[settings]
editor: <your_editor>
```
in ~/.td.cfg
""",
)
edit_parser.add_argument("--name", "-n", action="store", help="update todo's name")
edit_parser.add_argument("--details", "-d", action="store", help="update todo's detail")
edit_parser.add_argument("--group", "-g", action="store", help="set todo's group")
edit_parser.set_defaults(command=COMMANDS.EDIT_TODO)
edit_parser.usage = "td [id] edit [--name NAME] [--details DETAILS]\n td [id] e [-n NAME] [-d DETAILS]"
edit_parser.description = "edit todo"
|
docs/source/examples/main.py | torarvid/aio-pika | 832 | 12674878 | <reponame>torarvid/aio-pika
import asyncio
from aio_pika import connect_robust, Message
async def main(loop):
connection = await connect_robust(
"amqp://guest:[email protected]/", loop=loop
)
queue_name = "test_queue"
routing_key = "test_queue"
# Creating channel
channel = await connection.channel()
# Declaring exchange
exchange = await channel.declare_exchange("direct", auto_delete=True)
# Declaring queue
queue = await channel.declare_queue(queue_name, auto_delete=True)
# Binding queue
await queue.bind(exchange, routing_key)
await exchange.publish(
Message(
bytes("Hello", "utf-8"),
content_type="text/plain",
headers={"foo": "bar"},
),
routing_key,
)
# Receiving message
incoming_message = await queue.get(timeout=5)
# Confirm message
await incoming_message.ack()
await queue.unbind(exchange, routing_key)
await queue.delete()
await connection.close()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
|
usaspending_api/awards/v2/urls_subawards.py | g4brielvs/usaspending-api | 217 | 12674897 | <gh_stars>100-1000
from django.conf.urls import url
from usaspending_api.awards.v2.views.subawards import SubawardsViewSet
urlpatterns = [url(r"^$", SubawardsViewSet.as_view())]
|
opensfm/test/test_stats.py | ricklentz/OpenSfM | 2,535 | 12674900 | <filename>opensfm/test/test_stats.py<gh_stars>1000+
from opensfm import stats, types
from opensfm.synthetic_data import synthetic_dataset, synthetic_scene
def test_processing_statistics_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
processing_statistics = stats.processing_statistics(dataset, [reference])
assert list(processing_statistics.keys()) == ["steps_times", "date", "area"]
assert processing_statistics["steps_times"] == {
"Feature Extraction": -1,
"Features Matching": -1,
"Tracks Merging": -1,
"Reconstruction": -1,
"Total Time": 0,
}
assert processing_statistics["date"] == "unknown"
assert 3500 < processing_statistics["area"] < 3600
def test_processing_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
processing_statistics = stats.processing_statistics(dataset, [null_scene])
assert list(processing_statistics.keys()) == ["steps_times", "date", "area"]
assert processing_statistics["steps_times"] == {
"Feature Extraction": -1,
"Features Matching": -1,
"Tracks Merging": -1,
"Reconstruction": -1,
"Total Time": 0,
}
assert processing_statistics["date"] == "unknown"
assert processing_statistics["area"] == -1
def test_features_statistics_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
features_statistics = stats.features_statistics(
dataset, scene_synthetic.tracks_manager, [reference]
)
assert list(features_statistics.keys()) == [
"detected_features",
"reconstructed_features",
]
assert (
features_statistics["detected_features"]
== features_statistics["reconstructed_features"]
)
assert features_statistics["reconstructed_features"] == {
"min": 303,
"max": 1065,
"mean": 841,
"median": 884,
}
def test_features_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
features_statistics = stats.features_statistics(
dataset, scene_synthetic.tracks_manager, [null_scene]
)
assert list(features_statistics.keys()) == [
"detected_features",
"reconstructed_features",
]
assert (
features_statistics["detected_features"]
== features_statistics["reconstructed_features"]
)
assert features_statistics["reconstructed_features"] == {
"min": -1,
"max": -1,
"mean": -1,
"median": -1,
}
def test_reconstruction_statistics_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
reconstruction_statistics = stats.reconstruction_statistics(
dataset, scene_synthetic.tracks_manager, [reference]
)
assert reconstruction_statistics["components"] == 1
assert not reconstruction_statistics["has_gps"]
assert not reconstruction_statistics["has_gcp"]
assert 4900 < reconstruction_statistics["initial_points_count"] < 5000
assert reconstruction_statistics["initial_shots_count"] == 20
assert 4900 < reconstruction_statistics["reconstructed_points_count"] < 5000
assert reconstruction_statistics["reconstructed_shots_count"] == 20
assert 16800 < reconstruction_statistics["observations_count"] < 16900
assert 3.3 < reconstruction_statistics["average_track_length"] < 3.4
assert 3.4 < reconstruction_statistics["average_track_length_over_two"] < 3.5
assert len(reconstruction_statistics["histogram_track_length"]) == 5
assert 0.15 < reconstruction_statistics["reprojection_error_normalized"] < 0.16
assert 1.25 < reconstruction_statistics["reprojection_error_pixels"] < 1.28
assert len(reconstruction_statistics["reprojection_histogram_normalized"][0]) == 30
assert len(reconstruction_statistics["reprojection_histogram_normalized"][1]) == 31
assert len(reconstruction_statistics["reprojection_histogram_pixels"][0]) == 30
assert len(reconstruction_statistics["reprojection_histogram_pixels"][1]) == 31
def test_reconstruction_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
reconstruction_statistics = stats.reconstruction_statistics(
dataset, scene_synthetic.tracks_manager, [null_scene]
)
assert reconstruction_statistics["components"] == 1
assert not reconstruction_statistics["has_gps"]
assert not reconstruction_statistics["has_gcp"]
assert 4900 < reconstruction_statistics["initial_points_count"] < 5000
assert reconstruction_statistics["initial_shots_count"] == 0
assert reconstruction_statistics["reconstructed_points_count"] == 0
assert reconstruction_statistics["reconstructed_shots_count"] == 0
assert reconstruction_statistics["observations_count"] == 0
assert reconstruction_statistics["average_track_length"] == -1
assert reconstruction_statistics["average_track_length_over_two"] == -1
assert len(reconstruction_statistics["histogram_track_length"]) == 0
assert reconstruction_statistics["reprojection_error_normalized"] == -1.0
assert reconstruction_statistics["reprojection_error_pixels"] == -1.0
assert len(reconstruction_statistics["reprojection_histogram_normalized"][0]) == 0
assert len(reconstruction_statistics["reprojection_histogram_normalized"][1]) == 0
assert len(reconstruction_statistics["reprojection_histogram_pixels"][0]) == 0
assert len(reconstruction_statistics["reprojection_histogram_pixels"][1]) == 0
def test_cameras_statistics_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
cameras_statistics = stats.cameras_statistics(dataset, [reference])
assert cameras_statistics == {
"1": {
"initial_values": {"k1": -0.1, "k2": 0.01, "focal": 0.7},
"optimized_values": {"k1": -0.1, "k2": 0.01, "focal": 0.7},
"bias": {
"rotation": [-0.0, -0.0, -0.0],
"scale": 1.0,
"translation": [0.0, 0.0, 0.0],
},
}
}
def test_cameras_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
cameras_statistics = stats.cameras_statistics(dataset, [null_scene])
assert cameras_statistics == {}
def test_rig_statistics_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
rig_statistics = stats.rig_statistics(dataset, [reference])
assert rig_statistics == {}
def test_rig_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
cameras_statistics = stats.rig_statistics(dataset, [null_scene])
assert cameras_statistics == {}
def test_gps_errors_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
gps_errors = stats.gps_errors([reference])
assert gps_errors == {}
def test_gps_errors_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
gps_errors = stats.gps_errors([null_scene])
assert gps_errors == {}
def test_gcp_errors_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
gcp_errors = stats.gcp_errors(dataset, [reference])
assert gcp_errors == {}
def test_gcp_errors_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
gcp_errors = stats.gcp_errors(dataset, [null_scene])
assert gcp_errors == {}
|
mayan/apps/testing/tests/mixins.py | bonitobonita24/Mayan-EDMS | 343 | 12674923 | <gh_stars>100-1000
import glob
import importlib
import logging
import os
import random
import time
from furl import furl
import psutil
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.webdriver import WebDriver
from django.apps import apps
from django.conf import settings
from django.conf.urls import url
from django.contrib.contenttypes.models import ContentType
from django.db import connection, connections, models
from django.db.models.signals import post_save, pre_save
from django.http import HttpResponse
from django.http.response import FileResponse
from django.template import Context, Template
from django.test.utils import ContextList
from django.urls import clear_url_caches, reverse
from django.utils.encoding import DjangoUnicodeDecodeError, force_text
from stronghold.decorators import public
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.permissions.tests.mixins import PermissionTestMixin
from mayan.apps.storage.settings import setting_temporary_directory
from ..literals import (
TEST_SERVER_HOST, TEST_SERVER_SCHEME, TEST_VIEW_NAME, TEST_VIEW_URL
)
class ClientMethodsTestCaseMixin:
def _build_verb_kwargs(self, viewname=None, path=None, *args, **kwargs):
data = kwargs.pop('data', None) or {}
follow = kwargs.pop('follow', False)
query = kwargs.pop('query', None) or {}
headers = kwargs.pop('headers', None) or {}
if viewname:
path = reverse(viewname=viewname, *args, **kwargs)
path = furl(url=path)
path.args.update(query)
result = {'follow': follow, 'data': data, 'path': path.tostr()}
result.update(headers)
return result
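# Example (hypothetical view name; a minimal usage sketch):
#   self.get(viewname='rest_api:api_root', query={'page': 2})
# resolves the URL via reverse(), appends ?page=2 and issues the GET
# through the Django test client.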
def delete(self, viewname=None, path=None, *args, **kwargs):
return self.client.delete(
**self._build_verb_kwargs(
path=path, viewname=viewname, *args, **kwargs
)
)
def generic(self, method, viewname=None, path=None, *args, **kwargs):
return self.client.generic(
method=method, **self._build_verb_kwargs(
path=path, viewname=viewname, *args, **kwargs
)
)
def get(self, viewname=None, path=None, *args, **kwargs):
return self.client.get(
**self._build_verb_kwargs(
path=path, viewname=viewname, *args, **kwargs
)
)
def patch(self, viewname=None, path=None, *args, **kwargs):
return self.client.patch(
**self._build_verb_kwargs(
path=path, viewname=viewname, *args, **kwargs
)
)
def post(self, viewname=None, path=None, *args, **kwargs):
return self.client.post(
**self._build_verb_kwargs(
path=path, viewname=viewname, *args, **kwargs
)
)
def put(self, viewname=None, path=None, *args, **kwargs):
return self.client.put(
**self._build_verb_kwargs(
path=path, viewname=viewname, *args, **kwargs
)
)
class ConnectionsCheckTestCaseMixin:
_open_connections_check_enable = True
def _get_open_connections_count(self):
return len(connections.all())
def setUp(self):
super().setUp()
self._connections_count = self._get_open_connections_count()
def tearDown(self):
if self._open_connections_check_enable:
self.assertEqual(
self._connections_count, self._get_open_connections_count(),
msg='Database connection leak. The number of database '
'connections at the start and at the end of the test is not '
'the same.'
)
super().tearDown()
class ContentTypeCheckTestCaseMixin:
expected_content_types = ('text/html', 'text/html; charset=utf-8')
def _pre_setup(self):
super()._pre_setup()
test_instance = self
class CustomClient(self.client_class):
def request(self, *args, **kwargs):
response = super().request(*args, **kwargs)
content_type = response.headers.get('content-type', '')
if test_instance.expected_content_types:
test_instance.assertTrue(
content_type in test_instance.expected_content_types,
msg='Unexpected response content type: {}, expected: {}.'.format(
content_type, ' or '.join(test_instance.expected_content_types)
)
)
return response
self.client = CustomClient()
class ContentTypeTestCaseMixin:
def _inject_test_object_content_type(self):
self.test_object_content_type = ContentType.objects.get_for_model(
model=self.test_object
)
self.test_object_view_kwargs = {
'app_label': self.test_object_content_type.app_label,
'model_name': self.test_object_content_type.model,
'object_id': self.test_object.pk
}
class DelayTestCaseMixin:
def _test_delay(self, seconds=0.1):
time.sleep(seconds)
class DescriptorLeakCheckTestCaseMixin:
_skip_file_descriptor_test = False
def _get_process_descriptor_count(self):
process = psutil.Process()
return process.num_fds()
def _get_process_descriptors(self):
process = psutil.Process()._proc
return os.listdir("%s/%s/fd" % (process._procfs_path, process.pid))
def setUp(self):
super().setUp()
self._process_descriptor_count = self._get_process_descriptor_count()
self._process_descriptors = self._get_process_descriptors()
def tearDown(self):
if not self._skip_file_descriptor_test:
if self._get_process_descriptor_count() > self._process_descriptor_count:
raise ValueError(
'File descriptor leak. The number of file descriptors '
'at the end are higher than at the start of the test.'
)
for descriptor in self._get_process_descriptors():
if descriptor not in self._process_descriptors:
raise ValueError(
'File descriptor leak. A descriptor was found at '
'the end of the test that was not present at the '
'start of the test.'
)
super().tearDown()
class DownloadTestCaseMixin:
def assert_download_response(
self, response, content=None, filename=None, is_attachment=None,
mime_type=None
):
self.assertTrue(isinstance(response, FileResponse))
if filename:
self.assertEqual(response.filename, filename)
if content:
response_content = b''.join(list(response))
try:
response_content = force_text(s=response_content)
except DjangoUnicodeDecodeError:
"""Leave as bytes"""
self.assertEqual(response_content, content)
if is_attachment is not None:
self.assertTrue(response.as_attachment)
if mime_type:
self.assertTrue(
response.headers['Content-Type'].startswith(mime_type)
)
class EnvironmentTestCaseMixin:
def setUp(self):
super().setUp()
self._test_environment_variables = []
def tearDown(self):
for name in self._test_environment_variables:
os.environ.pop(name)
super().tearDown()
def _set_environment_variable(self, name, value):
self._test_environment_variables.append(name)
os.environ[name] = value
class ModelTestCaseMixin:
def _model_instance_to_dictionary(self, instance):
return instance._meta.model._default_manager.filter(
pk=instance.pk
).values()[0]
class OpenFileCheckTestCaseMixin:
_skip_open_file_leak_test = False
def _get_open_files(self):
process = psutil.Process()
return process.open_files()
def setUp(self):
super().setUp()
self._open_files = self._get_open_files()
def tearDown(self):
if not self._skip_open_file_leak_test:
for new_open_file in self._get_open_files():
if new_open_file not in self._open_files:
raise ValueError(
'File left open: {}'.format(new_open_file)
)
super().tearDown()
class RandomPrimaryKeyModelMonkeyPatchMixin:
random_primary_key_random_floor = 100
random_primary_key_random_ceiling = 10000
random_primary_key_maximum_attempts = 100
random_primary_key_enable = True
@staticmethod
def get_unique_primary_key(model):
manager = model._meta.default_manager
attempts = 0
while True:
primary_key = random.randint(
RandomPrimaryKeyModelMonkeyPatchMixin.random_primary_key_random_floor,
RandomPrimaryKeyModelMonkeyPatchMixin.random_primary_key_random_ceiling
)
if not manager.filter(pk=primary_key).exists():
break
attempts = attempts + 1
if attempts > RandomPrimaryKeyModelMonkeyPatchMixin.random_primary_key_maximum_attempts:
raise ValueError(
'Maximum number of retries for an unique random primary '
'key reached.'
)
return primary_key
@classmethod
def setUpClass(cls):
super().setUpClass()
random.seed()
def setUp(self):
if self.random_primary_key_enable:
self.method_save_original = models.Model.save
def method_save_new(instance, *args, **kwargs):
if instance.pk:
return self.method_save_original(instance, *args, **kwargs)
else:
# Set meta.auto_created to True to have the original save_base
# not send the pre_save signal which would normally send
# the instance without a primary key. Since we assign a random
# primary key any pre_save signal handler that relies on an
# empty primary key will fail.
# The meta.auto_created and manual pre_save sending emulates
# the original behavior. Since meta.auto_created also disables
# the post_save signal we must also send it ourselves.
# This hack works with Django 1.11's .save_base() but can break
# in future versions if that method is updated.
pre_save.send(
sender=instance.__class__, instance=instance, raw=False,
update_fields=None,
)
instance._meta.auto_created = True
instance.pk = RandomPrimaryKeyModelMonkeyPatchMixin.get_unique_primary_key(
model=instance._meta.model
)
instance.id = instance.pk
kwargs['force_insert'] = True
result = instance.save_base(*args, **kwargs)
instance._meta.auto_created = False
post_save.send(
sender=instance.__class__, instance=instance, created=True,
update_fields=None, raw=False
)
return result
setattr(models.Model, 'save', method_save_new)
super().setUp()
def tearDown(self):
if self.random_primary_key_enable:
models.Model.save = self.method_save_original
super().tearDown()
class SeleniumTestMixin:
SKIP_VARIABLE_NAME = 'TESTS_SELENIUM_SKIP'
@staticmethod
def _get_skip_variable_value():
return os.environ.get(
SeleniumTestMixin._get_skip_variable_environment_name(),
getattr(settings, SeleniumTestMixin.SKIP_VARIABLE_NAME, False)
)
@staticmethod
def _get_skip_variable_environment_name():
return 'MAYAN_{}'.format(SeleniumTestMixin.SKIP_VARIABLE_NAME)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.webdriver = None
if not SeleniumTestMixin._get_skip_variable_value():
options = Options()
options.add_argument('--headless')
cls.webdriver = WebDriver(
firefox_options=options, log_path='/dev/null'
)
@classmethod
def tearDownClass(cls):
if cls.webdriver:
cls.webdriver.quit()
super().tearDownClass()
def setUp(self):
if SeleniumTestMixin._get_skip_variable_value():
self.skipTest(reason='Skipping selenium test')
super().setUp()
def _open_url(self, fragment=None, path=None, viewname=None):
url = '{}{}{}'.format(
self.live_server_url, path or reverse(viewname=viewname),
fragment or ''
)
self.webdriver.get(url=url)
class SilenceLoggerTestCaseMixin:
"""
Changes the log level of a specific logger for the duration of a test.
The default level for silenced loggers is CRITICAL.
Example: self._silence_logger(name='mayan.apps.converter.managers')
"""
test_case_silenced_logger = None
test_case_silenced_logger_new_level = logging.CRITICAL
def tearDown(self):
if self.test_case_silenced_logger:
self.test_case_silenced_logger.setLevel(
level=self.test_case_silenced_logger_level
)
super().tearDown()
def _silence_logger(self, name):
self.test_case_silenced_logger = logging.getLogger(name=name)
self.test_case_silenced_logger_level = self.test_case_silenced_logger.level
self.test_case_silenced_logger.setLevel(
level=self.test_case_silenced_logger_new_level
)
class TempfileCheckTestCaseMixin:
# Ignore the jvmstat instrumentation and GitLab's CI .config files.
# Ignore LibreOffice fontconfig cache dir.
ignore_globs = ('hsperfdata_*', '.config', '.cache')
def _get_temporary_entries(self):
ignored_result = []
# Expand globs by joining the temporary directory and then flattening
# the list of lists into a single list.
for item in self.ignore_globs:
ignored_result.extend(
glob.glob(
os.path.join(setting_temporary_directory.value, item)
)
)
# Remove the path and leave only the expanded filename.
ignored_result = map(lambda x: os.path.split(x)[-1], ignored_result)
return set(
os.listdir(setting_temporary_directory.value)
) - set(ignored_result)
def setUp(self):
super().setUp()
if getattr(settings, 'COMMON_TEST_TEMP_FILES', False):
self._temporary_items = self._get_temporary_entries()
def tearDown(self):
if getattr(settings, 'COMMON_TEST_TEMP_FILES', False):
final_temporary_items = self._get_temporary_entries()
self.assertEqual(
self._temporary_items, final_temporary_items,
msg='Orphan temporary file. The number of temporary '
'files and/or directories at the start and at the end of '
'the test are not the same. Orphan entries: {}'.format(
','.join(final_temporary_items - self._temporary_items)
)
)
super().tearDown()
class TestModelTestCaseMixin(ContentTypeTestCaseMixin, PermissionTestMixin):
auto_create_test_object = False
auto_create_test_object_fields = None
auto_create_test_object_instance_kwargs = None
auto_create_test_object_model = False
auto_create_test_object_permission = False
@staticmethod
def _delete_test_model(model):
ModelPermission.deregister(model=model)
content_type = ContentType.objects.get_for_model(model=model)
if content_type.pk:
content_type.delete()
# Only attempt to delete it if the model was not deleted as part
# of the previous main test model deletion.
if model in apps.all_models[model._meta.app_label] or model in apps.app_configs[model._meta.app_label].models:
if not model._meta.proxy:
with connection.schema_editor() as schema_editor:
schema_editor.delete_model(model=model)
# Only attempt to delete if the model was not deleted as part
# of the previous main test model deletion.
TestModelTestCaseMixin._unregister_model(
app_label=model._meta.app_label, model_name=model._meta.model_name
)
ContentType.objects.clear_cache()
apps.clear_cache()
@staticmethod
def _unregister_model(app_label, model_name):
try:
del apps.all_models[app_label][model_name]
del apps.app_configs[app_label].models[model_name]
except KeyError:
"""
Non fatal. Means the model was deleted in the first attempt.
"""
@classmethod
def setUpClass(cls):
if connection.vendor == 'sqlite':
connection.disable_constraint_checking()
super().setUpClass()
def _delete_test_models(self):
# Delete the test models' content type entries and deregister the
# permissions, this avoids their Content Type from being looked up
# in subsequent tests where they don't exists due to the database
# transaction rollback.
# Clear previous model registration before re-registering it again to
# avoid conflict with test models with the same name, in the same app
# but from another test module.
self._test_models.reverse()
for model in self._test_models:
model.objects.all().delete()
for model in self._test_models:
TestModelTestCaseMixin._delete_test_model(model=model)
for model in self._test_models_extra:
TestModelTestCaseMixin._delete_test_model(model=model)
def setUp(self):
self._test_models = []
self._test_models_extra = set()
self.test_objects = []
self._test_migrations = []
super().setUp()
if self.auto_create_test_object or self.auto_create_test_object_model:
self.TestModel = self._create_test_model(
fields=self.auto_create_test_object_fields
)
if self.auto_create_test_object_permission:
self._create_test_permission()
ModelPermission.register(
model=self.TestModel, permissions=(
self.test_permission,
)
)
if self.auto_create_test_object:
self._create_test_object(
instance_kwargs=self.auto_create_test_object_instance_kwargs
)
self._create_test_models()
def _create_test_models(self):
"""
Optional test class method to have all of its test models created
in the proper order.
"""
def tearDown(self):
self._delete_test_models()
super().tearDown()
def _create_test_model(
self, base_class=None, fields=None, model_name=None,
options=None
):
base_class = base_class or models.Model
test_model_count = len(self._test_models)
self._test_model_name = model_name or '{}_{}'.format(
'TestModel', test_model_count
)
self.options = options
# Obtain the app_config and app_label from the test's module path.
self.app_config = apps.get_containing_app_config(
object_name=self.__class__.__module__
)
if connection.vendor == 'mysql':
self.skipTest(
reason='MySQL doesn\'t support schema changes inside an '
'atomic block.'
)
attrs = {
'__module__': self.__class__.__module__,
'Meta': self._get_test_model_meta()
}
if fields:
attrs.update(fields)
model_list_previous = set(
apps.app_configs[self.app_config.label].models.values()
)
model = type(
self._test_model_name, (base_class,), attrs
)
if not model._meta.proxy:
with connection.schema_editor() as schema_editor:
schema_editor.create_model(model=model)
self._test_models_extra.update(
set(
apps.app_configs[self.app_config.label].models.values()
) - model_list_previous - set((model,))
)
self._test_models.append(model)
ContentType.objects.clear_cache()
apps.clear_cache()
return model
def _create_test_object(self, instance_kwargs=None):
instance_kwargs = instance_kwargs or {}
if not getattr(self, 'TestModel', None):
self.TestModel = self._create_test_model()
self.test_object = self.TestModel.objects.create(**instance_kwargs)
self._inject_test_object_content_type()
self.test_objects.append(self.test_object)
def _get_test_model_meta(self):
self._test_db_table = '{}_{}'.format(
self.app_config.label, self._test_model_name.lower()
)
class Meta:
app_label = self.app_config.label
db_table = self._test_db_table
ordering = ('id',)
verbose_name = self._test_model_name
if self.options:
for key, value in self.options.items():
setattr(Meta, key, value)
return Meta
class TestServerTestCaseMixin:
def setUp(self):
super().setUp()
self.testserver_prefix = self.get_testserver_prefix()
self.testserver_url = self.get_testserver_url()
self.test_view_request = None
def _test_view_factory(self, test_object=None):
def test_view(request):
self.test_view_request = request
return HttpResponse()
return test_view
def get_testserver_prefix(self):
return furl(
scheme=TEST_SERVER_SCHEME, host=TEST_SERVER_HOST,
).tostr()
def get_testserver_url(self):
return furl(
scheme=TEST_SERVER_SCHEME, host=TEST_SERVER_HOST,
path=self.test_view_url
).tostr()
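# Illustrative result (constant values are assumptions; the real ones live
# in ..literals): with scheme 'http', host 'testserver' and test_view_url
# 'test-view/', this returns 'http://testserver/test-view/'.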
class TestViewTestCaseMixin:
auto_add_test_view = False
test_view_is_public = False
test_view_object = None
test_view_name = None
test_view_template = '{{ object }}'
test_view_url = TEST_VIEW_URL
def setUp(self):
super().setUp()
self._test_view_count = 0
self._test_view_names = []
if self.auto_add_test_view:
self.add_test_view(test_object=self.test_view_object)
def tearDown(self):
self.client.logout()
for _ in range(self._test_view_count):
self._get_test_view_urlpatterns().pop(0)
super().tearDown()
def _get_context_from_test_response(self, response):
if isinstance(response.context, ContextList):
# Template widget rendering causes test client response to be
# ContextList rather than RequestContext. Typecast to dictionary
# before updating.
result = dict(response.context).copy()
result.update({'request': response.wsgi_request})
context = Context(result)
else:
result = response.context or {}
result.update({'request': response.wsgi_request})
context = Context(result)
context.request = response.wsgi_request
return context
def _test_view_factory(self, test_object=None):
def test_view(request):
template = Template(template_string=self.test_view_template)
context = Context(
dict_={'object': test_object, 'resolved_object': test_object}
)
return HttpResponse(content=template.render(context=context))
if self.test_view_is_public:
return public(function=test_view)
else:
return test_view
def _get_test_view_urlpatterns(self):
return importlib.import_module(settings.ROOT_URLCONF).urlpatterns
def add_test_view(
self, test_object=None, test_view_factory=None, test_view_name=None,
test_view_url=None
):
if test_view_factory:
view = test_view_factory()
else:
view = self._test_view_factory(
test_object=test_object
)
if test_view_name:
self._test_view_name = test_view_name
else:
self._test_view_name = '{}_{}'.format(
TEST_VIEW_NAME, len(self._test_view_names)
)
self._get_test_view_urlpatterns().insert(
0, url(
regex=test_view_url or self.test_view_url, view=view,
name=self._test_view_name
)
)
clear_url_caches()
self._test_view_count += 1
self._test_view_names.append(self._test_view_name)
def get_test_view(self):
response = self.get(viewname=self._test_view_name)
return self._get_context_from_test_response(response=response)
|
easy_maps/widgets.py | cyber-barrista/django-easy-maps | 114 | 12674927 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from django import template
from django.forms import TextInput
class AddressWithMapWidget(TextInput):
width = 700
height = 200
zoom = 16
tpl = "{{% load easy_maps_tags %}}{{% easy_map address {0.width} {0.height} {0.zoom} %}}"
def render(self, name, value, attrs=None, renderer=None):
output = super(AddressWithMapWidget, self).render(name, value, attrs, renderer)
t = template.Template(self.tpl.format(self))
context = template.Context(
{
"address": value,
}
)
return output + t.render(context)
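# Example wiring (a minimal sketch; the model field and admin names are
# hypothetical):
#
#     from django import forms
#     from django.contrib import admin
#
#     class FirmAdmin(admin.ModelAdmin):
#         class form(forms.ModelForm):
#             class Meta:
#                 widgets = {'address': AddressWithMapWidget({'class': 'vTextField'})}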
|
libraries/botbuilder-core/botbuilder/core/user_state.py | Fl4v/botbuilder-python | 388 | 12674958 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .turn_context import TurnContext
from .bot_state import BotState
from .storage import Storage
class UserState(BotState):
"""
Reads and writes user state for your bot to storage.
"""
no_key_error_message = (
"UserState: channel_id and/or conversation missing from context.activity."
)
def __init__(self, storage: Storage, namespace=""):
"""
Creates a new UserState instance.
:param storage:
:param namespace:
"""
self.namespace = namespace
super(UserState, self).__init__(storage, "Internal.UserState")
def get_storage_key(self, turn_context: TurnContext) -> str:
"""
Returns the storage key for the current user state.
:param turn_context:
:return:
"""
channel_id = turn_context.activity.channel_id or self.__raise_type_error(
"invalid activity-missing channelId"
)
user_id = turn_context.activity.from_property.id or self.__raise_type_error(
"invalid activity-missing from_property.id"
)
storage_key = None
if channel_id and user_id:
storage_key = "%s/users/%s" % (channel_id, user_id)
return storage_key
def __raise_type_error(self, err: str = "NoneType found while expecting value"):
raise TypeError(err)
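# Example wiring (a minimal sketch; MemoryStorage is the in-memory Storage
# implementation shipped with botbuilder-core):
#
#     from botbuilder.core import MemoryStorage, UserState
#
#     user_state = UserState(MemoryStorage())
#     counter_accessor = user_state.create_property("counter")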
|
homeassistant/components/cups/__init__.py | domwillcode/home-assistant | 30,023 | 12674962 | <gh_stars>1000+
"""The cups component."""
|
chemdataextractor_batteries/chemdataextractor/parse/base.py | MB9991/test_demo- | 199 | 12674977 | # -*- coding: utf-8 -*-
"""
chemdataextractor.parse.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import abstractproperty, abstractmethod
import logging
log = logging.getLogger(__name__)
class BaseParser(object):
""""""
@abstractproperty
def root(self):
pass
@abstractmethod
def interpret(self, result, start, end):
pass
def parse(self, tokens):
for result in self.root.scan(tokens):
for model in self.interpret(*result):
yield model
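# A concrete parser supplies `root` (a parse element scanned over each
# sentence's tokens) and `interpret` (which turns raw matches into records).
# Minimal sketch (element and field names are assumptions):
#
#     class BoilingPointParser(BaseParser):
#         root = ...  # a grammar built from parse elements
#
#         def interpret(self, result, start, end):
#             yield {'boiling_point': result}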
|
tests/test_type_alignment.py | TinkerBoard-Android/external-google-fruit | 1,666 | 12674980 | #!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
from fruit_test_common import *
COMMON_DEFINITIONS = '''
#include "test_common.h"
struct alignas(1) X {
INJECT(X()) {
Assert(reinterpret_cast<std::uintptr_t>(this) % 1 == 0);
}
};
struct alignas(4) Y {
INJECT(Y()) {
Assert(reinterpret_cast<std::uintptr_t>(this) % 4 == 0);
}
};
struct alignas(128) Z {
INJECT(Z()) {
Assert(reinterpret_cast<std::uintptr_t>(this) % 128 == 0);
}
};
'''
class TestTypeAlignment(parameterized.TestCase):
def test_everything(self):
source = '''
fruit::Component<X, Y, Z> getComponent() {
return fruit::createComponent();
}
int main() {
fruit::Injector<X, Y, Z> injector(getComponent);
injector.get<X*>();
injector.get<Y*>();
injector.get<Z*>();
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
if __name__ == '__main__':
absltest.main()
|
okta/models/policy_rule_conditions.py | corylevine/okta-sdk-python | 145 | 12674996 | # flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
from okta.models import app_and_instance_policy_rule_condition\
as app_and_instance_policy_rule_condition
from okta.models import app_instance_policy_rule_condition\
as app_instance_policy_rule_condition
from okta.models import policy_rule_auth_context_condition\
as policy_rule_auth_context_condition
from okta.models import password_policy_authentication_provider_condition\
as password_policy_authentication_provider_condition
from okta.models import before_scheduled_action_policy_rule_condition\
as before_scheduled_action_policy_rule_condition
from okta.models import client_policy_condition\
as client_policy_condition
from okta.models import context_policy_rule_condition\
as context_policy_rule_condition
from okta.models import device_policy_rule_condition\
as device_policy_rule_condition
from okta.models import grant_type_policy_rule_condition\
as grant_type_policy_rule_condition
from okta.models import group_policy_rule_condition\
as group_policy_rule_condition
from okta.models import identity_provider_policy_rule_condition\
as identity_provider_policy_rule_condition
from okta.models import mdm_enrollment_policy_rule_condition\
as mdm_enrollment_policy_rule_condition
from okta.models import policy_network_condition\
as policy_network_condition
from okta.models import policy_people_condition\
as policy_people_condition
from okta.models import platform_policy_rule_condition\
as platform_policy_rule_condition
from okta.models import risk_policy_rule_condition\
as risk_policy_rule_condition
from okta.models import risk_score_policy_rule_condition\
as risk_score_policy_rule_condition
from okta.models import o_auth_2_scopes_mediation_policy_rule_condition\
as o_auth_2_scopes_mediation_policy_rule_condition
from okta.models import user_identifier_policy_rule_condition\
as user_identifier_policy_rule_condition
from okta.models import user_status_policy_rule_condition\
as user_status_policy_rule_condition
from okta.models import user_policy_rule_condition\
as user_policy_rule_condition
class PolicyRuleConditions(
OktaObject
):
"""
A class for PolicyRuleConditions objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
if "app" in config:
if isinstance(config["app"],
app_and_instance_policy_rule_condition.AppAndInstancePolicyRuleCondition):
self.app = config["app"]
elif config["app"] is not None:
self.app = app_and_instance_policy_rule_condition.AppAndInstancePolicyRuleCondition(
config["app"]
)
else:
self.app = None
else:
self.app = None
if "apps" in config:
if isinstance(config["apps"],
app_instance_policy_rule_condition.AppInstancePolicyRuleCondition):
self.apps = config["apps"]
elif config["apps"] is not None:
self.apps = app_instance_policy_rule_condition.AppInstancePolicyRuleCondition(
config["apps"]
)
else:
self.apps = None
else:
self.apps = None
if "authContext" in config:
if isinstance(config["authContext"],
policy_rule_auth_context_condition.PolicyRuleAuthContextCondition):
self.auth_context = config["authContext"]
elif config["authContext"] is not None:
self.auth_context = policy_rule_auth_context_condition.PolicyRuleAuthContextCondition(
config["authContext"]
)
else:
self.auth_context = None
else:
self.auth_context = None
if "authProvider" in config:
if isinstance(config["authProvider"],
password_policy_authentication_provider_condition.PasswordPolicyAuthenticationProviderCondition):
self.auth_provider = config["authProvider"]
elif config["authProvider"] is not None:
self.auth_provider = password_policy_authentication_provider_condition.PasswordPolicyAuthenticationProviderCondition(
config["authProvider"]
)
else:
self.auth_provider = None
else:
self.auth_provider = None
if "beforeScheduledAction" in config:
if isinstance(config["beforeScheduledAction"],
before_scheduled_action_policy_rule_condition.BeforeScheduledActionPolicyRuleCondition):
self.before_scheduled_action = config["beforeScheduledAction"]
elif config["beforeScheduledAction"] is not None:
self.before_scheduled_action = before_scheduled_action_policy_rule_condition.BeforeScheduledActionPolicyRuleCondition(
config["beforeScheduledAction"]
)
else:
self.before_scheduled_action = None
else:
self.before_scheduled_action = None
if "clients" in config:
if isinstance(config["clients"],
client_policy_condition.ClientPolicyCondition):
self.clients = config["clients"]
elif config["clients"] is not None:
self.clients = client_policy_condition.ClientPolicyCondition(
config["clients"]
)
else:
self.clients = None
else:
self.clients = None
if "context" in config:
if isinstance(config["context"],
context_policy_rule_condition.ContextPolicyRuleCondition):
self.context = config["context"]
elif config["context"] is not None:
self.context = context_policy_rule_condition.ContextPolicyRuleCondition(
config["context"]
)
else:
self.context = None
else:
self.context = None
if "device" in config:
if isinstance(config["device"],
device_policy_rule_condition.DevicePolicyRuleCondition):
self.device = config["device"]
elif config["device"] is not None:
self.device = device_policy_rule_condition.DevicePolicyRuleCondition(
config["device"]
)
else:
self.device = None
else:
self.device = None
if "grantTypes" in config:
if isinstance(config["grantTypes"],
grant_type_policy_rule_condition.GrantTypePolicyRuleCondition):
self.grant_types = config["grantTypes"]
elif config["grantTypes"] is not None:
self.grant_types = grant_type_policy_rule_condition.GrantTypePolicyRuleCondition(
config["grantTypes"]
)
else:
self.grant_types = None
else:
self.grant_types = None
if "groups" in config:
if isinstance(config["groups"],
group_policy_rule_condition.GroupPolicyRuleCondition):
self.groups = config["groups"]
elif config["groups"] is not None:
self.groups = group_policy_rule_condition.GroupPolicyRuleCondition(
config["groups"]
)
else:
self.groups = None
else:
self.groups = None
if "identityProvider" in config:
if isinstance(config["identityProvider"],
identity_provider_policy_rule_condition.IdentityProviderPolicyRuleCondition):
self.identity_provider = config["identityProvider"]
elif config["identityProvider"] is not None:
self.identity_provider = identity_provider_policy_rule_condition.IdentityProviderPolicyRuleCondition(
config["identityProvider"]
)
else:
self.identity_provider = None
else:
self.identity_provider = None
if "mdmEnrollment" in config:
if isinstance(config["mdmEnrollment"],
mdm_enrollment_policy_rule_condition.MdmEnrollmentPolicyRuleCondition):
self.mdm_enrollment = config["mdmEnrollment"]
elif config["mdmEnrollment"] is not None:
self.mdm_enrollment = mdm_enrollment_policy_rule_condition.MdmEnrollmentPolicyRuleCondition(
config["mdmEnrollment"]
)
else:
self.mdm_enrollment = None
else:
self.mdm_enrollment = None
if "network" in config:
if isinstance(config["network"],
policy_network_condition.PolicyNetworkCondition):
self.network = config["network"]
elif config["network"] is not None:
self.network = policy_network_condition.PolicyNetworkCondition(
config["network"]
)
else:
self.network = None
else:
self.network = None
if "people" in config:
if isinstance(config["people"],
policy_people_condition.PolicyPeopleCondition):
self.people = config["people"]
elif config["people"] is not None:
self.people = policy_people_condition.PolicyPeopleCondition(
config["people"]
)
else:
self.people = None
else:
self.people = None
if "platform" in config:
if isinstance(config["platform"],
platform_policy_rule_condition.PlatformPolicyRuleCondition):
self.platform = config["platform"]
elif config["platform"] is not None:
self.platform = platform_policy_rule_condition.PlatformPolicyRuleCondition(
config["platform"]
)
else:
self.platform = None
else:
self.platform = None
if "risk" in config:
if isinstance(config["risk"],
risk_policy_rule_condition.RiskPolicyRuleCondition):
self.risk = config["risk"]
elif config["risk"] is not None:
self.risk = risk_policy_rule_condition.RiskPolicyRuleCondition(
config["risk"]
)
else:
self.risk = None
else:
self.risk = None
if "riskScore" in config:
if isinstance(config["riskScore"],
risk_score_policy_rule_condition.RiskScorePolicyRuleCondition):
self.risk_score = config["riskScore"]
elif config["riskScore"] is not None:
self.risk_score = risk_score_policy_rule_condition.RiskScorePolicyRuleCondition(
config["riskScore"]
)
else:
self.risk_score = None
else:
self.risk_score = None
if "scopes" in config:
if isinstance(config["scopes"],
o_auth_2_scopes_mediation_policy_rule_condition.OAuth2ScopesMediationPolicyRuleCondition):
self.scopes = config["scopes"]
elif config["scopes"] is not None:
self.scopes = o_auth_2_scopes_mediation_policy_rule_condition.OAuth2ScopesMediationPolicyRuleCondition(
config["scopes"]
)
else:
self.scopes = None
else:
self.scopes = None
if "userIdentifier" in config:
if isinstance(config["userIdentifier"],
user_identifier_policy_rule_condition.UserIdentifierPolicyRuleCondition):
self.user_identifier = config["userIdentifier"]
elif config["userIdentifier"] is not None:
self.user_identifier = user_identifier_policy_rule_condition.UserIdentifierPolicyRuleCondition(
config["userIdentifier"]
)
else:
self.user_identifier = None
else:
self.user_identifier = None
if "userStatus" in config:
if isinstance(config["userStatus"],
user_status_policy_rule_condition.UserStatusPolicyRuleCondition):
self.user_status = config["userStatus"]
elif config["userStatus"] is not None:
self.user_status = user_status_policy_rule_condition.UserStatusPolicyRuleCondition(
config["userStatus"]
)
else:
self.user_status = None
else:
self.user_status = None
if "users" in config:
if isinstance(config["users"],
user_policy_rule_condition.UserPolicyRuleCondition):
self.users = config["users"]
elif config["users"] is not None:
self.users = user_policy_rule_condition.UserPolicyRuleCondition(
config["users"]
)
else:
self.users = None
else:
self.users = None
else:
self.app = None
self.apps = None
self.auth_context = None
self.auth_provider = None
self.before_scheduled_action = None
self.clients = None
self.context = None
self.device = None
self.grant_types = None
self.groups = None
self.identity_provider = None
self.mdm_enrollment = None
self.network = None
self.people = None
self.platform = None
self.risk = None
self.risk_score = None
self.scopes = None
self.user_identifier = None
self.user_status = None
self.users = None
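    # Serialize back to the API's camelCase wire format, merging the condition
    # fields set above with those of the parent conditions class.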
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"app": self.app,
"apps": self.apps,
"authContext": self.auth_context,
"authProvider": self.auth_provider,
"beforeScheduledAction": self.before_scheduled_action,
"clients": self.clients,
"context": self.context,
"device": self.device,
"grantTypes": self.grant_types,
"groups": self.groups,
"identityProvider": self.identity_provider,
"mdmEnrollment": self.mdm_enrollment,
"network": self.network,
"people": self.people,
"platform": self.platform,
"risk": self.risk,
"riskScore": self.risk_score,
"scopes": self.scopes,
"userIdentifier": self.user_identifier,
"userStatus": self.user_status,
"users": self.users
}
parent_req_format.update(current_obj_format)
return parent_req_format
|
tensorflow_model_analysis/slicer/slicer_test.py | jaymessina3/model-analysis | 1,118 | 12675017 | # Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Slicer test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import six
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import metrics_for_slice_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from google.protobuf import text_format
def make_features_dict(features_dict):
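  # Wrap each value as {'node': np.array(...)}, mirroring how TFMA encodes
  # tensor values inside FeaturesPredictionsLabels dicts.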
result = {}
for key, value in features_dict.items():
result[key] = {'node': np.array(value)}
return result
def create_fpls():
fpl1 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['f'],
'age': [13],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
fpl2 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
return [fpl1, fpl2]
def wrap_fpl(fpl):
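  # Keys mirror what downstream TFMA extractors expect: the raw input plus the
  # FeaturesPredictionsLabels extract, stored under their constants keys.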
return {
constants.INPUT_KEY: fpl,
constants.FEATURES_PREDICTIONS_LABELS_KEY: fpl
}
class SlicerTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase):
def setUp(self):
super(SlicerTest, self).setUp()
self.longMessage = True # pylint: disable=invalid-name
def _makeFeaturesDict(self, features_dict):
result = {}
for key, value in features_dict.items():
result[key] = {'node': np.array(value)}
return result
def assertSliceResult(self, name, features_dict, columns, features, expected):
spec = slicer.SingleSliceSpec(columns=columns, features=features)
msg = 'Test case %s: slice on columns %s, features %s' % (name, columns,
features)
six.assertCountEqual(
self, expected,
slicer.get_slices_for_features_dicts([features_dict], None, [spec]),
msg)
def testDeserializeSliceKey(self):
slice_metrics = text_format.Parse(
"""
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 1.0
}
""", metrics_for_slice_pb2.SliceKey())
got_slice_key = slicer.deserialize_slice_key(slice_metrics)
self.assertCountEqual([('age', 5), ('language', 'english'), ('price', 1.0)],
got_slice_key)
def testDeserializeCrossSliceKey(self):
slice_metrics = text_format.Parse(
"""
baseline_slice_key {
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 1.0
}
}
comparison_slice_key {
single_slice_keys {
column: 'age'
int64_value: 8
}
single_slice_keys {
column: 'language'
bytes_value: 'hindi'
}
}
""", metrics_for_slice_pb2.CrossSliceKey())
got_slice_key = slicer.deserialize_cross_slice_key(slice_metrics)
self.assertCountEqual(
((('age', 5), ('language', 'english'), ('price', 1.0)),
(('age', 8), ('language', 'hindi'))), got_slice_key)
def testSliceEquality(self):
overall = slicer.SingleSliceSpec()
age_column = slicer.SingleSliceSpec(columns=['age'])
age_feature = slicer.SingleSliceSpec(features=[('age', 5)])
age_and_gender = slicer.SingleSliceSpec(
columns=['age'], features=[('gender', 'f')])
# Note that we construct new instances of the slices to ensure that we
# aren't just checking object identity.
def check_equality_and_hash_equality(left, right):
self.assertEqual(left, right)
self.assertEqual(hash(left), hash(right))
check_equality_and_hash_equality(overall, slicer.SingleSliceSpec())
check_equality_and_hash_equality(age_column,
slicer.SingleSliceSpec(columns=['age']))
check_equality_and_hash_equality(
age_feature, slicer.SingleSliceSpec(features=[('age', 5)]))
check_equality_and_hash_equality(
age_and_gender,
slicer.SingleSliceSpec(columns=['age'], features=[('gender', 'f')]))
self.assertNotEqual(overall, age_column)
self.assertNotEqual(age_column, age_feature)
self.assertNotEqual(age_column, age_and_gender)
self.assertNotEqual(age_feature, age_and_gender)
self.assertCountEqual([slicer.SingleSliceSpec()], [overall])
self.assertCountEqual([
slicer.SingleSliceSpec(columns=['age']),
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(features=[('age', 5)]),
slicer.SingleSliceSpec(columns=['age'], features=[('gender', 'f')])
], [age_and_gender, age_feature, overall, age_column])
def testNoOverlappingColumns(self):
self.assertRaises(ValueError, slicer.SingleSliceSpec, ['age'], [('age', 5)])
def testNonUTF8ValueRaisesValueError(self):
column_name = 'column_name'
invalid_value = b'\x8a'
spec = slicer.SingleSliceSpec(columns=[column_name])
features_dict = self._makeFeaturesDict({
column_name: [invalid_value],
})
with self.assertRaisesRegex(ValueError, column_name):
list(slicer.get_slices_for_features_dicts([features_dict], None, [spec]))
def testGetSlicesForFeaturesDictUnivalent(self):
test_cases = [
('Overall', [], [], [()]),
('Feature does not match', [], [('age', 99)], []),
('No such column', ['no_such_column'], [], []),
('Single column', ['age'], [], [(('age', 5),)]),
('Single feature', [], [('age', 5)], [(('age', 5),)]),
('Single feature type mismatch', [], [('age', '5')], [(('age', 5),)]),
('One column, one feature',
['gender'], [('age', 5)], [(('age', 5), ('gender', 'f'))]),
('Two features', ['interest', 'gender'], [('age', 5)],
[(('age', 5), ('gender', 'f'), ('interest', 'cars'))]),
] # pyformat: disable
features_dict = self._makeFeaturesDict({
'gender': ['f'],
'age': [5],
'interest': ['cars']
})
for (name, columns, features, expected) in test_cases:
self.assertSliceResult(name, features_dict, columns, features, expected)
def testGetSlicesForFeaturesDictMultivalent(self):
test_cases = [
(
'One column',
['fruits'],
[],
[
(('fruits', 'apples'),),
(('fruits', 'pears'),)
],
),
(
'Two columns',
['fruits', 'interests'],
[],
[
(('fruits', 'apples'), ('interests', 'cars')),
(('fruits', 'apples'), ('interests', 'dogs')),
(('fruits', 'pears'), ('interests', 'cars')),
(('fruits', 'pears'), ('interests', 'dogs'))
],
),
(
'One feature',
[],
[('interests', 'cars')],
[
(('interests', 'cars'),)
],
),
(
'Two features',
[],
[('gender', 'f'), ('interests', 'cars')],
[
(('gender', 'f'), ('interests', 'cars'))
],
),
(
'One column, one feature',
['fruits'],
[('interests', 'cars')],
[
(('fruits', 'apples'), ('interests', 'cars')),
(('fruits', 'pears'), ('interests', 'cars'))
],
),
(
'One column, two features',
['fruits'],
[('gender', 'f'), ('interests', 'cars')],
[
(('fruits', 'apples'), ('gender', 'f'), ('interests', 'cars')),
(('fruits', 'pears'), ('gender', 'f'), ('interests', 'cars')),
],
),
(
'Two columns, one feature',
['interests', 'fruits'], [('gender', 'f')],
[
(('fruits', 'apples'), ('gender', 'f'), ('interests', 'cars')),
(('fruits', 'pears'), ('gender', 'f'), ('interests', 'cars')),
(('fruits', 'apples'), ('gender', 'f'), ('interests', 'dogs')),
(('fruits', 'pears'), ('gender', 'f'), ('interests', 'dogs'))
],
),
(
'Two columns, two features',
['interests', 'fruits'],
[('gender', 'f'), ('age', 5)],
[
(('age', 5), ('fruits', 'apples'), ('gender', 'f'),
('interests', 'cars')),
(('age', 5), ('fruits', 'pears'), ('gender', 'f'),
('interests', 'cars')),
(('age', 5), ('fruits', 'apples'), ('gender', 'f'),
('interests', 'dogs')),
(('age', 5), ('fruits', 'pears'), ('gender', 'f'),
('interests', 'dogs'))
],
)
] # pyformat: disable
features_dict = self._makeFeaturesDict({
'gender': ['f'],
'age': [5],
'interests': ['cars', 'dogs'],
'fruits': ['apples', 'pears']
})
for (name, columns, features, expected) in test_cases:
self.assertSliceResult(name, features_dict, columns, features, expected)
def testGetSlicesForFeaturesDictMultipleSingleSliceSpecs(self):
features_dict = self._makeFeaturesDict({
'gender': ['f'],
'age': [5],
'interest': ['cars']
})
spec_overall = slicer.SingleSliceSpec()
spec_age = slicer.SingleSliceSpec(columns=['age'])
spec_age4 = slicer.SingleSliceSpec(features=[('age', 4)])
spec_age5_gender = slicer.SingleSliceSpec(
columns=['gender'], features=[('age', 5)])
slice_spec = [spec_overall, spec_age, spec_age4, spec_age5_gender]
expected = [(), (('age', 5),), (('age', 5), ('gender', 'f'))]
self.assertCountEqual(
expected,
slicer.get_slices_for_features_dicts([features_dict], None, slice_spec))
def testStringifySliceKey(self):
test_cases = [
('overall', (), 'Overall'),
('one bytes feature', (('age_str', '5'),), 'age_str:5'),
('one int64 feature', (('age', 1),), 'age:1'),
('mixed', (('age', 1), ('gender', 'f')), 'age_X_gender:1_X_f'),
('more', (('age', 1), ('gender', 'f'), ('interest', 'cars')),
'age_X_gender_X_interest:1_X_f_X_cars'),
('unicode', (('text', b'\xe4\xb8\xad\xe6\x96\x87'),), u'text:\u4e2d\u6587'),
] # pyformat: disable
for (name, slice_key, stringified_key) in test_cases:
self.assertEqual(
stringified_key, slicer.stringify_slice_key(slice_key), msg=name)
def testIsCrossSliceApplicable(self):
test_cases = [
(True, 'overall pass', ((), (('b', 2),)), config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(),
slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
(True, 'value pass', ((('a', 1),), (('b', 2),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
(True, 'baseline key pass', ((('a', 1),), (('b', 2),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_keys=['a']),
slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
(True, 'comparison key pass', ((('a', 1),), (('b', 2),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b'])])),
(True, 'comparison multiple key pass', ((('a', 1),), (('c', 3),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b']),
config_pb2.SlicingSpec(feature_keys=['c'])])),
(False, 'overall fail', ((('a', 1),), (('b', 2),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(),
slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
(False, 'value fail', ((('a', 1),), (('b', 3),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
(False, 'baseline key fail', ((('c', 1),), (('b', 2),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_keys=['a']),
slicing_specs=[config_pb2.SlicingSpec(feature_values={'b': '2'})])),
(False, 'comparison key fail', ((('a', 1),), (('c', 3),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b'])])),
(False, 'comparison multiple key fail', ((('a', 1),), (('d', 3),)),
config_pb2.CrossSlicingSpec(
baseline_spec=config_pb2.SlicingSpec(feature_values={'a': '1'}),
slicing_specs=[config_pb2.SlicingSpec(feature_keys=['b']),
config_pb2.SlicingSpec(feature_keys=['c'])])),
] # pyformat: disable
for (expected_result, name, sliced_key, slicing_spec) in test_cases:
self.assertEqual(
expected_result,
slicer.is_cross_slice_applicable(
cross_slice_key=sliced_key, cross_slicing_spec=slicing_spec),
msg=name)
def testGetSliceKeyType(self):
test_cases = [
(slicer.SliceKeyType, 'overall', ()),
(slicer.SliceKeyType, 'one bytes feature', (('a', '5'),)),
(slicer.SliceKeyType, 'one int64 feature', (('a', 1),)),
(slicer.SliceKeyType, 'mixed', (('a', 1), ('b', 'f'))),
(slicer.SliceKeyType, 'more', (('a', 1), ('b', 'f'), ('c', 'cars'))),
(slicer.SliceKeyType, 'unicode',
(('a', b'\xe4\xb8\xad\xe6\x96\x87'),)),
(slicer.CrossSliceKeyType, 'CrossSlice overall', ((), ())),
(slicer.CrossSliceKeyType, 'CrossSlice one slice key baseline',
((('a', '5'),), ())),
(slicer.CrossSliceKeyType, 'CrossSlice one slice key comparison',
((), (('a', 1),))),
(slicer.CrossSliceKeyType, 'CrossSlice two simple slice key',
((('a', 1),), (('b', 'f'),))),
(slicer.CrossSliceKeyType, 'CrossSlice two multiple slice key',
((('a', 1), ('b', 'f'), ('c', '11')),
(('a2', 1), ('b', 'm'), ('c', '11')))),
] # pyformat: disable
for (expected_result, name, slice_key) in test_cases:
self.assertEqual(
expected_result, slicer.get_slice_key_type(slice_key), msg=name)
unrecognized_test_cases = [
('Unrecognized 1: ', ('a')),
('Unrecognized 2: ', ('a',)),
('Unrecognized 3: ', ('a', 1)),
('Unrecognized 4: ', (('a'))),
('Unrecognized 5: ', (('a',))),
('Unrecognized 6: ', ((), (), ())),
('Unrecognized 7: ', ((('a', 1),), (('b', 1),), (('c', 1),))),
('Unrecognized 8: ', ((('a', 1),), ('b', 1))),
('Unrecognized 9: ', (('a', 1), (('b', 1),))),
] # pyformat: disable
for (name, slice_key) in unrecognized_test_cases:
with self.assertRaises(TypeError, msg=name + str(slice_key)):
slicer.get_slice_key_type(slice_key)
def testIsSliceApplicable(self):
test_cases = [
('applicable', ['column1'],
[('column3', 'value3'), ('column4', 'value4')],
(('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
True),
('wrongcolumns', ['column1', 'column2'],
[('column3', 'value3'), ('column4', 'value4')],
(('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
False),
('wrongfeatures', ['column1'], [('column3', 'value3')],
(('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
False),
('nocolumns', [], [('column3', 'value3')],
(('column1', 'value1'), ('column3', 'value3'), ('column4', 'value4')),
False),
('nofeatures', ['column1'], [], (('column1', 'value1'),), True),
('empty slice key', ['column1'], [('column2', 'value1')], (), False),
('overall', [], [], (), True)
] # pyformat: disable
for (name, columns, features, slice_key, result) in test_cases:
slice_spec = slicer.SingleSliceSpec(columns=columns, features=features)
self.assertEqual(
slice_spec.is_slice_applicable(slice_key), result, msg=name)
def testSliceDefaultSlice(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
metrics = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
[slicer.SingleSliceSpec()])
| 'FanoutSlices' >> slicer.FanoutSlices())
def check_result(got):
try:
self.assertLen(got, 2)
expected_result = [
((), wrap_fpl(fpls[0])),
((), wrap_fpl(fpls[1])),
]
self.assertEqual(len(got), len(expected_result))
self.assertTrue(
got[0] == expected_result[0] and got[1] == expected_result[1] or
got[1] == expected_result[0] and got[0] == expected_result[1])
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(metrics, check_result)
def testSliceOneSlice(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
metrics = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls, reshuffle=False)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys([
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(columns=['gender'])
])
| 'FanoutSlices' >> slicer.FanoutSlices())
def check_result(got):
try:
self.assertLen(got, 4)
expected_result = [
((), wrap_fpl(fpls[0])),
((), wrap_fpl(fpls[1])),
((('gender', 'f'),), wrap_fpl(fpls[0])),
((('gender', 'm'),), wrap_fpl(fpls[1])),
]
self.assertCountEqual(got, expected_result)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(metrics, check_result)
def testMultidimSlices(self):
data = [{
'features': {
'gender': [['f'], ['f']],
'age': [[13], [13]],
'interest': [['cars'], ['cars']]
},
'predictions': [[1], [1]],
'labels': [[0], [0]],
constants.SLICE_KEY_TYPES_KEY:
np.array([[(), (('gender', 'f'),)], [(), (('gender', 'f'),)]])
}, {
'features': {
'gender': [['f'], ['m']],
'age': [[13], [10]],
'interest': [['cars'], ['cars']]
},
'predictions': [[1], [1]],
'labels': [[0], [0]],
constants.SLICE_KEY_TYPES_KEY: [[(), (('gender', 'f'),)],
[(), (('gender', 'm'),)]]
}]
with beam.Pipeline() as pipeline:
result = (
pipeline
| 'CreateTestInput' >> beam.Create(data, reshuffle=False)
| 'FanoutSlices' >> slicer.FanoutSlices())
def check_result(got):
try:
self.assertLen(got, 5)
del data[0][constants.SLICE_KEY_TYPES_KEY]
del data[1][constants.SLICE_KEY_TYPES_KEY]
expected_result = [
((), data[0]),
((), data[1]),
((('gender', 'f'),), data[0]),
((('gender', 'f'),), data[1]),
((('gender', 'm'),), data[1]),
]
self.assertCountEqual(got, expected_result)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result)
def testMultidimOverallSlices(self):
data = [{
constants.SLICE_KEY_TYPES_KEY: np.array([[()], [()]])
}, {
constants.SLICE_KEY_TYPES_KEY: np.array([[()], [()]])
}]
with beam.Pipeline() as pipeline:
result = (
pipeline
| 'CreateTestInput' >> beam.Create(data, reshuffle=False)
| 'FanoutSlices' >> slicer.FanoutSlices())
def check_result(got):
try:
self.assertLen(got, 2)
del data[0][constants.SLICE_KEY_TYPES_KEY]
del data[1][constants.SLICE_KEY_TYPES_KEY]
expected_result = [
((), data[0]),
((), data[1]),
]
self.assertCountEqual(got, expected_result)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result)
def testFilterOutSlices(self):
slice_key_1 = (('slice_key', 'slice1'),)
slice_key_2 = (('slice_key', 'slice2'),)
slice_key_3 = (('slice_key', 'slice3'),)
values_list = [(slice_key_1, {
'val11': 'val12'
}), (slice_key_2, {
'val21': 'val22'
})]
slice_counts_list = [(slice_key_1, 2), (slice_key_2, 1), (slice_key_3, 0)]
def check_output(got):
try:
self.assertLen(got, 2)
slices = {}
for (k, v) in got:
slices[k] = v
self.assertEqual(slices[slice_key_1], {'val11': 'val12'})
self.assertIn(metric_keys.ERROR_METRIC, slices[slice_key_2])
except AssertionError as err:
raise util.BeamAssertException(err)
with beam.Pipeline() as pipeline:
slice_counts_pcoll = (
pipeline | 'CreateSliceCountsPColl' >> beam.Create(slice_counts_list))
output_dict = (
pipeline
| 'CreateValuesPColl' >> beam.Create(values_list)
| 'FilterOutSlices' >> slicer.FilterOutSlices(
slice_counts_pcoll,
min_slice_size=2,
error_metric_key=metric_keys.ERROR_METRIC))
util.assert_that(output_dict, check_output)
@parameterized.named_parameters(
{
'testcase_name': 'matching_single_spec',
'slice_key': (('f1', 1),),
'slice_specs': [slicer.SingleSliceSpec(features=[('f1', 1)])],
'expected_result': True
},
{
'testcase_name': 'matching_single_spec_with_float',
'slice_key': (('f1', '1.0'),),
'slice_specs': [slicer.SingleSliceSpec(features=[('f1', '1.0')])],
'expected_result': True
},
{
'testcase_name': 'non_matching_single_spec',
'slice_key': (('f1', 1),),
'slice_specs': [slicer.SingleSliceSpec(columns=['f2'])],
'expected_result': False
},
{
'testcase_name': 'matching_multiple_specs',
'slice_key': (('f1', 1),),
'slice_specs': [
slicer.SingleSliceSpec(columns=['f1']),
slicer.SingleSliceSpec(columns=['f2'])
],
'expected_result': True
},
{
'testcase_name': 'empty_specs',
'slice_key': (('f1', 1),),
'slice_specs': [],
'expected_result': False
},
)
def testSliceKeyMatchesSliceSpecs(self, slice_key, slice_specs,
expected_result):
self.assertEqual(
expected_result,
slicer.slice_key_matches_slice_specs(slice_key, slice_specs))
if __name__ == '__main__':
tf.test.main()
|
robogym/robot/utils/logistic_functions.py | 0xflotus/robogym | 288 | 12675039 | <reponame>0xflotus/robogym<filename>robogym/robot/utils/logistic_functions.py
"""
Logistic functions
Function List:
1. logistic_sigmoid(x: float, a: float) -> float: calculate the normalized logistic sigmoid
with slope parameter a
2. clipped_logistic_sigmoid(x: float, a: float) -> float: calculate the normalized logistic sigmoid
with slope parameter a clipped to a [0, 1] output range
"""
import numpy as np
def logistic_sigmoid(x: float, a: float) -> float:
"""
Calculates the normalized logistic sigmoid as a function of x with parameterization on a
This function will be symmetric about 0.5
    :param x: input value for calculation. Range: -inf to inf
        the output is not clipped to 0 and 1 outside of the 0 to 1 input range
:param a: value of the slope of the sigmoid. Values range from 0.5 for slope ~ 1 to
1.0 for slope ~ infinity. There's very little signal at a < 0.5
:return: the value of the normalized logistic sigmoid at x
"""
    # set epsilon to be small to avoid divide-by-zero conditions
epsilon: float = 0.0001
# clip a to be between (0 + epsilon) and (1 - epsilon)
min_param_a: float = 0.0 + epsilon
max_param_a: float = 1.0 - epsilon
a = np.maximum(min_param_a, np.minimum(max_param_a, a))
# set a to be asymptotic at 1 and zero at 0
a = 1 / (1 - a) - 1
# calculate the numerator and denominator terms for the normalized sigmoid
A: float = 1.0 / (1.0 + np.exp(0 - ((x - 0.5) * a * 2.0)))
B: float = 1.0 / (1.0 + np.exp(a))
C: float = 1.0 / (1.0 + np.exp(0 - a))
y: float = (A - B) / (C - B)
return y
def clipped_logistic_sigmoid(x: float, a: float) -> float:
"""
Calculates the normalized logistic sigmoid as a function of x with parameterization on a
This function will be symmetric about 0.5
    :param x: input value for calculation. Range: -inf to inf, effective range 0 to 1;
        outputs 0 for values below 0 and 1 for values above 1
:param a: value of the slope of the sigmoid. Values range from 0.5 for slope ~ 1 to
1.0 for slope ~ infinity. There's very little signal at a < 0.5
:return: the value of the normalized logistic sigmoid at x
"""
# clip values below zero and above one
x = np.maximum(x, 0.0)
x = np.minimum(x, 1.0)
    # set epsilon to be small to avoid divide-by-zero conditions
epsilon: float = 0.0001
# clip a to be between (0 + epsilon) and (1 - epsilon)
min_param_a: float = 0.0 + epsilon
max_param_a: float = 1.0 - epsilon
a = np.maximum(min_param_a, np.minimum(max_param_a, a))
# set a to be asymptotic at 1 and zero at 0
a = 1 / (1 - a) - 1
# calculate the numerator and denominator terms for the normalized sigmoid
A: float = 1.0 / (1.0 + np.exp(0 - ((x - 0.5) * a * 2.0)))
B: float = 1.0 / (1.0 + np.exp(a))
C: float = 1.0 / (1.0 + np.exp(0 - a))
y: float = (A - B) / (C - B)
return y
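# Minimal usage sketch (illustrative values, not part of the original module):
# both sigmoids map 0.5 to 0.5 and steepen as the slope parameter a approaches 1.
if __name__ == "__main__":
    for a in (0.5, 0.7, 0.9):
        # sample a few points to see how a controls the steepness
        ys = [round(clipped_logistic_sigmoid(x, a), 3) for x in (0.0, 0.25, 0.5, 0.75, 1.0)]
        print(a, ys)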
|
python/federatedml/nn/backend/tf_keras/layers/baisc.py | rubenlozanoaht3m/DataDogm | 715 | 12675041 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import layers
from .util import _get_initializer
def _build_dense(units, activation, use_bias=True, kernel_initializer="glorot_uniform",
bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
kernel_constraint=None, bias_constraint=None, seed=None, **kwargs):
return layers.Dense(units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=_get_initializer(kernel_initializer, seed),
bias_initializer=_get_initializer(bias_initializer, seed),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def _build_dropout(rate, noise_shape=None, seed=None, **kwargs):
return layers.Dropout(rate, noise_shape=noise_shape, seed=seed, **kwargs)
def _build_flatten(data_format=None, **kwargs):
return layers.Flatten(data_format=data_format, **kwargs)
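# Minimal usage sketch (layer arguments are illustrative, not FATE defaults):
# dense = _build_dense(units=64, activation="relu", seed=42)
# dropout = _build_dropout(rate=0.5, seed=42)
# flatten = _build_flatten()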
|
__scraping__/legit.ng - requests/main.py | whitmans-max/python-examples | 140 | 12675070 | <reponame>whitmans-max/python-examples
#!/usr/bin/env python3
# date: 2020.01.09
#
import requests
from bs4 import BeautifulSoup
res = requests.get('https://www.legit.ng/1087216-igbo-proverbs-meaning.html')
soup = BeautifulSoup(res.content, 'html.parser')
data = []
for div in soup.find_all('div'):
for block in div.find_all('blockquote'):
text = block.text
if text.startswith('Igbo Proverb – '):
            name = text[15:]  # strip the prefix; len('Igbo Proverb – ') == 15 ('–' is an en dash)
data.append(name)
for item in data:
print(item)
|
biscuit/src/kernel/rewrite.py | whakapapa/biscuit | 2,062 | 12675085 | #!/usr/bin/env python2
import os
import subprocess
import sys
def usage():
print >> sys.stderr, 'usage: %s <in gobin> <out gobin>' % (sys.argv[0])
sys.exit(1)
def _fsrex(data):
# FS seg override?
c = ord(data[0])
if c != 0x64:
return False
c = ord(data[1])
    # REX.W (64-bit operand width) or REX.WR (64-bit width, reg field extended to r8-r15)
rexes = [0x48, 0x4c]
if c not in rexes:
return False
return True
def _movs(data):
    # match movs that use an fs segment override; rewrite() patches them to
    # use a gs segment override
if not _fsrex(data):
return False
c = ord(data[2])
# moves that i don't expect but may see someday
maybemov = [0x88, 0x8c, 0x8e, 0xa0, 0xa1, 0xa2, 0xa3, 0xb0, 0xb8, 0xc6]
if c in maybemov:
raise ValueError('new kind of mov %x' % (c))
movs = [0x8b, 0x89, 0xc7]
if c not in movs:
return False
return True
def _cmps(data):
    # match cmps that use an fs segment override; rewrite() patches them to
    # use a gs segment override
if not _fsrex(data):
return False
if data[2] != '\x39':
return False
return True
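# Illustrative encoding (hypothetical bytes, not taken from a real binary):
# '\x64\x48\x8b' begins a 64-bit mov with an FS override (0x64), so
# _movs('\x64\x48\x8b') is True; after rewrite() patches byte 0 to 0x65
# (GS override), the same check no longer matches.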
def rewrite(f):
spots = getspots(f.name)
data = list(f.read())
found = 0
for i in spots:
d = data[i:i+3]
if _movs(d) or _cmps(d):
found += 1
data[i] = '\x65'
if found == 0:
        raise ValueError("didn't find a single occurrence")
skipped = len(spots) - found
print >> sys.stderr, 'patched %d instructions (%d skipped)' % (found, skipped)
if found < skipped:
raise ValueError('more skipped than found')
return ''.join(data)
def getspots(fn):
# find all potential file offsets where an instruction uses %fs
odcmd = ['objdump', '--prefix-addresses', '-F', '-d', fn]
od = subprocess.Popen(odcmd, stdout=subprocess.PIPE)
gcmd = ['grep', '%fs']
gr = subprocess.Popen(gcmd, stdin=od.stdout, stdout=subprocess.PIPE)
od.stdout.close()
data, _ = gr.communicate()
ret = []
for l in data.split('\n'):
l = l.strip()
if l == '':
continue
l = l.split('File Offset: ')
if len(l) != 2:
print l
raise ValueError('unexpected output')
l = l[1].split(')')[0]
ret.append(int(l, 16))
return ret
if len(sys.argv) != 3:
usage()
fn = sys.argv[1]
dn = sys.argv[2]
newdata = ''
with open(fn, 'r+b') as f:
newdata = rewrite(f)
with open(dn, 'w') as nf:
nf.write(newdata)
print >> sys.stderr, 'created %s' % (dn)
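# Example invocation (file names are illustrative):
#   ./rewrite.py kernel.gobin kernel.patched.gobin
# This patches every fs-prefixed mov/cmp (prefix byte 0x64) that objdump
# reports to use a gs override (0x65) instead.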
|
tests/test_fms_api_event_list_parser.py | tervay/the-blue-alliance | 266 | 12675109 | <gh_stars>100-1000
import datetime
import json
import unittest2
from datafeeds.parsers.fms_api.fms_api_event_list_parser import FMSAPIEventListParser
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.event_type import EventType
from models.sitevar import Sitevar
class TestFMSAPIEventListParser(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
def tearDown(self):
self.testbed.deactivate()
def test_parse_event_list(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
self.assertTrue(isinstance(events, list))
self.assertTrue(isinstance(districts, list))
# File has 6 events, but we ignore CMP divisions (only subdivisions), so only 5 are expected back
self.assertEquals(len(events), 5)
self.assertEquals(len(districts), 1)
def test_parse_regional_event(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[0]
self.assertEquals(event.key_name, "<KEY>")
self.assertEquals(event.name, "New York City Regional")
self.assertEquals(event.short_name, "New York City")
self.assertEquals(event.event_short, "nyny")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=3, day=12, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=3, day=15, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "<NAME> Convention Center")
self.assertEquals(event.city, "New York")
self.assertEquals(event.state_prov, "NY")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.REGIONAL)
self.assertEquals(event.district_key, None)
def test_parse_district_event(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[1]
district = districts[0]
self.assertEquals(event.key_name, "2015cthar")
self.assertEquals(event.name, "NE District - Hartford Event")
self.assertEquals(event.short_name, "Hartford")
self.assertEquals(event.event_short, "cthar")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=3, day=27, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=3, day=29, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Hartford Public High School")
self.assertEquals(event.city, "Hartford")
self.assertEquals(event.state_prov, "CT")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.DISTRICT)
self.assertEquals(event.district_key, district.key)
self.assertEquals(district.key_name, "2015ne")
self.assertEquals(district.abbreviation, "ne")
self.assertEquals(district.year, 2015)
def test_parse_district_cmp(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[2]
self.assertEquals(event.key_name, "2015necmp")
self.assertEquals(event.name, "NE FIRST District Championship presented by United Technologies")
self.assertEquals(event.short_name, "NE FIRST")
self.assertEquals(event.event_short, "necmp")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=4, day=8, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=4, day=11, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Sports and Recreation Center, WPI")
self.assertEquals(event.city, "Worcester")
self.assertEquals(event.state_prov, "MA")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.DISTRICT_CMP)
self.assertEquals(event.district_key, districts[0].key)
def test_parse_cmp_subdivision(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[3]
self.assertEquals(event.key_name, "2015tes")
self.assertEquals(event.name, "Tesla Division")
self.assertEquals(event.short_name, "Tesla")
self.assertEquals(event.event_short, "tes")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=4, day=22, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=4, day=25, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "<NAME>")
self.assertEquals(event.city, "St. Louis")
self.assertEquals(event.state_prov, "MO")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.CMP_DIVISION)
self.assertEquals(event.district_key, None)
def test_parse_offseason(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[4]
self.assertEquals(event.key_name, "2015iri")
self.assertEquals(event.name, "Indiana Robotics Invitational")
self.assertEquals(event.short_name, "Indiana Robotics Invitational")
self.assertEquals(event.event_short, "iri")
self.assertEquals(event.official, False)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=7, day=17, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=7, day=18, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Lawrence North HS")
self.assertEquals(event.city, "Indianapolis")
self.assertEquals(event.state_prov, "IN")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.OFFSEASON)
self.assertEquals(event.district_key, None)
def test_parse_2017_event(self):
with open('test_data/fms_api/2017_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2017).parse(json.loads(f.read()))
self.assertEqual(len(events), 165)
self.assertEqual(len(districts), 10)
event = events[16]
self.assertEquals(event.key_name, "2017casj")
self.assertEquals(event.name, "Silicon Valley Regional")
self.assertEquals(event.short_name, "Silicon Valley")
self.assertEquals(event.event_short, "casj")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2017, month=3, day=29, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2017, month=4, day=1, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "San Jose State University - The Event Center")
self.assertEquals(event.city, "San Jose")
self.assertEquals(event.state_prov, "CA")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2017)
self.assertEquals(event.event_type_enum, EventType.REGIONAL)
self.assertEquals(event.district_key, None)
# New in 2017
self.assertEquals(event.website, "http://www.firstsv.org")
def test_parse_2017_events_with_cmp_hacks(self):
hack_sitevar = Sitevar(id='cmp_registration_hacks')
hack_sitevar.contents = {
"event_name_override": [
{"event": "2017cmpmo", "name": "FIRST Championship Event", "short_name": "Championship"},
{"event": "2017cmptx", "name": "FIRST Championship Event", "short_name": "Championship"}],
"set_start_to_last_day": ["2017cmptx", "2017cmpmo"],
"divisions_to_skip": ["2017arc", "2017cars", "2017cur", "2017dal", "2017dar"],
}
hack_sitevar.put()
with open('test_data/fms_api/2017_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2017).parse(json.loads(f.read()))
self.assertEqual(len(events), 160)
self.assertEqual(len(districts), 10)
non_einstein_types = EventType.CMP_EVENT_TYPES
non_einstein_types.remove(EventType.CMP_FINALS)
for key in hack_sitevar.contents['divisions_to_skip']:
self.assertFalse(filter(lambda e: e.key_name == key, events))
einstein_stl = next(e for e in events if e.key_name == '<KEY>')
self.assertIsNotNone(einstein_stl)
self.assertEqual(einstein_stl.name, "FIRST Championship Event (St. Louis)")
self.assertEqual(einstein_stl.short_name, "Championship (St. Louis)")
self.assertEquals(einstein_stl.start_date, datetime.datetime(year=2017, month=4, day=29, hour=0, minute=0, second=0))
self.assertEquals(einstein_stl.end_date, datetime.datetime(year=2017, month=4, day=29, hour=23, minute=59, second=59))
einstein_hou = next(e for e in events if e.key_name == '<KEY>')
self.assertIsNotNone(einstein_hou)
self.assertEqual(einstein_hou.name, "FIRST Championship Event (Houston)")
self.assertEqual(einstein_hou.short_name, "Championship (Houston)")
self.assertEquals(einstein_hou.start_date, datetime.datetime(year=2017, month=4, day=22, hour=0, minute=0, second=0))
self.assertEquals(einstein_hou.end_date, datetime.datetime(year=2017, month=4, day=22, hour=23, minute=59, second=59))
def test_parse_2017_official_offseason(self):
with open('test_data/fms_api/2017_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2017).parse(json.loads(f.read()))
self.assertEqual(len(events), 165)
self.assertEqual(len(districts), 10)
event = next(e for e in events if e.key_name == "2017iri")
self.assertEquals(event.key_name, "2017iri")
self.assertEquals(event.name, "Indiana Robotics Invitational")
self.assertEquals(event.short_name, "Indiana Robotics Invitational")
self.assertEquals(event.event_short, "iri")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2017, month=7, day=14, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2017, month=7, day=15, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Lawrence North High School")
self.assertEquals(event.city, "Indianapolis")
self.assertEquals(event.state_prov, "IN")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2017)
self.assertEquals(event.event_type_enum, EventType.OFFSEASON)
self.assertEquals(event.district_key, None)
self.assertEquals(event.website, "http://indianaroboticsinvitational.org/")
self.assertIsNone(event.webcast)
def test_parse_2018_event(self):
with open('test_data/fms_api/2018_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2018).parse(json.loads(f.read()))
self.assertEqual(len(events), 178)
self.assertEqual(len(districts), 10)
event = events[18]
self.assertEquals(event.key_name, "2018casj")
self.assertEquals(event.name, "Silicon Valley Regional")
self.assertEquals(event.short_name, "Silicon Valley")
self.assertEquals(event.event_short, "casj")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2018, month=3, day=28, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2018, month=3, day=31, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "San Jose State University - The Event Center")
self.assertEquals(event.city, "San Jose")
self.assertEquals(event.state_prov, "CA")
self.assertEquals(event.country, "USA")
self.assertEquals(event.year, 2018)
self.assertEquals(event.event_type_enum, EventType.REGIONAL)
self.assertEquals(event.district_key, None)
self.assertEquals(event.website, "http://www.firstsv.org")
# New in 2018
self.assertEqual(event.webcast, [{"type": "twitch", "channel": "firstinspires9"}, {"type": "twitch", "channel": "firstinspires10"}])
def test_parse_division_parent(self):
with open('test_data/fms_api/2017_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2017).parse(json.loads(f.read()))
self.assertEqual(len(events), 165)
self.assertEqual(len(districts), 10)
# Test division <-> parent associations
for event in events:
event_key = event.key.id()
if event_key == '2017micmp':
self.assertEqual(event.parent_event, None)
self.assertEqual(
event.divisions,
[
ndb.Key('Event', '2017micmp1'),
ndb.Key('Event', '2017micmp2'),
ndb.Key('Event', '2017micmp3'),
ndb.Key('Event', '2017micmp4'),
]
)
elif event_key in {'2017micmp1', '2017micmp2', '2017micmp3', '2017micmp4'}:
self.assertEqual(event.parent_event, ndb.Key('Event', '2017micmp'))
self.assertEqual(event.divisions, [])
elif event_key == '<KEY>':
self.assertEqual(event.parent_event, None)
self.assertEqual(
event.divisions,
[
ndb.Key('Event', '2017carv'),
ndb.Key('Event', '2017gal'),
ndb.Key('Event', '2017hop'),
ndb.Key('Event', '2017new'),
ndb.Key('Event', '2017roe'),
ndb.Key('Event', '2017tur'),
]
)
elif event_key in {'2017carv', '2017gal', '2017hop', '2017new', '2017roe', '2017tur'}:
self.assertEqual(event.parent_event, ndb.Key('Event', '2017cmptx'))
self.assertEqual(event.divisions, [])
elif event_key == '<KEY>':
self.assertEqual(event.parent_event, None)
self.assertEqual(
event.divisions,
[
ndb.Key('Event', '2017arc'),
ndb.Key('Event', '2017cars'),
ndb.Key('Event', '2017cur'),
ndb.Key('Event', '2017dal'),
ndb.Key('Event', '2017dar'),
ndb.Key('Event', '2017tes'),
]
)
elif event_key in {'2017arc', '2017cars', '2017cur', '2017dal', '2017dar', '2017tes'}:
self.assertEqual(event.parent_event, ndb.Key('Event', '2017cmpmo'))
self.assertEqual(event.divisions, [])
else:
self.assertEqual(event.parent_event, None)
self.assertEqual(event.divisions, [])
with open('test_data/fms_api/2018_event_list.json', 'r') as f:
events, districts = FMSAPIEventListParser(2018).parse(json.loads(f.read()))
self.assertEqual(len(events), 178)
self.assertEqual(len(districts), 10)
# Test division <-> parent associations
for event in events:
event_key = event.key.id()
if event_key == '2018oncmp':
self.assertEqual(event.parent_event, None)
self.assertEqual(
event.divisions,
[
ndb.Key('Event', '2018oncmp1'),
ndb.Key('Event', '2018oncmp2'),
]
)
elif event_key in {'2018oncmp1', '2018oncmp2'}:
self.assertEqual(event.parent_event, ndb.Key('Event', '2018oncmp'))
self.assertEqual(event.divisions, [])
elif event_key == '2018micmp':
self.assertEqual(event.parent_event, None)
self.assertEqual(
event.divisions,
[
ndb.Key('Event', '2018micmp1'),
ndb.Key('Event', '2018micmp2'),
ndb.Key('Event', '2018micmp3'),
ndb.Key('Event', '2018micmp4'),
]
)
elif event_key in {'2018micmp1', '2018micmp2', '2018micmp3', '2018micmp4'}:
self.assertEqual(event.parent_event, ndb.Key('Event', '2018micmp'))
self.assertEqual(event.divisions, [])
elif event_key == '<KEY>':
self.assertEqual(event.parent_event, None)
self.assertEqual(
event.divisions,
[
ndb.Key('Event', '2018carv'),
ndb.Key('Event', '2018gal'),
ndb.Key('Event', '2018hop'),
ndb.Key('Event', '2018new'),
ndb.Key('Event', '2018roe'),
ndb.Key('Event', '2018tur'),
]
)
elif event_key in {'2018carv', '2018gal', '2018hop', '2018new', '2018roe', '2018tur'}:
self.assertEqual(event.parent_event, ndb.Key('Event', '2018cmptx'))
self.assertEqual(event.divisions, [])
elif event_key == '<KEY>':
self.assertEqual(event.parent_event, None)
self.assertEqual(
event.divisions,
[
ndb.Key('Event', '2018arc'),
ndb.Key('Event', '2018cars'),
ndb.Key('Event', '2018cur'),
ndb.Key('Event', '2018dal'),
ndb.Key('Event', '2018dar'),
ndb.Key('Event', '2018tes'),
]
)
elif event_key in {'2018arc', '2018cars', '2018cur', '2018dal', '2018dar', '2018tes'}:
self.assertEqual(event.parent_event, ndb.Key('Event', '2018cmpmi'))
self.assertEqual(event.divisions, [])
else:
self.assertEqual(event.parent_event, None)
self.assertEqual(event.divisions, [])
|
lib/__init__.py | lockywolf/pysqlcipher3 | 108 | 12675149 | # pysqlite2/__init__.py: the pysqlite2 package.
#
# Copyright (C) 2005 <NAME> <<EMAIL>>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
|
palladium/tests/test_julia.py | vishalbelsare/palladium | 528 | 12675155 | <reponame>vishalbelsare/palladium
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
import numpy as np
import pytest
from pytest import fixture
pytest.importorskip("julia")
@fixture
def bridge(monkeypatch):
make_bridge = Mock()
monkeypatch.setattr('palladium.julia.make_bridge', make_bridge)
return make_bridge.return_value
class TestAbstractModel:
@fixture
def Model(self):
from palladium.julia import AbstractModel
return AbstractModel
@fixture
def model(self, Model, bridge):
return Model(
fit_func='myjulia.fit_func',
predict_func='yourjulia.predict_func',
fit_kwargs={'fit': 'kwargs'},
predict_kwargs={'predict': 'kwargs'},
)
def test_initialize_julia(self, model, bridge):
bridge.eval.side_effect = ['fit', 'predict']
model._initialize_julia()
assert model.fit_func_ == 'fit'
assert model.predict_func_ == 'predict'
assert call('import myjulia') in bridge.mock_calls
assert call('import yourjulia') in bridge.mock_calls
assert call('myjulia.fit_func') in bridge.mock_calls
assert call('yourjulia.predict_func') in bridge.mock_calls
def test_fit(self, model):
X, y = Mock(), Mock()
assert model.fit(X, y) is model
fit_func = model.fit_func_
fit_func.assert_called_with(X.T, y, fit='kwargs')
assert model.fitted_ == fit_func.return_value
def test_fit_with_label_encoder(self, model):
model.encode_labels = True
X, y = Mock(), Mock()
with patch('palladium.julia.LabelEncoder') as encoder:
model.fit(X, y) is model
fit_func = model.fit_func_
transform = encoder().fit_transform
fit_func.assert_called_with(X.T, transform.return_value, fit='kwargs')
def test_predict(self, model):
X = Mock()
model.fitted_ = Mock()
model._initialize_julia()
result = model.predict(X)
predict_func = model.predict_func_
predict_func.assert_called_with(
model.fitted_, X.astype().T, predict='kwargs')
assert result == predict_func.return_value
def test_predict_with_label_encoder(self, model):
model.encode_labels = True
X = Mock()
model.fitted_ = Mock()
model.enc_ = Mock()
inverse_transform = model.enc_.inverse_transform
model._initialize_julia()
result = model.predict(X)
assert result == inverse_transform.return_value
inverse_transform.assert_called_with(model.predict_func_.return_value)
def test_predict_convert_to_float(self, model):
X_in = np.array([1, 2, 3], dtype=object)
model.fitted_ = Mock()
model.enc_ = Mock()
model._initialize_julia()
model.predict(X_in)
X_conv = model.predict_func_.call_args[0][1]
assert np.all(X_in == X_conv)
assert X_conv.dtype == np.float64
def test_getstate(self, model):
model._initialize_julia()
model.fitted_ = Mock()
state = model.__getstate__()
assert state['fitted_'] is not model.fitted_
assert 'fit_func_' not in state
assert 'predict_func_' not in state
assert 'bridge_' not in state
idict = model.__dict__
assert 'fit_func_' in idict
assert 'predict_func_' in idict
assert 'bridge_' in idict
def test_setstate(self, model, bridge):
model.__setstate__(
{'fitted_': 'fitted', 'fit_kwargs': {'bl': 'arg'}})
assert model.fitted_ != 'fitted'
assert model.fitted_ == bridge.eval.return_value.return_value
class TestClassificationModel(TestAbstractModel):
@fixture
def Model(self):
from palladium.julia import ClassificationModel
return ClassificationModel
def test_score(self, model):
X, y = Mock(), Mock()
with patch('palladium.julia.accuracy_score') as accuracy_score:
with patch('palladium.julia.AbstractModel.predict') as predict:
model.score(X, y)
accuracy_score.assert_called_with(predict.return_value, y)
predict.assert_called_with(X)
|
pyrosm/networks.py | nmarchio/pyrosm | 186 | 12675163 | from pyrosm.data_manager import get_osm_data
from pyrosm.frames import prepare_geodataframe
import warnings
def get_network_data(
node_coordinates,
way_records,
tags_as_columns,
network_filter,
bounding_box,
slice_to_segments,
):
# Tags to keep as separate columns
tags_as_columns += ["id", "nodes", "timestamp", "changeset", "version"]
# Call signature for fetching network data
nodes, ways, relation_ways, relations = get_osm_data(
node_arrays=None,
way_records=way_records,
relations=None,
tags_as_columns=tags_as_columns,
data_filter=network_filter,
filter_type="exclude",
# Keep only records having 'highway' tag
osm_keys="highway",
)
# If there weren't any data, return None
if ways is None:
warnings.warn(
"Could not find any edges for given area.", UserWarning, stacklevel=2
)
return None, None
# Prepare GeoDataFrame
edges, nodes = prepare_geodataframe(
nodes,
node_coordinates,
ways,
relations,
relation_ways,
tags_as_columns,
bounding_box,
parse_network=True,
calculate_seg_lengths=slice_to_segments,
)
return edges, nodes
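# Minimal usage sketch (assumes the higher-level pyrosm.OSM wrapper, which
# calls get_network_data() internally; file name and arguments are illustrative):
# from pyrosm import OSM
# osm = OSM("helsinki.osm.pbf")
# edges, nodes = osm.get_network(network_type="driving", nodes=True)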
|
documents/__init__.py | nuwainfo/treeio | 242 | 12675178 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Package initializer for the Tree.io documents app.
"""
|
scripts/vim/plugin/snippets/__init__.py | n13l/OpenAAA | 413 | 12675179 | __all__ = ['clang_complete', 'ultisnips', 'dummy']
|
downstream/votenet_det_new/models/backbone/sparseconv/models/wrapper.py | mbanani/PointContrast | 244 | 12675200 | <reponame>mbanani/PointContrast
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from torch.nn import Module
from MinkowskiEngine import SparseTensor
class Wrapper(Module):
"""
Wrapper for the segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, NetClass, in_nchannel, out_nchannel, config):
super(Wrapper, self).__init__()
self.initialize_filter(NetClass, in_nchannel, out_nchannel, config)
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
raise NotImplementedError('Must initialize a model and a filter')
def forward(self, x, coords, colors=None):
soutput = self.model(x)
# During training, make the network invariant to the filter
if not self.training or random.random() < 0.5:
# Filter requires the model to finish the forward pass
wrapper_coords = self.filter.initialize_coords(self.model, coords, colors)
finput = SparseTensor(soutput.F, wrapper_coords)
soutput = self.filter(finput)
return soutput
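# Hypothetical subclass sketch (names are illustrative, not part of this
# module): initialize_filter() is expected to set self.model and self.filter.
#
# class SegmentationWrapper(Wrapper):
#     def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
#         self.model = NetClass(in_nchannel, out_nchannel, config)
#         self.filter = SomeFilterNet(out_nchannel, config)  # assumed filter class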
|
tests/test_worksheet.py | aaaddress1/boobsnail | 169 | 12675201 | from unittest import TestCase
from excel4lib.sheet import *
class TestWorksheet(TestCase):
def test_column_iterate(self):
worksheet = Worksheet("test.csv")
worksheet.set_current_cords(1, 1)
for i in range(1,10):
for j in range(1, 10):
worksheet.add_cell(Cell(i, j, "{}{}".format(i,j)))
i = 1
for col in worksheet.column_iterate():
j = 1
for c in col[1]:
self.assertEqual(str(col[1][c]), "{}{}".format(i,j), "Should be {}{}".format(i,j))
j = j + 1
i = i + 1
def test_get_cell(self):
worksheet = Worksheet("test.csv")
worksheet.set_current_cords(1, 1)
worksheet.add_next_cell(Cell(-1, -1, "A"))
worksheet.add_next_cell(Cell(-1, -1, "A"))
worksheet.add_next_cell(Cell(-1, -1, ""))
worksheet.add_next_cell(Cell(-1, -1, ""))
worksheet.add_next_cell(Cell(-1, -1, "A"))
worksheet.set_current_cords(2, 1)
worksheet.add_next_cell(Cell(-1, -1, "B"))
worksheet.add_next_cell(Cell(-1, -1, "B"))
cell = worksheet.get_cell(1,1)
self.assertEqual(str(cell), "A", "Should be: A")
cell = worksheet.get_cell(10, 1)
self.assertEqual(cell, None, "Should be: None")
def test_is_reserved(self):
worksheet = Worksheet("test.csv")
for i in range(1,5):
worksheet.add_cell(Cell(1,i))
self.assertEqual(worksheet.is_reserved(1, 1, 2), True, "Should be True")
self.assertEqual(worksheet.is_reserved(2, 1, 2), False, "Should be False")
for i in range(8,12):
worksheet.add_cell(Cell(1,i))
self.assertEqual(worksheet.is_reserved(6, 8, 2), False, "Should be False")
def test_add_next_cell(self):
worksheet = Worksheet("test.csv")
worksheet.set_current_cords(1,1)
worksheet.add_next_cell(Cell(-1,-1,"A"))
worksheet.add_next_cell(Cell(-1, -1, "A"))
worksheet.add_next_cell(Cell(-1, -1, ""))
worksheet.add_next_cell(Cell(-1, -1, ""))
worksheet.add_next_cell(Cell(-1, -1, "A"))
worksheet.set_current_cords(2, 1)
worksheet.add_next_cell(Cell(-1, -1, "B"))
worksheet.add_next_cell(Cell(-1, -1, "B"))
csv = worksheet.to_csv()
val = """A;B;\nA;B;\n;;\n;;\nA;;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
def test_add_cell(self):
worksheet = Worksheet("test.csv")
worksheet.add_cell(Cell(1,1, "A"))
worksheet.add_cell(Cell(2,1, "B"))
worksheet.add_cell(Cell(1,2, "A"))
worksheet.add_cell(Cell(2,2, "B"))
worksheet.add_cell(Cell(1,5, "A"))
csv = worksheet.to_csv()
val = """A;B;\nA;B;\n;;\n;;\nA;;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
def test_replace_cell(self):
worksheet = Worksheet("test.csv")
worksheet.add_cell(Cell(1,1, "A"))
worksheet.add_cell(Cell(2,1, "B"))
worksheet.add_cell(Cell(1,2, "A"))
worksheet.add_cell(Cell(2,2, "B"))
c = Cell(1,5, "A")
c2 = Cell(1,5, "C")
worksheet.add_cell(c)
worksheet.replace_cell(c, c2)
csv = worksheet.to_csv()
val = """A;B;\nA;B;\n;;\n;;\nC;;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
def test_add_above(self):
worksheet = Worksheet("test.csv")
# Cell is in first row
c = Cell(1, 1, "A")
worksheet.add_cell(c)
worksheet.add_above(Cell(1,1, "B"), c)
csv = worksheet.to_csv()
val = """B;\nA;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
# Cell above is empty
worksheet = Worksheet("test.csv")
c = Cell(1, 2, "A")
worksheet.add_cell(c)
worksheet.add_above(Cell(1,1, "B"), c)
csv = worksheet.to_csv()
val = """B;\nA;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
# Cell above is reserved but below is not
worksheet = Worksheet("test.csv")
c = Cell(1, 2, "A")
worksheet.add_cell(c)
worksheet.add_cell(Cell(1, 1, "A"))
worksheet.add_above(Cell(1,2, "B"), c)
csv = worksheet.to_csv()
val = """A;\nB;\nA;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
# Cell above and below are reserved
worksheet = Worksheet("test.csv")
c = Cell(1, 2, "A")
worksheet.add_cell(c)
worksheet.add_cell(Cell(1, 1, "A"))
worksheet.add_cell(Cell(1, 3, "A"))
worksheet.add_above(Cell(1,2, "B"), c)
csv = worksheet.to_csv()
val = """A;\nB;\nA;\nA;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
# Cell above and below are reserved
worksheet = Worksheet("test.csv")
c = Cell(1, 2, "A")
worksheet.add_cell(c)
worksheet.add_cell(Cell(1, 1, "A"))
worksheet.add_cell(Cell(1, 3, "A"))
worksheet.add_cell(Cell(1, 4, "A"))
worksheet.add_cell(Cell(1, 6, "A"))
worksheet.add_above(Cell(1,2, "B"), c)
csv = worksheet.to_csv()
val = """A;\nB;\nA;\nA;\nA;\nA;\n"""
self.assertEqual(csv, val, "Should be: {}".format(val))
def test_remove_cell(self):
worksheet = Worksheet("test.csv")
c = Cell(1, 1, "A")
worksheet.add_cell(c)
c = worksheet.get_cell(1,1)
self.assertEqual(str(c), "A", "Should be A")
worksheet.remove_cell(c)
c = worksheet.get_cell(1, 1)
self.assertEqual(c, None, "Should be None")
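

if __name__ == '__main__':
    # Standard entry point so this test module can be run directly
    # (an addition; not part of the original file).
    import unittest
    unittest.main()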
|
lib/node_modules/@stdlib/blas/base/sswap/binding.gyp | ghalimi/stdlib | 3,428 | 12675212 |
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A `.gyp` file for building a Node.js native add-on.
#
# [1]: https://gyp.gsrc.io/docs/InputFormatReference.md
# [2]: https://gyp.gsrc.io/docs/UserDocumentation.md
{
# List of files to include in this file:
'includes': [
'./include.gypi',
],
# Define variables to be used throughout the configuration for all targets:
'variables': {
# Target name should match the add-on export name:
'addon_target_name%': 'addon',
# Fortran compiler (to override -Dfortran_compiler=<compiler>):
'fortran_compiler%': 'gfortran',
# Fortran compiler flags:
'fflags': [
# Specify the Fortran standard to which a program is expected to conform:
'-std=f95',
# Indicate that the layout is free-form source code:
'-ffree-form',
# Aggressive optimization:
'-O3',
# Enable commonly used warning options:
'-Wall',
# Warn if source code contains problematic language features:
'-Wextra',
# Warn if a procedure is called without an explicit interface:
'-Wimplicit-interface',
# Do not transform names of entities specified in Fortran source files by appending underscores (i.e., don't mangle names, thus allowing easier usage in C wrappers):
'-fno-underscoring',
# Warn if source code contains Fortran 95 extensions and C-language constructs:
'-pedantic',
# Compile but do not link (output is an object file):
'-c',
],
# Set variables based on the host OS:
'conditions': [
[
'OS=="win"',
{
# Define the object file suffix:
'obj': 'obj',
},
{
# Define the object file suffix:
'obj': 'o',
}
], # end condition (OS=="win")
], # end conditions
}, # end variables
# Define compile targets:
'targets': [
# Target to generate an add-on:
{
# The target name should match the add-on export name:
'target_name': '<(addon_target_name)',
# Define dependencies:
'dependencies': [],
# Define directories which contain relevant include headers:
'include_dirs': [
# Local include directory:
'<@(include_dirs)',
],
# List of source files:
'sources': [
'<@(src_files)',
],
# Settings which should be applied when a target's object files are used as linker input:
'link_settings': {
# Define libraries:
'libraries': [
'<@(libraries)',
],
# Define library directories:
'library_dirs': [
'<@(library_dirs)',
],
},
# C/C++ compiler flags:
'cflags': [
# Enable commonly used warning options:
'-Wall',
# Aggressive optimization:
'-O3',
],
# C specific compiler flags:
'cflags_c': [
# Specify the C standard to which a program is expected to conform:
'-std=c99',
],
# C++ specific compiler flags:
	'cflags_cc': [
# Specify the C++ standard to which a program is expected to conform:
'-std=c++11',
],
# Linker flags:
'ldflags': [],
# Apply conditions based on the host OS:
'conditions': [
[
'OS=="mac"',
{
# Linker flags:
'ldflags': [
'-undefined dynamic_lookup',
'-Wl,-no-pie',
'-Wl,-search_paths_first',
],
},
], # end condition (OS=="mac")
[
'OS!="win"',
{
# C/C++ flags:
'cflags': [
# Generate platform-independent code:
'-fPIC',
],
},
], # end condition (OS!="win")
], # end conditions
# Define custom build actions for particular inputs:
'rules': [
{
# Define a rule for processing Fortran files:
'extension': 'f',
# Define the pathnames to be used as inputs when performing processing:
'inputs': [
# Full path of the current input:
'<(RULE_INPUT_PATH)'
],
# Define the outputs produced during processing:
'outputs': [
# Store an output object file in a directory for placing intermediate results (only accessible within a single target):
'<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).<(obj)'
],
# Define the rule for compiling Fortran based on the host OS:
'conditions': [
[
'OS=="win"',
# Rule to compile Fortran on Windows:
{
'rule_name': 'compile_fortran_windows',
'message': 'Compiling Fortran file <(RULE_INPUT_PATH) on Windows...',
'process_outputs_as_sources': 0,
# Define the command-line invocation:
'action': [
'<(fortran_compiler)',
'<@(fflags)',
'<@(_inputs)',
'-o',
'<@(_outputs)',
],
},
# Rule to compile Fortran on non-Windows:
{
'rule_name': 'compile_fortran_linux',
'message': 'Compiling Fortran file <(RULE_INPUT_PATH) on Linux...',
'process_outputs_as_sources': 1,
# Define the command-line invocation:
'action': [
'<(fortran_compiler)',
'<@(fflags)',
'-fPIC', # generate platform-independent code
'<@(_inputs)',
'-o',
'<@(_outputs)',
],
}
], # end condition (OS=="win")
], # end conditions
}, # end rule (extension=="f")
], # end rules
}, # end target <(addon_target_name)
# Target to copy a generated add-on to a standard location:
{
'target_name': 'copy_addon',
# Declare that the output of this target is not linked:
'type': 'none',
# Define dependencies:
'dependencies': [
# Require that the add-on be generated before building this target:
'<(addon_target_name)',
],
# Define a list of actions:
'actions': [
{
'action_name': 'copy_addon',
'message': 'Copying addon...',
# Explicitly list the inputs in the command-line invocation below:
'inputs': [],
# Declare the expected outputs:
'outputs': [
'<(addon_output_dir)/<(addon_target_name).node',
],
# Define the command-line invocation:
'action': [
'cp',
'<(PRODUCT_DIR)/<(addon_target_name).node',
'<(addon_output_dir)/<(addon_target_name).node',
],
},
], # end actions
}, # end target copy_addon
], # end targets
}
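# Example invocation (a sketch; assumes `node-gyp` is installed and that
# `include.gypi` supplies the variables referenced above):
#
#   $ node-gyp configure build
#   $ node-gyp configure build --fortran_compiler=gfortran-10
#
# node-gyp typically forwards unrecognized `--key=value` options to gyp as
# `-Dkey=value` defines, which is how `fortran_compiler` can be overridden.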
|
yellowbrick/text/freqdist.py | mrtrkmn/yellowbrick | 3,662 | 12675237 | # yellowbrick.text.freqdist
# Implementations of frequency distributions for text visualization.
#
# Author: <NAME>
# Created: Mon Feb 20 12:38:20 2017 -0500
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: freqdist.py [67b2740] <EMAIL> $
"""
Implementations of frequency distributions for text visualization
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from operator import itemgetter
from yellowbrick.text.base import TextVisualizer
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Quick Method
##########################################################################
def freqdist(
features, X, y=None, ax=None, n=50, orient="h", color=None, show=True, **kwargs
):
"""Displays frequency distribution plot for text.
This helper function is a quick wrapper to utilize the FreqDist
Visualizer (Transformer) for one-off analysis.
Parameters
----------
features : list, default: None
The list of feature names from the vectorizer, ordered by index. E.g.
a lexicon that specifies the unique vocabulary of the corpus. This
can be typically fetched using the ``get_feature_names()`` method of
the transformer in Scikit-Learn.
X: ndarray or DataFrame of shape n x m
A matrix of n instances with m features. In the case of text,
X is a list of list of already preprocessed words
y: ndarray or Series of length n
An array or series of target or class values
ax : matplotlib axes, default: None
The axes to plot the figure on.
n: integer, default: 50
Top N tokens to be plotted.
orient : 'h' or 'v', default: 'h'
Specifies a horizontal or vertical bar chart.
color : string
Specify color for bars
show: bool, default: True
        If True, calls ``show()``, which in turn calls ``plt.show()``; however,
        you cannot call ``plt.savefig`` from this signature, nor
        ``clear_figure``. If False, simply calls ``finalize()``.
kwargs: dict
Keyword arguments passed to the super class.
Returns
-------
visualizer: FreqDistVisualizer
Returns the fitted, finalized visualizer
"""
# Instantiate the visualizer
viz = FreqDistVisualizer(features, ax=ax, n=n, orient=orient, color=color, **kwargs)
# Fit and transform the visualizer (calls draw)
    viz.fit(X, y)
viz.transform(X)
# Draw the final visualization
if show:
viz.show()
else:
viz.finalize()
# Return the visualizer object
return viz
class FrequencyVisualizer(TextVisualizer):
"""
A frequency distribution tells us the frequency of each vocabulary
item in the text. In general, it could count any kind of observable
event. It is a distribution because it tells us how the total
number of word tokens in the text are distributed across the
vocabulary items.
Parameters
----------
features : list, default: None
The list of feature names from the vectorizer, ordered by index. E.g.
a lexicon that specifies the unique vocabulary of the corpus. This
can be typically fetched using the ``get_feature_names()`` method of
the transformer in Scikit-Learn.
ax : matplotlib axes, default: None
The axes to plot the figure on.
n: integer, default: 50
Top N tokens to be plotted.
orient : 'h' or 'v', default: 'h'
Specifies a horizontal or vertical bar chart.
color : string
Specify color for bars
kwargs : dict
Pass any additional keyword arguments to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
def __init__(self, features, ax=None, n=50, orient="h", color=None, **kwargs):
        super(FrequencyVisualizer, self).__init__(ax=ax, **kwargs)
# Check that the orient is correct
orient = orient.lower().strip()
if orient not in {"h", "v"}:
raise YellowbrickValueError("Orientation must be 'h' or 'v'")
# Visualizer parameters
self.N = n
self.features = features
# Visual arguments
self.color = color
self.orient = orient
def count(self, X):
"""
Called from the fit method, this method gets all the
words from the corpus and their corresponding frequency
counts.
Parameters
----------
X : ndarray or masked ndarray
Pass in the matrix of vectorized documents, can be masked in
order to sum the word frequencies for only a subset of documents.
Returns
-------
counts : array
A vector containing the counts of all words in X (columns)
"""
# Sum on axis 0 (by columns), each column is a word
# Convert the matrix to an array
# Squeeze to remove the 1 dimension objects (like ravel)
return np.squeeze(np.asarray(X.sum(axis=0)))
def fit(self, X, y=None):
"""
The fit method is the primary drawing input for the frequency
distribution visualization. It requires vectorized lists of
documents and a list of features, which are the actual words
from the original corpus (needed to label the x-axis ticks).
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features representing the corpus
of frequency vectorized documents.
y : ndarray or DataFrame of shape n
Labels for the documents for conditional frequency distribution.
Notes
-----
.. note:: Text documents must be vectorized before ``fit()``.
"""
# Compute the conditional word frequency
if y is not None:
# Fit the frequencies
self.conditional_freqdist_ = {}
# Conditional frequency distribution
self.classes_ = [str(label) for label in set(y)]
for label in self.classes_:
self.conditional_freqdist_[label] = self.count(X[y == label])
else:
# No conditional frequencies
self.conditional_freqdist_ = None
# Frequency distribution of entire corpus.
self.freqdist_ = self.count(X)
self.sorted_ = self.freqdist_.argsort()[::-1] # Descending order
# Compute the number of words, vocab, and hapaxes
self.vocab_ = self.freqdist_.shape[0]
self.words_ = self.freqdist_.sum()
self.hapaxes_ = sum(1 for c in self.freqdist_ if c == 1)
# Draw and ensure that we return self
self.draw()
return self
def draw(self, **kwargs):
"""
Called from the fit method, this method creates the canvas and
draws the distribution plot on it.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Prepare the data
bins = np.arange(self.N)
words = [self.features[i] for i in self.sorted_[: self.N]]
freqs = {}
# Set up the bar plots
if self.conditional_freqdist_:
for label, values in sorted(
self.conditional_freqdist_.items(), key=itemgetter(0)
):
freqs[label] = [values[i] for i in self.sorted_[: self.N]]
else:
freqs["corpus"] = [self.freqdist_[i] for i in self.sorted_[: self.N]]
# Draw a horizontal barplot
if self.orient == "h":
# Add the barchart, stacking if necessary
for label, freq in freqs.items():
self.ax.barh(bins, freq, label=label, color=self.color, align="center")
# Set the y ticks to the words
self.ax.set_yticks(bins)
self.ax.set_yticklabels(words)
# Order the features from top to bottom on the y axis
self.ax.invert_yaxis()
# Turn off y grid lines and turn on x grid lines
self.ax.yaxis.grid(False)
self.ax.xaxis.grid(True)
# Draw a vertical barplot
elif self.orient == "v":
# Add the barchart, stacking if necessary
for label, freq in freqs.items():
self.ax.bar(bins, freq, label=label, color=self.color, align="edge")
# Set the y ticks to the words
self.ax.set_xticks(bins)
self.ax.set_xticklabels(words, rotation=90)
# Turn off x grid lines and turn on y grid lines
self.ax.yaxis.grid(True)
self.ax.xaxis.grid(False)
# Unknown state
else:
raise YellowbrickValueError("Orientation must be 'h' or 'v'")
return self.ax
def finalize(self, **kwargs):
"""
The finalize method executes any subclass-specific axes
finalization steps. The user calls show & show calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Set the title
self.set_title("Frequency Distribution of Top {} tokens".format(self.N))
# Create the vocab, count, and hapaxes labels
infolabel = "vocab: {:,}\nwords: {:,}\nhapax: {:,}".format(
self.vocab_, self.words_, self.hapaxes_
)
self.ax.text(
0.68,
0.97,
infolabel,
transform=self.ax.transAxes,
fontsize=9,
verticalalignment="top",
bbox={"boxstyle": "round", "facecolor": "white", "alpha": 0.8},
)
# Set the legend and the grid
self.ax.legend(loc="upper right", frameon=True)
# Backwards compatibility alias
FreqDistVisualizer = FrequencyVisualizer
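# Minimal usage sketch (an addition, not part of the original module; it
# assumes scikit-learn is installed and uses a stand-in corpus):
#
# from sklearn.feature_extraction.text import CountVectorizer
#
# corpus = ["the cat sat", "the cat ran away", "a dog barked at the cat"]
# vectorizer = CountVectorizer()
# docs = vectorizer.fit_transform(corpus)
# features = vectorizer.get_feature_names()
#
# visualizer = FrequencyVisualizer(features=features, orient="v")
# visualizer.fit(docs)
# visualizer.show()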
|
dataloaders/image_transforms.py | yoxu515/aot-benchmark | 105 | 12675242 |
import math
import warnings
import random
import numbers
import numpy as np
from PIL import Image
from collections.abc import Sequence
import torch
import torchvision.transforms.functional as TF
_pil_interpolation_to_str = {
Image.NEAREST: 'PIL.Image.NEAREST',
Image.BILINEAR: 'PIL.Image.BILINEAR',
Image.BICUBIC: 'PIL.Image.BICUBIC',
Image.LANCZOS: 'PIL.Image.LANCZOS',
Image.HAMMING: 'PIL.Image.HAMMING',
Image.BOX: 'PIL.Image.BOX',
}
def _get_image_size(img):
if TF._is_pil_image(img):
return img.size
elif isinstance(img, torch.Tensor) and img.dim() > 2:
return img.shape[-2:][::-1]
else:
raise TypeError("Unexpected type {}".format(type(img)))
class RandomHorizontalFlip(object):
"""Horizontal flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
img = TF.hflip(img)
mask = TF.hflip(mask)
return img, mask
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomAffine(object):
"""Random affine transformation of the image keeping center invariant
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or float or int, optional): Range of degrees to select from.
If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
            will be applied. Else if shear is a tuple or list of 2 values, a shear parallel to the x axis in the
range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values,
a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
Will not apply shear by default
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        fillcolor (tuple or int): Optional fill color (tuple for RGB images and int for grayscale) for the area
outside the transform in the output image.(Pillow>=5.0.0)
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(self,
degrees,
translate=None,
scale=None,
shear=None,
resample=False,
fillcolor=0):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError(
"If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
"degrees should be a list or tuple and it must be of length 2."
self.degrees = degrees
if translate is not None:
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"translate should be a list or tuple and it must be of length 2."
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError(
"translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
"scale should be a list or tuple and it must be of length 2."
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
if isinstance(shear, numbers.Number):
if shear < 0:
raise ValueError(
"If shear is a single number, it must be positive.")
self.shear = (-shear, shear)
else:
assert isinstance(shear, (tuple, list)) and \
(len(shear) == 2 or len(shear) == 4), \
"shear should be a list or tuple and it must be of length 2 or 4."
# X-Axis shear with [min, max]
if len(shear) == 2:
self.shear = [shear[0], shear[1], 0., 0.]
elif len(shear) == 4:
self.shear = [s for s in shear]
else:
self.shear = shear
self.resample = resample
self.fillcolor = fillcolor
@staticmethod
def get_params(degrees, translate, scale_ranges, shears, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = random.uniform(scale_ranges[0], scale_ranges[1])
else:
scale = 1.0
if shears is not None:
if len(shears) == 2:
shear = [random.uniform(shears[0], shears[1]), 0.]
elif len(shears) == 4:
shear = [
random.uniform(shears[0], shears[1]),
random.uniform(shears[2], shears[3])
]
else:
shear = 0.0
return angle, translations, scale, shear
def __call__(self, img, mask):
"""
img (PIL Image): Image to be transformed.
Returns:
PIL Image: Affine transformed image.
"""
ret = self.get_params(self.degrees, self.translate, self.scale,
self.shear, img.size)
img = TF.affine(img,
*ret,
resample=self.resample,
fillcolor=self.fillcolor)
mask = TF.affine(mask, *ret, resample=Image.NEAREST, fillcolor=0)
return img, mask
def __repr__(self):
s = '{name}(degrees={degrees}'
if self.translate is not None:
s += ', translate={translate}'
if self.scale is not None:
s += ', scale={scale}'
if self.shear is not None:
s += ', shear={shear}'
if self.resample > 0:
s += ', resample={resample}'
if self.fillcolor != 0:
s += ', fillcolor={fillcolor}'
s += ')'
d = dict(self.__dict__)
d['resample'] = _pil_interpolation_to_str[d['resample']]
return s.format(name=self.__class__.__name__, **d)
class RandomCrop(object):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is None, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively. If a sequence of length 2 is provided, it is used to
pad left/right, top/bottom borders, respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception. Since cropping is done
after padding, the padding seems to be done at a random offset.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self,
size,
padding=None,
pad_if_needed=False,
fill=0,
padding_mode='constant'):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = _get_image_size(img)
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
# if self.padding is not None:
# img = TF.pad(img, self.padding, self.fill, self.padding_mode)
#
# # pad the width if needed
# if self.pad_if_needed and img.size[0] < self.size[1]:
# img = TF.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)
# # pad the height if needed
# if self.pad_if_needed and img.size[1] < self.size[0]:
# img = TF.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)
i, j, h, w = self.get_params(img, self.size)
img = TF.crop(img, i, j, h, w)
mask = TF.crop(mask, i, j, h, w)
return img, mask
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(
self.size, self.padding)
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self,
size,
scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
interpolation=Image.BILINEAR):
if isinstance(size, (tuple, list)):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
width, height = _get_image_size(img)
area = height * width
for _ in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if (in_ratio < min(ratio)):
w = width
h = int(round(w / min(ratio)))
elif (in_ratio > max(ratio)):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
# print(i, j, h, w)
img = TF.resized_crop(img, i, j, h, w, self.size, self.interpolation)
mask = TF.resized_crop(mask, i, j, h, w, self.size, Image.NEAREST)
return img, mask
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(
tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(
tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
class ToOnehot(object):
"""To oneshot tensor
Args:
max_obj_n (float): Maximum number of the objects
"""
def __init__(self, max_obj_n, shuffle):
self.max_obj_n = max_obj_n
self.shuffle = shuffle
def __call__(self, mask, obj_list=None):
"""
Args:
mask (Mask in Numpy): Mask to be converted.
Returns:
Tensor: Converted mask in onehot format.
"""
new_mask = np.zeros((self.max_obj_n + 1, *mask.shape), np.uint8)
if not obj_list:
obj_list = list()
obj_max = mask.max() + 1
for i in range(1, obj_max):
tmp = (mask == i).astype(np.uint8)
if tmp.max() > 0:
obj_list.append(i)
if self.shuffle:
random.shuffle(obj_list)
obj_list = obj_list[:self.max_obj_n]
for i in range(len(obj_list)):
new_mask[i + 1] = (mask == obj_list[i]).astype(np.uint8)
new_mask[0] = 1 - np.sum(new_mask, axis=0)
return torch.from_numpy(new_mask), obj_list
def __repr__(self):
return self.__class__.__name__ + '(max_obj_n={})'.format(
self.max_obj_n)
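# Illustrative check of ToOnehot (an addition; the mask values are made up):
# channel 0 holds the background, channels 1..max_obj_n the (optionally
# shuffled) object instances, and `obj_list` records the original IDs.
#
# mask = np.array([[0, 1], [2, 2]], dtype=np.uint8)
# onehot, obj_list = ToOnehot(max_obj_n=10, shuffle=False)(mask)
# assert onehot.shape == (11, 2, 2) and obj_list == [1, 2]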
class Resize(torch.nn.Module):
"""Resize the input image to the given size.
The image can be a PIL Image or a torch Tensor, in which case it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size).
In torchscript mode padding as single int is not supported, use a tuple or
list of length 1: ``[size, ]``.
interpolation (int, optional): Desired interpolation enum defined by `filters`_.
Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``
and ``PIL.Image.BICUBIC`` are supported.
"""
def __init__(self, size, interpolation=Image.BILINEAR):
super().__init__()
if not isinstance(size, (int, Sequence)):
raise TypeError("Size should be int or sequence. Got {}".format(
type(size)))
if isinstance(size, Sequence) and len(size) not in (1, 2):
raise ValueError(
"If size is a sequence, it should have 1 or 2 values")
self.size = size
self.interpolation = interpolation
def forward(self, img, mask):
"""
Args:
img (PIL Image or Tensor): Image to be scaled.
Returns:
PIL Image or Tensor: Rescaled image.
"""
img = TF.resize(img, self.size, self.interpolation)
mask = TF.resize(mask, self.size, Image.NEAREST)
return img, mask
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(
self.size, interpolate_str)
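# Minimal composition sketch (an addition, not part of the original module):
# unlike torchvision transforms, these operate on (image, mask) pairs, so
# they are chained manually. The file paths and sizes below are placeholders.
#
# img = Image.open("frame.jpg").convert("RGB")
# mask = Image.open("frame_mask.png")
# for t in (RandomHorizontalFlip(0.5),
#           RandomResizedCrop(465, scale=(0.36, 1.0), ratio=(0.7, 1.3)),
#           Resize(465)):
#     img, mask = t(img, mask)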
|
wilds/datasets/rxrx1_dataset.py | caglasozen/wilds | 355 | 12675249 |
import os
from pathlib import Path
from collections import defaultdict
from PIL import Image
import pandas as pd
import numpy as np
import torch
from wilds.datasets.wilds_dataset import WILDSDataset
from wilds.common.grouper import CombinatorialGrouper
from wilds.common.metrics.all_metrics import Accuracy
class RxRx1Dataset(WILDSDataset):
"""
The RxRx1-WILDS dataset.
This is a modified version of the original RxRx1 dataset.
Supported `split_scheme`:
- 'official'
- 'mixed-to-test'
Input (x):
3-channel fluorescent microscopy images of cells
Label (y):
y is one of 1,139 classes:
- 0 to 1107: treatment siRNAs
- 1108 to 1137: positive control siRNAs
- 1138: negative control siRNA
Metadata:
Each image is annotated with its experiment, plate, well, and site, as
well as with the id of the siRNA the cells were perturbed with.
Website:
https://www.rxrx.ai/rxrx1
https://www.kaggle.com/c/recursion-cellular-image-classification
Original publication:
@inproceedings{taylor2019rxrx1,
author = {<NAME>. and <NAME>. and <NAME>. and <NAME>. and <NAME>.},
title = {RxRx1: An Image Set for Cellular Morphological Variation Across Many Experimental Batches.},
year = {2019},
booktitle = {International Conference on Learning Representations (ICLR)},
booksubtitle = {AI for Social Good Workshop},
url = {https://aiforsocialgood.github.io/iclr2019/accepted/track1/pdfs/30_aisg_iclr2019.pdf},
}
License:
This work is licensed under a Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International License. To view
a copy of this license, visit
http://creativecommons.org/licenses/by-nc-sa/4.0/.
"""
_dataset_name = 'rxrx1'
_versions_dict = {
'1.0': {
'download_url': 'https://worksheets.codalab.org/rest/bundles/0x6b7a05a3056a434498f0bb1252eb8440/contents/blob/',
'compressed_size': 7_413_123_845}
}
def __init__(self, version=None, root_dir='data', download=False,
split_scheme='official'):
self._version = version
self._split_scheme = split_scheme
if self._split_scheme not in ['official', 'mixed-to-test']:
raise ValueError(f'Split scheme {self._split_scheme} not recognized')
# path
self._data_dir = Path(self.initialize_data_dir(root_dir, download))
# Load splits
df = pd.read_csv(self._data_dir / 'metadata.csv')
# Splits
if split_scheme == 'official':
# Training: 33 experiments, 1 site per experiment (site 1)
# Validation: 4 experiments, 2 sites per experiment
# Test OOD: 14 experiments, 2 sites per experiment
# Test ID: Same 33 experiments from training set
# 1 site per experiment (site 2)
self._split_dict = {
'train': 0,
'val': 1,
'test': 2,
'id_test': 3
}
self._split_names = {
'train': 'Train',
'val': 'Validation (OOD)',
'test': 'Test (OOD)',
'id_test': 'Test (ID)'
}
self._split_array = df.dataset.apply(self._split_dict.get).values
# id_test set
mask = ((df.dataset == 'train') & (df.site == 2)).values
self._split_array[mask] = self.split_dict['id_test']
elif split_scheme == 'mixed-to-test':
# Training: 33 experiments total, 1 site per experiment (site 1)
# = 19 experiments from the orig training set (site 1)
# + 14 experiments from the orig test set (site 1)
# Validation: same as official split
# Test: 14 experiments from the orig test set,
# 1 site per experiment (site 2)
self._split_dict = {
'train': 0,
'val': 1,
'test': 2
}
self._split_names = {
'train': 'Train',
'val': 'Validation',
'test': 'Test'
}
self._split_array = df.dataset.apply(self._split_dict.get).values
# Use half of the training set (site 1) and discard site 2
mask_to_discard = ((df.dataset == 'train') & (df.site == 2)).values
self._split_array[mask_to_discard] = -1
# Take all site 1 in the test set and move it to train
mask_to_move = ((df.dataset == 'test') & (df.site == 1)).values
self._split_array[mask_to_move] = self._split_dict['train']
# For each of the test experiments, remove a train experiment of the same cell type
test_cell_type_counts = defaultdict(int)
test_experiments = df.loc[(df['dataset'] == 'test'), 'experiment'].unique()
for test_experiment in test_experiments:
test_cell_type = test_experiment.split('-')[0]
test_cell_type_counts[test_cell_type] += 1
# Training experiments are numbered starting from 1 and left-padded with 0s
experiments_to_discard = [
f'{cell_type}-{num:02}'
for cell_type, count in test_cell_type_counts.items()
for num in range(1, count+1)]
# Sanity check
train_experiments = df.loc[(df['dataset'] == 'train'), 'experiment'].unique()
for experiment in experiments_to_discard:
assert experiment in train_experiments
mask_to_discard = (df.experiment == experiment).values
self._split_array[mask_to_discard] = -1
else:
raise ValueError(f'Split scheme {self._split_scheme} not recognized')
# Filenames
def create_filepath(row):
filepath = os.path.join('images',
row.experiment,
f'Plate{row.plate}',
f'{row.well}_s{row.site}.png')
return filepath
self._input_array = df.apply(create_filepath, axis=1).values
# Labels
self._y_array = torch.tensor(df['sirna_id'].values)
self._n_classes = max(df['sirna_id']) + 1
self._y_size = 1
assert len(np.unique(df['sirna_id'])) == self._n_classes
# Convert experiment and well from strings to idxs
indexed_metadata = {}
self._metadata_map = {}
for key in ['cell_type', 'experiment', 'well']:
all_values = list(df[key].unique())
value_to_idx_map = {value: idx for idx, value in enumerate(all_values)}
value_idxs = [value_to_idx_map[value] for value in df[key].tolist()]
self._metadata_map[key] = all_values
indexed_metadata[key] = value_idxs
self._metadata_array = torch.tensor(
np.stack([indexed_metadata['cell_type'],
indexed_metadata['experiment'],
df['plate'].values,
indexed_metadata['well'],
df['site'].values,
self.y_array], axis=1)
)
self._metadata_fields = ['cell_type', 'experiment', 'plate', 'well', 'site', 'y']
# eval grouper
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=(['cell_type'])
)
super().__init__(root_dir, download, split_scheme)
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
"""
Computes all evaluation metrics.
Args:
- y_pred (Tensor): Predictions from a model. By default, they are
predicted labels (LongTensor). But they can also be other model
outputs such that prediction_fn(y_pred) are predicted labels.
- y_true (LongTensor): Ground-truth labels
- metadata (Tensor): Metadata
- prediction_fn (function): A function that turns y_pred into predicted labels
Output:
- results (dictionary): Dictionary of evaluation metrics
- results_str (str): String summarizing the evaluation metrics
"""
metric = Accuracy(prediction_fn=prediction_fn)
return self.standard_group_eval(
metric,
self._eval_grouper,
y_pred, y_true, metadata)
def get_input(self, idx):
"""
Args:
- idx (int): Index of a data point
Output:
- x (Tensor): Input features of the idx-th data point
"""
# All images are in the train folder
img_path = self.data_dir / self._input_array[idx]
img = Image.open(img_path)
return img
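# Minimal usage sketch (an addition; assumes the RxRx1-WILDS data has
# already been downloaded under ./data):
#
# dataset = RxRx1Dataset(version='1.0', root_dir='data', download=False,
#                        split_scheme='official')
# train_set = dataset.get_subset('train')
# img = dataset.get_input(0)   # PIL image for the first record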
|
tools/perf/core/results_dashboard_unittest.py | sarang-apps/darshan_browser | 575 | 12675254 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from mock import call
from core import results_dashboard
class ResultsDashboardTest(unittest.TestCase):
def setUp(self):
self.dummy_token_generator = lambda service_file, timeout: 'Ar<PASSWORD>'
self.perf_data = {'foo': 1, 'bar': 2}
self.dashboard_url = 'https://chromeperf.appspot.com'
def testRetryForSendResultRetryException(self):
def raise_retry_exception(
url, histogramset_json, token_generator_callback):
del url, histogramset_json # unused
del token_generator_callback # unused
raise results_dashboard.SendResultsRetryException('Should retry')
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson',
side_effect=raise_retry_exception) as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator, num_retries=5)
self.assertFalse(upload_result)
self.assertEqual(m.call_count, 5)
self.assertEqual(
sleep_mock.mock_calls,
[call(15), call(30), call(60), call(120), call(240)])
def testNoRetryForSendResultFatalException(self):
def raise_retry_exception(
url, histogramset_json, token_generator_callback):
del url, histogramset_json # unused
del token_generator_callback # unused
raise results_dashboard.SendResultsFatalException('Do not retry')
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson',
side_effect=raise_retry_exception) as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator,
num_retries=5)
self.assertFalse(upload_result)
self.assertEqual(m.call_count, 1)
self.assertFalse(sleep_mock.mock_calls)
def testNoRetryForSuccessfulSendResult(self):
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson') as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator,
num_retries=5)
self.assertTrue(upload_result)
self.assertEqual(m.call_count, 1)
self.assertFalse(sleep_mock.mock_calls)
  def testNoRetryAfterSuccessfulSendResult(self):
counter = [0]
def raise_retry_exception_first_two_times(
url, histogramset_json, token_generator_callback):
del url, histogramset_json # unused
del token_generator_callback # unused
counter[0] += 1
if counter[0] <= 2:
raise results_dashboard.SendResultsRetryException('Please retry')
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson',
side_effect=raise_retry_exception_first_two_times) as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator,
num_retries=5)
self.assertTrue(upload_result)
self.assertEqual(m.call_count, 3)
self.assertEqual(
sleep_mock.mock_calls, [call(15), call(30)])
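

if __name__ == '__main__':
  # Standard entry point so these tests can be run directly
  # (an addition; not part of the original file).
  unittest.main()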
|
cityscapesScripts/cityscapesscripts/preparation/json2instanceImg.py | zeroAska/TFSegmentation | 633 | 12675265 | #!/usr/bin/python
#
# Reads labels as polygons in JSON format and converts them to instance images,
# where each pixel has an ID that represents the ground truth class and the
# individual instance of that class.
#
# The pixel values encode both, class and the individual instance.
# The integer part of a division by 1000 of each ID provides the class ID,
# as described in labels.py. The remainder is the instance ID. If a certain
# annotation describes multiple instances, then the pixels have the regular
# ID of that class.
#
# Example:
# Let's say your labels.py assigns the ID 26 to the class 'car'.
# Then, the individual cars in an image get the IDs 26000, 26001, 26002, ... .
# A group of cars, where our annotators could not identify the individual
# instances anymore, is assigned to the ID 26.
#
# Note that not all classes distinguish instances (see labels.py for a full list).
# The classes without instance annotations are always directly encoded with
# their regular ID, e.g. 11 for 'building'.
#
# Usage: json2instanceImg.py [OPTIONS] <input json> <output image>
# Options:
# -h print a little help text
# -t use train IDs
#
# Can also be used by including as a module.
#
# Uses the mapping defined in 'labels.py'.
#
# See also createTrainIdInstanceImgs.py to apply the mapping to all annotations in Cityscapes.
#
# python imports
import os, sys, getopt
# Image processing
# Check if PIL is actually Pillow as expected
try:
from PIL import PILLOW_VERSION
except:
print("Please install the module 'Pillow' for image processing, e.g.")
print("pip install pillow")
sys.exit(-1)
try:
import PIL.Image as Image
import PIL.ImageDraw as ImageDraw
except:
print("Failed to import the image processing packages.")
sys.exit(-1)
# cityscapes imports
sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
from annotation import Annotation
from labels import labels, name2label
# Print the information
def printHelp():
print('{} [OPTIONS] inputJson outputImg'.format(os.path.basename(sys.argv[0])))
print('')
print(' Reads labels as polygons in JSON format and converts them to instance images,')
print(' where each pixel has an ID that represents the ground truth class and the')
print(' individual instance of that class.')
print('')
print(' The pixel values encode both, class and the individual instance.')
print(' The integer part of a division by 1000 of each ID provides the class ID,')
print(' as described in labels.py. The remainder is the instance ID. If a certain')
print(' annotation describes multiple instances, then the pixels have the regular')
print(' ID of that class.')
print('')
print(' Example:')
print(' Let\'s say your labels.py assigns the ID 26 to the class "car".')
print(' Then, the individual cars in an image get the IDs 26000, 26001, 26002, ... .')
print(' A group of cars, where our annotators could not identify the individual')
print(' instances anymore, is assigned to the ID 26.')
print('')
print(' Note that not all classes distinguish instances (see labels.py for a full list).')
print(' The classes without instance annotations are always directly encoded with')
print(' their regular ID, e.g. 11 for "building".')
print('')
print('Options:')
print(' -h Print this help')
print(' -t Use the "trainIDs" instead of the regular mapping. See "labels.py" for details.')
# Print an error message and quit
def printError(message):
print('ERROR: {}'.format(message))
print('')
print('USAGE:')
printHelp()
sys.exit(-1)
# Convert the given annotation to a label image
def createInstanceImage(annotation, encoding):
# the size of the image
size = ( annotation.imgWidth , annotation.imgHeight )
# the background
if encoding == "ids":
backgroundId = name2label['unlabeled'].id
elif encoding == "trainIds":
backgroundId = name2label['unlabeled'].trainId
else:
print("Unknown encoding '{}'".format(encoding))
return None
# this is the image that we want to create
instanceImg = Image.new("I", size, backgroundId)
# a drawer to draw into the image
drawer = ImageDraw.Draw( instanceImg )
# a dict where we keep track of the number of instances that
# we already saw of each class
nbInstances = {}
for labelTuple in labels:
if labelTuple.hasInstances:
nbInstances[labelTuple.name] = 0
# loop over all objects
for obj in annotation.objects:
label = obj.label
polygon = obj.polygon
# If the object is deleted, skip it
if obj.deleted:
continue
        # if the label is not known, but ends with a 'group' (e.g. cargroup)
        # try to remove the 'group' suffix and see if that works
# also we know that this polygon describes a group
isGroup = False
if ( not label in name2label ) and label.endswith('group'):
label = label[:-len('group')]
isGroup = True
if not label in name2label:
printError( "Label '{}' not known.".format(label) )
# the label tuple
labelTuple = name2label[label]
# get the class ID
if encoding == "ids":
id = labelTuple.id
elif encoding == "trainIds":
id = labelTuple.trainId
        # if this label distinguishes between individual instances,
        # make the id an instance ID
if labelTuple.hasInstances and not isGroup:
id = id * 1000 + nbInstances[label]
nbInstances[label] += 1
# If the ID is negative that polygon should not be drawn
if id < 0:
continue
try:
drawer.polygon( polygon, fill=id )
except:
print("Failed to draw polygon with label {} and id {}: {}".format(label,id,polygon))
raise
return instanceImg
# A method that does all the work
# inJson is the filename of the json file
# outImg is the filename of the instance image that is generated
# encoding can be set to
# - "ids" : classes are encoded using the regular label IDs
# - "trainIds" : classes are encoded using the training IDs
def json2instanceImg(inJson,outImg,encoding="ids"):
annotation = Annotation()
annotation.fromJsonFile(inJson)
instanceImg = createInstanceImage( annotation , encoding )
instanceImg.save( outImg )
# The main method, if you execute this script directly
# Reads the command line arguments and calls the method 'json2instanceImg'
def main(argv):
trainIds = False
try:
opts, args = getopt.getopt(argv,"ht")
except getopt.GetoptError:
printError( 'Invalid arguments' )
for opt, arg in opts:
if opt == '-h':
printHelp()
sys.exit(0)
elif opt == '-t':
trainIds = True
else:
printError( "Handling of argument '{}' not implementend".format(opt) )
if len(args) == 0:
printError( "Missing input json file" )
elif len(args) == 1:
printError( "Missing output image filename" )
elif len(args) > 2:
printError( "Too many arguments" )
inJson = args[0]
outImg = args[1]
if trainIds:
json2instanceImg( inJson , outImg , 'trainIds' )
else:
json2instanceImg( inJson , outImg )
# call the main method
if __name__ == "__main__":
main(sys.argv[1:])
|