max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M) |
---|---|---|---|---|
sdss/dr8.py | juandesant/astrometry.net | 460 | 12764195 | # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
from __future__ import absolute_import
import os
from astrometry.util.fits import fits_table
import numpy as np
import logging
import tempfile
import sys
py3 = (sys.version_info[0] >= 3)
if py3:
from urllib.parse import urljoin
else:
from urlparse import urljoin
fitsio = None
try:
import fitsio
except:
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
from .common import *
from .dr7 import *
from .yanny import *
from astrometry.util.run_command import run_command
class Frame(SdssFile):
def __init__(self, *args, **kwargs):
super(Frame, self).__init__(*args, **kwargs)
self.filetype = 'frame'
self.image = None
self.image_proxy = None
def getImageShape(self):
if self.image_proxy is not None:
# fitsio fits.FITSHDU object
H,W = self.image_proxy.get_info()['dims']
H = int(H)
W = int(W)
else:
H,W = self.image.shape
return H,W
def getImageSlice(self, slice):
if self.image_proxy is not None:
#print 'reading slice from image proxy:', slice
return self.image_proxy[slice]
return self.image[slice]
#def __str__(self):
def getImage(self):
if self.image is None and self.image_proxy is not None:
self.image = self.image_proxy.read()
self.image_proxy = None
return self.image
def getHeader(self):
return self.header
def getAsTrans(self):
return self.astrans
def getCalibVec(self):
return self.calib
def getSkyAt(self, x, y):
skyim = self.sky
(sh,sw) = skyim.shape
if sw != 256:
skyim = skyim.T
(sh,sw) = skyim.shape
xi = np.round(self.skyxi[x]).astype(int)
yi = np.round(self.skyyi[y]).astype(int)
yi = np.minimum(yi,sh-1)
return skyim[yi,xi]
def getSky(self):
skyim = self.sky
(sh,sw) = skyim.shape
if sw != 256:
skyim = skyim.T
(sh,sw) = skyim.shape
xi = np.round(self.skyxi).astype(int)
yi = np.round(self.skyyi).astype(int)
yi = np.minimum(yi,sh-1)
assert(all(xi >= 0) and all(xi < sw))
assert(all(yi >= 0) and all(yi < sh))
XI,YI = np.meshgrid(xi, yi)
# Nearest-neighbour interpolation -- we just need this
# for approximate invvar.
bigsky = skyim[YI,XI]
return bigsky
def getInvvar(self, psfield, bandnum, ignoreSourceFlux=False,
sourceFlux=None, constantSkyAt=None):
'''
If constantSkyAt = (x,y) (INTEGERS!),
returns a scalar (rather than a np.array) of the invvar at that point.
NOTE that this does NOT blank out masked pixels; use, eg,
fpM = sdss.readFpM(run, camcol, field, bandname)
for plane in [ 'INTERP', 'SATUR', 'CR', 'GHOST' ]:
fpM.setMaskedPixels(plane, invvar, 0, roi=roi)
'''
calibvec = self.getCalibVec()
if constantSkyAt:
x,y = constantSkyAt
calibvec = calibvec[x]
sky = self.getSkyAt(x,y)
if ignoreSourceFlux:
dn = sky
elif sourceFlux is None:
image = self.getImage()
dn = (image[y,x] / calibvec) + sky
else:
dn = (sourceFlux / calibvec) + sky
else:
bigsky = self.getSky()
if ignoreSourceFlux:
dn = bigsky
elif sourceFlux is None:
image = self.getImage()
dn = (image / calibvec) + bigsky
else:
dn = (sourceFlux / calibvec) + bigsky
gain = psfield.getGain(bandnum)
# Note, "darkvar" includes dark current *and* read noise.
darkvar = psfield.getDarkVariance(bandnum)
dnvar = (dn / gain) + darkvar
invvar = 1./(dnvar * calibvec**2)
return invvar
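# A minimal usage sketch (the run/camcol/field numbers are made up, and
# readPsField is assumed to exist on the sdss object alongside the readFpM
# helper mentioned in the docstring above):
#   sdss = DR8()
#   frame = sdss.readFrame(756, 3, 243, 'r')
#   psfield = sdss.readPsField(756, 3, 243)
#   invvar = frame.getInvvar(psfield, 2)  # 2 = index of the r band in ugriz
# The noise model is var(DN) = DN/gain + darkvar in raw counts, converted to
# calibrated (nanomaggy) units via calib**2, hence
# invvar = 1 / ((DN/gain + darkvar) * calib**2).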
class PhotoObj(SdssFile):
def __init__(self, *args, **kwargs):
super(PhotoObj, self).__init__(*args, **kwargs)
self.filetype = 'photoObj'
self.table = None
def getTable(self):
return self.table
class runlist(object):
pass
class DR8(DR7):
_lup_to_mag_b = np.array([1.4e-10, 0.9e-10, 1.2e-10, 1.8e-10, 7.4e-10])
_two_lup_to_mag_b = 2.*_lup_to_mag_b
_ln_lup_to_mag_b = np.log(_lup_to_mag_b)
'''
From
http://data.sdss3.org/datamodel/glossary.html#asinh
m = -(2.5/ln(10))*[asinh(f/2b)+ln(b)].
The parameter b is a softening parameter measured in maggies, and
for the [u, g, r, i, z] bands has the values
[1.4, 0.9, 1.2, 1.8, 7.4] x 1e-10
'''
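# Worked example (illustrative numbers): for the r band b = 1.2e-10, so an
# r-band luptitude of 20 gives f = 2*1.2e-10 * sinh(20/-1.0857... - ln(1.2e-10)),
# which is about 1.0e-8 maggies, and -2.5*log10(1.0e-8) = 20.0; i.e. for bright
# sources the asinh ("luptitude") scale and the conventional log magnitude agree.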
@staticmethod
def luptitude_to_mag(Lmag, bandnum, badmag=25):
if bandnum is None:
# assume Lmag is broadcastable to a 5-vector
twobi = DR8._two_lup_to_mag_b
lnbi = DR8._ln_lup_to_mag_b
else:
twobi = DR8._two_lup_to_mag_b[bandnum]
lnbi = DR8._ln_lup_to_mag_b[bandnum]
# MAGIC -1.08.... = -2.5/np.log(10.)
f = np.sinh(Lmag/-1.0857362047581294 - lnbi) * twobi
# prevent log10(-flux)
mag = np.zeros_like(f) + badmag
I = (f > 0)
mag[I] = -2.5 * np.log10(f[I])
return mag
@staticmethod
def nmgy_to_mag(nmgy):
return 22.5 - 2.5 * np.log10(nmgy)
def getDRNumber(self):
return 8
def useLocalTree(self, photoObjs=None, resolve=None):
if photoObjs is None:
photoObjs = os.environ['BOSS_PHOTOOBJ']
redux = os.environ['PHOTO_REDUX']
if resolve is None:
resolve = os.environ['PHOTO_RESOLVE']
self.filenames.update(
photoObj = os.path.join(photoObjs, '%(rerun)s', '%(run)i', '%(camcol)i',
'photoObj-%(run)06i-%(camcol)i-%(field)04i.fits'),
frame = os.path.join(photoObjs, 'frames', '%(rerun)s', '%(run)i', '%(camcol)i',
'frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2'),
photoField = os.path.join(photoObjs, '%(rerun)s', '%(run)i',
'photoField-%(run)06i-%(camcol)i.fits'),
psField = os.path.join(redux, '%(rerun)s', '%(run)i', 'objcs', '%(camcol)i',
'psField-%(run)06i-%(camcol)i-%(field)04i.fit'),
fpM = os.path.join(redux, '%(rerun)s', '%(run)i', 'objcs', '%(camcol)i',
'fpM-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit.gz'),
window_flist = os.path.join(resolve, 'window_flist.fits'),
)
# use fpM files compressed
try:
del self.dassuffix['fpM']
except:
pass
try:
del self.processcmds['fpM']
except:
pass
def saveUnzippedFiles(self, basedir):
self.unzip_dir = basedir
def setFitsioReadBZ2(self, to=True):
'''
Call this if fitsio supports reading .bz2 files directly.
'''
self.readBz2 = to
def __init__(self, **kwargs):
'''
Useful kwargs:
basedir : (string) - local directory where data will be stored.
'''
DR7.__init__(self, **kwargs)
self.unzip_dir = None
self.readBz2 = False
# Local filenames
self.filenames.update({
'frame': 'frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2',
'idR': 'idR-%(run)06i-%(band)s-%(camcol)i-%(field)04i.fits',
'photoObj': 'photoObj-%(run)06i-%(camcol)i-%(field)04i.fits',
'photoField': 'photoField-%(run)06i-%(camcol)i.fits',
'window_flist': 'window_flist.fits',
})
# URLs on DAS server
self.dasurl = 'http://data.sdss3.org/sas/dr8/groups/boss/'
self.daspaths = {
'idR': 'photo/data/%(run)i/fields/%(camcol)i/idR-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit.Z',
'fpObjc': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/fpObjc-%(run)06i-%(camcol)i-%(field)04i.fit',
# DR8 frames are no longer available on DAS.
'frame': '/sas/dr9/boss/photoObj/frames/%(rerun)s/%(run)i/%(camcol)i/frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2',
#'frame': 'photoObj/frames/%(rerun)s/%(run)i/%(camcol)i/frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2',
'photoObj': 'photoObj/%(rerun)s/%(run)i/%(camcol)i/photoObj-%(run)06i-%(camcol)i-%(field)04i.fits',
'psField': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/psField-%(run)06i-%(camcol)i-%(field)04i.fit',
'photoField': 'photoObj/%(rerun)s/%(run)i/photoField-%(run)06i-%(camcol)i.fits',
'fpM': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/fpM-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit.gz',
'fpAtlas': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/fpAtlas-%(run)06i-%(camcol)i-%(field)04i.fit',
'window_flist': 'resolve/2010-05-23/window_flist.fits',
}
self.dassuffix = {
#'frame': '.bz2',
'fpM': '.gz',
'idR': '.Z',
}
# called in retrieve()
self.processcmds = {
'fpM': 'gunzip -cd %(input)s > %(output)s',
'idR': 'gunzip -cd %(input)s > %(output)s',
}
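# The 'frame' post-process command below decompresses into a mktemp file and
# only moves it onto the final name if bunzip2 succeeds, so an interrupted
# decompression never leaves a truncated frame file in place.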
self.postprocesscmds = {
'frame': 'TMPFILE=$(mktemp %(output)s.tmp.XXXXXX) && bunzip2 -cd %(input)s > $TMPFILE && mv $TMPFILE %(output)s',
}
y = read_yanny(self._get_runlist_filename())
y = y['RUNDATA']
rl = runlist()
rl.run = np.array(y['run'])
rl.startfield = np.array(y['startfield'])
rl.endfield = np.array(y['endfield'])
rl.rerun = np.array(y['rerun'])
#print 'Rerun type:', type(rl.rerun), rl.rerun.dtype
self.runlist = rl
self.logger = logging.getLogger('astrometry.sdss.DR%i' %
self.getDRNumber())
#self.logger.debug('debug test')
#self.logger.info('info test')
#self.logger.warning('warning test')
def _unzip_frame(self, fn, run, camcol):
if self.readBz2:
return None,True
# No, PJM reported that pyfits failed on SDSS frame*.bz2 files
# if not fitsio:
# # pyfits can read .bz2
# return None,True
tempfn = None
keep = False
filetype = 'frame'
if not(filetype in self.postprocesscmds and fn.endswith('.bz2')):
return None,True
cmd = self.postprocesscmds[filetype]
if self.unzip_dir is not None:
udir = os.path.join(self.unzip_dir, '%i' % run, '%i' % camcol)
if not os.path.exists(udir):
try:
os.makedirs(udir)
except:
pass
tempfn = os.path.join(udir, os.path.basename(fn).replace('.bz2', ''))
#print 'Checking', tempfn
if os.path.exists(tempfn):
print('File exists:', tempfn)
return tempfn,True
else:
print('Saving to', tempfn)
keep = True
else:
fid,tempfn = tempfile.mkstemp()
os.close(fid)
cmd = cmd % dict(input = fn, output = tempfn)
self.logger.debug('cmd: %s' % cmd)
print('command:', cmd)
(rtn,out,err) = run_command(cmd)
if rtn:
print('Command failed: command', cmd)
print('Output:', out)
print('Error:', err)
print('Return val:', rtn)
raise RuntimeError('Command failed (return val %i): %s' % (rtn, cmd))
print(out)
print(err)
return tempfn,keep
def _get_runlist_filename(self):
return self._get_data_file('runList-dr8.par')
# read a data file describing the DR8 data
def _get_data_file(self, fn):
return os.path.join(os.path.dirname(__file__), fn)
def get_rerun(self, run, field=None):
I = (self.runlist.run == run)
if field is not None:
I *= (self.runlist.startfield <= field) * (self.runlist.endfield >= field)
I = np.flatnonzero(I)
reruns = np.unique(self.runlist.rerun[I])
#print 'Run', run, '-> reruns:', reruns
if len(reruns) == 0:
return None
return reruns[-1]
def get_url(self, filetype, run, camcol, field, band=None, rerun=None):
if rerun is None:
rerun = self.get_rerun(run, field)
path = self.daspaths[filetype]
url = urljoin(self.dasurl, path % dict(
run=run, camcol=camcol, field=field, rerun=rerun, band=band))
return url
def retrieve(self, filetype, run, camcol, field=None, band=None, skipExisting=True,
tempsuffix='.tmp', rerun=None):
outfn = self.getPath(filetype, run, camcol, field, band,
rerun=rerun)
print('Checking for file', outfn)
if outfn is None:
return None
if skipExisting and os.path.exists(outfn):
#print('Exists')
return outfn
outdir = os.path.dirname(outfn)
if not os.path.exists(outdir):
try:
os.makedirs(outdir)
except:
pass
url = self.get_url(filetype, run, camcol, field, band=band, rerun=rerun)
#print 'Did not find file:', outfn
print('Retrieving from URL:', url)
if self.curl:
cmd = "curl -o '%(outfn)s' '%(url)s'"
else:
cmd = "wget --continue -nv -O %(outfn)s '%(url)s'"
# suffix to add to the downloaded filename
suff = self.dassuffix.get(filetype, '')
oo = outfn + suff
if tempsuffix is not None:
oo += tempsuffix
cmd = cmd % dict(outfn=oo, url=url)
self.logger.debug('cmd: %s' % cmd)
(rtn,out,err) = run_command(cmd)
if rtn:
print('Command failed: command', cmd)
print('Output:', out)
print('Error:', err)
print('Return val:', rtn)
return None
if tempsuffix is not None:
#
self.logger.debug('Renaming %s to %s' % (oo, outfn+suff))
os.rename(oo, outfn + suff)
if filetype in self.processcmds:
cmd = self.processcmds[filetype]
cmd = cmd % dict(input = outfn + suff, output = outfn)
self.logger.debug('cmd: %s' % cmd)
(rtn,out,err) = run_command(cmd)
if rtn:
print('Command failed: command', cmd)
print('Output:', out)
print('Error:', err)
print('Return val:', rtn)
return None
return outfn
def readPhotoObj(self, run, camcol, field, filename=None):
obj = PhotoObj(run, camcol, field)
if filename is None:
fn = self.getPath('photoObj', run, camcol, field)
else:
fn = filename
obj.table = fits_table(fn)
return obj
def readFrame(self, run, camcol, field, band, filename=None):
'''
http://data.sdss3.org/datamodel/files/BOSS_PHOTOOBJ/frames/RERUN/RUN/CAMCOL/frame.html
'''
f = Frame(run, camcol, field, band)
# ...
if filename is None:
fn = self.getPath('frame', run, camcol, field, band)
else:
fn = filename
# optionally bunzip2 the frame file.
tempfn,keep = self._unzip_frame(fn, run, camcol)
if tempfn is not None:
fn = tempfn
if fitsio:
print('Frame filename', fn)
# eg /clusterfs/riemann/raid006/dr10/boss/photoObj/frames/301/2825/1/frame-u-002825-1-0126.fits.bz2
F = fitsio.FITS(fn, lower=True)
f.header = F[0].read_header()
# Allow later reading of just the pixels of interest.
f.image_proxy = F[0]
f.calib = F[1].read()
sky = F[2].read_columns(['allsky', 'xinterp', 'yinterp'])
#print 'sky', type(sky)
# ... supposed to be a recarray, but it's not...
f.sky, f.skyxi, f.skyyi = sky.tolist()[0]
tab = fits_table(F[3].read())
if not keep and tempfn is not None:
os.remove(tempfn)
else:
p = pyfits.open(fn)
# in nanomaggies
f.image = p[0].data
f.header = p[0].header
# converts counts -> nanomaggies
f.calib = p[1].data
# table with val,x,y -- binned; use bilinear interpolation to expand
sky = p[2].data
# table -- asTrans structure
tab = fits_table(p[3].data)
f.sky = sky.field('allsky')[0]
f.skyxi = sky.field('xinterp')[0]
f.skyyi = sky.field('yinterp')[0]
#print 'sky shape', f.sky.shape
if len(f.sky.shape) != 2:
f.sky = f.sky.reshape((-1, 256))
assert(len(tab) == 1)
tab = tab[0]
# DR7 has NODE, INCL in radians...
f.astrans = AsTrans(run, camcol, field, band,
node=np.deg2rad(tab.node), incl=np.deg2rad(tab.incl),
astrans=tab, cut_to_band=False)
return f
|
migrations/alembic/versions/2a8981379eba_add_locales_table.py | bonomali/parrot | 143 | 12764211 | <filename>migrations/alembic/versions/2a8981379eba_add_locales_table.py
"""add locales table
Revision ID: 2a8981379eba
Revises: 438b950c4c9a
Create Date: 2018-01-10 16:21:39.595957
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2a8981379eba'
down_revision = '438b950c4c9a'
branch_labels = None
depends_on = None
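# Note: uuid_generate_v4() comes from the "uuid-ossp" extension and the hstore
# column type from the "hstore" extension; both are assumed to already be
# installed in the target PostgreSQL database before this migration runs.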
def upgrade():
op.execute("""
CREATE TABLE locales (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
ident TEXT NOT NULL,
language TEXT NOT NULL,
country TEXT NOT NULL,
pairs hstore,
project_id UUID REFERENCES projects (id) ON UPDATE CASCADE ON DELETE CASCADE,
UNIQUE (ident, project_id)
);
""")
def downgrade():
op.execute("""
DROP TABLE locales;
""")
|
058-bmp-stegano/taski_zrodla/mapofbits/level0/testlevel0.py | gynvael/stream | 152 | 12764232 | <gh_stars>100-1000
#!/usr/bin/python
import os, sys
from struct import pack, unpack
PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(PATH)
import settings
def rb(d, off):
return d[off]
def rw(d, off):
return unpack("<H", str(d[off:off+2]))[0]
def rd(d, off):
return unpack("<I", str(d[off:off+4]))[0]
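# LSB extraction: bfOffBits (the dword at offset 0x0A of the BMP header) gives
# the start of the pixel array; from there the script collects the least
# significant bit of every byte, packs each group of 8 bits (LSB first) into a
# character (zero bytes are dropped), and checks whether settings.MSG appears
# in the recovered text.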
with open(PATH + "/level0.bmp", "rb") as f:
d = bytearray(f.read())
data_offset = rd(d, 0x0A) # bfOffBits
i = data_offset
msg_bit = 0
byte = 0
msg = ""
while i < len(d):
byte |= (d[i] & 1) << msg_bit
msg_bit += 1
if msg_bit == 8:
if byte != 0:
msg += chr(byte)
byte = 0
msg_bit = 0
i += 1
if str(settings.MSG) in msg:
print "Level 0: OK"
sys.exit(0)
print "Level 0: FAIL"
sys.exit(1)
|
spikeextractors/extractors/mdaextractors/__init__.py | zekearneodo/spikeextractors | 145 | 12764286 | from .mdaextractors import MdaRecordingExtractor, MdaSortingExtractor
|
release/stubs.min/Rhino/DocObjects/__init___parts/PointCloudObject.py | htlcnn/ironpython-stubs | 182 | 12764287 | <reponame>htlcnn/ironpython-stubs<filename>release/stubs.min/Rhino/DocObjects/__init___parts/PointCloudObject.py
class PointCloudObject(RhinoObject):
# no doc
def DuplicatePointCloudGeometry(self):
""" DuplicatePointCloudGeometry(self: PointCloudObject) -> PointCloud """
pass
PointCloudGeometry=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: PointCloudGeometry(self: PointCloudObject) -> PointCloud
"""
|
Filters/Modeling/Testing/Python/TestImprintFilter.py | cclauss/VTK | 1,755 | 12764301 | <reponame>cclauss/VTK<gh_stars>1000+
#!/usr/bin/env python
import vtk
# A simpler imprint test. One plane
# imprints a second plane.
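# vtkImprintFilter embosses the imprint mesh (plane2) onto the target mesh
# (plane1), splitting the target's cells along the imprint boundary; the left
# viewport below shows the two input planes, the right one the imprinted result.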
# Control the resolution of the test
res = 2
# Create the RenderWindow, Renderer
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren )
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Use two plane sources:
# one plane imprints on the other plane.
#
plane1 = vtk.vtkPlaneSource()
plane1.SetXResolution(3)
plane1.SetYResolution(1)
plane1.SetOrigin(0,0,0)
plane1.SetPoint1(2,0,0)
plane1.SetPoint2(0,1,0)
p1Mapper = vtk.vtkPolyDataMapper()
p1Mapper.SetInputConnection(plane1.GetOutputPort())
p1Actor = vtk.vtkActor()
p1Actor.SetMapper(p1Mapper)
p1Actor.GetProperty().SetRepresentationToSurface()
plane2 = vtk.vtkPlaneSource()
plane2.SetXResolution(res)
plane2.SetYResolution(res)
plane2.SetOrigin(-0.25,0.25,0)
plane2.SetPoint1(1.5,0.25,0)
plane2.SetPoint2(-0.25,0.75,0)
p2Mapper = vtk.vtkPolyDataMapper()
p2Mapper.SetInputConnection(plane2.GetOutputPort())
p2Actor = vtk.vtkActor()
p2Actor.SetMapper(p2Mapper)
p2Actor.GetProperty().SetRepresentationToSurface()
# Now imprint
imp = vtk.vtkImprintFilter()
imp.SetTargetConnection(plane1.GetOutputPort())
imp.SetImprintConnection(plane2.GetOutputPort())
imp.SetTolerance(0.00001)
imp.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(imp.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetRepresentationToSurface()
# Create the RenderWindow, Renderer
#
ren1 = vtk.vtkRenderer()
ren1.SetBackground(0.1,0.2,0.4)
ren1.SetViewport(0,0,0.5,1.0)
ren2 = vtk.vtkRenderer()
ren2.SetBackground(0.1,0.2,0.4)
ren2.SetViewport(0.5,0,1.0,1.0)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.AddRenderer( ren2 )
renWin.SetSize(600,300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(p1Actor)
ren1.AddActor(p2Actor)
ren1.ResetCamera()
ren2.AddActor(actor)
ren2.SetActiveCamera(ren1.GetActiveCamera())
renWin.Render()
iren.Start()
|
delft/textClassification/preprocess.py | tantikristanti/delft | 333 | 12764312 | import itertools
import regex as re
import numpy as np
# seed is fixed for reproducibility
np.random.seed(7)
from tensorflow import set_random_seed
set_random_seed(7)
from unidecode import unidecode
from delft.utilities.Tokenizer import tokenizeAndFilterSimple
from delft.utilities.bert.run_classifier_delft import DataProcessor
import delft.utilities.bert.tokenization as tokenization
from delft.utilities.bert.run_classifier_delft import InputExample
special_character_removal = re.compile(r'[^A-Za-z\.\-\?\!\,\#\@\% ]',re.IGNORECASE)
def to_vector_single(text, embeddings, maxlen=300):
"""
Given a string, tokenize it, then convert it to a sequence of word embedding
vectors with the provided embeddings, introducing <PAD> and <UNK> padding token
vector when appropriate
"""
tokens = tokenizeAndFilterSimple(clean_text(text))
window = tokens[-maxlen:]
# TBD: use better initializers (uniform, etc.)
x = np.zeros((maxlen, embeddings.embed_size), )
# TBD: padding should be left and which vector do we use for padding?
# and what about masking padding later for RNN?
for i, word in enumerate(window):
x[i,:] = embeddings.get_word_vector(word).astype('float32')
return x
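# Illustrative call (the embeddings object and its embed_size are assumptions,
# e.g. a loaded 300-d fastText or GloVe wrapper):
#   x = to_vector_single("This looks great!", embeddings, maxlen=300)
# x has shape (300, embeddings.embed_size); texts longer than maxlen keep only
# their last maxlen tokens, shorter ones are zero-padded at the end.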
def to_vector_elmo(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on ELMo contextualized embeddings
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
subtokens.append(local_tokens)
return embeddings.get_sentence_vector_only_ELMo(subtokens)
"""
if use_token_dump:
return embeddings.get_sentence_vector_ELMo_with_token_dump(tokens)
"""
def to_vector_bert(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on the BERT contextualized embeddings, introducing
padding token when appropriate
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
subtokens.append(local_tokens)
vector = embeddings.get_sentence_vector_only_BERT(subtokens)
return vector
def to_vector_simple_with_elmo(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on the concatenation of the provided static embeddings and
the ELMo contextualized embeddings, introducing <PAD> and <UNK>
padding token vector when appropriate
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
if len(tokens[i]) < maxlen:
for i in range(0, maxlen-len(tokens[i])):
local_tokens.append(" ")
subtokens.append(local_tokens)
return embeddings.get_sentence_vector_with_ELMo(subtokens)
def to_vector_simple_with_bert(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on the concatenation of the provided static embeddings and
the BERT contextualized embeddings, introducing padding token vector
when appropriate
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
if len(tokens[i]) < maxlen:
for i in range(0, maxlen-len(tokens[i])):
local_tokens.append(" ")
subtokens.append(local_tokens)
return embeddings.get_sentence_vector_with_BERT(subtokens)
def clean_text(text):
x_ascii = unidecode(text)
x_clean = special_character_removal.sub('',x_ascii)
return x_clean
def lower(word):
return word.lower()
def normalize_num(word):
return re.sub(r'[0-90123456789]', r'0', word)
class BERT_classifier_processor(DataProcessor):
"""
BERT data processor for classification
"""
def __init__(self, labels=None, x_train=None, y_train=None, x_test=None, y_test=None):
self.list_classes = labels
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
def get_train_examples(self, x_train=None, y_train=None):
"""See base class."""
if x_train is not None:
self.x_train = x_train
if y_train is not None:
self.y_train = y_train
examples, _ = self.create_examples(self.x_train, self.y_train)
return examples
def get_labels(self):
"""See base class."""
return self.list_classes
def get_test_examples(self, x_test=None, y_test=None):
"""See base class."""
if x_test is not None:
self.x_test = x_test
if y_test is not None:
self.y_test = y_test
examples, results = self.create_examples(self.x_test, self.y_test)
return examples, results
def create_examples(self, x_s, y_s=None):
examples = []
valid_classes = np.zeros((y_s.shape[0],len(self.list_classes)))
accumul = 0
for (i, x) in enumerate(x_s):
y = y_s[i]
guid = i
text_a = tokenization.convert_to_unicode(x)
#the_class = self._rewrite_classes(y, i)
ind, = np.where(y == 1)
the_class = self.list_classes[ind[0]]
if the_class is None:
#print(text_a)
continue
if the_class not in self.list_classes:
#the_class = 'other'
continue
label = tokenization.convert_to_unicode(the_class)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
valid_classes[accumul] = y
accumul += 1
return examples, valid_classes
def create_inputs(self, x_s, dummy_label='dummy'):
examples = []
# dummy label to avoid breaking the bert base code
label = tokenization.convert_to_unicode(dummy_label)
for (i, x) in enumerate(x_s):
guid = i
text_a = tokenization.convert_to_unicode(x)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
|
docs/snippets/bus0_sync.py | husqvarnagroup/pynng | 174 | 12764334 | <reponame>husqvarnagroup/pynng
import time
from pynng import Bus0, Timeout
address = 'tcp://127.0.0.1:13131'
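# In a BUS topology a message is delivered only to a socket's direct peers
# (one hop, no forwarding): s1 and s2 both dial s0, so each can talk to s0
# but not to one another, which is why the s2.recv() below times out.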
with Bus0(listen=address, recv_timeout=100) as s0, \
Bus0(dial=address, recv_timeout=100) as s1, \
Bus0(dial=address, recv_timeout=100) as s2:
# let all connections be established
time.sleep(0.05)
s0.send(b'hello buddies')
s1.recv() # prints b'hello buddies'
s2.recv() # prints b'hello buddies'
s1.send(b'hi s0')
print(s0.recv()) # prints b'hi s0'
# s2 is not directly connected to s1.
try:
s2.recv()
assert False, "this is never reached"
except Timeout:
print('s2 is not connected directly to s1!')
|
applications/pytorch/cnns/tests/test_train.py | payoto/graphcore_examples | 260 | 12764337 | <gh_stars>100-1000
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import gc
import pytest
import shutil
import torch
import poptorch
import popart
from poptorch.optim import SGD
import import_helper
from train import TrainingModelWithLoss
import datasets
import models
from utils import get_train_accuracy, get_test_accuracy, run_script
@pytest.mark.ipus(1)
def test_recomputation_checkpoints():
gc.collect()
# run the model with and without recomputation
def train(model, recompute):
input_data = torch.ones(1, 3, 224, 224)
labels_data = torch.ones(1).long()
opts = poptorch.Options()
if recompute:
opts._Popart.set("autoRecomputation", int(popart.RecomputationType.Standard))
opts.outputMode(poptorch.OutputMode.All)
opts.randomSeed(0)
opts.Training.gradientAccumulation(1)
opts.Precision.enableStochasticRounding(False)
model_with_loss = TrainingModelWithLoss(model)
optimizer = SGD(model_with_loss.parameters(), lr=0.01, momentum=0., use_combined_accum=True)
training_model = poptorch.trainingModel(model_with_loss, opts, optimizer=optimizer)
predictions = []
for _ in range(3):
preds, _, _ = training_model(input_data, labels_data)
predictions.append(preds)
training_model.destroy()
return predictions
class Options():
def __init__(self):
self.model = "resnet18"
self.precision = "16.16"
self.norm_type = "group"
self.norm_eps = 1e-5
self.norm_num_groups = 32
self.normalization_location = "none"
self.pipeline_splits = []
self.eight_bit_io = False
self.num_io_tiles = 0
args = Options()
torch.manual_seed(0)
model = models.get_model(args, datasets.datasets_info["cifar10"], pretrained=True)
no_recompute_predictions = train(model, False)
args.recompute_checkpoints = ["conv", "norm"]
torch.manual_seed(0)
model = models.get_model(args, datasets.datasets_info["cifar10"], pretrained=True)
recompute_predictions = train(model, True)
for pred1, pred2 in zip(no_recompute_predictions, recompute_predictions):
assert torch.allclose(pred1, pred2, atol=1e-04)
@pytest.mark.ipus(4)
def test_replicas_reduction():
gc.collect()
def common_opts():
opts = poptorch.Options()
opts.Training.accumulationAndReplicationReductionType(poptorch.ReductionType.Mean)
opts.outputMode(poptorch.OutputMode.All)
opts.randomSeed(0)
opts.Training.gradientAccumulation(1)
return opts
def run_model(opts):
input_data = torch.ones(4, 1)
labels_data = torch.ones(4).long()
model = torch.nn.Linear(1, 2, bias=False)
model_with_loss = TrainingModelWithLoss(model, 0.1)
optimizer = SGD(model_with_loss.parameters(), lr=0.1, momentum=0., use_combined_accum=True)
training_model = poptorch.trainingModel(model_with_loss, opts, optimizer=optimizer)
for _ in range(3):
preds, loss, _ = training_model(input_data, labels_data)
# return the weights of the model
return list(model_with_loss.model.named_parameters())[0][1], loss
# Single replica
opts = common_opts()
opts.replicationFactor(1)
single_replica_weights, single_replica_loss = run_model(opts)
# 4 replica running
gc.collect()
opts = common_opts()
opts.replicationFactor(4)
replicated_weights, replicated_loss = run_model(opts)
assert torch.allclose(single_replica_weights, replicated_weights, atol=1e-05)
assert torch.allclose(single_replica_loss, replicated_loss, atol=1e-05)
@pytest.mark.ipus(1)
def test_generated():
gc.collect()
run_script("train/train.py", f"--data generated --model resnet18 --epoch 1 --precision 16.16 --validation-mode none --optimizer sgd_combined --lr 0.001 --gradient-accumulation 128 --batch-size 1 --dataloader-worker 4 --seed 0")
@pytest.mark.ipus(1)
@pytest.mark.parametrize("precision", ["16.16", "32.32"])
def test_synthetic(precision):
gc.collect()
run_script("train/train.py", f"--data synthetic --model resnet18 --epoch 1 --precision {precision} --validation-mode none --optimizer sgd_combined --lr 0.001 --gradient-accumulation 64 --batch-size 1 --dataloader-worker 4 --seed 0")
@pytest.mark.parametrize("label_smoothing", [0.0, 1.0, 0.1, 0.5])
def test_loss_function(label_smoothing):
torch.manual_seed(0)
inp = torch.rand(4, 10) * 10 - 5 # create random input between [-5,5)
label = torch.ones(4).long()
# calculate the ground truth
log_pred = torch.nn.functional.log_softmax(inp, dim=-1)
ground_truth = - torch.mean(torch.sum((label_smoothing / 10.0) * log_pred, dim=1) + (1.0 - label_smoothing) * log_pred[:, 1])
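# i.e. the cross-entropy against the smoothed target distribution that puts
# label_smoothing/10 on each of the 10 classes plus (1 - label_smoothing) on
# the true class (class 1 here): loss = -sum(target * log_softmax(inp)).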
model_with_loss = TrainingModelWithLoss(lambda x: x, label_smoothing=label_smoothing)
_, loss, _ = model_with_loss(inp, label)
assert torch.allclose(ground_truth, loss, atol=1e-05)
@pytest.mark.ipus(1)
def test_mixup():
gc.collect()
run_script("train/train.py", f"--mixup-alpha 0.1 --data generated --model resnet18 --epoch 1 --validation-mode none --optimizer sgd_combined --batch-size 3 --dataloader-worker 1 --seed 0")
@pytest.mark.ipus(1)
def test_cutmix():
gc.collect()
run_script("train/train.py", f"--cutmix-lambda-low 0.0 --cutmix-lambda-high 1.0 --data generated --model resnet18 --epoch 1 --validation-mode none --optimizer sgd_combined --batch-size 3 --dataloader-worker 1 --seed 0")
class TestSynthetic:
@pytest.mark.ipus(2)
@pytest.mark.ipu_version("ipu2")
def test_synthetic_mixed_precision(self):
gc.collect()
run_script("train/train.py", "--data synthetic --model resnet18 --epoch 1 --precision 16.32 --pipeline-splits layer4/0 "
"--validation-mode none --optimizer sgd_combined --lr 0.001 --gradient-accumulation 64 --dataloader-worker 4 --seed 0")
class TestTrainCIFAR10:
@pytest.mark.ipus(1)
def test_single_ipu_validation_groupnorm(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --model resnet18 --epoch 3 --precision 16.16 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 "
"--norm-type group --norm-num-groups 32 --enable-stochastic-rounding --dataloader-worker 4 --seed 0")
acc = get_test_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(1)
@pytest.mark.ipu_version("ipu2")
def test_single_ipu_validation_batchnorm(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --model resnet18 --epoch 2 --precision 16.16 --optimizer sgd_combined --lr 0.1 --gradient-accumulation 8 "
"--norm-type batch --batch-size 16 --enable-stochastic-rounding --dataloader-worker 4 --seed 0")
acc = get_test_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(2)
def test_replicas(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --model resnet18 --epoch 2 --replicas 2 --precision 16.16 --validation-mode none --optimizer sgd_combined --lr 0.1 "
"--gradient-accumulation 32 --enable-stochastic-rounding --dataloader-worker 4 --seed 0")
acc = get_train_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(2)
def test_efficient_net(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --epoch 4 --model efficientnet-b0 --precision 16.32 --validation-mode none --optimizer sgd_combined --lr 0.1 --gradient-accumulation 64 "
"--pipeline-splits blocks/2/1 --norm-type group --norm-num-groups 4 --enable-stochastic-rounding --dataloader-worker 4 --seed 0")
acc = get_train_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(1)
def test_full_precision(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --epoch 2 --model resnet18 --precision 32.32 --optimizer sgd_combined --lr 0.1 --batch-size 1 --gradient-accumulation 64 --dataloader-worker 4 --seed 0")
acc = get_train_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(2)
@pytest.mark.ipu_version("ipu2")
def test_mixed_precision(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --epoch 2 --model resnet18 --pipeline-splits layer4/0 --precision 16.32 --optimizer sgd_combined "
"--lr 0.1 --batch-size 1 --gradient-accumulation 64 --validation-mode none --dataloader-worker 4 --seed 0")
acc = get_train_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(1)
def test_single_ipu_mobilenet_v3_small_validation_batchnorm(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --model mobilenet-v3-small --epoch 3 --precision 16.32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 "
"--norm-type batch --enable-stochastic-rounding --dataloader-worker 4 --seed 0")
acc = get_test_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(1)
@pytest.mark.ipu_version("ipu2")
def test_single_ipu_mobilenet_v3_large_validation_batchnorm(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --model mobilenet-v3-large --epoch 3 --precision 16.32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 "
"--norm-type batch --enable-stochastic-rounding --dataloader-worker 4 --seed 0")
acc = get_test_accuracy(out)
assert acc > 15.0
@pytest.mark.ipus(1)
@pytest.mark.ipu_version("ipu2")
def test_half_resolution_training(self):
gc.collect()
out = run_script("train/train.py", "--data cifar10 --model resnet18 --epoch 1 --precision 16.32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 "
"--norm-type batch --dataloader-worker 4 --half-res-training --fine-tune-epoch 1 --fine-tune-first-trainable-layer layer3 --weight-avg-strategy exponential "
"--weight-avg-exp-decay 0.97 --checkpoint-path test_half_resolution_training --seed 0")
acc = get_test_accuracy(out)
assert acc > 15.0
# remove folder
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
shutil.rmtree(os.path.join(parent_dir, "test_half_resolution_training"))
class TestRestoreCheckpoint:
@pytest.mark.ipus(1)
def test_restore_train(self):
gc.collect()
# create a model
out = run_script("train/train.py", "--data cifar10 --epoch 2 --model resnet18 --precision 16.16 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 --seed 0 "
"--validation-mode none --norm-type group --norm-num-groups 32 --checkpoint-path restore_test_path_test_restore_train --dataloader-worker 4")
saved_train_acc = get_train_accuracy(out)
# reload the model
out = run_script("train/restore.py", "--checkpoint-path restore_test_path_test_restore_train/resnet18_cifar10_1.pt")
acc = get_train_accuracy(out)
assert acc > saved_train_acc - 5.0
# remove folder
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
shutil.rmtree(os.path.join(parent_dir, "restore_test_path_test_restore_train"))
@pytest.mark.ipus(1)
def test_validation(self):
gc.collect()
# create a model
out = run_script("train/train.py", "--data cifar10 --epoch 1 --model resnet18 --precision 16.16 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 --seed 0 "
"--norm-type group --norm-num-groups 32 --checkpoint-path restore_test_path_test_validation --dataloader-worker 4")
saved_test_acc = get_test_accuracy(out)
# validate the model
out = run_script("train/validate.py", "--checkpoint-path restore_test_path_test_validation/resnet18_cifar10_1.pt")
acc = get_test_accuracy(out)
# close enough
assert abs(saved_test_acc - acc) < 0.01
# remove folder
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
shutil.rmtree(os.path.join(parent_dir, "restore_test_path_test_validation"))
@pytest.mark.ipus(1)
def test_weight_avg(self):
gc.collect()
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
out1 = run_script("train/train.py", "--data cifar10 --epoch 3 --model resnet18 --precision 16.16 --weight-avg-strategy mean --norm-type group "
"--norm-num-groups 32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 --checkpoint-path restore_test_path_weight_avg "
"--weight-avg-N 2 --dataloader-worker 4 --seed 0")
os.remove(os.path.join(parent_dir, "restore_test_path_weight_avg", "resnet18_cifar10_3_averaged.pt"))
_ = run_script("train/weight_avg.py", "--checkpoint-path restore_test_path_weight_avg --weight-avg-strategy mean --weight-avg-N 2")
out2 = run_script("train/validate.py", "--checkpoint-path restore_test_path_weight_avg/resnet18_cifar10_3_averaged.pt")
acc1 = get_test_accuracy(out1)
acc2 = get_test_accuracy(out2)
assert acc1 > 15
assert acc1 == acc2
shutil.rmtree(os.path.join(parent_dir, "restore_test_path_weight_avg"))
@pytest.mark.ipus(1)
def test_mixup_cutmix_validation_weight_avg(self):
# Only make sure that checkpoint loading works with mixup model wrapper.
gc.collect()
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
run_script("train/train.py", f"--mixup-alpha 0.1 --cutmix-lambda-low 0.2 --cutmix-lambda-high 0.8 --data generated --checkpoint-path test_mixup_cutmix_validation_weight_avg --weight-avg-strategy exponential --weight-avg-exp-decay 0.97 --model resnet18 --epoch 2 --validation-mode after --optimizer sgd_combined --batch-size 4 --dataloader-worker 1 --seed 0")
shutil.rmtree(os.path.join(parent_dir, "test_mixup_cutmix_validation_weight_avg"))
@pytest.mark.ipus(1)
@pytest.mark.ipu_version("ipu2")
def test_mixup_cutmix_restore_train(self):
# Only make sure that checkpoint loading works with mixup model wrapper.
gc.collect()
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
run_script("train/train.py", f"--mixup-alpha 0.1 --cutmix-lambda-low 0.5 --cutmix-lambda-high 0.5 --data generated --checkpoint-path test_mixup_cutmix_restore_train --model resnet18 --epoch 2 --validation-mode none --optimizer sgd_combined --batch-size 4 --dataloader-worker 1 --seed 0")
run_script("train/restore.py", "--checkpoint-path test_mixup_cutmix_restore_train/resnet18_generated_1.pt")
shutil.rmtree(os.path.join(parent_dir, "test_mixup_cutmix_restore_train"))
|
doc/sphinxext/numpydoc/__init__.py | vimalromeo/pandas | 303 | 12764344 | from __future__ import division, absolute_import, print_function
from .numpydoc import setup
|
python/047 Permutations II.py | allandproust/leetcode-share | 156 | 12764349 | <filename>python/047 Permutations II.py
'''
Given a collection of numbers that might contain duplicates, return all possible unique permutations.
For example,
[1,1,2] have the following unique permutations:
[1,1,2], [1,2,1], and [2,1,1].
'''
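# Approach: sort the input first, then backtrack; at each recursion depth skip
# a candidate that equals its left neighbour in the remaining (sorted) list, so
# every distinct value is placed at most once per position and no duplicate
# permutation is generated. Worst case (all elements distinct) is O(n * n!).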
class Solution(object):
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
nums.sort()
self.get_permute([], nums, result)
return result
def get_permute(self, current, num, result):
if not num:
result.append(current + [])
return
for i, v in enumerate(num):
if i - 1 >= 0 and num[i] == num[i - 1]:
continue
current.append(num[i])
self.get_permute(current, num[:i] + num[i + 1:], result)
current.pop()
if __name__ == "__main__":
assert Solution().permuteUnique([1, 2, 1]) == [[1, 1, 2], [1, 2, 1], [2, 1, 1]] |
tests/test_basis_evaluation.py | jiduque/scikit-fda | 147 | 12764386 |
from skfda.representation.basis import (
FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor)
import unittest
import numpy as np
class TestBasisEvaluationFourier(unittest.TestCase):
def test_evaluation_simple_fourier(self):
"""Test the evaluation of FDataBasis"""
fourier = Fourier(domain_range=(0, 2), n_basis=5)
coefficients = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 2, 11)
# Results in R package fda
res = np.array([[8.71, 9.66, 1.84, -4.71, -2.80, 2.71,
2.45, -3.82, -6.66, -0.30, 8.71],
[22.24, 26.48, 10.57, -4.95, -3.58, 6.24,
5.31, -7.69, -13.32, 1.13, 22.24]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(2), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
def test_evaluation_point_fourier(self):
"""Test the evaluation of a single point FDataBasis"""
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
# Test different ways of call f with a point
res = np.array([-0.903918107989282, -0.267163981229459]
).reshape((2, 1, 1)).round(4)
np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
np.testing.assert_array_almost_equal(f(0.5).round(4), res)
np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
# Problematic case: should it be accepted or not?
#np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
def test_evaluation_derivative_fourier(self):
"""Test the evaluation of the derivative of a FDataBasis"""
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([4.34138447771721, -7.09352774867064, 2.75214327095343,
4.34138447771721, 6.52573053999253,
-4.81336320468984, -1.7123673353027, 6.52573053999253]
).reshape((2, 4, 1)).round(3)
f_deriv = f.derivative()
np.testing.assert_array_almost_equal(
f_deriv(t).round(3), res
)
def test_evaluation_grid_fourier(self):
"""Test the evaluation of FDataBasis with the grid option set to
true. Nothing should change, since the domain dimension is 1, but the
evaluation should still accept the grid argument formats. """
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
res_test = f(t)
# Different ways to pass the axes
np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
np.testing.assert_array_almost_equal(f(np.atleast_2d(t), grid=True),
res_test)
# Number of axes different from the domain dimension (1)
with np.testing.assert_raises(ValueError):
f((t, t), grid=True)
def test_evaluation_composed_fourier(self):
"""Test the evaluation of FDataBasis with a matrix of times instead of
a list of times """
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
# Test the same result as the standard evaluation
np.testing.assert_array_almost_equal(f([1]),
f([[1], [1]],
aligned=False))
np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
aligned=False))
# Different evaluation times
t_multiple = [[0, 0.5], [0.2, 0.7]]
np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
f(t_multiple,
aligned=False)[0])
np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
f(t_multiple,
aligned=False)[1])
def test_domain_in_list_fourier(self):
"""Test the evaluation of FDataBasis"""
for fourier in (Fourier(domain_range=[(0, 1)], n_basis=3),
Fourier(domain_range=((0, 1),), n_basis=3),
Fourier(domain_range=np.array((0, 1)), n_basis=3),
Fourier(domain_range=np.array([(0, 1)]), n_basis=3)):
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([0.905, 0.147, -1.05, 0.905, 0.303,
0.775, -1.024, 0.303]).reshape((2, 4, 1))
np.testing.assert_array_almost_equal(f(t).round(3), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
class TestBasisEvaluationBSpline(unittest.TestCase):
def test_evaluation_simple_bspline(self):
"""Test the evaluation of FDataBasis"""
bspline = BSpline(domain_range=(0, 2), n_basis=5)
coefficients = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]])
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 2, 11)
# Results in R package fda
res = np.array([[1, 1.54, 1.99, 2.37, 2.7, 3,
3.3, 3.63, 4.01, 4.46, 5],
[6, 6.54, 6.99, 7.37, 7.7, 8,
8.3, 8.63, 9.01, 9.46, 10]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(2), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
def test_evaluation_point_bspline(self):
"""Test the evaluation of a single point FDataBasis"""
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
# Test different ways of call f with a point
res = np.array([[0.5696], [0.3104]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
np.testing.assert_array_almost_equal(f(0.5).round(4), res)
np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
# Problematic case: should it be accepted or not?
#np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
def test_evaluation_derivative_bspline(self):
"""Test the evaluation of the derivative of a FDataBasis"""
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
f_deriv = f.derivative()
np.testing.assert_array_almost_equal(
f_deriv(t).round(3),
np.array([[2.927, 0.453, -1.229, 0.6],
[4.3, -1.599, 1.016, -2.52]])[..., np.newaxis]
)
def test_evaluation_grid_bspline(self):
"""Test the evaluation of FDataBasis with the grid option set to
true. Nothing should change, since the domain dimension is 1, but the
evaluation should still accept the grid argument formats. """
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
res_test = f(t)
# Different ways to pass the axes
np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
np.testing.assert_array_almost_equal(
f(np.atleast_2d(t), grid=True), res_test)
# Number of axes different from the domain dimension (1)
with np.testing.assert_raises(ValueError):
f((t, t), grid=True)
def test_evaluation_composed_bspline(self):
"""Test the evaluation of FDataBasis with a matrix of times instead of
a list of times """
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
# Test the same result as the standard evaluation
np.testing.assert_array_almost_equal(f([1]),
f([[1], [1]],
aligned=False))
np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
aligned=False))
# Different evaluation times
t_multiple = [[0, 0.5], [0.2, 0.7]]
np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
f(t_multiple,
aligned=False)[0])
np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
f(t_multiple,
aligned=False)[1])
def test_domain_in_list_bspline(self):
"""Test the evaluation of FDataBasis"""
for bspline in (BSpline(domain_range=[(0, 1)], n_basis=5, order=3),
BSpline(domain_range=((0, 1),), n_basis=5, order=3),
BSpline(domain_range=np.array((0, 1)), n_basis=5,
order=3),
BSpline(domain_range=np.array([(0, 1)]), n_basis=5,
order=3)
):
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([[0.001, 0.564, 0.435, 0.33],
[0.018, 0.468, 0.371, 0.12]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(3), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
# Check error
with np.testing.assert_raises(ValueError):
BSpline(domain_range=[(0, 1), (0, 1)])
class TestBasisEvaluationMonomial(unittest.TestCase):
def test_evaluation_simple_monomial(self):
"""Test the evaluation of FDataBasis"""
monomial = Monomial(domain_range=(0, 2), n_basis=5)
coefficients = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]])
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 2, 11)
# Results in R package fda
res = np.array(
[[1.00, 1.56, 2.66, 4.79, 8.62, 15.00,
25.00, 39.86, 61.03, 90.14, 129.00],
[6.00, 7.81, 10.91, 16.32, 25.42, 40.00,
62.21, 94.59, 140.08, 201.98, 284.00]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(2), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
def test_evaluation_point_monomial(self):
"""Test the evaluation of a single point FDataBasis"""
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
# Test different ways of call f with a point
res = np.array([[2.75], [1.525]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
np.testing.assert_array_almost_equal(f(0.5).round(4), res)
np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
# Problematic case: should it be accepted or not?
#np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
def test_evaluation_derivative_monomial(self):
"""Test the evaluation of the derivative of a FDataBasis"""
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
f_deriv = f.derivative()
np.testing.assert_array_almost_equal(
f_deriv(t).round(3),
np.array([[2., 4., 6., 8.],
[1.4, 2.267, 3.133, 4.]])[..., np.newaxis]
)
def test_evaluation_grid_monomial(self):
"""Test the evaluation of FDataBasis with the grid option set to
true. Nothing should change, since the domain dimension is 1, but the
evaluation should still accept the grid argument formats. """
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
res_test = f(t)
# Different ways to pass the axes
np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
np.testing.assert_array_almost_equal(
f(np.atleast_2d(t), grid=True), res_test)
# Number of axes different from the domain dimension (1)
with np.testing.assert_raises(ValueError):
f((t, t), grid=True)
def test_evaluation_composed_monomial(self):
"""Test the evaluation of FDataBasis with a matrix of times instead of
a list of times """
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
# Test the same result as the standard evaluation
np.testing.assert_array_almost_equal(f([1]),
f([[1], [1]],
aligned=False))
np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
aligned=False))
# Different evaluation times
t_multiple = [[0, 0.5], [0.2, 0.7]]
np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
f(t_multiple,
aligned=False)[0])
np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
f(t_multiple,
aligned=False)[1])
def test_domain_in_list_monomial(self):
"""Test the evaluation of FDataBasis"""
for monomial in (Monomial(domain_range=[(0, 1)], n_basis=3),
Monomial(domain_range=((0, 1),), n_basis=3),
Monomial(domain_range=np.array((0, 1)), n_basis=3),
Monomial(domain_range=np.array([(0, 1)]), n_basis=3)):
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([[1., 2., 3.667, 6.],
[0.5, 1.111, 2.011, 3.2]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(3), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
class TestBasisEvaluationVectorValued(unittest.TestCase):
def test_vector_valued_constant(self):
basis_first = Constant()
basis_second = Constant()
basis = VectorValued([basis_first, basis_second])
fd = FDataBasis(basis=basis, coefficients=[[1, 2], [3, 4]])
self.assertEqual(fd.dim_codomain, 2)
res = np.array([[[1, 2]], [[3, 4]]])
np.testing.assert_allclose(fd(0), res)
def test_vector_valued_constant_monomial(self):
basis_first = Constant(domain_range=(0, 5))
basis_second = Monomial(n_basis=3, domain_range=(0, 5))
basis = VectorValued([basis_first, basis_second])
fd = FDataBasis(basis=basis, coefficients=[
[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(fd.dim_codomain, 2)
np.testing.assert_allclose(fd.domain_range[0], (0, 5))
res = np.array([[[1, 2], [1, 9], [1, 24]],
[[3, 4], [3, 15], [3, 38]]])
np.testing.assert_allclose(fd([0, 1, 2]), res)
class TestBasisEvaluationTensor(unittest.TestCase):
def test_tensor_monomial_constant(self):
basis = Tensor([Monomial(n_basis=2), Constant()])
fd = FDataBasis(basis=basis, coefficients=[1, 1])
self.assertEqual(fd.dim_domain, 2)
self.assertEqual(fd.dim_codomain, 1)
np.testing.assert_allclose(fd([0., 0.]), [[[1.]]])
np.testing.assert_allclose(fd([0.5, 0.5]), [[[1.5]]])
np.testing.assert_allclose(
fd([(0., 0.), (0.5, 0.5)]), [[[1.0], [1.5]]])
fd_grid = fd.to_grid()
fd2 = fd_grid.to_basis(basis)
np.testing.assert_allclose(fd.coefficients, fd2.coefficients)
if __name__ == '__main__':
print()
unittest.main()
|
python/iceberg/api/expressions/expression_parser.py | moulimukherjee/incubator-iceberg | 2,161 | 12764392 | <reponame>moulimukherjee/incubator-iceberg
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Derived from the SimpleSQL Parser example in pyparsing, retrofitted to just handle the
# where clause predicates
# https://github.com/pyparsing/pyparsing/blob/master/examples/simpleSQL.py
import logging
from pyparsing import (
alphanums,
alphas,
CaselessKeyword,
delimitedList,
Group,
infixNotation,
oneOf,
opAssoc,
pyparsing_common as ppc,
quotedString,
Word
)
_logger = logging.getLogger(__name__)
AND, OR, IN, IS, NOT, NULL, BETWEEN = map(
CaselessKeyword, "and or in is not null between".split()
)
NOT_NULL = NOT + NULL
ident = Word(alphas, alphanums + "_$").setName("identifier")
columnName = delimitedList(ident, ".", combine=True).setName("column name")
binop = oneOf("= == != < > >= <= eq ne lt le gt ge <>", caseless=False)
realNum = ppc.real()
intNum = ppc.signed_integer()
columnRval = (realNum
| intNum
| quotedString
| columnName) # need to add support for alg expressions
whereCondition = Group(
(columnName + binop + columnRval)
| (columnName + IN + Group("(" + delimitedList(columnRval) + ")"))
| (columnName + IS + (NULL | NOT_NULL))
| (columnName + BETWEEN + columnRval + AND + columnRval)
)
whereExpression = infixNotation(
Group(whereCondition
| NOT + whereCondition
| NOT + Group('(' + whereCondition + ')')
| NOT + columnName),
[(NOT, 1, opAssoc.LEFT), (AND, 2, opAssoc.LEFT), (OR, 2, opAssoc.LEFT), (IS, 2, opAssoc.LEFT)],
)
op_map = {"=": "eq",
"==": "eq",
"eq": "eq",
">": "gt",
"gt": "gt",
">=": "gte",
"gte": "gte",
"<": "lt",
"lt": "lt",
"<=": "lte",
"lte": "lte",
"!": "not",
"not": "not",
"!=": "neq",
"<>": "neq",
"neq": "neq",
"||": "or",
"or": "or",
"&&": "and",
"and": "and",
"in": "in",
"between": "between",
"is": "is"}
def get_expr_tree(tokens):
if isinstance(tokens, (str, int)):
return tokens
if len(tokens) > 1:
if (tokens[0] == "not"):
return {"not": get_expr_tree(tokens[1])}
if (tokens[0] == "(" and tokens[-1] == ")"):
return get_expr_tree(tokens[1:-1])
else:
return get_expr_tree(tokens[0])
op = op_map[tokens[1]]
if op == "in":
return {'in': [get_expr_tree(tokens[0]), [token for token in tokens[2][1:-1]]]}
elif op == "between":
return {'and': [{"gte": [get_expr_tree(tokens[0]), tokens[2]]},
{"lte": [get_expr_tree(tokens[0]), tokens[4]]}]}
elif op == "is":
if tokens[2] == 'null':
return {"missing": tokens[0]}
else:
return {"exists": tokens[0]}
if len(tokens) > 3:
binary_tuples = get_expr_tree(tokens[2:])
else:
binary_tuples = get_expr_tree(tokens[2])
return {op: [get_expr_tree(tokens[0]),
binary_tuples]}
def get_expr(node, expr_map):
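    """Map the dict tree produced by get_expr_tree onto expressions using the callables in expr_map."""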
if isinstance(node, dict):
for i in node.keys():
op = i
if op == "literal":
return node["literal"]
        mapped_op = expr_map.get(op)
        if mapped_op is None:
            raise RuntimeError("no mapping for op: %s" % op)
        if len(mapped_op) == 1:
            mapped_op = mapped_op[0]
if op in ("not", "exists", "missing"):
return mapped_op(get_expr(node[op], expr_map))
return mapped_op(*get_expr(node[op], expr_map))
elif isinstance(node, (list, tuple)):
return (get_expr(item, expr_map) for item in node)
elif isinstance(node, (str, int, float)):
return node
else:
        raise RuntimeError("unknown node type: %s" % node)
def parse_expr_string(predicate_string, expr_map):
from pyparsing import ParseException
try:
expr = whereExpression.parseString(predicate_string, parseAll=True)
expr = get_expr_tree(expr)
return get_expr(expr, expr_map)
except ParseException as pe:
_logger.error("Error parsing string expression into iceberg expression: %s" % str(pe))
raise
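# Example (sketch): turning a predicate string into the intermediate dict tree.
# The exact nesting depends on pyparsing's grouping, but a predicate such as
# "id >= 100 and name is not null" is expected to reduce to something like
# {'and': [{'gte': ['id', 100]}, {'exists': 'name'}]} before get_expr maps it
# onto iceberg expressions via expr_map.
if __name__ == "__main__":
    _tokens = whereExpression.parseString("id >= 100 and name is not null", parseAll=True)
    print(get_expr_tree(_tokens))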
|
rnnmorph/test_predictor.py | AliceCyber/rnnmorph | 124 | 12764404 | import unittest
import logging
import sys
import numpy as np
import nltk
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
from rnnmorph.predictor import RNNMorphPredictor
from rnnmorph.tag_genres import tag_ru_files, tag_en_files
class TestLSTMMorph(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
nltk.download("wordnet")
nltk.download('averaged_perceptron_tagger')
nltk.download('universal_tagset')
cls.en_predictor = RNNMorphPredictor(language="en")
cls.ru_predictor = RNNMorphPredictor(language="ru")
def __assert_parse(self, parse, pos, normal_form, tag):
self.assertEqual(parse.pos, pos)
self.assertEqual(parse.normal_form, normal_form)
self.assertEqual(parse.tag, tag)
def test_ru_sentence_analysis1(self):
forms = self.ru_predictor.predict(["косил", "косой", "косой", "косой"])
self.__assert_parse(forms[0], 'VERB', 'косить',
'Gender=Masc|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
self.assertIn(1, forms[0].vector)
def test_empty_sentence(self):
forms = self.ru_predictor.predict([])
self.assertEqual(forms, [])
def test_ru_sentence_analysis2(self):
forms = self.ru_predictor.predict(["мама", "мыла", "раму"])
self.__assert_parse(forms[0], 'NOUN', 'мама', 'Case=Nom|Gender=Fem|Number=Sing')
self.__assert_parse(forms[1], 'VERB', 'мыть',
'Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
self.__assert_parse(forms[2], 'NOUN', 'рама', 'Case=Acc|Gender=Fem|Number=Sing')
def test_ru_sentences_analysis1(self):
forms = self.ru_predictor.predict_sentences([["косил", "косой", "косой", "косой"], ["мама", "мыла", "раму"]])
self.__assert_parse(forms[0][0], 'VERB', 'косить',
'Gender=Masc|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
self.__assert_parse(forms[1][0], 'NOUN', 'мама', 'Case=Nom|Gender=Fem|Number=Sing')
self.__assert_parse(forms[1][1], 'VERB', 'мыть',
'Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
self.__assert_parse(forms[1][2], 'NOUN', 'рама', 'Case=Acc|Gender=Fem|Number=Sing')
def test_empty_sentences(self):
forms = self.ru_predictor.predict_sentences([[]])
self.assertEqual(forms, [[]])
def test_ru_one_empty_sentence_in_sentences(self):
forms = self.ru_predictor.predict_sentences([["косил", "косой", "косой", "косой"], []])
self.assertEqual(forms[1], [])
self.assertNotEqual(forms[0], [])
def test_ru_proba(self):
forms = self.ru_predictor.predict(["косил", "косой", "косой", "косой"], include_all_forms=True)
self.assertEqual(len(forms[0].possible_forms), 252)
indices = np.array([form.score for form in forms[2].possible_forms]).argsort()[-5:][::-1]
variants = [forms[2].possible_forms[i].tag for i in indices]
self.assertIn('Case=Nom|Degree=Pos|Gender=Masc|Number=Sing', variants)
def test_ru_genres_accuracy(self):
quality = tag_ru_files(self.ru_predictor)
self.assertGreater(quality['Lenta'].tag_accuracy, 95)
self.assertGreater(quality['Lenta'].sentence_accuracy, 70)
self.assertGreater(quality['VK'].tag_accuracy, 93)
self.assertGreater(quality['VK'].sentence_accuracy, 65)
self.assertGreater(quality['JZ'].tag_accuracy, 94)
self.assertGreater(quality['JZ'].sentence_accuracy, 70)
        print("Tag accuracy across all sections: %.2f%%" % (quality['All']['tag_accuracy'] * 100))
        print("PoS tag accuracy across all sections: %.2f%%" % (quality['All']['pos_accuracy'] * 100))
        print("Sentence accuracy across all sections: %.2f%%" % (quality['All']['sentence_accuracy'] * 100))
self.assertGreater(quality['All']['tag_accuracy'], 0.95)
def test_en_accuracy(self):
self.assertGreater(tag_en_files(self.en_predictor).tag_accuracy, 85)
|
lib/test/vot20/stark_st50_lt.py | tzhhhh123/Stark | 376 | 12764407 | from lib.test.vot20.stark_vot20lt import run_vot_exp
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '6'
run_vot_exp('stark_st', 'baseline', vis=False)
|
jax_cfd/base/subgrid_models_test.py | ngam/jax-cfd | 244 | 12764413 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_cfd.subgrid_models."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
from jax_cfd.base import advection
from jax_cfd.base import boundaries
from jax_cfd.base import finite_differences as fd
from jax_cfd.base import funcutils
from jax_cfd.base import grids
from jax_cfd.base import pressure
from jax_cfd.base import subgrid_models
from jax_cfd.base import test_util
import numpy as np
def periodic_grid_variable(data, offset, grid):
return grids.GridVariable(
array=grids.GridArray(data, offset, grid),
bc=boundaries.periodic_boundary_conditions(grid.ndim))
def zero_velocity_field(grid: grids.Grid) -> grids.GridVariableVector:
"""Returns an all-zero periodic velocity fields."""
return tuple(periodic_grid_variable(jnp.zeros(grid.shape), o, grid)
for o in grid.cell_faces)
def sinusoidal_velocity_field(grid: grids.Grid) -> grids.GridVariableVector:
"""Returns a divergence-free velocity flow on `grid`."""
mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
vs = tuple(jnp.sin(2. * np.pi * g / s)
for g, s in zip(grid.mesh(), mesh_size))
return tuple(periodic_grid_variable(v, o, grid)
for v, o in zip(vs[1:] + vs[:1], grid.cell_faces))
def gaussian_force_field(grid: grids.Grid) -> grids.GridArrayVector:
"""Returns a 'Gaussian-shaped' force field in the 'x' direction."""
mesh = grid.mesh()
mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
offsets = grid.cell_faces
v = [grids.GridArray(
jnp.exp(-sum([jnp.square(x / s - .5)
for x, s in zip(mesh, mesh_size)]) * 100.),
offsets[0], grid)]
for j in range(1, grid.ndim):
v.append(grids.GridArray(jnp.zeros(grid.shape), offsets[j], grid))
return tuple(v)
def gaussian_forcing(v: grids.GridVariableVector) -> grids.GridArrayVector:
"""Returns Gaussian field forcing."""
grid = grids.consistent_grid(*v)
return gaussian_force_field(grid)
def momentum(v: grids.GridVariableVector, density: float):
"""Returns the momentum due to velocity field `v`."""
grid = grids.consistent_grid(*v)
return jnp.array([u.data for u in v]).sum() * density * jnp.array(
grid.step).prod()
def _convect_upwind(v: grids.GridVariableVector) -> grids.GridArrayVector:
return tuple(advection.advect_upwind(u, v) for u in v)
class SubgridModelsTest(test_util.TestCase):
def test_smagorinsky_viscosity(self):
grid = grids.Grid((3, 3))
v = (periodic_grid_variable(jnp.zeros(grid.shape), (1, 0.5), grid),
periodic_grid_variable(jnp.zeros(grid.shape), (0.5, 1), grid))
c00 = grids.GridArray(jnp.zeros(grid.shape), offset=(0, 0), grid=grid)
c01 = grids.GridArray(jnp.zeros(grid.shape), offset=(0, 1), grid=grid)
c10 = grids.GridArray(jnp.zeros(grid.shape), offset=(1, 0), grid=grid)
c11 = grids.GridArray(jnp.zeros(grid.shape), offset=(1, 1), grid=grid)
s_ij = grids.GridArrayTensor(np.array([[c00, c01], [c10, c11]]))
viscosity = subgrid_models.smagorinsky_viscosity(
s_ij=s_ij, v=v, dt=0.1, cs=0.2)
self.assertIsInstance(viscosity, grids.GridArrayTensor)
self.assertEqual(viscosity.shape, (2, 2))
self.assertAllClose(viscosity[0, 0], c00)
self.assertAllClose(viscosity[0, 1], c01)
self.assertAllClose(viscosity[1, 0], c10)
self.assertAllClose(viscosity[1, 1], c11)
def test_evm_model(self):
grid = grids.Grid((3, 3))
v = (
periodic_grid_variable(jnp.zeros(grid.shape), (1, 0.5), grid),
periodic_grid_variable(jnp.zeros(grid.shape), (0.5, 1), grid))
viscosity_fn = functools.partial(
subgrid_models.smagorinsky_viscosity, dt=1.0, cs=0.2)
acceleration = subgrid_models.evm_model(v, viscosity_fn)
self.assertIsInstance(acceleration, tuple)
self.assertLen(acceleration, 2)
self.assertAllClose(acceleration[0], v[0].array)
self.assertAllClose(acceleration[1], v[1].array)
@parameterized.named_parameters(
dict(
testcase_name='sinusoidal_velocity_base',
cs=0.0,
velocity=sinusoidal_velocity_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=advection.convect_linear,
pressure_solve=pressure.solve_cg,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=1e-3),
dict(
testcase_name='gaussian_force_upwind_with_subgrid_model',
cs=0.12,
velocity=zero_velocity_field,
forcing=gaussian_forcing,
shape=(40, 40, 40),
step=(1., 1., 1.),
density=1.,
viscosity=0,
convect=_convect_upwind,
pressure_solve=pressure.solve_cg,
dt=1e-3,
time_steps=100,
divergence_atol=1e-4,
momentum_atol=1e-4),
dict(
testcase_name='sinusoidal_velocity_with_subgrid_model',
cs=0.12,
velocity=sinusoidal_velocity_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=advection.convect_linear,
pressure_solve=pressure.solve_fast_diag,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=1e-3),
)
def test_divergence_and_momentum(
self,
cs,
velocity,
forcing,
shape,
step,
density,
viscosity,
convect,
pressure_solve,
dt,
time_steps,
divergence_atol,
momentum_atol,
):
grid = grids.Grid(shape, step)
kwargs = dict(
density=density,
viscosity=viscosity,
cs=cs,
dt=dt,
grid=grid,
convect=convect,
pressure_solve=pressure_solve,
forcing=forcing)
# Explicit and implicit navier-stokes solvers:
explicit_eq = subgrid_models.explicit_smagorinsky_navier_stokes(**kwargs)
implicit_eq = subgrid_models.implicit_smagorinsky_navier_stokes(**kwargs)
v_initial = velocity(grid)
v_final = funcutils.repeated(explicit_eq, time_steps)(v_initial)
# TODO(dkochkov) consider adding more thorough tests for these models.
with self.subTest('divergence free'):
divergence = fd.divergence(v_final)
self.assertLess(jnp.max(divergence.data), divergence_atol)
with self.subTest('conservation of momentum'):
initial_momentum = momentum(v_initial, density)
final_momentum = momentum(v_final, density)
if forcing is not None:
expected_change = (
jnp.array([f.data for f in forcing(v_initial)]).sum() *
jnp.array(grid.step).prod() * dt * time_steps)
else:
expected_change = 0
expected_momentum = initial_momentum + expected_change
self.assertAllClose(expected_momentum, final_momentum, atol=momentum_atol)
with self.subTest('explicit-implicit consistency'):
v_final_2 = funcutils.repeated(implicit_eq, time_steps)(v_initial)
for axis in range(grid.ndim):
self.assertAllClose(v_final[axis], v_final_2[axis], atol=1e-4,
err_msg=f'axis={axis}')
if __name__ == '__main__':
absltest.main()
|
two_sigma_problems/problem_7.py | loftwah/Daily-Coding-Problem | 129 | 12764431 | """This problem was asked by Two Sigma.
You’re tracking stock price at a given instance of time.
Implement an API with the following functions: add(), update(), remove(),
which adds/updates/removes a datapoint for the stock price you are tracking.
The data is given as (timestamp, price), where timestamp is specified in unix
epoch time.
Also, provide max(), min(), and average() functions that give the max/min/average
of all values seen thus far.
""" |
common/data_refinery_common/models/keywords.py | AlexsLemonade/refinebio | 106 | 12764488 | from django.db import models
class SampleKeyword(models.Model):
"""An ontology term associated with a sample in our database"""
name = models.ForeignKey("OntologyTerm", on_delete=models.CASCADE, related_name="+")
sample = models.ForeignKey("Sample", on_delete=models.CASCADE, related_name="keywords")
source = models.ForeignKey("Contribution", on_delete=models.CASCADE)
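    # Example (sketch): given related_name="keywords" above, the ontology terms
    # attached to a sample can be read back with sample_instance.keywords.all().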
|
Contents/Libraries/Shared/guessit/rules/properties/title.py | jippo015/Sub-Zero.bundle | 1,553 | 12764507 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
title property
"""
from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch, AppendTags
from rebulk.formatters import formatters
from .film import FilmTitleRule
from .language import SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule
from ..common import seps, title_seps
from ..common.comparators import marker_sorted
from ..common.expected import build_expected_function
from ..common.formatters import cleanup, reorder_title
from ..common.validators import seps_surround
def title():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().rules(TitleFromPosition, PreferTitleWithYear)
expected_title = build_expected_function('expected_title')
rebulk.functional(expected_title, name='title', tags=['expected', 'title'],
validator=seps_surround,
formatter=formatters(cleanup, reorder_title),
conflict_solver=lambda match, other: other,
disabled=lambda context: not context.get('expected_title'))
return rebulk
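# Example (sketch): this builder is normally assembled into guessit's default
# rule set rather than used directly; through that API a call such as
# guessit('Some.Movie.2016.720p.mkv') would typically come back with a 'title'
# of 'Some Movie' next to 'year' and 'screen_size'. The exact output depends on
# the full rule chain, so treat the values here as illustrative only.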
class TitleBaseRule(Rule):
"""
Add title match in existing matches
"""
# pylint:disable=no-self-use,unused-argument
consequence = [AppendMatch, RemoveMatch]
def __init__(self, match_name, match_tags=None, alternative_match_name=None):
super(TitleBaseRule, self).__init__()
self.match_name = match_name
self.match_tags = match_tags
self.alternative_match_name = alternative_match_name
def hole_filter(self, hole, matches):
"""
Filter holes for titles.
:param hole:
:type hole:
:param matches:
:type matches:
:return:
:rtype:
"""
return True
def filepart_filter(self, filepart, matches):
"""
Filter filepart for titles.
:param filepart:
:type filepart:
:param matches:
:type matches:
:return:
:rtype:
"""
return True
def holes_process(self, holes, matches):
"""
process holes
:param holes:
:type holes:
:param matches:
:type matches:
:return:
:rtype:
"""
cropped_holes = []
for hole in holes:
group_markers = matches.markers.named('group')
cropped_holes.extend(hole.crop(group_markers))
return cropped_holes
def is_ignored(self, match):
"""
Ignore matches when scanning for title (hole).
Full word language and countries won't be ignored if they are uppercase.
"""
return not (len(match) > 3 and match.raw.isupper()) and match.name in ['language', 'country', 'episode_details']
def should_keep(self, match, to_keep, matches, filepart, hole, starting):
"""
Check if this match should be accepted when ending or starting a hole.
:param match:
:type match:
:param to_keep:
:type to_keep: list[Match]
:param matches:
:type matches: Matches
        :param filepart: the filepart match
        :type filepart: Match
        :param hole: the hole match
        :type hole: Match
:param starting: true if match is starting the hole
:type starting: bool
:return:
:rtype:
"""
if match.name in ['language', 'country']:
# Keep language if exactly matching the hole.
if len(hole.value) == len(match.raw):
return True
# Keep language if other languages exists in the filepart.
outside_matches = filepart.crop(hole)
other_languages = []
for outside in outside_matches:
other_languages.extend(matches.range(outside.start, outside.end,
lambda c_match: c_match.name == match.name and
c_match not in to_keep))
if not other_languages:
return True
return False
def should_remove(self, match, matches, filepart, hole, context):
"""
        Check if this match should be removed after being ignored.
:param match:
:param matches:
:param filepart:
:param hole:
:return:
"""
if context.get('type') == 'episode' and match.name == 'episode_details':
return match.start >= hole.start and match.end <= hole.end
return True
def check_titles_in_filepart(self, filepart, matches, context):
"""
Find title in filepart (ignoring language)
"""
# pylint:disable=too-many-locals,too-many-branches,too-many-statements
start, end = filepart.span
holes = matches.holes(start, end + 1, formatter=formatters(cleanup, reorder_title),
ignore=self.is_ignored,
predicate=lambda hole: hole.value)
holes = self.holes_process(holes, matches)
for hole in holes:
# pylint:disable=cell-var-from-loop
if not hole or (self.hole_filter and not self.hole_filter(hole, matches)):
continue
to_remove = []
to_keep = []
ignored_matches = matches.range(hole.start, hole.end, self.is_ignored)
if ignored_matches:
for ignored_match in reversed(ignored_matches):
# pylint:disable=undefined-loop-variable
trailing = matches.chain_before(hole.end, seps, predicate=lambda match: match == ignored_match)
if trailing:
should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, False)
if should_keep:
# pylint:disable=unpacking-non-sequence
try:
append, crop = should_keep
except TypeError:
append, crop = should_keep, should_keep
if append:
to_keep.append(ignored_match)
if crop:
hole.end = ignored_match.start
for ignored_match in ignored_matches:
if ignored_match not in to_keep:
starting = matches.chain_after(hole.start, seps,
predicate=lambda match: match == ignored_match)
if starting:
should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, True)
if should_keep:
# pylint:disable=unpacking-non-sequence
try:
append, crop = should_keep
except TypeError:
append, crop = should_keep, should_keep
if append:
to_keep.append(ignored_match)
if crop:
hole.start = ignored_match.end
for match in ignored_matches:
if self.should_remove(match, matches, filepart, hole, context):
to_remove.append(match)
for keep_match in to_keep:
if keep_match in to_remove:
to_remove.remove(keep_match)
if hole and hole.value:
hole.name = self.match_name
hole.tags = self.match_tags
if self.alternative_match_name:
# Split and keep values that can be a title
titles = hole.split(title_seps, lambda match: match.value)
for title_match in list(titles[1:]):
previous_title = titles[titles.index(title_match) - 1]
separator = matches.input_string[previous_title.end:title_match.start]
if len(separator) == 1 and separator == '-' \
and previous_title.raw[-1] not in seps \
and title_match.raw[0] not in seps:
titles[titles.index(title_match) - 1].end = title_match.end
titles.remove(title_match)
else:
title_match.name = self.alternative_match_name
else:
titles = [hole]
return titles, to_remove
def when(self, matches, context):
if matches.named(self.match_name, lambda match: 'expected' in match.tags):
return
fileparts = [filepart for filepart in list(marker_sorted(matches.markers.named('path'), matches))
if not self.filepart_filter or self.filepart_filter(filepart, matches)]
to_remove = []
        # Prioritize fileparts containing the year
years_fileparts = []
for filepart in fileparts:
year_match = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year', 0)
if year_match:
years_fileparts.append(filepart)
ret = []
for filepart in fileparts:
try:
years_fileparts.remove(filepart)
except ValueError:
pass
titles = self.check_titles_in_filepart(filepart, matches, context)
if titles:
titles, to_remove_c = titles
ret.extend(titles)
to_remove.extend(to_remove_c)
break
# Add title match in all fileparts containing the year.
for filepart in years_fileparts:
titles = self.check_titles_in_filepart(filepart, matches, context)
if titles:
# pylint:disable=unbalanced-tuple-unpacking
titles, to_remove_c = titles
ret.extend(titles)
to_remove.extend(to_remove_c)
return ret, to_remove
class TitleFromPosition(TitleBaseRule):
"""
Add title match in existing matches
"""
dependency = [FilmTitleRule, SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule]
properties = {'title': [None], 'alternative_title': [None]}
def __init__(self):
super(TitleFromPosition, self).__init__('title', ['title'], 'alternative_title')
class PreferTitleWithYear(Rule):
"""
Prefer title where filepart contains year.
"""
dependency = TitleFromPosition
consequence = [RemoveMatch, AppendTags(['equivalent-ignore'])]
properties = {'title': [None]}
def when(self, matches, context):
with_year_in_group = []
with_year = []
titles = matches.named('title')
for title_match in titles:
filepart = matches.markers.at_match(title_match, lambda marker: marker.name == 'path', 0)
if filepart:
year_match = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year', 0)
if year_match:
group = matches.markers.at_match(year_match, lambda group: group.name == 'group')
if group:
with_year_in_group.append(title_match)
else:
with_year.append(title_match)
to_tag = []
if with_year_in_group:
title_values = set([title_match.value for title_match in with_year_in_group])
to_tag.extend(with_year_in_group)
elif with_year:
title_values = set([title_match.value for title_match in with_year])
to_tag.extend(with_year)
else:
title_values = set([title_match.value for title_match in titles])
to_remove = []
for title_match in titles:
if title_match.value not in title_values:
to_remove.append(title_match)
return to_remove, to_tag
|
maistra/vendor/com_googlesource_chromium_v8/wee8/build/fuchsia/boot_data.py | knm3000/proxy | 643 | 12764564 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions used to provision Fuchsia boot images."""
import common
import logging
import os
import subprocess
import tempfile
import time
import uuid
_SSH_CONFIG_TEMPLATE = """
Host *
CheckHostIP no
StrictHostKeyChecking no
ForwardAgent no
ForwardX11 no
UserKnownHostsFile {known_hosts}
User fuchsia
IdentitiesOnly yes
IdentityFile {identity}
ServerAliveInterval 2
ServerAliveCountMax 5
ControlMaster auto
ControlPersist 1m
ControlPath /tmp/ssh-%r@%h:%p
ConnectTimeout 5
"""
FVM_TYPE_QCOW = 'qcow'
FVM_TYPE_SPARSE = 'sparse'
# Specifies boot files intended for use by an emulator.
TARGET_TYPE_QEMU = 'qemu'
# Specifies boot files intended for use by anything (incl. physical devices).
TARGET_TYPE_GENERIC = 'generic'
def _GetPubKeyPath(output_dir):
"""Returns a path to the generated SSH public key."""
return os.path.join(output_dir, 'id_ed25519.pub')
def ProvisionSSH(output_dir):
"""Generates a keypair and config file for SSH."""
host_key_path = os.path.join(output_dir, 'ssh_key')
host_pubkey_path = host_key_path + '.pub'
id_key_path = os.path.join(output_dir, 'id_ed25519')
id_pubkey_path = _GetPubKeyPath(output_dir)
known_hosts_path = os.path.join(output_dir, 'known_hosts')
ssh_config_path = os.path.join(output_dir, 'ssh_config')
logging.debug('Generating SSH credentials.')
if not os.path.isfile(host_key_path):
subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-h', '-f',
host_key_path, '-P', '', '-N', ''],
stdout=open(os.devnull))
if not os.path.isfile(id_key_path):
subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-f', id_key_path,
'-P', '', '-N', ''], stdout=open(os.devnull))
with open(ssh_config_path, "w") as ssh_config:
ssh_config.write(
_SSH_CONFIG_TEMPLATE.format(identity=id_key_path,
known_hosts=known_hosts_path))
if os.path.exists(known_hosts_path):
os.remove(known_hosts_path)
def GetTargetFile(filename, target_arch, target_type):
"""Computes a path to |filename| in the Fuchsia boot image directory specific
to |target_type| and |target_arch|."""
assert target_type == TARGET_TYPE_QEMU or target_type == TARGET_TYPE_GENERIC
return os.path.join(common.IMAGES_ROOT, target_arch, target_type, filename)
def GetSSHConfigPath(output_dir):
return output_dir + '/ssh_config'
def GetBootImage(output_dir, target_arch, target_type):
  """Gets a path to the Zircon boot image, with the SSH client public key
added."""
ProvisionSSH(output_dir)
pubkey_path = _GetPubKeyPath(output_dir)
zbi_tool = common.GetHostToolPathFromPlatform('zbi')
image_source_path = GetTargetFile('zircon-a.zbi', target_arch, target_type)
image_dest_path = os.path.join(output_dir, 'gen', 'fuchsia-with-keys.zbi')
cmd = [ zbi_tool, '-o', image_dest_path, image_source_path,
'-e', 'data/ssh/authorized_keys=' + pubkey_path ]
subprocess.check_call(cmd)
return image_dest_path
def GetKernelArgs(output_dir):
return ['devmgr.epoch=%d' % time.time()]
def AssertBootImagesExist(arch, platform):
assert os.path.exists(GetTargetFile('zircon-a.zbi', arch, platform)), \
'This checkout is missing the files necessary for\n' \
'booting this configuration of Fuchsia.\n' \
'To check out the files, add this entry to the "custom_vars"\n' \
'section of your .gclient file:\n\n' \
' "checkout_fuchsia_boot_images": "%s.%s"\n\n' % \
(platform, arch)
|
pypy/interpreter/pyparser/parser.py | nanjekyejoannah/pypy | 333 | 12764661 | """
A CPython inspired RPython parser.
"""
from rpython.rlib.objectmodel import not_rpython
class Grammar(object):
"""
Base Grammar object.
Pass this to ParserGenerator.build_grammar to fill it with useful values for
the Parser.
"""
def __init__(self):
self.symbol_ids = {}
self.symbol_names = {}
self.symbol_to_label = {}
self.keyword_ids = {}
self.token_to_error_string = {}
self.dfas = []
self.labels = [0]
self.token_ids = {}
self.start = -1
def shared_copy(self):
new = self.__class__()
new.symbol_ids = self.symbol_ids
        new.symbol_names = self.symbol_names
new.keyword_ids = self.keyword_ids
new.token_to_error_string = self.token_to_error_string
new.dfas = self.dfas
new.labels = self.labels
new.token_ids = self.token_ids
return new
def classify(self, token):
"""Find the label for a token."""
if token.token_type == self.KEYWORD_TOKEN:
label_index = self.keyword_ids.get(token.value, -1)
if label_index != -1:
return label_index
label_index = self.token_ids.get(token.token_type, -1)
if label_index == -1:
raise ParseError("invalid token", token)
return label_index
def _freeze_(self):
# Remove some attributes not used in parsing.
try:
del self.symbol_to_label
del self.symbol_names
del self.symbol_ids
except AttributeError:
pass
return True
class DFA(object):
def __init__(self, grammar, symbol_id, states, first):
self.grammar = grammar
self.symbol_id = symbol_id
self.states = states
self.first = self._first_to_string(first)
def could_match_token(self, label_index):
pos = label_index >> 3
bit = 1 << (label_index & 0b111)
        return bool(ord(self.first[pos]) & bit)
@staticmethod
@not_rpython
def _first_to_string(first):
l = sorted(first.keys())
b = bytearray(32)
for label_index in l:
pos = label_index >> 3
bit = 1 << (label_index & 0b111)
b[pos] |= bit
return str(b)
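# Example (sketch): `first` packs a 256-entry label set into a 32-byte string;
# label i lives at byte i >> 3, bit i & 0b111. Under the CPython 2 semantics
# RPython uses, the lookup in could_match_token behaves like:
# >>> packed = DFA._first_to_string({5: None, 9: None})
# >>> bool(ord(packed[5 >> 3]) & (1 << (5 & 0b111)))
# True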
class Token(object):
def __init__(self, token_type, value, lineno, column, line):
self.token_type = token_type
self.value = value
self.lineno = lineno
# 0-based offset
self.column = column
self.line = line
def __repr__(self):
return "Token(%s, %s)" % (self.token_type, self.value)
def __eq__(self, other):
# for tests
return (
self.token_type == other.token_type and
self.value == other.value and
self.lineno == other.lineno and
self.column == other.column and
self.line == other.line
)
def __ne__(self, other):
return not self == other
class Node(object):
__slots__ = ("grammar", "type")
def __init__(self, grammar, type):
assert grammar is None or isinstance(grammar, Grammar)
assert isinstance(type, int)
self.grammar = grammar
self.type = type
def __eq__(self, other):
raise NotImplementedError("abstract base class")
def __ne__(self, other):
return not self == other
def get_value(self):
return None
def get_child(self, i):
raise NotImplementedError("abstract base class")
def num_children(self):
return 0
def append_child(self, child):
raise NotImplementedError("abstract base class")
def get_lineno(self):
raise NotImplementedError("abstract base class")
def get_column(self):
raise NotImplementedError("abstract base class")
def get_line(self):
raise NotImplementedError("abstract base class")
def view(self):
from dotviewer import graphclient
import pytest
r = ["digraph G {"]
self._dot(r)
r.append("}")
p = pytest.ensuretemp("pyparser").join("temp.dot")
p.write("\n".join(r))
graphclient.display_dot_file(str(p))
def _dot(self, result):
raise NotImplementedError("abstract base class")
class Terminal(Node):
__slots__ = ("value", "lineno", "column", "line")
def __init__(self, grammar, type, value, lineno, column, line=None):
Node.__init__(self, grammar, type)
self.value = value
self.lineno = lineno
self.column = column
self.line = line
@staticmethod
def fromtoken(grammar, token):
return Terminal(
grammar,
token.token_type, token.value, token.lineno, token.column,
token.line)
def __repr__(self):
return "Terminal(type=%s, value=%r)" % (self.type, self.value)
def __eq__(self, other):
# For tests.
return (type(self) == type(other) and
self.type == other.type and
self.value == other.value)
def get_value(self):
return self.value
def get_lineno(self):
return self.lineno
def get_column(self):
return self.column
def get_line(self):
return self.line
def _dot(self, result):
result.append('%s [label="%r", shape=box];' % (id(self), self.value))
class AbstractNonterminal(Node):
__slots__ = ()
def get_lineno(self):
return self.get_child(0).get_lineno()
def get_column(self):
return self.get_child(0).get_column()
def get_line(self):
return self.get_child(0).get_line()
def __eq__(self, other):
# For tests.
# grumble, annoying
if not isinstance(other, AbstractNonterminal):
return False
if self.type != other.type:
return False
if self.num_children() != other.num_children():
return False
for i in range(self.num_children()):
if self.get_child(i) != other.get_child(i):
return False
return True
def _dot(self, result):
for i in range(self.num_children()):
child = self.get_child(i)
result.append('%s [label=%s, shape=box]' % (id(self), self.grammar.symbol_names[self.type]))
result.append('%s -> %s [label="%s"]' % (id(self), id(child), i))
child._dot(result)
class Nonterminal(AbstractNonterminal):
__slots__ = ("_children", )
def __init__(self, grammar, type, children=None):
Node.__init__(self, grammar, type)
if children is None:
children = []
self._children = children
def __repr__(self):
return "Nonterminal(type=%s, children=%r)" % (
self.grammar.symbol_names[self.type]
if self.grammar is not None else self.type,
self._children)
def get_child(self, i):
assert self._children is not None
return self._children[i]
def num_children(self):
return len(self._children)
def append_child(self, child):
self._children.append(child)
class Nonterminal1(AbstractNonterminal):
__slots__ = ("_child", )
def __init__(self, grammar, type, child):
Node.__init__(self, grammar, type)
self._child = child
def __repr__(self):
return "Nonterminal(type=%s, children=[%r])" % (
self.grammar.symbol_names[self.type]
if self.grammar is not None else self.type,
self._child)
def get_child(self, i):
assert i == 0 or i == -1
return self._child
def num_children(self):
return 1
def append_child(self, child):
assert 0, "should be unreachable"
class ParseError(Exception):
def __init__(self, msg, token, expected=-1, expected_str=None):
self.msg = msg
self.token = token
self.expected = expected
self.expected_str = expected_str
def __str__(self):
        return "ParseError(%s)" % (self.token,)
class StackEntry(object):
def __init__(self, next, dfa, state):
self.next = next
self.dfa = dfa
self.state = state
self.node = None
def push(self, dfa, state):
return StackEntry(self, dfa, state)
def pop(self):
return self.next
def node_append_child(self, child):
node = self.node
if node is None:
self.node = Nonterminal1(self.dfa.grammar, self.dfa.symbol_id, child)
elif isinstance(node, Nonterminal1):
newnode = self.node = Nonterminal(
self.dfa.grammar,
self.dfa.symbol_id, [node._child, child])
else:
self.node.append_child(child)
def view(self):
from dotviewer import graphclient
import pytest
r = ["digraph G {"]
self._dot(r)
r.append("}")
p = pytest.ensuretemp("pyparser").join("temp.dot")
p.write("\n".join(r))
graphclient.display_dot_file(str(p))
def _dot(self, result):
result.append('%s [label=%s, shape=box, color=white]' % (id(self), self.dfa.grammar.symbol_names[self.dfa.symbol_id]))
if self.next:
result.append('%s -> %s [label="next"]' % (id(self), id(self.next)))
self.next._dot(result)
if self.node:
result.append('%s -> %s [label="node"]' % (id(self), id(self.node)))
self.node._dot(result)
class Parser(object):
def __init__(self, grammar):
self.grammar = grammar
self.root = None
def prepare(self, start=-1):
"""Setup the parser for parsing.
Takes the starting symbol as an argument.
"""
if start == -1:
start = self.grammar.start
self.root = None
self.stack = StackEntry(None, self.grammar.dfas[start - 256], 0)
def add_token(self, token):
label_index = self.grammar.classify(token)
sym_id = 0 # for the annotator
while True:
dfa = self.stack.dfa
state_index = self.stack.state
states = dfa.states
arcs, is_accepting = states[state_index]
for i, next_state in arcs:
sym_id = self.grammar.labels[i]
if label_index == i:
                    # The token matches this arc: shift the terminal.
self.shift(next_state, token)
state = states[next_state]
# While the only possible action is to accept, pop nodes off
# the stack.
while state[1] and not state[0]:
self.pop()
if self.stack is None:
# Parsing is done.
return True
dfa = self.stack.dfa
state_index = self.stack.state
state = dfa.states[state_index]
return False
elif sym_id >= 256:
sub_node_dfa = self.grammar.dfas[sym_id - 256]
# Check if this token can start a child node.
if sub_node_dfa.could_match_token(label_index):
self.push(sub_node_dfa, next_state, sym_id)
break
else:
# We failed to find any arcs to another state, so unless this
# state is accepting, it's invalid input.
if is_accepting:
self.pop()
if self.stack is None:
raise ParseError("too much input", token)
else:
# If only one possible input would satisfy, attach it to the
# error.
if len(arcs) == 1:
expected = sym_id
expected_str = self.grammar.token_to_error_string.get(
arcs[0][0], None)
else:
expected = -1
expected_str = None
raise ParseError("bad input", token, expected, expected_str)
    def shift(self, next_state, token):
        """Shift a terminal (token) onto the current node and advance the state."""
new_node = Terminal.fromtoken(self.grammar, token)
self.stack.node_append_child(new_node)
self.stack.state = next_state
    def push(self, next_dfa, next_state, node_type):
        """Push a nonterminal's DFA onto the stack and adjust the current state."""
self.stack.state = next_state
self.stack = self.stack.push(next_dfa, 0)
def pop(self):
"""Pop an entry off the stack and make its node a child of the last."""
top = self.stack
self.stack = top.pop()
node = top.node
assert node is not None
if self.stack:
self.stack.node_append_child(node)
else:
self.root = node
|
torchrecipes/vision/core/optim/lr_scheduler.py | colin2328/recipes | 161 | 12764664 | #!/usr/bin/env python3
from typing import Union
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR, SequentialLR
class CosineWithWarmup(SequentialLR):
r"""Cosine Decay Learning Rate Scheduler with Linear Warmup.
Args:
optimizer (Optimizer): Wrapped optimizer.
max_iters (int): Max number of iterations. (This should be number of epochs/steps
based on the unit of scheduler's step size.)
warmup_iters (int or float): number or fraction of iterations where
linear warmup happens. Approaching the end of the linear warmup
period the linear warmup line will intersect with the cosine decay curve.
Default: 0
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_iters: Union[int, float] = 0,
warmup_start_factor: float = 0.0,
last_epoch: int = -1,
) -> None:
if isinstance(warmup_iters, float):
warmup_iters = int(warmup_iters * max_iters)
linear_lr = LinearLR(optimizer, warmup_start_factor, total_iters=warmup_iters)
cosine_lr = CosineAnnealingLR(optimizer, T_max=max_iters - warmup_iters)
super().__init__(optimizer, [linear_lr, cosine_lr], [warmup_iters], last_epoch)
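# Example (sketch): stepping the scheduler next to an optimizer. The tiny model,
# learning rate, and iteration counts are placeholders rather than part of this
# module; a small positive warmup_start_factor is passed because LinearLR
# requires a start factor greater than zero.
if __name__ == "__main__":
    _model = torch.nn.Linear(4, 2)
    _opt = torch.optim.SGD(_model.parameters(), lr=0.1)
    _sched = CosineWithWarmup(_opt, max_iters=100, warmup_iters=0.1, warmup_start_factor=0.01)
    for _ in range(100):
        _opt.step()
        _sched.step()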
|
crabageprediction/venv/Lib/site-packages/mpl_toolkits/axes_grid/angle_helper.py | 13rianlucero/CrabAgePrediction | 603 | 12764680 | from mpl_toolkits.axisartist.angle_helper import *
|
tests/load/test_load_case.py | mhkc/scout | 111 | 12764683 | def test_load_case(case_obj, adapter):
## GIVEN a database with no cases
assert adapter.case_collection.find_one() is None
## WHEN loading a case
adapter._add_case(case_obj)
## THEN assert that the case have been loaded with correct info
assert adapter.case_collection.find_one()
def test_load_case_rank_model_version(case_obj, adapter):
## GIVEN a database with no cases
assert adapter.case_collection.find_one() is None
## WHEN loading a case
adapter._add_case(case_obj)
## THEN assert that the case have been loaded with rank_model
loaded_case = adapter.case_collection.find_one({"_id": case_obj["_id"]})
assert loaded_case["rank_model_version"] == case_obj["rank_model_version"]
assert loaded_case["sv_rank_model_version"] == case_obj["sv_rank_model_version"]
def test_load_case_limsid(case_obj, adapter):
"""Test loading a case with lims_id"""
## GIVEN a database with no cases
assert adapter.case_collection.find_one() is None
## WHEN loading a case
adapter._add_case(case_obj)
## THEN assert that the case have been loaded with lims id
loaded_case = adapter.case_collection.find_one({"_id": case_obj["_id"]})
assert loaded_case["lims_id"] == case_obj["lims_id"]
|
migrator/wix-bazel-migrator/src/main/resources/import_external.bzl | or-shachar/exodus | 186 | 12764692 | load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
_default_server_urls = ["https://repo.maven.apache.org/maven2/",
"https://mvnrepository.com/artifact",
"https://maven-central.storage.googleapis.com",
"http://gitblit.github.io/gitblit-maven",
"https://repository.mulesoft.org/nexus/content/repositories/public/",]
def safe_exodus_maven_import_external(name, artifact, **kwargs):
if native.existing_rule(name) == None:
exodus_maven_import_external(
name = name,
artifact = artifact,
**kwargs
)
def exodus_maven_import_external(name, artifact, **kwargs):
fetch_sources = kwargs.get("srcjar_sha256") != None
exodus_maven_import_external_sources(name, artifact, fetch_sources, **kwargs)
def exodus_snapshot_maven_import_external(name, artifact, **kwargs):
exodus_maven_import_external_sources(name, artifact, True, **kwargs)
def exodus_maven_import_external_sources(name, artifact, fetch_sources, **kwargs):
jvm_maven_import_external(
name = name,
artifact = artifact,
licenses = ["notice"], # Apache 2.0
fetch_sources = fetch_sources,
server_urls = _default_server_urls,
**kwargs
) |
redis-monitor/plugins/stats_monitor.py | j3k00/scrapy-cluster | 1,108 | 12764725 | from __future__ import absolute_import
from .kafka_base_monitor import KafkaBaseMonitor
class StatsMonitor(KafkaBaseMonitor):
regex = "statsrequest:*:*"
def setup(self, settings):
'''
Setup kafka
'''
KafkaBaseMonitor.setup(self, settings)
def handle(self, key, value):
'''
Processes a vaild stats request
@param key: The key that matched the request
@param value: The value associated with the key
'''
# break down key
elements = key.split(":")
stats = elements[1]
appid = elements[2]
uuid = value
# log we received the stats request
extras = self.get_log_dict('stats', appid, uuid=uuid)
self.logger.info('Received {s} stats request'.format(s=stats),
extra=extras)
extras = {}
if stats == 'all':
extras = self.get_all_stats()
elif stats == 'kafka-monitor':
extras = self.get_kafka_monitor_stats()
elif stats == 'redis-monitor':
extras = self.get_redis_monitor_stats()
elif stats == 'crawler':
extras = self.get_crawler_stats()
elif stats == 'spider':
extras = self.get_spider_stats()
elif stats == 'machine':
extras = self.get_machine_stats()
elif stats == 'queue':
extras = self.get_queue_stats()
elif stats == 'rest':
extras = self.get_rest_stats()
else:
self.logger.warn('Received invalid stats request: {s}'\
.format(s=stats),
extra=extras)
return
extras['stats'] = stats
extras['appid'] = appid
extras['uuid'] = uuid
extras['server_time'] = int(self.get_current_time())
if self._send_to_kafka(extras):
extras['success'] = True
self.logger.info('Sent stats to kafka', extra=extras)
else:
extras['success'] = False
self.logger.error('Failed to send stats to kafka', extra=extras)
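    # Example (sketch): a Redis key "statsrequest:crawler:testapp" holding the
    # value "someuuid" is handled above; the message sent to Kafka then looks
    # roughly like {"stats": "crawler", "appid": "testapp", "uuid": "someuuid",
    # "server_time": 1600000000, "spiders": {...}, "machines": {...},
    # "queue": {...}} -- keys beyond stats/appid/uuid/server_time mirror
    # whatever the gather methods below return, so treat this as illustrative.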
def get_all_stats(self):
'''
Gather all stats objects
'''
self.logger.debug("Gathering all stats")
the_dict = {}
the_dict['kafka-monitor'] = self.get_kafka_monitor_stats()
the_dict['redis-monitor'] = self.get_redis_monitor_stats()
the_dict['crawler'] = self.get_crawler_stats()
the_dict['rest'] = self.get_rest_stats()
return the_dict
def get_kafka_monitor_stats(self):
'''
Gather Kafka Monitor stats
@return: A dict of stats
'''
self.logger.debug("Gathering kafka-monitor stats")
return self._get_plugin_stats('kafka-monitor')
def get_redis_monitor_stats(self):
'''
Gather Redis Monitor stats
@return: A dict of stats
'''
self.logger.debug("Gathering redis-monitor stats")
return self._get_plugin_stats('redis-monitor')
def get_rest_stats(self):
'''
Gather Rest stats
@return: A dict of stats
'''
self.logger.debug("Gathering rest stats")
return self._get_plugin_stats('rest')
def _get_plugin_stats(self, name):
'''
Used for getting stats for Plugin based stuff, like Kafka Monitor
and Redis Monitor
@param name: the main class stats name
@return: A formatted dict of stats
'''
the_dict = {}
keys = self.redis_conn.keys('stats:{n}:*'.format(n=name))
for key in keys:
# break down key
elements = key.split(":")
main = elements[2]
end = elements[3]
if main == 'total' or main == 'fail':
if main not in the_dict:
the_dict[main] = {}
the_dict[main][end] = self._get_key_value(key, end == 'lifetime')
elif main == 'self':
if 'nodes' not in the_dict:
# main is self, end is machine, true_tail is uuid
the_dict['nodes'] = {}
true_tail = elements[4]
if end not in the_dict['nodes']:
the_dict['nodes'][end] = []
the_dict['nodes'][end].append(true_tail)
else:
if 'plugins' not in the_dict:
the_dict['plugins'] = {}
if main not in the_dict['plugins']:
the_dict['plugins'][main] = {}
the_dict['plugins'][main][end] = self._get_key_value(key, end == 'lifetime')
return the_dict
def _get_key_value(self, key, is_hll=False):
'''
Returns the proper key value for the stats
@param key: the redis key
@param is_hll: the key is a HyperLogLog, else is a sorted set
'''
if is_hll:
# get hll value
return self.redis_conn.execute_command("PFCOUNT", key)
else:
# get zcard value
return self.redis_conn.zcard(key)
def get_spider_stats(self):
'''
Gather spider based stats
'''
self.logger.debug("Gathering spider stats")
the_dict = {}
spider_set = set()
total_spider_count = 0
keys = self.redis_conn.keys('stats:crawler:*:*:*')
for key in keys:
# we only care about the spider
elements = key.split(":")
spider = elements[3]
if spider not in the_dict:
the_dict[spider] = {}
the_dict[spider]['count'] = 0
if len(elements) == 6:
# got a time based stat
response = elements[4]
end = elements[5]
if response not in the_dict[spider]:
the_dict[spider][response] = {}
the_dict[spider][response][end] = self._get_key_value(key, end == 'lifetime')
elif len(elements) == 5:
# got a spider identifier
the_dict[spider]['count'] += 1
total_spider_count += 1
spider_set.add(spider)
else:
self.logger.warn("Unknown crawler stat key", {"key":key})
# simple counts
the_dict['unique_spider_count'] = len(spider_set)
the_dict['total_spider_count'] = total_spider_count
ret_dict = {}
ret_dict['spiders'] = the_dict
return ret_dict
def get_machine_stats(self):
'''
Gather spider based stats
'''
self.logger.debug("Gathering machine stats")
the_dict = {}
keys = self.redis_conn.keys('stats:crawler:*:*:*:*')
for key in keys:
# break down key
elements = key.split(":")
machine = elements[2]
spider = elements[3]
response = elements[4]
end = elements[5]
# we only care about the machine, not spider type
if machine not in the_dict:
the_dict[machine] = {}
if response not in the_dict[machine]:
the_dict[machine][response] = {}
if end in the_dict[machine][response]:
the_dict[machine][response][end] = the_dict[machine][response][end] + \
self._get_key_value(key, end == 'lifetime')
else:
the_dict[machine][response][end] = self._get_key_value(key, end == 'lifetime')
# simple count
the_dict['count'] = len(list(the_dict.keys()))
ret_dict = {}
ret_dict['machines'] = the_dict
return ret_dict
def get_crawler_stats(self):
'''
Gather crawler stats
@return: A dict of stats
'''
self.logger.debug("Gathering crawler stats")
the_dict = {}
the_dict['spiders'] = self.get_spider_stats()['spiders']
the_dict['machines'] = self.get_machine_stats()['machines']
the_dict['queue'] = self.get_queue_stats()['queues']
return the_dict
def get_queue_stats(self):
'''
Gather queue stats
@return: A dict of stats
'''
self.logger.debug("Gathering queue based stats")
the_dict = {}
keys = self.redis_conn.keys('*:*:queue')
total_backlog = 0
for key in keys:
elements = key.split(":")
spider = elements[0]
domain = elements[1]
spider = 'queue_' + spider
if spider not in the_dict:
the_dict[spider] = {
'spider_backlog': 0,
'num_domains': 0,
'domains': []
}
count = self.redis_conn.zcard(key)
total_backlog += count
the_dict[spider]['spider_backlog'] += count
the_dict[spider]['num_domains'] += 1
the_dict[spider]['domains'].append({'domain': domain,
'backlog': count})
the_dict['total_backlog'] = total_backlog
ret_dict = {
'queues': the_dict
}
return ret_dict
|
TopQuarkAnalysis/TopEventProducers/python/sequences/ttSemiLepEvtHypotheses_cff.py | ckamtsikis/cmssw | 852 | 12764749 | import FWCore.ParameterSet.Config as cms
#
# produce ttSemiLep event hypotheses
#
## geom hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypGeom_cff import *
## wMassDeltaTopMass hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypWMassDeltaTopMass_cff import *
## wMassMaxSumPt hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypWMassMaxSumPt_cff import *
## maxSumPtWMass hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypMaxSumPtWMass_cff import *
## genMatch hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypGenMatch_cff import *
## mvaDisc hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypMVADisc_cff import *
## kinFit hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypKinFit_cff import *
## hitFit hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypHitFit_cff import *
## make all considered event hypotheses
makeTtSemiLepHypothesesTask = cms.Task(
makeHypothesis_geomTask,
makeHypothesis_wMassDeltaTopMassTask,
makeHypothesis_wMassMaxSumPtTask,
makeHypothesis_maxSumPtWMassTask,
makeHypothesis_genMatchTask,
makeHypothesis_mvaDiscTask,
makeHypothesis_kinFitTask,
makeHypothesis_hitFitTask
)
makeTtSemiLepHypotheses = cms.Sequence(makeTtSemiLepHypothesesTask)
|
ott/core/gromov_wasserstein.py | google-research/ott | 232 | 12764750 | # coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A Jax version of Sinkhorn's algorithm."""
from typing import Any, Dict, Optional, NamedTuple, Union
import jax
import jax.numpy as jnp
from ott.core import fixed_point_loop
from ott.core import problems
from ott.core import sinkhorn
from ott.geometry import epsilon_scheduler
from ott.geometry import geometry
class GWOutput(NamedTuple):
"""Holds the output of the Gromov-Wasserstein solver.
Attributes:
costs: Holds the sequence of regularized GW costs seen through the outer
loop of the solver.
linear_convergence: Holds the sequence of bool convergence flags of the
inner Sinkhorn iterations.
convergence: Bool convergence flag for the outer GW iterations.
errors: Holds sequence of vectors of errors of the Sinkhorn algorithm
at each iteration.
linear_state: State used to solve and store solutions to the local
linearization of GW.
geom: The geometry underlying the local linearization.
transport: The transport matrix.
reg_gw_cost: Regularized optimal transport cost of the linearization.
"""
costs: Optional[jnp.ndarray] = None
linear_convergence: Optional[jnp.ndarray] = None
convergence: bool = False
errors: Optional[jnp.ndarray] = None
linear_state: Any = None
geom: geometry.Geometry = None
def set(self, **kwargs) -> 'GWOutput':
"""Returns a copy of self, possibly with overwrites."""
return self._replace(**kwargs)
@property
def transport(self):
return self.linear_state.matrix
@property
def reg_gw_cost(self):
return self.linear_state.reg_ot_cost
class GWState(NamedTuple):
"""Holds the state of the Gromov-Wasserstein solver.
Attributes:
costs: Holds the sequence of regularized GW costs seen through the outer
loop of the solver.
linear_convergence: Holds the sequence of bool convergence flags of the
inner Sinkhorn iterations.
errors: Holds sequence of vectors of errors of the Sinkhorn algorithm
at each iteration.
linear_state: State used to solve and store solutions to the local
linearization of GW.
linear_pb: Local linearization of the quadratic GW problem.
"""
costs: Optional[jnp.ndarray] = None
linear_convergence: Optional[jnp.ndarray] = None
errors: Optional[jnp.ndarray] = None
linear_state: Any = None
linear_pb: Optional[problems.LinearProblem] = None
def set(self, **kwargs) -> 'GWState':
"""Returns a copy of self, possibly with overwrites."""
return self._replace(**kwargs)
def update(self, iteration: int, linear_sol, linear_pb, store_errors: bool):
costs = self.costs.at[iteration].set(linear_sol.reg_ot_cost)
errors = None
if store_errors and self.errors is not None:
errors = self.errors.at[iteration, :].set(linear_sol.errors)
linear_convergence = self.linear_convergence.at[iteration].set(
linear_sol.converged)
return self.set(linear_state=linear_sol,
linear_pb=linear_pb,
costs=costs,
linear_convergence=linear_convergence,
errors=errors)
@jax.tree_util.register_pytree_node_class
class GromovWasserstein:
"""A Gromov Wasserstein solver."""
def __init__(self,
epsilon: Union[epsilon_scheduler.Epsilon, float] = 1.0,
min_iterations: int = 5,
max_iterations: int = 50,
threshold: float = 1e-3,
jit: bool = True,
store_sinkhorn_errors: bool = False,
linear_ot_solver: sinkhorn.Sinkhorn = sinkhorn.Sinkhorn(),
**kwargs):
self.epsilon = epsilon
self.min_iterations = min_iterations
self.max_iterations = max_iterations
self.threshold = threshold
self.jit = jit
self.store_sinkhorn_errors = store_sinkhorn_errors
self.linear_ot_solver = linear_ot_solver
self._kwargs = kwargs
def tree_flatten(self):
return ([self.epsilon, self.linear_ot_solver, self.threshold],
dict(
min_iterations=self.min_iterations,
max_iterations=self.max_iterations,
jit=self.jit,
store_sinkhorn_errors=self.store_sinkhorn_errors,
**self._kwargs))
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(epsilon=children[0],
linear_ot_solver=children[1],
threshold=children[2],
**aux_data)
def not_converged(self, state, iteration):
costs, i, tol = state.costs, iteration, self.threshold
return jnp.logical_or(
i <= 2,
jnp.logical_and(
jnp.isfinite(costs[i - 1]),
jnp.logical_not(jnp.isclose(costs[i - 2], costs[i - 1], rtol=tol))))
def __call__(self, prob: problems.QuadraticProblem) -> GWOutput:
if not prob.is_balanced:
raise ValueError('Unbalanced Gromov-Wasserstein is not supported yet.')
gromov_fn = jax.jit(iterations) if self.jit else iterations
out = gromov_fn(self, prob)
# TODO(lpapaxanthos): remove stop_gradient when using backprop
linearization = prob.update_linearization(
jax.lax.stop_gradient(out.linear_state),
self.epsilon)
linear_state = out.linear_state.set_cost(linearization, True, True)
iteration = jnp.sum(out.costs != 0)
convergence = jnp.logical_not(self.not_converged(out, iteration))
return out.set(linear_state=linear_state,
convergence=convergence)
def init_state(self, prob: problems.QuadraticProblem) -> GWState:
"""Initializes the state of the Gromov-Wasserstein iterations."""
linearization = prob.init_linearization(self.epsilon)
linear_state = self.linear_ot_solver(linearization)
num_iter = self.max_iterations
if self.store_sinkhorn_errors:
errors = -jnp.ones((num_iter, self.linear_ot_solver.outer_iterations))
else:
errors = None
return GWState(jnp.zeros((num_iter,)), jnp.zeros((num_iter,)),
errors, linear_state, linearization)
def output_from_state(self, state):
"""Create an output from a loop state.
Arguments:
state: A GWState.
Returns:
A GWOutput.
"""
geom = state.linear_pb.geom
return GWOutput(costs=state.costs,
linear_convergence=state.linear_convergence,
errors=state.errors,
linear_state=state.linear_state,
geom=geom)
def iterations(solver: GromovWasserstein,
prob: problems.QuadraticProblem) -> GWOutput:
"""A jittable Gromov-Wasserstein outer loop."""
def cond_fn(iteration, constants, state):
solver = constants
return solver.not_converged(state, iteration)
def body_fn(iteration, constants, state, compute_error):
del compute_error # Always assumed True for outer loop of GW.
solver = constants
linear_pb = prob.update_linearization(
state.linear_state,
solver.epsilon)
out = solver.linear_ot_solver(linear_pb)
return state.update(
iteration, out, linear_pb, solver.store_sinkhorn_errors)
state = fixed_point_loop.fixpoint_iter(
cond_fn=cond_fn,
body_fn=body_fn,
min_iterations=solver.min_iterations,
max_iterations=solver.max_iterations,
inner_iterations=1,
constants=solver,
state=solver.init_state(prob))
return solver.output_from_state(state)
def make(
epsilon: Union[epsilon_scheduler.Epsilon, float] = 1.,
max_iterations: int = 50,
jit: bool = False,
warm_start: bool = True,
store_sinkhorn_errors: bool = False,
sinkhorn_kwargs: Optional[Dict[str, Any]] = None,
threshold: float = 1e-2,
min_iterations: int = 1,
**kwargs) -> GromovWasserstein:
"""Creates a GromovWasserstein solver.
Args:
epsilon: a regularization parameter or a epsilon_scheduler.Epsilon object.
max_iterations: int32, the maximum number of outer iterations for
Gromov Wasserstein.
jit: bool, if True, jits the function.
warm_start: deprecated.
store_sinkhorn_errors: whether or not to return all the errors of the inner
Sinkhorn iterations.
sinkhorn_kwargs: Optionally a dictionary containing the keywords arguments
for calls to the sinkhorn function.
threshold: threshold (progress between two iterate costs) used to stop GW.
min_iterations: see fixed_point_loop.
**kwargs: additional kwargs for epsilon.
Returns:
A GromovWasserstein solver.
"""
del warm_start
sinkhorn_kwargs = {} if sinkhorn_kwargs is None else sinkhorn_kwargs
sink = sinkhorn.make(**sinkhorn_kwargs)
return GromovWasserstein(
epsilon, max_iterations=max_iterations,
jit=jit, linear_ot_solver=sink, threshold=threshold,
store_sinkhorn_errors=store_sinkhorn_errors,
min_iterations=min_iterations, **kwargs)
def gromov_wasserstein(
geom_x: geometry.Geometry,
geom_y: geometry.Geometry,
a: Optional[jnp.ndarray] = None,
b: Optional[jnp.ndarray] = None,
loss: str = 'sqeucl',
**kwargs) -> GWOutput:
"""Fits Gromov Wasserstein.
Args:
geom_x: a Geometry object for the first view.
geom_y: a second Geometry object for the second view.
a: jnp.ndarray<float>[num_a,] or jnp.ndarray<float>[batch,num_a] weights.
b: jnp.ndarray<float>[num_b,] or jnp.ndarray<float>[batch,num_b] weights.
loss: str 'sqeucl' or 'kl' to define the GW loss.
**kwargs: keyword arguments to make.
Returns:
A GromovWassersteinState named tuple.
"""
losses = {'sqeucl': problems.make_square_loss, 'kl': problems.make_kl_loss}
loss_fn = losses.get(loss, None)
prob = problems.QuadraticProblem(geom_x, geom_y, a=a, b=b, loss=loss_fn())
solver = make(**kwargs)
return solver(prob)
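# Example (sketch): solving GW between two small random point clouds. The point
# cloud shapes, epsilon, and iteration budget below are illustrative only.
if __name__ == "__main__":
  from ott.geometry import pointcloud
  _key_x, _key_y = jax.random.split(jax.random.PRNGKey(0))
  _geom_x = pointcloud.PointCloud(jax.random.normal(_key_x, (10, 3)))
  _geom_y = pointcloud.PointCloud(jax.random.normal(_key_y, (12, 4)))
  _out = gromov_wasserstein(_geom_x, _geom_y, epsilon=1e-2, max_iterations=20)
  print(_out.reg_gw_cost, _out.transport.shape)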
|
wonk/policy.py | aminohealth/wonk | 103 | 12764761 | """Manage AWS policies."""
import json
import pathlib
import re
from typing import Dict, List, Tuple
from xdg import xdg_cache_home
from wonk import aws, exceptions, optimizer
from wonk.constants import MAX_MANAGED_POLICY_SIZE
from wonk.models import Policy, Statement, canonicalize_resources, smallest_json, to_set
POLICY_CACHE_DIR = xdg_cache_home() / "com.amino.wonk" / "policies"
def minify(policies: List[Policy]) -> List[Statement]:
"""Reduce the input policies to the minimal set of functionally identical equivalents."""
internal_statements: List[Statement] = []
for policy in policies:
internal_statements.extend(policy.statements)
    this_changed = True
    while this_changed:
        changed_actions, internal_statements = grouped_actions(internal_statements)
        changed_resources, internal_statements = grouped_resources(internal_statements)
        this_changed = changed_actions or changed_resources
    return internal_statements
def grouped_actions(statements: List[Statement]) -> Tuple[bool, List[Statement]]:
"""Merge similar policies' actions.
Returns a list of statements whose actions have been combined when possible.
"""
statement_sets: Dict[str, Statement] = {}
changed = False
for statement in statements:
group = statement.grouping_for_actions()
try:
existing_item = statement_sets[group]
except KeyError:
statement_sets[group] = statement
continue
new_action_value = existing_item.action_value | statement.action_value
if existing_item.action_value != new_action_value:
changed = True
statement_sets[group] = existing_item.replace(action_value=new_action_value)
return changed, list(statement_sets.values())
def grouped_resources(statements: List[Statement]) -> Tuple[bool, List[Statement]]:
"""Merge similar policies' resources.
Returns a list of statements whose resources have been combined when possible.
"""
statement_sets: Dict[str, Statement] = {}
changed = False
for statement in statements:
group = statement.grouping_for_resources()
try:
existing_item = statement_sets[group]
except KeyError:
statement_sets[group] = statement
continue
new_resource_value = canonicalize_resources(
to_set(existing_item.resource_value) | to_set(statement.resource_value)
)
if existing_item.resource_value != new_resource_value:
changed = True
statement_sets[group] = existing_item.replace(resource_value=new_resource_value)
return changed, list(statement_sets.values())
def combine(policies: List[Policy]) -> List[Policy]:
"""Combine policy files into the smallest possible set of outputs."""
new_policy = Policy(statements=minify(policies))
# Simplest case: we're able to squeeze everything into a single file. This is the ideal.
try:
new_policy.render()
except exceptions.UnshrinkablePolicyError:
pass
else:
return [new_policy]
# Well, that didn't work. Now we need to split the policy into several documents. Subtract the
# length of the tightest packaging of the policy "envelope" from the maximum size, then
# subtract the number of statements[1] (because we might have to glue the results together
# with commas). This is how much room we have to pack statements into.
#
# [1] Why "len(statements) - 2"? Because you can glue n statements together with n-1 commas,
# and it's guaranteed that we can fit at most n-1 statements into a single document because if
# we could fit all n then we wouldn't have made it to this point in the program. And yes, this
# is exactly the part of the program where we start caring about every byte.
minimum_possible_policy_size = len(str(Policy(statements=[])))
max_number_of_commas = len(new_policy.statements) - 2
max_statement_size = (
MAX_MANAGED_POLICY_SIZE - minimum_possible_policy_size - max_number_of_commas
)
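    # Worked example with illustrative numbers (not taken from a real policy): if
    # MAX_MANAGED_POLICY_SIZE is the 6,144-character AWS managed-policy limit, the
    # empty-policy envelope renders to roughly 40 characters, and there are 12
    # statements to pack, the budget is 6144 - 40 - (12 - 2) = 6094 characters of
    # statement JSON per output document.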
packed_list = []
for statement in new_policy.statements:
packed = str(statement)
if len(packed) <= max_statement_size:
packed_list.append(packed)
continue
for statement_dict in statement.split(max_statement_size):
packed_list.append(smallest_json(statement_dict))
statement_sets = optimizer.pack_statements(packed_list, max_statement_size, 10)
policies = []
for statement_set in statement_sets:
# The splitting process above might have resulted in this policy having multiple statements
# that could be merged back together. The easiest way to handle this is to create a new
# policy as-is, then group its statements together into *another* new, optimized policy,
# and emit that one.
unmerged_policy = Policy(
statements=[Statement(json.loads(statement)) for statement in statement_set]
)
merged_policy = Policy(statements=minify([unmerged_policy]))
policies.append(merged_policy)
return policies
def policy_set_pattern(policy_set: str) -> re.Pattern:
"""Return a regexp matching the policy set's name."""
final = policy_set.rsplit("/", maxsplit=1)[-1]
return re.compile(rf"^{final}_\d+$")
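# For example, policy_set_pattern("arn:aws:iam::123456789012:policy/foo") (an
# illustrative ARN, not one from this codebase) yields a pattern matching "foo_1"
# and "foo_12" but not "foo_bar_3", so sibling policy sets that merely share a
# prefix are left untouched by the cleanup in write_policy_set() below.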
def write_policy_set(output_dir: pathlib.Path, base_name: str, policies: List[Policy]):
"""Write the packed sets, return the names of the files written, and collect garbage."""
# Get the list of existing files for this policy set so that we can delete them later. First,
# get a list of candidates with Path.glob() because that's faster and easier than getting a
# list of _every_ file and filtering it with Python. Then use a regular expression to match
# each candidate so that policy set "foo" doesn't unintentionally delete policy set "foo_bar"'s
# files.
pattern = policy_set_pattern(base_name)
cleanup = {
candidate
for candidate in output_dir.glob(f"{base_name}_*")
if pattern.match(candidate.stem)
}
if len(cleanup) > 10:
# Wonk only creates at most 10 policies for a policy set. If we've found more than 10
# matches then something's gone awry, like the policy set is "*" or such. Either way, pull
# the plug and refuse to delete them.
raise exceptions.TooManyPoliciesError(base_name, len(cleanup))
# For consistency, delete all of the pre-existing files before we start so we can't be left
# with a mix of old and new files.
for old in cleanup:
old.unlink()
    # Write each of the files that go into this policy set, and create a list of the filenames
# we've written.
output_filenames = []
for i, policy in enumerate(policies, 1):
output_path = output_dir / f"{base_name}_{i}.json"
output_filenames.append(str(output_path))
output_path.write_text(policy.render())
return output_filenames
def make_cache_file(name: str, version: str) -> pathlib.Path:
"""Return the path to the document's cache file."""
cache_dir = POLICY_CACHE_DIR / name
cache_dir.mkdir(parents=True, exist_ok=True)
return cache_dir / f"{version}.json"
def fetch(client, arn: str, force: bool = False) -> str:
"""Return the contents of the policy."""
current_version = aws.get_policy_version(client, arn)
cache_file = make_cache_file(aws.name_for(arn), current_version)
policy_doc = None
try:
if not force:
policy_doc = cache_file.read_text()
except FileNotFoundError:
pass
if policy_doc is None:
policy_doc = aws.get_policy(client, arn, current_version)
cache_file.write_text(policy_doc)
return policy_doc
|
script/cal_overlap.py | zeta1999/SpinNet | 166 | 12764777 | <reponame>zeta1999/SpinNet
import os
from os.path import exists, join
import pickle
import numpy as np
import open3d
import cv2
import time
class ThreeDMatch(object):
"""
Given point cloud fragments and corresponding pose in '{root}'.
1. Save the aligned point cloud pts in '{savepath}/3DMatch_{downsample}_points.pkl'
2. Calculate the overlap ratio and save in '{savepath}/3DMatch_{downsample}_overlap.pkl'
3. Save the ids of anchor keypoints and positive keypoints in '{savepath}/3DMatch_{downsample}_keypts.pkl'
"""
def __init__(self, root, savepath, split, downsample):
self.root = root
self.savepath = savepath
self.split = split
self.downsample = downsample
# dict: from id to pts.
self.pts = {}
# dict: from id_id to overlap_ratio
self.overlap_ratio = {}
# dict: from id_id to anc_keypts id & pos_keypts id
self.keypts_pairs = {}
with open(os.path.join(root, f'scene_list_{split}.txt')) as f:
scene_list = f.readlines()
self.ids_list = []
self.scene_to_ids = {}
for scene in scene_list:
scene = scene.replace("\n", "")
self.scene_to_ids[scene] = []
for seq in sorted(os.listdir(os.path.join(self.root, scene))):
if not seq.startswith('seq'):
continue
scene_path = os.path.join(self.root, scene + f'/{seq}')
ids = [scene + f"/{seq}/" + str(filename.split(".")[0]) for filename in os.listdir(scene_path) if
filename.endswith('ply')]
ids = sorted(ids, key=lambda x: int(x.split("_")[-1]))
self.ids_list += ids
self.scene_to_ids[scene] += ids
print(f"Scene {scene}, seq {seq}: num ply: {len(ids)}")
print(f"Total {len(scene_list)} scenes, {len(self.ids_list)} point cloud fragments.")
self.idpair_list = []
self.load_all_ply(downsample)
self.cal_overlap(downsample)
def load_ply(self, data_dir, ind, downsample, aligned=True):
pcd = open3d.io.read_point_cloud(join(data_dir, f'{ind}.ply'))
pcd = open3d.geometry.PointCloud.voxel_down_sample(pcd, voxel_size=downsample)
if aligned is True:
matrix = np.load(join(data_dir, f'{ind}.pose.npy'))
pcd.transform(matrix)
return pcd
def load_all_ply(self, downsample):
pts_filename = join(self.savepath, f'3DMatch_{self.split}_{downsample:.3f}_points.pkl')
if exists(pts_filename):
with open(pts_filename, 'rb') as file:
self.pts = pickle.load(file)
print(f"Load pts file from {self.savepath}")
return
self.pts = {}
for i, anc_id in enumerate(self.ids_list):
anc_pcd = self.load_ply(self.root, anc_id, downsample=downsample, aligned=True)
points = np.array(anc_pcd.points)
print(len(points))
self.pts[anc_id] = points
print('processing ply: {:.1f}%'.format(100 * i / len(self.ids_list)))
with open(pts_filename, 'wb') as file:
pickle.dump(self.pts, file)
def get_matching_indices(self, anc_pts, pos_pts, search_voxel_size, K=None):
match_inds = []
bf_matcher = cv2.BFMatcher(cv2.NORM_L2)
match = bf_matcher.match(anc_pts, pos_pts)
for match_val in match:
if match_val.distance < search_voxel_size:
match_inds.append([match_val.queryIdx, match_val.trainIdx])
return np.array(match_inds)
def cal_overlap(self, downsample):
overlap_filename = join(self.savepath, f'3DMatch_{self.split}_{downsample:.3f}_overlap.pkl')
keypts_filename = join(self.savepath, f'3DMatch_{self.split}_{downsample:.3f}_keypts.pkl')
if exists(overlap_filename) and exists(keypts_filename):
with open(overlap_filename, 'rb') as file:
self.overlap_ratio = pickle.load(file)
print(f"Reload overlap info from {overlap_filename}")
with open(keypts_filename, 'rb') as file:
self.keypts_pairs = pickle.load(file)
print(f"Reload keypts info from {keypts_filename}")
            return
t0 = time.time()
for scene, scene_ids in self.scene_to_ids.items():
scene_overlap = {}
print(f"Begin processing scene {scene}")
for i in range(0, len(scene_ids)):
anc_id = scene_ids[i]
for j in range(i + 1, len(scene_ids)):
pos_id = scene_ids[j]
anc_pts = self.pts[anc_id].astype(np.float32)
pos_pts = self.pts[pos_id].astype(np.float32)
try:
matching_01 = self.get_matching_indices(anc_pts, pos_pts, self.downsample)
except BaseException as e:
print(f"Something wrong with get_matching_indices {e} for {anc_id}, {pos_id}")
matching_01 = np.array([])
overlap_ratio = len(matching_01) / len(anc_pts)
scene_overlap[f'{anc_id}@{pos_id}'] = overlap_ratio
if overlap_ratio > 0.30:
self.keypts_pairs[f'{anc_id}@{pos_id}'] = matching_01.astype(np.int32)
self.overlap_ratio[f'{anc_id}@{pos_id}'] = overlap_ratio
print(f'\t {anc_id}, {pos_id} overlap ratio: {overlap_ratio}')
print('processing {:s} ply: {:.1f}%'.format(scene, 100 * i / len(scene_ids)))
print('Finish {:s}, Done in {:.1f}s'.format(scene, time.time() - t0))
with open(overlap_filename, 'wb') as file:
pickle.dump(self.overlap_ratio, file)
with open(keypts_filename, 'wb') as file:
pickle.dump(self.keypts_pairs, file)
if __name__ == '__main__':
ThreeDMatch(root='path to your ply file.',
savepath='data/3DMatch',
split='train',
downsample=0.030
)
|
pytorch_lit/shared_params.py | lipovsek/PyTorch-LIT | 151 | 12764828 | <filename>pytorch_lit/shared_params.py
from torch.nn import Parameter
from .memory import Memory
class SharedParameterUtil:
_isHijacked = False
_memory = None
_mainNew = None
@staticmethod
def _shared_new(cls, data=None, requires_grad=True):
if data is None:
return SharedParameterUtil._mainNew(cls, data, requires_grad)
mShape = data.shape
fSize = 1
for i in mShape:
fSize *= i
sharedSlot = Memory.obtain(SharedParameterUtil._memory)
nT = sharedSlot.reshape(-1)[:fSize]
nT = nT.reshape(mShape)
return SharedParameterUtil._mainNew(cls, nT, requires_grad)
@staticmethod
def hijackParameters(memoryKey):
if SharedParameterUtil._isHijacked:
raise RuntimeError("already hijacked, reset first")
SharedParameterUtil._mainNew = Parameter.__new__
SharedParameterUtil._isHijacked = True
SharedParameterUtil._memory = memoryKey
Parameter.__new__ = SharedParameterUtil._shared_new
@staticmethod
def resetParameters(resetMemory=False):
if not SharedParameterUtil._isHijacked:
return
Parameter.__new__ = SharedParameterUtil._mainNew
SharedParameterUtil._isHijacked = False
SharedParameterUtil._mainNew = None
if resetMemory:
Memory.deallocKey(SharedParameterUtil._memory)
SharedParameterUtil._memory = None
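# Hedged usage sketch (not part of the original module): it assumes a memory slot
# named "weights" has already been registered with `Memory` elsewhere in the
# library, and `build_model()` is a hypothetical constructor.
#
#   SharedParameterUtil.hijackParameters("weights")
#   model = build_model()   # Parameters created here alias the shared "weights"
#                           # buffer via Memory.obtain() instead of new storage
#   SharedParameterUtil.resetParameters(resetMemory=False)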
|
backend/projects/tests/test_models.py | LucasSantosGuedes/App-Gestao | 142 | 12764849 | import pytest
from mixer.backend.django import mixer
from projects.models import Project, ProjectMembership
from users.models import User
@pytest.mark.django_db
class TestProject:
def test_project_create(self):
user = mixer.blend(User, username='test')
proj = mixer.blend(Project, owner = user)
assert proj.owner == user
def test_project_str(self):
proj = mixer.blend(Project)
assert str(proj) == proj.title
@pytest.mark.django_db
class TestProjectMembers:
def test_member(self):
proj = mixer.blend(Project)
user = mixer.blend(User, username='test')
mixer.blend(ProjectMembership, member=user, project=proj)
assert proj.members.get(username='test') == user
def test_proj_member_str(self):
pmem = mixer.blend(ProjectMembership)
assert str(pmem) == f'{pmem.member.full_name} , {pmem.project.title}' |
pymtl3/passes/rtlir/util/test_utility.py | kevinyuan/pymtl3 | 152 | 12764865 | <gh_stars>100-1000
#=========================================================================
# test_utility.py
#=========================================================================
# Author : <NAME>
# Date : Feb 21, 2019
"""Test utilities used by RTLIR tests."""
from contextlib import contextmanager
import pytest
@pytest.fixture
def do_test( request ):
"""Call `local_do_test` of the requesting module."""
return request.module.local_do_test
@contextmanager
def expected_failure( exception = Exception, msg = None ):
"""Mark one test case as should-fail.
Not to be confused with pytest.xfail, which is commonly used to mark
tests related to unimplemented functionality. This test only passes when
it throws an expected exception.
"""
try:
yield
except exception as e:
if msg is None or e.args[0].find( msg ) != -1:
return
else:
raise
raise Exception( 'expected-to-fail test unexpectedly passed!' )
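# Example usage (illustrative test, not from the suite): the body passes only
# because it raises the expected exception with a matching message.
#
#   def test_rejects_negative_width():
#     with expected_failure( ValueError, 'width must be positive' ):
#       raise ValueError( 'width must be positive: got -1' )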
def get_parameter( name, func ):
"""Return the parameter for `name` arg of `func`"""
try:
for mark in func.pytestmark:
if mark.name == 'parametrize':
# Find the position of the given name
pos = -1
for i, arg in enumerate( mark.args[0].split() ):
if arg == name:
pos = i
break
if pos == -1:
raise Exception( f'{func} does not have parameter named {name}!' )
if len(mark.args[0].split()) == 1:
return mark.args[1]
return list(map(lambda x: x[pos], mark.args[1]))
except AttributeError:
raise Exception( f'given function {func} does not have pytest marks!' )
|
promoterz/evaluationPool.py | mczero80/japonicus | 229 | 12764874 | <filename>promoterz/evaluationPool.py
#!/bin/python
import time
import random
import itertools
from multiprocessing import Pool, TimeoutError
from multiprocessing.pool import ThreadPool
class EvaluationPool():
def __init__(self,
World,
Urls, poolsize, individual_info):
self.World = World
self.Urls = Urls
self.lasttimes = [0 for x in Urls]
self.lasttimesperind = [0 for x in Urls]
self.poolsizes = [poolsize for x in Urls]
self.individual_info = individual_info
def evaluateBackend(self, datasets, I, inds):
stime = time.time()
dateInds = list(itertools.product(datasets, inds))
# print(list(dateInds))
Q = [
([dataset], Ind, self.Urls[I])
for dataset, Ind in dateInds
]
P = Pool(self.poolsizes[I])
fitnesses = P.starmap(self.World.tools.Evaluate, Q)
P.close()
P.join()
delta_time = time.time() - stime
return fitnesses, delta_time
def evaluatePopulation(self, locale):
individues_to_simulate = [
ind for ind in locale.population if not ind.fitness.valid
]
props = self.distributeIndividuals(individues_to_simulate)
args = [
[
locale.Dataset,
I,
props[I],
]
for I in range(len(self.Urls))
]
pool = ThreadPool(len(self.Urls))
results = []
try:
for A in args:
results.append(pool.apply_async(self.evaluateBackend, A))
pool.close()
except (SystemExit, KeyboardInterrupt):
print("Aborted by user.")
exit(0)
TimedOut = []
for A in range(len(results)):
try:
perindTime = 3 * self.lasttimesperind[A]\
if self.lasttimesperind[A] else 12
timeout = perindTime * len(props[A])\
if A else None # no timeout for local machine;
results[A] = results[A].get(timeout=timeout)
except TimeoutError: # Timeout: remote machine is dead;
print("Machine timeouts!")
args[A][1] = 0 # Set to evaluate @ local machine
results[A] = self.evaluateBackend(* args[A])
TimedOut.append(A)
pool.join()
TotalNumberOfTrades = 0
for PoolIndex in range(len(results)):
for i, fit in enumerate(results[PoolIndex][0]):
if self.individual_info:
print(self.World.tools.showIndividue(fit))
self.World.tools.ApplyResult(fit, props[PoolIndex][i])
TotalNumberOfTrades += fit['trades']
self.lasttimes[PoolIndex] = results[PoolIndex][1]
L = len(props[PoolIndex])
self.lasttimesperind[PoolIndex] =\
self.lasttimes[PoolIndex] / L if L else 5
F = [x.fitness.valid for x in individues_to_simulate]
assert (all(F))
for T in TimedOut:
self.ejectURL(T)
N = len(individues_to_simulate)
# RECORD NUMBER OF EVALUATIONS;
locale.World.totalEvaluations += N
# CALCULATE AVERAGE TRADE NUMBER;
averageTrades = TotalNumberOfTrades / max(1, N)
return N, averageTrades
|
tests/unit/test_non_empty_configs_provider.py | barryib/gitlabform | 299 | 12764886 | import pytest
from gitlabform import EXIT_INVALID_INPUT
from gitlabform.configuration.projects_and_groups import ConfigurationProjectsAndGroups
from gitlabform.filter import NonEmptyConfigsProvider
def test_error_on_missing_key():
config_yaml = """
---
# no key at all
"""
with pytest.raises(SystemExit) as e:
configuration = ConfigurationProjectsAndGroups(config_string=config_yaml)
NonEmptyConfigsProvider(configuration, None, None)
assert e.value.code == EXIT_INVALID_INPUT
def test_error_on_empty_key():
config_yaml = """
---
projects_and_groups:
"""
with pytest.raises(SystemExit) as e:
configuration = ConfigurationProjectsAndGroups(config_string=config_yaml)
NonEmptyConfigsProvider(configuration, None, None)
assert e.value.code == EXIT_INVALID_INPUT
|
GeneratorInterface/GenFilters/python/ZgammaFilter_cfi.py | ckamtsikis/cmssw | 852 | 12764901 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
# values tuned also according to slide 3 of :
# https://indico.cern.ch/getFile.py/access?contribId=23&sessionId=2&resId=0&materialId=slides&confId=271548
# selection efficiency of approx 6% for ZMM_8TeV
myZgammaFilter = cms.EDFilter('ZgammaMassFilter',
HepMCProduct = cms.InputTag("generator","unmeared"),
minPhotonPt = cms.double(7.),
minLeptonPt = cms.double(7.),
minPhotonEta = cms.double(-3),
minLeptonEta = cms.double(-3),
maxPhotonEta = cms.double(3),
maxLeptonEta = cms.double(3),
minDileptonMass = cms.double(30.),
minZgMass = cms.double(40.)
)
ZgammaFilter = cms.Sequence( myZgammaFilter )
|
xautodl/spaces/__init__.py | Joey61Liuyi/AutoDL-Projects | 817 | 12764915 | #####################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2021.01 #
#####################################################
# Define complex search space for AutoDL            #
#####################################################
from .basic_space import Categorical
from .basic_space import Continuous
from .basic_space import Integer
from .basic_space import Space
from .basic_space import VirtualNode
from .basic_op import has_categorical
from .basic_op import has_continuous
from .basic_op import is_determined
from .basic_op import get_determined_value
from .basic_op import get_min
from .basic_op import get_max
|
pygmy/rest/wsgi.py | ParikhKadam/pygmy | 571 | 12764932 | #!/usr/bin/env python3
from pygmy.core.initialize import initialize
initialize()
from pygmy.rest.manage import app
if __name__ == '__main__':
app.run()
|
infoxlm/src-infoxlm/infoxlm/models/__init__.py | Sanster/unilm | 5,129 | 12764957 | import argparse
import importlib
import os
from fairseq.models import MODEL_REGISTRY, ARCH_MODEL_INV_REGISTRY
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('infoxlm.models.' + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group('Named architectures')
group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
group_args = parser.add_argument_group('Additional command-line arguments')
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + '_parser'] = parser
|
bench/bench_long_empty_string.py | janaknat/markupsafe | 415 | 12764978 | <filename>bench/bench_long_empty_string.py
from markupsafe import escape
def run():
string = "Hello World!" * 1000
escape(string)
|
torchcam/cams/__init__.py | alexandrosstergiou/torch-cam | 749 | 12764981 | <reponame>alexandrosstergiou/torch-cam
from .cam import *
from .gradcam import *
from .utils import *
|
dfirtrack_config/migrations/0014_main_overview.py | thomas-kropeit/dfirtrack | 273 | 12764983 | <gh_stars>100-1000
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dfirtrack_config', '0013_csvimporterconfigmodel'),
]
operations = [
migrations.AddField(
model_name='mainconfigmodel',
name='main_overview',
field=models.CharField(
choices=[
('main_overview_artifact', 'Artifact'),
('main_overview_case', 'Case'),
('main_overview_system', 'System'),
('main_overview_tag', 'Tag'),
('main_overview_task', 'Task'),
],
default='main_overview_system',
max_length=50,
),
),
]
|
CommonTools/PileupAlgos/python/PUPuppi_cff.py | ckamtsikis/cmssw | 852 | 12764987 | import FWCore.ParameterSet.Config as cms
from CommonTools.PileupAlgos.Puppi_cff import *
pupuppi = puppi.clone(
invertPuppi = True
)
|
src/openprocurement/api/interfaces.py | EBRD-ProzorroSale/openprocurement.api | 102 | 12765008 | # -*- coding: utf-8 -*-
from zope.interface import Interface
class IOPContent(Interface):
""" Openprocurement Content """
class IContentConfigurator(Interface):
""" Content configurator """
|
insights/parsers/tests/test_neutron_server_log.py | lhuett/insights-core | 121 | 12765027 | <gh_stars>100-1000
from insights.parsers.neutron_server_log import NeutronServerLog
from insights.tests import context_wrap
NEUTRON_LOG = """
2016-09-13 05:56:45.155 30586 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: b45405915eb44e608885f894028d37b9", "code": 404, "title": "Not Found"}}
2016-09-13 05:56:45.156 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:45.884 30588 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:45.886 30588 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: <PASSWORD>ba1<PASSWORD>", "code": 404, "title": "Not Found"}}
2016-09-13 06:06:45.887 30588 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:46.131 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
2016-09-13 06:06:46.131 30586 WARNING keystonemiddleware.auth_token [-] Identity response: {"error": {"message": "Could not find token: <KEY>0", "code": 404, "title": "Not Found"}}
2016-09-13 06:06:46.132 30586 WARNING keystonemiddleware.auth_token [-] Authorization failed for token
""".strip()
def test_server_log():
neutron_server = NeutronServerLog(context_wrap(NEUTRON_LOG))
assert len(neutron_server.get(["WARNING", "Authorization failed for token"])) == 5
assert len(neutron_server.get(["Identity response:"])) == 3
assert len(neutron_server.get("Identity response:")) == 3
|
benchmarks/src/garage_benchmarks/experiments/q_functions/__init__.py | blacksph3re/garage | 1,500 | 12765054 | <reponame>blacksph3re/garage
"""Benchmarking experiments for Q-functions."""
from garage_benchmarks.experiments.q_functions.continuous_mlp_q_function import ( # isort:skip # noqa: E501
continuous_mlp_q_function)
__all__ = ['continuous_mlp_q_function']
|
pytools/lib/common.py | virtualparadox/bbmap | 134 | 12765078 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Function definitions common to all programs.
"""
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## libraries to use
#import re
import os
import time
import sys
#import getpass
import logging
#from colorlog import ColoredFormatter
# import EnvironmentModules # get_read_count_fastq
from subprocess import Popen, PIPE
from email.mime.text import MIMEText
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## function definitions
'''
creates a logging instance
https://docs.python.org/2/howto/logging.html
https://pypi.python.org/pypi/colorlog
'''
def get_logger(log_name, log_file, log_level = "INFO", stdout = False, color = False):
log = logging.getLogger(log_name)
handler = None
if stdout:
handler = logging.StreamHandler(sys.stdout)
else:
handler = logging.FileHandler(log_file)
formatter = logging.Formatter('%(filename)-15s:%(process)d %(asctime)s %(levelname)s: %(message)s')
if color and 1==2:
"""
formatter = ColoredFormatter("%(filename)-15s:%(process)d %(asctime)s %(log_color)s%(levelname)s: %(message)s", datefmt=None, reset=True,
log_colors={
'DEBUG': 'blue',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red, bg_white',
},
secondary_log_colors={},
style='%')
Not working in conda - 2017-04-29
"""
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(log_level)
return log
'''
Checkpoint the status plus a timestamp
- appends the status
@param status_log: /path/to/status.log (or whatever you name it)
@param status: status to append to status.log
'''
def checkpoint_step(status_log, status):
status_line = "%s,%s\n" % (status, time.strftime("%Y-%m-%d %H:%M:%S"))
with open(status_log, "a") as myfile:
myfile.write(status_line)
'''
returns the last step (status) from the pipeline
@param status_log: /path/to/status.log (or whatever you name it)
@param log: logger object
@return last status in the status log, "start" if nothing there
'''
def get_status(status_log, log = None):
#status_log = "%s/%s" % (output_path, "test_status.log")
status = "start"
timestamp = str(time.strftime("%Y-%m-%d %H:%M:%S"))
if os.path.isfile(status_log):
fh = open(status_log, 'r')
lines = fh.readlines()
fh.close()
for line in lines:
if line.startswith('#'): continue
line_list = line.split(",")
assert len(line_list) == 2
status = str(line_list[0]).strip()
timestamp = str(line_list[1]).strip()
if not status:
status = "start"
if log:
log.info("Last checkpointed step: %s (%s)", status, timestamp)
else:
if log:
log.info("Cannot find status.log (%s), assuming new run", status_log)
status = status.strip().lower()
return status
'''
run a command from python
@param cmd: command to run
@param live: False = run in dry mode (print command), True = run normally
@param log: logger object
@return std_out, std_err, exit_code
'''
def run_command(cmd, live=False, log=None):
stdOut = None
stdErr = None
exitCode = None
#end = 0
#elapsedSec = 0
if cmd:
if not live:
stdOut = "Not live: cmd = '%s'" % (cmd)
exitCode = 0
else:
if log: log.info("cmd: %s" % (cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdOut, stdErr = p.communicate()
exitCode = p.returncode
if log:
log.info("Return values: exitCode=" + str(exitCode) + ", stdOut=" + str(stdOut) + ", stdErr=" + str(stdErr))
if exitCode != 0:
log.warn("- The exit code has non-zero value.")
else:
if log:
log.error("- No command to run.")
return None, None, -1
return stdOut, stdErr, exitCode
'''
replacement for run_command
- includes logging, convert_cmd & post_mortem
'''
def run_cmd(cmd, log=None):
std_out = None
std_err = None
exit_code = 0
if cmd:
# convert to work on genepool/denovo
cmd = convert_cmd(cmd)
if log:
log.info("- cmd: %s", cmd)
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
std_out, std_err = p.communicate()
exit_code = p.returncode
post_mortem_cmd(cmd, exit_code, std_out, std_err, log)
return std_out, std_err, exit_code
'''
Simple function to output to the log what happened only if exit code > 0
Typical usage:
std_out, std_err, exit_code = run_command(cmd, True)
post_mortem_cmd(cmd, exit_code, std_out, std_err)
'''
def post_mortem_cmd(cmd, exit_code, std_out, std_err, log = None):
if exit_code > 0:
if log:
log.error("- cmd failed: %s", cmd)
log.error("- exit code: %s", exit_code)
else:
print "- cmd failed: %s" % (cmd)
print "- exit code: %s" % (exit_code)
if std_out:
if log:
log.error("- std_out: %s", std_out)
else:
print "- std_out: %s" % (std_out)
if std_err:
if log:
log.error("- std_err: %s", std_err)
else:
print "- std_err: %s" % (std_err)
'''
Convert command to use genepool or denovo (shifter) to run
replace #placeholder; with shifter or module load command
#placeholder.v; should specify the version to use
This should be the only place in the pipelines that specifies the images/modules translation
'''
def convert_cmd(cmd):
new_cmd = cmd
shifter_img = {
"#bbtools" : "shifter --image=bryce911/bbtools ",
"#pigz" : "module load pigz;",
"#jamo" : "shifter --image=registry.services.nersc.gov/htandra/jamo_dev:1.0 ", # works, but would like simple module to use - have one on Denovo but not Cori
"#gnuplot" : "shifter --image=bryce911/bbtools ", # (1)
"#spades/3.9.0" : "shifter --image=bryce911/spades3.9.0 ",
"#spades/3.10.1" : "shifter --image=bryce911/spades3.10.1 ",
"#spades/3.11.0" : "shifter --image=bryce911/spades-3.11.0 ", # GAA-3383
"#spades/3.11.1-check" : "shifter --image=bryce911/spades3.11.1-check ", # development
"#prodigal/2.6.3" : "shifter --image=registry.services.nersc.gov/jgi/prodigal ", # RQCSUPPORT-1318
"#prodigal/2.5.0" : "shifter --image=registry.services.nersc.gov/jgi/prodigal ",
"#prodigal/2.50" : "shifter --image=registry.services.nersc.gov/jgi/prodigal ",
"#lastal/869" : "shifter --image=bryce911/lastal:869 ",
"#lastal/828" : "shifter --image=bryce911/lastal:869 ",
#"#lastal" : "shifter --image=bryce911/lastal:869 ",
"#R/3.3.2" : "module load R/3.3.2;",
"#texlive" : "shifter --image=bryce911/bbtools ", # (1)
"#java" : "shifter --image=bryce911/bbtools ", # (1)
"#blast+/2.6.0" : "shifter --image=sulsj/ncbi-blastplus:2.6.0 ",
"#blast" : "shifter --image=sulsj/ncbi-blastplus:2.7.0 ",
"#megahit-1.1.1" : "shifter --image=foster505/megahit:v1.1.1-2-g02102e1 ",
"#smrtanalysis/2.3.0_p5" : "shifter --image=registry.services.nersc.gov/jgi/smrtanalysis:2.3.0_p5 ", # meth - need more memory
"#mummer/3.23" : "shifter --image=bryce911/mummer3.23 ", # 3.23
"#hmmer" : "shifter --image=registry.services.nersc.gov/jgi/hmmer:latest ", # 3.1b2
"#samtools/1.4" : "shifter --image=rmonti/samtools ",
"#mothur/1.39.5" : "shifter --image=bryce911/mothur1.39.5 ",
"#vsearch/2.4.3" : "shifter --image=bryce911/vsearch2.4.3 ",
"#graphviz" : "shifter --image=bryce911/bbtools ",
"#ssu-align/0.1.1" : "shifter --image=bryce911/ssu-align0.1.1 ", # openmpi/1.10 included in docker container
"#smrtlink/4.0.0.190159" : "shifter --image=registry.services.nersc.gov/jgi/smrtlink:4.0.0.190159 /smrtlink/smrtcmds/bin/", # progs not in path
"#smrtlink/5.0.1.9585" : "shifter --image=registry.services.nersc.gov/jgi/smrtlink:5.0.1.9585 /smrtlink/smrtcmds/bin/", # progs not in path, Tony created 2017-10-16
"#smrtlink" : "shifter --image=registry.services.nersc.gov/jgi/smrtlink:5.0.1.9585 /smrtlink/smrtcmds/bin/", # progs not in path
"#prodege" : "shifter --image=bryce911/prodege ", # 2.2.1
#"#hmmer" : "shifter --image=registry.services.nersc.gov/jgi/hmmer ", # 3.1b2 - Feb 2015, latest as of Oct 2017
"#checkm" : "shifter --image=registry.services.nersc.gov/jgi/checkm ",
}
# (1) - added as part of the bryce911 bbtools package
#cmd = "#bbtools-shijie;bbmap...."
# this dict will be deprecated as of March 2018 when genepool passes into legend
genepool_mod = {
"#bbtools" : "module load bbtools",
"#pigz" : "module load pigz",
"#jamo" : "module load jamo",
"#gnuplot" : "module load gnuplot/4.6.2", # sag,iso,sps,ce:gc_cov, gc_histogram, contig_gc
"#spades/3.9.0" : "module load spades/3.9.0",
"#spades/3.10.1" : "module load spades/3.10.1",
"#spades/3.11.1" : "module load spades/3.11.1-check",
"#prodigal/2.6.3" : "module load prodigal/2.50", # aka 2.50, also 2.60 is available
"#prodigal/2.5.0" : "module load prodigal/2.50",
"#prodigal/2.50" : "module load prodigal/2.50",
#"#lastal" : "module load last/828",
"#lastal/828" : "module load last/828",
"#R/3.3.2" : "module unload R;module load R/3.3.1", # 3.3.2 not on genepool - RQCSUPPORT-1516 unload R for Kecia
"#texlive" : "module load texlive",
"#blast+/2.6.0" : "module load blast+/2.6.0",
#"#blast+/2.7.0" : "module load blast+/2.7.0", # not created
"#blast" : "module load blast+/2.6.0",
"#java" : "", # java runs natively on genepool
"#megahit-1.1.1" : "module load megahit/1.1.1",
"#smrtanalysis/2.3.0_p5" : "module load smrtanalysis/2.3.0_p5",
"#smrtanalysis/2.3.0_p5_xmx32g" : "module load smrtanalysis/2.3.0_p5;export _JAVA_OPTIONS='-Xmx32g'",
"#mummer/3.23" : "module load mummer/3.23",
"#hmmer" : "module load hmmer/3.1b2",
"#samtools/1.4" : "module load samtools/1.4",
"#mothur/1.39.5" : "module load mothur/1.32.1", # 1.26.0 default, 1.32.1
"#vsearch/2.4.3" : "module load vsearch/2.3.0", # 2.3.0
"#graphviz" : "module load graphviz",
"#ssu-align/0.1.1" : "module load ssu-align",
"#smrtlink/4.0.0.190159" : "module load smrtlink/4.0.0.190159",
"#smrtlink" : "module load smrtlink/5.0.1.9585",
"#smrtlink/5.0.1.9585" : "module load smrtlink/5.0.1.9585",
"#prodege" : "module load R;/projectb/sandbox/rqc/prod/pipelines/external_tools/sag_decontam/prodege-2.2/bin/",
"#checkm" : "module load hmmer prodigal pplacer", # checkm installed in python by default on genepool
}
#bbtools;stats.sh
if cmd.startswith("#"):
cluster = "genepool"
# any other env ids to use?
# cori, denovo, genepool
cluster = os.environ.get('NERSC_HOST', 'unknown')
f = cmd.find(";")
mod = "" # command to replace
if f > -1:
mod = cmd[0:f]
if mod:
# use module load jamo on denovo
if mod == "#jamo" and cluster == "denovo":
shifter_img[mod] = "module load jamo;"
if cluster in ("denovo", "cori"):
if mod in shifter_img:
new_cmd = new_cmd.replace(mod + ";", shifter_img[mod])
else:
if mod in genepool_mod:
if genepool_mod[mod] == "":
new_cmd = new_cmd.replace(mod + ";", "")
else:
new_cmd = new_cmd.replace(mod, genepool_mod[mod])
if new_cmd.startswith("#"):
print "Command not found! %s" % new_cmd
sys.exit(18)
#print new_cmd
return new_cmd
'''
returns human readable file size
@param num = file size (e.g. 1000)
@return: readable float e.g. 1.5 KB
'''
def human_size(num):
if not num:
num = 0.0
for x in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'XB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'ZB')
'''
send out email
@param email_to: email recipient (e.g. <EMAIL>)
@param email_subject: subject line for the email
@param email_body: content of the email
@param email_from: optional sender address
'''
def send_email(email_to, email_subject, email_body, email_from = '<EMAIL>', log = None):
msg = ""
err_flag = 0
if not email_to:
msg = "- send_email: email_to parameter missing!"
if not email_subject:
msg = "- send_email: email_subject parameter missing!"
if not email_body:
msg = "- send_email: email_body parameter missing!"
if err_flag == 0:
msg = "- sending email to: %s" % (email_to)
if log:
log.info(msg)
else:
print msg
if err_flag == 1:
return 0
# assume html
email_msg = MIMEText(email_body, "html") # vs "plain"
email_msg['Subject'] = email_subject
email_msg['From'] = email_from
email_msg['To'] = email_to
p = Popen(["/usr/sbin/sendmail", "-t"], stdin = PIPE)
p.communicate(email_msg.as_string())
return err_flag
'''
Write to rqc_file (e.g. rqc-files.tmp) the file_key and file_value
@param rqc_file_log: full path to file containing key=file
@param file_key: key for the entry
@param file_value: value for the entry
'''
def append_rqc_file(rqc_file_log, file_key, file_value, log=None):
if file_key:
buffer = "%s = %s\n" % (file_key, file_value)
with open(rqc_file_log, "a") as myfile:
myfile.write(buffer)
if log: log.info("append_rqc_file: %s:%s" % (file_key, file_value))
else:
if log: log.warning("key or value error: %s:%s" % (file_key, file_value))
'''
Write to rqc_stats (e.g. rqc-stats.tmp) the stats_key and stats_value
@param rqc_stats_log: full path to file containing key=value stats
@param stats_key: key for the entry
@param stats_value: value for the entry
'''
def append_rqc_stats(rqc_stats_log, stats_key, stats_value, log=None):
if stats_key:
buffer = "%s = %s\n" % (stats_key, stats_value)
with open(rqc_stats_log, "a") as myfile:
myfile.write(buffer)
if log: log.info("append_rqc_stats: %s:%s" % (stats_key, stats_value))
else:
if log: log.warning("key or value error: %s:%s" % (stats_key, stats_value))
'''
Return the file system path to jgi-rqc-pipeline so we can use */tools and */lib
@return /path/to/jgi-rqc-pipelines
'''
def get_run_path():
current_path = os.path.dirname(os.path.abspath(__file__))
run_path = os.path.abspath(os.path.join(current_path, os.pardir))
return run_path
'''
Simple read count using bbtools n_contigs field
- slightly different than in rqc_utility
n_scaffolds n_contigs scaf_bp contig_bp gap_pct scaf_N50 scaf_L50 ctg_N50 ctg_L50 scaf_N90 scaf_L90 ctg_N90 ctg_L90 scaf_max ctg_max scaf_n_gt50K scaf_pct_gt50K gc_avg gc_std
1346616 1346616 405331416 405331415 0.000 1346616 301 1346615 301 1346616 301 1346615 301 301 301 0 0.000 0.44824 0.02675
'''
def get_read_count_fastq(fastq, log = None):
read_cnt = 0
if os.path.isfile(fastq):
# EnvironmentModules.module(["load", "bbtools"])
# bbtools faster than zcat | wc because bbtools uses pigz
# cmd = "stats.sh format=3 in=%s" % fastq
cmd = "#bbtools;stats.sh format=3 in=%s" % fastq
cmd = convert_cmd(cmd)
if log:
log.info("- cmd: %s", cmd)
std_out, std_err, exit_code = run_command(cmd, True)
# EnvironmentModules.module(["unload", "bbtools"])
if exit_code == 0 and std_out:
line_list = std_out.split("\n")
#print line_list
val_list = str(line_list[1]).split() #.split('\t')
#print "v = %s" % val_list
read_cnt = int(val_list[1])
if log:
log.info("- read count: %s", read_cnt)
else:
if log:
post_mortem_cmd(cmd, exit_code, std_out, std_err, log)
    else:
        if log:
            log.error("- fastq: %s does not exist!", fastq)
return read_cnt
'''
Subsampling calculation
0 .. 250k reads = 100%
250k .. 25m = 100% to 1%
25m .. 600m = 1%
600m+ .. oo < 1%
July 2014 - 15 runs > 600m (HiSeq-2500 Rapid) - 4 actual libraries / 85325 seq units
- returns new subsampling rate
'''
def get_subsample_rate(read_count):
subsample = 0
subsample_rate = 0.01
max_subsample = 6000000 # 4 hours of blast time
new_subsample_rate = 250000.0/read_count
subsample_rate = max(new_subsample_rate, subsample_rate)
subsample_rate = min(1, subsample_rate) # if subsample_rate > 1, then set to 1
subsample = int(read_count * subsample_rate)
if subsample > max_subsample:
subsample = max_subsample
subsample_rate = subsample / float(read_count)
return subsample_rate
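# Worked examples (arithmetic only, using the constants above):
#   read_count = 250,000       -> rate 1.0  (every read kept)
#   read_count = 25,000,000    -> rate 0.01 (the 250k target and the 1% floor coincide)
#   read_count = 1,000,000,000 -> 1% would be 10m reads, above the 6m cap, so the
#                                 rate drops to 6,000,000 / 1,000,000,000 = 0.006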
'''
Set color hash
- need to update to remove "c" parameter - used in too many places
'''
def set_colors(c, use_color = False):
if use_color == False:
color = {
'black' : "",
'red' : "",
'green' : "",
'yellow' : "",
'blue' : "",
'pink' : "",
'cyan' : "",
'white' : "",
'' : ""
}
else:
color = {
'black' : "\033[1;30m",
'red' : "\033[1;31m",
'green' : "\033[1;32m",
'yellow' : "\033[1;33m",
'blue' : "\033[1;34m",
'pink' : "\033[1;35m",
'cyan' : "\033[1;36m",
'white' : "\033[1;37m",
'' : "\033[m"
}
return color
'''
New function that just returns colors
'''
def get_colors():
color = {
'black' : "\033[1;30m",
'red' : "\033[1;31m",
'green' : "\033[1;32m",
'yellow' : "\033[1;33m",
'blue' : "\033[1;34m",
'pink' : "\033[1;35m",
'cyan' : "\033[1;36m",
'white' : "\033[1;37m",
'' : "\033[m"
}
return color
'''
Returns msg_ok, msg_fail, msg_warn colored or not colored
'''
def get_msg_settings(color):
msg_ok = "[ "+color['green']+"OK"+color['']+" ]"
msg_fail = "[ "+color['red']+"FAIL"+color['']+" ]"
msg_warn = "[ "+color['yellow']+"WARN"+color['']+" ]"
return msg_ok, msg_fail, msg_warn
'''
Use RQC's ap_tool to get the status
set mode = "-sa" to show all, even completed
'''
def get_analysis_project_id(seq_proj_id, target_analysis_project_id, target_analysis_task_id, output_path, log = None, mode = ""):
if log:
log.info("get_analysis_project_id: spid = %s, tapid = %s, tatid = %s", seq_proj_id, target_analysis_project_id, target_analysis_task_id)
analysis_project_id = 0
analysis_task_id = 0
project_type = None
task_type = None
ap_list = os.path.join(output_path, "ap-info.txt")
AP_TOOL = "/global/dna/projectdirs/PI/rqc/prod/jgi-rqc-pipeline/tools/ap_tool.py"
#AP_TOOL = "/global/homes/b/brycef/git/jgi-rqc-pipeline/tools/ap_tool.py"
cmd = "%s -spid %s -m psv -tapid %s -tatid %s %s > %s 2>&1" % (AP_TOOL, seq_proj_id, target_analysis_project_id, target_analysis_task_id, mode, ap_list)
if log:
log.info("- cmd: %s", cmd)
else:
print "- cmd: %s" % cmd
std_out, std_err, exit_code = run_command(cmd, True)
post_mortem_cmd(cmd, exit_code, std_out, std_err, log)
if os.path.isfile(ap_list):
ap_dict = {} # header = value
cnt = 0
fh = open(ap_list, "r")
for line in fh:
arr = line.strip().split("|")
if cnt == 0:
c2 = 0 # position of title in header
for a in arr:
ap_dict[a.lower()] = c2
c2 += 1
else:
for a in ap_dict:
if ap_dict[a] + 1 > len(arr):
pass
else:
ap_dict[a] = arr[ap_dict[a]]
cnt += 1
fh.close()
analysis_project_id = ap_dict.get("analysis project id")
analysis_task_id = ap_dict.get("analysis task id")
project_type = ap_dict.get("analysis product name")
task_type = ap_dict.get("analysis task name")
# nno such project
if cnt == 1:
analysis_project_id = 0
analysis_task_id = 0
if log:
log.info("- project type: %s, task type: %s", project_type, task_type)
log.info("- analysis_project_id: %s, analysis_task_id: %s", analysis_project_id, analysis_task_id)
try:
analysis_project_id = int(analysis_project_id)
analysis_task_id = int(analysis_task_id)
except:
analysis_project_id = 0
analysis_task_id = 0
# ap = 4, at = 8 means its using the column names but didn't find anything
if analysis_project_id < 100:
analysis_project_id = 0
if analysis_task_id < 100:
analysis_task_id = 0
return analysis_project_id, analysis_task_id
'''
For creating a dot file from the pipeline flow
'''
def append_flow(flow_file, orig_node, orig_label, next_node, next_label, link_label):
fh = open(flow_file, "a")
fh.write("%s|%s|%s|%s|%s\n" % (orig_node, orig_label, next_node, next_label, link_label))
fh.close()
'''
Flow file format:
# comment
*label|PBMID Pipeline run for BTXXY<br><font point-size="10">Run Date: 2017-09-28 14:22:50</font>
# origin node, origin label, next node, next label, link label
input_h5|BTXXY H5<br><font point-size="10">3 smrtcells</font>|assembly|HGAP Assembly<FONT POINT-SIZE="10"><br>3 contigs, 13,283,382bp</FONT>|HGAP v4.0.1
nodes should be the output of the transformation between the nodes
e.g. input fastq (25m reads) --[ bbtools subsampling ]--> subsampled fastq (10m reads)
creates a dot file, to convert to png use:
$ module load graphviz
$ dot -T png (dot file) > (png file)
More info on formatting the labels
http://www.graphviz.org/content/node-shapes#html
'''
def dot_flow(flow_file, dot_file, log = None):
if not os.path.isfile(flow_file):
if log:
log.info("- cannot find flow file: %s", flow_file)
else:
print "Cannot find flow file: %s" % flow_file
return
fhw = open(dot_file, "w")
fhw.write("// dot file\n")
fhw.write("digraph rqc {\n") # directed graph
fhw.write(" node [shape=box];\n")
fhw.write(" rankdir=LR;\n")
fh = open(flow_file, "r")
for line in fh:
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# graph label
if line.startswith("*label"):
arr = line.split("|")
label = flow_replace(str(arr[1]))
fhw.write(" label=<%s>;\n" % label)
fhw.write(" labelloc=top;\n")
else:
arr = line.split("|")
#print arr
if len(arr) == 5:
org_node = arr[0]
org_label = str(arr[1])
next_node = arr[2]
next_label = str(arr[3])
link_label = str(arr[4])
# must be <br/> in the dot file, I have a habit of using <br>
org_label = flow_replace(org_label)
next_label = flow_replace(next_label)
link_label = flow_replace(link_label)
# label are enclosed by < > instead of " " to handle html-ish markups
if next_node:
link = " %s -> %s;\n" % (org_node, next_node)
if link_label:
link = " %s -> %s [label=<%s>];\n" % (org_node, next_node, link_label)
fhw.write(link)
if org_label:
label = " %s [label=<%s>];\n" % (org_node, org_label)
fhw.write(label)
if next_label:
label = " %s [label=<%s>];\n" % (next_node, next_label)
fhw.write(label)
fh.close()
fhw.write("}\n")
fhw.close()
if log:
log.info("- created dot file: %s", dot_file)
return dot_file
'''
simple replacements
'''
def flow_replace(my_str):
new_str = my_str.replace("<br>", "<br/>").replace("<smf>", "<font point-size=\"10\">").replace("</f>", "</font>")
return new_str
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## main program
if __name__ == "__main__":
# unit tests
print human_size(102192203)
print human_size(250000000000)
#print get_read_count_fastq("/global/projectb/scratch/brycef/sag/phix/11185.1.195330.UNKNOWN_matched.fastq.gz")
cmd = "#bbtools;bbduk.sh in=/global/dna/dm_archive/sdm/illumina//01/14/88/11488.1.208132.UNKNOWN.fastq.gz ref=/global/dna/shared/rqc/ref_databases/qaqc/databases/phix174_ill.ref.fa outm=/global/projectb/scratch/brycef/phix/11488/11488.1.208132.UNKNOWN_matched.fastq.gz outu=/global/projectb/scratch/brycef/phix/11488/11488.1.208132.UNKNOWN_unmatched.fastq.gz"
print convert_cmd(cmd)
cmd = "#pigz;pigz /global/projectb/scratch/brycef/align/BTOYH/genome/11463.6.208000.CAAGGTC-AGACCTT.filter-RNA.fastq.gz-genome.sam"
print convert_cmd(cmd)
cmd = "#java;java -version"
print convert_cmd(cmd)
dot_flow("/global/projectb/scratch/brycef/pbmid/BWOAU/f2.flow", "/global/projectb/scratch/brycef/pbmid/BWOAU/BWOUAx.dot")
sys.exit(0)
|
src/deepsparse/utils/data.py | SkalskiP/deepsparse | 460 | 12765091 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import numpy
from deepsparse.utils.log import log_init
__all__ = [
"arrays_to_bytes",
"bytes_to_arrays",
"verify_outputs",
]
log = log_init(os.path.basename(__file__))
def arrays_to_bytes(arrays: List[numpy.array]) -> bytearray:
"""
:param arrays: List of numpy arrays to serialize as bytes
:return: bytearray representation of list of numpy arrays
"""
to_return = bytearray()
for arr in arrays:
arr_dtype = bytearray(str(arr.dtype), "utf-8")
arr_shape = bytearray(",".join([str(a) for a in arr.shape]), "utf-8")
sep = bytearray("|", "utf-8")
arr_bytes = arr.ravel().tobytes()
to_return += arr_dtype + sep + arr_shape + sep + arr_bytes
return to_return
def bytes_to_arrays(serialized_arr: bytearray) -> List[numpy.array]:
"""
:param serialized_arr: bytearray representation of list of numpy arrays
:return: List of numpy arrays decoded from input
"""
sep = "|".encode("utf-8")
arrays = []
i_start = 0
while i_start < len(serialized_arr) - 1:
i_0 = serialized_arr.find(sep, i_start)
i_1 = serialized_arr.find(sep, i_0 + 1)
arr_dtype = numpy.dtype(serialized_arr[i_start:i_0].decode("utf-8"))
arr_shape = tuple(
[int(a) for a in serialized_arr[i_0 + 1 : i_1].decode("utf-8").split(",")]
)
arr_num_bytes = numpy.prod(arr_shape) * arr_dtype.itemsize
arr_str = serialized_arr[i_1 + 1 : arr_num_bytes + (i_1 + 1)]
arr = numpy.frombuffer(arr_str, dtype=arr_dtype).reshape(arr_shape)
arrays.append(arr.copy())
i_start = i_1 + arr_num_bytes + 1
return arrays
def verify_outputs(
outputs: List[numpy.array],
gt_outputs: List[numpy.array],
atol: float = 8.0e-4,
rtol: float = 0.0,
) -> List[float]:
"""
Compares two lists of output tensors, checking that they are sufficiently similar
:param outputs: List of numpy arrays, usually model outputs
:param gt_outputs: List of numpy arrays, usually reference outputs
:param atol: Absolute tolerance for allclose
:param rtol: Relative tolerance for allclose
:return: The list of max differences for each pair of outputs
"""
max_diffs = []
if len(outputs) != len(gt_outputs):
raise Exception(
f"number of outputs doesn't match, {len(outputs)} != {len(gt_outputs)}"
)
for i in range(len(gt_outputs)):
gt_output = gt_outputs[i]
output = outputs[i]
if output.shape != gt_output.shape:
raise Exception(
f"output shapes don't match, {output.shape} != {gt_output.shape}"
)
if type(output) != type(gt_output):
raise Exception(
f"output types don't match, {type(output)} != {type(gt_output)}"
)
max_diff = numpy.max(numpy.abs(output - gt_output))
max_diffs.append(max_diff)
log.info(f"output {i}: {output.shape} {gt_output.shape} MAX DIFF: {max_diff}")
if not numpy.allclose(output, gt_output, rtol=rtol, atol=atol):
raise Exception(
"output data doesn't match\n"
f"output {i}: {output.shape} {gt_output.shape} MAX DIFF: {max_diff}\n"
f" mean = {numpy.mean(output):.5f} {numpy.mean(gt_output):.5f}\n"
f" std = {numpy.std(output):.5f} {numpy.std(gt_output):.5f}\n"
f" max = {numpy.max(output):.5f} {numpy.max(gt_output):.5f}\n"
f" min = {numpy.min(output):.5f} {numpy.min(gt_output):.5f}"
)
return max_diffs
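# Hedged round-trip sketch (not part of the original module; all names below are
# local to the example): serialize two arrays, decode them, and confirm the copies
# match the originals exactly.
#
#   originals = [numpy.arange(6, dtype=numpy.float32).reshape(2, 3),
#                numpy.ones((4,), dtype=numpy.int64)]
#   decoded = bytes_to_arrays(arrays_to_bytes(originals))
#   verify_outputs(decoded, originals, atol=0.0)  # max diffs should all be 0.0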
|
.sim-test.py | niw/linux-on-litex-vexriscv | 329 | 12765100 | #!/usr/bin/env python3
#
# This file is part of Linux-on-LiteX-VexRiscv
#
# Copyright (c) 2019-2021, Linux-on-LiteX-VexRiscv Developers
# SPDX-License-Identifier: BSD-2-Clause
import os
import sys
import pexpect
import time
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--sdram-module", type=str)
args = parser.parse_args()
tests = [
{
'id': 'linux-on-litex-vexriscv',
'command': f'./sim.py --with-sdram --sdram-module {args.sdram_module}',
'cwd': os.getcwd(),
'checkpoints': [
{ 'timeout': 240, 'good': [b'\n\\s*BIOS built on'] },
{ 'timeout': 60, 'good': [b'\n\\s*VexRiscv Machine Mode software'] },
{ 'timeout': 240, 'good': [b'Memory: \\d+K/\\d+K available'] },
]
}
]
def run_test(id, command, cwd, checkpoints):
print(f'*** Test ID: {id}')
print(f'*** CWD: {cwd}')
print(f'*** Command: {command}')
os.chdir(cwd)
p = pexpect.spawn(command, timeout=None, logfile=sys.stdout.buffer)
checkpoint_id = 0
for cp in checkpoints:
good = cp.get('good', [])
bad = cp.get('bad', [])
patterns = good + bad
timeout = cp.get('timeout', None)
timediff = time.time()
try:
match_id = p.expect(patterns, timeout=timeout)
except pexpect.EOF:
print(f'\n*** {id}: premature termination')
return False;
except pexpect.TIMEOUT:
timediff = time.time() - timediff
print(f'\n*** {id}: timeout (checkpoint {checkpoint_id}: +{int(timediff)}s)')
return False;
timediff = time.time() - timediff
if match_id >= len(good):
break
sys.stdout.buffer.write(b'<<checkpoint %d: +%ds>>' % (checkpoint_id, int(timediff)))
checkpoint_id += 1
is_success = checkpoint_id == len(checkpoints)
# Let it print rest of line
match_id = p.expect_exact([b'\n', pexpect.TIMEOUT, pexpect.EOF], timeout=1)
p.terminate(force=True)
line_break = '\n' if match_id != 0 else ''
print(f'{line_break}*** {id}: {"success" if is_success else "failure"}')
return is_success
for test in tests:
success = run_test(**test)
if not success:
sys.exit(1)
sys.exit(0)
|
wagtail/core/signals.py | smartfactory-gmbh/wagtail | 8,851 | 12765103 | <reponame>smartfactory-gmbh/wagtail
from django.dispatch import Signal
# Page signals
# provides args: instance, revision
page_published = Signal()
# provides args: instance
page_unpublished = Signal()
# provides args: instance, parent_page_before, parent_page_after, url_path_before, url_path_after
pre_page_move = Signal()
# provides args: instance, parent_page_before, parent_page_after, url_path_before, url_path_after
post_page_move = Signal()
# Workflow signals
# provides args: instance, user
workflow_approved = Signal()
# provides args: instance, user
workflow_rejected = Signal()
# provides args: instance, user
workflow_cancelled = Signal()
# provides args: instance, user
workflow_submitted = Signal()
# Workflow task signals
# provides args: instance, user
task_approved = Signal()
# provides args: instance, user
task_rejected = Signal()
# provides args: instance, user
task_submitted = Signal()
# provides args: instance, user
task_cancelled = Signal()
# Locale signals
# Like pre_delete, but sent on deletion before on_delete validation is applied.
# Currently only sent by the Locale model.
# Required as a workaround for https://code.djangoproject.com/ticket/6870
# provides args: sender, instance
pre_validate_delete = Signal()
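# Hedged usage sketch (not part of this module): connecting a receiver to
# page_published with the args documented above; the receiver name is invented
# for the example.
#
#   from django.dispatch import receiver
#   from wagtail.core.signals import page_published
#
#   @receiver(page_published)
#   def notify_editors(sender, instance, revision, **kwargs):
#       print(f"published {instance.title} (revision {revision.pk})")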
|
src/detection_efffdet/utils.py | yellowdolphin/SIIM-COVID19-Detection | 153 | 12765127 | <filename>src/detection_efffdet/utils.py
import random
import os
import numpy as np
import torch
import pandas as pd
from mean_average_precision import MetricBuilder
import pickle
classes = [
'Negative for Pneumonia',
'Typical Appearance',
'Indeterminate Appearance',
'Atypical Appearance'
]
def seed_everything(seed=123):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def refine_det(boxes, labels, scores):
boxes = boxes.clip(0,1)
boxes_out = []
labels_out = []
scores_out = []
for box, label, score in zip(boxes, labels, scores):
x1, y1, x2, y2 = box
if x1==x2 or y1==y2:
continue
box = [min(x1,x2), min(y1,y2), max(x1,x2), max(y1,y2)]
boxes_out.append(box)
labels_out.append(label)
scores_out.append(score)
return boxes_out, labels_out, scores_out
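# Illustrative call (box values invented for the example): coordinates are clipped
# to [0, 1] and the zero-area middle box is dropped.
#
#   boxes = np.array([[0.1, 0.2, 0.4, 0.5], [0.3, 0.3, 0.3, 0.8], [-0.1, 0.0, 0.5, 1.2]])
#   refine_det(boxes, [0, 1, 0], [0.9, 0.8, 0.7])
#   # -> boxes [[0.1, 0.2, 0.4, 0.5], [0.0, 0.0, 0.5, 1.0]], labels [0, 0], scores [0.9, 0.7]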
def get_study_map(df, pred_dict, num_classes=6, stride=0.1):
assert num_classes in [4,6]
metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=num_classes)
### Study level ###
for studyid, grp in df.groupby('studyid'):
gts = []
for clsidx, clsname in enumerate(classes):
assert len(np.unique(grp[clsname].values)) == 1
if grp[clsname].values[0] == 1:
gts.append([0, 0, 1, 1, clsidx, 0, 0])
gts = np.array(gts)
study_preds = []
for _, row in grp.iterrows():
study_preds.append(pred_dict[row['imageid']])
study_preds = np.array(study_preds)
study_preds = np.mean(study_preds, axis=0)
preds = []
for clsidx in range(len(classes)):
preds.append([0, 0, 1, 1, clsidx, study_preds[clsidx]])
preds = np.array(preds)
metric_fn.add(preds, gts)
### Image level ###
if num_classes == 6:
for _, row in df.iterrows():
gts = []
arr = row['label'].split(' ')
nums = len(arr) // 6
for i in range(nums):
class_name = arr[6*i]
conf = int(arr[6*i+1])
if class_name == 'opacity':
clsid = 5
else:
clsid = 4
x1 = int(float(arr[6*i+2]))
y1 = int(float(arr[6*i+3]))
x2 = int(float(arr[6*i+4]))
y2= int(float(arr[6*i+5]))
gts.append([x1, y1, x2, y2, clsid, 0, 0])
gts = np.array(gts)
preds = np.array([[0, 0, 1, 1, 4, 1]])
metric_fn.add(preds, gts)
result = metric_fn.value(iou_thresholds=0.5, recall_thresholds=np.arange(0., 1.0+stride, stride), mpolicy='soft')
average_precision = {}
for clsid in range(num_classes):
average_precision[clsid] = []
for k, v in result.items():
if k=='mAP':
continue
for clsid in range(num_classes):
average_precision[clsid].append(v[clsid]['ap'])
output = {
'mAP': result['mAP'],
}
for clsid in range(num_classes):
average_precision[clsid] = np.mean(average_precision[clsid])
if clsid < len(classes):
output[classes[clsid]] = average_precision[clsid]
elif clsid == 4:
output['none'] = average_precision[clsid]
else:
output['opacity'] = average_precision[clsid]
return output
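# Minimal input sketch for get_study_map; the dataframe layout (one row per
# image) mirrors what the function expects, while the ids, boxes and
# probabilities below are illustrative assumptions only.
def _demo_get_study_map():
    df = pd.DataFrame([
        {'studyid': 's0', 'imageid': 'im0', 'label': 'none 1 0 0 1 1',
         'Negative for Pneumonia': 1, 'Typical Appearance': 0,
         'Indeterminate Appearance': 0, 'Atypical Appearance': 0},
        {'studyid': 's1', 'imageid': 'im1', 'label': 'opacity 1 10 10 100 100',
         'Negative for Pneumonia': 0, 'Typical Appearance': 1,
         'Indeterminate Appearance': 0, 'Atypical Appearance': 0},
        {'studyid': 's2', 'imageid': 'im2', 'label': 'opacity 1 20 20 80 90',
         'Negative for Pneumonia': 0, 'Typical Appearance': 0,
         'Indeterminate Appearance': 1, 'Atypical Appearance': 0},
        {'studyid': 's3', 'imageid': 'im3', 'label': 'none 1 0 0 1 1',
         'Negative for Pneumonia': 0, 'Typical Appearance': 0,
         'Indeterminate Appearance': 0, 'Atypical Appearance': 1},
    ])
    # predicted probabilities per image for the four study-level classes
    pred_dict = {
        'im0': np.array([0.7, 0.1, 0.1, 0.1]),
        'im1': np.array([0.1, 0.7, 0.1, 0.1]),
        'im2': np.array([0.1, 0.1, 0.7, 0.1]),
        'im3': np.array([0.1, 0.1, 0.1, 0.7]),
    }
    return get_study_map(df, pred_dict, num_classes=6)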
def save_dict(obj, name):
with open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_dict(name):
with open(name, 'rb') as f:
return pickle.load(f) |
mlcomp/contrib/catalyst/optim/cosineanneal.py | sUeharaE4/mlcomp | 166 | 12765135 | from torch.optim.lr_scheduler import CosineAnnealingLR
class OneCycleCosineAnnealLR(CosineAnnealingLR):
def __init__(self, *args, **kwargs):
self.start_epoch = None
self.last_epoch = None
super().__init__(*args, **kwargs)
def step(self, epoch=None):
if self.last_epoch is not None:
if self.start_epoch is None:
self.start_epoch = self.last_epoch
self.last_epoch = 0
for i in range(len(self.base_lrs)):
self.optimizer.param_groups[i]['lr'] = self.base_lrs[0]
if self.last_epoch >= self.T_max - 1:
self.start_epoch = self.last_epoch
self.last_epoch = -1
for i in range(len(self.base_lrs)):
self.optimizer.param_groups[i]['lr'] = self.base_lrs[0]
super().step(epoch)
__all__ = ['OneCycleCosineAnnealLR']
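# Minimal usage sketch: step the scheduler with a throwaway SGD optimizer to
# watch the learning rate restart after each T_max-epoch cycle; the parameter
# tensor and hyper-parameters below are illustrative only.
if __name__ == '__main__':
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    scheduler = OneCycleCosineAnnealLR(optimizer, T_max=4)
    for epoch in range(10):
        optimizer.step()
        scheduler.step()
        print(epoch, optimizer.param_groups[0]['lr'])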
|
tests/runtime-trace-tests/cases/assign_stmt.py | jaydeetay/pxt | 977 | 12765144 | # regular assignment
foo = 7
print(foo)
# annotated assignment
bar: number = 9
print(bar)
|
tests/test_tools.py | frenners/python-amazon-paapi | 121 | 12765148 | <filename>tests/test_tools.py
from amazon_paapi.exceptions import AsinNotFoundException
from amazon_paapi.tools import get_asin
import pytest
def test_get_asin():
assert get_asin('B01N5IB20Q') == 'B01N5IB20Q'
assert get_asin('https://www.amazon.es/gp/product/B07PHPXHQS') == 'B07PHPXHQS'
assert get_asin('https://www.amazon.es/gp/product/B07PHPXHQS?pf_rd_r=3FXDZDV1W6KY83KEE2Z4&pf_rd_p=c6fa5af0-ec7c-40de-8332-fd1421de4244&pd_rd_r=58786171-de0f-4fe1-a2df-ee335d6715ee&pd_rd_w=KND7A&pd_rd_wg=kIr5z&ref_=pd_gw_unk') == 'B07PHPXHQS'
assert get_asin('https://www.amazon.es/dp/B07PKW4CKF') == 'B07PKW4CKF'
assert get_asin('https://www.amazon.es/dp/B07PKW4CKF?_encoding=UTF8&ref_=pocs_dp_m_sp_multi_c_more_nooffers_B08D1G2XVX') == 'B07PKW4CKF'
with pytest.raises(AsinNotFoundException):
get_asin('https://www.amazon.es/gp/')
with pytest.raises(AsinNotFoundException):
get_asin('this is not even a URL')
|
kotti/views/util.py | IonicaBizauKitchen/Kotti | 191 | 12765151 | import hashlib
from collections import defaultdict
from datetime import datetime
from urllib.parse import urlencode
from babel.dates import format_date
from babel.dates import format_datetime
from babel.dates import format_time
from babel.numbers import format_currency
from pyramid.decorator import reify
from pyramid.i18n import get_locale_name
from pyramid.interfaces import ILocation
from pyramid.location import inside
from pyramid.location import lineage
from pyramid.renderers import get_renderer
from pyramid.renderers import render
from pyramid.settings import asbool
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy import or_
from kotti import DBSession
from kotti import get_settings
from kotti.events import objectevent_listeners
from kotti.interfaces import INavigationRoot
from kotti.resources import Content
from kotti.resources import Document
from kotti.resources import Node
from kotti.resources import Tag
from kotti.resources import TagsToContents
from kotti.resources import get_root
from kotti.sanitizers import sanitize
from kotti.security import view_permitted
from kotti.util import TemplateStructure
from kotti.util import render_view
from kotti.views.site_setup import CONTROL_PANEL_LINKS
from kotti.views.slots import slot_events
class SettingHasValuePredicate:
def __init__(self, val, config):
self.name, self.value = val
if not isinstance(self.value, bool):
raise ValueError("Only boolean values supported")
def text(self):
return f"if_setting_has_value = {self.name} == {self.value}"
phash = text
def __call__(self, context, request):
return asbool(request.registry.settings[self.name]) == self.value
class RootOnlyPredicate:
def __init__(self, val, config):
self.val = val
def text(self):
return f"root_only = {self.val}"
phash = text
def __call__(self, context, request):
return (context is request.root) == self.val
def template_api(context, request, **kwargs):
return get_settings()["kotti.templates.api"][0](context, request, **kwargs)
def add_renderer_globals(event):
if event.get("renderer_name") != "json":
request = event["request"]
api = getattr(request, "template_api", None)
if api is None and request is not None:
api = template_api(event["context"], event["request"])
event["api"] = api
class Slots:
def __init__(self, context, request):
self.context = context
self.request = request
def __getattr__(self, key):
for event_type in slot_events:
if event_type.name == key:
break
else:
raise AttributeError(key)
value = []
event = event_type(self.context, self.request)
for snippet in objectevent_listeners(event):
if snippet is not None:
if isinstance(snippet, list):
value.extend(snippet)
else:
value.append(snippet)
setattr(self, key, value)
return value
class TemplateAPI:
"""This implements the ``api`` object that's passed to all templates.
Use dict-access as a shortcut to retrieve template macros from templates.
"""
# Instead of overriding these, consider using the
# ``kotti.overrides`` variable.
BARE_MASTER = "kotti:templates/master-bare.pt"
VIEW_MASTER = "kotti:templates/view/master.pt"
EDIT_MASTER = "kotti:templates/edit/master.pt"
SITE_SETUP_MASTER = "kotti:templates/site-setup/master.pt"
body_css_class = ""
def __init__(self, context, request, bare=None, **kwargs):
self.context, self.request = context, request
if getattr(request, "template_api", None) is None:
request.template_api = self
self.S = get_settings()
if request.is_xhr and bare is None:
bare = True # use bare template that renders just the content area
self.bare = bare
self.slots = Slots(context, request)
self.__dict__.update(kwargs)
@staticmethod
def is_location(context):
"""Does `context` implement :class:`pyramid.interfaces.ILocation`?
:param context: The context.
:type context: kotti.interfaces.INode
:rtype: bool
        :returns: True if the context object implements
                  :class:`pyramid.interfaces.ILocation`.
"""
return ILocation.providedBy(context)
@reify
def edit_needed(self):
if "kotti.fanstatic.edit_needed" in self.S:
return [r.need() for r in self.S["kotti.fanstatic.edit_needed"]]
@reify
def view_needed(self):
if "kotti.fanstatic.view_needed" in self.S:
return [r.need() for r in self.S["kotti.fanstatic.view_needed"]]
def macro(self, asset_spec, macro_name="main"):
if self.bare and asset_spec in (
self.VIEW_MASTER,
self.EDIT_MASTER,
self.SITE_SETUP_MASTER,
):
asset_spec = self.BARE_MASTER
return get_renderer(asset_spec).implementation().macros[macro_name]
@reify
def site_title(self):
""" The site title.
:result: Value of the ``kotti.site_title`` setting (if specified) or
the root item's ``title`` attribute.
:rtype: str
"""
value = get_settings().get("kotti.site_title")
if not value:
value = self.root.title
return value
@reify
def page_title(self):
"""
Title for the current page as used in the ``<head>`` section of the
default ``master.pt`` template.
        :result: "[Human readable view title ]``context.title`` -
                 :meth:`~TemplateAPI.site_title`"
:rtype: str
"""
view_title = self.request.view_name.replace("_", " ").title()
if view_title:
view_title += " "
view_title += self.context.title
return f"{view_title} - {self.site_title}"
def url(self, context=None, *elements, **kwargs):
"""
URL construction helper. Just a convenience wrapper for
:func:`pyramid.request.resource_url` with the same signature. If
``context`` is ``None`` the current context is passed to
``resource_url``.
"""
if context is None:
context = self.context
if not ILocation.providedBy(context):
return self.request.url
return self.request.resource_url(context, *elements, **kwargs)
@reify
def root(self):
"""
The site root.
:result: The root object of the site.
:rtype: :class:`kotti.resources.Node`
"""
if ILocation.providedBy(self.context):
return self.lineage[-1]
else:
return get_root()
@reify
def navigation_root(self):
"""
The root node for the navigation.
:result: Nearest node in the :meth:`lineage` that provides
:class:`kotti.interfaces.INavigationRoot` or :meth:`root` if
no node provides that interface.
:rtype: :class:`kotti.resources.Node`
"""
for o in self.lineage:
if INavigationRoot.providedBy(o):
return o
return self.root
@reify
def lineage(self):
"""
Lineage from current context to the root node.
:result: List of nodes.
:rtype: list of :class:`kotti.resources.Node`
"""
return list(lineage(self.context))
@reify
def breadcrumbs(self):
"""
List of nodes from the :meth:`navigation_root` to the context.
:result: List of nodes.
:rtype: list of :class:`kotti.resources.Node`
"""
breadcrumbs = self.lineage
if self.root != self.navigation_root:
index = breadcrumbs.index(self.navigation_root)
breadcrumbs = breadcrumbs[: index + 1]
return reversed(breadcrumbs)
def has_permission(self, permission, context=None):
""" Convenience wrapper for :func:`pyramid.security.has_permission`
with the same signature. If ``context`` is ``None`` the current
context is passed to ``has_permission``."""
if context is None:
context = self.context
return self.request.has_permission(permission, context)
def render_view(self, name="", context=None, request=None, secure=True, bare=True):
if context is None:
context = self.context
if request is None:
request = self.request
before = self.bare
try:
self.bare = bare
html = render_view(context, request, name, secure)
finally:
self.bare = before
return TemplateStructure(html)
def render_template(self, renderer, **kwargs):
return TemplateStructure(render(renderer, kwargs, self.request))
def list_children(self, context=None, permission="view"):
if context is None:
context = self.context
if isinstance(context, Node):
if permission is None:
return context.children
return context.children_with_permission(self.request, permission)
return [
c
for c in getattr(context, "values", lambda: [])()
if (not permission or self.request.has_permission(permission, c))
]
inside = staticmethod(inside)
def avatar_url(self, user=None, size="14", default_image="identicon"):
if user is None:
user = self.request.user
email = user.email
if not email:
email = user.name
h = hashlib.md5(email.encode("utf8")).hexdigest()
query = {"default": default_image, "size": str(size)}
url = "https://secure.gravatar.com/avatar/{}?{}".format(h, urlencode(query))
return url
@reify
def locale_name(self):
return get_locale_name(self.request)
def format_date(self, d, fmt=None):
if fmt is None:
fmt = self.S["kotti.date_format"]
return format_date(d, format=fmt, locale=self.locale_name)
def format_datetime(self, dt, fmt=None):
if fmt is None:
fmt = self.S["kotti.datetime_format"]
if not isinstance(dt, datetime):
dt = datetime.fromtimestamp(dt)
return format_datetime(dt, format=fmt, locale=self.locale_name)
def format_time(self, t, fmt=None):
if fmt is None:
fmt = self.S["kotti.time_format"]
return format_time(t, format=fmt, locale=self.locale_name)
def format_currency(self, n, currency, fmt=None):
return format_currency(n, currency, format=fmt, locale=self.locale_name)
@staticmethod
def get_type(name):
for class_ in get_settings()["kotti.available_types"]:
if class_.type_info.name == name:
return class_
def find_edit_view(self, item):
view_name = self.request.view_name
if not view_permitted(item, self.request, view_name):
view_name = "edit"
if not view_permitted(item, self.request, view_name):
view_name = ""
return view_name
@reify
def edit_links(self):
if not hasattr(self.context, "type_info"):
return []
return [
link
for link in self.context.type_info.edit_links
if link.visible(self.context, self.request)
]
@reify
def site_setup_links(self):
return [link for link in CONTROL_PANEL_LINKS
if link.visible(self.root, self.request)]
@staticmethod
def sanitize(html, sanitizer="default"):
""" Convenience wrapper for :func:`kotti.sanitizers.sanitize`.
:param html: HTML to be sanitized
:type html: str
:param sanitizer: name of the sanitizer to use.
:type sanitizer: str
:result: sanitized HTML
:rtype: str
"""
return sanitize(html, sanitizer)
class NodesTree:
def __init__(self, node, request, item_mapping, item_to_children, permission):
self._node = node
self._request = request
self._item_mapping = item_mapping
self._item_to_children = item_to_children
self._permission = permission
@property
def __parent__(self):
if self.parent_id:
return self._item_mapping[self.parent_id]
@property
def children(self):
return [
NodesTree(
child,
self._request,
self._item_mapping,
self._item_to_children,
self._permission,
)
for child in self._item_to_children[self.id]
if self._request.has_permission(self._permission, child)
]
def _flatten(self, item):
# noinspection PyProtectedMember
yield item._node
for ch in item.children:
yield from self._flatten(ch)
def tolist(self):
return list(self._flatten(self))
def __getattr__(self, key):
return getattr(self._node, key)
def nodes_tree(request, context=None, permission="view"):
item_mapping = {}
item_to_children = defaultdict(lambda: [])
for node in DBSession.query(Content).with_polymorphic(Content):
item_mapping[node.id] = node
if request.has_permission(permission, node):
item_to_children[node.parent_id].append(node)
for children in item_to_children.values():
children.sort(key=lambda ch: ch.position)
if context is None:
node = item_to_children[None][0]
else:
node = context
return NodesTree(node, request, item_mapping, item_to_children, permission)
def search_content(search_term, request=None):
return get_settings()["kotti.search_content"][0](search_term, request)
def default_search_content(search_term, request=None):
# noinspection PyUnresolvedReferences
searchstring = f"%{search_term}%"
# generic_filter can be applied to all Node (and subclassed) objects
generic_filter = or_(
Content.name.like(searchstring),
Content.title.like(searchstring),
Content.description.like(searchstring),
)
results = (
DBSession.query(Content)
.filter(generic_filter)
.order_by(Content.title.asc())
.all()
)
# specific result contain objects matching additional criteria
# but must not match the generic criteria (because these objects
# are already in the generic_results)
document_results = DBSession.query(Document).filter(
and_(Document.body.like(searchstring), not_(generic_filter))
)
for results_set in [content_with_tags([searchstring]), document_results.all()]:
[results.append(c) for c in results_set if c not in results]
result_dicts = []
for result in results:
if request.has_permission("view", result):
result_dicts.append(
dict(
name=result.name,
title=result.title,
description=result.description,
path=request.resource_path(result),
)
)
return result_dicts
def content_with_tags(tag_terms):
return (
DBSession.query(Content)
.join(TagsToContents)
.join(Tag)
.filter(or_(*[Tag.title.like(tag_term) for tag_term in tag_terms]))
.all()
)
def search_content_for_tags(tags, request=None):
result_dicts = []
for result in content_with_tags(tags):
if request.has_permission("view", result):
result_dicts.append(
dict(
name=result.name,
title=result.title,
description=result.description,
path=request.resource_path(result),
)
)
return result_dicts
def includeme(config):
""" Pyramid includeme hook.
:param config: app config
:type config: :class:`pyramid.config.Configurator`
"""
config.add_view_predicate("root_only", RootOnlyPredicate)
config.add_view_predicate("if_setting_has_value", SettingHasValuePredicate)
|
examples/txt2unicode/demo_utf8_2_tscii.py | nv-d/open-tamil | 218 | 12765168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2014 Arulalan.T <<EMAIL>>
# (C) 2015 <NAME>
# This file is part of 'open-tamil/txt2unicode' package examples
#
import sys
sys.path.append("../..")
from tamil.txt2unicode import tscii2unicode, unicode2tscii
tscii = """¾¢ÕÅûÙÅ÷ «ÕǢ ¾¢ÕìÌÈû """
uni_1 = tscii2unicode(tscii)
tscii_from_uni = unicode2tscii(uni_1)
uni_2 = tscii2unicode(tscii_from_uni)
f = open("encode-result.txt", "w")
f.write("Initial tscii : " + tscii + "\n\n")
f.write("From tscii to unicode : " + uni_1 + "\n\n")
f.write("From unicode to tscii : " + tscii_from_uni + "\n\n")
f.write("Again back to unicode from above tscii : " + uni_2)
f.close()
assert uni_1 == uni_2, " Both unicode are 'not' same! "
assert tscii == tscii_from_uni, " Both tscii are 'not' same! "
print("tscii original input", tscii)
print("from tscii2unicode", uni_1)
print("from unicode2tscii", tscii_from_uni)
print("back to unicode", uni_2)
print("converted unicode stored in 'encode-result.txt' file")
|
deeptools/plotPCA.py | gartician/deepTools | 351 | 12765210 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__
def parse_arguments(args=None):
basic_args = plotCorrelationArgs()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Tool for generating a principal component analysis (PCA)
plot from multiBamSummary or multiBigwigSummary output. By default, the loadings for each sample in each principal component are plotted. If the data is transposed, the projections of each sample on the requested principal components are plotted instead.
Detailed help:
plotPCA -h
""",
epilog='example usages:\n'
'plotPCA -in coverages.npz -o pca.png\n\n'
' \n\n',
parents=[basic_args, ])
return parser
def plotCorrelationArgs():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--corData', '-in',
metavar='FILE',
help='Coverage file (generated by multiBamSummary or multiBigwigSummary)',
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--plotFile', '-o',
help='File name to save the plot to. '
'The extension determines the file format. '
'For example: '
'pca.pdf will save the PCA plot in PDF format. '
'The available options are: .png, '
'.eps, .pdf and .svg. If this option is omitted, then you MUST specify --outFileNameData',
type=writableFile,
metavar='FILE')
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf, plotly and svg.',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=10)
optional.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=10)
optional.add_argument('--outFileNameData',
metavar='file.tab',
type=writableFile,
help='File name to which the data underlying the plot '
'should be saved, such as myPCA.tab. For untransposed '
'data, this is the loading per-sample and PC as well '
'as the eigenvalues. For transposed data, this is the '
'rotation per-sample and PC and the eigenvalues. The '
'projections are truncated to the number of '
'eigenvalues for transposed data.')
optional.add_argument('--ntop',
help='Use only the top N most variable rows in the '
'original matrix. Specifying 0 will result in all '
'rows being used. If the matrix is to be transposed, '
'rows with 0 variance are always excluded, even if a '
                          'value of 0 is specified. The default is 1000. (Default: %(default)s)',
type=int,
default=1000)
optional.add_argument('--PCs',
help='The principal components to plot. If specified, '
'you must provide two different integers, greater '
'than zero, separated by a space. An example (and the default) is "1 2". (Default: %(default)s)',
type=int,
nargs=2,
default=[1, 2])
optional.add_argument('--log2',
help='log2 transform the datapoints prior to computing '
'the PCA. Note that 0.01 is added to all values to '
'prevent 0 values from becoming -infinity. Using this '
'option with input that contains negative values will '
'result in an error.',
action='store_true')
optional.add_argument('--colors',
metavar="COLORS",
nargs='+',
help="A list of colors for the symbols. Color names and html hex string (e.g., #eeff22) are accepted. The color names should be space separated. For example, --colors red blue green. If not specified, the symbols will be given automatic colors.")
optional.add_argument('--markers',
metavar="MARKERS",
nargs='+',
help="A list of markers for the symbols. (e.g., '<','>','o') are accepted. The marker values should be space separated. For example, --markers 's' 'o' 's' 'o'. If not specified, the symbols will be given automatic shapes.")
optional.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
optionalEx = optional.add_mutually_exclusive_group()
optionalEx.add_argument('--transpose',
help='Perform the PCA on the transposed matrix, (i.e., on the '
'matrix where rows are samples and columns are '
'bins/features. This then matches what is typically '
'done in R.',
action='store_true')
optionalEx.add_argument('--rowCenter',
help='When specified, each row (bin, gene, etc.) '
'in the matrix is centered at 0 before the PCA is '
'computed. This is useful only if you have a strong '
'bin/gene/etc. correlation and the resulting '
'principal component has samples stacked vertically. This option is not applicable if --transpose is specified.',
action='store_true')
return parser
def main(args=None):
args = parse_arguments().parse_args(args)
if args.plotFile is None and args.outFileNameData is None:
sys.exit("At least one of --plotFile and --outFileNameData must be specified!\n")
if args.ntop < 0:
sys.exit("The value specified for --ntop must be >= 0!\n")
if args.PCs[0] == args.PCs[1]:
sys.exit("You must specify different principal components!\n")
if args.PCs[0] <= 0 or args.PCs[1] <= 0:
sys.exit("The specified principal components must be at least 1!\n")
corr = Correlation(args.corData,
labels=args.labels,)
corr.rowCenter = args.rowCenter
corr.transpose = args.transpose
corr.ntop = args.ntop
corr.log2 = args.log2
Wt, eigenvalues = corr.plot_pca(args.plotFile,
PCs=args.PCs,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
plotWidth=args.plotWidth,
plotHeight=args.plotHeight,
cols=args.colors,
marks=args.markers)
if args.outFileNameData is not None:
of = open(args.outFileNameData, "w")
of.write("#plotPCA --outFileNameData\n")
of.write("Component\t{}\tEigenvalue\n".format("\t".join(corr.labels)))
n = eigenvalues.shape[0]
for i in range(n):
of.write("{}\t{}\t{}\n".format(i + 1, "\t".join(["{}".format(x) for x in Wt[i, :]]), eigenvalues[i]))
of.close()
if __name__ == "__main__":
main()
|
conans/test/conan_v2/conanfile/test_environment.py | matthiasng/conan | 6,205 | 12765219 | <gh_stars>1000+
import textwrap
from conans.client.tools.env import _environment_add
from conans.test.utils.conan_v2_tests import ConanV2ModeTestCase
class CollectLibsTestCase(ConanV2ModeTestCase):
def test_conan_username(self):
t = self.get_client()
conanfile = textwrap.dedent("""
from conans import ConanFile
class Recipe(ConanFile):
name = "name"
version = "version"
""")
t.save({'conanfile.py': conanfile})
with _environment_add({'CONAN_USERNAME': "user"}):
t.run('create .', assert_error=True)
self.assertIn("Conan v2 incompatible: Environment variable 'CONAN_USERNAME' is deprecated", t.out)
def test_conan_channel(self):
t = self.get_client()
conanfile = textwrap.dedent("""
from conans import ConanFile
class Recipe(ConanFile):
name = "name"
version = "version"
default_user = "user"
""")
t.save({'conanfile.py': conanfile})
with _environment_add({'CONAN_CHANNEL': "user"}):
t.run('create .', assert_error=True)
self.assertIn("Conan v2 incompatible: Environment variable 'CONAN_CHANNEL' is deprecated", t.out)
|
openbook_invitations/migrations/0002_auto_20190101_1413.py | TamaraAbells/okuna-api | 164 | 12765235 | <gh_stars>100-1000
# Generated by Django 2.1.4 on 2019-01-01 13:13
from django.conf import settings
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('openbook_invitations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserInvite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invited_date', models.DateField(verbose_name='invited date')),
('name', models.CharField(blank=True, max_length=256, null=True)),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
('username', models.CharField(blank=True, error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and _ only.', max_length=30, null=True, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('badge_keyword', models.CharField(blank=True, max_length=16, null=True)),
('token', models.CharField(max_length=256)),
('invited_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invited_users', to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='inviteuser',
name='invited_by',
),
migrations.DeleteModel(
name='InviteUser',
),
]
|
evcouplings/utils/tracker/sql.py | mrunalimanj/EVcouplings | 117 | 12765242 | <reponame>mrunalimanj/EVcouplings
"""
SQL-based result tracker (cannot store actual results, only status).
Using this tracker requires installation of the sqlalchemy package.
Regarding using models from different sources in Flask-SQLAlchemy:
https://stackoverflow.com/questions/28789063/associate-external-class-model-with-flask-sqlalchemy
TODO: Note that this tracker doesn't handle job reruns gracefully yet, because the result field will be
progressively overwritten but not reset when the job is rerun.
Authors:
<NAME>
"""
from contextlib import contextmanager
import json
import os
from copy import deepcopy
from sqlalchemy import (
Column, Integer, String, DateTime, Text,
create_engine, func
)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import DBAPIError
from sqlalchemy.dialects import mysql
from evcouplings.utils.helpers import retry
from evcouplings.utils.config import InvalidParameterError
from evcouplings.utils.tracker import EStatus
from evcouplings.utils.tracker.base import ResultTracker
# create SQLALchemy declarative base for SQL models
Base = declarative_base()
JOB_TABLE_NAME = "evcouplings_jobs"
# work around 65k limitation for mysql (without introducing max length, which would
# cause issues with postgresql)
# see here: https://github.com/sqlalchemy/sqlalchemy/issues/4443
LongText = Text().with_variant(mysql.LONGTEXT(), "mysql")
class SQLTracker(ResultTracker):
"""
Tracks compute job results in an SQL backend
"""
def __init__(self, **kwargs):
"""
Create new SQL-based tracker. For now, this tracker will ignore file_list
and store all file paths in the database except for those in delete_list.
Parameters
----------
connection_string : str
SQLite connection URI. Must include database name,
and username/password if authentication is used.
job_id : str
Unique job identifier of job which should be tracked
prefix : str
Prefix of pipeline job
pipeline : str
Name of pipeline that is running
file_list : list(str)
List of file item keys from outconfig that should
be stored in database. For now, this parameter has no
effect and all file paths will be stored in database.
delete_list : list(str)
List of file item keys from outconfig that will be deleted
after run is finished. These files cannot be stored as paths
to the pipeline result in the output.
config : dict(str)
Entire configuration dictionary of job
retry_max_number : int, optional (default: None)
Maximum number of attemps to perform database queries / updates.
If None, will try forever.
retry_wait : int, optional (default: None)
Time in seconds between retries to connect to database
"""
super().__init__(**kwargs)
# for SQL tracker, job ID may not be longer than 255 chars to not interfere with older SQL DBs
if len(self.job_id) > 255:
raise InvalidParameterError(
"Length of job_id for SQL tracker may not exceed 255 characters for database compatibility reasons"
)
# create SQLAlchemy engine and session maker to
# instantiate later sessions
self._engine = create_engine(self.connection_string)
self._Session = sessionmaker(bind=self._engine)
# Make sure all tables are there in database
Base.metadata.create_all(bind=self._engine)
@contextmanager
def session_scope(self):
"""
Provide a transactional scope around a series of operations.
Source: https://docs.sqlalchemy.org/en/latest/orm/session_basics.html
"""
session = self._Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
def get(self):
"""
Return the current entry tracked by this tracker.
Does not attempt to retry if database connection fails.
"""
with self.session_scope() as session:
query_res = session.query(
ComputeJob
).filter_by(
job_id=self.job_id
).all()
q = [
deepcopy(x.__dict__) for x in query_res
]
if len(q) == 0:
return None
if len(q) > 1:
raise ValueError(
"Job ID not unique, found more than one job."
)
else:
return q[0]
def _retry_query(self, func, session, rollback=True):
"""
Retry database query until success or maximum number of attempts
is reached
Parameters
----------
func : callable
Query function that will be executed until successful
session : sqlalchemy.orm.session.Session
SQLALchemy database session
rollback : bool, optional (default: True)
Perform rollback of session before reattempt,
can be set to False for read-only queries
Returns
-------
Result of func()
Raises
------
ResourceError
If execution is not successful within maximum
number of attempts
"""
if rollback:
retry_action = session.rollback
else:
retry_action = None
return retry(
func,
self.retry_max_number,
self.retry_wait,
exceptions=DBAPIError,
retry_action=retry_action
)
def _execute_update(self, session, q, status=None, message=None, stage=None, results=None):
"""
Wraps update to SQL database (to allow for retries)
Parameters
----------
session : sqlalchemy.orm.session.Session
SQLALchemy database session
q : sqlalchemy.orm.query.Query
SQLAlchemy query if a job with self.job_id
already exists
For remaining parameters, see update()
"""
# check if we already have some job
num_rows = len(q.all())
# create new entry if not already existing
if num_rows == 0:
# Note: do not initialize location here, since this should
# be either set by outside code upon job creation,
# or based on current working dir of running job
r = ComputeJob(
job_id=self.job_id,
prefix=self.prefix,
status=EStatus.INIT,
config=json.dumps(self.config),
pipeline=self.pipeline,
time_created=func.now()
)
session.add(r)
else:
# can only be one row due to unique constraint
r = q.one()
# if status is given, update
if status is not None:
r.status = status
# if we switch into running state, record
# current time as starting time of actual computation
if status == EStatus.RUN:
r.time_started = func.now()
# pragmatic hack to filling in the location if not
# already set - can only do this based on current directory
# inside pipeline runner (i.e. when job is started), since
# any other code that creates the job entry may operate in a
# different working directory (e.g. batch submitter in evcouplings app)
if r.location is None:
r.location = os.getcwd()
# if stage is given, update
if stage is not None:
r.stage = stage
# set termination/fail message
if message is not None:
r.message = str(message)
# update timestamp of last modification
# (will correspond to finished time at the end)
r.time_updated = func.now()
# finally, also update results (stored as json)
if results is not None:
# first, extract current state in database to dict
if r.results is not None:
current_result_state = json.loads(r.results)
else:
current_result_state = {}
# store everything in database except files that are
# flagged for deletion on filesystem, since we only
# store the file paths to these files
result_update = {
k: v for (k, v) in results.items() if k not in self.delete_list
}
# create result update, make sure update overwrites
# any pre-existing keys
new_result_state = {
**current_result_state,
**result_update
}
# finally, add updated result state to database record
r.results = json.dumps(new_result_state)
session.commit()
def update(self, status=None, message=None, stage=None, results=None):
with self.session_scope() as session:
# see if we can find the job in the database already
q = self._retry_query(
lambda: session.query(ComputeJob).filter_by(job_id=self.job_id),
session=session,
rollback=False
)
# then execute actual update
self._retry_query(
lambda: self._execute_update(session, q, status, message, stage, results),
session=session,
rollback=True
)
class ComputeJob(Base):
"""
Single compute job. Holds general information about job
and its status, but not about individual parameters
(these are stored in config file to keep table schema
stable).
"""
__tablename__ = JOB_TABLE_NAME
# internal unique ID of this single compute job
key = Column(Integer, primary_key=True)
# human-readable job identifier (must be unique)
job_id = Column(String(255), unique=True)
# job prefix
prefix = Column(String(2048))
# job pipeline (monomer, complex, ...)
pipeline = Column(String(128))
# location - e.g., working dir, remote URI, asf
location = Column(String(2048))
# job status ("pending", "running", "finished",
# "failed", "terminated")
status = Column(String(128))
# message upon job failure / termination
# (e.g. exception, termination code, ...)
message = Column(LongText)
# job identifier e.g. on compute cluster
# e.g. if job should be stopped
runner_id = Column(String(2048))
# stage of computational pipeline
# ("align", "couplings", ...)
stage = Column(String(128))
# time the job was created
time_created = Column(DateTime())
# time the job started running
time_started = Column(DateTime())
# time the job finished running; last
# update corresponds to time job finished
time_updated = Column(DateTime())
# configuration of job (stringified JSON)
config = Column(LongText)
# Optional MD5 hash of configuration to identify
# unique job configurations
fingerprint = Column(String(32))
# results of job (stringified JSON)
results = Column(LongText)
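# Minimal usage sketch: exercising the ComputeJob table against a throwaway
# in-memory SQLite database; the job id, prefix, pipeline and config values
# below are illustrative assumptions.
if __name__ == "__main__":
    _demo_engine = create_engine("sqlite://")
    Base.metadata.create_all(bind=_demo_engine)
    _demo_session = sessionmaker(bind=_demo_engine)()
    _demo_session.add(
        ComputeJob(
            job_id="example_job",
            prefix="output/example",
            pipeline="protein_monomer",
            status=EStatus.INIT,
            config=json.dumps({"stages": ["align"]}),
            time_created=func.now(),
        )
    )
    _demo_session.commit()
    print(_demo_session.query(ComputeJob).filter_by(job_id="example_job").one().status)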
|
NLP/Text2SQL-BASELINE/text2sql/dataproc/sql_preproc_v2.py | zhangyimi/Research | 1,319 | 12765311 | <gh_stars>1000+
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQL pre-processor for model decoder
Filename: sql_preproc.py
Authors: ZhangAo(@<EMAIL>)
Date: 2021-01-25 18:00:55
"""
import sys
import os
import traceback
import logging
import json
import collections
import collections.abc
import copy
import itertools
import shutil
from pathlib import Path
import attr
import numpy as np
import paddle
import paddle.nn.functional as F
from text2sql.dataproc import vocab
from text2sql.utils import serialization
def get_field_presence_info(ast_wrapper, node, field_infos):
"""get_field_presence_info"""
present = []
for field_info in field_infos:
field_value = node.get(field_info.name)
is_present = field_value is not None and field_value != []
maybe_missing = field_info.opt or field_info.seq
is_builtin_type = field_info.type in ast_wrapper.primitive_types
if maybe_missing and is_builtin_type:
# TODO: make it possible to deal with "singleton?"
present.append(is_present and type(field_value).__name__)
elif maybe_missing and not is_builtin_type:
present.append(is_present)
elif not maybe_missing and is_builtin_type:
present.append(type(field_value).__name__)
elif not maybe_missing and not is_builtin_type:
assert is_present
present.append(True)
return tuple(present)
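# Minimal sketch of the presence tuple that get_field_presence_info builds; the
# ast_wrapper and field descriptions below are hand-built stand-ins, not the
# real asdl grammar objects.
def _demo_field_presence_info():
    from types import SimpleNamespace
    ast_wrapper = SimpleNamespace(primitive_types={'int', 'string'})
    field_infos = [
        SimpleNamespace(name='limit', opt=True, seq=False, type='int'),
        SimpleNamespace(name='conds', opt=False, seq=True, type='cond'),
    ]
    node = {'_type': 'select', 'limit': 3, 'conds': []}
    # returns ('int', False): the optional primitive is present as an int,
    # while the sequence field is empty
    return get_field_presence_info(ast_wrapper, node, field_infos)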
@attr.s
class DecoderSQLItem:
"""DecoderSQLItem"""
tree = attr.ib()
orig_code = attr.ib()
sql_query = attr.ib(default="")
class SQLPreproc(object):
"""SQLPreproc"""
def __init__(self, base_path,
grammar_class,
predict_value=True,
min_freq=3,
max_count=5000,
use_seq_elem_rules=False,
is_cached=False):
"""init
Args:
base_path (TYPE): if is_cached is False, base_path is the asdl grammar file.
if is_cached is True, base_path is path to cached directory.
grammar_class (TYPE): grammar class, like grammars.dusql.DuSQLLanguage
predict_value (TYPE): Default is True
min_freq (TYPE): Default is 3
max_count (TYPE): Default is 5000
use_seq_elem_rules (TYPE): Default is False
is_cached (TYPE): Default is False
Raises: NULL
"""
self.base_path = base_path
self.predict_value = predict_value
self.vocab = None
self.all_rules = None
self.rules_mask = None
# key: train/dev/val/test/...
# value: examples
self.items = collections.defaultdict(list)
self.sum_type_constructors = collections.defaultdict(set)
self.field_presence_infos = collections.defaultdict(set)
self.seq_lengths = collections.defaultdict(set)
self.primitive_types = set()
if not is_cached:
self.grammar = grammar_class(self.base_path)
self.ast_wrapper = self.grammar.ast_wrapper
self.vocab_builder = vocab.VocabBuilder(min_freq, max_count)
else:
self.grammar = None
self.ast_wrapper = None
self.load(grammar_class)
self.use_seq_elem_rules = use_seq_elem_rules
if self.predict_value:
self.format_sql_value = self.transfer_sql_value
else:
self.format_sql_value = self.fix_sql_value
    def _get_val_index(self, val, value_dict):
        # map a value string to its index in value_dict; falls back to float
        # normalization and prefix matching, and returns -1 when nothing matches
        def _float(val):
try:
return True, str(int(float(val)))
except Exception as e:
return False, ''
val = str(val)
if val in value_dict:
return value_dict[val]
is_float, new_val = _float(val)
if is_float and new_val in value_dict:
return value_dict[new_val]
new_val = val.replace('.', '')
candi = []
for v, idx in value_dict.items():
v = v.replace('.', '')
if v.startswith(new_val) or new_val.startswith(v):
candi.append((v, idx))
if len(candi) == 1:
return candi[0][1]
elif len(candi) > 1:
candi.sort(key=lambda x: len(x[0]), reverse=True)
return candi[0][1]
return -1
def transfer_sql_value(self, sql_json, value_dict):
"""transfer value str to int index
Args:
sql_json (TYPE): [in/out]
value_dict (TYPE): NULL
Returns: TODO
Raises: NULL
"""
        if 'cond_conn_op' in sql_json:  # NL2SQL-style JSON format
self.transfer_simple_sql_value(sql_json, value_dict)
return
def _trans_cond(cond):
"""transfer condition value"""
val1 = cond[3]
val2 = cond[4]
if type(val1) is dict:
self.transfer_sql_value(val1, value_dict)
if val2 is not None:
val2 = self._get_val_index(val2, value_dict)
cond[4] = val2 if val2 >= 0 else 0
return
val1 = self._get_val_index(val1, value_dict)
if val2 is not None:
val2 = self._get_val_index(val2, value_dict)
if val1 == -1:
val1 = 0
logging.debug('lost value: %s. candidates: %s', cond[3], ', '.join(value_dict.keys()))
logging.debug('sql is: %s', json.dumps(sql_json, ensure_ascii=False))
if val2 == -1:
val2 = 0
cond[3] = val1
cond[4] = val2
for table_unit in sql_json['from']['table_units']:
if type(table_unit[1]) is dict:
self.transfer_sql_value(table_unit[1], value_dict)
for cond in sql_json['where'][::2]:
_trans_cond(cond)
for cond in sql_json['having'][::2]:
_trans_cond(cond)
if sql_json['limit'] is not None:
limit = str(sql_json['limit'])
else:
limit = '0'
if limit in value_dict:
sql_json['limit'] = value_dict[limit]
else:
logging.debug('value of limit is lost: %s. candidates: %s', limit, ', '.join(value_dict.keys()))
sql_json['limit'] = value_dict['0']
if sql_json['intersect'] is not None:
self.transfer_sql_value(sql_json['intersect'], value_dict)
if sql_json['union'] is not None:
self.transfer_sql_value(sql_json['union'], value_dict)
if sql_json['except'] is not None:
self.transfer_sql_value(sql_json['except'], value_dict)
def transfer_simple_sql_value(self, sql_json, value_dict):
"""
Args:
sql_json (TYPE): NULL
value_dict (TYPE): NULL
Returns: TODO
Raises: NULL
"""
for cond in sql_json['conds']:
value = cond[2]
new_val = self._get_val_index(value, value_dict)
if new_val == -1:
new_val = 0
cond[2] = new_val
def fix_sql_value(self, sql_json, value_dict):
"""fix sql value to 'value' token
Args:
sql_json (TYPE): NULL
value_dict (TYPE):
Returns: TODO
Raises: NULL
"""
def _fix_cond_value(cond):
"""transfer condition value"""
val1 = cond[3]
val2 = cond[4]
if type(val1) is dict:
self.fix_sql_value(val1, value_dict)
if val2 is not None:
val2 = self._get_val_index('value', value_dict)
cond[4] = val2 if val2 >= 0 else 0
return
val1 = self._get_val_index('value', value_dict)
if val2 is not None:
val2 = self._get_val_index('value', value_dict)
if val1 == -1:
val1 = 0
logging.info('lost value: %s. candidates: %s', cond[3], ', '.join(value_dict.keys()))
logging.debug('sql is: %s', json.dumps(sql_json, ensure_ascii=False))
if val2 == -1:
val2 = 0
cond[3] = val1
cond[4] = val2
for table_unit in sql_json['from']['table_units']:
if type(table_unit[1]) is dict:
self.fix_sql_value(table_unit[1], value_dict)
for cond in sql_json['where'][::2]:
_fix_cond_value(cond)
for cond in sql_json['having'][::2]:
_fix_cond_value(cond)
if sql_json['limit'] is not None:
limit = 'value'
else:
limit = 'empty'
assert limit in value_dict
sql_json['limit'] = value_dict[limit]
if sql_json['intersect'] is not None:
self.fix_sql_value(sql_json['intersect'], value_dict)
if sql_json['union'] is not None:
self.fix_sql_value(sql_json['union'], value_dict)
if sql_json['except'] is not None:
self.fix_sql_value(sql_json['except'], value_dict)
def add_item(self, section, sql_json, value_list):
"""add an item"""
value_dict = {val: idx for idx, val in enumerate(value_list)}
self.format_sql_value(sql_json, value_dict)
parsed = self.grammar.parse(sql_json, section)
        self.ast_wrapper.verify_ast(parsed)    # will raise AssertionError if verification fails
root = parsed
if section == 'train':
for token in self._all_tokens(root):
self.vocab_builder.add_word(token)
self._record_productions(root)
item = DecoderSQLItem(tree=root, orig_code=sql_json)
self.items[section].append(item)
return item
def clear_items(self):
"""clear items"""
self.items = collections.defaultdict(list)
def _construct_cache_path(self, root_path):
"""
Args:
root_path (TYPE): NULL
Returns: TODO
Raises: NULL
"""
root_path = Path(root_path)
self.vocab_path = root_path / 'dec_vocab.json'
self.observed_productions_path = root_path / 'observed_productions.json'
self.grammar_rules_path = root_path / 'grammar_rules.json'
self.grammar_file = root_path / 'grammar.asdl'
def save(self, save_path):
"""save parsed items to disk"""
os.makedirs(save_path, exist_ok=True)
self._construct_cache_path(save_path)
self.vocab = self.vocab_builder.finish()
        self.vocab.save(self.vocab_path)
        """ sql preproc is not responsible for storing the data part
for section, items in self.items.items():
with open(os.path.join(self.data_dir, section + '.jsonl'), 'w') as f:
for item in items:
f.write(json.dumps(attr.asdict(item)) + '\n')
"""
# observed_productions
self.sum_type_constructors = serialization.to_dict_with_sorted_values(self.sum_type_constructors)
self.field_presence_infos = serialization.to_dict_with_sorted_values(self.field_presence_infos, key=str)
self.seq_lengths = serialization.to_dict_with_sorted_values(self.seq_lengths)
self.primitive_types = sorted(self.primitive_types)
with open(self.observed_productions_path, 'w') as f:
json.dump({
'sum_type_constructors': self.sum_type_constructors,
'field_presence_infos': self.field_presence_infos,
'seq_lengths': self.seq_lengths,
'primitive_types': self.primitive_types,
}, f, indent=2, sort_keys=True)
# grammar
self.all_rules, self.rules_mask = self._calculate_rules()
with open(self.grammar_rules_path, 'w') as f:
json.dump({
'all_rules': self.all_rules,
'rules_mask': self.rules_mask,
}, f, indent=2, sort_keys=True)
shutil.copy2(self.base_path, self.grammar_file)
def load(self, grammar_class):
"""load parsed items from disk"""
self._construct_cache_path(self.base_path)
self.grammar = grammar_class(self.grammar_file)
self.ast_wrapper = self.grammar.ast_wrapper
self.vocab = vocab.Vocab.load(self.vocab_path)
observed_productions = json.load(open(self.observed_productions_path))
self.sum_type_constructors = observed_productions['sum_type_constructors']
self.field_presence_infos = observed_productions['field_presence_infos']
self.seq_lengths = observed_productions['seq_lengths']
self.primitive_types = observed_productions['primitive_types']
grammar = json.load(open(self.grammar_rules_path))
self.all_rules = serialization.tuplify(grammar['all_rules'])
self.rules_mask = grammar['rules_mask']
def _record_productions(self, tree):
"""_record_productions"""
queue = [(tree, False)]
while queue:
node, is_seq_elem = queue.pop()
node_type = node['_type']
# Rules of the form:
# expr -> Attribute | Await | BinOp | BoolOp | ...
# expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...
for type_name in [node_type] + node.get('_extra_types', []):
if type_name in self.ast_wrapper.constructors:
sum_type_name = self.ast_wrapper.constructor_to_sum_type[type_name]
if is_seq_elem and self.use_seq_elem_rules:
self.sum_type_constructors[sum_type_name + '_seq_elem'].add(type_name)
else:
self.sum_type_constructors[sum_type_name].add(type_name)
# Rules of the form:
# FunctionDef
# -> identifier name, arguments args
# | identifier name, arguments args, stmt* body
# | identifier name, arguments args, expr* decorator_list
# | identifier name, arguments args, expr? returns
# ...
# | identifier name, arguments args, stmt* body, expr* decorator_list, expr returns
assert node_type in self.ast_wrapper.singular_types
field_presence_info = get_field_presence_info(
self.ast_wrapper,
node,
self.ast_wrapper.singular_types[node_type].fields)
self.field_presence_infos[node_type].add(field_presence_info)
for field_info in self.ast_wrapper.singular_types[node_type].fields:
field_value = node.get(field_info.name, [] if field_info.seq else None)
to_enqueue = []
if field_info.seq:
# Rules of the form:
# stmt* -> stmt
# | stmt stmt
# | stmt stmt stmt
self.seq_lengths[field_info.type + '*'].add(len(field_value))
to_enqueue = field_value
else:
to_enqueue = [field_value]
for child in to_enqueue:
if isinstance(child, collections.abc.Mapping) and '_type' in child:
queue.append((child, field_info.seq))
else:
self.primitive_types.add(type(child).__name__)
def _calculate_rules(self):
"""_calculate_rules"""
offset = 0
all_rules = []
rules_mask = {}
# Rules of the form:
# expr -> Attribute | Await | BinOp | BoolOp | ...
# expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...
for parent, children in sorted(self.sum_type_constructors.items()):
assert not isinstance(children, set)
rules_mask[parent] = (offset, offset + len(children))
offset += len(children)
all_rules += [(parent, child) for child in children]
# Rules of the form:
# FunctionDef
# -> identifier name, arguments args
# | identifier name, arguments args, stmt* body
# | identifier name, arguments args, expr* decorator_list
# | identifier name, arguments args, expr? returns
# ...
# | identifier name, arguments args, stmt* body, expr* decorator_list, expr returns
for name, field_presence_infos in sorted(self.field_presence_infos.items()):
assert not isinstance(field_presence_infos, set)
rules_mask[name] = (offset, offset + len(field_presence_infos))
offset += len(field_presence_infos)
all_rules += [(name, presence) for presence in field_presence_infos]
# Rules of the form:
# stmt* -> stmt
# | stmt stmt
# | stmt stmt stmt
for seq_type_name, lengths in sorted(self.seq_lengths.items()):
assert not isinstance(lengths, set)
rules_mask[seq_type_name] = (offset, offset + len(lengths))
offset += len(lengths)
all_rules += [(seq_type_name, i) for i in lengths]
return tuple(all_rules), rules_mask
def _all_tokens(self, root):
"""_all_tokens"""
queue = [root]
while queue:
node = queue.pop()
type_info = self.ast_wrapper.singular_types[node['_type']]
for field_info in reversed(type_info.fields):
field_value = node.get(field_info.name)
if field_info.type in self.grammar.pointers:
pass
elif field_info.type in self.ast_wrapper.primitive_types:
for token in self.grammar.tokenize_field_value(field_value):
yield token
elif isinstance(field_value, (list, tuple)):
queue.extend(field_value)
elif field_value is not None:
queue.append(field_value)
if __name__ == "__main__":
"""run some simple test cases"""
pass
|
CV/SemSegPaddle/train.py | zhangyimi/Research | 1,319 | 12765335 | <filename>CV/SemSegPaddle/train.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# GPU memory garbage collection optimization flags
os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0"
import sys
import timeit
import argparse
import pprint
import shutil
import functools
import paddle
import numpy as np
import paddle.fluid as fluid
from src.utils.metrics import ConfusionMatrix
from src.utils.config import cfg
from src.utils.timer import Timer, calculate_eta
from src.utils import dist_utils
from src.datasets import build_dataset
from src.models.model_builder import build_model
from src.models.model_builder import ModelPhase
from src.models.model_builder import parse_shape_from_file
from eval import evaluate
from vis import visualize
def parse_args():
parser = argparse.ArgumentParser(description='semseg-paddle')
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file for training (and optionally testing)',
default=None,
type=str)
parser.add_argument(
'--use_gpu',
dest='use_gpu',
help='Use gpu or cpu',
action='store_true',
default=False)
parser.add_argument(
'--use_mpio',
dest='use_mpio',
help='Use multiprocess I/O or not',
action='store_true',
default=False)
parser.add_argument(
'--log_steps',
dest='log_steps',
help='Display logging information at every log_steps',
default=10,
type=int)
parser.add_argument(
'--debug',
dest='debug',
help='debug mode, display detail information of training',
action='store_true')
parser.add_argument(
'--use_vdl',
dest='use_vdl',
help='whether to record the data during training to VisualDL',
action='store_true')
parser.add_argument(
'--vdl_log_dir',
dest='vdl_log_dir',
help='VisualDL logging directory',
default=None,
type=str)
parser.add_argument(
'--do_eval',
dest='do_eval',
help='Evaluation models result on every new checkpoint',
action='store_true')
parser.add_argument(
'opts',
help='See utils/config.py for all options',
default=None,
nargs=argparse.REMAINDER)
return parser.parse_args()
def save_checkpoint(exe, program, ckpt_name):
"""
Save checkpoint for evaluation or resume training
"""
filename= '{}_{}_{}_epoch_{}.pdparams'.format(str(cfg.MODEL.MODEL_NAME),
str(cfg.MODEL.BACKBONE), str(cfg.DATASET.DATASET_NAME), ckpt_name)
ckpt_dir = cfg.TRAIN.MODEL_SAVE_DIR
print("Save model checkpoint to {}".format(ckpt_dir))
if not os.path.isdir(ckpt_dir):
os.makedirs(ckpt_dir)
fluid.io.save_params(exe, ckpt_dir, program, filename)
return ckpt_dir
def load_checkpoint(exe, program):
"""
Load checkpoiont from pretrained model directory for resume training
"""
print('Resume model training from:', cfg.TRAIN.RESUME_MODEL_DIR)
if not os.path.exists(cfg.TRAIN.RESUME_MODEL_DIR):
raise ValueError("TRAIN.PRETRAIN_MODEL {} not exist!".format(
cfg.TRAIN.RESUME_MODEL_DIR))
fluid.io.load_persistables(
exe, cfg.TRAIN.RESUME_MODEL_DIR, main_program=program)
model_path = cfg.TRAIN.RESUME_MODEL_DIR
# Check is path ended by path spearator
if model_path[-1] == os.sep:
model_path = model_path[0:-1]
epoch_name = os.path.basename(model_path)
# If resume model is final model
if epoch_name == 'final':
begin_epoch = cfg.SOLVER.NUM_EPOCHS
    # If the resume model path ends with digits, restore the epoch status
elif epoch_name.isdigit():
epoch = int(epoch_name)
begin_epoch = epoch + 1
else:
raise ValueError("Resume model path is not valid!")
print("Model checkpoint loaded successfully!")
return begin_epoch
def print_info(*msg):
if cfg.TRAINER_ID == 0:
print(*msg)
def train(cfg):
startup_prog = fluid.Program()
train_prog = fluid.Program()
drop_last = True
dataset = build_dataset(cfg.DATASET.DATASET_NAME,
file_list=cfg.DATASET.TRAIN_FILE_LIST,
mode=ModelPhase.TRAIN,
shuffle=True,
data_dir=cfg.DATASET.DATA_DIR,
base_size= cfg.DATAAUG.BASE_SIZE, crop_size= cfg.DATAAUG.CROP_SIZE, rand_scale=True)
def data_generator():
if args.use_mpio:
data_gen = dataset.multiprocess_generator(
num_processes=cfg.DATALOADER.NUM_WORKERS,
max_queue_size=cfg.DATALOADER.BUF_SIZE)
else:
data_gen = dataset.generator()
batch_data = []
for b in data_gen:
batch_data.append(b)
if len(batch_data) == (cfg.TRAIN_BATCH_SIZE // cfg.NUM_TRAINERS):
for item in batch_data:
yield item[0], item[1], item[2]
batch_data = []
        # If the sync batch norm strategy is used, drop the last batch when the number
        # of samples in batch_data is less than cfg.BATCH_SIZE to avoid NCCL hang issues
if not cfg.TRAIN.SYNC_BATCH_NORM:
for item in batch_data:
yield item[0], item[1], item[2]
# Get device environment
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id) if args.use_gpu else fluid.CPUPlace()
places = fluid.cuda_places() if args.use_gpu else fluid.cpu_places()
# Get number of GPU
dev_count = cfg.NUM_TRAINERS if cfg.NUM_TRAINERS > 1 else len(places)
print_info("#device count: {}".format(dev_count))
cfg.TRAIN_BATCH_SIZE = dev_count * int(cfg.TRAIN_BATCH_SIZE_PER_GPU)
print_info("#train_batch_size: {}".format(cfg.TRAIN_BATCH_SIZE))
print_info("#batch_size_per_dev: {}".format(cfg.TRAIN_BATCH_SIZE_PER_GPU))
py_reader, avg_loss, lr, pred, grts, masks = build_model(
train_prog, startup_prog, phase=ModelPhase.TRAIN)
py_reader.decorate_sample_generator(
data_generator, batch_size=cfg.TRAIN_BATCH_SIZE_PER_GPU, drop_last=drop_last)
exe = fluid.Executor(place)
exe.run(startup_prog)
exec_strategy = fluid.ExecutionStrategy()
# Clear temporary variables every 100 iteration
if args.use_gpu:
exec_strategy.num_threads = fluid.core.get_cuda_device_count()
exec_strategy.num_iteration_per_drop_scope = 100
build_strategy = fluid.BuildStrategy()
if cfg.NUM_TRAINERS > 1 and args.use_gpu:
dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog)
exec_strategy.num_threads = 1
if cfg.TRAIN.SYNC_BATCH_NORM and args.use_gpu:
if dev_count > 1:
# Apply sync batch norm strategy
print_info("Sync BatchNorm strategy is effective.")
build_strategy.sync_batch_norm = True
else:
print_info(
"Sync BatchNorm strategy will not be effective if GPU device"
" count <= 1")
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=avg_loss.name,
exec_strategy=exec_strategy,
build_strategy=build_strategy)
# Resume training
begin_epoch = cfg.SOLVER.BEGIN_EPOCH
if cfg.TRAIN.RESUME_MODEL_DIR:
begin_epoch = load_checkpoint(exe, train_prog)
# Load pretrained model
elif os.path.exists(cfg.TRAIN.PRETRAINED_MODEL_DIR):
print_info('Pretrained model dir: ', cfg.TRAIN.PRETRAINED_MODEL_DIR)
load_vars = []
load_fail_vars = []
def var_shape_matched(var, shape):
"""
            Check whether the persistable variable shape matches the current network
"""
var_exist = os.path.exists(
os.path.join(cfg.TRAIN.PRETRAINED_MODEL_DIR, var.name))
if var_exist:
var_shape = parse_shape_from_file(
os.path.join(cfg.TRAIN.PRETRAINED_MODEL_DIR, var.name))
return var_shape == shape
return False
for x in train_prog.list_vars():
if isinstance(x, fluid.framework.Parameter):
shape = tuple(fluid.global_scope().find_var(
x.name).get_tensor().shape())
if var_shape_matched(x, shape):
load_vars.append(x)
else:
load_fail_vars.append(x)
fluid.io.load_vars(
exe, dirname=cfg.TRAIN.PRETRAINED_MODEL_DIR, vars=load_vars)
for var in load_vars:
        print_info("Parameter[{}] loaded successfully!".format(var.name))
for var in load_fail_vars:
        print_info(
            "Parameter[{}] does not exist or its shape does not match the current"
            " network, skipping it.".format(var.name))
print_info("{}/{} pretrained parameters loaded successfully!".format(
len(load_vars),
len(load_vars) + len(load_fail_vars)))
else:
print_info(
            'Pretrained model dir {} does not exist, training from scratch...'.
format(cfg.TRAIN.PRETRAINED_MODEL_DIR))
fetch_list = [avg_loss.name, lr.name]
if args.debug:
# Fetch more variable info and use streaming confusion matrix to
# calculate IoU results if in debug mode
np.set_printoptions(
precision=4, suppress=True, linewidth=160, floatmode="fixed")
fetch_list.extend([pred.name, grts.name, masks.name])
cm = ConfusionMatrix(cfg.DATASET.NUM_CLASSES, streaming=True)
if args.use_vdl:
if not args.vdl_log_dir:
print_info("Please specify the log directory by --vdl_log_dir.")
exit(1)
from visualdl import LogWriter
log_writer = LogWriter(args.vdl_log_dir)
# trainer_id = int(os.getenv("PADDLE_TRAINER_ID", 0))
# num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
step = 0
all_step = cfg.DATASET.TRAIN_TOTAL_IMAGES // cfg.TRAIN_BATCH_SIZE
    if cfg.DATASET.TRAIN_TOTAL_IMAGES % cfg.TRAIN_BATCH_SIZE and not drop_last:
all_step += 1
all_step *= (cfg.SOLVER.NUM_EPOCHS - begin_epoch + 1)
avg_loss = 0.0
timer = Timer()
timer.start()
if begin_epoch > cfg.SOLVER.NUM_EPOCHS:
raise ValueError(
("begin epoch[{}] is larger than cfg.SOLVER.NUM_EPOCHS[{}]").format(
begin_epoch, cfg.SOLVER.NUM_EPOCHS))
if args.use_mpio:
print_info("Use multiprocess reader")
else:
print_info("Use multi-thread reader")
for epoch in range(begin_epoch, cfg.SOLVER.NUM_EPOCHS + 1):
py_reader.start()
while True:
try:
if args.debug:
# Print category IoU and accuracy to check whether the
                    # training process matches expectations
loss, lr, pred, grts, masks = exe.run(
program=compiled_train_prog,
fetch_list=fetch_list,
return_numpy=True)
cm.calculate(pred, grts, masks)
avg_loss += np.mean(np.array(loss))
step += 1
if step % args.log_steps == 0:
speed = args.log_steps / timer.elapsed_time()
avg_loss /= args.log_steps
category_acc, mean_acc = cm.accuracy()
category_iou, mean_iou = cm.mean_iou()
print_info((
"epoch={}/{} step={}/{} lr={:.5f} loss={:.4f} acc={:.5f} mIoU={:.5f} step/sec={:.3f} | ETA {}"
).format(epoch, cfg.SOLVER.NUM_EPOCHS, step, all_step, lr[0], avg_loss, mean_acc,
mean_iou, speed,
calculate_eta(all_step - step, speed)))
print_info("Category IoU: ", category_iou)
print_info("Category Acc: ", category_acc)
if args.use_vdl:
log_writer.add_scalar('Train/mean_iou', mean_iou,
step)
log_writer.add_scalar('Train/mean_acc', mean_acc,
step)
log_writer.add_scalar('Train/loss', avg_loss,
step)
log_writer.add_scalar('Train/lr', lr[0],
step)
log_writer.add_scalar('Train/step/sec', speed,
step)
sys.stdout.flush()
avg_loss = 0.0
cm.zero_matrix()
timer.restart()
else:
                    # If not in debug mode, avoid unnecessary logging and calculation
loss, lr = exe.run(
program=compiled_train_prog,
fetch_list=fetch_list,
return_numpy=True)
avg_loss += np.mean(np.array(loss))
step += 1
if step % args.log_steps == 0 and cfg.TRAINER_ID == 0:
avg_loss /= args.log_steps
speed = args.log_steps / timer.elapsed_time()
print((
"epoch={}/{} step={}/{} lr={:.5f} loss={:.4f} step/sec={:.3f} | ETA {}"
                        ).format(epoch, cfg.SOLVER.NUM_EPOCHS, step, all_step, lr[0], avg_loss, speed,
                               calculate_eta(all_step - step, speed)))
if args.use_vdl:
log_writer.add_scalar('Train/loss', avg_loss,
step)
log_writer.add_scalar('Train/lr', lr[0],
step)
log_writer.add_scalar('Train/speed', speed,
step)
sys.stdout.flush()
avg_loss = 0.0
timer.restart()
except fluid.core.EOFException:
py_reader.reset()
break
except Exception as e:
print(e)
if epoch % cfg.TRAIN.SNAPSHOT_EPOCH == 0 and cfg.TRAINER_ID == 0:
ckpt_dir = save_checkpoint(exe, train_prog, epoch)
if args.do_eval:
print("Evaluation start")
_, mean_iou, _, mean_acc = evaluate(
cfg=cfg,
ckpt_dir=ckpt_dir,
use_gpu=args.use_gpu,
use_mpio=args.use_mpio)
if args.use_vdl:
log_writer.add_scalar('Evaluate/mean_iou', mean_iou,
step)
log_writer.add_scalar('Evaluate/mean_acc', mean_acc,
step)
# Use VisualDL to visualize results
if args.use_vdl and cfg.DATASET.VIS_FILE_LIST is not None:
visualize(
cfg=cfg,
use_gpu=args.use_gpu,
vis_file_list=cfg.DATASET.VIS_FILE_LIST,
vis_dir="visual",
ckpt_dir=ckpt_dir,
log_writer=log_writer)
# save final model
if cfg.TRAINER_ID == 0:
save_checkpoint(exe, train_prog, 'final')
if args.use_vdl:
log_writer.close()
def main(args):
if args.cfg_file is not None:
cfg.update_from_file(args.cfg_file)
if args.opts:
cfg.update_from_list(args.opts)
cfg.TRAINER_ID = int(os.getenv("PADDLE_TRAINER_ID", 0))
cfg.NUM_TRAINERS = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
cfg.check_and_infer()
print_info(pprint.pformat(cfg))
train(cfg)
if __name__ == '__main__':
args = parse_args()
start = timeit.default_timer()
main(args)
end = timeit.default_timer()
print("training time: {} h".format(1.0*(end-start)/3600))
|
docs/basic_usage/bu01.py | jviide/htm.py | 112 | 12765351 | from htm import htm
@htm
def html(tag, props, children):
return tag, props, children
result01 = html("""
<div>Hello World</div>
""")
|
testing/util.py | bbhunter/fuzz-lightyear | 169 | 12765382 |
import re
# Source: https://stackoverflow.com/a/14693789
_ansi_escape = re.compile(r'\x1b\[[0-?]*[ -/]*[@-~]')
def uncolor(text):
return _ansi_escape.sub('', text)
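# Illustrative usage (not part of the original helper): the pattern above matches
# ANSI CSI sequences such as '\x1b[31m', so colored terminal output collapses
# back to plain text.
if __name__ == '__main__':
    assert uncolor('\x1b[31mFAILED\x1b[0m') == 'FAILED'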
|
TeacherTree/venv/Lib/site-packages/flask_boost/templates/model.py | intuile/teacher-tree-website | 543 | 12765386 | # coding: utf-8
from datetime import datetime
from ._base import db
class #{model|title}(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True)
created_at = db.Column(db.DateTime, default=datetime.now)
def __repr__(self):
return '<#{model|title} %s>' % self.name
|
tensornetwork/linalg/krylov.py | khanhgithead/TensorNetwork | 1,681 | 12765392 | # Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Any, Union, Type, Callable, List, Text
import numpy as np
import tensornetwork.tensor
import tensornetwork.backends.abstract_backend as abstract_backend
from tensornetwork import backends
AbstractBackend = abstract_backend.AbstractBackend
Array = Any
Tensor = tensornetwork.tensor.Tensor
class MatvecCache:
"""
Caches matvec functions so that they have identical function signature
when called repeatedly. This circumvents extraneous recompilations when
Jit is used. Incoming matvec functions should be in terms of Tensor
and have function signature A = matvec(x, *args), where each of the
positional arguments in *args is also a Tensor.
"""
def __init__(self):
self.clear()
def clear(self):
self.cache = {}
def retrieve(self, backend_name: Text, matvec: Callable):
if backend_name not in self.cache:
self.cache[backend_name] = {}
if matvec not in self.cache[backend_name]:
def wrapped(x, *args):
X = Tensor(x, backend=backend_name)
Args = [Tensor(a, backend=backend_name) for a in args]
Y = matvec(X, *Args)
return Y.array
self.cache[backend_name][matvec] = wrapped
return self.cache[backend_name][matvec]
KRYLOV_MATVEC_CACHE = MatvecCache()
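# Illustrative sketch (not part of the original module): how a Tensor-valued
# matvec is wrapped through the module-level cache. The "numpy" backend choice
# and the scaling matvec are assumptions made only for this example.
def _matvec_cache_example():
  def scale(x):
    # `x` is handed in as a tn.Tensor; return a tn.Tensor as well.
    return Tensor(2.0 * x.array, backend="numpy")
  wrapped = KRYLOV_MATVEC_CACHE.retrieve("numpy", scale)
  return wrapped(np.ones(4))  # plain ndarray in, plain ndarray out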
def krylov_error_checks(backend: Union[Text, AbstractBackend, None],
x0: Union[Tensor, None],
args: Union[List[Tensor], None]):
"""
Checks that at least one of backend and x0 are not None; that backend
and x0.backend agree; that if args is not None its elements are Tensors
whose backends also agree. Creates a backend object from backend
and returns the arrays housed by x0 and args.
Args:
backend: A backend, text specifying one, or None.
x0: A tn.Tensor, or None.
args: A list of tn.Tensor, or None.
Returns:
backend: A backend object.
x0_array: x0.array if x0 was supplied, or None.
args_arr: Each array in the list of args if it was supplied, or None.
"""
# If the backend wasn't specified, infer it from x0. If neither was specified
# raise ValueError.
if backend is None:
if x0 is None:
raise ValueError("One of backend or x0 must be specified.")
backend = x0.backend
else:
backend = backends.backend_factory.get_backend(backend)
# If x0 was specified, return the enclosed array. If attempting to do so
# raises AttributeError, instead raise TypeError. If backend was also
# specified, but was different than x0.backend, raise ValueError.
if x0 is not None:
try:
x0_array = x0.array
except AttributeError as err:
raise TypeError("x0 must be a tn.Tensor.") from err
if x0.backend.name != backend.name:
errstr = ("If both x0 and backend are specified the"
"backends must agree. \n"
f"x0 backend: {x0.backend.name} \n"
f"backend: {backend.name} \n")
raise ValueError(errstr)
else: # If x0 was not specified, set x0_array (the returned value) to None.
x0_array = None
# If args were specified, set the returned args_array to be all the enclosed
# arrays. If any of them raise AttributeError during the attempt, raise
# TypeError. If args was not specified, set args_array to None.
if args is not None:
try:
args_array = [a.array for a in args]
except AttributeError as err:
raise TypeError("Every element of args must be a tn.Tensor.") from err
else:
args_array = None
return (backend, x0_array, args_array)
def eigsh_lanczos(A: Callable,
backend: Optional[Union[Text, AbstractBackend]] = None,
args: Optional[List[Tensor]] = None,
x0: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
delta: float = 1E-8,
ndiag: int = 20,
reorthogonalize: bool = False) -> Tuple[Tensor, List]:
"""
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of `A`.
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Array`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(x0, *args)`.
x0: An initial vector for the Lanczos algorithm. If `None`,
a random initial vector is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
    dtype: The dtype of the input `A`. If no `x0` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
    numeig: The number of eigenvector-eigenvalue pairs to be computed.
If `numeig > 1`, `reorthogonalize` has to be `True`.
    tol: The desired precision of the eigenvalues. Uses
`backend.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol`
as stopping criterion between two diagonalization steps of the
tridiagonal operator.
delta: Stopping criterion for Lanczos iteration.
If a Krylov vector :math: `x_n` has an L2 norm
:math:`\\lVert x_n\\rVert < delta`, the iteration
is stopped. It means that an (approximate) invariant subspace has
been found.
ndiag: The tridiagonal Operator is diagonalized every `ndiag`
iterations to check convergence.
reorthogonalize: If `True`, Krylov vectors are kept orthogonal by
explicit orthogonalization (more costly than `reorthogonalize=False`)
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` lowest eigenvalues
eigvecs: A list of `numeig` lowest eigenvectors
"""
backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
result = backend.eigsh_lanczos(mv, args=args_array,
initial_state=x0_array,
shape=shape, dtype=dtype,
num_krylov_vecs=num_krylov_vecs, numeig=numeig,
tol=tol, delta=delta, ndiag=ndiag,
reorthogonalize=reorthogonalize)
eigvals, eigvecs = result
eigvecsT = [Tensor(ev, backend=backend) for ev in eigvecs]
return eigvals, eigvecsT
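# Illustrative usage sketch (not part of the original module). The test matrix
# `H` and the "numpy" backend are assumptions chosen only to show the calling
# convention; the matvec re-wraps its result into a Tensor as expected above.
def _eigsh_lanczos_example():
  H = np.diag(np.arange(1.0, 17.0))  # small symmetric test operator
  def matvec(x):
    # `x` arrives as a tn.Tensor; operate on the raw array and wrap it back up.
    return Tensor(H @ x.array, backend="numpy")
  x0 = Tensor(np.random.randn(16), backend="numpy")
  eigvals, eigvecs = eigsh_lanczos(matvec, x0=x0, numeig=1)
  return eigvals, eigvecs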
def eigs(A: Callable,
backend: Optional[Union[Text, AbstractBackend]] = None,
args: Optional[List[Tensor]] = None,
x0: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
which: Text = 'LR',
maxiter: int = 20) -> Tuple[Tensor, List]:
"""
Implicitly restarted Arnoldi method for finding the lowest
eigenvector-eigenvalue pairs of a linear operator `A`.
`A` is a function implementing the matrix-vector
product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigs`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
      res = eigs(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
    res = eigs(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
x0: An initial vector for the algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
tol: The desired precision of the eigenvalues. For the jax backend
this has currently no effect, and precision of eigenvalues is not
guaranteed. This feature may be added at a later point. To increase
precision the caller can either increase `maxiter` or `num_krylov_vecs`.
    which: Flag for targeting different types of eigenvalues. Currently
        supported are `which = 'LR'` (largest real part) and `which = 'LM'`
        (largest magnitude).
maxiter: Maximum number of restarts. For `maxiter=0` the routine becomes
equivalent to a simple Arnoldi method.
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` eigenvalues
eigvecs: A list of `numeig` eigenvectors
"""
backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
result = backend.eigs(mv, args=args_array, initial_state=x0_array,
shape=shape, dtype=dtype,
num_krylov_vecs=num_krylov_vecs, numeig=numeig,
tol=tol, which=which, maxiter=maxiter)
eigvals, eigvecs = result
eigvecsT = [Tensor(eV, backend=backend) for eV in eigvecs]
return eigvals, eigvecsT
def gmres(A_mv: Callable,
b: Tensor,
A_args: Optional[List] = None,
x0: Optional[Tensor] = None,
tol: float = 1E-05,
atol: Optional[float] = None,
num_krylov_vectors: Optional[int] = None,
maxiter: Optional[int] = 1,
M: Optional[Callable] = None
) -> Tuple[Tensor, int]:
""" GMRES solves the linear system A @ x = b for x given a vector `b` and
a general (not necessarily symmetric/Hermitian) linear operator `A`.
As a Krylov method, GMRES does not require a concrete matrix representation
of the n by n `A`, but only a function
`vector1 = A_mv(vector0, *A_args, **A_kwargs)`
prescribing a one-to-one linear map from vector0 to vector1 (that is,
A must be square, and thus vector0 and vector1 the same size). If `A` is a
dense matrix, or if it is a symmetric/Hermitian operator, a different
linear solver will usually be preferable.
GMRES works by first constructing the Krylov basis
K = (x0, A_mv@x0, A_mv@A_mv@x0, ..., (A_mv^num_krylov_vectors)@x_0) and then
solving a certain dense linear system K @ q0 = q1 from whose solution x can
be approximated. For `num_krylov_vectors = n` the solution is provably exact
in infinite precision, but the expense is cubic in `num_krylov_vectors` so
one is typically interested in the `num_krylov_vectors << n` case.
The solution can in this case be repeatedly
improved, to a point, by restarting the Arnoldi iterations each time
`num_krylov_vectors` is reached. Unfortunately the optimal parameter choices
balancing expense and accuracy are difficult to predict in advance, so
applying this function requires a degree of experimentation.
In a tensor network code one is typically interested in A_mv implementing
some tensor contraction. This implementation thus allows `b` and `x0` to be
of whatever arbitrary, though identical, shape `b = A_mv(x0, ...)` expects.
Reshaping to and from a matrix problem is handled internally.
Args:
A_mv : A function `v0 = A_mv(v, *A_args, **A_kwargs)` where `v0` and
`v` have the same shape.
b : The `b` in `A @ x = b`; it should be of the shape `A_mv`
operates on.
A_args : Positional arguments to `A_mv`, supplied to this interface
as a list.
Default: None.
x0 : An optional guess solution. Zeros are used by default.
If `x0` is supplied, its shape and dtype must match those of
`b`, or an
error will be thrown.
Default: zeros.
tol, atol: Solution tolerance to achieve,
norm(residual) <= max(tol*norm(b), atol).
Default: tol=1E-05
atol=tol
num_krylov_vectors
: Size of the Krylov space to build at each restart.
Expense is cubic in this parameter. If supplied, it must be
an integer in 0 < num_krylov_vectors <= b.size.
Default: b.size.
maxiter : The Krylov space will be repeatedly rebuilt up to this many
times. Large values of this argument
should be used only with caution, since especially for nearly
symmetric matrices and small `num_krylov_vectors` convergence
might well freeze at a value significantly larger than `tol`.
Default: 1.
M : Inverse of the preconditioner of A; see the docstring for
`scipy.sparse.linalg.gmres`. This is only supported in the
numpy backend. Supplying this argument to other backends will
trigger NotImplementedError.
Default: None.
Raises:
ValueError: -if `x0` is supplied but its shape differs from that of `b`.
-in NumPy, if the ARPACK solver reports a breakdown (which
usually indicates some kind of floating point issue).
-if num_krylov_vectors is 0 or exceeds b.size.
-if tol was negative.
-if M was supplied with any backend but NumPy.
Returns:
x : The converged solution. It has the same shape as `b`.
info : 0 if convergence was achieved, the number of restarts otherwise.
"""
try:
b_array = b.array
except AttributeError as err:
raise TypeError("b must be a tn.Tensor") from err
backend, x0_array, args_array = krylov_error_checks(b.backend, x0, A_args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A_mv)
out = backend.gmres(mv, b_array, A_args=args_array,
x0=x0_array, tol=tol, atol=atol,
num_krylov_vectors=num_krylov_vectors,
maxiter=maxiter, M=M)
result, info = out
resultT = Tensor(result, backend=b.backend)
return (resultT, info)
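# Illustrative usage sketch (not part of the original module); the operator `A`
# and the "numpy" backend are assumptions made only to show the call shape.
def _gmres_example():
  A = np.eye(8) + 0.1 * np.random.randn(8, 8)
  def A_mv(x):
    # `x` arrives as a tn.Tensor; operate on the raw array and wrap it back up.
    return Tensor(A @ x.array, backend="numpy")
  b = Tensor(np.random.randn(8), backend="numpy")
  x, info = gmres(A_mv, b, num_krylov_vectors=8, maxiter=2)
  return x, info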
|
workalendar/tests/test_mozambique.py | taiyeoguns/workalendar | 405 | 12765411 | from datetime import date
from . import GenericCalendarTest
from ..africa.mozambique import Mozambique
class MozambiqueTest(GenericCalendarTest):
cal_class = Mozambique
def test_year_new_year_shift(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 1, 1), holidays)
self.assertNotIn(date(2019, 1, 2), holidays)
holidays = self.cal.holidays_set(2020)
self.assertIn(date(2020, 1, 1), holidays)
self.assertNotIn(date(2020, 1, 2), holidays)
def test_n_holidays(self):
n_holidays = len(self.cal.holidays_set(2019))
for holiday in self.cal.get_calendar_holidays(2020):
print(holiday)
assert n_holidays == 10
def test_year_2018(self):
holidays = self.cal.holidays_set(2018)
# Fixed days section:
# 1. New Year's Day
self.assertIn(date(2018, 1, 1), holidays)
# 2. Mozambican Heroes' Day
self.assertIn(date(2018, 2, 3), holidays)
# 3. Mozambican Women's Day
self.assertIn(date(2018, 4, 7), holidays)
# 4. Good Friday
self.assertIn(date(2018, 3, 30), holidays)
# 5. Labour Day
self.assertIn(date(2018, 5, 1), holidays)
# 6. Independence Day
self.assertIn(date(2018, 6, 25), holidays)
# 7. Victory Day
self.assertIn(date(2018, 9, 7), holidays)
# 8. Armed Forces Day
self.assertIn(date(2018, 9, 25), holidays)
# 9. Peace And Reconciliation Day
self.assertIn(date(2018, 10, 4), holidays)
# 10. Christmas day
self.assertIn(date(2018, 12, 25), holidays)
def test_year_2019(self):
holidays = self.cal.holidays_set(2019)
# Fixed days section:
# 1. New Year's Day
self.assertIn(date(2019, 1, 1), holidays)
# 2. Mozambican Heroes' Day
self.assertIn(date(2019, 2, 3), holidays)
# 3. Mozambican Women's Day
self.assertIn(date(2019, 4, 7), holidays)
# 4. Good Friday
self.assertIn(date(2019, 4, 19), holidays)
# 5. Labour Day
self.assertIn(date(2019, 5, 1), holidays)
# 6. Independence Day
self.assertIn(date(2019, 6, 25), holidays)
# 7. Victory Day
self.assertIn(date(2019, 9, 7), holidays)
# 8. Armed Forces Day
self.assertIn(date(2019, 9, 25), holidays)
# 9. Peace And Reconciliation Day
self.assertIn(date(2019, 10, 4), holidays)
# 10. Christmas day
self.assertIn(date(2019, 12, 25), holidays)
def test_year_2020(self):
holidays = self.cal.holidays_set(2020)
# Fixed days section:
# 1. New Year's Day
self.assertIn(date(2020, 1, 1), holidays)
# 2. Mozambican Heroes' Day
self.assertIn(date(2020, 2, 3), holidays)
# 3. Mozambican Women's Day
self.assertIn(date(2020, 4, 7), holidays)
# 4. Good Friday
self.assertIn(date(2020, 4, 10), holidays)
# 5. Labour Day
self.assertIn(date(2020, 5, 1), holidays)
# 6. Independence Day
self.assertIn(date(2020, 6, 25), holidays)
# 7. Victory Day
self.assertIn(date(2020, 9, 7), holidays)
# 8. Armed Forces Day
self.assertIn(date(2020, 9, 25), holidays)
# 9. Peace And Reconciliation Day
self.assertIn(date(2020, 10, 4), holidays)
# 10. Christmas day
self.assertIn(date(2020, 12, 25), holidays)
def test_2020_new_years_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 1, 1)], "New year")
def test_2020_heroes_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 2, 3)], "Mozambican Heroes' Day")
def test_2020_women_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 4, 7)], "Mozambican Women's Day")
def test_2020_good_friday_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 4, 10)], "Good Friday")
def test_2020_labour_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 5, 1)], "Labour Day")
def test_2020_independence_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 6, 25)], "Independence Day")
def test_2020_victory_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 9, 7)], "Victory Day")
def test_2020_armed_forces_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 9, 25)], "Armed Forces Day")
def test_2020_peace_and_reconciliation_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 10, 4)], "Peace And Reconciliation Day")
def test_2020_christmas_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 12, 25)], "Christmas Day")
|
grr/server/grr_response_server/bin/__init__.py | nkrios/grr | 4,238 | 12765429 | #!/usr/bin/env python
"""GRR server entry points."""
|
Chapter 04/4.01/model.py | ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition | 120 | 12765488 |
"""
Code illustration: 4.01
@ Tkinter GUI Application Development Blueprints
"""
from configurations import *
class Model():
def __init__(self):
pass
|
amrlib/alignments/faa_aligner/faa_aligner.py | plandes/amrlib | 103 | 12765493 |
import os
import sys
import json
import subprocess
import logging
import tarfile
from .preprocess import preprocess_infer
from .postprocess import postprocess
from .get_alignments import GetAlignments
from ..penman_utils import to_graph_line
from ...defaults import data_dir
logger = logging.getLogger(__name__)
this_dir = os.path.dirname(os.path.realpath(__file__))
class FAA_Aligner(object):
def __init__(self, **kwargs):
self.model_dir = kwargs.get('model_dir', os.path.join(data_dir, 'model_aligner_faa'))
self.model_tar_fn = kwargs.get('model_tar_fn', os.path.join(this_dir, 'model_aligner_faa.tar.gz'))
self.setup_model_dir()
self.aligner = TrainedAligner(self.model_dir, **kwargs)
try:
self.aligner.check_for_binaries() # Will raise FileNotFoundError if binaries can't be found
except FileNotFoundError:
logger.critical('No binaries for fast_algin (https://github.com/clab/fast_align) found. ' \
'These must be installed to use the faa_aligner. See the amrlib docs for details.')
raise
# Input space_tok_sents is a list of space tokenized strings
    # graph_strings is a list of AMR graph strings of the same length.
def align_sents(self, space_tok_sents, graph_strings):
assert len(space_tok_sents) == len(graph_strings)
graph_strings = [to_graph_line(g) for g in graph_strings]
data = preprocess_infer(space_tok_sents, graph_strings, skip_empty_check=True)
# Filter lines for empty strings. The aligner doesn't return a value for blanks on either eng or amr
skips, eng_lines, amr_lines = set(), [], []
for i, (eng_l, amr_l) in enumerate(zip(data.eng_preproc_lines, data.amr_preproc_lines)):
eng_l, amr_l = eng_l.strip(), amr_l.strip()
if not eng_l or not amr_l:
skips.add(i)
else:
eng_lines.append(eng_l)
amr_lines.append(amr_l)
model_out_lines = self.aligner.align(eng_lines, amr_lines)
assert len(model_out_lines) == len(eng_lines)
# Add back in blanks for skipped lines
final_astrings = [''] * len(data.eng_preproc_lines)
for i in range(len(final_astrings)):
if i not in skips:
final_astrings[i] = model_out_lines.pop(0)
data.model_out_lines = final_astrings
amr_surface_aligns, alignment_strings = postprocess(data)
return amr_surface_aligns, alignment_strings
    # Check the model directory; if it doesn't have the metadata file, try to create
# the directory from the tar.gz file
def setup_model_dir(self):
# Check for the metadata and if so, consider the model ready to go
if os.path.isfile(os.path.join(self.model_dir, 'amrlib_meta.json')):
return True
        # if there's a local copy, extract it
elif os.path.isfile(self.model_tar_fn):
tar = tarfile.open(self.model_tar_fn)
tar.extractall(path=data_dir)
logger.info('Extracting a local copy of model')
if os.path.isfile(os.path.join(self.model_dir, 'amrlib_meta.json')):
return True
else:
return False
else:
logger.critical('No model in model_dir and no local version available to extract')
return False
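# Illustrative usage sketch (not part of the original module). The sentence and
# AMR graph below are made-up examples; align_sents expects space-tokenized
# sentences and AMR graph strings of the same length, and constructing the
# aligner requires the fast_align binaries to be installed.
def _faa_aligner_example():
    sents = ['the boy wants to go']
    graphs = ['(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-02 :ARG0 b))']
    aligner = FAA_Aligner()
    amr_surface_aligns, alignment_strings = aligner.align_sents(sents, graphs)
    return amr_surface_aligns, alignment_strings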
# Code adapted from from https://github.com/clab/fast_align/blob/master/src/force_align.py
class TrainedAligner:
def __init__(self, model_in_dir, **kwargs):
# If the bin_dir is not provided, get it from the environment, but default
# to '' which means it must be in the path
bin_dir = os.environ.get('FABIN_DIR', '')
bin_dir = kwargs.get('bin_dir', bin_dir)
self.fast_align = os.path.join(bin_dir, 'fast_align')
self.atools = os.path.join(bin_dir, 'atools')
fwd_params_fn = os.path.join(model_in_dir, 'fwd_params')
rev_params_fn = os.path.join(model_in_dir, 'rev_params')
# Get the parameters from the metadata
with open(os.path.join(model_in_dir, 'amrlib_meta.json')) as f:
meta = json.load(f)
p = meta['train_params']
        # timeout (in seconds) for the subprocess calls to complete
self.timeout = kwargs.get('timeout', 1.0)
# Create the actual commands to execute
fwd_cmd = '%s -i - -d -q %f -a %f -T %f -m %f -f %s' % \
(self.fast_align, p['q'], p['a'], p['fwd_T'], p['fwd_m'], fwd_params_fn)
rev_cmd = '%s -i - -d -q %f -a %f -T %f -m %f -f %s -r' % \
(self.fast_align, p['q'], p['a'], p['fwd_T'], p['fwd_m'], rev_params_fn)
tools_cmd = '%s -i - -j - -c %s' % (self.atools, p['heuristic'])
self.fwd_cmd = fwd_cmd.split()
self.rev_cmd = rev_cmd.split()
self.tools_cmd = tools_cmd.split()
# Open a connection to the subprocess in text mode
@staticmethod
def popen_io(cmd):
return subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, text=True)
def align(self, eng_td_lines, amr_td_lines):
# Combine lines into fast align input format
lines = ['%s ||| %s' % (el, al) for el, al in zip(eng_td_lines, amr_td_lines)]
# Open connections to the alignment binaries
self.fwd_align = self.popen_io(self.fwd_cmd)
self.rev_align = self.popen_io(self.rev_cmd)
self.tools = self.popen_io(self.tools_cmd)
# Input to fast_align
fa_in = '\n'.join([l.strip() for l in lines])
fwd_out, fwd_err = self.fwd_align.communicate(fa_in, timeout=self.timeout)
rev_out, fwd_err = self.rev_align.communicate(fa_in, timeout=self.timeout)
# output is f words ||| e words ||| links ||| score
fwd_lines = [l.split('|||')[2].strip() for l in fwd_out.splitlines() if l]
rev_lines = [l.split('|||')[2].strip() for l in rev_out.splitlines() if l]
# Input to atools
# be sure to put a line-feed at the end or you'll get a duplicate line in the output
at_in = '\n'.join(['%s\n%s' % (fl, rl) for fl, rl in zip(fwd_lines, rev_lines)]) + '\n'
at_out, at_err = self.tools.communicate(at_in, timeout=self.timeout)
at_lines = [l.strip() for l in at_out.splitlines()]
return at_lines
# This will raise FileNotFoundError if either call fails
# Note that both commands trigger the help message and will produce a return-code of 1
    # which is typically considered an error
def check_for_binaries(self):
ret_fa = subprocess.run(self.fast_align, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
ret_tool = subprocess.run(self.atools, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
|
examples/rkhs.py | gautam1858/autograd | 6,119 | 12765495 |
"""
Inferring a function from a reproducing kernel Hilbert space (RKHS) by taking
gradients of eval with respect to the function-valued argument
"""
from __future__ import print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.extend import primitive, defvjp, defjvp, VSpace, Box
from autograd.util import func
from autograd import grad
class RKHSFun(object):
def __init__(self, kernel, alphas={}):
self.alphas = alphas
self.kernel = kernel
self.vs = RKHSFunVSpace(self)
@primitive
def __call__(self, x):
return sum([a * self.kernel(x, x_repr)
for x_repr, a in self.alphas.items()], 0.0)
def __add__(self, f): return self.vs.add(self, f)
def __mul__(self, a): return self.vs.scalar_mul(self, a)
# TODO: add vjp of __call__ wrt x (and show it in action)
defvjp(func(RKHSFun.__call__),
lambda ans, f, x: lambda g: RKHSFun(f.kernel, {x : 1}) * g)
class RKHSFunBox(Box, RKHSFun):
@property
def kernel(self): return self._value.kernel
RKHSFunBox.register(RKHSFun)
class RKHSFunVSpace(VSpace):
def __init__(self, value):
self.kernel = value.kernel
def zeros(self): return RKHSFun(self.kernel)
def randn(self):
# These arbitrary vectors are not analogous to randn in any meaningful way
N = npr.randint(1,3)
return RKHSFun(self.kernel, dict(zip(npr.randn(N), npr.randn(N))))
def _add(self, f, g):
assert f.kernel is g.kernel
return RKHSFun(f.kernel, add_dicts(f.alphas, g.alphas))
def _scalar_mul(self, f, a):
return RKHSFun(f.kernel, {x : a * a_cur for x, a_cur in f.alphas.items()})
def _inner_prod(self, f, g):
assert f.kernel is g.kernel
return sum([a1 * a2 * f.kernel(x1, x2)
for x1, a1 in f.alphas.items()
for x2, a2 in g.alphas.items()], 0.0)
RKHSFunVSpace.register(RKHSFun)
def add_dicts(d1, d2):
d = {}
    for k, v in list(d1.items()) + list(d2.items()):
d[k] = d[k] + v if k in d else v
return d
if __name__=="__main__":
def sq_exp_kernel(x1, x2): return np.exp(-(x1-x2)**2)
xs = range(5)
ys = [1, 2, 3, 2, 1]
def logprob(f, xs, ys):
return -sum((f(x) - y)**2 for x, y in zip(xs, ys))
f = RKHSFun(sq_exp_kernel)
for i in range(100):
f = f + grad(logprob)(f, xs, ys) * 0.01
for x, y in zip(xs, ys):
print('{}\t{}\t{}'.format(x, y, f(x)))
|
test/nmea_queue_test.py | quiet-oceans/libais | 161 | 12765509 |
"""Tests for ais.nmea_queue."""
import contextlib
import unittest
import pytest
import six
from six.moves import StringIO
import ais
from ais import nmea
from ais import nmea_queue
BARE_NMEA = """
# pylint: disable=line-too-long
$GPZDA,203003.00,12,07,2009,00,00,*47
!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E
!BSVDM,1,1,,A,15Mj23`PB`o=Of>KjvnJg8PT0L2R,0*7E
!SAVDM,1,1,,B,35Mj2p001qo@5tVKLBWmIDJT01:@,0*33
!AIVDM,1,1,,A,B5NWV1P0<vSE=I3QdK4bGwoUoP06,0*4F
!SAVDM,1,1,,A,403Owi1utn1W0qMtr2AKStg020S:,0*4B
!SAVDM,2,1,4,A,55Mub7P00001L@;SO7TI8DDltqB222222222220O0000067<0620@jhQDTVG,0*43
!SAVDM,2,2,4,A,30H88888880,2*49
"""
TAG_BLOCK = r"""
# pylint: disable=line-too-long
\n:440661,s:r3669963,c:1428537660*0F\$GPZDA,000253,09,04,2015,+00,00*6C
\g:1-2-4372,s:rORBCOMM109,c:1426032000,T:2015-03-11 00.00.00*32\!AIVDM,2,1,2,B,576u>F02>hOUI8AGR20tt<j104p4l62222222216H14@@Hoe0JPEDp1TQH88,0*16
\s:rORBCOMM999u,c:1426032000,T:2015-03-11 00.00.00*36\!AIVDM,1,1,,,;5Qu0v1utmGssvvkA`DRgm100000,0*46
\g:2-2-4372,s:rORBCOMM109,c:1426032000,T:2015-03-11 00.00.00*31\!AIVDM,2,2,2,B,88888888880,2*25
\g:1-2-27300,n:636994,s:b003669710,c:1428621738*5F\!SAVDM,2,1,2,B,55Mw@A7J1adAL@?;7WPl58F0U<h4pB222222220t1PN5553fN4g?`4iSp5Rc,0*26
\g:2-2-27300,n:636995*15\!SAVDM,2,2,2,B,iP`88888880,2*5E
\n:636996,s:b003669710,c:1428621738*19\!SAVDM,1,1,,B,35Mv4LPP@Go?FFtEbDDWQmlT20k@,0*04
\g:4-4-993623,n:577969*22\$ARVSI,r003669930,,233948.825272,1831,-97,0*24
\n:80677,s:b003669952,c:1428884269*2A\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17
"""
USCG = r"""
# pylint: disable=line-too-long
!SAVDM,1,1,,A,15N4OMPP01I<cGrA1v>Id?vF060l,0*22,b003669978,1429287189
!SAVDM,2,1,4,B,54h@7?02BAF=`L4wN21<eTH4hj2222222222220U4HG6553U06T0C3H0Q@@j,0*5D,d-86,S389,t161310.00,T10.377780,D07MN-MI-LAKBS1,1429287190
!SAVDM,2,2,4,B,88888888880,2*39,d-86,S389,t161310.00,T10.377780,D07MN-MI-LAKBS1,1429287190
!AIVDM,1,1,,B,3592u`iP03GWEflBRosm0Ov@0000,0*70,d-107,S0297,t161407.00,T07.92201452,r11CSDO1,1429287248
!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1429287258
"""
MIXED = r"""
!SAVDM,1,1,,A,15N4OMPP01I<cGrA1v>Id?vF060l,0*22,b003669978,1429287189
!SAVDM,1,1,,A,403Owi1utn1W0qMtr2AKStg020S:,0*4B
\n:80677,s:b003669952,c:1428884269*2A\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17
random text
"""
class NmeaQueueTest(unittest.TestCase):
def testTextData(self):
# These lines should all pass straight through.
src_lines = (
'',
'a',
'123',
# Not quite NMEA strings.
'$GPZDA',
'!AIVDM',
'*FF',)
queue = nmea_queue.NmeaQueue()
for line in src_lines:
queue.put(line)
self.assertEqual(queue.qsize(), len(src_lines))
for i in range(1, queue.qsize() + 1):
msg = queue.get()
self.assertEqual(msg['line_nums'], [i])
self.assertEqual(msg['line_type'], nmea.TEXT)
self.assertEqual(msg['lines'], list(src_lines[i-1:i]))
self.assertEqual(msg,
{'line_nums': [6], 'line_type': 'TEXT', 'lines': ['*FF']})
def testBareSingleLineData(self):
queue = nmea_queue.NmeaQueue()
lines = [line for line in BARE_NMEA.split('\n') if ',' in line]
for line in lines:
queue.put(line)
self.assertEqual(queue.qsize(), 7)
msgs = []
while not queue.empty():
msgs.append(queue.get())
self.assertEqual(msgs[0],
{'line_nums': [1],
'line_type': 'BARE',
'lines': ['$GPZDA,203003.00,12,07,2009,00,00,*47']})
self.assertEqual(
msgs[1],
{'decoded': {
'cog': 52.099998474121094,
'id': 2,
'md5': '99c8c2804fde0481e6143051930b66c4',
'mmsi': 218069000,
'nav_status': 0,
'position_accuracy': 0,
'raim': False,
'repeat_indicator': 0,
'rot': 0.0,
'rot_over_range': False,
'slot_number': 683,
'slot_timeout': 2,
'sog': 11.100000381469727,
'spare': 0,
'special_manoeuvre': 0,
'sync_state': 0,
'timestamp': 16,
'true_heading': 48,
'x': -118.227775,
'y': 31.24317},
'line_nums': [2],
'line_type': 'BARE',
'lines': ['!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E'],
'matches': [{
'body': '23?up2001gGRju>Ap:;R2APP08:c',
'chan': 'B',
'checksum': '0E',
'fill_bits': 0,
'sen_num': 1,
'sen_tot': 1,
'seq_id': None,
'talker': 'AI',
'vdm_type': 'VDM',
'vdm': '!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E'}]}
)
def testTagBlockLines(self):
queue = nmea_queue.NmeaQueue()
lines = [line for line in TAG_BLOCK.split('\n') if ',' in line]
for line in lines:
queue.put(line)
self.assertEqual(queue.qsize(), 6)
msgs = []
while not queue.empty():
msgs.append(queue.get())
# self.assertNotIn('decoded', msgs[0])
# TODO(schwehr): Check the ZDA message decoding.
for msg_num in range(1, 5):
self.assertIn('decoded', msgs[msg_num])
ids = [msg['decoded']['id'] for msg in msgs[1:] if 'decoded' in msg]
self.assertEqual(ids, [11, 5, 5, 3, 27])
self.assertEqual(
msgs[-1],
{'decoded': {
'cog': 131,
'gnss': True,
'id': 27,
'md5': '50898a3435865cf76f1b502b2821672b',
'mmsi': 577305000,
'nav_status': 5,
'position_accuracy': 1,
'raim': False,
'repeat_indicator': 0,
'sog': 0,
'spare': 0,
'x': -90.20666666666666,
'y': 29.145},
'line_nums': [9],
'line_type': 'TAGB',
'lines': [
'\\n:80677,s:b003669952,c:1428884269*2A'
'\\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17'],
'matches': [{
'dest': None,
'group': None,
'group_id': None,
'line_num': 80677,
'metadata': 'n:80677,s:b003669952,c:1428884269*2A',
'payload': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
'quality': None,
'rcvr': 'b003669952',
'rel_time': None,
'sentence_num': None,
'sentence_tot': None,
'tag_checksum': '2A',
'text': None,
'text_date': None,
'time': 1428884269}],
'times': [1428884269]})
def testUscgLines(self):
queue = nmea_queue.NmeaQueue()
lines = [line for line in USCG.split('\n') if ',' in line]
for line in lines:
queue.put(line)
self.assertEqual(queue.qsize(), 4)
msgs = []
while not queue.empty():
msgs.append(queue.get())
for msg in msgs:
self.assertIn('decoded', msg)
ids = [msg['decoded']['id'] for msg in msgs]
self.assertEqual(ids, [1, 5, 3, 27])
self.assertEqual(
msgs[3],
{
'decoded': {
'cog': 131,
'gnss': True,
'id': 27,
'md5': '50898a3435865cf76f1b502b2821672b',
'mmsi': 577305000,
'nav_status': 5,
'position_accuracy': 1,
'raim': False,
'repeat_indicator': 0,
'sog': 0,
'spare': 0,
'x': -90.20666666666666,
'y': 29.145},
'line_nums': [5],
'line_type': 'USCG',
'lines': ['!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1429287258'],
'matches': [{
'body': 'K8VSqb9LdU28WP8<',
'chan': 'B',
'checksum': '17',
'counter': None,
'fill_bits': 0,
'hour': None,
'minute': None,
'payload': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
'receiver_time': None,
'rssi': None,
'second': None,
'sen_num': 1,
'sen_tot': 1,
'seq_id': None,
'signal_strength': None,
'slot': None,
'station': 'rMySat',
'station_type': 'r',
'talker': 'SA',
'time': 1429287258,
'time_of_arrival': None,
'uscg_metadata': ',rMySat,1429287258',
'vdm': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
'vdm_type': 'VDM'}]})
def testMixedLines(self):
queue = nmea_queue.NmeaQueue()
lines = [line for line in MIXED.split('\n') if line.strip()]
for line in lines:
queue.put(line)
self.assertEqual(queue.qsize(), 4)
msgs = []
while not queue.empty():
msgs.append(queue.get())
for msg in msgs[:-1]:
self.assertIn('decoded', msg)
ids = [msg['decoded']['id'] for msg in msgs[:-1]]
self.assertEqual(ids, [1, 4, 27])
line_types = [msg['line_type'] for msg in msgs]
self.assertEqual(
line_types,
[nmea.USCG, nmea.BARE, nmea.TAGB, nmea.TEXT])
@pytest.mark.parametrize("nmea", [
six.text_type(BARE_NMEA.strip()),
six.text_type(TAG_BLOCK.strip()),
six.text_type(USCG.strip()),
six.text_type(MIXED.strip())
])
def test_NmeaFile_against_queue(nmea):
queue = nmea_queue.NmeaQueue()
for line in nmea.splitlines():
queue.put(line)
expected = []
msg = queue.GetOrNone()
while msg:
expected.append(msg)
msg = queue.GetOrNone()
with contextlib.closing(StringIO(nmea)) as f, ais.open(f) as src:
actual = list(src)
for e, a in zip(expected, actual):
assert e == a
if __name__ == '__main__':
unittest.main()
|
neo/test/iotest/test_elphyio.py | Mario-Kart-Felix/python-neo | 199 | 12765528 | """
Tests of neo.io.elphyio
"""
import unittest
from neo.io import ElphyIO
from neo.test.iotest.common_io_test import BaseTestIO
class TestElphyIO(BaseTestIO, unittest.TestCase):
ioclass = ElphyIO
entities_to_download = [
'elphy'
]
entities_to_test = ['elphy/DATA1.DAT',
'elphy/ElphyExample.DAT',
'elphy/ElphyExample_Mode1.dat',
'elphy/ElphyExample_Mode2.dat',
'elphy/ElphyExample_Mode3.dat']
def test_read_data(self):
for filename in self.entities_to_test:
io = ElphyIO(self.get_local_path(filename))
bl = io.read_block()
self.assertTrue(len(bl.segments) > 0)
# ensure that at least one data object is generated for each file
self.assertTrue(any(list(bl.segments[0].size.values())))
if __name__ == "__main__":
unittest.main()
|
tests/apps/packages/test_xmlrpc.py | tranarthur/localshop | 162 | 12765574 | import xmlrpc.client as xmlrpclib
import pytest
from tests.factories import ReleaseFactory
@pytest.fixture(params=['/RPC2', '/pypi'])
def rpc_endpoint(request):
return request.param
@pytest.mark.django_db
def test_search_package_name(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(
package__name='my-package', package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': 'my-package'})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_package_summary(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(
package__name='my-package', package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'summary': ['Test summary']})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_operator_and(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(package__name='my-package-1',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='arcoiro',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='my-package-2',
package__repository=repository,
summary='arcoiro')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'],
'summary': ['Test summary']}, 'and')
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package-1',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_operator_or(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(package__name='my-package-1',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='arcoiro',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='my-package-2',
package__repository=repository,
summary='arcoiro')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'],
'summary': ['Test summary']}, 'or')
assert response == [{
'_pypi_ordering': 0,
'name': 'arcoiro',
'summary': 'Test summary',
'version': '1.0.0'
},
{
'_pypi_ordering': 0,
'name': 'my-package-1',
'summary': 'Test summary',
'version': '1.0.0'
},
{
'_pypi_ordering': 0,
'name': 'my-package-2',
'summary': 'arcoiro',
'version': '1.0.0'
}]
@pytest.mark.django_db
def test_search_invalid_fields_are_ignores(client, admin_user, live_server,
repository, rpc_endpoint):
ReleaseFactory(package__name='my-package',
package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'], 'invalid': ['Ops']})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
|
dmbrl/config/reacher.py | nikkik11/handful-of-trials | 358 | 12765583 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from dotmap import DotMap
import gym
from dmbrl.misc.DotmapUtils import get_required_argument
from dmbrl.modeling.layers import FC
import dmbrl.env
class ReacherConfigModule:
ENV_NAME = "MBRLReacher3D-v0"
TASK_HORIZON = 150
NTRAIN_ITERS = 100
NROLLOUTS_PER_ITER = 1
PLAN_HOR = 25
MODEL_IN, MODEL_OUT = 24, 17
GP_NINDUCING_POINTS = 200
def __init__(self):
self.ENV = gym.make(self.ENV_NAME)
self.ENV.reset()
cfg = tf.ConfigProto()
cfg.gpu_options.allow_growth = True
self.SESS = tf.Session(config=cfg)
self.NN_TRAIN_CFG = {"epochs": 5}
self.OPT_CFG = {
"Random": {
"popsize": 2000
},
"CEM": {
"popsize": 400,
"num_elites": 40,
"max_iters": 5,
"alpha": 0.1
}
}
self.UPDATE_FNS = [self.update_goal]
self.goal = tf.Variable(self.ENV.goal, dtype=tf.float32)
self.SESS.run(self.goal.initializer)
@staticmethod
def obs_postproc(obs, pred):
return obs + pred
@staticmethod
def targ_proc(obs, next_obs):
return next_obs - obs
def update_goal(self, sess=None):
if sess is not None:
self.goal.load(self.ENV.goal, sess)
def obs_cost_fn(self, obs):
if isinstance(obs, np.ndarray):
return np.sum(np.square(ReacherConfigModule.get_ee_pos(obs, are_tensors=False) - self.ENV.goal), axis=1)
else:
return tf.reduce_sum(tf.square(ReacherConfigModule.get_ee_pos(obs, are_tensors=True) - self.goal), axis=1)
@staticmethod
def ac_cost_fn(acs):
if isinstance(acs, np.ndarray):
return 0.01 * np.sum(np.square(acs), axis=1)
else:
return 0.01 * tf.reduce_sum(tf.square(acs), axis=1)
def nn_constructor(self, model_init_cfg):
model = get_required_argument(model_init_cfg, "model_class", "Must provide model class")(DotMap(
name="model", num_networks=get_required_argument(model_init_cfg, "num_nets", "Must provide ensemble size"),
sess=self.SESS, load_model=model_init_cfg.get("load_model", False),
model_dir=model_init_cfg.get("model_dir", None)
))
if not model_init_cfg.get("load_model", False):
model.add(FC(200, input_dim=self.MODEL_IN, activation="swish", weight_decay=0.00025))
model.add(FC(200, activation="swish", weight_decay=0.0005))
model.add(FC(200, activation="swish", weight_decay=0.0005))
model.add(FC(200, activation="swish", weight_decay=0.0005))
model.add(FC(self.MODEL_OUT, weight_decay=0.00075))
model.finalize(tf.train.AdamOptimizer, {"learning_rate": 0.00075})
return model
def gp_constructor(self, model_init_cfg):
model = get_required_argument(model_init_cfg, "model_class", "Must provide model class")(DotMap(
name="model",
kernel_class=get_required_argument(model_init_cfg, "kernel_class", "Must provide kernel class"),
kernel_args=model_init_cfg.get("kernel_args", {}),
num_inducing_points=get_required_argument(
model_init_cfg, "num_inducing_points", "Must provide number of inducing points."
),
sess=self.SESS
))
return model
@staticmethod
def get_ee_pos(states, are_tensors=False):
theta1, theta2, theta3, theta4, theta5, theta6, theta7 = \
states[:, :1], states[:, 1:2], states[:, 2:3], states[:, 3:4], states[:, 4:5], states[:, 5:6], states[:, 6:]
if are_tensors:
rot_axis = tf.concat([tf.cos(theta2) * tf.cos(theta1), tf.cos(theta2) * tf.sin(theta1), -tf.sin(theta2)],
axis=1)
rot_perp_axis = tf.concat([-tf.sin(theta1), tf.cos(theta1), tf.zeros(tf.shape(theta1))], axis=1)
cur_end = tf.concat([
0.1 * tf.cos(theta1) + 0.4 * tf.cos(theta1) * tf.cos(theta2),
0.1 * tf.sin(theta1) + 0.4 * tf.sin(theta1) * tf.cos(theta2) - 0.188,
-0.4 * tf.sin(theta2)
], axis=1)
for length, hinge, roll in [(0.321, theta4, theta3), (0.16828, theta6, theta5)]:
perp_all_axis = tf.cross(rot_axis, rot_perp_axis)
x = tf.cos(hinge) * rot_axis
y = tf.sin(hinge) * tf.sin(roll) * rot_perp_axis
z = -tf.sin(hinge) * tf.cos(roll) * perp_all_axis
new_rot_axis = x + y + z
new_rot_perp_axis = tf.cross(new_rot_axis, rot_axis)
new_rot_perp_axis = tf.where(tf.less(tf.norm(new_rot_perp_axis, axis=1), 1e-30),
rot_perp_axis, new_rot_perp_axis)
new_rot_perp_axis /= tf.norm(new_rot_perp_axis, axis=1, keepdims=True)
rot_axis, rot_perp_axis, cur_end = new_rot_axis, new_rot_perp_axis, cur_end + length * new_rot_axis
else:
rot_axis = np.concatenate([np.cos(theta2) * np.cos(theta1), np.cos(theta2) * np.sin(theta1), -np.sin(theta2)],
axis=1)
rot_perp_axis = np.concatenate([-np.sin(theta1), np.cos(theta1), np.zeros(theta1.shape)], axis=1)
cur_end = np.concatenate([
0.1 * np.cos(theta1) + 0.4 * np.cos(theta1) * np.cos(theta2),
0.1 * np.sin(theta1) + 0.4 * np.sin(theta1) * np.cos(theta2) - 0.188,
-0.4 * np.sin(theta2)
], axis=1)
for length, hinge, roll in [(0.321, theta4, theta3), (0.16828, theta6, theta5)]:
perp_all_axis = np.cross(rot_axis, rot_perp_axis)
x = np.cos(hinge) * rot_axis
y = np.sin(hinge) * np.sin(roll) * rot_perp_axis
z = -np.sin(hinge) * np.cos(roll) * perp_all_axis
new_rot_axis = x + y + z
new_rot_perp_axis = np.cross(new_rot_axis, rot_axis)
new_rot_perp_axis[np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30] = \
rot_perp_axis[np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30]
new_rot_perp_axis /= np.linalg.norm(new_rot_perp_axis, axis=1, keepdims=True)
rot_axis, rot_perp_axis, cur_end = new_rot_axis, new_rot_perp_axis, cur_end + length * new_rot_axis
return cur_end
CONFIG_MODULE = ReacherConfigModule
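# Illustrative sketch (not part of the original config): in the PETS planning
# loop the per-step cost is conventionally obs_cost_fn(obs) + ac_cost_fn(acs)
# (an assumption about how dmbrl combines them); the action term alone can be
# checked without constructing the environment.
def _example_action_cost():
    acs = np.ones((2, 7), dtype=np.float32)     # hypothetical action batch
    return ReacherConfigModule.ac_cost_fn(acs)  # 0.01 * 7 = 0.07 per row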
|
src/captcha/tests/urls.py | daniel-werner/stelagifts | 108 | 12765673 |
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'test/$','captcha.tests.views.test',name='captcha-test'),
url(r'test2/$','captcha.tests.views.test_custom_error_message',name='captcha-test-custom-error-message'),
url(r'test3/$','captcha.tests.views.test_per_form_format', name='test_per_form_format'),
url(r'',include('captcha.urls')),
)
|
dataset/lfw.py | Vicent-xd/Residual_autoencoding | 432 | 12765685 |
import os
import numpy as np
import joblib
from skimage import transform
import deeppy as dp
from .augment import (img_augment, sample_img_augment_params, AugmentedFeed,
SupervisedAugmentedFeed)
from .util import img_transform
cachedir = os.getenv('CACHE_HOME', './cache')
mem = joblib.Memory(cachedir=os.path.join(cachedir, 'lfw'))
@mem.cache
def lfw_imgs(alignment):
if alignment == 'landmarks':
dataset = dp.dataset.LFW('original')
imgs = dataset.imgs
landmarks = dataset.landmarks('68')
n_landmarks = 68
landmarks_mean = np.mean(landmarks, axis=0)
landmarks_mean = np.array([landmarks_mean[:n_landmarks],
landmarks_mean[n_landmarks:]])
aligned_imgs = []
for img, points in zip(imgs, landmarks):
points = np.array([points[:n_landmarks], points[n_landmarks:]])
transf = transform.estimate_transform('similarity',
landmarks_mean.T, points.T)
img = img / 255.
img = transform.warp(img, transf, order=3)
img = np.round(img*255).astype(np.uint8)
aligned_imgs.append(img)
imgs = np.array(aligned_imgs)
else:
dataset = dp.dataset.LFW(alignment)
imgs = dataset.imgs
return imgs
def lfw_imgs_split(alignment, split_name, with_attributes=True, test_fold=0):
imgs = lfw_imgs(alignment)
dataset = dp.dataset.LFW()
if split_name == 'testtrain':
all_persons = list(dataset.index.keys())
test_persons = dataset.people_splits['test'][test_fold]
persons = [p for p in all_persons if p not in test_persons]
if split_name == 'valtrain':
        persons = dataset.people_splits['train']
elif split_name == 'val':
persons = dataset.people_splits[split_name]
elif split_name == 'test':
persons = dataset.people_splits[split_name][test_fold]
if not with_attributes:
new_imgs = []
for person_id in persons:
for img_idx in dataset.index[person_id]:
new_imgs.append(imgs[img_idx])
imgs = np.array(new_imgs)
return imgs
    # Extract attribute vectors and discard images without attributes
new_imgs = []
attrs = []
for person_id in persons:
if person_id in dataset.attributes:
for img_no in range(1, len(dataset.index[person_id])+1):
if img_no in dataset.attributes[person_id]:
new_imgs.append(imgs[dataset.index[person_id][img_no-1]])
attrs.append(dataset.attributes[person_id][img_no])
imgs = np.array(new_imgs)
attrs = np.array(attrs).astype(dp.float_)
return imgs, attrs
def _resize(args):
img, crop_size, rescale_size = args
crop = (img.shape[0] - crop_size) // 2
img = img[crop:-crop, crop:-crop]
img = transform.resize(img, (rescale_size, rescale_size, 3), order=3)
img = (img*255).astype(np.uint8)
return img
def _resize_augment(args):
img, crop_size, rescale_size = args
augment_params = sample_img_augment_params(
translation_sigma=1.0, scale_sigma=0.01, rotation_sigma=0.01,
gamma_sigma=0.07, contrast_sigma=0.07, hue_sigma=0.0125
)
img = img_augment(img, *augment_params)
img = _resize((img, crop_size, rescale_size))
return img
@mem.cache
def resize_imgs(imgs, crop_size, rescale_size, n_augment=0):
if n_augment == 0:
preprocess_fun = _resize
n_imgs = len(imgs)
else:
preprocess_fun = _resize_augment
n_imgs = n_augment
def img_iter():
for i in range(n_imgs):
yield imgs[i % len(imgs)]
with joblib.Parallel(n_jobs=-2) as parallel:
imgs = parallel(joblib.delayed(preprocess_fun)
((img, crop_size, rescale_size)) for img in img_iter())
imgs = np.array(imgs)
return imgs
@mem.cache
def feeds(alignment, crop_size, rescale_size, batch_size, epoch_size,
n_augment=int(1e5), with_attributes=False, split='val'):
if split == 'val':
train_split = 'valtrain'
test_split = 'val'
elif split == 'test':
train_split = 'testtrain'
test_split = 'test'
x_train, y_train = lfw_imgs_split(alignment, train_split)
# Shuffle training images
idxs = np.random.permutation(len(x_train))
x_train = x_train[idxs]
y_train = y_train[idxs]
if n_augment > 0:
y_train = y_train[np.arange(n_augment) % len(x_train)]
x_train = resize_imgs(x_train, crop_size, rescale_size, n_augment)
x_train = np.transpose(x_train, (0, 3, 1, 2))
x_test, y_test = lfw_imgs_split(alignment, test_split)
x_test = resize_imgs(x_test, crop_size, rescale_size)
x_test = img_transform(x_test, to_bc01=True)
if with_attributes:
train_feed = SupervisedAugmentedFeed(
x_train, y_train, batch_size=batch_size, epoch_size=epoch_size
)
test_feed = dp.SupervisedFeed(
x_test, y_test, batch_size=batch_size
)
else:
train_feed = AugmentedFeed(x_train, batch_size, epoch_size)
test_feed = dp.Feed(x_test, batch_size)
return train_feed, test_feed
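# Illustrative usage sketch (not part of the original module); the alignment
# name and the numeric settings below are assumptions chosen only to show the
# call shape of feeds().
def _example_feeds():
    train_feed, test_feed = feeds(
        alignment='deepfunneled', crop_size=150, rescale_size=64,
        batch_size=128, epoch_size=250, n_augment=int(1e5),
        with_attributes=False, split='test')
    return train_feed, test_feed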
|
2017/quals/2017-re-food/generate_flag.py | tonghuaroot/google-ctf | 2,757 | 12765703 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
def KSA(key):
keylength = len(key)
S = range(256)
j = 0
for i in range(256):
j = (j + S[i] + key[i % keylength]) % 256
S[i], S[j] = S[j], S[i] # swap
return S
def PRGA(S):
i = 0
j = 0
while True:
i = (i + 1) % 256
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i] # swap
K = S[(S[i] + S[j]) % 256]
yield K
def RC4(key):
S = KSA(key)
return PRGA(S)
def sig(v):
if v & 0x80:
return -0x100 + v
return v
flag = 'CTF{bacon_lettuce_tomato_lobster_soul}'
key = [random.choice(range(20)) for x in range(8)]
print 'key is', key
ks = RC4(key)
print 'flag is', [sig(ord(x) ^ y) for (x, y) in zip(flag, ks)]
xor = [random.choice(range(20)) for x in range(8)]
print 'xor 1', xor
print 'xor 2', [x ^ y for (x, y) in zip(key, xor)]
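# Illustrative sketch (not part of the original generator): because RC4 is
# symmetric, the flag can be recovered from the printed key and encoded list by
# masking sig() back to a byte and XORing with the same keystream.
def recover_flag(key, encoded):
  ks = RC4(key)
  return ''.join(chr((v & 0xff) ^ k) for v, k in zip(encoded, ks))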
|
resource_emulation.py | sogeti-esec-lab/LKD | 102 | 12765707 | import ctypes
import itertools
import windows
import windows.hooks
from windows.generated_def.winstructs import *
class Ressource(object):
def __init__(self, filename, lpName, lpType):
self.filename = filename
self.lpName = lpName
self.lpType = lpType
self.driver_data = None
self.loaded_ressource = None
def match(self, hModule, lpName, lpType):
x = not hModule and self.lpName == lpName and self.lpType == lpType
return x
def get_driver_data(self):
if self.driver_data is not None:
return self.driver_data
self.driver_data = open(self.filename, 'rb').read()
return self.driver_data
def load_resource(self):
driver_data = self.get_driver_data()
char_p = ctypes.c_char_p(driver_data)
real_addr = ctypes.cast(char_p, ctypes.c_void_p).value
return real_addr
def resource_len(self):
return len(self.get_driver_data())
resource_list = []
HRSRC_dict = {}
HRSRC_attibution = itertools.count(0x42424242)
@windows.hooks.Callback(PVOID, PVOID, PVOID, PVOID)
def FindResourceWHook(hModule, lpName, lpType, real_function):
for res in resource_list:
if res.match(hModule, lpName, lpType):
HRSRC = next(HRSRC_attibution)
HRSRC_dict[HRSRC] = res
return HRSRC
return real_function()
@windows.hooks.SizeofResourceCallback
def SizeofResourceHook(hModule, hResInfo, real_function):
if hResInfo in HRSRC_dict:
return HRSRC_dict[hResInfo].resource_len()
return real_function()
@windows.hooks.LoadResourceCallback
def LoadResourceHook(hModule, hResInfo, real_function):
if hResInfo in HRSRC_dict:
return HRSRC_dict[hResInfo].load_resource()
return real_function()
@windows.hooks.LockResourceCallback
def LockResourceHook(hResData, real_function):
x = real_function()
return x
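# Hedged usage sketch: register a local file so that FindResourceW /
# SizeofResource / LoadResource / LockResource in the hooked process resolve
# to it. Activating the hooks themselves is handled by the surrounding
# `windows.hooks` framework and is not shown; the name/type values below are
# illustrative.
#
#   resource_list.append(Ressource(r"C:\path\to\driver.sys", lpName=1, lpType=10))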
|
tests/api/serializers.py | TralahM/drf-generators | 340 | 12765723 | <reponame>TralahM/drf-generators
from rest_framework.serializers import ModelSerializer
from api.models import Category, Post
class CategorySerializer(ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class PostSerializer(ModelSerializer):
class Meta:
model = Post
fields = '__all__'
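# Hedged usage sketch (standard DRF wiring; `PostViewSet` is illustrative and
# not part of this app):
#
#   from rest_framework.viewsets import ModelViewSet
#
#   class PostViewSet(ModelViewSet):
#       queryset = Post.objects.all()
#       serializer_class = PostSerializer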
|
bibliopixel/layout/matrix.py | rec/leds | 253 | 12765730 | import math, threading, time
from .. import colors
from .. util import deprecated, log
from . import matrix_drawing as md
from . import font
from . layout import MultiLayout
from . geometry import make_matrix_coord_map_multi
from . geometry.matrix import (
make_matrix_coord_map, make_matrix_coord_map_positions)
ROTATION_WARNING = """
Matrix.rotation must be a multiple of 90 degrees but was in fact %s degrees.
It was rounded to %s degrees."""
class Matrix(MultiLayout):
CLONE_ATTRS = MultiLayout.CLONE_ATTRS + (
'width', 'height', 'rotation', 'vert_flip', 'y_flip', 'serpentine',
'pixelSize')
def __init__(self, drivers, width=0, height=0,
rotation=0, vert_flip=False, y_flip=False,
serpentine=True,
threadedUpdate=False, brightness=255,
pixelSize=(1, 1), **kwargs):
"""Main class for matricies.
driver -- instance that inherits from DriverBase
width -- X axis size of matrix
height -- Y axis size of matrix
coord_map -- a 2D matrix defining the X,Y to strip index mapping.
Not needed in most cases
rotation -- how to rotate when generating the map.
Not used if coord_map specified
vert_flip - flips the generated map along the Y axis.
This along with rotation can achieve any orientation
"""
self.gen_multi = make_matrix_coord_map_multi
super().__init__(drivers, threadedUpdate, brightness, **kwargs)
rot_mod = rotation % 360
self.rotation = 90 * round(rot_mod / 90)
if self.rotation != rot_mod:
log.warning(ROTATION_WARNING, rotation, self.rotation)
self.width = width or getattr(self.drivers[0], 'width') or 0
self.height = height or getattr(self.drivers[0], 'height') or 0
self.vert_flip = vert_flip
self.y_flip = y_flip
self.serpentine = serpentine
self.pixelSize = pixelSize
pw, ph = self.pixelSize
# If both are 0, try to assume it's a square display.
if not (self.width or self.height):
square = int(math.sqrt(self.numLEDs))
if (square * square) == self.numLEDs:
self.width = self.height = square
else:
raise TypeError('No width or height passed but '
'the number of LEDs is not a perfect square')
if self.width * self.height > self.numLEDs:
raise ValueError(
'width * height cannot exceed total pixel count! %s * %s > %s'
% (self.width, self.height, self.numLEDs))
if not self.coord_map:
if len(self.drivers) == 1:
# TODO: this should really go into documentation
log.debug(
'Auto generating coordinate map. Use make_matrix_coord_map '
'directly if more control needed.')
# was switched to y_flip, but need to keep vert_flip available
y_flip = y_flip or vert_flip
self.coord_map = make_matrix_coord_map(
self.width, self.height,
serpentine=serpentine,
rotation=rotation,
                    y_flip=y_flip)
elif self.drivers:
raise TypeError(
'Must provide coord_map if using multiple drivers!')
self.set_pixel_positions(
make_matrix_coord_map_positions(self.coord_map))
# If rotation is 90 or 270 degrees, dimensions need to be swapped so
# they match the matrix rotation.
if rotation in (90, 270):
w = self.width
h = self.height
self.width = h
self.height = w
self.texture = None
self.set = self._setColor
        if pw <= 0 or pw > self.width or ph <= 0 or ph > self.height:
raise ValueError(
'pixelSize must be greater than 0 '
'and not larger than total matrix')
if self.width % pw != 0 or self.height % ph != 0:
raise ValueError(
'pixelSize must evenly divide into matrix dimensions!')
if pw == 1 and ph == 1:
self._set = self.__setNormal
else:
self._set = self.__setScaled
            self.width = self.width // pw
            self.height = self.height // ph
self.numLEDs = self.width * self.height
self.fonts = font.fonts
@property
def shape(self):
"""Returns ``width, height``"""
return self.width, self.height
def get(self, x, y):
"""
        Return the pixel color at position (x, y), or colors.COLORS.Black if
        that position is out of bounds.
"""
try:
pixel = self.coord_map[y][x]
return self._get_base(pixel)
except IndexError:
return colors.COLORS.Black
def set(self, x, y, color):
"""Set the pixel color at position x, y."""
# The actual implementation of this method is computed at construction
# time and monkey-patched in from one of self._setTexture,
# self.__setNormal or self.__setScaled
raise NotImplementedError
def get_pixel_positions(self):
return make_matrix_coord_map_positions(self.coord_map)
def loadFont(self, name, height, width, data):
self.fonts[name] = {
'data': data,
'height': height,
'width': width
}
def setTexture(self, tex=None):
if tex is None:
self.texture = tex
self.set = self._setColor
return
if not isinstance(tex, list):
raise ValueError('Texture must be a list!')
if len(tex) != self.height:
raise ValueError(
                'Given texture must be {} high!'.format(self.height))
for r in tex:
if not isinstance(r, list):
raise ValueError('Texture rows must be lists!')
if len(r) != self.width:
raise ValueError(
'Texture rows must be {} wide!'.format(self.width))
self.texture = tex
self.set = self._setTexture
def __setNormal(self, x, y, color):
try:
pixel = self.coord_map[y][x]
self._set_base(pixel, color)
except IndexError:
pass
def __setScaled(self, x, y, color):
sx = x * self.pixelSize[0]
sy = y * self.pixelSize[1]
for xs in range(sx, sx + self.pixelSize[0]):
for ys in range(sy, sy + self.pixelSize[1]):
self.__setNormal(xs, ys, color)
# Set single pixel to Color value
def _setColor(self, x, y, color=None):
try:
self._set(x, y, color or (0, 0, 0))
except IndexError:
pass
def _setTexture(self, x, y, color=None):
if x >= 0 and y >= 0:
try:
self._set(x, y, color or self.texture[y][x])
except IndexError:
pass
def setHSV(self, x, y, hsv):
color = colors.hsv2rgb(hsv)
self._set(x, y, color)
def setRGB(self, x, y, r, g, b):
color = (r, g, b)
self._set(x, y, color)
##########################################################################
# Drawing Functions
# Lovingly borrowed from Adafruit
# https://github.com/adafruit/Adafruit-GFX-Library/blob/master/Adafruit_GFX.cpp
##########################################################################
def drawCircle(self, x0, y0, r, color=None):
"""
Draw a circle in an RGB color, with center x0, y0 and radius r.
"""
md.draw_circle(self.set, x0, y0, r, color)
def fillCircle(self, x0, y0, r, color=None):
"""
Draw a filled circle in an RGB color, with center x0, y0 and radius r.
"""
md.fill_circle(self.set, x0, y0, r, color)
def drawLine(self, x0, y0, x1, y1, color=None, colorFunc=None, aa=False):
"""
        Draw a line between (x0, y0) and (x1, y1) in an RGB color.
        :param colorFunc: a function that takes an integer from x0 to x1 and
            returns a color corresponding to that point
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.draw_line(self.set, x0, y0, x1, y1, color, colorFunc, aa)
# Bresenham's algorithm
def bresenham_line(self, x0, y0, x1, y1, color=None, colorFunc=None):
"""
        Draw a line from (x0, y0) to (x1, y1) using Bresenham's algorithm.
Will draw beyond matrix bounds.
"""
md.bresenham_line(self.set, x0, y0, x1, y1, color, colorFunc)
# Xiaolin Wu's Line Algorithm
def wu_line(self, x0, y0, x1, y1, color=None, colorFunc=None):
"""
        Draw an anti-aliased line between (x0, y0) and (x1, y1) in an RGB color
        using Xiaolin Wu's algorithm.
        :param colorFunc: a function that takes an integer from x0 to x1 and
            returns a color corresponding to that point
"""
md.wu_line(self.set, x0, y0, x1, y1, color, colorFunc)
def drawRect(self, x, y, w, h, color=None, aa=False):
"""
        Draw a rectangle with top-left corner at (x, y), width w and height h.
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.draw_rect(self.set, x, y, w, h, color, aa)
def fillRect(self, x, y, w, h, color=None, aa=False):
"""
Draw a solid rectangle with top-left corner at (x, y), width w and
height h.
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.fill_rect(self.set, x, y, w, h, color, aa)
def fillScreen(self, color=None):
"""Fill the matrix with the given RGB color"""
md.fill_rect(self.set, 0, 0, self.width, self.height, color)
def drawRoundRect(self, x, y, w, h, r, color=None, aa=False):
"""
Draw a rounded rectangle with top-left corner at (x, y), width w,
height h, and corner radius r
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.draw_round_rect(self.set, x, y, w, h, r, color, aa)
def fillRoundRect(self, x, y, w, h, r, color=None, aa=False):
"""
        Draw a filled rounded rectangle with top-left corner at (x, y), width w,
        height h, and corner radius r.
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.fill_round_rect(self.set, x, y, w, h, r, color, aa)
def drawTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""
Draw triangle with vertices (x0, y0), (x1, y1) and (x2, y2)
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.draw_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
def fillTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""
Draw filled triangle with points x0,y0 - x1,y1 - x2,y2
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.fill_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
if deprecated.allowed(): # pragma: no cover
fillTrangle = fillTriangle
def drawChar(self, x, y, c, color, bg,
aa=False, font=font.default_font, font_scale=1):
"""
        Draw a single character c at (x, y) in an RGB color.
"""
md.draw_char(self.fonts, self.set, self.width, self.height,
x, y, c, color, bg, aa, font, font_scale)
def drawText(self, text, x=0, y=0, color=None,
bg=colors.COLORS.Off, aa=False, font=font.default_font,
font_scale=1):
"""
Draw a line of text starting at (x, y) in an RGB color.
        :param aa: if True, draw anti-aliased using Xiaolin Wu's algorithm;
            otherwise use Bresenham's algorithm
"""
md.draw_text(self.fonts, self.set, text, self.width, self.height,
x, y, color, bg, aa, font, font_scale)
if deprecated.allowed(): # pragma: no cover
LEDMatrix = Matrix
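# Hedged usage sketch: `SimPixel` stands in for any BiblioPixel driver (a
# DriverBase subclass); the import path and constructor arguments are
# illustrative, while the Matrix calls use only methods defined above.
#
#   from bibliopixel.drivers.SimPixel import SimPixel
#   matrix = Matrix(SimPixel(64), width=8, height=8, serpentine=True)
#   matrix.fillScreen(colors.COLORS.Black)
#   matrix.drawLine(0, 0, 7, 7, colors.COLORS.Red, aa=True)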
|
RecoHI/HiTracking/python/HIPixelVertices_cff.py | ckamtsikis/cmssw | 852 | 12765746 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
# pixel cluster vertex finder
from RecoHI.HiTracking.HIPixelClusterVertex_cfi import *
# pixel track producer
from RecoHI.HiTracking.HIPixel3ProtoTracks_cfi import *
# fast vertex finding
from RecoHI.HiTracking.HIPixelMedianVertex_cfi import *
# selected pixel tracks
from RecoHI.HiTracking.HISelectedProtoTracks_cfi import *
# accurate vertex finding
from RecoHI.HiTracking.HIPixelAdaptiveVertex_cfi import *
# selection of best primary vertex
from RecoHI.HiTracking.HIBestVertexSequences_cff import *
hiPixelVerticesTask = cms.Task(hiPixelClusterVertex
, PixelLayerTriplets
, hiPixel3ProtoTracksTask
, hiPixelMedianVertex
, hiSelectedProtoTracks
, hiPixelAdaptiveVertex
, bestHiVertexTask )
hiPixelVertices = cms.Sequence(hiPixelVerticesTask)
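# Hedged usage sketch in a cmsRun configuration (the path label is illustrative):
#
#   process.load("RecoHI.HiTracking.HIPixelVertices_cff")
#   process.vertexing = cms.Path(process.hiPixelVertices)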
|
zentral/contrib/inventory/migrations/0023_puppetdb.py | arubdesu/zentral | 634 | 12765749 | <reponame>arubdesu/zentral<filename>zentral/contrib/inventory/migrations/0023_puppetdb.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-15 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('inventory', '0022_auto_20170530_0724'),
]
operations = [
migrations.CreateModel(
name='PuppetCertificateExtension',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mt_hash', models.CharField(max_length=40, unique=True)),
('mt_created_at', models.DateTimeField(auto_now_add=True)),
('extension_key', models.TextField()),
('extension_value', models.TextField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PuppetDBInventory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mt_hash', models.CharField(max_length=40, unique=True)),
('mt_created_at', models.DateTimeField(auto_now_add=True)),
('certname_trusted', models.TextField()),
('authenticated', models.TextField()),
('aio_agent_version', models.TextField(blank=True, null=True)),
('environment', models.TextField(blank=True, null=True)),
('timestamp', models.DateTimeField()),
('agent_specified_environment', models.TextField(blank=True, null=True)),
('clientversion', models.TextField(blank=True, null=True)),
('extensions', models.ManyToManyField(to='inventory.PuppetCertificateExtension')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PuppetFact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mt_hash', models.CharField(max_length=40, unique=True)),
('mt_created_at', models.DateTimeField(auto_now_add=True)),
('fact_key', models.TextField()),
('fact_key_display_name', models.TextField()),
('fact_value', models.TextField()),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='puppetdbinventory',
name='facts',
field=models.ManyToManyField(to='inventory.PuppetFact'),
),
migrations.AddField(
model_name='machinesnapshot',
name='puppetdb_inventory',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.PuppetDBInventory'),
),
]
|
tests/fixtures/config_teamocil/test1.py | rfoliva/tmuxp | 1,607 | 12765753 | <reponame>rfoliva/tmuxp<filename>tests/fixtures/config_teamocil/test1.py
from .._util import loadfixture
teamocil_yaml = loadfixture('config_teamocil/test1.yaml')
teamocil_conf = {
'windows': [
{
'name': 'sample-two-panes',
'root': '~/Code/sample/www',
'layout': 'even-horizontal',
'panes': [{'cmd': ['pwd', 'ls -la']}, {'cmd': 'rails server --port 3000'}],
}
]
}
expected = {
'session_name': None,
'windows': [
{
'window_name': 'sample-two-panes',
'layout': 'even-horizontal',
'start_directory': '~/Code/sample/www',
'panes': [
{'shell_command': ['pwd', 'ls -la']},
{'shell_command': 'rails server --port 3000'},
],
}
],
}
|
pyrep/objects/vision_sensor.py | WeiWeic6222848/PyRep | 505 | 12765797 | import math
from typing import List, Union, Sequence
from pyrep.backend import sim
from pyrep.objects.object import Object, object_type_to_class
import numpy as np
from pyrep.const import ObjectType, PerspectiveMode, RenderMode
class VisionSensor(Object):
"""A camera-type sensor, reacting to light, colors and images.
"""
def __init__(self, name_or_handle: Union[str, int]):
super().__init__(name_or_handle)
self.resolution = sim.simGetVisionSensorResolution(self._handle)
@staticmethod
def create(resolution: List[int], explicit_handling=False,
perspective_mode=True, show_volume_not_detecting=True,
show_volume_detecting=True, passive=False,
use_local_lights=False, show_fog=True,
near_clipping_plane=1e-2, far_clipping_plane=10.0,
view_angle=60.0, ortho_size=1.0, sensor_size=None,
render_mode=RenderMode.OPENGL3,
position=None, orientation=None) -> 'VisionSensor':
""" Create a Vision Sensor
:param resolution: List of the [x, y] resolution.
:param explicit_handling: Sensor will be explicitly handled.
:param perspective_mode: Sensor will be operated in Perspective Mode.
Orthographic mode if False.
:param show_volume_not_detecting: Sensor volume will be shown when not
detecting anything.
        :param show_volume_detecting: Sensor volume will be shown when detecting.
:param passive: Sensor will be passive (use an external image).
:param use_local_lights: Sensor will use local lights.
:param show_fog: Sensor will show fog (if enabled).
:param near_clipping_plane: Near clipping plane.
:param far_clipping_plane: Far clipping plane.
:param view_angle: Perspective angle (in degrees) if in Perspective Mode.
:param ortho_size: Orthographic projection size [m] if in Orthographic
Mode.
:param sensor_size: Size [x, y, z] of the Vision Sensor object.
:param render_mode: Sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
:param position: The [x, y, z] position, if specified.
:param orientation: The [x, y, z] orientation in radians, if specified.
:return: The created Vision Sensor.
"""
options = 0
if explicit_handling:
options |= 1
if perspective_mode:
options |= 2
if not show_volume_not_detecting:
options |= 4
if not show_volume_detecting:
options |= 8
if passive:
options |= 16
if use_local_lights:
options |= 32
if not show_fog:
options |= 64
int_params = [
resolution[0], # 0
resolution[1], # 1
0, # 2
0 # 3
]
if sensor_size is None:
sensor_size = [0.01, 0.01, 0.03]
float_params = [
near_clipping_plane, # 0
far_clipping_plane, # 1
math.radians(view_angle) if perspective_mode else ortho_size, # 2
sensor_size[0], # 3
sensor_size[1], # 4
sensor_size[2], # 5
0.0, # 6
0.0, # 7
0.0, # 8
0.0, # 9
0.0, # 10
]
vs = VisionSensor(
sim.simCreateVisionSensor(options, int_params, float_params, None)
)
vs.set_render_mode(render_mode)
if position is not None:
vs.set_position(position)
if orientation is not None:
vs.set_orientation(orientation)
return vs
def _get_requested_type(self) -> ObjectType:
return ObjectType.VISION_SENSOR
def handle_explicitly(self) -> None:
"""Handle sensor explicitly.
        This enables capturing an image (e.g., capture_rgb())
without PyRep.step().
"""
if not self.get_explicit_handling():
raise RuntimeError('The explicit_handling is disabled. '
'Call set_explicit_handling(value=1) to enable explicit_handling first.')
sim.simHandleVisionSensor(self._handle)
def capture_rgb(self) -> np.ndarray:
"""Retrieves the rgb-image of a vision sensor.
:return: A numpy array of size (width, height, 3)
"""
return sim.simGetVisionSensorImage(self._handle, self.resolution)
def capture_depth(self, in_meters=False) -> np.ndarray:
"""Retrieves the depth-image of a vision sensor.
:param in_meters: Whether the depth should be returned in meters.
:return: A numpy array of size (width, height)
"""
return sim.simGetVisionSensorDepthBuffer(
self._handle, self.resolution, in_meters)
def capture_pointcloud(self) -> np.ndarray:
"""Retrieves point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
d = self.capture_depth(in_meters=True)
return self.pointcloud_from_depth(d)
def pointcloud_from_depth(self, depth: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
intrinsics = self.get_intrinsic_matrix()
return VisionSensor.pointcloud_from_depth_and_camera_params(
depth, self.get_matrix(), intrinsics)
@staticmethod
def pointcloud_from_depth_and_camera_params(
depth: np.ndarray, extrinsics: np.ndarray,
intrinsics: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
upc = _create_uniform_pixel_coords_image(depth.shape)
pc = upc * np.expand_dims(depth, -1)
C = np.expand_dims(extrinsics[:3, 3], 0).T
R = extrinsics[:3, :3]
R_inv = R.T # inverse of rot matrix is transpose
R_inv_C = np.matmul(R_inv, C)
extrinsics = np.concatenate((R_inv, -R_inv_C), -1)
cam_proj_mat = np.matmul(intrinsics, extrinsics)
cam_proj_mat_homo = np.concatenate(
[cam_proj_mat, [np.array([0, 0, 0, 1])]])
cam_proj_mat_inv = np.linalg.inv(cam_proj_mat_homo)[0:3]
world_coords_homo = np.expand_dims(_pixel_to_world_coords(
pc, cam_proj_mat_inv), 0)
world_coords = world_coords_homo[..., :-1][0]
return world_coords
def get_intrinsic_matrix(self):
res = np.array(self.get_resolution())
pp_offsets = res / 2
ratio = res[0] / res[1]
pa_x = pa_y = math.radians(self.get_perspective_angle())
if ratio > 1:
pa_y = 2 * np.arctan(np.tan(pa_y / 2) / ratio)
elif ratio < 1:
pa_x = 2 * np.arctan(np.tan(pa_x / 2) * ratio)
persp_angles = np.array([pa_x, pa_y])
focal_lengths = -res / (2 * np.tan(persp_angles / 2))
return np.array(
[[focal_lengths[0], 0., pp_offsets[0]],
[0., focal_lengths[1], pp_offsets[1]],
[0., 0., 1.]])
def get_resolution(self) -> List[int]:
""" Return the Sensor's resolution.
:return: Resolution [x, y]
"""
return sim.simGetVisionSensorResolution(self._handle)
def set_resolution(self, resolution: List[int]) -> None:
""" Set the Sensor's resolution.
:param resolution: New resolution [x, y]
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_x, resolution[0]
)
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_y, resolution[1]
)
self.resolution = resolution
def get_perspective_mode(self) -> PerspectiveMode:
""" Retrieve the Sensor's perspective mode.
:return: The current PerspectiveMode.
"""
perspective_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
)
return PerspectiveMode(perspective_mode)
def set_perspective_mode(self, perspective_mode: PerspectiveMode) -> None:
""" Set the Sensor's perspective mode.
:param perspective_mode: The new perspective mode, one of:
PerspectiveMode.ORTHOGRAPHIC
PerspectiveMode.PERSPECTIVE
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
perspective_mode.value
)
def get_render_mode(self) -> RenderMode:
""" Retrieves the Sensor's rendering mode
:return: RenderMode for the current rendering mode.
"""
render_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode
)
return RenderMode(render_mode)
def set_render_mode(self, render_mode: RenderMode) -> None:
""" Set the Sensor's rendering mode
:param render_mode: The new sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode,
render_mode.value
)
def get_windowed_size(self) -> Sequence[int]:
"""Get the size of windowed rendering.
:return: The (x, y) resolution of the window. 0 for full-screen.
"""
size_x = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x)
size_y = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y)
return size_x, size_y
def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
"""Set the size of windowed rendering.
:param resolution: The (x, y) resolution of the window.
0 for full-screen.
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x,
resolution[0])
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y,
resolution[1])
def get_perspective_angle(self) -> float:
""" Get the Sensor's perspective angle.
:return: The sensor's perspective angle (in degrees).
"""
return math.degrees(sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle
))
def set_perspective_angle(self, angle: float) -> None:
""" Set the Sensor's perspective angle.
:param angle: New perspective angle (in degrees)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle,
math.radians(angle)
)
def get_orthographic_size(self) -> float:
""" Get the Sensor's orthographic size.
:return: The sensor's orthographic size (in metres).
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size
)
def set_orthographic_size(self, ortho_size: float) -> None:
""" Set the Sensor's orthographic size.
        :param ortho_size: New orthographic size (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size, ortho_size
)
def get_near_clipping_plane(self) -> float:
""" Get the Sensor's near clipping plane.
:return: Near clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping
)
def set_near_clipping_plane(self, near_clipping: float) -> None:
""" Set the Sensor's near clipping plane.
:param near_clipping: New near clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping, near_clipping
)
def get_far_clipping_plane(self) -> float:
""" Get the Sensor's far clipping plane.
        :return: Far clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping
)
def set_far_clipping_plane(self, far_clipping: float) -> None:
""" Set the Sensor's far clipping plane.
:param far_clipping: New far clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping, far_clipping
)
def set_entity_to_render(self, entity_to_render: int) -> None:
""" Set the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 to render all objects in scene.
:param entity_to_render: Handle of the entity to render
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render, entity_to_render
)
def get_entity_to_render(self) -> None:
""" Get the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 if all objects in scene are rendered.
:return: Handle of the entity to render
"""
return sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render
)
def _create_uniform_pixel_coords_image(resolution: np.ndarray):
pixel_x_coords = np.reshape(
np.tile(np.arange(resolution[1]), [resolution[0]]),
(resolution[0], resolution[1], 1)).astype(np.float32)
pixel_y_coords = np.reshape(
np.tile(np.arange(resolution[0]), [resolution[1]]),
(resolution[1], resolution[0], 1)).astype(np.float32)
pixel_y_coords = np.transpose(pixel_y_coords, (1, 0, 2))
uniform_pixel_coords = np.concatenate(
(pixel_x_coords, pixel_y_coords, np.ones_like(pixel_x_coords)), -1)
return uniform_pixel_coords
def _transform(coords, trans):
h, w = coords.shape[:2]
coords = np.reshape(coords, (h * w, -1))
coords = np.transpose(coords, (1, 0))
transformed_coords_vector = np.matmul(trans, coords)
transformed_coords_vector = np.transpose(
transformed_coords_vector, (1, 0))
return np.reshape(transformed_coords_vector,
(h, w, -1))
def _pixel_to_world_coords(pixel_coords, cam_proj_mat_inv):
h, w = pixel_coords.shape[:2]
pixel_coords = np.concatenate(
[pixel_coords, np.ones((h, w, 1))], -1)
world_coords = _transform(pixel_coords, cam_proj_mat_inv)
world_coords_homo = np.concatenate(
[world_coords, np.ones((h, w, 1))], axis=-1)
return world_coords_homo
object_type_to_class[ObjectType.VISION_SENSOR] = VisionSensor
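# Hedged usage sketch (requires CoppeliaSim launched through PyRep; the scene
# file and sensor name are illustrative):
#
#   from pyrep import PyRep
#   pr = PyRep()
#   pr.launch('scene_with_vision_sensor.ttt', headless=True)
#   pr.start()
#   cam = VisionSensor('vision_sensor')
#   rgb = cam.capture_rgb()            # (H, W, 3) image
#   cloud = cam.capture_pointcloud()   # (H, W, 3) points in the world frame
#   pr.stop()
#   pr.shutdown()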
|
utils/mahalanobis.py | gautard/pystatsml | 123 | 12765803 | <reponame>gautard/pystatsml<filename>utils/mahalanobis.py
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 16:09:56 2016
@author: <EMAIL>
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
#%matplotlib inline
'''
Mahalanobis distance
====================
'''
from matplotlib.patches import Ellipse
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
n_samples, n_features = 100, 2
mean0, mean1 = np.array([0, 0]), np.array([0, 2])
Cov = np.array([[1, .8],[.8, 1]])
np.random.seed(42)
X0 = np.random.multivariate_normal(mean0, Cov, n_samples)
X1 = np.random.multivariate_normal(mean1, Cov, n_samples)
x = np.array([2, 2])
plt.scatter(X0[:, 0], X0[:, 1], color='b')
plt.scatter(X1[:, 0], X1[:, 1], color='r')
plt.scatter(mean0[0], mean0[1], color='b', s=200, label="m0")
plt.scatter(mean1[0], mean1[1], color='r', s=200, label="m2")
plt.scatter(x[0], x[1], color='k', s=200, label="x")
plot_cov_ellipse(Cov, pos=mean0, facecolor='none', linewidth=2, edgecolor='b')
plot_cov_ellipse(Cov, pos=mean1, facecolor='none', linewidth=2, edgecolor='r')
plt.legend(loc='upper left')
#
d2_m0x = scipy.spatial.distance.euclidean(mean0, x)
d2_m0m2 = scipy.spatial.distance.euclidean(mean0, mean1)
Covi = scipy.linalg.inv(Cov)
dm_m0x = scipy.spatial.distance.mahalanobis(mean0, x, Covi)
dm_m0m2 = scipy.spatial.distance.mahalanobis(mean0, mean1, Covi)
print('Euclidean dist(m0, x)=%.2f > dist(m0, m2)=%.2f' % (d2_m0x, d2_m0m2))
print('Mahalanobis dist(m0, x)=%.2f < dist(m0, m2)=%.2f' % (dm_m0x, dm_m0m2))
'''
## Exercise
- Write a function `euclidean(a, b)` that computes the Euclidean distance
- Write a function `mahalanobis(a, b, Covi)` that computes the Mahalanobis
distance, given `Covi`, the inverse of the covariance matrix. Use
`scipy.linalg.inv(Cov)` to invert your matrix. (Reference formulas are restated
in the comment block just below.)
'''
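# Reference formulas for the exercise above (a minimal restatement, not part of
# the original script):
#   Euclidean:   d(a, b)    = sqrt( sum_i (a_i - b_i)^2 )
#   Mahalanobis: d(a, b; S) = sqrt( (a - b)^T S^{-1} (a - b) ),  S = covariance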
def euclidian(a, b):
return np.sqrt(np.sum((a - b) ** 2))
def mahalanobis(a, b, cov_inv):
return np.sqrt(np.dot(np.dot((a - b), cov_inv), (a - b).T))
assert mahalanobis(mean0, mean1, Covi) == dm_m0m2
assert euclidian(mean0, mean1) == d2_m0m2
mahalanobis(X0, mean0, Covi)
X = X0
mean = mean0
covi= Covi
np.sqrt(np.dot(np.dot((X - mean), covi), (X - mean).T))
def mahalanobis(X, mean, covi):
"""
from scipy.spatial.distance import mahalanobis
d2= np.array([mahalanobis(X[i], mean, covi) for i in range(X.shape[0])])
np.all(mahalanobis(X, mean, covi) == d2)
"""
return np.sqrt(np.sum(np.dot((X - mean), covi) * (X - mean), axis=1))
|
src/python/twitter/checkstyle/plugins/trailing_whitespace.py | zhouyijiaren/commons | 1,143 | 12765807 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from collections import defaultdict
import tokenize
import sys
from ..common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
"""Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""
exception_ranges = defaultdict(list)
for token in tokens:
token_type, _, token_start, token_end = token[0:4]
if token_type in (tokenize.COMMENT, tokenize.STRING):
if token_start[0] == token_end[0]:
exception_ranges[token_start[0]].append((token_start[1], token_end[1]))
else:
exception_ranges[token_start[0]].append((token_start[1], sys.maxint))
for line in range(token_start[0] + 1, token_end[0]):
exception_ranges[line].append((0, sys.maxint))
exception_ranges[token_end[0]].append((0, token_end[1]))
return exception_ranges
def __init__(self, *args, **kw):
super(TrailingWhitespace, self).__init__(*args, **kw)
self._exception_map = self.build_exception_map(self.python_file.tokens)
def has_exception(self, line_number, exception_start, exception_end=None):
exception_end = exception_end or exception_start
for start, end in self._exception_map.get(line_number, ()):
if start <= exception_start and exception_end <= end:
return True
return False
def nits(self):
for line_number, line in self.python_file.enumerate():
stripped_line = line.rstrip()
if stripped_line != line and not self.has_exception(line_number,
len(stripped_line), len(line)):
yield self.error('T200', 'Line has trailing whitespace.', line_number)
if line.rstrip().endswith('\\'):
if not self.has_exception(line_number, len(line.rstrip()) - 1):
yield self.error('T201', 'Line has trailing slashes.', line_number)
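# Hedged illustration (construction arguments come from the CheckstylePlugin
# base class, which is not shown here): a line ending in spaces outside a
# string or comment is reported as T200, a bare trailing backslash as T201,
# and both are suppressed inside the ranges returned by build_exception_map.
#
#   nits = list(TrailingWhitespace(python_file).nits())  # `python_file` assumed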
|
see_rnn/utils.py | MichaelHopwood/MLMassSpectrom | 149 | 12765820 | <filename>see_rnn/utils.py
import numpy as np
from copy import deepcopy
from pathlib import Path
from ._backend import WARN, NOTE, TF_KERAS, Layer
try:
import tensorflow as tf
except:
pass # handled in __init__ via _backend.py
TF24plus = bool(float(tf.__version__[:3]) > 2.3)
def _kw_from_configs(configs, defaults):
def _fill_absent_defaults(kw, defaults):
# override `defaults`, but keep those not in `configs`
for name, _dict in defaults.items():
if name not in kw:
kw[name] = _dict
else:
for k, v in _dict.items():
if k not in kw[name]:
kw[name][k] = v
return kw
configs = configs or {}
configs = deepcopy(configs) # ensure external dict unchanged
for key in configs:
if key not in defaults:
raise ValueError(f"unexpected `configs` key: {key}; "
"supported are: %s" % ', '.join(list(defaults)))
kw = deepcopy(configs) # ensure external dict unchanged
# override `defaults`, but keep those not in `configs`
kw = _fill_absent_defaults(configs, defaults)
return kw
def _validate_args(_id, layer=None):
def _ensure_list(_id, layer):
# if None, leave as-is
_ids, layer = [[x] if not isinstance(x, (list, type(None))) else x
for x in (_id, layer)]
# ensure external lists unaffected
_ids, layer = [x.copy() if isinstance(x, list) else x
for x in (_ids, layer)]
return _ids, layer
def _ids_to_names_and_idxs(_ids):
names, idxs = [], []
for _id in _ids:
if not isinstance(_id, (str, int, tuple)):
tp = type(_id).__name__
raise ValueError("unsupported _id list element type: %s" % tp
+ "; supported are: str, int, tuple")
if isinstance(_id, str):
names.append(_id)
else:
if isinstance(_id, int):
idxs.append(_id)
else:
assert all(isinstance(x, int) for x in _id)
idxs.append(_id)
return names or None, idxs or None
def _one_requested(_ids, layer):
return len(layer or _ids) == 1 # give `layer` precedence
if _id and layer:
print(WARN, "`layer` will override `_id`")
_ids, layer = _ensure_list(_id, layer)
if _ids is None:
names, idxs = None, None
else:
names, idxs = _ids_to_names_and_idxs(_ids)
return names, idxs, layer, _one_requested(_ids, layer)
def _process_rnn_args(model, _id, layer, input_data, labels, mode,
data=None, norm=None):
"""Helper method to validate `input_data` & `labels` dims, layer info args,
`mode` arg, and fetch various pertinent RNN attributes.
"""
from .inspect_gen import get_layer, get_gradients
from .inspect_rnn import get_rnn_weights
def _validate_args_(_id, layer, input_data, labels, mode, norm, data):
_validate_args(_id, layer)
if data is not None:
got_inputs = (input_data is not None) or (labels is not None)
if got_inputs:
print(NOTE, "`data` will override `input_data`, `labels`, "
"and `mode`")
if not isinstance(data, list):
raise Exception("`data` must be a list of kernel & gate matrices")
if not (isinstance(data[0], np.ndarray) or isinstance(data[0], list)):
raise Exception("`data` list elements must be numpy arrays "
+ "or lists")
elif isinstance(data[0], list):
if not isinstance(data[0][0], np.ndarray):
raise Exception("`data` list elements' elements must be "
+ "numpy arrays")
if mode not in ['weights', 'grads']:
raise Exception("`mode` must be one of: 'weights', 'grads'")
if mode == 'grads' and (input_data is None or labels is None):
raise Exception("must supply input_data and labels for mode=='grads'")
if mode == 'weights' and (input_data is not None or labels is not None):
print(NOTE, "`input_data` and `labels will` be ignored for "
"`mode`=='weights'")
is_iter = (isinstance(norm, list) or isinstance(norm, tuple) or
isinstance(norm, np.ndarray))
is_iter_len2 = is_iter and len(norm)==2
if (norm is not None) and (norm != 'auto') and not is_iter_len2:
raise Exception("`norm` must be None, 'auto' or iterable ( "
+ "list, tuple, np.ndarray) of length 2")
_validate_args_(_id, layer, input_data, labels, mode, norm, data)
if layer is None:
layer = get_layer(model, _id)
rnn_type = _validate_rnn_type(layer, return_value=True)
gate_names = _rnn_gate_names(rnn_type)
n_gates = len(gate_names)
is_bidir = hasattr(layer, 'backward_layer')
rnn_dim = layer.layer.units if is_bidir else layer.units
direction_names = ['FORWARD', 'BACKWARD'] if is_bidir else [[]]
if 'CuDNN' in rnn_type:
uses_bias = True
else:
uses_bias = layer.layer.use_bias if is_bidir else layer.use_bias
if data is None:
if mode=='weights':
data = get_rnn_weights(model, _id, as_tensors=False,
concat_gates=True)
else:
data = get_gradients(model, None, input_data, labels,
layer=layer, mode='weights')
rnn_info = dict(rnn_type=rnn_type, gate_names=gate_names,
n_gates=n_gates, is_bidir=is_bidir,
rnn_dim=rnn_dim, uses_bias=uses_bias,
direction_names=direction_names)
return data, rnn_info
def _validate_rnn_type(rnn_layer, return_value=False):
if hasattr(rnn_layer, 'backward_layer'):
rnn_type = type(rnn_layer.layer).__name__
else:
rnn_type = type(rnn_layer).__name__
supported_rnns = ['LSTM', 'GRU', 'CuDNNLSTM', 'CuDNNGRU',
'SimpleRNN', 'IndRNN']
if rnn_type not in supported_rnns:
raise Exception("unsupported RNN type `%s` - must be one of: %s" % (
rnn_type, ', '.join(supported_rnns)))
if return_value:
return rnn_type
def _rnn_gate_names(rnn_type):
return {'LSTM': ['INPUT', 'FORGET', 'CELL', 'OUTPUT'],
'GRU': ['UPDATE', 'RESET', 'NEW'],
'CuDNNLSTM': ['INPUT', 'FORGET', 'CELL', 'OUTPUT'],
'CuDNNGRU': ['UPDATE', 'RESET', 'NEW'],
'SimpleRNN': [''],
'IndRNN': [''],
}[rnn_type]
def _filter_duplicates_by_keys(keys, *data):
def _second_index(ls, k):
return [i for i, x in enumerate(ls) if x == k][1]
collected = []
for k in keys:
if k in collected:
for i in range(len(data)):
data[i].pop(_second_index(keys, k))
keys.pop(keys.index(k))
collected.append(k)
if isinstance(data, tuple) and len(data) == 1:
data = data[0]
return keys, data
def _save_rnn_fig(figs, savepath, kwargs):
if len(figs) == 1:
figs[0].savefig(savepath)
return
_dir = str(Path(savepath).parent)
ext = Path(savepath).suffix
basename = Path(savepath).stem
names = [basename + '_0', basename + '_1']
for fig, name in zip(figs, names):
fig.savefig(Path(_dir).joinpath(name, ext), **kwargs)
def _layer_of_output(output):
h = output._keras_history
if isinstance(h, tuple):
for x in h:
if isinstance(x, Layer):
return x
return h.layer
def clipnums(nums):
if not isinstance(nums, (list, tuple)):
nums = [nums]
clipped = []
for num in nums:
if isinstance(num, int) or (isinstance(num, float) and num.is_integer()):
clipped.append(str(int(num)))
elif abs(num) > 1e-3 and abs(num) < 1e3:
clipped.append("%.3f" % num)
else:
clipped.append(("%.2e" % num).replace("+0", "+").replace("-0", "-"))
return clipped if len(clipped) > 1 else clipped[0]
def _get_params(model, layers=None, params=None, mode='outputs', verbose=1):
def _validate_args(layers, params, mode):
got_both = (layers is not None and params is not None)
got_neither = (layers is None and params is None)
if got_both or got_neither:
raise ValueError("one (and only one) of `layers` or `params` "
"must be supplied")
if mode not in ('outputs', 'weights'):
raise ValueError("`mode` must be one of: 'outputs', 'weights'")
if layers is not None and not isinstance(layers, list):
layers = [layers]
if params is not None and not isinstance(params, list):
params = [params]
return layers, params
def _filter_params(params, verbose):
def _to_omit(p):
if isinstance(p, tf.Variable): # param is layer weight
return False
elif tf.is_tensor(p): # param is layer output
layer = _layer_of_output(p)
if (TF_KERAS or tf.__version__[0] == '2'
) and hasattr(layer, 'activation'):
# these activations don't have gradients defined (or ==0),
# and tf.keras doesn't re-route output gradients
# to the pre-activation weights transform
value = getattr(layer.activation, '__name__', '').lower() in (
'softmax',)
if value and verbose:
print(WARN, ("{} has {} activation, which has a None "
"gradient in tf.keras; will skip".format(
layer, layer.activation.__name__)))
return value
elif 'Input' in getattr(layer.__class__, '__name__'):
# omit input layer(s)
if verbose:
print(WARN, layer, "is an Input layer; getting input "
"gradients is unsupported - will skip")
return True
else:
return False
else:
raise ValueError(("unsupported param type: {} ({}); must be"
"tf.Variable or tf.Tensor".format(type(p), p)))
_params = []
for p in params:
if not _to_omit(p):
_params.append(p)
return _params
# run check even if `params` is not None to couple `_get_params` with
# `_validate_args` for other methods
layers, params = _validate_args(layers, params, mode)
if not params:
if mode == 'outputs':
params = [l.output for l in layers]
else:
params = [w for l in layers for w in l.trainable_weights]
params = _filter_params(params, verbose)
return params
def is_tensor(x):
return (tf.is_tensor(x) if TF24plus else
isinstance(x, tf.Tensor))
|
packages/python/setup.py | ufora/ufora | 571 | 12765846 | <filename>packages/python/setup.py<gh_stars>100-1000
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from distutils.core import Extension
import glob
import numpy
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
NEWS = open(os.path.join(here, 'NEWS.txt')).read()
def read_package_version():
version_file = 'pyfora/_version.py'
with open(version_file, 'rt') as version_file:
version_line = version_file.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_line, re.M)
if match:
return match.group(1)
raise RuntimeError("Can't read version string from '%s'." % (version_file,))
version = read_package_version()
install_requires = ['futures', 'socketIO-client>=0.6.5', 'numpy', 'wsaccel','websocket-client==0.37.0']
ext_modules = []
extra_compile_args=['-std=c++11']
pythonObjectRehydratorModule = Extension('pyfora.PythonObjectRehydrator',
language='c++',
extra_compile_args=extra_compile_args,
sources=['pyfora/src/pythonObjectRehydratorModule.cpp',
'pyfora/src/BinaryObjectRegistry.cpp',
'pyfora/src/StringBuilder.cpp',
'pyfora/src/PureImplementationMappings.cpp',
'pyfora/src/PyObjectUtils.cpp',
'pyfora/src/ObjectRegistry.cpp',
'pyfora/src/IRToPythonConverter.cpp',
'pyfora/src/NamedSingletons.cpp',
'pyfora/src/BinaryObjectRegistryHelpers.cpp',
'pyfora/src/FreeVariableMemberAccessChain.cpp',
'pyfora/src/Json.cpp',
'pyfora/src/PyAbortSingletons.cpp',
'pyfora/src/ModuleLevelObjectIndex.cpp',
'pyfora/src/ScopedPyThreads.cpp',
'pyfora/src/PythonObjectRehydrator.cpp'] +
glob.glob('pyfora/src/TypeDescriptions/*.cpp') +
glob.glob('pyfora/src/serialization/*.cpp'),
include_dirs=[numpy.get_include()]
)
ext_modules.append(pythonObjectRehydratorModule)
stringbuildermodule = Extension('pyfora.StringBuilder',
language='c++',
extra_compile_args=['-std=c++11'],
sources=['pyfora/src/StringBuilder.cpp',
'pyfora/src/stringbuildermodule.cpp']
)
ext_modules.append(stringbuildermodule)
binaryObjectRegistryModule = Extension('pyfora.BinaryObjectRegistry',
language='c++',
extra_compile_args=extra_compile_args,
sources=['pyfora/src/BinaryObjectRegistry.cpp',
'pyfora/src/PyObjectWalker.cpp',
'pyfora/src/PureImplementationMappings.cpp',
'pyfora/src/binaryobjectregistrymodule.cpp',
'pyfora/src/StringBuilder.cpp',
'pyfora/src/FileDescription.cpp',
'pyfora/src/PyObjectUtils.cpp',
'pyfora/src/Exceptions.cpp',
'pyfora/src/PyAstUtil.cpp',
'pyfora/src/FreeVariableMemberAccessChain.cpp',
'pyfora/src/PyAstFreeVariableAnalyses.cpp',
'pyfora/src/PyforaInspect.cpp',
'pyfora/src/FreeVariableResolver.cpp',
'pyfora/src/Ast.cpp',
'pyfora/src/UnresolvedFreeVariableExceptions.cpp',
'pyfora/src/BinaryObjectRegistryHelpers.cpp',
'pyfora/src/Json.cpp',
'pyfora/src/ModuleLevelObjectIndex.cpp']
)
ext_modules.append(binaryObjectRegistryModule)
pyObjectWalkerModule = Extension('pyfora.PyObjectWalker',
language='c++',
extra_compile_args=extra_compile_args,
sources=['pyfora/src/pyobjectwalkermodule.cpp',
'pyfora/src/PyObjectWalker.cpp',
'pyfora/src/PureImplementationMappings.cpp',
'pyfora/src/BinaryObjectRegistry.cpp',
'pyfora/src/FileDescription.cpp',
'pyfora/src/StringBuilder.cpp',
'pyfora/src/PyObjectUtils.cpp',
'pyfora/src/FreeVariableResolver.cpp',
'pyfora/src/Exceptions.cpp',
'pyfora/src/PyAstUtil.cpp',
'pyfora/src/FreeVariableMemberAccessChain.cpp',
'pyfora/src/PyAstFreeVariableAnalyses.cpp',
'pyfora/src/PyforaInspect.cpp',
'pyfora/src/Ast.cpp',
'pyfora/src/UnresolvedFreeVariableExceptions.cpp',
'pyfora/src/BinaryObjectRegistryHelpers.cpp',
'pyfora/src/Json.cpp',
'pyfora/src/ModuleLevelObjectIndex.cpp']
)
ext_modules.append(pyObjectWalkerModule)
setup(
name='pyfora',
version=version,
description="A library for parallel execution of Python code in the Ufora runtime",
long_description=README + '\n\n' + NEWS,
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'
],
keywords='ufora fora parallel remote data-science machine-learning',
author='<NAME>.',
author_email='<EMAIL>',
url='http://www.ufora.com/',
license='Apache',
packages=find_packages('.'),
package_dir={'': '.'},
package_data={
'': ['*.txt', '*.rst'],
'pyfora': ['fora/**/*.fora']
},
zip_safe=False,
install_requires=install_requires,
entry_points={
'console_scripts':
['pyfora_aws=pyfora.aws.pyfora_aws:main']
},
ext_modules=ext_modules
)
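# Hedged build note: the extension modules above require a C++11-capable
# compiler and importable numpy headers at build time; a typical local build
# would look something like:
#
#   pip install numpy
#   python setup.py build_ext --inplace
#   python setup.py install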
|
generate_result.py | SeitaroShinagawa/chainer-partial_convolution_image_inpainting | 116 | 12765852 | <gh_stars>100-1000
#!/usr/bin/env python
import argparse
import os
import chainer
from chainer import training
from chainer import cuda, serializers
from chainer.training import extension
from chainer.training import extensions
import sys
import common.net as net
import datasets
from updater import *
from evaluation import *
from chainer.links import VGG16Layers
import common.paths as paths
def main():
parser = argparse.ArgumentParser(
description='Train Completion Network')
parser.add_argument('--batch_size', '-b', type=int, default=8)
parser.add_argument('--gpu', '-g', type=int, default=0,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--eval_folder', '-e', default='generated_results',
help='Directory to output the evaluation result')
parser.add_argument("--load_model", help='completion model path')
parser.add_argument("--resize_to", type=int, default=256, help='resize the image to')
parser.add_argument("--crop_to", type=int, default=256, help='crop the resized image to')
parser.add_argument("--load_dataset", default='place2_test', help='load dataset')
#parser.add_argument("--layer_n", type=int, default=7, help='number of layers')
args = parser.parse_args()
print(args)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
#load completion model
model = getattr(net, "PartialConvCompletion")(ch0=3,input_size=args.crop_to)
#load vgg_model
print("loading vgg16 ...")
vgg = VGG16Layers()
print("ok")
    if args.load_model:
serializers.load_npz(args.load_model, model)
print("Completion model loaded")
if not os.path.exists(args.eval_folder):
os.makedirs(args.eval_folder)
# select GPU
if args.gpu >= 0:
model.to_gpu()
vgg.to_gpu()
print("use gpu {}".format(args.gpu))
val_dataset = getattr(datasets, args.load_dataset)(paths.val_place2, mask_path="mask/256", resize_to=args.resize_to, crop_to=args.crop_to)
val_iter = chainer.iterators.SerialIterator(
val_dataset, args.batch_size)
#test_dataset = horse2zebra_Dataset_train(flip=args.flip, resize_to=args.resize_to, crop_to=args.crop_to)
#test_iter = chainer.iterators.SerialIterator(train_dataset, 8)
#generate results
xp = model.xp
batch = val_iter.next()
batchsize = len(batch)
image_size = args.crop_to
x = xp.zeros((batchsize, 3, image_size, image_size)).astype("f")
m = xp.zeros((batchsize, 3, image_size, image_size)).astype("f")
for i in range(batchsize):
x[i, :] = xp.asarray(batch[i][0])
m[i, :] = xp.asarray(batch[i][1])
mask_b = xp.array(m.astype("bool"))
I_gt = Variable(x)
M = Variable(m)
M_b = Variable(mask_b)
I_out = model(x, m)
I_comp = F.where(M_b,I_gt,I_out)
img = x.get()
img = batch_postprocess_images(img, batchsize, 1)
Image.fromarray(img).save(args.eval_folder+"/generated_3_Igt.jpg")
img = I_comp.data.get()
img = batch_postprocess_images(img, batchsize, 1)
Image.fromarray(img).save(args.eval_folder+"/generated_2_Icomp.jpg")
img = I_out.data.get()
img = batch_postprocess_images(img, batchsize, 1)
Image.fromarray(img).save(args.eval_folder+"/generated_1_Iout.jpg")
img = M.data.get()
img = batch_postprocess_images(img, batchsize, 1)
Image.fromarray(img).save(args.eval_folder+"/generated_0_mask.jpg")
if __name__ == '__main__':
main()
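# Hedged CLI sketch (file paths are illustrative; masks are read from the
# hard-coded "mask/256" directory relative to the working directory):
#
#   python generate_result.py --gpu 0 --batch_size 8 \
#       --load_model completion_model.npz --eval_folder generated_results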
|
examples/issues/issue529_obj.py | tgolsson/appJar | 666 | 12765855 | <reponame>tgolsson/appJar
import sys
sys.path.append("../../")
from numpy import sin, pi, arange
from appJar import gui
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg as addToolbar
import random
from mpl_toolkits.mplot3d import Axes3D
with gui() as app:
fig = app.addPlotFig("p1", showNav=True)
ax = fig.add_subplot(111, projection='3d')
ax.scatter([1,2],[1,2],[1,2])
|
alipay/aop/api/domain/StageGroupInfoVO.py | antopen/alipay-sdk-python-all | 213 | 12765950 | <filename>alipay/aop/api/domain/StageGroupInfoVO.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.StageCateInfoVO import StageCateInfoVO
class StageGroupInfoVO(object):
def __init__(self):
self._group_name = None
self._stage_cate_infos = None
@property
def group_name(self):
return self._group_name
@group_name.setter
def group_name(self, value):
self._group_name = value
@property
def stage_cate_infos(self):
return self._stage_cate_infos
@stage_cate_infos.setter
def stage_cate_infos(self, value):
if isinstance(value, list):
self._stage_cate_infos = list()
for i in value:
if isinstance(i, StageCateInfoVO):
self._stage_cate_infos.append(i)
else:
self._stage_cate_infos.append(StageCateInfoVO.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.group_name:
if hasattr(self.group_name, 'to_alipay_dict'):
params['group_name'] = self.group_name.to_alipay_dict()
else:
params['group_name'] = self.group_name
if self.stage_cate_infos:
if isinstance(self.stage_cate_infos, list):
for i in range(0, len(self.stage_cate_infos)):
element = self.stage_cate_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.stage_cate_infos[i] = element.to_alipay_dict()
if hasattr(self.stage_cate_infos, 'to_alipay_dict'):
params['stage_cate_infos'] = self.stage_cate_infos.to_alipay_dict()
else:
params['stage_cate_infos'] = self.stage_cate_infos
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = StageGroupInfoVO()
if 'group_name' in d:
o.group_name = d['group_name']
if 'stage_cate_infos' in d:
o.stage_cate_infos = d['stage_cate_infos']
return o
|
L1Trigger/L1THGCal/python/customTriggerCellSelect.py | Purva-Chaudhari/cmssw | 852 | 12765969 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
import SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi as digiparam
from L1Trigger.L1THGCal.hgcalConcentratorProducer_cfi import threshold_conc_proc, best_conc_proc, supertc_conc_proc, coarsetc_onebitfraction_proc, coarsetc_equalshare_proc, bestchoice_ndata_decentralized, custom_conc_proc, autoEncoder_conc_proc
def custom_triggercellselect_supertriggercell(process,
stcSize=supertc_conc_proc.stcSize,
type_energy_division=supertc_conc_proc.type_energy_division,
fixedDataSizePerHGCROC=supertc_conc_proc.fixedDataSizePerHGCROC
):
parameters = supertc_conc_proc.clone(stcSize = stcSize,
type_energy_division = type_energy_division,
fixedDataSizePerHGCROC = fixedDataSizePerHGCROC
)
process.hgcalConcentratorProducer.ProcessorParameters = parameters
return process
def custom_triggercellselect_threshold(process,
threshold_silicon=threshold_conc_proc.threshold_silicon, # in mipT
threshold_scintillator=threshold_conc_proc.threshold_scintillator, # in mipT
coarsenTriggerCells=threshold_conc_proc.coarsenTriggerCells
):
parameters = threshold_conc_proc.clone(
threshold_silicon = threshold_silicon,
threshold_scintillator = threshold_scintillator,
coarsenTriggerCells = coarsenTriggerCells
)
process.hgcalConcentratorProducer.ProcessorParameters = parameters
return process
def custom_triggercellselect_bestchoice(process,
triggercells=best_conc_proc.NData
):
parameters = best_conc_proc.clone(NData = triggercells)
process.hgcalConcentratorProducer.ProcessorParameters = parameters
return process
def custom_triggercellselect_bestchoice_decentralized(process):
return custom_triggercellselect_bestchoice(process, triggercells=bestchoice_ndata_decentralized)
def custom_coarsetc_onebitfraction(process,
stcSize=coarsetc_onebitfraction_proc.stcSize,
fixedDataSizePerHGCROC=coarsetc_onebitfraction_proc.fixedDataSizePerHGCROC,
oneBitFractionThreshold = coarsetc_onebitfraction_proc.oneBitFractionThreshold,
oneBitFractionLowValue = coarsetc_onebitfraction_proc.oneBitFractionLowValue,
oneBitFractionHighValue = coarsetc_onebitfraction_proc.oneBitFractionHighValue,
):
parameters = coarsetc_onebitfraction_proc.clone(
stcSize = stcSize,
fixedDataSizePerHGCROC = fixedDataSizePerHGCROC,
oneBitFractionThreshold = oneBitFractionThreshold,
oneBitFractionLowValue = oneBitFractionLowValue,
oneBitFractionHighValue = oneBitFractionHighValue,
)
process.hgcalConcentratorProducer.ProcessorParameters = parameters
return process
def custom_coarsetc_equalshare(process,
stcSize=coarsetc_equalshare_proc.stcSize,
fixedDataSizePerHGCROC=coarsetc_equalshare_proc.fixedDataSizePerHGCROC,
):
parameters = coarsetc_equalshare_proc.clone(
stcSize = stcSize,
fixedDataSizePerHGCROC = fixedDataSizePerHGCROC,
)
process.hgcalConcentratorProducer.ProcessorParameters = parameters
return process
def custom_triggercellselect_mixedBestChoiceSuperTriggerCell(process,
stcSize=custom_conc_proc.stcSize,
type_energy_division=custom_conc_proc.type_energy_division,
fixedDataSizePerHGCROC=custom_conc_proc.fixedDataSizePerHGCROC,
triggercells=custom_conc_proc.NData
):
parameters = custom_conc_proc.clone(stcSize = stcSize,
type_energy_division = type_energy_division,
fixedDataSizePerHGCROC = fixedDataSizePerHGCROC,
NData=triggercells,
Method = cms.vstring('bestChoiceSelect','superTriggerCellSelect','superTriggerCellSelect'),
)
process.hgcalConcentratorProducer.ProcessorParameters = parameters
return process
def custom_triggercellselect_mixedBestChoiceSuperTriggerCell_decentralized(process):
return custom_triggercellselect_mixedBestChoiceSuperTriggerCell(process, triggercells=bestchoice_ndata_decentralized)
def custom_triggercellselect_autoencoder(process,
cellRemap = autoEncoder_conc_proc.cellRemap,
nBitsPerInput = autoEncoder_conc_proc.nBitsPerInput,
maxBitsPerOutput = autoEncoder_conc_proc.maxBitsPerOutput,
bitsPerLink = autoEncoder_conc_proc.bitsPerLink,
modelFiles = autoEncoder_conc_proc.modelFiles,
linkToGraphMap = autoEncoder_conc_proc.linkToGraphMap,
zeroSuppresionThreshold = autoEncoder_conc_proc.zeroSuppresionThreshold,
saveEncodedValues = autoEncoder_conc_proc.saveEncodedValues,
preserveModuleSum = autoEncoder_conc_proc.preserveModuleSum,
scintillatorMethod = 'thresholdSelect',
):
parameters = autoEncoder_conc_proc.clone(
cellRemap = cellRemap,
nBitsPerInput = nBitsPerInput,
maxBitsPerOutput = maxBitsPerOutput,
bitsPerLink = bitsPerLink,
modelFiles = modelFiles,
linkToGraphMap = linkToGraphMap,
zeroSuppresionThreshold = zeroSuppresionThreshold,
saveEncodedValues = saveEncodedValues,
preserveModuleSum = preserveModuleSum,
Method = cms.vstring(['autoEncoder','autoEncoder', scintillatorMethod]),
)
process.hgcalConcentratorProducer.ProcessorParameters = parameters
return process
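def _example_threshold_customisation():
    # Illustrative sketch only (not part of the original customisation file): shows how
    # one of the helpers above would typically be applied to a cms.Process. The process
    # label and the loaded sequence are assumptions for this sketch; in a real job the
    # HGCal trigger-primitive sequence defining process.hgcalConcentratorProducer must
    # already be part of the configuration.
    process = cms.Process('L1TEST')
    process.load('L1Trigger.L1THGCal.hgcalTriggerPrimitives_cff')  # assumed to provide hgcalConcentratorProducer
    # keep only trigger cells above 2 mipT in silicon, default threshold in the scintillator
    return custom_triggercellselect_threshold(process, threshold_silicon=2.0)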
|
deepchem/feat/molecule_featurizers/maccs_keys_fingerprint.py | deloragaskins/deepchem | 3,782 | 12765970 |
import numpy as np
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class MACCSKeysFingerprint(MolecularFeaturizer):
"""MACCS Keys Fingerprint.
The MACCS (Molecular ACCess System) keys are one of the most commonly used structural keys.
See [1]_ and [2]_ for details.
Examples
--------
>>> import deepchem as dc
>>> smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
>>> featurizer = dc.feat.MACCSKeysFingerprint()
>>> features = featurizer.featurize([smiles])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(167,)
References
----------
.. [1] Durant, J. L., et al. "Reoptimization of MDL keys for use in drug discovery."
Journal of chemical information and computer sciences 42.6 (2002): 1273-1280.
.. [2] https://github.com/rdkit/rdkit/blob/master/rdkit/Chem/MACCSkeys.py
Note
----
This class requires RDKit to be installed.
"""
def __init__(self):
"""Initialize this featurizer."""
self.calculator = None
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate MACCS keys fingerprint.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
1D array of RDKit descriptors for `mol`. The length is 167.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.calculator is None:
try:
from rdkit.Chem.AllChem import GetMACCSKeysFingerprint
self.calculator = GetMACCSKeysFingerprint
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
return self.calculator(datapoint)
|
wg-manager-backend/script/wireguard_startup.py | SH-Daemon/wg-manager | 417 | 12765990 | import os
import typing
from sqlalchemy.orm import Session
import const
from database import models
from database.database import SessionLocal
from db.api_key import add_initial_api_key_for_admin
from db.wireguard import server_add_on_init
from script.wireguard import is_installed, start_interface, is_running, load_environment_clients
def setup_on_start():
_db: Session = SessionLocal()
servers: typing.List[models.WGServer] = _db.query(models.WGServer).all()
for s in servers:
try:
last_state = s.is_running
if is_installed() and last_state and is_running(s):
start_interface(s)
except Exception as e:
print(e)
if const.CLIENT:
load_environment_clients(_db)
if const.SERVER_INIT_INTERFACE is not None:
server_add_on_init(_db)
if const.SERVER_STARTUP_API_KEY is not None:
ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
add_initial_api_key_for_admin(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME)
_db.close()
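if __name__ == "__main__":
    # Sketch only: the backend is expected to call setup_on_start() from its startup
    # hook; this guard merely shows how the routine could be triggered manually, for
    # example from a one-off maintenance shell on the server.
    setup_on_start()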
|
ChasingTrainFramework_GeneralOneClassDetection/data_provider_base/base_provider.py | dvt0101/A-Light-and-Fast-Face-Detector-for-Edge-Devices | 1,172 | 12765998 | """
This module takes an adapter as a data supplier, packs the data, and provides it to data iterators
"""
class ProviderBaseclass(object):
"""
This is the base class for packers. Any concrete packer must inherit from this class.
"""
def __init__(self):
pass
def __str__(self):
return self.__class__.__name__
def __del__(self):
pass
def write(self):
"""
Write a single sample to the files
:return:
"""
raise NotImplementedError()
def read_by_index(self, index):
"""
Read a single sample
:return:
"""
raise NotImplementedError()
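class InMemoryProviderExample(ProviderBaseclass):
    """
    Illustrative sketch only (not part of the framework): a minimal in-memory provider
    showing how a concrete packer is expected to subclass ProviderBaseclass. The
    write() signature taking a single sample argument is an assumption for this demo.
    """
    def __init__(self):
        super(InMemoryProviderExample, self).__init__()
        self.samples = []
    def write(self, sample):
        # store one sample in memory instead of appending it to packed files
        self.samples.append(sample)
    def read_by_index(self, index):
        # return the sample stored at the given index
        return self.samples[index]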
if __name__ == '__main__':
provider = ProviderBaseclass()
print(provider)
|
patch_rt_container_registry_repos/python/list_docker_repos.py | jfrog/log4j_tools | 170 | 12766004 | import sys
import requests
from urllib.parse import urljoin
JFROG_API_KEY_HEADER_NAME = 'X-JFrog-Art-Api'
class DockerRegistryPagination:
def __init__(self, concatenating_key):
self.concatenating_key = concatenating_key
def __call__(self, url, *args, **kwargs):
response = requests.get(url, *args, **kwargs)
response.raise_for_status()
concatenated_list = response.json().get(self.concatenating_key, [])
while 'next' in response.links.keys():
url = urljoin(url, response.links['next']['url'])
response = requests.get(url, *args, **kwargs)
response.raise_for_status()
concatenated_list.extend(response.json().get(self.concatenating_key, []))
return concatenated_list
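def _example_catalog_pagination():
    # Illustrative sketch only (not part of the original script): direct use of the
    # pagination helper against the Docker catalog endpoint. The registry URL and the
    # API key below are placeholders, not real values.
    paginate_repositories = DockerRegistryPagination('repositories')
    return paginate_repositories(
        'https://example.jfrog.io/artifactory/api/docker/docker-local/v2/_catalog',
        headers={JFROG_API_KEY_HEADER_NAME: '<api-key-placeholder>'},
    )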
class ArtifactoryIntegrationLogic:
def __init__(self, base_url, api_key, default_repo=None, username=None):
self.username = username
self.base_url = base_url
if not self.base_url.startswith('https://'):
self.base_url = 'https://' + base_url
if self.base_url.endswith('/'):
self.base_url = self.base_url[:-1]
self.api_key = api_key
self.default_repo = default_repo
def get_artifactory_headers(self):
return {
JFROG_API_KEY_HEADER_NAME: self.api_key,
}
def _get_all_repos_data(self):
res = requests.get(
self.base_url + '/artifactory/api/repositories',
headers=self.get_artifactory_headers(),
)
if res.status_code != 200:
if res.status_code == 403:
raise Exception(
'Artifactory token is not valid or has been revoked.'
)
raise Exception(
f'Failed to get repositories. '
f'Error: {res.text}. Code {res.status_code}'
)
return res.json()
def list_repos(self, search=''):
all_repos_data = self._get_all_repos_data()
return sorted([i['key'] for i in all_repos_data if search.lower() in i['key'].lower()])
def get_repo_type(self, repo_name):
all_repos_data = self._get_all_repos_data()
for i in all_repos_data:
if i['key'] == repo_name:
return i['packageType']
raise Exception(
f'Repository {repo_name} does not exist or user does not have permissions for it.'
)
def _list_docker_folders(self, repo, search=''):
request_func = DockerRegistryPagination('repositories')
try:
repos = request_func(
self.base_url + '/artifactory/api/docker/%s/v2/_catalog' % repo,
headers=self.get_artifactory_headers(),
)
return [i for i in repos if search.lower() in i.lower()]
except requests.exceptions.HTTPError as exc:
raise Exception(
f'Failed to get images list using docker catalog. '
f'Error: {exc.response.text}. Code {exc.response.status_code}'
) from exc
def list_folders(self, repo=None, search=''):
if not repo:
repo = self.default_repo
if not repo:
raise ValueError('Either send a repo or set the default repo for this to work.')
folders = self._list_docker_folders(repo, search)
return sorted(folders)
def _list_docker_images(self, folder, repo, search=''):
request_func = DockerRegistryPagination('tags')
try:
tags = request_func(
self.base_url + '/artifactory/api/docker/%s/v2/%s/tags/list' % (repo, folder),
headers=self.get_artifactory_headers()
)
return [i for i in tags if search.lower() in i.lower()]
except requests.exceptions.HTTPError as exc:
raise Exception(
f'Failed to get tag list using docker catalog. '
f'Error: {exc.response.text}. Code {exc.response.status_code}'
) from exc
def list_images(self, folder='', repo=None, search=''):
if not repo:
repo = self.default_repo
if not repo:
raise ValueError('Either send a repo or set the default repo for this to work.')
images = self._list_docker_images(folder, repo, search)
return sorted(images)
rt_domain = sys.argv[1]
api_key = sys.argv[2]
user = sys.argv[3]
with open("images.csv", "w") as outfile:
rt = ArtifactoryIntegrationLogic(f"https://{rt_domain}", api_key, username=user)
repositories = rt.list_repos()
for repository in repositories:
repo_type = rt.get_repo_type(repository).lower()
if repo_type == "docker":
repo_folders = rt.list_folders(repo=repository)
for repo_folder in repo_folders:
folder_images = rt.list_images(repo=repository, folder=repo_folder)
for folder_image in folder_images:
outfile.write(f"{repository}, {repo_folder}, {folder_image}\r\n")
|
tests/test_noncoap_tcp_client.py | mguc/aiocoap | 229 | 12766101 | # This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 <NAME> <http://sixpinetrees.blogspot.com/>,
# 2013-2014 <NAME> <<EMAIL>>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""Confront a CoAP over TCP server with a client that speaks so bad protocol it
is easier to mock with sending byte sequences than with aiocoap"""
import asyncio
import unittest
import aiocoap
from .test_server import WithTestServer, precise_warnings, no_warnings, asynctest
from .common import tcp_disabled
@unittest.skipIf(tcp_disabled, "TCP disabled in environment")
class TestNoncoapTCPClient(WithTestServer):
def setUp(self):
super().setUp()
self.mock_r, self.mock_w = self.loop.run_until_complete(
asyncio.open_connection(
self.serveraddress,
aiocoap.COAP_PORT))
def tearDown(self):
self.mock_w.close()
super().tearDown()
@staticmethod
def _read_as_messages(encoded: bytes):
"""Process the encoded data into CoAP-over-TCP messages, return them as
a list and trailing (unrecognized / incomplete) data."""
messages = []
while True:
size = aiocoap.transports.tcp._extract_message_size(encoded)
if size is not None:
size = sum(size)
if size is None or size > len(encoded):
return messages, encoded
messages.append(aiocoap.transports.tcp._decode_message(encoded[:size]))
encoded = encoded[size:]
async def should_abort_early(self, request: bytes):
"""Send request bytes, expect that the server closes the connection
after having sent possibly a CSM and an abort"""
self.mock_w.write(request)
r = await self.mock_r.read() # timing out would be a typical failure case here too
parsed, trail = self._read_as_messages(r)
self.assertEqual(trail, b"", "Leftover data after closing message")
if parsed[0].code == aiocoap.CSM:
# don't discard the CSM unconditionally: the server might have
# read the request data before sending its own initial CSM.
parsed.pop(0)
self.assertEqual(len(parsed), 1, "Not exactly one (presumably abort) message received")
self.assertEqual(parsed[0].code, aiocoap.ABORT, "Received message is not an abort message")
async def should_idle(self, request: bytes, timeout=0.1):
"""Send request bytes, expect that the server sends CSM and does not
close the connection, awaiting more from the client.
Returns all messages received until the timeout."""
self.mock_w.write(request)
triggered_eof = False
async def kill_read():
"""After a timeout, synthesize an end-of-file condition into the
reader, hoping this doesn't break too much."""
nonlocal triggered_eof
await asyncio.sleep(timeout)
triggered_eof = True
self.mock_r.feed_eof()
self.loop.create_task(kill_read())
r = await self.mock_r.read() # timing out would be a typical failure case here too
self.assertEqual(triggered_eof, True, "Server closed connection prematurely")
parsed, trail = self._read_as_messages(r)
# if this happens, the server is either sending garbage (announcing
# something long and not following up), or the timeout should be
# increased
self.assertEqual(trail, b"", "Leftover data after reading timeout")
if parsed[0].code == aiocoap.CSM:
# don't discard the CSM unconditionally: the server might have
# read the request data before sending its own initial CSM.
parsed.pop(0)
return parsed
async def should_idle_quietly(self, request: bytes, timeout=0.1):
"""should_idle, but assert that no messages were returned"""
messages = await self.should_idle(request, timeout)
# it's not wrong per the spec, but it is highly unusual
self.assertEqual(messages, [], "Server sent messages on its own")
@precise_warnings(["Aborting connection: Failed to parse message"])
@asynctest
async def test_http_get(self):
await self.should_abort_early(b'GET /.well-known/core HTTP/1.0')
@precise_warnings(["Aborting connection: No CSM received"])
@asynctest
async def test_early_get(self):
await self.should_abort_early(b'\0\x01')
@no_warnings
@asynctest
async def test_incomplete_small(self):
await self.should_idle_quietly(b'\0')
@no_warnings
@asynctest
async def test_incomplete_large1(self):
# announcing, but not sending, 1 byte of extended length
await self.should_idle_quietly(b'\xd0')
@no_warnings
@asynctest
async def test_incomplete_large2(self):
# sending only one of the four extended-length bytes
# a server could in theory reject this on grounds of "no matter what
# you say next, my buffer ain't large enough"
await self.should_idle_quietly(b'\xf0\0')
@no_warnings
@asynctest
async def test_incomplete_large3(self):
# announcing a 269-byte-long message, but not even sending the code
await self.should_idle_quietly(b'\xe0\0\0')
@precise_warnings(['Aborting connection: Overly large message announced'])
@asynctest
async def test_incomplete_large4(self):
# announcing the longest possible message; this should exceed
# everyone's max-message-size.
#
# blocking to read more would be acceptable behavior as well.
await self.should_abort_early(b'\xf0\xff\xff\xff\xff')
@precise_warnings(['Aborting connection: Failed to parse message'])
@asynctest
async def test_wrong_tkl(self):
# send an unspecified token length of 15.
# the rest of the message is an empty CSM, so if the server were to
# extrapolate from the meaning of tkl 0..8, it'd read it as OK.
await self.should_abort_early(b'\x0fxxxxxxxxxxxxxxx\xe1')
# Fun inside the CSM
@no_warnings
@asynctest
async def test_exotic_elective_csm_option(self):
# send option number something-even (something-odd plus 269) as an empty option
await self.should_idle_quietly(b'\x30\xe1\xe0\xf1\xf1')
@precise_warnings(['Aborting connection: Option not supported'])
@asynctest
async def test_exotic_compulsory_csm_option(self):
# send option number something-odd (something-even plus 269) as an empty option
await self.should_abort_early(b'\x30\xe1\xe0\xf2\xf2')
@precise_warnings(['Aborting connection: Option not supported'])
@asynctest
async def test_exotic_compulsory_csm_option_late(self):
# send an empty CSM, and after that the one from compulsory_csm_option
await self.should_abort_early(b'\0\xe1\x30\xe1\xe0\xf2\xf2')
|
fexm/helpers/exceptions.py | fgsect/fexm | 105 | 12766124 |
class CouldNotConfigureException(BaseException):
def __str__(self):
return "Could not configure the repository."
class NotABinaryExecutableException(BaseException):
def __str__(self):
return "The file given is not a binary executable"
class ParametersNotAcceptedException(BaseException):
def __str__(self):
return "The search parameters given were not accepted by the github api"
class NoCoverageInformation(BaseException):
def __init__(self, binary_path):
self.binary_path = binary_path
def __str__(self):
return "Could not get any coverage information for " + str(self.binary_path)
|
text_summarize/text_summarize.py | anshul2807/Automation-scripts | 496 | 12766160 |
import argparse
from summarizer import Summarizer
def text_summarize(text, **kwargs):
"""
Summarize the given text. Returns the summarize
"""
model = Summarizer()
return model(text, **kwargs)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Summarize the given text')
parser.add_argument('-t', '--text', help="Text to summarize",
type=str)
parser.add_argument('-f', '--file', help="Filename to read text from",
type=str)
parser.add_argument('-r', '--ratio',
help="Given the ratio of the summarized text "
"(default: 0.2)",
type=float, default=0.2)
parser.add_argument('-o', '--output',
help="Given the path to an output file. "
"Otherwise stdout will be used",
type=str, default=None)
args = parser.parse_args()
if not (args.text or args.file):
parser.error("Either --text or --file is required")
if args.text and args.file:
parser.error("The arguments --text and --file are not "
"allowed together")
if args.file:
with open(args.file, 'r') as infile:
text = infile.readlines()
text = "\n".join(text)
if args.text:
text = args.text
summary = text_summarize(text, ratio=args.ratio)
if args.output:
with open(args.output, 'w') as outfile:
outfile.write(summary)
outfile.write("\n")
else:
print(summary)
|