max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---
cartoframes/viz/popups/default_popup_element.py | CartoDB/cartoframes | 236 | 11164355 |
def default_popup_element(title=None, operation=None, format=None):
"""Helper function for quickly adding a default popup element based on the style.
A style helper is required.
Args:
title (str, optional): Title for the given value. By default, it's the name of the value.
operation (str, optional): Cluster operation, defaults to 'count'. Other options
available are 'avg', 'min', 'max', and 'sum'.
format (str, optional): Format to apply to number values in the popup, based on d3-format
specifier (https://github.com/d3/d3-format#locale_format).
Example:
>>> default_popup_element(title='Popup title', format='.2~s')
"""
return {
'value': None,
'title': title,
'operation': operation,
'format': format
}
|
Coupled-Electron-Pair-Approximation/integrals.py | andyj10224/psi4numpy | 214 | 11164389 |
"""
Returns the Fock matrix and needed two-electron integral blocks.
__authors__ = "<NAME>"
__credits__ = ["<NAME>"]
__copyright__ = "(c) 2014-2020, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
"""
import numpy as np
import psi4
def orbitals_fock(mol, singles):
wfn = psi4.energy('scf', molecule=mol, return_wfn=True)[1]
### Orbitals
CA_O = wfn.Ca_subset("AO", "ACTIVE_OCC")
CB_O = wfn.Cb_subset("AO", "ACTIVE_OCC")
C_O = np.block([[CA_O, np.zeros(CB_O.shape)], [np.zeros(CA_O.shape), CB_O]])
CA_V = wfn.Ca_subset("AO", "ACTIVE_VIR")
CB_V = wfn.Cb_subset("AO", "ACTIVE_VIR")
C_V = np.block([[CA_V, np.zeros(CB_V.shape)], [np.zeros(CA_V.shape), CB_V]])
mints = psi4.core.MintsHelper(wfn.basisset())
### One-Electron Integrals
Fa = wfn.Fa()
Fb = wfn.Fb()
FI = np.block([[Fa, np.zeros(Fb.shape)], [np.zeros(Fa.shape), Fb]])
F = {
"oo": np.einsum('pP,qQ,pq->PQ', C_O, C_O, FI, optimize=True),
"vv": np.einsum('pP,qQ,pq->PQ', C_V, C_V, FI, optimize=True)
}
if singles:
F["ov"] = np.einsum('pP, qQ, pq -> PQ', C_O, C_V, FI, optimize=True)
return C_O, C_V, mints, F
def integrals(mol, singles=False, return_intermediates=False):
C_O, C_V, mints, F = orbitals_fock(mol, singles)
### Two-Electron Integrals
TEI = mints.ao_eri().np
# Construct electron-repulsion integrals in spinorbital basis from spatial orbital basis.
TEI = np.kron(np.eye(2), np.kron(np.eye(2), TEI).T)
# Transform integrals to physicist notation
TEI = TEI.swapaxes(1, 2)
# Antisymmetrize...
TEI -= TEI.swapaxes(2, 3)
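# The array now holds antisymmetrized physicist-notation integrals <pq||rs> = <pq|rs> - <pq|sr>;
# the two np.kron calls with eye(2) duplicate each spatial index into alpha/beta blocks,
# which is what builds the spinorbital basis.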
I = {
"oovv": np.einsum('pP,qQ,rR,sS,pqrs->PQRS', C_O, C_O, C_V, C_V, TEI, optimize=True),
"oooo": np.einsum('pP,qQ,rR,sS,pqrs->PQRS', C_O, C_O, C_O, C_O, TEI, optimize=True),
"voov": np.einsum('pP,qQ,rR,sS,pqrs->PQRS', C_V, C_O, C_O, C_V, TEI, optimize=True),
"vvvv": np.einsum('pP,qQ,rR,sS,pqrs->PQRS', C_V, C_V, C_V, C_V, TEI, optimize=True)
}
if singles:
I["ovvv"] = np.einsum('pP,qQ,rR,sS,pqrs->PQRS', C_O, C_V, C_V, C_V, TEI, optimize=True)
I["ooov"] = np.einsum('pP,qQ,rR,sS,pqrs->PQRS', C_O, C_O, C_O, C_V, TEI, optimize=True)
if not return_intermediates:
return I, F
else:
OEI = np.kron(np.eye(2), mints.ao_kinetic().np + mints.ao_potential().np)
intermed = {"TEI": TEI, "O": C_O, "V": C_V, "OEI": OEI}
return I, F, intermed
def integrals_DF(mol, singles=False):
C_O, C_V, mints, F = orbitals_fock(mol, singles)
### Two-Electron Integrals
basis = mints.basisset()
zero_bas = psi4.core.BasisSet.zero_ao_basis_set()
aux = psi4.core.BasisSet.build(mol, "DF_BASIS_CC", "", "RIFIT", basis.name())
Ppq = np.squeeze(mints.ao_eri(aux, zero_bas, basis, basis))
Ppq = np.kron(np.eye(2), Ppq) # Construct spinorbital quantities
metric = mints.ao_eri(aux, zero_bas, aux, zero_bas)
metric.power(-0.5, 1.e-10)
metric = np.squeeze(metric)
Qpq = np.einsum("QP,Ppq->pqQ", metric, Ppq, optimize=True)
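# Qpq folds the inverse square root of the metric into the 3-index factors, so the density-fitted
# ERIs are recovered as (pq|rs) ~= np.einsum('pqQ,rsQ->pqrs', Qpq, Qpq).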
R = {
"oo": np.einsum('pP,qQ,pqX->PQX', C_O, C_O, Qpq, optimize=True),
"ov": np.einsum('pP,qQ,pqX->PQX', C_O, C_V, Qpq, optimize=True),
"vv": np.einsum('pP,qQ,pqX->PQX', C_V, C_V, Qpq, optimize=True)
}
return R, F
|
tests/integration/test_s3_low_cardinality_right_border/test.py | mrk-andreev/ClickHouse | 8,629 | 11164460 |
#!/usr/bin/env python3
# 1) Here we try to reproduce a very rare error which is connected with
# LowCardinality. When we read data from S3 we try to make sequential
# reads without additional seeks. To achieve this we try to have mark ranges
# that are as large as possible for each thread that reads data from S3.
# Additionally, to avoid redundant reads we specify the "right border" for each
# read. This is supported by the S3 API: for example, you can ask S3 to return
# only the bytes from offset 563 to offset 92753, and we use this feature in
# ClickHouse.
#
# 2) We use granules (the range of data between two marks) as the minimal task for each
# thread. For example, when we need to read data from mark 0 to mark 1092 and we
# have two threads, each gets one task: thread_1 = [0, 546),
# thread_2 = [546, 1092). Of course the S3 API knows nothing about marks, it works
# with bytes. Each mark points to some offset in the compressed file (stored
# in S3) and an offset in the decompressed block (which we don't need here). To convert
# our mark range into a byte range we use range.begin_mark.offset_in_compressed_file as
# the beginning of the byte range and range.end.offset_in_compressed_file as the end. This
# works most of the time, because the last mark in the range is not included, so we can use its
# offset_in_compressed_file as the end of our range.
#
# The LowCardinality serialization format consists of two files (besides the files for marks):
# a file with the index (column_name.bin) and a file with the dictionary (column_name.dict.bin). Data
# in the index file points to the real column values in the dictionary. A dictionary can also be shared by
# several index marks (when you have a lot of rows with the same value), for example:
# ....
# Mark 186, points to [2003111, 0]
# Mark 187, points to [2003111, 0]
# Mark 188, points to [2003111, 0]
# Mark 189, points to [2003111, 0]
# Mark 190, points to [2003111, 0]
# Mark 191, points to [2003111, 0]
# Mark 192, points to [2081424, 0]
# Mark 193, points to [2081424, 0]
# Mark 194, points to [2081424, 0]
# Mark 195, points to [2081424, 0]
# Mark 196, points to [2081424, 0]
# Mark 197, points to [2081424, 0]
# Mark 198, points to [2081424, 0]
# Mark 199, points to [2081424, 0]
# Mark 200, points to [2081424, 0]
# Mark 201, points to [2159750, 0]
# Mark 202, points to [2159750, 0]
# Mark 203, points to [2159750, 0]
# Mark 204, points to [2159750, 0]
# ....
#
# Now imagine we have two threads: [0, 189) and [189, 378). Which
# byte range will we get? Using the logic from 2) we will get
# [0.offset_in_compressed_file, 189.offset_in_compressed_file] = [0, 2003111].
# But this range is incorrect, because the dictionary actually ends at offset 2081424,
# and all marks from 186 to 191 share this same dictionary. If we try to read
# data from [0, 2003111] we will not be able to do it, because it will be
# impossible to read the dictionary.
#
# This buggy logic was fixed, and this test confirms the fix (see the illustrative
# sketch below). At first I tried to pick sane row counts for the data, but the error
# didn't reproduce. After three tries with almost random numbers of rows it did.
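# A minimal sketch (hypothetical mark offsets, not actual ClickHouse internals) of why the
# naive right border breaks when a LowCardinality dictionary is shared between marks:
#
#     marks = {186: 2003111, 189: 2003111, 191: 2003111, 192: 2081424}
#     # offset_in_compressed_file per mark; marks 186-191 all point into one shared dictionary
#     naive_right_border = marks[189]  # = 2003111, end of the byte range for thread_1 = [0, 189)
#     # The shared dictionary only ends at 2081424, so a read capped at 2003111 truncates it
#     # and decoding fails; the fixed logic extends the right border past the shared dictionary.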
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", main_configs=["configs/s3.xml"], with_minio=True)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_s3_right_border(started_cluster):
node1.query("drop table if exists s3_low_cardinality")
node1.query(
"""
CREATE TABLE s3_low_cardinality
(
str_column LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS storage_policy = 's3', min_bytes_for_wide_part = 0, index_granularity = 1024;
"""
)
node1.query("INSERT INTO s3_low_cardinality SELECT 'aaaaaa' FROM numbers(600000)")
node1.query(
"INSERT INTO s3_low_cardinality SELECT toString(number) FROM numbers(100000)"
)
node1.query("INSERT INTO s3_low_cardinality SELECT 'bbbbbb' FROM numbers(500000)")
node1.query(
"INSERT INTO s3_low_cardinality SELECT toString(number + 100000000) FROM numbers(100000)"
)
node1.query("OPTIMIZE TABLE s3_low_cardinality FINAL")
settings = {
"merge_tree_min_bytes_for_concurrent_read": "0",
"merge_tree_min_rows_for_concurrent_read": "0",
"max_threads": "2",
}
assert (
node1.query(
"SELECT COUNT() FROM s3_low_cardinality WHERE not ignore(str_column)",
settings=settings,
)
== "1300000\n"
)
def test_s3_right_border_2(started_cluster):
node1.query("drop table if exists s3_low_cardinality")
node1.query(
"create table s3_low_cardinality (key UInt32, str_column LowCardinality(String)) engine = MergeTree order by (key) settings storage_policy = 's3', min_bytes_for_wide_part = 0, index_granularity = 8192, min_compress_block_size=1, merge_max_block_size=10000"
)
node1.query(
"insert into s3_low_cardinality select number, number % 8000 from numbers(8192)"
)
node1.query(
"insert into s3_low_cardinality select number = 0 ? 0 : (number + 8192 * 1), number % 8000 + 1 * 8192 from numbers(8192)"
)
node1.query(
"insert into s3_low_cardinality select number = 0 ? 0 : (number + 8192 * 2), number % 8000 + 2 * 8192 from numbers(8192)"
)
node1.query("optimize table s3_low_cardinality final")
res = node1.query("select * from s3_low_cardinality where key = 9000")
assert res == "9000\t9000\n"
|
mmhuman3d/utils/demo_utils.py | ykk648/mmhuman3d | 472 | 11164476 | import colorsys
import os
from pathlib import Path
import mmcv
import numpy as np
from scipy import interpolate
from mmhuman3d.core.post_processing import build_post_processing
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
def xyxy2xywh(bbox_xyxy):
"""Transform the bbox format from x1y1x2y2 to xywh.
Args:
bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
"""
if not isinstance(bbox_xyxy, np.ndarray):
raise TypeError(
f'Input type is {type(bbox_xyxy)}, which should be numpy.ndarray.')
bbox_xywh = bbox_xyxy.copy()
bbox_xywh[..., 2] = bbox_xywh[..., 2] - bbox_xywh[..., 0]
bbox_xywh[..., 3] = bbox_xywh[..., 3] - bbox_xywh[..., 1]
return bbox_xywh
def xywh2xyxy(bbox_xywh):
"""Transform the bbox format from xywh to x1y1x2y2.
Args:
bbox_xywh (np.ndarray): Bounding boxes (with scores), shaped
(n, 4) or (n, 5). (left, top, width, height, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, right, bottom, [score])
"""
if not isinstance(bbox_xywh, np.ndarray):
raise TypeError(
f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
bbox_xyxy = bbox_xywh.copy()
bbox_xyxy[..., 2] = bbox_xyxy[..., 2] + bbox_xyxy[..., 0] - 1
bbox_xyxy[..., 3] = bbox_xyxy[..., 3] + bbox_xyxy[..., 1] - 1
return bbox_xyxy
def box2cs(bbox_xywh, aspect_ratio=1.0, bbox_scale_factor=1.25):
"""Convert xywh coordinates to center and scale.
Args:
bbox_xywh (numpy.ndarray): Bounding boxes in xywh format, shaped (n, 4)
or (n, 5). (left, top, width, height, [score])
aspect_ratio (float, optional): Target width / height ratio. Defaults to 1.0
bbox_scale_factor (float, optional): Scale factor for expanding the bbox. Defaults to 1.25
Returns:
numpy.ndarray: center of the bbox
numpy.ndarray: the scale of the bbox w & h
"""
if not isinstance(bbox_xywh, np.ndarray):
raise TypeError(
f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
bbox_xywh = bbox_xywh.copy()
pixel_std = 1
center = np.stack([
bbox_xywh[..., 0] + bbox_xywh[..., 2] * 0.5,
bbox_xywh[..., 1] + bbox_xywh[..., 3] * 0.5
], -1)
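# expand either the height or the width so the box matches the target aspect ratio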
mask_h = bbox_xywh[..., 2] > aspect_ratio * bbox_xywh[..., 3]
mask_w = ~mask_h
bbox_xywh[mask_h, 3] = bbox_xywh[mask_h, 2] / aspect_ratio
bbox_xywh[mask_w, 2] = bbox_xywh[mask_w, 3] * aspect_ratio
scale = np.stack([
bbox_xywh[..., 2] * 1.0 / pixel_std,
bbox_xywh[..., 3] * 1.0 / pixel_std
], -1)
scale = scale * bbox_scale_factor
return center, scale
def convert_crop_cam_to_orig_img(cam: np.ndarray,
bbox: np.ndarray,
img_width: int,
img_height: int,
aspect_ratio: float = 1.0,
bbox_scale_factor: float = 1.25,
bbox_format: Literal['xyxy', 'xywh',
'cs'] = 'xyxy'):
"""This function is modified from [VIBE](https://github.com/
mkocabas/VIBE/blob/master/lib/utils/demo_utils.py#L242-L259). Original
license please see docs/additional_licenses.md.
Args:
cam (np.ndarray): cam (ndarray, shape=(frame, 3) or
(frame,num_person, 3)):
weak perspective camera in cropped img coordinates
bbox (np.ndarray): bbox coordinates
img_width (int): original image width
img_height (int): original image height
aspect_ratio (float, optional): Defaults to 1.0.
bbox_scale_factor (float, optional): Defaults to 1.25.
bbox_format (Literal['xyxy', 'xywh', 'cs']): Defaults to 'xyxy'.
'xyxy' means the left-up point and right-bottom point of the
bbox.
'xywh' means the left-up point and the width and height of the
bbox.
'cs' means the center of the bbox (x,y) and the scale of the
bbox w & h.
Returns:
orig_cam: shape = (frame, 4) or (frame, num_person, 4)
"""
if not isinstance(bbox, np.ndarray):
raise TypeError(
f'Input type is {type(bbox)}, which should be numpy.ndarray.')
bbox = bbox.copy()
if bbox_format == 'xyxy':
bbox_xywh = xyxy2xywh(bbox)
center, scale = box2cs(bbox_xywh, aspect_ratio, bbox_scale_factor)
bbox_cs = np.concatenate([center, scale], axis=-1)
elif bbox_format == 'xywh':
center, scale = box2cs(bbox, aspect_ratio, bbox_scale_factor)
bbox_cs = np.concatenate([center, scale], axis=-1)
elif bbox_format == 'cs':
bbox_cs = bbox
else:
raise ValueError('Only supports the format of `xyxy`, `cs` and `xywh`')
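# The weak-perspective params (s, tx, ty) were predicted inside a square crop of side h;
# below, s is rescaled from crop size to original image size and (tx, ty) are shifted by
# the crop-center offset normalized by the image half-size and the new scale.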
cx, cy, h = bbox_cs[..., 0], bbox_cs[..., 1], bbox_cs[..., 2] + 1e-6
hw, hh = img_width / 2., img_height / 2.
sx = cam[..., 0] * (1. / (img_width / h))
sy = cam[..., 0] * (1. / (img_height / h))
tx = ((cx - hw) / hw / (sx + 1e-6)) + cam[..., 1]
ty = ((cy - hh) / hh / (sy + 1e-6)) + cam[..., 2]
orig_cam = np.stack([sx, sy, tx, ty], axis=-1)
return orig_cam
def convert_bbox_to_intrinsic(bboxes: np.ndarray,
img_width: int = 224,
img_height: int = 224,
bbox_scale_factor: float = 1.25,
bbox_format: Literal['xyxy', 'xywh'] = 'xyxy'):
"""Convert bbox to intrinsic parameters.
Args:
bbox (np.ndarray): (frame, num_person, 4) or (frame, 4)
img_width (int): image width of training data.
img_height (int): image height of training data.
bbox_scale_factor (float): scale factor for expanding the bbox.
bbox_format (Literal['xyxy', 'xywh'] ): 'xyxy' means the left-up point
and right-bottom point of the bbox.
'xywh' means the left-up point and the width and height of the
bbox.
Returns:
np.ndarray: (frame, num_person, 3, 3) or (frame, 3, 3)
"""
if not isinstance(bboxes, np.ndarray):
raise TypeError(
f'Input type is {type(bboxes)}, which should be numpy.ndarray.')
assert bbox_format in ['xyxy', 'xywh']
if bbox_format == 'xyxy':
bboxes = xyxy2xywh(bboxes)
center_x = bboxes[..., 0] + bboxes[..., 2] / 2.0
center_y = bboxes[..., 1] + bboxes[..., 3] / 2.0
W = np.max(bboxes[..., 2:], axis=-1) * bbox_scale_factor
num_frame = bboxes.shape[0]
if bboxes.ndim == 3:
num_person = bboxes.shape[1]
Ks = np.zeros((num_frame, num_person, 3, 3))
elif bboxes.ndim == 2:
Ks = np.zeros((num_frame, 3, 3))
elif bboxes.ndim == 1:
Ks = np.zeros((3, 3))
else:
raise ValueError(f'Wrong input bboxes shape {bboxes.shape}')
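# Each K maps pixel coordinates in the (img_width x img_height) training crop back to the
# original image: scale by the expanded box size W relative to the crop resolution and
# translate to the top-left corner of the expanded square box.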
Ks[..., 0, 0] = W / img_width
Ks[..., 1, 1] = W / img_height
Ks[..., 0, 2] = center_x - W / 2.0
Ks[..., 1, 2] = center_y - W / 2.0
Ks[..., 2, 2] = 1
return Ks
def get_default_hmr_intrinsic(num_frame=1,
focal_length=1000,
det_width=224,
det_height=224) -> np.ndarray:
"""Get default hmr intrinsic, defined by how you trained.
Args:
num_frame (int, optional): num of frames. Defaults to 1.
focal_length (int, optional): defined same as your training.
Defaults to 1000.
det_width (int, optional): the size you used to detect.
Defaults to 224.
det_height (int, optional): the size you used to detect.
Defaults to 224.
Returns:
np.ndarray: shape of (N, 3, 3)
"""
K = np.zeros((num_frame, 3, 3))
K[:, 0, 0] = focal_length
K[:, 1, 1] = focal_length
K[:, 0, 2] = det_width / 2
K[:, 1, 2] = det_height / 2
K[:, 2, 2] = 1
return K
def convert_kp2d_to_bbox(
kp2d: np.ndarray,
bbox_format: Literal['xyxy', 'xywh'] = 'xyxy') -> np.ndarray:
"""Convert kp2d to bbox.
Args:
kp2d (np.ndarray): shape should be (num_frame, num_points, 2/3)
or (num_frame, num_person, num_points, 2/3).
bbox_format (Literal['xyxy', 'xywh'], optional): Defaults to 'xyxy'.
Returns:
np.ndarray: shape will be (num_frame, num_person, 4)
"""
assert bbox_format in ['xyxy', 'xywh']
if kp2d.ndim == 2:
kp2d = kp2d[None, None]
elif kp2d.ndim == 3:
kp2d = kp2d[:, None]
num_frame, num_person, _, _ = kp2d.shape
# bbox corners: take the min/max of the keypoint x/y coordinates over the point axis
x1 = np.min(kp2d[..., 0], axis=-1, keepdims=True)
y1 = np.min(kp2d[..., 1], axis=-1, keepdims=True)
x2 = np.max(kp2d[..., 0], axis=-1, keepdims=True)
y2 = np.max(kp2d[..., 1], axis=-1, keepdims=True)
bbox = np.concatenate([x1, y1, x2, y2], axis=-1)
assert bbox.shape == (num_frame, num_person, 4)
if bbox_format == 'xywh':
bbox = xyxy2xywh(bbox)
return bbox
def conver_verts_to_cam_coord(verts,
pred_cams,
bboxes_xy,
focal_length=5000.,
bbox_scale_factor=1.25,
bbox_format='xyxy'):
"""Convert vertices from the world coordinate to camera coordinate.
Args:
verts ([np.ndarray]): The vertices in the world coordinate.
The shape is (frame,num_person,6890,3) or (frame,6890,3).
pred_cams ([np.ndarray]): Camera parameters estimated by HMR or SPIN.
The shape is (frame,num_person,3) or (frame,3).
bboxes_xy ([np.ndarray]): (frame, num_person, 4|5) or (frame, 4|5)
focal_length ([float],optional): Defined same as your training.
bbox_scale_factor (float): scale factor for expanding the bbox.
bbox_format (Literal['xyxy', 'xywh'] ): 'xyxy' means the left-up point
and right-bottom point of the bbox.
'xywh' means the left-up point and the width and height of the
bbox.
Returns:
np.ndarray: The vertices in the camera coordinate.
The shape is (frame,num_person,6890,3) or (frame,6890,3).
np.ndarray: The intrinsic parameters of the pred_cam.
The shape is (num_frame, 3, 3).
"""
K0 = get_default_hmr_intrinsic(
focal_length=focal_length, det_height=224, det_width=224)
K1 = convert_bbox_to_intrinsic(
bboxes_xy,
bbox_scale_factor=bbox_scale_factor,
bbox_format=bbox_format)
# The projection onto the original image is K1 @ K0 @ (RX+T); rewrite it as
# K0 @ (K0_inv @ K1 @ K0) @ (RX+T) so the default intrinsic K0 can still be used
# after transforming the vertices by Ks = K0_inv @ K1 @ K0.
Ks = np.linalg.inv(K0) @ K1 @ K0
# convert vertices from world to camera
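# weak-perspective cam (s, tx, ty) -> camera translation [tx, ty, 2 * f / (res * s)],
# where res = 224 is assumed to match the HMR training crop resolution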
cam_trans = np.concatenate([
pred_cams[..., [1]], pred_cams[..., [2]], 2 * focal_length /
(224 * pred_cams[..., [0]] + 1e-9)
], -1)
verts = verts + cam_trans[..., None, :]
if verts.ndim == 4:
verts = np.einsum('fnij,fnkj->fnki', Ks, verts)
elif verts.ndim == 3:
verts = np.einsum('fij,fkj->fki', Ks, verts)
return verts, K0
def smooth_process(x,
smooth_type='savgol',
cfg_base_dir='configs/_base_/post_processing/'):
"""Smooth the array with the specified smoothing type.
Args:
x (np.ndarray): Shape should be (frame,num_person,K,C)
or (frame,K,C).
smooth_type (str, optional): Smooth type.
choose in ['oneeuro', 'gaus1d', 'savgol'].
Defaults to 'savgol'.
cfg_base_dir (str, optional): Config base dir,
default configs/_base_/post_processing/
Raises:
ValueError: check the input smoothing type.
Returns:
np.ndarray: Smoothed data. The shape should be
(frame,num_person,K,C) or (frame,K,C).
"""
assert smooth_type in ['oneeuro', 'gaus1d', 'savgol']
cfg = os.path.join(cfg_base_dir, smooth_type + '.py')
if isinstance(cfg, str):
cfg = mmcv.Config.fromfile(cfg)
elif not isinstance(cfg, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(cfg)}')
x = x.copy()
assert x.ndim == 3 or x.ndim == 4
smooth_func = build_post_processing(dict(cfg['smooth_cfg']))
if x.ndim == 4:
for i in range(x.shape[1]):
x[:, i] = smooth_func(x[:, i])
elif x.ndim == 3:
x = smooth_func(x)
return x
def speed_up_process(x,
speed_up_type='deciwatch',
cfg_base_dir='configs/_base_/post_processing/'):
"""Speed up the process with the specified speed up type.
Args:
x (torch.Tensor): Shape should be (frame,num_person,K,C)
or (frame,K,C).
speed_up_type (str, optional): Speed up type.
choose in ['deciwatch',
'deciwatch_interval5_q1',
'deciwatch_interval5_q2',
'deciwatch_interval5_q3',
'deciwatch_interval5_q4',
'deciwatch_interval5_q5',
'deciwatch_interval10_q1',
'deciwatch_interval10_q2',
'deciwatch_interval10_q3',
'deciwatch_interval10_q4',
'deciwatch_interval10_q5',]. Defaults to 'deciwatch'.
cfg_base_dir (str, optional): Config base dir.
Defaults to 'configs/_base_/post_processing/'
Raises:
ValueError: check the input speed up type.
Returns:
np.ndarray: Completed data. The shape should be
(frame,num_person,K,C) or (frame,K,C).
"""
if speed_up_type == 'deciwatch':
speed_up_type = 'deciwatch_interval5_q3'
assert speed_up_type in [
'deciwatch_interval5_q1',
'deciwatch_interval5_q2',
'deciwatch_interval5_q3',
'deciwatch_interval5_q4',
'deciwatch_interval5_q5',
'deciwatch_interval10_q1',
'deciwatch_interval10_q2',
'deciwatch_interval10_q3',
'deciwatch_interval10_q4',
'deciwatch_interval10_q5',
]
cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
if isinstance(cfg, str):
cfg = mmcv.Config.fromfile(cfg)
elif not isinstance(cfg, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(cfg)}')
x = x.clone()
assert x.ndim == 4 or x.ndim == 5
cfg_dict = cfg['speed_up_cfg']
cfg_dict['device'] = x.device
speed_up_func = build_post_processing(cfg_dict)
if x.ndim == 5:
for i in range(x.shape[1]):
x[:, i] = speed_up_func(x[:, i])
elif x.ndim == 4:
x = speed_up_func(x)
return np.array(x.cpu())
def get_speed_up_interval(speed_up_type,
cfg_base_dir='configs/_base_/post_processing/'):
"""Get the interval of specific speed up type.
Args:
speed_up_type (str, optional): Speed up type.
choose in ['deciwatch',
'deciwatch_interval5_q1',
'deciwatch_interval5_q2',
'deciwatch_interval5_q3',
'deciwatch_interval5_q4',
'deciwatch_interval5_q5',
'deciwatch_interval10_q1',
'deciwatch_interval10_q2',
'deciwatch_interval10_q3',
'deciwatch_interval10_q4',
'deciwatch_interval10_q5',]. Defaults to 'deciwatch'.
cfg_base_dir (str, optional): Config base dir,
default configs/_base_/post_processing/
Raises:
ValueError: check the input speed up type.
Returns:
int: speed up interval
"""
if speed_up_type == 'deciwatch':
speed_up_type = 'deciwatch_interval5_q3'
assert speed_up_type in [
'deciwatch_interval5_q1',
'deciwatch_interval5_q2',
'deciwatch_interval5_q3',
'deciwatch_interval5_q4',
'deciwatch_interval5_q5',
'deciwatch_interval10_q1',
'deciwatch_interval10_q2',
'deciwatch_interval10_q3',
'deciwatch_interval10_q4',
'deciwatch_interval10_q5',
]
cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
if isinstance(cfg, str):
cfg = mmcv.Config.fromfile(cfg)
elif not isinstance(cfg, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(cfg)}')
return cfg['speed_up_cfg']['interval']
def speed_up_interpolate(selected_frames, speed_up_frames, smpl_poses,
smpl_betas, pred_cams, bboxes_xyxy):
"""Interpolate smpl_betas, pred_cams, and bboxes_xyxyx for speed up.
Args:
selected_frames (np.ndarray): Shape should be (selected frame number).
speed_up_frames (int): Total speed up frame number
smpl_poses (np.ndarray): selected frame smpl poses parameter
smpl_betas (np.ndarray): selected frame smpl shape parameter
pred_cams (np.ndarray): selected frame camera parameter
bboxes_xyxy (np.ndarray): selected frame bbox
Returns:
smpl_poses (np.ndarray): interpolated frame smpl poses parameter
smpl_betas (np.ndarray): interpolated frame smpl shape parameter
pred_cams (np.ndarray): interpolated frame camera parameter
bboxes_xyxy (np.ndarray): interpolated frame bbox
"""
selected_frames = selected_frames[selected_frames <= speed_up_frames]
pred_cams[:speed_up_frames, :] = interpolate.interp1d(
selected_frames, pred_cams[selected_frames, :], kind='linear', axis=0)(
np.arange(0, max(selected_frames)))
bboxes_xyxy[:speed_up_frames, :] = interpolate.interp1d(
selected_frames,
bboxes_xyxy[selected_frames, :],
kind='linear',
axis=0)(
np.arange(0, max(selected_frames)))
smpl_betas[:speed_up_frames, :] = interpolate.interp1d(
selected_frames, smpl_betas[selected_frames, :], kind='linear',
axis=0)(
np.arange(0, max(selected_frames)))
return smpl_poses, smpl_betas, pred_cams, bboxes_xyxy
def process_mmtracking_results(mmtracking_results,
max_track_id,
bbox_thr=None):
"""Process mmtracking results.
Args:
mmtracking_results ([list]): mmtracking_results.
bbox_thr (float): threshold for bounding boxes.
max_track_id (int): the maximum track id.
Returns:
person_results ([list]): a list of tracked bounding boxes
max_track_id (int): the maximum track id.
instance_num (int): the number of instance.
"""
person_results = []
# 'track_results' is changed to 'track_bboxes'
# in https://github.com/open-mmlab/mmtracking/pull/300
if 'track_bboxes' in mmtracking_results:
tracking_results = mmtracking_results['track_bboxes'][0]
elif 'track_results' in mmtracking_results:
tracking_results = mmtracking_results['track_results'][0]
tracking_results = np.array(tracking_results)
if bbox_thr is not None:
assert tracking_results.shape[-1] == 6
valid_idx = np.where(tracking_results[:, 5] > bbox_thr)[0]
tracking_results = tracking_results[valid_idx]
for track in tracking_results:
person = {}
person['track_id'] = int(track[0])
if max_track_id < int(track[0]):
max_track_id = int(track[0])
person['bbox'] = track[1:]
person_results.append(person)
person_results = sorted(person_results, key=lambda x: x.get('track_id', 0))
instance_num = len(person_results)
return person_results, max_track_id, instance_num
def process_mmdet_results(mmdet_results, cat_id=1, bbox_thr=None):
"""Process mmdet results, and return a list of bboxes.
Args:
mmdet_results (list|tuple): mmdet results.
bbox_thr (float): threshold for bounding boxes.
cat_id (int): category id (default: 1 for human)
Returns:
person_results (list): a list of detected bounding boxes
"""
if isinstance(mmdet_results, tuple):
det_results = mmdet_results[0]
else:
det_results = mmdet_results
bboxes = det_results[cat_id - 1]
person_results = []
bboxes = np.array(bboxes)
if bbox_thr is not None:
assert bboxes.shape[-1] == 5
valid_idx = np.where(bboxes[:, 4] > bbox_thr)[0]
bboxes = bboxes[valid_idx]
for bbox in bboxes:
person = {}
person['bbox'] = bbox
person_results.append(person)
return person_results
def prepare_frames(input_path=None):
"""Prepare frames from input_path.
Args:
input_path (str, optional): Defaults to None.
Raises:
ValueError: check the input path.
Returns:
List[np.ndarray]: prepared frames
"""
if Path(input_path).is_file():
img_list = [mmcv.imread(input_path)]
if img_list[0] is None:
video = mmcv.VideoReader(input_path)
assert video.opened, f'Failed to load file {input_path}'
img_list = list(video)
elif Path(input_path).is_dir():
# input_type = 'folder'
file_list = [
os.path.join(input_path, fn) for fn in os.listdir(input_path)
if fn.lower().endswith(('.png', '.jpg'))
]
file_list.sort()
img_list = [mmcv.imread(img_path) for img_path in file_list]
assert len(img_list), f'Failed to load image from {input_path}'
else:
raise ValueError('Input path should be a file or folder.'
f' Got invalid input path: {input_path}')
return img_list
def extract_feature_sequence(extracted_results,
frame_idx,
causal,
seq_len,
step=1):
"""Extract the target frame from person results, and pad the sequence to a
fixed length.
Args:
extracted_results (List[List[Dict]]): Multi-frame feature extraction
results stored in a nested list. Each element of the outer list
is the feature extraction results of a single frame, and each
element of the inner list is the feature information of one person,
which contains:
features (ndarray): extracted features
track_id (int): unique id of each person, required when
``with_track_id==True``
bbox ((4, ) or (5, )): left, top, right, bottom, [score]
frame_idx (int): The index of the frame in the original video.
causal (bool): If True, the target frame is the first frame in
a sequence. Otherwise, the target frame is in the middle of a
sequence.
seq_len (int): The number of frames in the input sequence.
step (int): Step size to extract frames from the video.
Returns:
List[List[Dict]]: Multi-frame feature extraction results stored in a
nested list with a length of seq_len.
int: The target frame index in the padded sequence.
"""
if causal:
frames_left = 0
frames_right = seq_len - 1
else:
frames_left = (seq_len - 1) // 2
frames_right = frames_left
num_frames = len(extracted_results)
# get the padded sequence
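# pad by repeating the first/last frame whenever the requested window extends past either end of the video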
pad_left = max(0, frames_left - frame_idx // step)
pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step)
start = max(frame_idx % step, frame_idx - frames_left * step)
end = min(num_frames - (num_frames - 1 - frame_idx) % step,
frame_idx + frames_right * step + 1)
extracted_results_seq = [extracted_results[0]] * pad_left + \
extracted_results[start:end:step] + [extracted_results[-1]] * pad_right
return extracted_results_seq
def get_different_colors(number_of_colors,
flag=0,
alpha: float = 1.0,
mode: str = 'bgr',
int_dtype: bool = True):
"""Get a numpy of colors of shape (N, 3)."""
mode = mode.lower()
assert set(mode).issubset({'r', 'g', 'b', 'a'})
nst0 = np.random.get_state()
np.random.seed(flag)
colors = []
for i in np.arange(0., 360., 360. / number_of_colors):
hue = i / 360.
lightness = (50 + np.random.rand() * 10) / 100.
saturation = (90 + np.random.rand() * 10) / 100.
colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
colors_np = np.asarray(colors)
if int_dtype:
colors_bgr = (255 * colors_np).astype(np.uint8)
else:
colors_bgr = colors_np.astype(np.float32)
# recover the random state
np.random.set_state(nst0)
color_dict = {}
if 'a' in mode:
# alpha is a single extra channel, so it must have shape (N, 1), not (N, 3)
color_dict['a'] = np.ones((colors_bgr.shape[0], 1)) * alpha
color_dict['b'] = colors_bgr[:, 0:1]
color_dict['g'] = colors_bgr[:, 1:2]
color_dict['r'] = colors_bgr[:, 2:3]
colors_final = []
for channel in mode:
colors_final.append(color_dict[channel])
colors_final = np.concatenate(colors_final, -1)
return colors_final
|
AutotestWebD/apps/config/views/http_conf.py | yangjourney/sosotest | 422 | 11164484 |
from apps.config.services.http_confService import *
from apps.common.config import commonWebConfig
from apps.common.func.CommonFunc import *
import math
from django.shortcuts import HttpResponse
def getDebugBtn(request):
context = {}
httpConf = HttpConfService.queryHttpConfSort(request)
context['httpConf'] = httpConf
httpConfList = []
httpConfList.append([])
if len(httpConf) <= commonWebConfig.debugBtnCount:
for k in range(0,len(httpConf)):
httpConfList[0].append(httpConf[k])
else:
for k in range(0,commonWebConfig.debugBtnCount-1):
httpConfList[0].append(httpConf[k])
httpConfListSize = math.ceil((len(httpConf)-(commonWebConfig.debugBtnCount-1)) / commonWebConfig.debugBtnCount)
for i in range(1,httpConfListSize+1):
httpConfList.append([])
for j in range(i*commonWebConfig.debugBtnCount-1,i*commonWebConfig.debugBtnCount+(commonWebConfig.debugBtnCount-1)):
if j >= len(httpConf):
break
httpConfList[i].append(httpConf[j])
context["httpConfList"] = httpConfList
if len(httpConfList) > 1:
context["httpConfListPage"] = "open"
else:
context["httpConfListPage"] = "close"
return context
def getHttpConf(request):
httpConf = HttpConfService.queryHttpConfSort(request)
httpConfArr = {}
httpConfArr["key"] = httpConf
return HttpResponse(ApiReturn(ApiReturn.CODE_OK,body=httpConfArr).toJson())
def getUiConf(request):
httpConf = HttpConfService.getHttpConfForUI()
return HttpResponse(ApiReturn(ApiReturn.CODE_OK,body=httpConf).toJson()) |
tools/bin/pythonSrc/lockfile-0.9.1/setup.py | YangHao666666/hawq | 450 | 11164500 | #!/usr/bin/env python
V = "0.9.1"
from distutils.core import setup
setup(name='lockfile',
author='<NAME>',
author_email='<EMAIL>',
url='http://code.google.com/p/pylockfile/',
download_url=('http://code.google.com/p/pylockfile/downloads/'
'detail?name=lockfile-%s.tar.gz' % V),
version=V,
description="Platform-independent file locking module",
long_description=open("README").read(),
packages=['lockfile'],
license='MIT License',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows :: Windows NT/2000',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
wrappers/python/examples/box_dimensioner_multicam/calibration_kabsch.py | Moktarino/librealsense | 6,457 | 11164538 |
##################################################################################################
## License: Apache 2.0. See LICENSE file in root directory. ####
##################################################################################################
## Box Dimensioner with multiple cameras: Helper files ####
##################################################################################################
import pyrealsense2 as rs
import calculate_rmsd_kabsch as rmsd
import numpy as np
from helper_functions import cv_find_chessboard, get_chessboard_points_3D, get_depth_at_pixel, convert_depth_pixel_to_metric_coordinate
from realsense_device_manager import post_process_depth_frame
"""
_ _ _ _____ _ _
| | | | ___ | | _ __ ___ _ __ | ___|_ _ _ __ ___ | |_ (_) ___ _ __ ___
| |_| | / _ \| || '_ \ / _ \| '__| | |_ | | | || '_ \ / __|| __|| | / _ \ | '_ \ / __|
| _ || __/| || |_) || __/| | | _| | |_| || | | || (__ | |_ | || (_) || | | |\__ \
|_| |_| \___||_|| .__/ \___||_| |_| \__,_||_| |_| \___| \__||_| \___/ |_| |_||___/
_|
"""
def calculate_transformation_kabsch(src_points, dst_points):
"""
Calculates the optimal rigid transformation from src_points to
dst_points
(regarding the least squares error)
Parameters:
-----------
src_points: array
(3,N) matrix
dst_points: array
(3,N) matrix
Returns:
-----------
rotation_matrix: array
(3,3) matrix
translation_vector: array
(3,1) matrix
rmsd_value: float
"""
assert src_points.shape == dst_points.shape
if src_points.shape[0] != 3:
raise Exception("The input data matrix had to be transposed in order to compute transformation.")
src_points = src_points.transpose()
dst_points = dst_points.transpose()
src_points_centered = src_points - rmsd.centroid(src_points)
dst_points_centered = dst_points - rmsd.centroid(dst_points)
rotation_matrix = rmsd.kabsch(src_points_centered, dst_points_centered)
rmsd_value = rmsd.kabsch_rmsd(src_points_centered, dst_points_centered)
translation_vector = rmsd.centroid(dst_points) - np.matmul(rmsd.centroid(src_points), rotation_matrix)
return rotation_matrix.transpose(), translation_vector.transpose(), rmsd_value
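# A minimal usage sketch (synthetic points, not part of the box dimensioner demo):
#
#     theta = 0.3
#     R_true = np.array([[np.cos(theta), -np.sin(theta), 0.],
#                        [np.sin(theta), np.cos(theta), 0.],
#                        [0., 0., 1.]])
#     src = np.random.rand(3, 30)
#     dst = R_true @ src + np.array([[0.1], [0.2], [0.3]])
#     R, t, err = calculate_transformation_kabsch(src, dst)  # err should be close to 0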
"""
__ __ _ ____ _ _
| \/ | __ _ (_) _ __ / ___| ___ _ __ | |_ ___ _ __ | |_
| |\/| | / _` || || '_ \ | | / _ \ | '_ \ | __|/ _ \| '_ \ | __|
| | | || (_| || || | | | | |___| (_) || | | || |_| __/| | | || |_
|_| |_| \__,_||_||_| |_| \____|\___/ |_| |_| \__|\___||_| |_| \__|
"""
class Transformation:
def __init__(self, rotation_matrix, translation_vector):
self.pose_mat = np.zeros((4,4))
self.pose_mat[:3,:3] = rotation_matrix
self.pose_mat[:3,3] = translation_vector.flatten()
self.pose_mat[3,3] = 1
def apply_transformation(self, points):
"""
Applies the transformation to the pointcloud
Parameters:
-----------
points : array
(3, N) matrix where N is the number of points
Returns:
----------
points_transformed : array
(3, N) transformed matrix
"""
assert(points.shape[0] == 3)
n = points.shape[1]
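# append a row of ones so the (3, N) points become homogeneous (4, N) and the 4x4 pose applies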
points_ = np.vstack((points, np.ones((1,n))))
points_trans_ = np.matmul(self.pose_mat, points_)
points_transformed = np.true_divide(points_trans_[:3,:], points_trans_[[-1], :])
return points_transformed
def inverse(self):
"""
Computes the inverse transformation and returns a new Transformation object
Returns:
-----------
inverse: Transformation
"""
rotation_matrix = self.pose_mat[:3,:3]
translation_vector = self.pose_mat[:3,3]
rot = np.transpose(rotation_matrix)
trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)
return Transformation(rot, trans)
class PoseEstimation:
def __init__(self, frames, intrinsic, chessboard_params):
assert(len(chessboard_params) == 3)
self.frames = frames
self.intrinsic = intrinsic
self.chessboard_params = chessboard_params
def get_chessboard_corners_in3d(self):
"""
Searches the chessboard corners in the infrared images of
every connected device and uses the information in the
corresponding depth image to calculate the 3d
coordinates of the chessboard corners in the coordinate system of
the camera
Returns:
-----------
corners3D : dict
keys: str
Serial number of the device
values: [success, points3D, validDepths]
success: bool
Indicates whether the operation was successful
points3d: array
(3,N) matrix with the coordinates of the chessboard corners
in the coordinate system of the camera. N is the number of corners
in the chessboard. May contain points with invalid depth values
validDepths: [bool]*
Sequence with length N indicating which point in points3D has a valid depth value
"""
corners3D = {}
for (info, frameset) in self.frames.items():
serial = info[0]
product_line = info[1]
depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
if product_line == "L500":
infrared_frame = frameset[(rs.stream.infrared, 0)]
else:
infrared_frame = frameset[(rs.stream.infrared, 1)]
depth_intrinsics = self.intrinsic[serial][rs.stream.depth]
found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
corners3D[serial] = [found_corners, None, None, None]
if found_corners:
points3D = np.zeros((3, len(points2D[0])))
validPoints = [False] * len(points2D[0])
for index in range(len(points2D[0])):
corner = points2D[:,index].flatten()
depth = get_depth_at_pixel(depth_frame, corner[0], corner[1])
if depth != 0 and depth is not None:
validPoints[index] = True
[X,Y,Z] = convert_depth_pixel_to_metric_coordinate(depth, corner[0], corner[1], depth_intrinsics)
points3D[0, index] = X
points3D[1, index] = Y
points3D[2, index] = Z
corners3D[serial] = found_corners, points2D, points3D, validPoints
return corners3D
def perform_pose_estimation(self):
"""
Calculates the extrinsic calibration from the coordinate space of the camera to the
coordinate space spanned by a chessboard by retrieving the 3d coordinates of the
chessboard with the depth information and subsequently using the Kabsch algorithm
for finding the optimal rigid transformation between the two coordinate spaces
Returns:
-----------
retval : dict
keys: str
Serial number of the device
values: [success, transformation, points2D, rmsd]
success: bool
transformation: Transformation
Rigid transformation from the coordinate system of the camera to
the coordinate system of the chessboard
points2D: array
[2,N] array of the chessboard corners used for pose_estimation
rmsd:
Root mean square deviation between the observed chessboard corners and
the corners in the local coordinate system after transformation
"""
corners3D = self.get_chessboard_corners_in3d()
retval = {}
for (serial, [found_corners, points2D, points3D, validPoints] ) in corners3D.items():
objectpoints = get_chessboard_points_3D(self.chessboard_params)
retval[serial] = [False, None, None, None]
if found_corners == True:
#initial vectors are just for correct dimension
valid_object_points = objectpoints[:,validPoints]
valid_observed_object_points = points3D[:,validPoints]
#check for sufficient points
if valid_object_points.shape[1] < 5:
print("Not enough points have a valid depth for calculating the transformation")
else:
[rotation_matrix, translation_vector, rmsd_value] = calculate_transformation_kabsch(valid_object_points, valid_observed_object_points)
retval[serial] = [True, Transformation(rotation_matrix, translation_vector), points2D, rmsd_value]
print("RMS error for calibration with device number", serial, "is :", rmsd_value, "m")
return retval
def find_chessboard_boundary_for_depth_image(self):
boundary = {}
for (info, frameset) in self.frames.items():
serial = info[0]
product_line = info[1]
depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
if product_line == "L500":
infrared_frame = frameset[(rs.stream.infrared, 0)]
else:
infrared_frame = frameset[(rs.stream.infrared, 1)]
found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
boundary[serial] = [np.floor(np.amin(points2D[0,:])).astype(int), np.floor(np.amax(points2D[0,:])).astype(int), np.floor(np.amin(points2D[1,:])).astype(int), np.floor(np.amax(points2D[1,:])).astype(int)]
return boundary |
test/setup_test_env.py | conan7882/CNN-Visualization | 201 | 11164554 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: setup_test_env.py
# Author: <NAME> <<EMAIL>>
import sys
sys.path.append('lib/')
IMPATH = 'data/'
CLASS_IMPATH = 'data/class_test/'
SAVE_DIR = 'data/'
|
Hash/to sha256.py | DazEB2/SimplePyScripts | 117 | 11164590 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def to_sha256(text):
import hashlib
return hashlib.sha256(text.encode()).hexdigest()
if __name__ == '__main__':
print(to_sha256('Hello World!'))
|
tools/ttrace_parser/scripts/ttrace.py | ziyik/TizenRT-1 | 511 | 11164594 | #!/usr/bin/env python
###########################################################################
#
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
from __future__ import print_function
import os
import sys
import time
import zlib
import errno
import string
import select
import optparse
import pid_parser
import subprocess
flattened_css_file = 'style.css'
flattened_js_file = 'tscript.js'
g_device_serial = None
class OptionParserIgnoreErrors(optparse.OptionParser):
def error(self, msg):
pass
def exit(self):
pass
def print_usage(self):
pass
def print_help(self):
pass
def print_version(self):
pass
def compose_html_win(script_dir, options, css, js, templates):
data = []
ret_fd = os.open(options.from_file_win, os.O_RDONLY | os.O_BINARY)
out = os.read(ret_fd, 4096)
parts = out.split('TRACE:', 1)
data.append(parts[1])
while True:
out = os.read(ret_fd, 4096)
keepReading = False
if len(out) > 0:
keepReading = True
data.append(out)
if not keepReading:
break
data = ''.join(data)
if data.startswith('\r\n'):
data = data.replace('\r\n', '\n')
data = data[1:]
html_filename = options.output_file
html_prefix = read_asset(script_dir, 'prefix.html')
html_suffix = read_asset(script_dir, 'suffix.html')
html_file = open(html_filename, 'w')
html_file.write(html_prefix % (css, js, templates))
size = 4096
dec = zlib.decompressobj()
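# decompress the zlib-compressed trace in 4 KB chunks and escape newlines so the data
# can be embedded in a JavaScript string literal inside the generated HTML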
for chunk in (data[i:i + size] for i in range(0, len(data), size)):
decoded_chunk = dec.decompress(chunk)
html_chunk = decoded_chunk.replace('\n', '\\n\\\n')
html_file.write(html_chunk)
html_out = dec.flush().replace('\n', '\\n\\\n')
# write body
html_file.write(html_out)
# write suffix
html_file.write(html_suffix)
html_file.close()
print("\n wrote file://%s\n" % os.path.abspath(options.output_file))
return
def compose_html(script_dir, options, css, js, templates):
html_filename = options.output_file
html_prefix = read_asset(script_dir, 'prefix.html')
html_suffix = read_asset(script_dir, 'suffix.html')
html_file = open(html_filename, 'w')
html_file.write(html_prefix % (css, js, templates))
cur_dir = os.getcwd()
# remove useless 2 lines
with open(os.path.join(cur_dir, options.from_text_file), "r") as input:
with open(os.path.join(cur_dir, options.from_text_file + 'composing'), "wb") as output:
for line in input:
if "capturing trace" in line:
continue
elif "TRACE:" in line:
continue
elif " done" in line:
continue
elif '\n' == line:
continue
else:
output.write(line)
# case not compressed, boot case
html_out = read_asset(script_dir, os.path.join(cur_dir, options.from_text_file + 'composing'))
html_out = html_out.replace('\n', '\\n\\\n')
os.remove(os.path.join(cur_dir, options.from_text_file + 'composing'))
# write body
html_file.write(html_out)
# Write suffix
html_file.write(html_suffix)
html_file.close()
print("\n wrote file://%s\n" % os.path.abspath(options.output_file))
return
def get_os_cmd(cmdARGS):
fd_popen = subprocess.Popen(cmdARGS.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ready = select.select([fd_popen.stdout, fd_popen.stderr], [], [fd_popen.stdout, fd_popen.stderr])
if fd_popen.stdout in ready[0]:
out = os.read(fd_popen.stdout.fileno(), 4096)
return out
else:
return 0
def sdb_shell(str_param):
global g_options
cmd_str = ['sdb']
if g_device_serial:
cmd_str.extend(['-s', str(g_device_serial)])
cmd_str.extend([str(str_param)])
os.system(string.join(cmd_str))
os.system('sleep 2')
def is_sdb_available():
no = 0
max_no = 10
sdb_shell('kill-server')
while(no < max_no):
str_cmd = get_os_cmd('sdb start-server')
str_cmd = get_os_cmd('sdb devices')
os.system('sleep 2')
l_devices = str_cmd.split('\n')
if len(l_devices) > 3:
if g_device_serial is None:
print('Please specify serial with -e option')
sys.exit(1)
dev_type = str_cmd.split("List of devices attached")[-1].split()
if 'device' in dev_type:
print('Ready to connect')
return dev_type[0]
else:
no = no + 1
print('retry...' + str(no))
sdb_shell('kill-server')
if no == max_no:
print('Could not connect to SDB devices')
sys.exit(1)
def set_sdb_root():
dev_type = is_sdb_available()
if dev_type == 0:
return 0
sdb_shell('root on')
if not ('emulator' in dev_type):
sdb_shell('shell change-booting-mode.sh --update')
print('SDB was rooted!!!')
return 1
def trace_bootup(cmd):
if set_sdb_root() == 0:
return
print(cmd + ' > /etc/ttrace.conf\'')
str_cmd = cmd + ' > /etc/ttrace.conf\''
os.system(str_cmd)
os.system('sleep 2')
sdb_shell('shell sync')
sdb_shell('shell reboot')
sdb_shell('kill-server')
def add_sdb_serial(command, serial):
if serial is not None:
command.insert(1, serial)
command.insert(1, '-s')
def main():
global g_device_serial
usage = "Usage: %prog [options] [category1 [category2 ...]]"
desc = "Example: %prog -b 32768 -t 15 gfx input view sched freq"
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-o', dest='output_file', help='write HTML to FILE',
default='trace.html', metavar='FILE')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
help='use a trace buffer size of N KB', metavar='N')
parser.add_option('-l', '--list-categories', dest='list_categories', default=False,
action='store_true', help='list the available categories and exit')
parser.add_option('-u', '--bootup', dest='trace_bootup', default=False,
action='store_true', help='trace boot up')
parser.add_option('--link-assets', dest='link_assets', default=False,
action='store_true', help='link to original CSS or JS resources '
'instead of embedding them')
parser.add_option('--from-file', dest='from_file', action='store',
help='read the trace from a file (compressed) rather than running a live trace')
parser.add_option('--from-file-win', dest='from_file_win', action='store',
help='read the trace from a file (compressed) rather than running a live trace on windows')
parser.add_option('--from-text-file', dest='from_text_file', action='store',
help='read the trace from a file (not compressed) rather than running a live trace')
parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
type='string', help='')
parser.add_option('-e', '--serial', dest='device_serial', type='string',
help='sdb device serial number')
parser.add_option('--async_start', dest='async_start', default=False, action='store_true',
help='start circular trace and return immediately')
parser.add_option('--async_dump', dest='async_dump', default=False, action='store_true',
help='dump the current contents of circular trace buffer')
parser.add_option('--async_stop', dest='async_stop', default=False, action='store_true',
help='stop tracing and dump the current contents of circular trace buffer')
parser.add_option('--append', dest='append', default=False, action='store_true',
help='append traces to the existing traces. do not clear the trace buffer')
parser.add_option('--backup', dest='backup', default=False, action='store_true',
help='back up the existing traces to /tmp/trace.backup and then clear the trace buffer')
options, args = parser.parse_args()
if options.list_categories:
atrace_args = ['sdb', 'shell', 'atrace', '--list_categories']
expect_trace = False
elif options.from_file is not None:
atrace_args = ['cat', options.from_file]
expect_trace = True
elif options.from_file_win is not None:
atrace_args = ['type', options.from_file_win]
expect_trace = True
elif options.from_text_file is not None:
atrace_args = ['cat', options.from_text_file]
expect_trace = True
else:
if options.trace_bootup:
atrace_args = ['sdb', 'shell', '\'echo', 'atrace']
expect_trace = True
else:
atrace_args = ['sdb', 'shell', 'atrace', '-z']
expect_trace = True
if options.trace_time is not None:
if options.trace_time > 0:
atrace_args.extend(['-t', str(options.trace_time)])
else:
parser.error('the trace time must be a positive number')
if options.trace_buf_size is not None:
if options.trace_buf_size > 0:
atrace_args.extend(['-b', str(options.trace_buf_size)])
else:
parser.error('the trace buffer size must be a positive number')
atrace_args.extend(args)
if atrace_args[0] == 'sdb':
add_sdb_serial(atrace_args, options.device_serial)
if options.device_serial:
g_device_serial = str(options.device_serial).strip()
else:
g_device_serial = None
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
if options.link_assets:
src_dir = os.path.join(script_dir, options.asset_dir, 'src')
build_dir = os.path.join(script_dir, options.asset_dir, 'build')
js_files, js_flattenizer, css_files, templates = get_assets(src_dir, build_dir)
css = '\n'.join(linked_css_tag % (os.path.join(src_dir, f)) for f in css_files)
js = '<script language="javascript">\n%s</script>\n' % js_flattenizer
js += '\n'.join(linked_js_tag % (os.path.join(src_dir, f)) for f in js_files)
else:
css_filename = os.path.join(script_dir, flattened_css_file)
js_filename = os.path.join(script_dir, flattened_js_file)
css = compiled_css_tag % (open(css_filename).read())
js = compiled_js_tag % (open(js_filename).read())
templates = ''
html_filename = options.output_file
if options.trace_bootup:
print("Trace for bootup")
atrace_args.extend(['--async_start'])
trace_bootup(string.join(atrace_args))
print("Please pull out the usb cable on target")
os.system('sleep ' + '40')
print("Please plug the usb cable to target")
os.system('sleep ' + '20')
atrace_args.remove('--async_start')
atrace_args.remove('\'echo')
atrace_args.extend(['-z', '--async_stop'])
expect_trace = True
if options.from_text_file:
compose_html(script_dir, options, css, js, templates)
return
elif options.from_file_win:
compose_html_win(script_dir, options, css, js, templates)
return
elif options.from_file:
print("From file")
if options.async_start:
atrace_args.extend(['--async_start'])
if options.async_dump:
atrace_args.extend(['--async_dump'])
if options.async_stop:
atrace_args.extend(['--async_stop'])
if options.append:
atrace_args.extend(['--append'])
if options.backup:
atrace_args.extend(['--backup'])
backup_trace = True
sdb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if options.async_start:
return
result = None
data = []
# Read the text portion of the output and watch for the 'TRACE:' marker that
# indicates the start of the trace data.
while result is None:
ready = select.select([sdb.stdout, sdb.stderr], [], [sdb.stdout, sdb.stderr])
if sdb.stderr in ready[0]:
err = os.read(sdb.stderr.fileno(), 4096)
sys.stderr.write(err)
sys.stderr.flush()
if sdb.stdout in ready[0]:
out = os.read(sdb.stdout.fileno(), 4096)
parts = out.split('\nTRACE:', 1)
txt = parts[0].replace('\r', '')
if len(parts) == 2:
# The '\nTRACE:' match stole the last newline from the text, so add it
# back here.
txt += '\n'
sys.stdout.write(txt)
sys.stdout.flush()
if len(parts) == 2:
data.append(parts[1])
sys.stdout.write("downloading trace...")
sys.stdout.flush()
break
result = sdb.poll()
# Read and buffer the data portion of the output.
while True:
ready = select.select([sdb.stdout, sdb.stderr], [], [sdb.stdout, sdb.stderr])
keepReading = False
if sdb.stderr in ready[0]:
err = os.read(sdb.stderr.fileno(), 4096)
if len(err) > 0:
keepReading = True
sys.stderr.write(err)
sys.stderr.flush()
if sdb.stdout in ready[0]:
out = os.read(sdb.stdout.fileno(), 4096)
if len(out) > 0:
keepReading = True
data.append(out)
if result is not None and not keepReading:
break
result = sdb.poll()
if result == 0:
if expect_trace:
if not data:
print(('No data was captured. Output file was not ' +
'written.'), file=sys.stderr)
sys.exit(1)
else:
# Indicate to the user that the data download is complete.
print(" done\n")
data = ''.join(data)
# Collapse CRLFs that are added by sdb shell.
if data.startswith('\r\n'):
data = data.replace('\r\n', '\n')
# Skip the initial newline.
data = data[1:]
html_prefix = read_asset(script_dir, 'prefix.html')
html_suffix = read_asset(script_dir, 'suffix.html')
html_file = open(html_filename, 'w')
trace_filename = html_filename + '.trace.raw'
trace_file = open(trace_filename, 'w')
html_file.write(html_prefix % (css, js, templates))
size = 4096
dec = zlib.decompressobj()
for chunk in (data[i:i + size] for i in range(0, len(data), size)):
decoded_chunk = dec.decompress(chunk)
html_chunk = decoded_chunk.replace('\n', '\\n\\\n')
html_file.write(html_chunk)
trace_file.write(html_chunk)
html_out = dec.flush().replace('\n', '\\n\\\n')
html_file.write(html_out)
# Write suffix
html_file.write(html_suffix)
html_file.close()
trace_file.close()
pid_parser.parse(trace_filename)
os.remove(trace_filename)
print("\nwrote file://%s\n" % os.path.abspath(options.output_file))
else: # i.e. result != 0
print('sdb returned error code %d' % result, file=sys.stderr)
sys.exit(1)
def read_asset(src_dir, filename):
return open(os.path.join(src_dir, filename)).read()
def get_assets(src_dir, build_dir):
sys.path.append(build_dir)
gen = __import__('generate_standalone_timeline_view', {}, {})
parse_deps = __import__('parse_deps', {}, {})
gen_templates = __import__('generate_template_contents', {}, {})
filenames = gen._get_input_filenames()
load_sequence = parse_deps.calc_load_sequence(filenames, src_dir)
js_files = []
js_flattenizer = "window.FLATTENED = {};\n"
js_flattenizer += "window.FLATTENED_RAW_SCRIPTS = {};\n"
css_files = []
for module in load_sequence:
js_files.append(os.path.relpath(module.filename, src_dir))
js_flattenizer += "window.FLATTENED['%s'] = true;\n" % module.name
for dependent_raw_script_name in module.dependent_raw_script_names:
js_flattenizer += (
"window.FLATTENED_RAW_SCRIPTS['%s'] = true;\n"
% dependent_raw_script_name)
for style_sheet in module.style_sheets:
css_files.append(os.path.relpath(style_sheet.filename, src_dir))
templates = gen_templates.generate_templates()
sys.path.pop()
return (js_files, js_flattenizer, css_files, templates)
compiled_css_tag = """<style type="text/css">%s</style>"""
compiled_js_tag = """<script language="javascript">%s</script>"""
linked_css_tag = """<link rel="stylesheet" href="%s"></link>"""
linked_js_tag = """<script language="javascript" src="%s"></script>"""
if __name__ == '__main__':
main()
|
tests/opytimizer/optimizers/population/test_osa.py | anukaal/opytimizer | 528 | 11164615 | from opytimizer.optimizers.population import osa
from opytimizer.spaces import search
def test_osa_params():
params = {
'beta': 1.9
}
new_osa = osa.OSA(params=params)
assert new_osa.beta == 1.9
def test_osa_params_setter():
new_osa = osa.OSA()
try:
new_osa.beta = 'a'
except:
new_osa.beta = 1.9
try:
new_osa.beta = -1
except:
new_osa.beta = 1.9
assert new_osa.beta == 1.9
def test_osa_update():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[-10, -10], upper_bound=[10, 10])
new_osa = osa.OSA()
new_osa.update(search_space, 1, 10)
|
tqsdk/lib/utils.py | contropist/tqsdk-python | 3,208 | 11164647 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mayanqiong'
import numpy as np
from pandas import DataFrame
from tqsdk.datetime import _get_trading_timestamp, _get_trade_timestamp
from tqsdk.rangeset import _rangeset_head, _rangeset_slice, _rangeset_length
"""
Parameter type checks
"""
from inspect import isfunction
def _check_volume_limit(min_volume, max_volume):
if min_volume is not None and min_volume <= 0:
raise Exception("最小下单手数(min_volume) %s 错误, 请检查 min_volume 是否填写正确" % min_volume)
if max_volume is not None and max_volume <= 0:
raise Exception("最大下单手数(max_volume) %s 错误, 请检查 max_volume 是否填写正确" % max_volume)
if (min_volume is None and max_volume) or (max_volume is None and min_volume):
raise Exception("最小下单手数(min_volume) %s 和 最大下单手数(max_volume) %s 必须用时填写" % (min_volume, max_volume))
if min_volume and max_volume and min_volume > max_volume:
raise Exception("最小下单手数(min_volume) %s ,最大下单手数(max_volume) %s 错误, 请检查 min_volume, max_volume 是否填写正确" % (
min_volume, max_volume))
return int(min_volume) if min_volume else None, int(max_volume) if max_volume else None
def _check_direction(direction):
if direction not in ("BUY", "SELL"):
raise Exception("下单方向(direction) %s 错误, 请检查 direction 参数是否填写正确" % direction)
return direction
def _check_offset(offset):
if offset not in ("OPEN", "CLOSE", "CLOSETODAY"):
raise Exception("开平标志(offset) %s 错误, 请检查 offset 是否填写正确" % offset)
return offset
def _check_offset_priority(offset_priority):
if len(offset_priority.replace(",", "").replace("今", "", 1).replace("昨", "", 1).replace("开", "", 1)) > 0:
raise Exception("开平仓顺序(offset_priority) %s 错误, 请检查 offset_priority 参数是否填写正确" % offset_priority)
return offset_priority
def _check_volume(volume):
_volume = int(volume)
if _volume <= 0:
raise Exception("下单手数(volume) %s 错误, 请检查 volume 是否填写正确" % volume)
return _volume
def _check_price(price):
if price in ("ACTIVE", "PASSIVE") or isfunction(price):
return price
else:
raise Exception("下单方式(price) %s 错误, 请检查 price 参数是否填写正确" % price)
def _check_time_table(time_table: DataFrame):
if not isinstance(time_table, DataFrame):
raise Exception(f"time_table 参数应该是 pandas.DataFrame 类型")
need_columns = {'price', 'target_pos', 'interval'} - set(time_table.columns)
if need_columns:
raise Exception(f"缺少必要的列 {need_columns}")
if time_table.shape[0] > 0:
if time_table['interval'].isnull().values.any() or np.where(time_table['interval'] < 0, True, False).any():
raise Exception(f"interval 列必须为正数,请检查参数 {time_table['interval']}")
if time_table['target_pos'].isnull().values.any() or not np.issubdtype(time_table['target_pos'].dtype, np.integer):
raise Exception(f"target_pos 列必须为整数,请检查参数 {time_table['target_pos']}")
if not (np.isin(time_table['price'], ('PASSIVE', 'ACTIVE', None)) | time_table['price'].apply(isfunction)).all():
raise Exception(f"price 列必须为 ('PASSIVE', 'ACTIVE', None, Callable) 之一,请检查参数 {time_table['price']}")
return time_table
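# A minimal sketch of a time_table that passes the checks above (the values are
# illustrative assumptions only, not part of the original module):
#
#   tt = DataFrame({
#       "interval": [30, 30],            # seconds per step, must be positive
#       "target_pos": [10, 0],           # integer target positions
#       "price": ["PASSIVE", "ACTIVE"],  # or None, or a callable
#   })
#   _check_time_table(tt)                # returns tt unchanged when valid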
def _get_deadline_from_interval(quote, interval):
"""将 interval (持续长度 seconds)列转换为 deadline(结束时间 nano_timestamp)"""
    # full trading time ranges of the current trading day
trading_timestamp = _get_trading_timestamp(quote, quote.datetime)
    trading_timestamp_nano_range = trading_timestamp['night'] + trading_timestamp['day'] # full trading time ranges of the current trading day
    # current time (quote time)
current_timestamp_nano = _get_trade_timestamp(quote.datetime, float('nan'))
if not trading_timestamp_nano_range[0][0] <= current_timestamp_nano < trading_timestamp_nano_range[-1][1]:
raise Exception("当前时间不在指定的交易时间段内")
deadline = []
for index, value in interval.items():
r = _rangeset_head(_rangeset_slice(trading_timestamp_nano_range, current_timestamp_nano), int(value * 1e9))
if _rangeset_length(r) < int(value * 1e9):
raise Exception("指定时间段超出当前交易日")
deadline.append(r[-1][1])
current_timestamp_nano = r[-1][1]
return deadline
|
RecoMET/METProducers/python/GlobalHaloData_cfi.py | ckamtsikis/cmssw | 852 | 11164681 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# File: GlobalHaloData_cfi.py
# Original Author: <NAME>, The University of Florida
# Description: Module to build GlobalHaloData Object and put into the event
# Date: Oct. 15, 2009
GlobalHaloData = cms.EDProducer("GlobalHaloDataProducer",
# Higher Level Reco
metLabel = cms.InputTag("caloMet"),
calotowerLabel = cms.InputTag("towerMaker"),
CSCSegmentLabel = cms.InputTag("cscSegments"),
CSCRecHitLabel = cms.InputTag("csc2DRecHits"),
MuonLabel = cms.InputTag("muons"),
EcalMinMatchingRadiusParam = cms.double(110.),
EcalMaxMatchingRadiusParam = cms.double(330.),
HcalMinMatchingRadiusParam = cms.double(110.),
HcalMaxMatchingRadiusParam = cms.double(490.),
CSCHaloDataLabel = cms.InputTag("CSCHaloData"),
EcalHaloDataLabel = cms.InputTag("EcalHaloData"),
HcalHaloDataLabel = cms.InputTag("HcalHaloData"),
CaloTowerEtThresholdParam = cms.double(0.3),
#Parameters for CSC-calo matching
MaxSegmentTheta = cms.double(0.7),
rh_et_threshforcscmatching_eb = cms.double(10.),
rcalominrsegm_lowthresh_eb = cms.double(-30.),
rcalominrsegm_highthresh_eb = cms.double(15.),
dtcalosegm_thresh_eb = cms.double(15.),
dphicalosegm_thresh_eb = cms.double(0.04),
rh_et_threshforcscmatching_ee = cms.double(10.),
rcalominrsegm_lowthresh_ee = cms.double(-30.),
rcalominrsegm_highthresh_ee = cms.double(30.),
dtcalosegm_thresh_ee = cms.double(15.),
dphicalosegm_thresh_ee = cms.double(0.04),
rh_et_threshforcscmatching_hb = cms.double(20.),
rcalominrsegm_lowthresh_hb = cms.double(-100.),
rcalominrsegm_highthresh_hb = cms.double(20.),
dtcalosegm_thresh_hb = cms.double(15.),
dphicalosegm_thresh_hb = cms.double(0.15),
rh_et_threshforcscmatching_he = cms.double(20.),
rcalominrsegm_lowthresh_he = cms.double(-30.),
rcalominrsegm_highthresh_he = cms.double(30.),
dtcalosegm_thresh_he = cms.double(15.),
dphicalosegm_thresh_he = cms.double(0.1),
IsHLT = cms.bool(False)
)
|
scripts/sentiment/process_usage_german.py | de9uch1/stanza | 3,633 | 11164687 | <filename>scripts/sentiment/process_usage_german.py
"""
USAGE is produced by the same people as SCARE.
USAGE has a German and English part. This script parses the German part.
Run the script as
process_usage_german.py path
Here, path should be where USAGE was unpacked. It will have the
documents, files, etc subdirectories.
https://www.romanklinger.de/usagecorpus/
"""
import csv
import glob
import os
import sys
import stanza
import scripts.sentiment.process_utils as process_utils
basedir = sys.argv[1]
nlp = stanza.Pipeline('de', processors='tokenize')
num_short_items = 0
snippets = []
csv_files = glob.glob(os.path.join(basedir, "files/de*csv"))
for csv_filename in csv_files:
with open(csv_filename, newline='') as fin:
cin = csv.reader(fin, delimiter='\t', quotechar=None)
lines = list(cin)
for index, line in enumerate(lines):
begin, end, snippet, sentiment = [line[i] for i in [2, 3, 4, 6]]
begin = int(begin)
end = int(end)
if len(snippet) != end - begin:
raise ValueError("Error found in {} line {}. Expected {} got {}".format(csv_filename, index, (end-begin), len(snippet)))
if sentiment.lower() == 'unknown':
continue
elif sentiment.lower() == 'positive':
sentiment = 2
elif sentiment.lower() == 'neutral':
sentiment = 1
elif sentiment.lower() == 'negative':
sentiment = 0
else:
raise ValueError("Tell John he screwed up and this is why he can't have Mox Opal: {}".format(sentiment))
doc = nlp(snippet)
text = " ".join(" ".join(token.text for token in sentence.tokens) for sentence in doc.sentences)
num_tokens = sum(len(sentence.tokens) for sentence in doc.sentences)
if num_tokens < 4:
num_short_items = num_short_items + 1
snippets.append("%d %s" % (sentiment, text))
print(len(snippets))
process_utils.write_list(os.path.join(basedir, "de-train.txt"), snippets)
|
observations/r/strike.py | hajime9652/observations | 199 | 11164691 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def strike(path):
"""Strike Duration Data
a cross-section from 1968 to 1976
*number of observations* : 62
*country* : United States
A dataframe containing :
duration
strike duration in days
prod
unanticipated output
<NAME>. (1985) “The duration of contract strikes in U.S.
manufacturing”, *Journal of Econometrics*, **28**, 5-28.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `strike.csv`.
Returns:
Tuple of np.ndarray `x_train` with 62 rows and 2 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'strike.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/Strike.csv'
maybe_download_and_extract(path, url,
save_file_name='strike.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
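# Usage sketch (not part of the original module; '~/data' is just an example
# cache directory -- the CSV is downloaded there on first use):
#
#   x_train, metadata = strike('~/data')
#   x_train.shape            # (62, 2), per the docstring above
#   metadata['columns']      # the 'duration' and 'prod' columns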
|
tests/commands/test__vi_big_g.py | my-personal-forks/Vintageous | 1,146 | 11164727 | from Vintageous.vi.utils import modes
from Vintageous.tests import set_text
from Vintageous.tests import add_sel
from Vintageous.tests import get_sel
from Vintageous.tests import first_sel
from Vintageous.tests import ViewTest
class Test_vi_big_g_InNormalMode(ViewTest):
def testCanMoveInNormalMode(self):
self.write('abc\nabc')
self.clear_sel()
self.add_sel(a=0, b=0)
self.view.run_command('_vi_big_g', {'mode': modes.NORMAL, 'count': 1})
self.assertEqual(self.R(6, 6), first_sel(self.view))
def testGoToHardEofIfLastLineIsEmpty(self):
self.write('abc\nabc\n')
self.clear_sel()
self.add_sel(a=0, b=0)
self.view.run_command('_vi_big_g', {'mode': modes.NORMAL, 'count': 1})
self.assertEqual(self.R(8, 8), first_sel(self.view))
class Test_vi_big_g_InVisualMode(ViewTest):
def testCanMoveInVisualMode(self):
self.write('abc\nabc\n')
self.clear_sel()
self.add_sel(a=0, b=1)
self.view.run_command('_vi_big_g', {'mode': modes.VISUAL, 'count': 1})
self.assertEqual(self.R(0, 8), first_sel(self.view))
class Test_vi_big_g_InInternalNormalMode(ViewTest):
def testCanMoveInModeInternalNormal(self):
self.write('abc\nabc\n')
self.clear_sel()
self.add_sel(self.R(1, 1))
self.view.run_command('_vi_big_g', {'mode': modes.INTERNAL_NORMAL, 'count': 1})
self.assertEqual(self.R(0, 8), first_sel(self.view))
def testOperatesLinewise(self):
self.write('abc\nabc\nabc\n')
self.clear_sel()
self.add_sel(self.R((1, 0), (1, 1)))
self.view.run_command('_vi_big_g', {'mode': modes.INTERNAL_NORMAL, 'count': 1})
self.assertEqual(self.R((0, 3), (2, 4)), first_sel(self.view))
class Test_vi_big_g_InVisualLineMode(ViewTest):
def testCanMoveInModeVisualLine(self):
self.write('abc\nabc\n')
self.clear_sel()
self.add_sel(a=0, b=4)
self.view.run_command('_vi_big_g', {'mode': modes.VISUAL_LINE, 'count': 1})
self.assertEqual(self.R(0, 8), first_sel(self.view))
|
pywebcopy/structures.py | wasim961/pywebcopy | 257 | 11164731 | # -*- coding: utf-8 -*-
"""
pywebcopy.structures
~~~~~~~~~~~~~~~~~~~~
Structures powering pywebcopy.
"""
from collections import MutableMapping
import requests
from requests.structures import OrderedDict
from six.moves.urllib.robotparser import RobotFileParser
__all__ = ['CaseInsensitiveDict', 'RobotsTxtParser']
class CaseInsensitiveDict(MutableMapping):
""" Flexible dictionary which creates less errors
during lookups.
Examples:
dict = CaseInsensitiveDict()
dict['Config'] = 'Config'
dict.get('config') => 'Config'
dict.get('CONFIG') => 'Config'
dict.get('conFig') => 'Config'
"""
def __init__(self, data=None, **kwargs):
self._store = OrderedDict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
self._store[key.lower()] = value
def __getitem__(self, key):
return self._store[key.lower()]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (key for key, value in self._store.items())
def __len__(self): # pragma: no cover
return len(self._store)
def __copy__(self): # pragma: no cover
return CaseInsensitiveDict(self._store)
def lower_case_items(self):
return (
(key.lower(), value) for key, value in self._store.items()
)
def __eq__(self, other):
if isinstance(other, MutableMapping):
other = CaseInsensitiveDict(other)
else: # pragma: no cover
raise NotImplementedError
return dict(self.lower_case_items()) == dict(other.lower_case_items())
class RobotsTxtParser(RobotFileParser):
"""Reads the robots.txt from the site.
Usage::
>>> rp = RobotsTxtParser(user_agent='*', url='http://some-site.com/robots.txt')
>>> rp.read()
>>> rp.can_fetch('/hidden/url_path')
>>> False
>>> rp.can_fetch('/public/url_path/')
>>> True
"""
user_agent = '*'
_get = None
def set_ua(self, ua):
self.user_agent = ua
def read(self):
"""Modify the read method to use the inbuilt http session instead
of using a new raw urllib connection.
This usually sets up a session and a cookie jar.
Thus subsequent requests should be faster.
"""
try:
f = self._get(self.url)
f.raise_for_status()
except requests.exceptions.HTTPError as err:
code = err.response.status_code
if code in (401, 403):
self.disallow_all = True
elif 400 <= code < 500:
self.allow_all = True
except requests.exceptions.ConnectionError:
self.allow_all = True
else:
self.parse(f.text.splitlines())
def can_fetch(self, url, user_agent=None):
return super(RobotsTxtParser, self).can_fetch(user_agent or self.user_agent, url)
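# Wiring sketch (an assumption about how `_get` is injected -- the class leaves
# it as None and the surrounding pywebcopy code normally assigns a session
# getter; the URL below is a placeholder):
#
#   import requests
#   session = requests.Session()
#   rp = RobotsTxtParser(url='http://example.com/robots.txt')
#   rp.set_ua('Mozilla/5.0')
#   rp._get = session.get
#   rp.read()
#   rp.can_fetch('/hidden/url_path')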
|
test/test_country_selector.py | Quentame/bimmer_connected | 141 | 11164740 | """Test the country selection class."""
import unittest
from bimmer_connected.country_selector import valid_regions, Regions, get_region_from_name
class TestCountrySelector(unittest.TestCase):
"""Test the country selection class."""
def test_valid_regions(self):
"""Test getting list of regions."""
self.assertIn('china', valid_regions())
def test_region_from_name(self):
"""Test parsing region from string."""
self.assertEqual(Regions.CHINA, get_region_from_name('China'))
self.assertEqual(Regions.REST_OF_WORLD, get_region_from_name('rest_of_world'))
self.assertEqual(Regions.NORTH_AMERICA, get_region_from_name('nOrTh_AmErica'))
def test_invalid_region(self):
"""Test exception handling."""
with self.assertRaises(ValueError):
get_region_from_name('random_text')
|
examples/plot_hidden_short_snakes_typed_gen.py | LemonLison/pystruct | 501 | 11164763 | """
==============================================
Conditional Interactions on the Snakes Dataset
==============================================
This is a variant of plot_snakes.py
Snakes are hiding, so we have 2 tasks:
- determining if a snake is in the picture,
- identifying its head to tail body.
We use the NodeTypeEdgeFeatureGraphCRF class with 2 type of nodes.
HERE WE GENERATE THE SNAKES AT RANDOM INSTEAD OF USING THE SNAKE DATASET
This example uses the snake dataset introduced in
Nowozin, Rother, Bagon, Sharp, Yao, Kohli: Decision Tree Fields ICCV 2011
This dataset is specifically designed to require the pairwise interaction terms
to be conditioned on the input, in other words to use non-trivial edge-features.
The task is as follows: a "snake" of length ten wandered over a grid. For
each cell, it had the option to go up, down, left or right (unless it came from
there). The input consists of these decisions, while the desired output is an
annotation of the snake from 0 (head) to 9 (tail). See the plots for an
example.
As input features we use a 3x3 window around each pixel (and pad with background
where necessary). We code the five different input colors (for up, down, left, right,
background) using a one-hot encoding. This is a rather naive approach, not using any
information about the dataset (other than that it is a 2d grid).
The task can not be solved using the simple DirectionalGridCRF - which can only
infer head and tail (which are also possible to infer just from the unary
features). If we add edge-features that contain the features of the nodes that are
connected by the edge, the CRF can solve the task.
From an inference point of view, this task is very hard. QPBO move-making is
not able to solve it alone, so we use the relaxed AD3 inference for learning.
PS: This example runs for a while (5 minutes on 12 cores, 20 minutes on one core for me).
But it does work as well as Decision Tree Fields ;)
JL Meunier - January 2017
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943
Copyright Xerox
"""
import sys, os, time
import random, cPickle
import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
from pystruct.learners import OneSlackSSVM
from pystruct.datasets import load_snakes
from pystruct.models import NodeTypeEdgeFeatureGraphCRF
from plot_snakes import one_hot_colors
from plot_hidden_snakes import augmentWithNoSnakeImages, shuffle_in_unison, shorten_snakes
from plot_hidden_short_snakes_typed import plot_snake, prepare_data, prepare_picture_data, convertToTwoType,listConstraints, listConstraints_ATMOSTONE, REPORT
#==============================================================================================
bFIXED_RANDOM_SEED = True
NCELL=10
#INFERENCE="ad3+" #ad3+ is required when there are hard logic constraints
INFERENCE="ad3" #ad3 is faster than ad3+
N_JOBS=8
#MAXITER=750
lNbSAMPLE=[200, 400, 600, 800] #how many sample do we generate for each experiment?
nbEXPERIMENT = 10
# N_JOBS=1
# lNbSAMPLE=[20]
# nbEXPERIMENT=1
# MAXITER=3
#==============================================================================================
def printConfig():
print "== NCELL=", NCELL
print "== FIXED_SEED=", bFIXED_RANDOM_SEED
print "== INFERENCE =", INFERENCE
print "== N_JOBS =", N_JOBS
#print "== MAX_ITER=", MAXITER
print "== lNbSAMPLE=", lNbSAMPLE
print "== nbEXPERIMENT=", nbEXPERIMENT
if __name__ == '__main__': printConfig()
class GenSnakeException(Exception): pass
def genSnakes(N, dUniqueSnakelij, ncell=NCELL):
"""
Generate snakes at random.
    dUniqueSnakelij contains the signature of all snakes; we ensure the uniqueness of each snake.
    Return N tuples (snake, Y).
"""
ltSnakeY = []
ndim = 1+ ncell+1+ncell +1 #where we'll draw each snake. Border, possible straight snake, centre, possible straight snake, border
aBoard = np.zeros( (ndim, ndim) , dtype=np.int8)
im,jm = 1+ ncell, 1+ ncell #middle of board
lDirection = range(4) #assume it is N, E, S, W
lDirectionIncr = [(-1,0), (0,1), (1,0), (0,-1)]
lDirectionColor = [ [255,0,0], [255,255,0], [0,255,0], [0,255,255] ]
for _n in range(N):
while True:
aBoard[:,:] = -1 #all background
i,j = im,jm
lij = list()
ldir=list()
aSnake, Y = None, None
try:
for _ncell in range(ncell):
random.shuffle(lDirection) #we will try each direction in turn
for dir in lDirection:
_i, _j = i+lDirectionIncr[dir][0], j+lDirectionIncr[dir][1]
if aBoard[_i,_j] == -1: break #ok, valid direction, we jump on a background pixel
if aBoard[_i,_j] != -1: raise GenSnakeException("Failed to generate a snake") #got stuck
aBoard[i,j] = dir
lij.append( (i,j) )
ldir.append(dir)
i,j = _i,_j
try:
dUniqueSnakelij[tuple(lij)]
raise GenSnakeException("Same as in trainset")
except KeyError:
dUniqueSnakelij[tuple(lij)] = True
#ok we have a Snake, let's create the image with background borders
imin,jmin = map(min, zip(*lij))
imax,jmax = map(max, zip(*lij))
aSnake = np.zeros((imax-imin+3, jmax-jmin+3, 3), dtype=np.uint8)
aSnake[:,:,2] = 255 #0,0,255
aY = np.zeros((imax-imin+3, jmax-jmin+3) , dtype=np.uint8)
for _lbl, ((_i,_j), _dir) in enumerate(zip(lij, ldir)):
aSnake[_i-imin+1, _j-jmin+1,:] = lDirectionColor[_dir]
aY [_i-imin+1, _j-jmin+1] = _lbl + 1
break
except GenSnakeException: pass
ltSnakeY.append( (aSnake, aY) )
# print aSnake
# print aY
# plot_snake(aSnake)
return ltSnakeY
def plot_many_snakes(lX, nv=10, nh=20, ncell=NCELL):
"""
Plot the one-hot-encoded snake on grids of size nv x nh
"""
N = ncell+1 #to have border
i = 0
while i < len(lX):
j = min(i+nv*nh, len(lX))
lImg = lX[i:j]
allimg = np.zeros(shape=(N*nv,N*nh,3), dtype=np.uint8)
ih,iw = 0,0
for i_img, img in enumerate(lImg):
h,w,c = img.shape
assert c == 3
allimg[ih:ih+h, iw:iw+w, :] = img
iw += N
if i_img % nh == (nh-1):
ih += N
iw = 0
plot_snake(allimg)
i = j
def plot_mistakes(lY_ref, lY_pred, lX_pict, ncell=NCELL):
"""
    Plot the misclassified pictures: first NoSnake pictures predicted as Snake, then Snake pictures predicted as NoSnake
"""
_ltSnake = (list(), list()) #misclassified NoSnake pictures, misclassified Snake pictures
for _y_ref, _y_pred, _x in zip(lY_ref, lY_pred, lX_pict):
assert _y_ref.shape==_y_pred.shape
assert _y_ref.size ==_x.size/3+1
assert _y_ref[-1] in [ncell+1,ncell+2]
if _y_ref[-1] != _y_pred[-1]:
iSnake = _y_ref[-1] - ncell - 1 #0=NoSnake 1=Snake
_ltSnake[iSnake].append(_x)
print "NoSnake pictures predicted as Snake"
plot_many_snakes(_ltSnake[0])
print "Snake pictures predicted as NoSnake"
plot_many_snakes(_ltSnake[1])
if __name__ == '__main__':
if bFIXED_RANDOM_SEED:
np.random.seed(1605)
random.seed(98)
else:
np.random.seed()
random.seed()
print("Please be patient...")
snakes = load_snakes()
#--------------------------------------------------------------------------------------------------
#we always test against the original test set
X_test, Y_test = snakes['X_test'], snakes['Y_test']
#plot_many_snakes(X_test)
# X_test_img = X_test
if NCELL <10: X_test, Y_test = shorten_snakes(X_test, Y_test, NCELL)
nb_hidden, X_test, Y_test = augmentWithNoSnakeImages(X_test, Y_test, "test", False, nCell=NCELL)
Y_test_pict = np.array([1]*(len(X_test)-nb_hidden) + [0]*nb_hidden)
print "TEST SET ", len(X_test), len(Y_test)
X_test_pict = X_test
X_test = [one_hot_colors(x) for x in X_test]
X_test_pict_feat = prepare_picture_data(X_test)
X_test_directions, X_test_edge_features = prepare_data(X_test)
#--------------------------------------------------------------------------------------------------
for iExp in range(nbEXPERIMENT):
print "#"*75
print "# EXPERIMENT %d / %d"%(iExp+1, nbEXPERIMENT)
print "#"*75
dUniqueSnakelij = dict()
lXY = genSnakes(max(lNbSAMPLE), dUniqueSnakelij)
X_train_all, Y_train_all = zip(*lXY)
X_train_all, Y_train_all = list(X_train_all), list(Y_train_all)
print "***** GENERATED %d snakes of length %d *****"%(len(X_train_all), NCELL)
#Also generate an additional test set
NTEST=100
lXYTest = genSnakes( NTEST, dUniqueSnakelij )
X_test_gen, Y_test_gen = zip(*lXYTest)
X_test_gen, Y_test_gen = list(X_test_gen), list(Y_test_gen)
print "***** GENERATED %d snakes of length %d *****"%(NTEST, NCELL)
# plot_many_snakes(X_test_img+X_test_gen)
nb_hidden, X_test_gen, Y_test_gen = augmentWithNoSnakeImages(X_test_gen, Y_test_gen, "test_gen", False, nCell=NCELL)
Y_test_gen_pict = np.array([1]*(len(X_test_gen)-nb_hidden) + [0]*nb_hidden)
print "GENERATED TEST SET ", len(X_test_gen), len(Y_test_gen)
X_test_gen = [one_hot_colors(x) for x in X_test_gen]
X_test_gen_pict_feat = prepare_picture_data(X_test_gen)
X_test_gen_directions, X_test_gen_edge_features = prepare_data(X_test_gen)
for nbSample in lNbSAMPLE:
print "======================================================================================================"
print "TRAINING"
X_train, Y_train = X_train_all[0:nbSample], Y_train_all[0:nbSample]
nb_hidden, X_train, Y_train = augmentWithNoSnakeImages(X_train, Y_train, "train", False, nCell=NCELL)
print "TRAIN SET ",len(X_train), len(Y_train)
Y_train_pict = np.array([1]*(len(X_train)-nb_hidden) + [0]*nb_hidden)
X_train = [one_hot_colors(x) for x in X_train]
X_train, Y_train, Y_train_pict = shuffle_in_unison(X_train, Y_train, Y_train_pict)
X_train_pict_feat = prepare_picture_data(X_train)
X_train_directions, X_train_edge_features = prepare_data(X_train)
#--------------------------------------------------------------------------------------------------
if True:
print "==========================================================================="
from pystruct.models.edge_feature_graph_crf import EdgeFeatureGraphCRF
print "ONE TYPE TRAINING AND TESTING: PIXELS"
inference = "qpbo"
crf = EdgeFeatureGraphCRF(inference_method=inference)
ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1,
# max_iter=MAXITER,
n_jobs=N_JOBS
#,verbose=1
, switch_to='ad3'
)
Y_train_flat = [y_.ravel() for y_ in Y_train]
print "\ttrain label histogram : ", np.histogram(np.hstack(Y_train_flat), bins=range(NCELL+2))
t0 = time.time()
ssvm.fit(X_train_edge_features, Y_train_flat)
print "FIT DONE IN %.1fs"%(time.time() - t0)
sys.stdout.flush()
t0 = time.time()
_Y_pred = ssvm.predict( X_test_edge_features )
REPORT(Y_test, _Y_pred, time.time() - t0, NCELL, "gen_singletype_%d.csv"%nbSample, True, "singletype_%d"%nbSample)
_Y_pred = ssvm.predict( X_test_gen_edge_features )
REPORT(Y_test_gen, _Y_pred, None , NCELL, "gen_singletype_gentest_%d.csv"%nbSample, True, "singletype_%d_gentest"%nbSample)
#--------------------------------------------------------------------------------------------------
if True:
print "_"*50
print "ONE TYPE TRAINING AND TESTING: PICTURES"
print "\ttrain label histogram : ", np.histogram(Y_train_pict, bins=range(3))
lr = LogisticRegression(class_weight='balanced')
mdl = GridSearchCV(lr , {'C':[0.1, 0.5, 1.0, 2.0] })
XX = np.vstack(X_train_pict_feat)
t0 = time.time()
mdl.fit(XX, Y_train_pict)
print "FIT DONE IN %.1fs"%(time.time() - t0)
t0 = time.time()
_Y_pred = mdl.predict( np.vstack(X_test_pict_feat) )
REPORT([Y_test_pict], _Y_pred, time.time() - t0, 2, "gen_picture.csv", True, "picture_logit_%d"%nbSample)
#--------------------------------------------------------------------------------------------------
print "======================================================================================================"
l_n_states = [NCELL+1, 2] # 11 states for pixel nodes, 2 states for pictures
l_n_feat = [45, 7] # 45 features for pixels, 7 for pictures
ll_n_feat = [[180, 45], # 2 feature between pixel nodes, 1 between pixel and picture
[45 , 0]] # , nothing between picture nodes (no picture_to_picture edge anyway)
print " TRAINING MULTI-TYPE MODEL "
#TRAINING
crf = NodeTypeEdgeFeatureGraphCRF(2, # How many node types?
l_n_states, # How many states per type?
l_n_feat, # How many node features per type?
ll_n_feat, # How many edge features per type x type?
inference_method=INFERENCE
)
print crf
ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=0.1,
# max_iter=MAXITER,
n_jobs=N_JOBS
)
print "======================================================================================================"
print "YY[0].shape", Y_train[0].shape
XX, YY = convertToTwoType(X_train,
X_train_edge_features, # list of node_feat , edges, edge_feat for pixel nodes
Y_train,
X_train_pict_feat, #a list of picture_node_features
Y_train_pict, #a list of integers [0,1]
nCell=NCELL)
print "\tlabel histogram : ", np.histogram( np.hstack([y.ravel() for y in YY]), bins=range(14))
print "YY[0].shape", YY[0].shape
crf.initialize(XX, YY)# check if the data is properly built
sys.stdout.flush()
t0 = time.time()
ssvm.fit(XX, YY)
print "FIT DONE IN %.1fs"%(time.time() - t0)
sys.stdout.flush()
print "_"*50
XX_test, YY_test =convertToTwoType(X_test,
X_test_edge_features, # list of node_feat , edges, edge_feat for pixel nodes
Y_test,
X_test_pict_feat, #a list of picture_node_features
Y_test_pict, #a list of integers [0,1]
nCell=NCELL)
print "\tlabel histogram (PIXELs and PICTUREs): ", np.histogram( np.hstack([y.ravel() for y in YY_test]), bins=range(14))
XX_test_gen, YY_test_gen =convertToTwoType(X_test_gen,
X_test_gen_edge_features, # list of node_feat , edges, edge_feat for pixel nodes
Y_test_gen,
                                                    X_test_gen_pict_feat, #a list of picture_node_features
                                                    Y_test_gen_pict, #a list of integers [0,1]
nCell=NCELL)
l_constraints = listConstraints_ATMOSTONE(XX_test , NCELL)
l_constraints_gen = listConstraints_ATMOSTONE(XX_test_gen, NCELL)
print "_"*50
print "\t- results without constraints (using %s)"%INFERENCE
t0 = time.time()
YY_pred = ssvm.predict( XX_test )
REPORT(YY_test, YY_pred, time.time() - t0 , NCELL+2, "gen_multitype_%d.csv"%nbSample, True, "multitype_%d"%nbSample)
#plot_mistakes(YY_test, YY_pred, X_test_pict)
YY_pred = ssvm.predict( XX_test_gen )
REPORT(YY_test_gen, YY_pred, None , NCELL+2, "gen_multitype_gentest_%d.csv"%nbSample, True, "multitype_%d_gentest"%nbSample)
print "_"*50
print "\t- results exploiting constraints (using ad3+)"
ssvm.model.inference_method = "ad3+"
t0 = time.time()
YY_pred = ssvm.predict( XX_test , l_constraints )
REPORT(YY_test, YY_pred, time.time() - t0 , NCELL+2, "gen_multitype_constraints_%d.csv"%nbSample, True, "multitype_constraints_%d"%nbSample)
YY_pred = ssvm.predict( XX_test_gen , l_constraints_gen )
REPORT(YY_test_gen, YY_pred, None , NCELL+2, "gen_multitype_constraints_gentest_%d.csv"%nbSample, True, "multitype_constraints_%d_gentest"%nbSample)
print "_"*50
print "One Experiment DONE"
print "ALL EXPERIMENTS DONE"
printConfig()
|
basenji/emerald.py | egilbertson-ucsf/basenji | 232 | 11164773 | <gh_stars>100-1000
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
import pdb
import os
import subprocess
import numpy as np
import pandas as pd
from pysam import VariantFile
'''
emerald.py
Methods to query LD using emeraLD.
'''
class EmeraldVCF:
def __init__(self, pop_vcf_stem):
self.pop_vcf_stem = pop_vcf_stem
def fetch(self, chrm, pos_start, pos_end, return_samples=False):
vcf_file = '%s.%s.vcf.gz' % (self.pop_vcf_stem, chrm)
vcf_open = VariantFile(vcf_file, drop_samples=(not return_samples))
return vcf_open.fetch(chrm, pos_start, pos_end)
def query_ld(self, snp_id, chrm, pos,
ld_t=0.1, return_pos=False,
max_ld_distance=1000000):
"""Retrieve SNPs in LD with the given SNP."""
chr_vcf_file = '%s.%s.vcf.gz' % (self.pop_vcf_stem, chrm)
if not os.path.isfile(chr_vcf_file):
print('WARNING: %s VCF not found.' % chrm)
ld_df = pd.DataFrame()
else:
# determine search region
ld_region_start = max(0, pos - max_ld_distance)
ld_region_end = pos + max_ld_distance
region_str = '--region %s:%d-%d' % (chrm, ld_region_start, ld_region_end)
# construct emerald command
cmd = 'emeraLD'
cmd += ' -i %s' % chr_vcf_file
cmd += ' --rsid %s' % snp_id
cmd += ' %s' % region_str
cmd += ' --threshold %f' % ld_t
cmd += ' --no-phase'
cmd += ' --extra'
cmd += ' --stdout'
cmd += ' 2> /dev/null'
ld_snps = [snp_id]
ld_r = [1.0]
ld_pos = [pos]
# parse returned lines
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
for line in proc.stdout:
line = line.decode('UTF-8')
if not line.startswith('#'):
a = line.split()
ld_pos.append(int(a[4]))
ld_snps.append(a[5])
ld_r.append(float(a[7]))
proc.communicate()
# sort by position
sort_indexes = np.argsort(ld_pos)
ld_snps = np.array(ld_snps)[sort_indexes]
ld_r = np.array(ld_r)[sort_indexes]
# construct data frame
ld_df = pd.DataFrame()
ld_df['snp'] = ld_snps
ld_df['r'] = ld_r
if return_pos:
ld_df['chr'] = chrm
ld_df['pos'] = np.array(ld_pos)[sort_indexes]
return ld_df
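# Usage sketch (the path, rsID and position below are placeholders; the emeraLD
# binary must be on PATH and the per-chromosome population files must be named
# '<stem>.<chrom>.vcf.gz'):
#
#   vcf = EmeraldVCF('/path/to/1000G.phase3')
#   ld_df = vcf.query_ld('rs12345', 'chr1', 1000000, ld_t=0.8, return_pos=True)
#   # ld_df columns: snp, r (plus chr, pos because return_pos=True)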
|
milk/tests/data/jugparallel_kmeans_jugfile.py | luispedro/milk | 284 | 11164801 | <gh_stars>100-1000
import milk.ext.jugparallel
from milksets.wine import load
from milk.tests.fast_classifier import fast_classifier
features,labels = load()
clustered = milk.ext.jugparallel.kmeans_select_best(features, ks=(2,8), repeats=2, max_iters=6)
|
example/example.py | GothAck/openpyscad | 105 | 11164842 | <reponame>GothAck/openpyscad
import sys
import os
sys.path.append("")
sys.path.insert(
0,
"/".join(os.path.dirname(os.path.abspath(__file__)).split("/")[:-1])
)
from openpyscad import *
Circle(10).offset(10).write("example.scad", with_print=True)
|
String/1374. Generate a String With Characters That Have Odd Counts.py | beckswu/Leetcode | 138 | 11164846 | <reponame>beckswu/Leetcode<filename>String/1374. Generate a String With Characters That Have Odd Counts.py
class Solution:
def generateTheString(self, n: int) -> str:
if not n:
return ''
if n == 1:
return 'a'
if n % 2 == 0:
return 'a'*(n-1) + 'b'
return 'a'*n
# Alternative one-liner: emit a single 'b' first, then n-1 copies of one letter
# ('a' when n is even, 'b' when n is odd), so every character count is odd.
class Solution:
def generateTheString(self, n):
return 'b' + 'ab'[n & 1] * (n - 1) |
system/product.py | zhangyiiZ/saltshaker_backend | 115 | 11164849 | <reponame>zhangyiiZ/saltshaker_backend<filename>system/product.py<gh_stars>100-1000
# -*- coding:utf-8 -*-
from flask_restful import Resource, reqparse
from flask import g
from common.log import loggers
from common.audit_log import audit_log
from common.db import DB
from common.utility import uuid_prefix
from common.sso import access_required
import json
from system.user import update_user_privilege, update_user_product
from common.const import role_dict
from fileserver.rsync_fs import rsync_config
from common.saltstack_api import SaltAPI
import gitlab
logger = loggers()
parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True, trim=True)
parser.add_argument("description", type=str)
parser.add_argument("salt_master_id", type=str, required=True, trim=True)
parser.add_argument("salt_master_url", type=str, required=True, trim=True)
parser.add_argument("salt_master_user", type=str, required=True, trim=True)
parser.add_argument("salt_master_password", type=str, required=True, trim=True)
parser.add_argument("file_server", type=str, required=True, trim=True)
# GitLab settings
parser.add_argument("gitlab_url", type=str, default="", trim=True)
parser.add_argument("private_token", type=str, default="", trim=True)
parser.add_argument("oauth_token", type=str, default="", trim=True)
parser.add_argument("email", type=str, default="", trim=True)
parser.add_argument("password", type=str, default="", trim=True)
parser.add_argument("http_username", type=str, default="", trim=True)
parser.add_argument("http_password", type=str, default="", trim=True)
parser.add_argument("api_version", type=str, default="", trim=True)
parser.add_argument("state_project", type=str, default="", trim=True)
parser.add_argument("pillar_project", type=str, default="", trim=True)
class Product(Resource):
@access_required(role_dict["common_user"])
def get(self, product_id):
db = DB()
status, result = db.select_by_id("product", product_id)
db.close_mysql()
if status is True:
if result:
return {"data": result, "status": True, "message": ""}, 200
else:
return {"status": False, "message": "%s does not exist" % product_id}, 404
else:
return {"status": False, "message": result}, 500
@access_required(role_dict["product"])
def delete(self, product_id):
user = g.user_info["username"]
db = DB()
status, result = db.delete_by_id("product", product_id)
db.close_mysql()
if status is not True:
logger.error("Delete product error: %s" % result)
return {"status": False, "message": result}, 500
if result is 0:
return {"status": False, "message": "%s does not exist" % product_id}, 404
audit_log(user, product_id, product_id, "product", "delete")
info = update_user_privilege("product", product_id)
if info["status"] is False:
return {"status": False, "message": info["message"]}, 500
        # update the Rsync configuration
rsync_config()
return {"status": True, "message": ""}, 200
@access_required(role_dict["product"])
def put(self, product_id):
user = g.user_info["username"]
args = parser.parse_args()
args["id"] = product_id
product = args
db = DB()
        # check whether the product exists
select_status, select_result = db.select_by_id("product", product_id)
if select_status is not True:
db.close_mysql()
logger.error("Modify product error: %s" % select_result)
return {"status": False, "message": select_result}, 500
if not select_result:
db.close_mysql()
return {"status": False, "message": "%s does not exist" % product_id}, 404
        # check whether the product name is duplicated
status, result = db.select("product", "where data -> '$.name'='%s'" % args["name"])
if status is True:
if result:
if product_id != result[0].get("id"):
db.close_mysql()
return {"status": False, "message": "The product name already exists"}, 200
status, result = db.update_by_id("product", json.dumps(product, ensure_ascii=False), product_id)
db.close_mysql()
if status is not True:
logger.error("Modify product error: %s" % result)
return {"status": False, "message": result}, 500
audit_log(user, args["id"], product_id, "product", "edit")
        # update the Rsync configuration
if args["file_server"] == "rsync":
rsync_config()
return {"status": True, "message": ""}, 200
class ProductList(Resource):
@access_required(role_dict["common_user"])
def get(self):
db = DB()
user_info = g.user_info
role_sql = []
if user_info["role"]:
for role in user_info["role"]:
role_sql.append("data -> '$.id'='%s'" % role)
sql = " or ".join(role_sql)
role_status, role_result = db.select("role", "where %s" % sql)
if role_status and role_result:
for role in role_result:
if role["tag"] == 0:
status, result = db.select("product", "")
db.close_mysql()
product_list = []
if status is True:
if result:
product_list = result
else:
return {"status": False, "message": result}, 500
return {"data": product_list, "status": True, "message": ""}, 200
sql_list = []
product_list = []
if user_info["product"]:
for product in user_info["product"]:
sql_list.append("data -> '$.id'='%s'" % product)
sql = " or ".join(sql_list)
status, result = db.select("product", "where %s" % sql)
db.close_mysql()
if status is True:
if result:
product_list = result
return {"data": product_list, "status": True, "message": ""}, 200
else:
return {"status": False, "message": "Group does not exist"}, 404
else:
return {"status": False, "message": result}, 500
return {"data": product_list, "status": True, "message": ""}, 200
@access_required(role_dict["product"])
def post(self):
args = parser.parse_args()
args["id"] = uuid_prefix("p")
user = g.user_info["username"]
user_id = g.user_info["id"]
product = args
db = DB()
status, result = db.select("product", "where data -> '$.name'='%s'" % args["name"])
if status is True:
if len(result) == 0:
                # add the product to the user's product list
info = update_user_product(user_id, args["id"])
if info["status"] is False:
return {"status": False, "message": info["message"]}, 500
insert_status, insert_result = db.insert("product", json.dumps(product, ensure_ascii=False))
db.close_mysql()
if insert_status is not True:
logger.error("Add product error: %s" % insert_result)
return {"status": False, "message": insert_result}, 500
audit_log(user, args["id"], "", "product", "add")
                # update the Rsync configuration
if args["file_server"] == "rsync":
rsync_config()
return {"status": True, "message": ""}, 201
else:
db.close_mysql()
return {"status": False, "message": "The product name already exists"}, 200
else:
db.close_mysql()
logger.error("Select product name error: %s" % result)
return {"status": False, "message": result}, 500
class ProductCheck(Resource):
@access_required(role_dict["common_user"])
def post(self, name):
args = parser.parse_args()
if name == "salt_api":
if args["salt_master_url"] is "":
return {"status": False, "message": "Salt API address is empty"}, 200
salt_api = SaltAPI(
url=args["salt_master_url"],
user=args["salt_master_user"],
passwd=args["salt_master_password"]
)
if isinstance(salt_api, dict):
return {"status": False, "message": salt_api}, 200
else:
result = salt_api.shell_remote_execution(args["salt_master_id"], "echo true")
if result:
try:
if result['status'] is False:
return {"status": False, "message": result['message']}, 200
except KeyError:
return {"data": "", "status": True, "message": ""}, 200
else:
return {"data": "", "status": False,
"message": "Execute echo command on Master ID is not returned"}, 200
else:
if args["gitlab_url"] is "":
return {"status": False, "message": "GitLab API address is empty"}, 200
if args["api_version"] is "":
return {"status": False, "message": "GitLab API version is empty"}, 200
if args["state_project"] is "":
return {"status": False, "message": "GitLab State is empty"}, 200
try:
gl = gitlab.Gitlab(url=args["gitlab_url"],
private_token=args["private_token"],
timeout=120,
api_version=args["api_version"],
)
gl.projects.get(args["state_project"])
return {"data": "", "status": True, "message": ""}, 200
except Exception as e:
return {"data": "", "status": False, "message": str(e)}, 200
|
scc.py | Ziyadsk/scc | 122 | 11164861 | <reponame>Ziyadsk/scc<filename>scc.py
#!/usr/bin/env python3
import argparse
import json
import os
import random
import webbrowser
import textwrap
import sys
# argument system
home = os.path.expanduser("~")
parser = argparse.ArgumentParser(description="SCC - Commandline Cheat Sheet")
parser.add_argument("--export", choices=["html"], default=None)
group = parser.add_mutually_exclusive_group()
argument_list = ["-html", "-css", "-js", "-python"]
for i in argument_list:
group.add_argument(i, nargs="?", default=None)
parser.add_argument("-rand", choices=["html", "css", "js"])
arg = parser.parse_args()
line_separator = "━" * 72
# displays a banner , used primarly for dispaying titles
def banner(content):
banner = ""
banner += "┏"
banner += line_separator
banner += "┓\n┃\u001b[33;1m "
banner += content.ljust(71)
banner += "\u001b[0m┃\n"
banner += "┣" + line_separator + "┫"
print(banner)
# handles the arg
def handler(arg_type,argument,error_message):
with open(f"{home}/.scc/ccs/en/{arg_type}.json") as file:
data = json.load(file)
if argument == "all":
for line in data:
print(line)
exit(1)
try:
if arg.rand:
argument = random.choice(list(data))
if argument == None:
print("no arg!")
dictionary_data = data[argument.rstrip()]
banner(f"{argument}")
for k, v in dictionary_data.items():
title = f"┃ ● \033[4;33m{k}\033[0m".ljust(84) + "┃"
print("┃"+ " "*72+ "┃")
print(title)
print("┃"+ " "*72+ "┃")
for chunk_of_text in textwrap.wrap(v):
print("┃ " + chunk_of_text.ljust(71) + "┃")
print("┃"+ " "*72+ "┃")
print("┣" + line_separator + "┫")
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
print("┗" + line_separator + "┛")
except Exception:
print(error_message)
if arg_type == "js":
try:
lkeys = []
for key in data:
if argument.split(".")[0] == key.split(".")[0]:
lkeys.append(key)
if lkeys:
print("possible values :")
print("\n".join(lkeys))
except Exception:
pass
# dispatch the selected cheat-sheet argument to the handler (the -rand case is resolved inside handler)
if arg.js:
handler("js",arg.js,f"[\033[33;1mNOT FOUND\033[0m]: {arg.js} ")
if arg.css:
handler("css",arg.css,"[\033[33;1mNOT FOUND\033[0m] The CSS property that you requested doesn't exist/isn't indexed")
if arg.html:
handler("html",arg.html,f"[\033[33;1mNOT FOUND\033[0m] <{arg.html}> : not found")
if arg.rand:
handler(arg.rand,arg.rand,"") |
mici/__init__.py | matt-graham/mici | 137 | 11164863 | # -*- coding: utf-8 -*-
""" MCMC samplers based on simulating Hamiltonian dynamics on a manifold. """
__authors__ = "<NAME>"
__license__ = "MIT"
import mici.adapters
import mici.autodiff
import mici.integrators
import mici.matrices
import mici.samplers
import mici.solvers
import mici.stagers
import mici.states
import mici.systems
import mici.transitions
|
examples/source_separation/conv_tasnet/__init__.py | popcornell/audio | 1,718 | 11164872 | <reponame>popcornell/audio
from . import train, trainer
__all__ = ["train", "trainer"]
|
scripts/migrations/es_migration_25042021.py | davidkartchner/rubrix | 888 | 11164876 | <reponame>davidkartchner/rubrix<filename>scripts/migrations/es_migration_25042021.py
from itertools import zip_longest
from typing import Any, Dict, List, Optional
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan, bulk
from pydantic import BaseSettings
from rubrix.server.tasks.commons import TaskType
class Settings(BaseSettings):
"""
Migration argument settings
"""
elasticsearch: str = "http://localhost:9200"
migration_datasets: List[str] = []
chunk_size: int = 1000
task: TaskType
settings = Settings()
source_datasets_index = ".rubric.datasets-v1"
target_datasets_index = ".rubrix.datasets-v0"
source_record_index_pattern = ".rubric.dataset.{}.records-v1"
target_record_index_pattern = ".rubrix.dataset.{}.records-v0"
def batcher(iterable, n, fillvalue=None):
"batches an iterable"
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
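# Quick illustration (not part of the original script):
#   list(batcher("ABCDE", 2)) -> [('A', 'B'), ('C', 'D'), ('E', None)]
# The trailing None fillers are skipped later via the `if doc is not None` check.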
def map_doc_2_action(
index: str, doc: Dict[str, Any], task: TaskType
) -> Optional[Dict[str, Any]]:
"""Configures bulk action"""
doc_data = doc["_source"]
new_record = {
"id": doc_data["id"],
"metadata": doc_data.get("metadata"),
"last_updated": doc_data.get("last_updated"),
"words": doc_data.get("words"),
}
task_info = doc_data["tasks"].get(task)
if task_info is None:
return None
new_record.update(
{
"status": task_info.get("status"),
"prediction": task_info.get("prediction"),
"annotation": task_info.get("annotation"),
"event_timestamp": task_info.get("event_timestamp"),
"predicted": task_info.get("predicted"),
"annotated_as": task_info.get("annotated_as"),
"predicted_as": task_info.get("predicted_as"),
"annotated_by": task_info.get("annotated_by"),
"predicted_by": task_info.get("predicted_by"),
"score": task_info.get("confidences"),
"owner": task_info.get("owner"),
}
)
if task == TaskType.text_classification:
new_record.update(
{
"inputs": doc_data.get("text"),
"multi_label": task_info.get("multi_label"),
"explanation": task_info.get("explanation"),
}
)
elif task == TaskType.token_classification:
new_record.update(
{
"tokens": doc_data.get("tokens"),
"text": doc_data.get("raw_text"),
}
)
return {
"_op_type": "index",
"_index": index,
"_id": doc["_id"],
**new_record,
}
if __name__ == "__main__":
client = Elasticsearch(hosts=settings.elasticsearch)
for dataset in settings.migration_datasets:
source_index = source_record_index_pattern.format(dataset)
source_index_info = client.get(index=source_datasets_index, id=dataset)
target_dataset_name = f"{dataset}-{settings.task}".lower()
target_index = target_record_index_pattern.format(
target_dataset_name
)
target_index_info = source_index_info["_source"]
target_index_info["task"] = settings.task
target_index_info["name"] = target_dataset_name
client.index(
index=target_datasets_index,
id=target_index_info["name"],
body=target_index_info,
)
index_docs = scan(client, index=source_index)
for batch in batcher(index_docs, n=settings.chunk_size):
bulk(
client,
actions=(
map_doc_2_action(index=target_index, doc=doc, task=settings.task)
for doc in batch
if doc is not None
),
)
|
src/adafruit_circuitplayground/__init__.py | julianrendell/vscode-python-devicesimulator | 151 | 11164920 | # added compatibility for new import structure in CircuitPython
# https://github.com/adafruit/Adafruit_CircuitPython_CircuitPlayground/pull/79
from .express import cpx as cp
|
paddlespeech/s2t/exps/__init__.py | JiehangXie/PaddleSpeech | 1,540 | 11164933 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlespeech.s2t.training.trainer import Trainer
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
model_trainer_alias = {
"ds2": "paddlespeech.s2t.exp.deepspeech2.model:DeepSpeech2Trainer",
"u2": "paddlespeech.s2t.exps.u2.model:U2Trainer",
"u2_kaldi": "paddlespeech.s2t.exps.u2_kaldi.model:U2Trainer",
"u2_st": "paddlespeech.s2t.exps.u2_st.model:U2STTrainer",
}
def dynamic_import_trainer(module):
"""Import Trainer dynamically.
Args:
module (str): trainer name. e.g., ds2, u2, u2_kaldi
Returns:
type: Trainer class
"""
model_class = dynamic_import(module, model_trainer_alias)
assert issubclass(model_class,
Trainer), f"{module} does not implement Trainer"
return model_class
model_tester_alias = {
"ds2": "paddlespeech.s2t.exp.deepspeech2.model:DeepSpeech2Tester",
"u2": "paddlespeech.s2t.exps.u2.model:U2Tester",
"u2_kaldi": "paddlespeech.s2t.exps.u2_kaldi.model:U2Tester",
"u2_st": "paddlespeech.s2t.exps.u2_st.model:U2STTester",
}
def dynamic_import_tester(module):
"""Import Tester dynamically.
Args:
module (str): tester name. e.g., ds2, u2, u2_kaldi
Returns:
type: Tester class
"""
model_class = dynamic_import(module, model_tester_alias)
assert issubclass(model_class,
Trainer), f"{module} does not implement Tester"
return model_class
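# Usage sketch (assumes the aliased modules above are importable in the current
# PaddleSpeech environment; "u2" is just an illustrative key):
#
#   trainer_cls = dynamic_import_trainer("u2")
#   tester_cls = dynamic_import_tester("u2")
#   # both return Trainer subclasses that are then instantiated with the
#   # task-specific config/args objects elsewhere in the exps scripts.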
|
yinyang/src/mutators/GenTypeAwareMutation/Util.py | rainoftime/yinyang | 143 | 11165001 | <filename>yinyang/src/mutators/GenTypeAwareMutation/Util.py
# MIT License
#
# Copyright (c) [2020 - 2021] The yinyang authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
from yinyang.src.parsing.Ast import Term
from yinyang.src.parsing.Types import (
BOOLEAN_TYPE, REAL_TYPE, INTEGER_TYPE, ROUNDINGMODE_TYPE,
STRING_TYPE, REGEXP_TYPE
)
type2num = {
"Bool": 0,
"Real": 1,
"Int": 2,
"RoundingMode": 3,
"String": 4,
"RegLan": 5,
"Unknown": 6,
}
def get_subterms(expr):
"""
    Get all subexpressions of term object expr.
:returns: av_expr list of expressions
expr_types list of types
(s.t. expression e = av_expr[i] has type expr_types[i])
"""
av_expr = []
expr_types = []
if isinstance(expr, Term):
if expr.subterms:
for s in expr.subterms:
new_av, new_type = get_subterms(s)
av_expr += new_av
expr_types += new_type
new_type = expr.type
expr_types.append(new_type)
av_expr.append(expr)
else:
av_expr.append(expr)
expr_types.append(expr.type)
else:
if expr.term:
new_av, new_type = get_subterms(expr.term)
av_expr += new_av
expr_types += new_type
return av_expr, expr_types
def get_all_subterms(formula):
"""
Get all expressions within a formula and their types.
:returns: av_expr list of expressions
expr_types list of types
(s.t. expression e = av_expr[i] has type expr_types[i])
"""
av_expr = []
expr_type = []
for i in range(len(formula.assert_cmd)):
exps, typ = get_subterms(formula.assert_cmd[i])
av_expr += exps
expr_type += typ
return av_expr, expr_type
def get_unique_subterms(formula):
"""
Get all the unique expressions within a formula.
    :returns: unique_expr list of lists of unique expressions for
different types
"""
av_expr, expr_type = get_all_subterms(formula)
    # The index indicates which type of expressions is stored in each list:
    # 0: Bool, 1: Real, 2: Int, 3: RoundingMode, 4: String, 5: RegLan
    # (expressions of unknown type are not collected)
unique_expr = [[], [], [], [], [], []]
for i in range(len(expr_type)):
if expr_type[i] == BOOLEAN_TYPE:
unique_expr[0].append(copy.deepcopy(av_expr[i]))
elif expr_type[i] == REAL_TYPE:
unique_expr[1].append(copy.deepcopy(av_expr[i]))
elif expr_type[i] == INTEGER_TYPE:
unique_expr[2].append(copy.deepcopy(av_expr[i]))
elif expr_type[i] == ROUNDINGMODE_TYPE:
unique_expr[3].append(copy.deepcopy(av_expr[i]))
elif expr_type[i] == STRING_TYPE:
unique_expr[4].append(copy.deepcopy(av_expr[i]))
elif expr_type[i] == REGEXP_TYPE:
unique_expr[5].append(copy.deepcopy(av_expr[i]))
for i in range(6):
if unique_expr[i]:
temp = []
temp.append(unique_expr[i][0])
for j in range(1, len(unique_expr[i])):
flag = 0
for exp in temp:
if unique_expr[i][j] == exp:
flag = 1
pass
if flag == 0:
temp.append(unique_expr[i][j])
unique_expr[i] = temp
return unique_expr
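# Usage sketch (assumes `formula` is a Script object produced by yinyang's
# SMT-LIB parser, so that formula.assert_cmd is populated):
#
#   av_expr, expr_types = get_all_subterms(formula)
#   unique_expr = get_unique_subterms(formula)
#   bool_terms = unique_expr[type2num["Bool"]]   # distinct Boolean subterms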
def local_defs(term, local):
"""
term: term object
    local: set of local variables defined in the parent terms
    :returns: set of local variables to be considered within the term
"""
term = term.parent
if term:
if term.quantifier:
for q_var in term.quantified_vars[0]:
local.add(q_var)
if term.let_terms:
for var in term.var_binders:
local.add(var)
if term.parent:
local_defs(term, local)
return local
def local_compatible(t1, t2):
"""
t1: term object
t2: term object
    :returns: local compatibility of t2 with t1, i.e., every local variable in
    t2 is defined in t1
"""
loc_t1 = local_defs(t1, set())
loc_t2 = local_defs(t2, set())
return loc_t2.issubset(loc_t1)
|
pyransac3d/__init__.py | jungerm2/pyRANSAC-3D | 180 | 11165007 | <reponame>jungerm2/pyRANSAC-3D
from .aux_functions import *
from .circle import Circle
from .cuboid import Cuboid
from .cylinder import Cylinder
from .line import Line
from .plane import Plane
from .point import Point
from .sphere import Sphere
# from pyRANSAC_3D import Cylinder, Cuboid, Plane
|
deepscm/arch/medical.py | srtaheri/deepscm | 183 | 11165015 | <filename>deepscm/arch/medical.py
from torch import nn
import numpy as np
class Encoder(nn.Module):
def __init__(self, num_convolutions=1, filters=(16, 32, 64, 128), latent_dim: int = 128, input_size=(1, 192, 192)):
super().__init__()
self.num_convolutions = num_convolutions
self.filters = filters
self.latent_dim = latent_dim
layers = []
cur_channels = 1
for c in filters:
for _ in range(0, num_convolutions - 1):
layers += [nn.Conv2d(cur_channels, c, 3, 1, 1), nn.BatchNorm2d(c), nn.LeakyReLU(.1, inplace=True)]
cur_channels = c
layers += [nn.Conv2d(cur_channels, c, 4, 2, 1), nn.BatchNorm2d(c), nn.LeakyReLU(.1, inplace=True)]
cur_channels = c
self.cnn = nn.Sequential(*layers)
self.intermediate_shape = np.array(input_size) // (2 ** len(filters))
self.intermediate_shape[0] = cur_channels
self.fc = nn.Sequential(
nn.Linear(np.prod(self.intermediate_shape), latent_dim),
nn.BatchNorm1d(latent_dim),
nn.LeakyReLU(.1, inplace=True)
)
def forward(self, x):
x = self.cnn(x).view(-1, np.prod(self.intermediate_shape))
return self.fc(x)
class Decoder(nn.Module):
def __init__(self, num_convolutions=1, filters=(128, 64, 32, 16), latent_dim: int = 128, output_size=(1, 192, 192), upconv=False):
super().__init__()
self.num_convolutions = num_convolutions
self.filters = filters
self.latent_dim = latent_dim
self.intermediate_shape = np.array(output_size) // (2 ** (len(filters) - 1))
self.intermediate_shape[0] = filters[0]
self.fc = nn.Sequential(
nn.Linear(latent_dim, np.prod(self.intermediate_shape)),
nn.BatchNorm1d(np.prod(self.intermediate_shape)),
nn.LeakyReLU(.1, inplace=True)
)
layers = []
cur_channels = filters[0]
for c in filters[1:]:
for _ in range(0, num_convolutions - 1):
layers += [nn.Conv2d(cur_channels, cur_channels, 3, 1, 1), nn.BatchNorm2d(cur_channels), nn.LeakyReLU(.1, inplace=True)]
if upconv:
layers += [
nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(cur_channels, c, kernel_size=5, stride=1, padding=2)
]
else:
layers += [nn.ConvTranspose2d(cur_channels, c, kernel_size=4, stride=2, padding=1)]
layers += [nn.BatchNorm2d(c), nn.LeakyReLU(.1, inplace=True)]
cur_channels = c
layers += [nn.Conv2d(cur_channels, 1, 1, 1)]
self.cnn = nn.Sequential(*layers)
def forward(self, x):
x = self.fc(x).view(-1, *self.intermediate_shape)
return self.cnn(x)
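# --- Illustrative usage sketch (not part of the original module) ---
# A minimal shape round-trip using the default 1x192x192 input size assumed
# above; the batch size and random input are arbitrary.
if __name__ == "__main__":
    import torch
    enc = Encoder()
    dec = Decoder()
    enc.eval()
    dec.eval()  # eval() so BatchNorm uses its running statistics
    x = torch.randn(2, 1, 192, 192)
    z = enc(x)        # expected shape: (2, 128)
    x_rec = dec(z)    # expected shape: (2, 1, 192, 192)
    print(z.shape, x_rec.shape)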
|
omd/versions/1.2.8p15.cre/lib/python/pysphere-0.1.7-py2.7.egg/pysphere/__init__.py | NCAR/spol-nagios | 106 | 11165034 | #--
# Copyright (c) 2012, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# For ZSI:
#
# Copyright 2001, Zolera Systems, Inc. All Rights Reserved.
# Copyright 2002-2003, <NAME>. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, and/or
# sell copies of the Software, and to permit persons to whom the Software
# is furnished to do so, provided that the above copyright notice(s) and
# this permission notice appear in all copies of the Software and that
# both the above copyright notice(s) and this permission notice appear in
# supporting documentation.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
# INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
# OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
# OR PERFORMANCE OF THIS SOFTWARE.
#
# Except as contained in this notice, the name of a copyright holder
# shall not be used in advertising or otherwise to promote the sale, use
# or other dealings in this Software without prior written authorization
# of the copyright holder.
#
#
# Portions are also:
#
# Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved. Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# You are under no obligation whatsoever to provide any bug fixes,
# patches, or upgrades to the features, functionality or performance of
# the source code ("Enhancements") to anyone; however, if you choose to
# make your Enhancements available either publicly, or directly to
# Lawrence Berkeley National Laboratory, without imposing a separate
# written license agreement for such Enhancements, then you hereby grant
# the following license: a non-exclusive, royalty-free perpetual license
# to install, use, modify, prepare derivative works, incorporate into
# other computer software, distribute, and sublicense such Enhancements
# or derivative works thereof, in binary and source code form.
#
#
# For wstools also:
#
# Zope Public License (ZPL) Version 2.0
# -----------------------------------------------
#
# This software is Copyright (c) Zope Corporation (tm) and
# Contributors. All rights reserved.
#
# This license has been certified as open source. It has also
# been designated as GPL compatible by the Free Software
# Foundation (FSF).
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions in source code must retain the above
# copyright notice, this list of conditions, and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions, and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# 3. The name Zope Corporation (tm) must not be used to
# endorse or promote products derived from this software
# without prior written permission from Zope Corporation.
#
# 4. The right to distribute this software or to use it for
# any purpose does not give you the right to use Servicemarks
# (sm) or Trademarks (tm) of Zope Corporation. Use of them is
# covered in a separate agreement (see
# http://www.zope.com/Marks).
#
# 5. If any files are modified, you must cause the modified
# files to carry prominent notices stating that you changed
# the files and the date of any change.
#
# Disclaimer
#
# THIS SOFTWARE IS PROVIDED BY ZOPE CORPORATION ``AS IS''
# AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL ZOPE CORPORATION OR ITS CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
#
# This software consists of contributions made by Zope
# Corporation and many individuals on behalf of Zope
# Corporation. Specific attributions are listed in the
# accompanying credits file.
#
#--
__all__ = ['VIServer', 'VIException', 'VIApiException', 'VITask', 'FaultTypes',
'VIMor', 'MORTypes', 'VMPowerState', 'ToolsStatus', 'VIProperty']
from pysphere.resources.vi_exception import VIException, VIApiException, \
FaultTypes
from pysphere.vi_task import VITask
from pysphere.vi_property import VIProperty
from pysphere.vi_mor import VIMor, MORTypes
from pysphere.vi_server import VIServer
from pysphere.vi_virtual_machine import VMPowerState, ToolsStatus
#from version import version as __version__
|
analysis/random_forest/sklearn.py | szilard/GBM-perf | 201 | 11165046 | <reponame>szilard/GBM-perf
import pandas as pd
import numpy as np
from sklearn import preprocessing
from scipy import sparse
from sklearn import metrics, ensemble
d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")
d_all = pd.concat([d_train,d_test])
vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])
X_all_cat = preprocessing.OneHotEncoder(categories="auto").fit_transform(d_all[vars_cat])
X_all = sparse.hstack((X_all_cat, d_all[vars_num])).tocsr()
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)
X_train = X_all[0:d_train.shape[0],]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0]),]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]
md = ensemble.RandomForestClassifier(max_depth = 10, n_estimators = 100, n_jobs = -1)
%time md.fit(X_train, y_train)
y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
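# --- Optional note (not part of the original benchmark) ---
# "%time md.fit(...)" above is an IPython magic and only works when this file
# is run inside IPython/Jupyter. A plain-Python timing sketch would be:
#   import time
#   start = time.time()
#   md.fit(X_train, y_train)
#   print("fit time: %.1fs" % (time.time() - start))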
|
ui/spaces.py | fsanges/glTools | 165 | 11165053 | <gh_stars>100-1000
import maya.cmds as mc
import glTools.tools.spaces
class UserInputError(Exception): pass
def charUI():
'''
'''
# Window
win = 'spacesCharUI'
if mc.window(win,q=True,ex=True): mc.deleteUI(win)
win = mc.window(win,t='Spaces - Character UI')
# Form Layout
spacesCharFL = mc.formLayout(numberOfDivisions=100)
# UI Elements
#-
# Character Prefix
charTFG = mc.textFieldGrp('spacesCharTFG',label='Character Prefix', text='',cw=[(1,120),(2,150)])
# Button
openB = mc.button(l='Open',c='glTools.ui.spaces.charUIFromUI()')
mc.formLayout(spacesCharFL, e=True, af=[(charTFG,'left',5),(charTFG,'top',5),(charTFG,'right',5)])
mc.formLayout(spacesCharFL, e=True, ac=[(openB,'top',5,charTFG)])
mc.formLayout(spacesCharFL, e=True, af=[(openB,'left',5),(openB,'bottom',5),(openB,'right',5)])
# Open window
mc.showWindow(win)
def charUIFromUI():
'''
'''
win = 'spacesCharUI'
if not mc.window(win,q=True,ex=True):
        raise UserInputError('Spaces CharacterUI is not currently open!')
char = str(mc.textFieldGrp('spacesCharTFG',q=True,text=True))
    glTools.tools.spaces.Spaces().ui(char)
def createAddUI():
'''
User interface for creation/addition of spaces.
'''
# Window
win = 'spacesAddCreateUI'
if mc.window(win,q=True,ex=True): mc.deleteUI(win)
win = mc.window(win,t='Spaces - Create/Add UI')
# Form Layout
spacesFL = mc.formLayout(numberOfDivisions=100)
# UI Elements
#-
# Text Field Grps
controlTFG = mc.textFieldGrp('spacesControlTFG',label='Control', text='',cw=[(1,80),(2,150)])
controlTagTFG = mc.textFieldGrp('spacesControlTagTFG',label='Control Tag', text='',cw=[(1,80),(2,150)])
targetTagTFG = mc.textFieldGrp('spacesTargetTagTFG',label='Target Tag:',text='',cw=[(1,85),(2,150)],cc='glTools.ui.spaces.addTargetTagToList()')
# Text Scroll List
targetTSL = mc.textScrollList('spacesTargetTSL',w=120,numberOfRows=8,allowMultiSelection=False,dkc='glTools.ui.spaces.removeFromList()',sc='glTools.ui.spaces.updateTagField()')
# Target List Text
targetListTXT = mc.text(l='Target List:',al='left')
# Create Button
createBTN = mc.button(l='Create / Add',c='glTools.ui.spaces.createAddFromUI()')
# Separator
controlSEP = mc.separator(h=5,style='single')
    # Form Layout - MAIN
#-
# controlTFG
mc.formLayout(spacesFL, e=True, af=[(controlTFG,'left',5),(controlTFG,'top',5)])
mc.formLayout(spacesFL, e=True, ap=[(controlTFG,'right',5,50)])
# controlTagTFG
mc.formLayout(spacesFL, e=True, af=[(controlTagTFG,'right',5),(controlTagTFG,'top',5)])
mc.formLayout(spacesFL, e=True, ap=[(controlTagTFG,'left',5,50)])
# controlSEP
mc.formLayout(spacesFL, e=True, af=[(controlSEP,'left',5),(controlSEP,'right',5)])
mc.formLayout(spacesFL, e=True, ac=[(controlSEP,'top',5,controlTFG)])
# targetListTXT
mc.formLayout(spacesFL, e=True, af=[(targetListTXT,'left',5)])
mc.formLayout(spacesFL, e=True, ac=[(targetListTXT,'top',5,controlSEP)])
mc.formLayout(spacesFL, e=True, ap=[(targetListTXT,'right',5,50)])
# targetTSL
mc.formLayout(spacesFL, e=True, af=[(targetTSL,'left',5),(targetTSL,'bottom',5)])
mc.formLayout(spacesFL, e=True, ap=[(targetTSL,'right',5,50)])
mc.formLayout(spacesFL, e=True, ac=[(targetTSL,'top',5,targetListTXT)])
# targetTagTFG
mc.formLayout(spacesFL, e=True, af=[(targetTagTFG,'right',5)])
mc.formLayout(spacesFL, e=True, ap=[(targetTagTFG,'left',5,50),(targetTagTFG,'top',5,50)])
# createBTN
mc.formLayout(spacesFL, e=True, af=[(createBTN,'bottom',5),(createBTN,'right',5)])
mc.formLayout(spacesFL, e=True, ap=[(createBTN,'left',5,66)])
    # Popup menus
targetListPUM = mc.popupMenu(parent=targetTSL)
mc.menuItem('Add selected objects',c='glTools.ui.spaces.addSelectedToList()')
mc.menuItem('Remove Highlighted objects',c='glTools.ui.spaces.removeFromList()')
controlPUM = mc.popupMenu(parent=controlTFG)
mc.menuItem('Get selected object',c='glTools.ui.spaces.getControlNameFromSel()')
# Open window
mc.showWindow(win)
def createAddFromUI():
'''
Wrapper method for creation/addition of spaces. Run from the creation UI.
'''
testCreateAddUI()
ctrl = mc.textFieldGrp('spacesControlTFG',q=True,text=True)
nameTag = mc.textFieldGrp('spacesControlTagTFG',q=True,text=True)
targetList = mc.textScrollList('spacesTargetTSL',q=True,ai=True)
targetNameList = []
targetTagList = []
for target in targetList:
if not target.count('::'):
targetNameList.append(target)
targetTagList.append(target)
else:
targetNameList.append(target.split('::')[0])
targetTagList.append(target.split('::')[1])
glTools.tools.spaces.Spaces().create(ctrl,targetNameList,targetTagList,nameTag)
def testCreateAddUI():
'''
Test if Spaces Create/Add UI is open.
'''
win = 'spacesAddCreateUI'
if not mc.window(win,q=True,ex=True):
raise UserInputError('Spaces Create/AddUI is not currently open!')
def getControlNameFromSel():
'''
    Set control textField value to the name of the first selected object
'''
# Check UI
testCreateAddUI()
selection = mc.ls(sl=True,type=['transform','joint'])
if not selection: return
else: mc.textFieldGrp('spacesControlTFG',e=True,text=str(selection[0]))
def addSelectedToList():
'''
Add selected transform names to the spaces target text scroll list.
'''
# Check UI
testCreateAddUI()
currentTargetList = mc.textScrollList('spacesTargetTSL',q=True,ai=True)
for obj in mc.ls(sl=True,type=['transform','joint']):
if type(currentTargetList) == list:
if currentTargetList.count(obj): continue
# Add target to list
mc.textScrollList('spacesTargetTSL',e=True,a=obj)
def removeFromList():
'''
    Remove highlighted transform names from the spaces target text scroll list.
'''
# Check UI
testCreateAddUI()
currentTargetList = mc.textScrollList('spacesTargetTSL',q=True,ai=True)
selectedTargets = mc.textScrollList('spacesTargetTSL',q=True,si=True)
for obj in selectedTargets:
itemIndex = currentTargetList.index(obj)
mc.textScrollList('spacesTargetTSL',e=True,rii=itemIndex+1)
def addTargetTagToList():
'''
Add a tag string value to the selected target in the spaces target text scroll list.
'''
# Check UI
testCreateAddUI()
tag = str(mc.textFieldGrp('spacesTargetTagTFG',q=True,text=True))
if not len(tag): return
targetName = mc.textScrollList('spacesTargetTSL',q=True,si=True)
targetIndex = mc.textScrollList('spacesTargetTSL',q=True,sii=True)
if not targetName or not targetIndex:
print('Select a target from the target list to add a tag to!!')
return
mc.textScrollList('spacesTargetTSL',e=True,rii=targetIndex)
mc.textScrollList('spacesTargetTSL',e=True,ap=(targetIndex[0],targetName[0]+'::'+tag))
def updateTagField():
'''
Update the target tag textField based on the value of the selected entry in the spaces target text scroll list.
'''
# Check UI
testCreateAddUI()
targetName = mc.textScrollList('spacesTargetTSL',q=True,si=True)
if not targetName: return
if not targetName[0].count('::'): mc.textFieldGrp('spacesTargetTagTFG',e=True,text='')
else: mc.textFieldGrp('spacesTargetTagTFG',e=True,text=targetName[0].split('::')[1])
|
bookwyrm/migrations/0146_auto_20220316_2352.py | mouse-reeve/fedireads | 270 | 11165055 | <reponame>mouse-reeve/fedireads
# Generated by Django 3.2.12 on 2022-03-16 23:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0145_sitesettings_version"),
]
operations = [
migrations.AddField(
model_name="inviterequest",
name="answer",
field=models.TextField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name="sitesettings",
name="invite_question_text",
field=models.CharField(
blank=True, default="What is your favourite book?", max_length=255
),
),
migrations.AddField(
model_name="sitesettings",
name="invite_request_question",
field=models.BooleanField(default=False),
),
]
|
release/steam/create_steam_builds.py | noorbeast/blender | 365 | 11165096 | #!/usr/bin/env python3
import argparse
import pathlib
import requests
import shutil
import subprocess
from typing import Callable, Iterator, List, Tuple
# supported archive and platform endings, used to create actual archive names
archive_endings = ["windows64.zip", "linux64.tar.xz", "macOS.dmg"]
def add_optional_argument(option: str, help: str) -> None:
    """Add an optional argument
    Args:
        option (str): Option to add
        help (str): Help description for the argument
    """
    global parser
parser.add_argument(option, help=help, action='store_const', const=1)
def blender_archives(version: str) -> Iterator[str]:
"""Generator for Blender archives for version.
    Yields, for each ending in archive_endings, an archive name of the form
    blender-{version}-{ending}.
Args:
version (str): Version string of the form 2.83.2
Yields:
Iterator[str]: Name in the form of blender-{version}-{ending}
"""
global archive_endings
for ending in archive_endings:
yield f"blender-{version}-{ending}"
def get_archive_type(archive_type: str, version: str) -> str:
"""Return the archive of given type and version.
Args:
archive_type (str): extension for archive type to check for
version (str): Version string in the form 2.83.2
Raises:
        Exception: Raised when the archive type isn't found
Returns:
str: archive name for given type
"""
for archive in blender_archives(version):
if archive.endswith(archive_type):
return archive
raise Exception("Unknown archive type")
def execute_command(cmd: List[str], name: str, errcode: int, cwd=".", capture_output=True) -> str:
"""Execute the given command.
Returns the process stdout upon success if any.
    On error, print a message naming the command that failed, print the stdout
    and stderr of the process if any, and then exit with the given error code.
Args:
cmd (List[str]): Command in list format, each argument as their own item
name (str): Name of command to use when printing to command-line
errcode (int): Error code to use in case of exit()
cwd (str, optional): Folder to use as current work directory for command
execution. Defaults to ".".
capture_output (bool, optional): Whether to capture command output or not.
Defaults to True.
Returns:
str: stdout if any, or empty string
"""
cmd_process = subprocess.run(
cmd, capture_output=capture_output, encoding="UTF-8", cwd=cwd)
if cmd_process.returncode == 0:
if cmd_process.stdout:
return cmd_process.stdout
else:
return ""
else:
print(f"ERROR: {name} failed.")
if cmd_process.stdout:
print(cmd_process.stdout)
if cmd_process.stderr:
print(cmd_process.stderr)
exit(errcode)
return ""
def download_archives(base_url: str, archives: Callable[[str], Iterator[str]], version: str, dst_dir: pathlib.Path):
"""Download archives from the given base_url.
Archives is a generator for Blender archive names based on version.
Archive names are appended to the base_url to load from, and appended to
dst_dir to save to.
Args:
base_url (str): Base URL to load archives from
archives (Callable[[str], Iterator[str]]): Generator for Blender archive
names based on version
version (str): Version string in the form of 2.83.2
dst_dir (pathlib.Path): Download destination
"""
if base_url[-1] != '/':
base_url = base_url + '/'
for archive in archives(version):
download_url = f"{base_url}{archive}"
target_file = dst_dir.joinpath(archive)
download_file(download_url, target_file)
def download_file(from_url: str, to_file: pathlib.Path) -> None:
"""Download from_url as to_file.
    Actual downloading is skipped when --skipdl is given on the command-line
    and the file already exists locally.
Args:
from_url (str): Full URL to resource to download
to_file (pathlib.Path): Full path to save downloaded resource as
"""
global args
if not args.skipdl or not to_file.exists():
print(f"Downloading {from_url}")
with open(to_file, "wb") as download_zip:
response = requests.get(from_url)
if response.status_code != requests.codes.ok:
print(f"ERROR: failed to download {from_url} (status code: {response.status_code})")
exit(1313)
download_zip.write(response.content)
else:
print(f"Downloading {from_url} skipped")
print(" ... OK")
def copy_contents_from_dmg_to_path(dmg_file: pathlib.Path, dst: pathlib.Path) -> None:
"""Copy the contents of the given DMG file to the destination folder.
Args:
dmg_file (pathlib.Path): Full path to DMG archive to extract from
dst (pathlib.Path): Full path to destination to extract to
"""
hdiutil_attach = ["hdiutil",
"attach",
"-readonly",
f"{dmg_file}"
]
attached = execute_command(hdiutil_attach, "hdiutil attach", 1)
# Last line of output is what we want, it is of the form
# /dev/somedisk Apple_HFS /Volumes/Blender
# We want to retain the mount point, and the folder the mount is
# created on. The mounted disk we need for detaching, the folder we
# need to be able to copy the contents to where we can use them
attachment_items = attached.splitlines()[-1].split()
mounted_disk = attachment_items[0]
source_location = pathlib.Path(attachment_items[2], "Blender.app")
print(f"{source_location} -> {dst}")
shutil.copytree(source_location, dst)
hdiutil_detach = ["hdiutil",
"detach",
f"{mounted_disk}"
]
execute_command(hdiutil_detach, "hdiutil detach", 2)
def create_build_script(template_name: str, vars: List[Tuple[str, str]]) -> pathlib.Path:
"""
Create the Steam build script
Use the given template and template variable tuple list.
Returns pathlib.Path to the created script.
Args:
        template_name (str): File name of the build script template to use
        vars (List[Tuple[str, str]]): (placeholder, value) pairs to substitute
Returns:
pathlib.Path: Full path to the generated script
"""
build_script = pathlib.Path(".", template_name).read_text()
for var in vars:
build_script = build_script.replace(var[0], var[1])
build_script_file = template_name.replace(".template", "")
build_script_path = pathlib.Path(".", build_script_file)
build_script_path.write_text(build_script)
return build_script_path
def clean_up() -> None:
"""Remove intermediate files depending on given command-line arguments
"""
global content_location, args
if not args.leavearch and not args.leaveextracted:
shutil.rmtree(content_location)
if args.leavearch and not args.leaveextracted:
shutil.rmtree(content_location.joinpath(zip_extract_folder))
shutil.rmtree(content_location.joinpath(tarxz_extract_folder))
shutil.rmtree(content_location.joinpath(dmg_extract_folder))
if args.leaveextracted and not args.leavearch:
import os
os.remove(content_location.joinpath(zipped_blender))
os.remove(content_location.joinpath(tarxz_blender))
os.remove(content_location.joinpath(dmg_blender))
def extract_archive(archive: str, extract_folder_name: str,
cmd: List[str], errcode: int) -> None:
"""Extract all files from archive to given folder name.
    Extraction is skipped only when --skipextract was given on the
    command-line and the target folder already exists.
Args:
archive (str): Archive name to extract
extract_folder_name (str): Folder name to extract to
cmd (List[str]): Command with arguments to use
errcode (int): Error code to use for exit()
"""
global args, content_location
extract_location = content_location.joinpath(extract_folder_name)
pre_extract = set(content_location.glob("*"))
if not args.skipextract or not extract_location.exists():
print(f"Extracting files from {archive}...")
cmd.append(content_location.joinpath(archive))
execute_command(cmd, cmd[0], errcode, cwd=content_location)
# in case we use a non-release archive the naming will be incorrect.
# simply rename to expected target name
post_extract = set(content_location.glob("*"))
diff_extract = post_extract - pre_extract
if not extract_location in diff_extract:
folder_to_rename = list(diff_extract)[0]
folder_to_rename.rename(extract_location)
print(" OK")
else:
print(f"Skipping extraction {archive}!")
# ==============================================================================
parser = argparse.ArgumentParser()
parser.add_argument("--baseurl", required=True,
help="The base URL for files to download, "
"i.e. https://download.blender.org/release/Blender2.83/")
parser.add_argument("--version", required=True,
help="The Blender version to release, in the form 2.83.3")
parser.add_argument("--appid", required=True,
help="The Blender App ID on Steam")
parser.add_argument("--winid", required=True,
help="The Windows depot ID")
parser.add_argument("--linuxid", required=True,
help="The Linux depot ID")
parser.add_argument("--macosid", required=True,
help="The MacOS depot ID")
parser.add_argument("--steamcmd", required=True,
help="Path to the steamcmd")
parser.add_argument("--steamuser", required=True,
help="The login for the Steam builder user")
parser.add_argument("--steampw", required=True,
help="Login password for the Steam builder user")
add_optional_argument("--dryrun",
"If set the Steam files will not be uploaded")
add_optional_argument("--leavearch",
help="If set don't clean up the downloaded archives")
add_optional_argument("--leaveextracted",
help="If set don't clean up the extraction folders")
add_optional_argument("--skipdl",
help="If set downloading the archives is skipped if it already exists locally.")
add_optional_argument("--skipextract",
help="If set skips extracting of archives. The tool assumes the archives"
"have already been extracted to their correct locations")
args = parser.parse_args()
VERSIONNODOTS = args.version.replace('.', '')
OUTPUT = f"output{VERSIONNODOTS}"
CONTENT = f"content{VERSIONNODOTS}"
# ===== set up main locations
content_location = pathlib.Path(".", CONTENT).absolute()
output_location = pathlib.Path(".", OUTPUT).absolute()
content_location.mkdir(parents=True, exist_ok=True)
output_location.mkdir(parents=True, exist_ok=True)
# ===== login
# Logging into Steam once to ensure the SDK updates itself properly. If we don't
# do that the combined +login and +run_app_build_http at the end of the tool
# will fail.
steam_login = [args.steamcmd,
"+login",
args.steamuser,
args.steampw,
"+quit"
]
print("Logging in to Steam...")
execute_command(steam_login, "Login to Steam", 10)
print(" OK")
# ===== prepare Steam build scripts
template_vars = [
("[APPID]", args.appid),
("[OUTPUT]", OUTPUT),
("[CONTENT]", CONTENT),
("[VERSION]", args.version),
("[WINID]", args.winid),
("[LINUXID]", args.linuxid),
("[MACOSID]", args.macosid),
("[DRYRUN]", f"{args.dryrun}" if args.dryrun else "0")
]
blender_app_build = create_build_script(
"blender_app_build.vdf.template", template_vars)
create_build_script("depot_build_win.vdf.template", template_vars)
create_build_script("depot_build_linux.vdf.template", template_vars)
create_build_script("depot_build_macos.vdf.template", template_vars)
# ===== download archives
download_archives(args.baseurl, blender_archives,
args.version, content_location)
# ===== set up file and folder names
zipped_blender = get_archive_type("zip", args.version)
zip_extract_folder = zipped_blender.replace(".zip", "")
tarxz_blender = get_archive_type("tar.xz", args.version)
tarxz_extract_folder = tarxz_blender.replace(".tar.xz", "")
dmg_blender = get_archive_type("dmg", args.version)
dmg_extract_folder = dmg_blender.replace(".dmg", "")
# ===== extract
unzip_cmd = ["unzip", "-q"]
extract_archive(zipped_blender, zip_extract_folder, unzip_cmd, 3)
untarxz_cmd = ["tar", "-xf"]
extract_archive(tarxz_blender, tarxz_extract_folder, untarxz_cmd, 4)
if not args.skipextract or not content_location.joinpath(dmg_extract_folder).exists():
print("Extracting files from Blender MacOS archive...")
blender_dmg = content_location.joinpath(dmg_blender)
target_location = content_location.joinpath(
dmg_extract_folder, "Blender.app")
copy_contents_from_dmg_to_path(blender_dmg, target_location)
print(" OK")
else:
print("Skipping extraction of .dmg!")
# ===== building
print("Build Steam game files...")
steam_build = [args.steamcmd,
"+login",
args.steamuser,
args.steampw,
"+run_app_build_http",
blender_app_build.absolute(),
"+quit"
]
execute_command(steam_build, "Build with steamcmd", 13)
print(" OK")
clean_up()
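# --- Illustrative invocation (not part of the original script) ---
# All IDs and credentials below are placeholders; real values come from the
# Steam partner account and the Blender release being published.
#
#   ./create_steam_builds.py \
#       --baseurl https://download.blender.org/release/Blender2.83/ \
#       --version 2.83.3 \
#       --appid 123450 --winid 123451 --linuxid 123452 --macosid 123453 \
#       --steamcmd /path/to/steamcmd \
#       --steamuser builder --steampw <password> \
#       --dryrun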
|
response/migrations/0009_commschannel_channel_name.py | ojno/response | 1,408 | 11165121 | <reponame>ojno/response
"""
This is a slightly more complex migration. This change adds a new CommsChannel
field to cache the channel name, updating it when the Slack API notifies us of
a channel rename. For existing CommsChannel in the DB, we need to populate
the channel_name by connecting to the Slack API.
This applies the migration in three stages. First, we add the new field, but
make it nullable, so there's no need to set a default. The next step iterates
over existing CommsChannels, and refreshes the name from the Slack API.
Finally, now that every CommsChannel has a channel_name set, we alter the field
so that a value is required.
"""
from django.conf import settings
from django.db import OperationalError, migrations, models
from response.slack.client import SlackError
def set_comms_channel_names(apps, schema_editor):
CommsChannel = apps.get_model("response", "CommsChannel")
for comms_channel in CommsChannel.objects.all().iterator():
try:
channel_name = settings.SLACK_CLIENT.get_channel_name(
comms_channel.channel_id
)
if not channel_name:
channel_name = "<channel not found>"
except SlackError as e:
raise OperationalError(
f"""Error connecting to the Slack API: {str(e)}
⚠️ This migration requires access to the Slack API to set CommsChannel.channel_name on existing CommsChannel object. Please make sure that the SLACK_TOKEN environment variable is set to a valid value."""
)
comms_channel.channel_name = channel_name
comms_channel.save()
class Migration(migrations.Migration):
dependencies = [("response", "0008_externaluser_email")]
operations = [
migrations.AddField(
model_name="commschannel",
name="channel_name",
field=models.CharField(null=True, max_length=80),
preserve_default=False,
),
migrations.RunPython(set_comms_channel_names, migrations.RunPython.noop),
migrations.AlterField(
model_name="commschannel",
name="channel_name",
field=models.CharField(null=False, max_length=80),
preserve_default=False,
),
]
|
pyGeno/tests/test_genome.py | cw00dw0rd/pyGeno | 309 | 11165127 | <reponame>cw00dw0rd/pyGeno
import unittest
from pyGeno.Genome import *
import pyGeno.bootstrap as B
from pyGeno.importation.Genomes import *
from pyGeno.importation.SNPs import *
class pyGenoSNPTests(unittest.TestCase):
def setUp(self):
# try :
# B.importGenome("Human.GRCh37.75_Y-Only.tar.gz")
# except KeyError :
# deleteGenome("human", "GRCh37.75_Y-Only")
# B.importGenome("Human.GRCh37.75_Y-Only.tar.gz")
# print "--> Seems to already exist in db"
# try :
# B.importSNPs("Human_agnostic.dummySRY.tar.gz")
# except KeyError :
# deleteSNPs("dummySRY_AGN")
# B.importSNPs("Human_agnostic.dummySRY.tar.gz")
# print "--> Seems to already exist in db"
# try :
# B.importSNPs("Human_agnostic.dummySRY_indels")
# except KeyError :
# deleteSNPs("dummySRY_AGN_indels")
# B.importSNPs("Human_agnostic.dummySRY_indels")
# print "--> Seems to already exist in db"
self.ref = Genome(name = 'GRCh37.75_Y-Only')
def tearDown(self):
pass
# @unittest.skip("skipping")
def test_vanilla(self) :
dummy = Genome(name = 'GRCh37.75_Y-Only', SNPs = 'dummySRY_AGN')
persProt = dummy.get(Protein, id = 'ENSP00000438917')[0]
refProt = self.ref.get(Protein, id = 'ENSP00000438917')[0]
self.assertEqual('ATGCAATCATATGCTTCTGC', refProt.transcript.cDNA[:20])
self.assertEqual('HTGCAATCATATGCTTCTGC', persProt.transcript.cDNA[:20])
# @unittest.skip("skipping")
def test_noModif(self) :
from pyGeno.SNPFiltering import SNPFilter
class MyFilter(SNPFilter) :
def __init__(self) :
SNPFilter.__init__(self)
def filter(self, chromosome, dummySRY_AGN) :
return None
dummy = Genome(name = 'GRCh37.75_Y-Only', SNPs = 'dummySRY_AGN', SNPFilter = MyFilter())
persProt = dummy.get(Protein, id = 'ENSP00000438917')[0]
refProt = self.ref.get(Protein, id = 'ENSP00000438917')[0]
self.assertEqual(persProt.transcript.cDNA[:20], refProt.transcript.cDNA[:20])
# @unittest.skip("skipping")
def test_insert(self) :
from pyGeno.SNPFiltering import SNPFilter
class MyFilter(SNPFilter) :
def __init__(self) :
SNPFilter.__init__(self)
def filter(self, chromosome, dummySRY_AGN) :
from pyGeno.SNPFiltering import SequenceInsert
refAllele = chromosome.refSequence[dummySRY_AGN.start]
return SequenceInsert('XXX')
dummy = Genome(name = 'GRCh37.75_Y-Only', SNPs = 'dummySRY_AGN', SNPFilter = MyFilter())
persProt = dummy.get(Protein, id = 'ENSP00000438917')[0]
refProt = self.ref.get(Protein, id = 'ENSP00000438917')[0]
self.assertEqual('ATGCAATCATATGCTTCTGC', refProt.transcript.cDNA[:20])
self.assertEqual('XXXATGCAATCATATGCTTC', persProt.transcript.cDNA[:20])
# @unittest.skip("skipping")
def test_SNP(self) :
from pyGeno.SNPFiltering import SNPFilter
class MyFilter(SNPFilter) :
def __init__(self) :
SNPFilter.__init__(self)
def filter(self, chromosome, dummySRY_AGN) :
from pyGeno.SNPFiltering import SequenceSNP
return SequenceSNP(dummySRY_AGN.alt)
dummy = Genome(name = 'GRCh37.75_Y-Only', SNPs = 'dummySRY_AGN', SNPFilter = MyFilter())
persProt = dummy.get(Protein, id = 'ENSP00000438917')[0]
refProt = self.ref.get(Protein, id = 'ENSP00000438917')[0]
self.assertEqual('M', refProt.sequence[0])
self.assertEqual('L', persProt.sequence[0])
# @unittest.skip("skipping")
def test_deletion(self) :
from pyGeno.SNPFiltering import SNPFilter
class MyFilter(SNPFilter) :
def __init__(self) :
SNPFilter.__init__(self)
def filter(self, chromosome, dummySRY_AGN) :
from pyGeno.SNPFiltering import SequenceDel
refAllele = chromosome.refSequence[dummySRY_AGN.start]
return SequenceDel(1)
dummy = Genome(name = 'GRCh37.75_Y-Only', SNPs = 'dummySRY_AGN', SNPFilter = MyFilter())
persProt = dummy.get(Protein, id = 'ENSP00000438917')[0]
refProt = self.ref.get(Protein, id = 'ENSP00000438917')[0]
self.assertEqual('ATGCAATCATATGCTTCTGC', refProt.transcript.cDNA[:20])
self.assertEqual('TGCAATCATATGCTTCTGCT', persProt.transcript.cDNA[:20])
# @unittest.skip("skipping")
def test_indels(self) :
from pyGeno.SNPFiltering import SNPFilter
class MyFilter(SNPFilter) :
def __init__(self) :
SNPFilter.__init__(self)
def filter(self, chromosome, dummySRY_AGN_indels) :
from pyGeno.SNPFiltering import SequenceInsert
ret = ""
for s in dummySRY_AGN_indels :
ret += "X"
return SequenceInsert(ret)
dummy = Genome(name = 'GRCh37.75_Y-Only', SNPs = 'dummySRY_AGN_indels', SNPFilter = MyFilter())
persProt = dummy.get(Protein, id = 'ENSP00000438917')[0]
refProt = self.ref.get(Protein, id = 'ENSP00000438917')[0]
self.assertEqual('XXXATGCAATCATATGCTTC', persProt.transcript.cDNA[:20])
# @unittest.skip("skipping")
def test_bags(self) :
dummy = Genome(name = 'GRCh37.75_Y-Only')
self.assertEqual(dummy.wrapped_object, self.ref.wrapped_object)
# @unittest.skip("skipping")
def test_prot_find(self) :
prot = self.ref.get(Protein, id = 'ENSP00000438917')[0]
needle = prot.sequence[:10]
self.assertEqual(0, prot.find(needle))
needle = prot.sequence[-10:]
self.assertEqual(len(prot)-10, prot.find(needle))
# @unittest.skip("skipping")
def test_trans_find(self) :
trans = self.ref.get(Transcript, name = "SRY-001")[0]
self.assertEqual(0, trans.find(trans[:5]))
# @unittest.skip("remote server down")
# def test_import_remote_genome(self) :
# self.assertRaises(KeyError, B.importRemoteGenome, "Human.GRCh37.75_Y-Only.tar.gz")
# @unittest.skip("remote server down")
# def test_import_remote_snps(self) :
# self.assertRaises(KeyError, B.importRemoteSNPs, "Human_agnostic.dummySRY.tar.gz")
def runTests() :
try :
B.importGenome("Human.GRCh37.75_Y-Only.tar.gz")
except KeyError :
deleteGenome("human", "GRCh37.75_Y-Only")
B.importGenome("Human.GRCh37.75_Y-Only.tar.gz")
print "--> Seems to already exist in db"
try :
B.importSNPs("Human_agnostic.dummySRY.tar.gz")
except KeyError :
deleteSNPs("dummySRY_AGN")
B.importSNPs("Human_agnostic.dummySRY.tar.gz")
print "--> Seems to already exist in db"
try :
B.importSNPs("Human_agnostic.dummySRY_indels")
except KeyError :
deleteSNPs("dummySRY_AGN_indels")
B.importSNPs("Human_agnostic.dummySRY_indels")
print "--> Seems to already exist in db"
# import time
# time.sleep(10)
unittest.main()
if __name__ == "__main__" :
runTests()
|
Python-3/basic_examples/python_wait.py | ghiloufibelgacem/jornaldev | 1,139 | 11165142 | <reponame>ghiloufibelgacem/jornaldev
import time
print('Hello There, next message will be printed after 5 seconds.')
time.sleep(5)
print('Sleep time is over.')
sec = input('Let us wait for user input. Let me know how many seconds to sleep now.\n')
print('Going to sleep for', sec, 'seconds.')
time.sleep(int(sec))
print('Enough of sleeping, I Quit!')
|
examples/stop_converged.py | farhansabir123/climin | 140 | 11165151 | <reponame>farhansabir123/climin<filename>examples/stop_converged.py
import scipy
from climin import GradientDescent
from climin.stops import converged
quadratic = lambda x: (x**2).sum()
quadraticprime = lambda x: 2 * x
if __name__ == '__main__':
dim = 10
wrt = scipy.random.standard_normal((dim,)) * 10 + 5
loss_converged = converged(lambda: quadratic(wrt))
opt = GradientDescent(wrt, quadraticprime, steprate=0.01)
for info in opt:
print "iteration %3i loss=%g" % (info['n_iter'], quadratic(wrt))
if loss_converged(info):
print "loss converged."
break
# the same can be achieved with minimize_until, if the user doesn't
# need control in between steps:
# opt.minimize_until( loss_converged ) |
models/ddpm.py | 4-geeks/score_sde_pytorch | 355 | 11165176 | <reponame>4-geeks/score_sde_pytorch
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""DDPM model.
This code is the pytorch equivalent of:
https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/models/unet.py
"""
import torch
import torch.nn as nn
import functools
from . import utils, layers, normalization
RefineBlock = layers.RefineBlock
ResidualBlock = layers.ResidualBlock
ResnetBlockDDPM = layers.ResnetBlockDDPM
Upsample = layers.Upsample
Downsample = layers.Downsample
conv3x3 = layers.ddpm_conv3x3
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name='ddpm')
class DDPM(nn.Module):
def __init__(self, config):
super().__init__()
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))
self.nf = nf = config.model.nf
ch_mult = config.model.ch_mult
self.num_res_blocks = num_res_blocks = config.model.num_res_blocks
self.attn_resolutions = attn_resolutions = config.model.attn_resolutions
dropout = config.model.dropout
resamp_with_conv = config.model.resamp_with_conv
self.num_resolutions = num_resolutions = len(ch_mult)
self.all_resolutions = all_resolutions = [config.data.image_size // (2 ** i) for i in range(num_resolutions)]
AttnBlock = functools.partial(layers.AttnBlock)
self.conditional = conditional = config.model.conditional
ResnetBlock = functools.partial(ResnetBlockDDPM, act=act, temb_dim=4 * nf, dropout=dropout)
if conditional:
# Condition on noise levels.
modules = [nn.Linear(nf, nf * 4)]
modules[0].weight.data = default_initializer()(modules[0].weight.data.shape)
nn.init.zeros_(modules[0].bias)
modules.append(nn.Linear(nf * 4, nf * 4))
modules[1].weight.data = default_initializer()(modules[1].weight.data.shape)
nn.init.zeros_(modules[1].bias)
self.centered = config.data.centered
channels = config.data.num_channels
# Downsampling block
modules.append(conv3x3(channels, nf))
hs_c = [nf]
in_ch = nf
for i_level in range(num_resolutions):
# Residual blocks for this resolution
for i_block in range(num_res_blocks):
out_ch = nf * ch_mult[i_level]
modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
in_ch = out_ch
if all_resolutions[i_level] in attn_resolutions:
modules.append(AttnBlock(channels=in_ch))
hs_c.append(in_ch)
if i_level != num_resolutions - 1:
modules.append(Downsample(channels=in_ch, with_conv=resamp_with_conv))
hs_c.append(in_ch)
in_ch = hs_c[-1]
modules.append(ResnetBlock(in_ch=in_ch))
modules.append(AttnBlock(channels=in_ch))
modules.append(ResnetBlock(in_ch=in_ch))
# Upsampling block
for i_level in reversed(range(num_resolutions)):
for i_block in range(num_res_blocks + 1):
out_ch = nf * ch_mult[i_level]
modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch))
in_ch = out_ch
if all_resolutions[i_level] in attn_resolutions:
modules.append(AttnBlock(channels=in_ch))
if i_level != 0:
modules.append(Upsample(channels=in_ch, with_conv=resamp_with_conv))
assert not hs_c
modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=32, eps=1e-6))
modules.append(conv3x3(in_ch, channels, init_scale=0.))
self.all_modules = nn.ModuleList(modules)
self.scale_by_sigma = config.model.scale_by_sigma
def forward(self, x, labels):
modules = self.all_modules
m_idx = 0
if self.conditional:
# timestep/scale embedding
timesteps = labels
temb = layers.get_timestep_embedding(timesteps, self.nf)
temb = modules[m_idx](temb)
m_idx += 1
temb = modules[m_idx](self.act(temb))
m_idx += 1
else:
temb = None
if self.centered:
# Input is in [-1, 1]
h = x
else:
# Input is in [0, 1]
h = 2 * x - 1.
# Downsampling block
hs = [modules[m_idx](h)]
m_idx += 1
for i_level in range(self.num_resolutions):
# Residual blocks for this resolution
for i_block in range(self.num_res_blocks):
h = modules[m_idx](hs[-1], temb)
m_idx += 1
if h.shape[-1] in self.attn_resolutions:
h = modules[m_idx](h)
m_idx += 1
hs.append(h)
if i_level != self.num_resolutions - 1:
hs.append(modules[m_idx](hs[-1]))
m_idx += 1
h = hs[-1]
h = modules[m_idx](h, temb)
m_idx += 1
h = modules[m_idx](h)
m_idx += 1
h = modules[m_idx](h, temb)
m_idx += 1
# Upsampling block
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
m_idx += 1
if h.shape[-1] in self.attn_resolutions:
h = modules[m_idx](h)
m_idx += 1
if i_level != 0:
h = modules[m_idx](h)
m_idx += 1
assert not hs
h = self.act(modules[m_idx](h))
m_idx += 1
h = modules[m_idx](h)
m_idx += 1
assert m_idx == len(modules)
if self.scale_by_sigma:
# Divide the output by sigmas. Useful for training with the NCSN loss.
# The DDPM loss scales the network output by sigma in the loss function,
# so no need of doing it here.
used_sigmas = self.sigmas[labels, None, None, None]
h = h / used_sigmas
return h
|
money/tests/test_docs.py | g--/money | 212 | 11165206 | <reponame>g--/money
# -*- coding: utf-8 -*-
"""
Money doctests as unittest Suite
"""
# RADAR: Python2
from __future__ import absolute_import
import doctest
import unittest
# RADAR: Python2
import money.six
FILES = (
'../../README.rst',
)
def load_tests(loader, tests, pattern):
# RADAR Python 2.x
if money.six.PY2:
# Doc tests are Python 3.x
return unittest.TestSuite()
return doctest.DocFileSuite(*FILES)
|
examples/hdf3.py | yang69can/pyngl | 125 | 11165209 | <gh_stars>100-1000
#
# File:
# hdf3.py
#
# Synopsis:
# Unpacks and plots HDF5 data.
#
# Category:
# Contours over maps
# Labelbar
# Maps
#
# Author:
# <NAME> (based on NCL example from <NAME>)
#
# Date of initial publication:
# April, 2015
#
# Description:
# This example reads precipitation from a group called "Grid"
# off an HDF5 file, and creates a color contour plot. The
# data comes in as lon x lat, so you have to transpose it
# before plotting.
#
# Effects illustrated:
# o Reading group data from an HDF5 file.
# o Drawing colored contours using named colors
# o Contouring in triangular mesh mode.
# o Transposing data
#
# Output:
# A single visualization is produced showing precipitation
# over a map.
from __future__ import print_function
import Nio,Ngl
import sys, os
import numpy as np
# Test if file exists
filename = "3B-MO.MS.MRG.3IMERG.20140701-S000000-E235959.07.V03D.HDF5"
if(not os.path.exists(filename)):
print("You do not have the necessary {} HDF5 file to run this example.".format(filename))
print("You need to supply your own HDF5 data")
sys.exit()
# Be sure to read this file using the advanced file structure.
opt = Nio.options()
opt.FileStructure = 'advanced'
f = Nio.open_file(filename, "r", options=opt)
# Open group "Grid" which will now look like a regular NioFile
g = f.groups['Grid']
#print(g)
# Read data from this group
precip = g.variables['precipitation']
lat = g.variables['lat'][:]
lon = g.variables['lon'][:]
yyyymmdd = filename.split(".")[4].split('-')[0]
# Print the metadata of precip, and min/max values
print(precip)
print("min/max = {:g} / {:g}".format(precip[:].min(), precip[:].max()))
wks_type = "png"
wks = Ngl.open_wks(wks_type,"hdf3")
res = Ngl.Resources()
res.cnFillOn = True # turn on contour fill
res.cnLinesOn = False # turn off contour lines
res.cnLineLabelsOn = False # turn off line labels
res.cnFillMode = "RasterFill" # These two resources
res.trGridType = "TriangularMesh" # can speed up plotting.
res.cnLevelSelectionMode = "ExplicitLevels"
res.cnLevels = [0.01,0.02,0.04,0.08,0.16,0.32,0.64, 0.96]
res.cnFillColors = ["white","cyan", "green","yellow",
"darkorange","red","magenta","purple","black"]
# make Dateline the center of the plot (default is GM)
res.mpCenterLonF = 180
res.tiMainString = "precipitation ({}) ({})".format(precip.units, yyyymmdd)
res.lbLabelFontHeightF = 0.01 # default is a bit large
res.lbOrientation = "horizontal"
res.sfMissingValueV = precip._FillValue
res.sfXArray = lon
res.sfYArray = lat
# Be sure to transpose data before plotting
plot = Ngl.contour_map(wks,precip[:].transpose(),res)
Ngl.end()
|
Algo and DSA/LeetCode-Solutions-master/Python/find-the-kth-largest-integer-in-the-array.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11165211 | # Time: O(n) ~ O(n^2), O(n) on average
# Space: O(1)
import random
class Solution(object):
def kthLargestNumber(self, nums, k):
"""
:type nums: List[str]
:type k: int
:rtype: str
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
nth_element(nums, k-1, compare=lambda a, b: a > b if len(a) == len(b) else len(a) > len(b))
return nums[k-1]
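# --- Illustrative usage (not part of the original solution file) ---
# With nums = ["3", "6", "7", "10"] the values in decreasing order are
# 10, 7, 6, 3, so the 4th largest is "3".
if __name__ == "__main__":
    print(Solution().kthLargestNumber(["3", "6", "7", "10"], 4))  # -> "3"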
|
configs/_base_/backends/ncnn-int8.py | grimoire/mmdeploy | 746 | 11165233 | <gh_stars>100-1000
backend_config = dict(type='ncnn', precision='INT8', use_vulkan=False)
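# --- Illustrative note (not part of the original config) ---
# This fragment is normally consumed through the config-inheritance mechanism,
# e.g. a deploy config might reference it with something like:
#   _base_ = ['../_base_/backends/ncnn-int8.py']
# (path relative to the including config; shown only as an assumption about
# typical usage, not taken from this repository).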
|
docs/conf.py | electroniceel/Glasgow | 1,014 | 11165245 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
import sphinx_rtd_theme
# Configure our load path
sys.path.insert(0, os.path.abspath('../software'))
# Configure Sphinx
extensions = ['sphinx.ext.viewcode', 'sphinx.ext.autodoc', 'sphinxarg.ext']
autodoc_member_order = 'bysource'
source_suffix = '.rst'
master_doc = 'index'
project = 'Glasgow Reference'
author = 'whitequark'
copyright = '2018-2019, whitequark'
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
|
amqp-postgres/amqp_postgres/main.py | ilan-WS/cloudify-manager | 124 | 11165250 | import logging
import argparse
from cloudify._compat import queue
from cloudify.amqp_client import get_client
from manager_rest import config
from manager_rest.flask_utils import setup_flask_app
from .amqp_consumer import AMQPLogsEventsConsumer, AckingAMQPConnection
from .postgres_publisher import DBLogEventPublisher
logger = logging.getLogger(__name__)
BROKER_PORT_SSL = 5671
BROKER_PORT_NO_SSL = 5672
DEFAULT_LOG_PATH = '/var/log/cloudify/amqp-postgres/amqp_postgres.log'
CONFIG_PATH = '/opt/manager/cloudify-rest.conf'
def _create_connections():
acks_queue = queue.Queue()
cfy_config = config.instance
port = BROKER_PORT_SSL if cfy_config.amqp_ca_path else BROKER_PORT_NO_SSL
amqp_client = get_client(
amqp_host=cfy_config.amqp_host,
amqp_user=cfy_config.amqp_username,
amqp_pass=cfy_config.amqp_password,
amqp_vhost='/',
amqp_port=port,
ssl_enabled=bool(cfy_config.amqp_ca_path),
ssl_cert_path=cfy_config.amqp_ca_path,
cls=AckingAMQPConnection
)
amqp_client.acks_queue = acks_queue
db_publisher = DBLogEventPublisher(config.instance, amqp_client)
amqp_consumer = AMQPLogsEventsConsumer(
message_processor=db_publisher.process
)
amqp_client.add_handler(amqp_consumer)
db_publisher.start()
return amqp_client, db_publisher
def main(args):
logging.basicConfig(
level=args.get('loglevel', 'INFO').upper(),
filename=args.get('logfile', DEFAULT_LOG_PATH),
format="%(asctime)s %(message)s")
config.instance.load_from_file(args['config'])
with setup_flask_app().app_context():
config.instance.load_from_db()
amqp_client, db_publisher = _create_connections()
logger.info('Starting consuming...')
amqp_client.consume()
if db_publisher.error_exit:
raise db_publisher.error_exit
def cli():
"""Parse arguments and run main"""
parser = argparse.ArgumentParser()
parser.add_argument('--config', default=CONFIG_PATH,
help='Path to the config file')
parser.add_argument('--logfile', default=DEFAULT_LOG_PATH,
help='Path to the log file')
parser.add_argument('--log-level', dest='loglevel', default='INFO',
help='Logging level')
args = parser.parse_args()
main(vars(args))
if __name__ == '__main__':
cli()
|
cle/backends/externs/simdata/io_file.py | Atipriya/cle | 317 | 11165265 | import struct
import logging
from . import SimData, register
from ...symbol import SymbolType
from .common import PointTo
l = logging.getLogger(name=__name__)
#
# Here, we define a specific structure (part of it at least) for the FILE structure.
# These offsets are copied from glibc for maximum compatibility, but we are effectively
# implementing SOME libc with these symbols, so we need SOME implementation of FILE.
#
# this is supposed to be an opaque structure, the internals of which are only cared about
# by an angr simprocedure or whatever implements the fread/fwrite/etc we're linking to. And since we're linking to
# this crap instead of a real stdin/stdout/etc, someone in python land will probably be the guy which needs ABI
# compatibility with us.
#
# however, it is also a desirable property that this is abi-compatible with glibc or something so the someone in python
# land could use this to interface with the "real" structure, which would be filled out by someone other than the
# below code. To this end we so far only have the fileno, but we could add more things like buffers
#
_IO_FILE = {
'MIPS32': {
'size': 148,
'fd': 0x38,
},
'X86': {
'size': 148,
'fd': 0x38,
},
'AMD64': {
'size': 216,
'fd': 0x70,
},
# Bionic libc does not use __IO_FILE
# Refer to http://androidxref.com/5.1.1_r6/xref/bionic/libc/include/stdio.h
# __sFILE replaces __IO_FILE
# _file replaces _fileno
'ARM': {
'size': 84,
'fd': 0x0e,
},
'AARCH64': {
'size': 152,
'fd': 0x14,
},
}
_IO_FILE['ARMEL'] = _IO_FILE['ARM']
_IO_FILE['ARMHF'] = _IO_FILE['ARM']
def io_file_data_for_arch(arch):
if arch.name not in _IO_FILE:
l.error("missing _IO_FILE offsets for arch: %s", arch.name)
return _IO_FILE['AMD64']
return _IO_FILE[arch.name]
class IoFilePointer(PointTo):
libname = 'libc.so.6'
pointto_type = SymbolType.TYPE_OBJECT
class IoStdinPointer(IoFilePointer):
name = 'stdin'
pointto_name = '_io_stdin'
class IoStdoutPointer(IoFilePointer):
name = 'stdout'
pointto_name = '_io_stdout'
class IoStderrPointer(IoFilePointer):
name = 'stderr'
pointto_name = '_io_stderr'
class IoFile(SimData):
libname = 'libc.so.6'
type = SymbolType.TYPE_OBJECT
fd = NotImplemented # type: int
@classmethod
def static_size(cls, owner):
return io_file_data_for_arch(owner.arch)['size']
    # the canonical version of this should be the FILEBUF_LITERAL macro from glibc
# for maximum hyperrealism we could have a dependency on the IO_jumps table which would have dependencies on
# all the functions we could care about which would be implemented by simprocedures
# but that's way overkill. see above discussion.
def value(self):
val = bytearray(self.size)
struct.pack_into(self.owner.arch.struct_fmt(size=4), val, io_file_data_for_arch(self.owner.arch)['fd'], self.fd)
struct.pack_into(self.owner.arch.struct_fmt(size=4), val, 0, 0xFBAD2088)
return bytes(val)
class IoStdin(IoFile):
name = '_io_stdin'
fd = 0
class IoStdout(IoFile):
name = '_io_stdout'
fd = 1
class IoStderr(IoFile):
name = '_io_stderr'
fd = 2
register(IoStdinPointer)
register(IoStdoutPointer)
register(IoStderrPointer)
register(IoStdin)
register(IoStdout)
register(IoStderr)
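# --- Illustrative sketch (not part of the original module) ---
# io_file_data_for_arch() only looks at ``arch.name``, so any object with that
# attribute (an archinfo Arch in real use, or the stand-in below) works.
if __name__ == "__main__":
    from types import SimpleNamespace
    layout = io_file_data_for_arch(SimpleNamespace(name="AMD64"))
    print(layout["size"], hex(layout["fd"]))  # 216 0x70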
|
pyleus/storm/serializers/msgpack_serializer.py | ecanzonieri/pyleus | 166 | 11165269 | <reponame>ecanzonieri/pyleus<filename>pyleus/storm/serializers/msgpack_serializer.py
"""Messagepack implementation of Pyleus serializer"""
import os
import msgpack
from pyleus.storm import StormWentAwayError
from pyleus.storm.serializers.serializer import Serializer
def _messages_generator(input_stream):
unpacker = msgpack.Unpacker()
while True:
# f.read(n) on sys.stdin blocks until n bytes are read, causing
# serializer to hang.
# os.read(fileno, n) will block if there is nothing to read, but will
# return as soon as it is able to read at most n bytes.
line = os.read(input_stream.fileno(), 1024 ** 2)
if not line:
# Handle EOF, which usually means Storm went away
raise StormWentAwayError()
# As python-msgpack docs suggest, we feed data to the unpacker
# internal buffer in order to let the unpacker deal with message
# boundaries recognition and uncomplete messages. In case input ends
# with a partial message, unpacker raises a StopIteration and will be
# able to continue after being feeded with the rest of the message.
unpacker.feed(line)
for i in unpacker:
yield i
class MsgpackSerializer(Serializer):
def __init__(self, input_stream, output_stream):
super(MsgpackSerializer, self).__init__(input_stream, output_stream)
self._messages = _messages_generator(self._input_stream)
def read_msg(self):
""""Messages are delimited by msgapck itself, no need for Storm
multilang end line.
"""
return next(self._messages)
def send_msg(self, msg_dict):
""""Messages are delimited by msgapck itself, no need for Storm
multilang end line.
"""
msgpack.pack(msg_dict, self._output_stream)
self._output_stream.flush()
|
pyNastran/dev/bdf_vectorized/cards/elements/spring/spring_element.py | ACea15/pyNastran | 293 | 11165270 | <gh_stars>100-1000
from pyNastran.dev.bdf_vectorized.cards.elements.element import Element
class SpringElement(Element):
def __init__(self, model):
"""
Defines the SpringElement object.
Parameters
----------
model : BDF
the BDF object
"""
Element.__init__(self, model)
|
socfaker/registry.py | priamai/soc-faker | 122 | 11165280 | <filename>socfaker/registry.py
from .baseclass import BaseClass
class Registry(BaseClass):
__hives = [
'HKEY_CURRENT_USER',
'HKEY_LOCAL_MACHINE',
'HKEY_USERS'
]
__root = None
__hive = None
__key = None
@property
def hive(self):
"""A random registry hive
Returns:
str: Returns a random registry hive
"""
if not self.__hive:
self.__hive = self.random.choice(self.__hives)
return self.__hive
@property
def root(self):
"""A random registry root path string
Returns:
str: Returns a random registry root path string
"""
if not self.__root:
self.__root = self.random.choice([
'SOFTWARE',
'SECURITY',
'SYSTEM'
])
return self.__root
@property
def key(self):
"""A random registry key
Returns:
str: Returns a random registry key
"""
if not self.__key:
NEXT_LEVEL = [
'POLICIES',
'CLASSES',
'SETUP',
'SOFTWARE'
]
PRODUCT = [
'MICROSOFT\\Windows\\CurrentVersion\\RunOnce\\',
'ADOBE',
'GOOGLE',
'ORACLE'
]
KEYS = [
'RUN',
'OPEN',
'SET'
]
            self.__key = '{}\\{}\\{}\\{}'.format(self.root, self.random.choice(NEXT_LEVEL), self.random.choice(PRODUCT), self.random.choice(KEYS))
return self.__key
@property
def path(self):
"""A full registry path
Returns:
str: Returns a random full registry path
"""
return '{}\\{}'.format(self.hive, self.key)
@property
def type(self):
"""A random registry key type
Returns:
str: A random registry key type
"""
return self.random.choice([
'REG_SZ',
'REG_DWORD',
'REG_EXPAND_SZ',
'REG_MULTI_SZ',
'REG_BINARY'
])
@property
def value(self):
"""A random registry key value
Returns:
str: A random registry key value
"""
return self.random.choice([
'Debugger',
'Enabled',
'Disabled',
'Unknown',
'1',
'0'
])
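# --- Hedged usage sketch (illustration only, not part of the original module) ---
# Assuming BaseClass needs no constructor arguments, a Registry instance yields
# plausible-looking random registry artifacts:
#
#   reg = Registry()
#   reg.path   # e.g. 'HKEY_LOCAL_MACHINE\\SOFTWARE\\POLICIES\\GOOGLE\\RUN'
#   reg.type   # e.g. 'REG_DWORD'
#   reg.value  # e.g. 'Enabled'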
|
core_scripts/data_io/text_process/toolkit_all.py | Nijta/project-NN-Pytorch-scripts | 150 | 11165296 | #!/usr/bin/env python
"""
Simple text processer for all languages
Based on https://github.com/fatchord/WaveRNN
"""
import os
import sys
import re
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2021, Xin Wang"
#####
## Parse the curly bracket
#####
# from https://github.com/fatchord/WaveRNN
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# symbol to indicate phonemic annotation
_curly_symbol = '{'
def parse_curly_bracket(text):
""" Prase the text based on curly brackets
Inspired by https://github.com/fatchord/WaveRNN: when input text
is mixed with raw text and phonemic annotation, the {} pair indicates
the phonemic part
input
-----
text: str
output
------
text_list: list of str
    For example, 'text {AH II} test' -> ['text ', '{AH II', ' test']
"""
text_list = []
text_tmp = text
while len(text_tmp):
re_matched = _curly_re.match(text_tmp)
if re_matched:
# e.g., 'text {AH II} test'
# group(1), group(2) -> ['text ', 'AH II']
text_list.append(re_matched.group(1))
text_list.append(_curly_symbol + re_matched.group(2))
# group(3) -> ' test'
text_tmp = re_matched.group(3)
else:
text_list.append(text_tmp)
break
return text_list
if __name__ == "__main__":
print("Definition of text processing tools for all languages")
|
dockerfiles/settings/proxito.py | mforbes/readthedocs.org | 4,054 | 11165330 | from readthedocs.settings.proxito.base import CommunityProxitoSettingsMixin
from .docker_compose import DockerBaseSettings
class ProxitoDevSettings(CommunityProxitoSettingsMixin, DockerBaseSettings):
# El Proxito does not have django-debug-toolbar installed
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda request: False,
}
ProxitoDevSettings.load_settings(__name__)
|
riptable/rt_pdataset.py | wenjuno/riptable | 307 | 11165452 | __all__ = ['PDataset']
import os
from typing import Union, List
import warnings
import numpy as np
from .rt_fastarray import FastArray
from .rt_enum import (
TypeRegister,
DisplayJustification,
)
from .rt_numpy import (
unique,
empty,
cumsum,
searchsorted,
max,
)
from .rt_dataset import Dataset
from .rt_sds import load_sds
from .rt_itemcontainer import ItemContainer
from .rt_groupby import GroupBy
class PDataset(Dataset):
'''
    The PDataset class inherits from Dataset. It holds multiple datasets (previously stacked together) in contiguous slices.
Each partition has a name and a contiguous slice that can be used to extract it from the larger Dataset.
Extracting a partition is zero-copy. Partitions can be extracted using partition(), or bracket [] indexing.
A PDataset is often returned when:
Multiple Datasets are hstacked, i.e. hstack([ds1, ds2, ds3])
Calling load_sds with stack=True, i.e. load_sds([file1, file2, file3], stack=True)
Properties: prows, pdict, pnames, pcount, pgb, pgbu, pgroupby, pslices, piter, pcutoffs
Methods: partition(), pslice(), showpartitions()
pds['20190204'] or pds[20190204] will return a dataset for the given partition name
Construction:
-------------
inputval : -list of files to load and stack
-list of datasets to stack
-regular dataset inputval (will only have one partition)
PDataset([path1, path2, path3], (pnames))
-call load_sds(stack=True)
-paths become filenames
-if pnames specified, use those, otherwise look for dates
-if no dates, auto generate pnames
PDataset([ds1, ds2, ds3], (filenames, pnames))
PDataset(ds, (filenames, pnames))
-call Dataset.hstack()
-if pnames specified, use those
-if filenames, look for dates
-if no dates, auto generate pnames
PDataset(arraydict, cutoffs, (filenames, pnames))
-constructor from load_sds()
-if pnames specified, use those
-if filenames, look for dates
-if no dates, auto generate pnames
'''
# ------------------------------------------------------------
def __init__(
self,
inputval: Union[list, dict, 'Dataset', 'ItemContainer'] = None,
cutoffs=None,
filenames: List[str] = None,
pnames=None,
showpartitions=True,
**kwargs,
):
if inputval is None:
inputval = dict()
if filenames is None:
filenames = list()
if type(inputval) == TypeRegister.Dataset:
inputval = [inputval]
# stack datasets or load from list of files
if isinstance(inputval, list):
inputval, cutoffs, filenames, pnames = self._init_from_list(
inputval, filenames, pnames
)
self._pre_init()
# fast track for itemcontainer
if isinstance(inputval, ItemContainer):
self._init_from_itemcontainer(inputval)
# load items from object that can be turned into dictionary
else:
inputval = self._init_columns_as_dict(inputval)
self._init_from_dict(inputval)
self._post_init(
cutoffs=cutoffs,
filenames=filenames,
pnames=pnames,
showpartitions=showpartitions,
)
# ------------------------------------------------------------
def _pre_init(self):
'''
Keep this in for chaining pre-inits in parent classes.
'''
super()._pre_init()
# ------------------------------------------------------------
def _post_init(self, cutoffs, filenames, pnames, showpartitions):
'''
Final initializer for variables specific to PDataset.
Also initializes variables from parent class.
'''
super()._post_init()
self._showpartitions = showpartitions
# cutoffs will be the same for dataset columns
if cutoffs is not None:
self._pcutoffs = list(cutoffs.values())[0]
else:
# assume one row, init from dataset
self._pcutoffs = FastArray([self._nrows])
# number of rows in each partition
self._prows = self._pcutoffs.copy()
if len(self._prows) > 1:
# calculate row length
self._prows[1:] -= self._pcutoffs[:-1]
# look for dates in filenames or autogenerate names
if pnames is None:
pnames, filenames = self._init_pnames_filenames(
len(self._prows), pnames, filenames
)
self._pfilenames = filenames
self._pnames = {p: i for i, p in enumerate(pnames)}
# use provided pnames
else:
self._pfilenames = filenames
if isinstance(pnames, list):
pnames = {p: i for i, p in enumerate(pnames)}
self._pnames = pnames
self._pcat = None
# ------------------------------------------------------------
@classmethod
def _filenames_to_pnames(cls, filenames):
'''
At least two filenames must be present to compare
        The algorithm reverses each filename on the assumption that path names can vary at the front of the string
It also assumes that the filenames end similarly, such as ".SDS"
It will search for the difference and look for digits, then try to extract the digits
'''
# reverse all the filenames
if len(filenames) > 0:
rfilenames = [f[::-1] for f in filenames]
str_arr = TypeRegister.FastArray(rfilenames)
str_numba = str_arr.numbastring
if len(filenames) > 1:
match_mask = str_numba[0] != str_numba[1]
str_len = len(match_mask)
for i in range(len(filenames) - 2):
# inplace OR loop so that the TRUE propagates
match_mask += str_numba[0] != str_numba[i + 2]
for i in range(str_len):
if match_mask[i]:
break
start = i
for i in range(start + 1, str_len):
if not match_mask[i]:
break
end = i
# expand start if possible
while start > 0:
char = str_numba[0][start - 1]
# as long as a numeric digit, keep expanding
if char >= 48 and char <= 58:
start = start - 1
else:
break
# expand end if possible
while end < str_len:
char = str_numba[0][end]
if char >= 48 and char <= 58:
end = end + 1
else:
break
# check to see if we captured a number
firstchar = str_numba[0][start]
lastchar = str_numba[0][end - 1]
if (
start < end
and firstchar >= 48
and firstchar <= 58
and lastchar >= 48
and lastchar <= 58
):
pnames = []
viewtype = 'S' + str(end - start)
for i in range(len(filenames)):
newstring = str_numba[i][start:end].view(viewtype)
newstring = newstring[0].astype('U')
# append the reverse
pnames.append(newstring[::-1])
u = unique(pnames)
if len(u) == len(filenames):
return pnames
# removed, prints during every column index/copy
# print(f"Failed to find unique numbers in filenames {pnames}")
else:
# only one file
filename = str(rfilenames[0])
start = -1
stop = -1
# search for first number
for i in range(len(filename)):
if filename[i].isdigit():
if start == -1:
start = i
elif start != -1:
stop = i
break
if start != -1:
if stop == -1:
stop = start + 1
# extract just the number
filename = filename[start:stop]
return [filename[::-1]]
# failed to find unique strings in filenames
# default to p0, p1, p2
pnames = cls._auto_pnames(len(filenames))
return pnames
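    # Hedged illustration (comments only): for date-encoded paths the classmethod
    # above recovers the dates as partition names, e.g.
    #   PDataset._filenames_to_pnames(['/d/x_20190204.sds', '/d/x_20190205.sds'])
    #   -> ['20190204', '20190205']
    # and falls back to ['p0', 'p1', ...] when no unique digit run is found.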
# ------------------------------------------------------------
@classmethod
def _init_from_list(cls, dlist, filenames, pnames):
'''
Construct a PDataset from multiple datasets, or by loading multiple files.
'''
# make sure only one type
listtype = {type(i) for i in dlist}
if len(listtype) == 1:
listtype = list(listtype)[0]
else:
raise TypeError(f'Found multiple types in constructor list {listtype}')
# hstack datasets
if listtype == Dataset:
start = 0
cutoffs = cumsum([ds.shape[0] for ds in dlist])
cutoffs = {'cutoffs': cutoffs}
ds = TypeRegister.Dataset.concat_rows(dlist)
# extract itemcontainer
ds = ds._all_items
pnames, filenames = cls._init_pnames_filenames(
len(dlist), pnames, filenames
)
# perform a .sds load from multiple files
elif issubclass(listtype, (str, bytes, os.PathLike)):
ds = load_sds(dlist, stack=True)
cutoffs = {'cutoffs': ds._pcutoffs}
filenames = ds._pfilenames
if pnames is None:
pnames = ds._pnames # dict
# extract itemcontainer
ds = ds._all_items
else:
raise TypeError(f'Cannot construct from list of type {listtype}')
return ds, cutoffs, filenames, pnames
# ------------------------------------------------------------
@classmethod
def _auto_pnames(cls, pcount):
'''
Auto generate partition names if none provided and no date found in filenames.
'''
return ['p' + str(i) for i in range(pcount)]
# ------------------------------------------------------------
def _autocomplete(self) -> str:
return f'PDataset{self.shape}'
# ------------------------------------------------------------
@classmethod
def _init_pnames_filenames(cls, pcount, pnames, filenames):
'''
Initialize filenames, pnames based on what was provided to the constructor.
If no pnames provided, try to derive a date from filenames
If no date found, or no filenames provided, use default names [p0, p1, p2 ...]
Parameters
----------
pcount : int
number of partitions, in case names need to be auto generated
pnames : list of str, optional
list of partition names or None
filenames : sequence of str, optional
list of file paths (possibly empty)
'''
if pnames is None:
if filenames is None or len(filenames) == 0:
filenames = []
pnames = cls._auto_pnames(pcount)
else:
pnames = cls._filenames_to_pnames(filenames)
return pnames, filenames
# ------------------------------------------------------------
def _copy(self, deep=False, rows=None, cols=None, base_index=0, cls=None):
''' returns a PDataset if no row selection, otherwise Dataset'''
if rows is None:
newcols = self._as_itemcontainer(
deep=deep, rows=rows, cols=cols, base_index=base_index
)
# create a new PDataset
pds = type(self)(
newcols,
cutoffs={'cutoffs': self.pcutoffs},
filenames=self._pfilenames,
pnames=self._pnames,
base_index=base_index,
)
pds = self._copy_attributes(pds, deep=deep)
else:
# row slicing will break partitions, return a regular Dataset
cls = TypeRegister.Dataset
pds = super()._copy(
deep=deep, rows=rows, cols=cols, base_index=base_index, cls=cls
)
return pds
# ------------------------------------------------------------
def _ipython_key_completions_(self):
# For tab autocomplete with __getitem__
# NOTE: %config IPCompleter.greedy=True might have to be set
# autocompleter will sort the keys
return self.keys() + self.pnames
# ------------------------------------------------------------
@property
def pcutoffs(self):
'''
Returns
-------
Cutoffs for partition. For slicing, maintain contiguous arrays.
Examples
--------
>>> pds.pcutoffs
FastArray([1447138, 3046565, 5344567], dtype=int64)
'''
return self._pcutoffs
# ------------------------------------------------------------
@property
def prows(self):
'''
Returns
-------
An array with the number of rows in each partition.
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.prows
FastArray([1447138, 2599427, 1909895], dtype=int64)
'''
return self._prows
# ------------------------------------------------------------
@property
def pcount(self):
'''
Returns
-------
Number of partitions
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pcount
3
'''
return len(self._prows)
# ------------------------------------------------------------
@property
def pnames(self):
'''
Returns
-------
A list with the names of the partitions
Example
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pnames
['20190205', '20190206', '20190207']
'''
return [*self._pnames.keys()]
def set_pnames(self, pnames):
'''
Input
-----
A list of strings
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pnames
['20190205', '20190206', '20190207']
>>> pds.set_pnames(['Jane', 'John', 'Jill'])
['Jane', 'John', 'Jill']
'''
if isinstance(pnames, list):
if len(pnames) == len(self._pnames):
newpnames = {}
for i in range(len(pnames)):
newpnames[pnames[i]] = i
if len(newpnames) == len(self._pnames):
self._pnames = newpnames
else:
raise ValueError(f'The new pnames must be unique names: {pnames}')
else:
raise ValueError(
f'The length of the new pnames must match the length of the old pnames: {len(self._pnames)}'
)
else:
            raise ValueError(f'A list of strings must be passed in')
return [*self._pnames.keys()]
# ------------------------------------------------------------
@property
def pdict(self):
'''
Returns
--------
A dictionary with the partition names and the partition slices.
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pdict
{'20190204': slice(0, 1447138, None),
'20190205': slice(1447138, 3046565, None),
'20190206': slice(3046565, 4509322, None)}
'''
pdict = {name: self.pslice(i) for i, name in enumerate(self.pnames)}
return pdict
# ------------------------------------------------------------
# -------------------------------------------------------
def pgb(self, by, **kwargs):
"""Equivalent to :meth:`~rt.rt_dataset.Dataset.pgroupby`"""
kwargs['sort'] = True
return self.pgroupby(by, **kwargs)
# -------------------------------------------------------
def pgroupby(self, by, **kwargs):
return GroupBy(self, by, cutoffs=self._pcutoffs, **kwargs)
def igroupby(self):
'''
Lazily generate a categorical binned by each partition.
Data will be attached to categorical, so operations can be called without specifying data.
        This allows reduce functions to be applied per partition.
Examples
--------
Example below assumes 3 filenames date encoded with datasets
        >>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pgroupby['AskSize'].sum()
*Partition TradeSize
---------- ---------
20190204 1.561e+07
20190205 1.950e+07
20190206 1.532e+07
See Also: Dataset.groupby, Dataset.gb, Dataset.gbu
'''
reserved_name = 'Partition'
if reserved_name not in self:
self[reserved_name] = self.pcat
self.col_move_to_front(reserved_name)
return self.gb(reserved_name)
@property
def pcat(self):
'''
        Lazily generates a categorical for the row labels callback or pgroupby
'''
if self._pcat is None:
idx = empty((self.shape[0],), dtype=np.int32)
for i in range(self.pcount):
idx[self.pslice(i)] = i + 1
label = self.pnames
self._pcat = TypeRegister.Categorical(idx, label)
return self._pcat
# ------------------------------------------------------------
def prow_labeler(self, rownumbers, style):
'''
Display calls this routine back to replace row numbers.
rownumbers : fancy index of row numbers being displayed
style : ColumnStyle object - default from DisplayTable, can be changed
Returns: label header, label array, style
'''
if self._showpartitions:
style.align = DisplayJustification.Right
# use the cutoffs to generate which partition index
pindex = searchsorted(self._pcutoffs, rownumbers, side='right')
plabels = TypeRegister.FastArray(self.pnames)[pindex]
# find the maximum string width for the rownumber
if len(rownumbers) > 0: maxnum = max(rownumbers)
else: maxnum = 0
width = len(str(maxnum))
# right justify numbers
rownumbers = rownumbers.astype('S')
rownumbers = np.chararray.rjust(rownumbers, width)
# column header
header = 'partition + #'
rownumbers = plabels + ' ' + rownumbers
# set the style width to override the string trim
style.width = rownumbers.itemsize
return header, rownumbers, style
else:
return '#', rownumbers, style
# ------------------------------------------------------------
@property
def _row_numbers(self):
# display will check for the existence of this method
# return a callback to change the row numbers
return self.prow_labeler
# ------------------------------------------------------------
def showpartitions(self, show=True):
''' toggle whether partitions are shown on the left '''
if show:
self._showpartitions = True
else:
self._showpartitions = False
# ------------------------------------------------------------
@property
def piter(self):
'''
        Iterate over the partitions.
        Yields (partition name, partition Dataset) pairs.
Examples
--------
Example below assumes 3 filenames date encoded with datasets
        >>> pds = load_sds([file1, file2, file3], stack=True)
        >>> for name, ds in pds.piter: print(name)
20190204
20190205
20190206
'''
label = self.pnames
start = 0
for i in range(self.pcount):
yield label[i], self.partition(i)
# -------------------------------------------------------
@property
def pslices(self):
'''
Return the slice (start,end) associated with the partition number
See Also
--------
pslices, pdict
Examples
--------
Example below assumes 3 filenames date encoded with datasets
        >>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pslices
[slice(0, 1447138, None),
slice(1447138, 3046565, None),
slice(3046565, 4509322, None)]
'''
pslices = [self.pslice(i) for i in range(self.pcount)]
return pslices
# -------------------------------------------------------
def pslice(self, index):
'''
Return the slice (start,end) associated with the partition number
See Also
--------
pslices, pdict
Examples
--------
>>> pds.pslice(0)
slice(0, 1447138, None)
'''
if isinstance(index, (int, np.integer)):
if index == 0:
return slice(0, self.pcutoffs[index])
else:
return slice(self.pcutoffs[index - 1], self.pcutoffs[index])
raise IndexError(
f'Cannot slice a partition with type {type(index)!r}. Use an integer instead.'
)
# -------------------------------------------------------
def partition(self, index):
'''
Return the Dataset associated with the partition number
Examples
--------
Example below assumes 3 filenames with datasets
        >>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.partition(0)
'''
if isinstance(index, (int, np.integer)):
# this will route to the dataset
return self._copy(rows=self.pslice(index))
if isinstance(index, str):
# this will loop back if the string is a partition name
return self[index]
raise IndexError(
            f'Cannot index a partition with type {type(index)!r}. Use an integer instead.'
)
# -------------------------------------------------------
def __getitem__(self, index):
"""
:param index: (rowspec, colspec) or colspec
:return: the indexed row(s), cols(s), sub-dataset or single value
:raise IndexError:
:raise TypeError:
:raise KeyError:
"""
try:
return super().__getitem__(index)
except:
# if it fails, maybe it was a partition selection
if isinstance(index, (int, np.integer)):
# convert int to string to lookup
index = str(index)
# the string was not a column name, now check for partition name
if isinstance(index, str):
if index in self._pnames:
# return the dataset for that partition
return self.partition(self._pnames[index])
else:
raise KeyError(
                        f'the key {index!r} was not found as a column name or partition name'
)
else:
raise KeyError(f'could not index PDataset with {type(index)}')
# --------------------------------------------------------------------------
def save(
self,
path='',
share=None,
compress=True,
overwrite=True,
name=None,
onefile: bool = False,
bandsize=None,
append=None,
complevel=None,
):
warnings.warn(
f"To be implemented. PDataset will currently be saved / loaded as a Dataset."
)
super().save(
path=path,
share=share,
compress=compress,
overwrite=overwrite,
name=name,
onefile=onefile,
bandsize=bandsize,
append=append,
complevel=complevel,
)
# --------------------------------------------------------------------------
@classmethod
def hstack(cls, pds_list):
'''
Stacks columns from multiple datasets.
see: Dataset.concat_rows
'''
raise NotImplementedError("PDataset does not stack yet")
# ------------------------------------------------------------
@classmethod
def pload(cls, path, start, end, include=None, threads=None, folders=None):
'''
Returns a PDataset of stacked files from multiple days.
Will load all files found within the date range provided.
Parameters:
-----------
path : format string for filepath, {} in place of YYYYMMDD. {} may appear multiple times.
start : integer or string start date in format YYYYMMDD
end : integer or string end date in format YYYYMMDD
'''
# insert date string at each of these
fmtcount = path.count('{}')
# final loader will check if dates exist, kill warnings?
pnames = TypeRegister.Date.range(str(start), str(end)).yyyymmdd.astype('U')
try:
import sotpath
files = [sotpath.path2platform(path.format(*[d] * fmtcount)) for d in pnames]
except:
files = [path.format(*[d] * fmtcount) for d in pnames]
pds = load_sds(
files, include=include, stack=True, threads=threads, folders=folders
)
return pds
# ------------------------------------------------------------
def psave(self):
'''
        Does not work yet. Would save back out all of the partitions.
'''
raise NotImplementedError(f'not implemented yet')
TypeRegister.PDataset = PDataset
|
lib/python/frugal/transport/memory_output_buffer.py | ariasheets-wk/frugal | 144 | 11165453 | <reponame>ariasheets-wk/frugal
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from thrift.transport.TTransport import TTransportException
from frugal.exceptions import TTransportExceptionType
from thrift.transport.TTransport import TMemoryBuffer
class TMemoryOutputBuffer(TMemoryBuffer, object):
"""
An implementation of TMemoryBuffer using a bounded memory buffer. Writes
that cause the buffer to exceed its size throw an FMessageSizeException.
This implementation handles framing data.
"""
def __init__(self, limit, value=None):
"""
        Create an instance of TMemoryOutputBuffer where limit is the
maximum writable length of the buffer.
Args:
limit: integer size limit of the buffer
value: optional data value to initialize the buffer with.
"""
super(TMemoryOutputBuffer, self).__init__(value)
self._limit = limit
def write(self, buf):
"""
Bounded write to buffer
"""
if len(self) + len(buf) > self._limit > 0:
self._buffer = TMemoryBuffer()
raise TTransportException(
type=TTransportExceptionType.REQUEST_TOO_LARGE,
message="Buffer size reached {}".format(self._limit))
self._buffer.write(buf)
def getvalue(self):
# TODO make more efficient?
data = self._buffer.getvalue()
return struct.pack('!I', len(data)) + data
def read(self, sz):
raise Exception("don't call this")
def __len__(self):
return len(self.getvalue())
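# --- Hedged usage sketch (illustration only, not part of the original module) ---
# getvalue() prepends a 4-byte big-endian length frame to the buffered bytes:
#
#   import struct
#   buf = TMemoryOutputBuffer(limit=1024)
#   buf.write(b'hello')
#   framed = buf.getvalue()
#   assert struct.unpack('!I', framed[:4])[0] == 5 and framed[4:] == b'hello'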
|
configs/cbnet/htc_cbv2_swin_large_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_1x_coco.py | minouei-kl/CBNetV2 | 271 | 11165493 | _base_ = 'htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.py'
model = dict(
backbone=dict(
embed_dim=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=7,
ape=False,
drop_path_rate=0.2,
patch_norm=True,
use_checkpoint=False
),
neck=dict(in_channels=[192, 384, 768, 1536])
)
lr_config = dict(step=[8, 11])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=12) |
neuralcompression/functional/_ndtr.py | tallamjr/NeuralCompression | 233 | 11165522 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch import Tensor
def ndtr(x: Tensor) -> Tensor:
"""The normal cumulative distribution function (CDF).
Args:
x: the input tensor.
Returns:
the area under the standard Normal probability density function (PDF),
integrated from negative infinity to :math:`x`.
"""
x = x.to(torch.float) * math.sqrt(0.5)
y = 0.5 * torch.erfc(abs(x))
return torch.where(
abs(x) < math.sqrt(0.5),
0.5 + 0.5 * torch.erf(x),
torch.where(
x > 0,
1 - y,
y,
),
)
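# --- Hedged usage sketch (illustration only, not part of the original module) ---
# ndtr(0) is 0.5 and the function increases monotonically toward 1:
#
#   x = torch.tensor([-2.0, 0.0, 2.0])
#   ndtr(x)  # approximately tensor([0.0228, 0.5000, 0.9772])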
|
Algorithm.Python/Benchmarks/StatelessCoarseUniverseSelectionBenchmark.py | BlackBoxAM/Lean | 6,580 | 11165536 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
class StatelessCoarseUniverseSelectionBenchmark(QCAlgorithm):
def Initialize(self):
self.UniverseSettings.Resolution = Resolution.Daily
self.SetStartDate(2017, 11, 1)
self.SetEndDate(2018, 1, 1)
self.SetCash(50000)
self.AddUniverse(self.CoarseSelectionFunction)
self.numberOfSymbols = 250
# sort the data by daily dollar volume and take the top 'NumberOfSymbols'
def CoarseSelectionFunction(self, coarse):
selected = [x for x in coarse if (x.HasFundamentalData)]
# sort descending by daily dollar volume
sortedByDollarVolume = sorted(selected, key=lambda x: x.DollarVolume, reverse=True)
# return the symbol objects of the top entries from our sorted collection
return [ x.Symbol for x in sortedByDollarVolume[:self.numberOfSymbols] ]
def OnSecuritiesChanged(self, changes):
# if we have no changes, do nothing
if changes is None: return
# liquidate removed securities
for security in changes.RemovedSecurities:
if security.Invested:
self.Liquidate(security.Symbol)
for security in changes.AddedSecurities:
self.SetHoldings(security.Symbol, 0.001)
|
minidump/utils/winapi/psapi.py | mrexodia/minidump | 215 | 11165583 | <reponame>mrexodia/minidump
from minidump.utils.winapi.defines import *
# typedef struct _MODULEINFO {
# LPVOID lpBaseOfDll;
# DWORD SizeOfImage;
# LPVOID EntryPoint;
# } MODULEINFO, *LPMODULEINFO;
class MODULEINFO(Structure):
_fields_ = [
("lpBaseOfDll", LPVOID), # remote pointer
("SizeOfImage", DWORD),
("EntryPoint", LPVOID), # remote pointer
]
LPMODULEINFO = POINTER(MODULEINFO)
# BOOL WINAPI EnumProcessModules(
# __in HANDLE hProcess,
# __out HMODULE *lphModule,
# __in DWORD cb,
# __out LPDWORD lpcbNeeded
# );
def EnumProcessModules(hProcess):
_EnumProcessModules = windll.psapi.EnumProcessModules
_EnumProcessModules.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD]
_EnumProcessModules.restype = bool
_EnumProcessModules.errcheck = RaiseIfZero
size = 0x1000
lpcbNeeded = DWORD(size)
unit = sizeof(HMODULE)
while 1:
lphModule = (HMODULE * (size // unit))()
_EnumProcessModules(hProcess, byref(lphModule), lpcbNeeded, byref(lpcbNeeded))
needed = lpcbNeeded.value
if needed <= size:
break
size = needed
return [ lphModule[index] for index in range(0, int(needed // unit)) ]
def GetModuleFileNameExW(hProcess, hModule = None):
_GetModuleFileNameExW = ctypes.windll.psapi.GetModuleFileNameExW
_GetModuleFileNameExW.argtypes = [HANDLE, HMODULE, LPWSTR, DWORD]
_GetModuleFileNameExW.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_unicode_buffer(u"", nSize)
nCopied = _GetModuleFileNameExW(hProcess, hModule, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
# BOOL WINAPI GetModuleInformation(
# __in HANDLE hProcess,
# __in HMODULE hModule,
# __out LPMODULEINFO lpmodinfo,
# __in DWORD cb
# );
def GetModuleInformation(hProcess, hModule, lpmodinfo = None):
_GetModuleInformation = windll.psapi.GetModuleInformation
_GetModuleInformation.argtypes = [HANDLE, HMODULE, LPMODULEINFO, DWORD]
_GetModuleInformation.restype = bool
_GetModuleInformation.errcheck = RaiseIfZero
if lpmodinfo is None:
lpmodinfo = MODULEINFO()
_GetModuleInformation(hProcess, hModule, byref(lpmodinfo), sizeof(lpmodinfo))
return lpmodinfo |
pycorrector/deepcontext/infer.py | ParikhKadam/pycorrector | 3,153 | 11165611 | # -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description: Inference
"""
import operator
import os
import sys
import time
import torch
from torch import optim
sys.path.append('../..')
from pycorrector.deepcontext.model import Context2vec
from pycorrector.deepcontext.data_reader import read_config, load_word_dict
from pycorrector.utils.text_utils import is_chinese_string, convert_to_unicode
from pycorrector.utils.tokenizer import split_text_by_maxlen
from pycorrector.corrector import Corrector
from pycorrector.utils.logger import logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Inference(Corrector):
def __init__(self, model_dir, vocab_path):
super(Inference, self).__init__()
self.name = 'bert_corrector'
t1 = time.time()
# device
logger.debug("device: {}".format(device))
model, config_dict = self._read_model(model_dir)
# norm weight
model.norm_embedding_weight(model.criterion.W)
self.model = model
self.model.eval()
self.unk_token, self.sos_token, self.eos_token, self.pad_token, self.itos, self.stoi = self._get_config_data(
config_dict, vocab_path)
self.model_dir = model_dir
self.vocab_path = vocab_path
self.mask = "[]"
logger.debug('Loaded deep context model: %s, spend: %.3f s.' % (model_dir, time.time() - t1))
@staticmethod
def _read_model(model_dir):
config_file = os.path.join(model_dir, 'config.json')
config_dict = read_config(config_file)
model = Context2vec(vocab_size=config_dict['vocab_size'],
counter=[1] * config_dict['vocab_size'],
word_embed_size=config_dict['word_embed_size'],
hidden_size=config_dict['hidden_size'],
n_layers=config_dict['n_layers'],
use_mlp=config_dict['use_mlp'],
dropout=config_dict['dropout'],
pad_index=config_dict['pad_index'],
device=device,
is_inference=True
).to(device)
model.load_state_dict(torch.load(os.path.join(model_dir, 'model.pth')))
optimizer = optim.Adam(model.parameters(), lr=config_dict['learning_rate'])
optimizer.load_state_dict(torch.load(os.path.join(model_dir, 'model_optimizer.pth')))
return model, config_dict
@staticmethod
def _get_config_data(config_dict, vocab_path):
# load model
unk_token = config_dict['unk_token']
sos_token = config_dict['sos_token']
eos_token = config_dict['eos_token']
pad_token = config_dict['pad_token']
# read vocab
stoi = load_word_dict(vocab_path)
itos = {v: k for k, v in stoi.items()}
return unk_token, sos_token, eos_token, pad_token, itos, stoi
def predict_mask_token(self, tokens, mask_index, k=10):
pred_words = []
tokens[mask_index] = self.unk_token
tokens = [self.sos_token] + tokens + [self.eos_token]
indexed_sentence = [self.stoi[token] if token in self.stoi else self.stoi[self.unk_token] for token in tokens]
input_tokens = torch.tensor(indexed_sentence, dtype=torch.long, device=device).unsqueeze(0)
topv, topi = self.model.run_inference(input_tokens, target=None, target_pos=mask_index, k=k)
for value, key in zip(topv, topi):
score = value.item()
word = self.itos[key.item()]
if word in [self.unk_token, self.sos_token, self.eos_token, self.pad_token]:
continue
pred_words.append((word, score))
return pred_words
def predict(self, text, **kwargs):
details = []
text_new = ''
self.check_corrector_initialized()
        # Normalize encoding: convert utf-8 to unicode
text = convert_to_unicode(text)
        # Split long text into shorter blocks
blocks = split_text_by_maxlen(text, maxlen=128)
for blk, start_idx in blocks:
blk_new = ''
for idx, s in enumerate(blk):
                # Handle errors in Chinese text
if is_chinese_string(s):
sentence_lst = list(blk_new + blk[idx:])
sentence_lst[idx] = self.mask
                    # Predict; take the top 10 candidates by default
predict_words = self.predict_mask_token(sentence_lst, idx, k=10)
top_tokens = []
for w, _ in predict_words:
top_tokens.append(w)
if top_tokens and (s not in top_tokens):
                        # Collect all plausible correction candidates for this character
candidates = self.generate_items(s)
if candidates:
for token_str in top_tokens:
if token_str in candidates:
details.append((s, token_str, start_idx + idx, start_idx + idx + 1))
s = token_str
break
blk_new += s
text_new += blk_new
details = sorted(details, key=operator.itemgetter(2))
return text_new, details
if __name__ == "__main__":
from pycorrector.deepcontext import config
sents = ["而且我希望不再存在抽延的人。",
"男女分班有什膜好处?",
"由我开始作起。"]
inference = Inference(config.model_dir, config.vocab_path)
for i in sents:
r = inference.predict(i)
print(i, r)
|
cogdl/layers/sgc_layer.py | cenyk1230/cogdl | 1,072 | 11165639 | import torch.nn as nn
from cogdl.utils import spmm
class SGCLayer(nn.Module):
def __init__(self, in_features, out_features, order=3):
super(SGCLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.order = order
self.W = nn.Linear(in_features, out_features)
def forward(self, graph, x):
output = self.W(x)
for _ in range(self.order):
output = spmm(graph, output)
return output
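# --- Hedged usage sketch (comments only, not part of the original module) ---
# Assuming `g` is a cogdl graph object that spmm() can consume and `x` is a
# (num_nodes, 16) node-feature tensor:
#
#   layer = SGCLayer(in_features=16, out_features=7, order=3)
#   out = layer(g, x)  # (num_nodes, 7): linear projection followed by 3 propagation hops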
|
src/test/pythonFiles/autocomp/suppress.py | ChaseKnowlden/vscode-jupyter | 615 | 11165703 | <filename>src/test/pythonFiles/autocomp/suppress.py
"string" #comment
"""
content
"""
#comment
'un#closed
|
preprocess_matches.py | b01901143/DeepMatchVO | 211 | 11165722 | #!/usr/bin/python
# <NAME>, HKUST, 2019.
# Copyright reserved.
# This file is an example to parse the feature and matching file,
# in accord with our internal format.
from __future__ import print_function
import os
import sys
import glob
import numpy as np
import math
from struct import unpack
from PIL import Image, ImageDraw
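# NOTE: the colored-log helper used below is not defined or imported in this file;
# a minimal stand-in (an assumption) is sketched here so the Notify.* calls resolve.
class Notify(object):
    INFO = '\033[92m'   # green
    FAIL = '\033[91m'   # red
    ENDC = '\033[0m'    # reset color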
# REPLACE these paths with yours
sift_list_path = '/home/tianwei/Data/kitti/odometry/dataset/odometry/sequences/00/sift_list.txt'
match_folder = '/home/tianwei/Data/kitti/odometry/dataset/odometry/sequences/00/match'
def read_feature_repo(file_path):
"""Read feature file (*.sift)."""
with open(file_path, 'rb') as fin:
data = fin.read()
head_length = 20
head = data[0:head_length]
feature_name, _, num_features, loc_dim, des_dim = unpack('5i', head)
keypts_length = loc_dim * num_features * 4
if feature_name == ord('S') + (ord('I') << 8) + (ord('F') << 16) + (ord('T') << 24):
print(Notify.INFO, 'Reading SIFT file',
file_path, '#', num_features, Notify.ENDC)
desc_length = des_dim * num_features
desc_type = 'B'
elif feature_name == 21384864: # L2Net
print(Notify.INFO, 'Reading L2NET file',
file_path, '#', num_features, Notify.ENDC)
else:
print(Notify.FAIL, 'Unknown feature type.', Notify.ENDC)
desc_length = des_dim * num_features * 4
desc_type = 'f'
keypts_data = data[head_length: head_length + keypts_length]
keypts = np.array(unpack('f' * loc_dim * num_features, keypts_data))
keypts = np.reshape(keypts, (num_features, loc_dim))
desc_data = data[head_length +
keypts_length: head_length + keypts_length + desc_length]
desc = np.array(unpack(
desc_type * des_dim * num_features, desc_data))
desc = np.reshape(desc, (num_features, des_dim))
return keypts, desc
def read_match_repo(mat_file):
"""Read .mat file and read matches
Arguments:
mat_file {str} -- .mat file
Returns:
A list of tuples with each of format (second_sift_name (without .sift suffix),
match_num (putative, hinlier, finlier), homograph matrix, fundamental matrix,
match pairs (list of (feat1, feat2, flag)))
"""
match_ret = []
with open(mat_file, 'rb') as fin:
data = fin.read()
if len(data) == 0:
return match_ret
file_end = len(data)
end = 0
while True:
# read filename length
length_bytes = 4
length = data[end:end+length_bytes]
length = unpack('i', length)[0]
end += length_bytes
# read filename
filename_bytes = length
filename = data[end:end+filename_bytes]
filename = unpack('c' * length, filename)
sift_name2 = os.path.splitext(''.join(filename))[0]
end += filename_bytes
# read match number (putative, hinlier, finlier)
match_num_bytes = 4 * 3
match_num = data[end:end+match_num_bytes]
match_num = unpack('3i', match_num)
end += match_num_bytes
# read homograph (3x3) and fundamental matrix (3x3)
mat_bytes = 8 * 18
mat = data[end:end+mat_bytes]
mat = unpack('18d', mat)
hmat = mat[:9]
fmat = mat[9:]
hmat = np.matrix([hmat[:3],hmat[3:6],hmat[6:9]], dtype=np.float32)
fmat = np.matrix([fmat[:3],fmat[3:6],fmat[6:9]], dtype=np.float32)
end += mat_bytes
# read actual match (sift feature index pairs)
struct_bytes = 12 * match_num[0]
struct = data[end:end+struct_bytes]
struct = unpack(match_num[0] * '3i', struct)
struct = np.reshape(struct, (-1, 3))
end += struct_bytes
match_ret.append((sift_name2, match_num, hmat, fmat, struct))
if end == file_end:
break
return match_ret
def get_inlier_image_coords(sift_keys1, sift_keys2, feature_matches, type='f'):
"""Get inlier matches in image coordinates.
Arguments:
sift_keys1 {list of keys (x, y, color, scale, orientation)} -- first sift keys
sift_keys2 {list of keys} -- second sift keys
feature_matches {(first, second, flag)} -- sift key index pairs and flags
Keyword Arguments:
type {str} -- inlier type ('f' for fudamental matrix and 'h' for homography) (default: {'f'})
Returns:
list -- list of (x1, y1, x2, y2)
"""
image_matches = []
if type == 'f':
inlier_type = 2
elif type == 'h':
inlier_type = 1
else:
print('Unknown inlier type, should be "f" or "h"')
exit(-1)
for i in range(feature_matches.shape[0]):
if (feature_matches[i, 2] == inlier_type or feature_matches[i, 2] == 3):
index1 = feature_matches[i, 0]
index2 = feature_matches[i, 1]
image_matches.append([sift_keys1[index1][0], sift_keys1[index1]
[1], sift_keys2[index2][0], sift_keys2[index2][1]])
return np.array(image_matches, dtype=np.float32)
def compute_fmat_error(f, image_matches, homogeneous=False):
points1 = image_matches[:, :2]
points2 = image_matches[:, 2:4]
assert points1.shape == points2.shape
if not homogeneous:
ones = np.ones(shape=[points1.shape[0],1], dtype=points1.dtype)
points1 = np.concatenate((points1, ones), axis=1)
points2 = np.concatenate((points2, ones), axis=1)
epi_lines = np.matmul(f, points1.transpose())
dist_p2l = np.abs(np.sum(np.multiply(epi_lines.transpose(), points2), axis=1))
dist_div = np.sqrt(np.multiply(
epi_lines[0, :], epi_lines[0, :]) + np.multiply(epi_lines[1, :], epi_lines[1, :])) + 1e-6
dist_p2l = np.divide(dist_p2l, dist_div.transpose())
ave_p2l_error = np.mean(dist_p2l)
return ave_p2l_error
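# --- Hedged sanity check (comments only) ---
# For a correspondence that exactly satisfies the epipolar constraint the mean
# point-to-epipolar-line distance is ~0, e.g. with F = [e]_x for a pure
# x-translation and identical image points:
#
#   F = np.array([[0, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=np.float32)
#   pts = np.array([[10., 5., 10., 5.]], dtype=np.float32)  # (x1, y1, x2, y2)
#   compute_fmat_error(F, pts)  # ~0.0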
if __name__ == '__main__':
sift_list = []
with open(sift_list_path) as f:
lines = f.readlines()
for line in lines:
sift_list.append(line.strip())
match_files = glob.glob(os.path.join(match_folder, '*.mat'))
sift_list.sort()
match_files.sort()
# read all sift at once
sift_file_map = {}
count = 0
for sift_file in sift_list:
sift_name = os.path.splitext(os.path.split(sift_file)[1])[0]
# keypoint: (x, y, color, scale, orientation)
keypts, _ = read_feature_repo(sift_file)
sift_file_map[sift_name] = (count, keypts)
count = count+1
print("Read all sift files")
for one_mat_file in match_files:
print("Read", one_mat_file)
match_ret = read_match_repo(one_mat_file)
sift_name1 = os.path.splitext(os.path.split(one_mat_file)[1])[0]
for i in range(len(match_ret)):
sift_name2 = match_ret[i][0]
match_num = match_ret[i][1]
hmat = match_ret[i][2]
fmat = match_ret[i][3]
match_pairs = match_ret[i][4]
image_coords = get_inlier_image_coords(
sift_file_map[sift_name1][1], sift_file_map[sift_name2][1], match_pairs, 'f')
assert len(image_coords) == match_num[2]
ave_error = compute_fmat_error(fmat, image_coords, homogeneous=False)
|
route/give_delete_admin_group.py | k0000k/openNAMU | 126 | 11165732 | from .tool.func import *
def give_delete_admin_group_2(conn, name):
curs = conn.cursor()
if admin_check() != 1:
return re_error('/error/3')
if flask.request.method == 'POST':
admin_check(None, 'alist del ' + name)
curs.execute(db_change("delete from alist where name = ?"), [name])
curs.execute(db_change("update user_set set data = 'user' where name = 'acl' and data = ?"), [name])
conn.commit()
return redirect('/admin_group')
else:
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang("delete_admin_group"), wiki_set(), wiki_custom(), wiki_css(['(' + name + ')', 0])],
data = '''
<form method=post>
<button type=submit>''' + load_lang('start') + '''</button>
</form>
''',
menu = [['admin_group', load_lang('return')]]
))
|
infoxlm/fairseq/fairseq/iterative_refinement_generator.py | Maria-philna/unilm | 5,129 | 11165755 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import torch
from fairseq import utils
DecoderOut = namedtuple('IterativeRefinementDecoderOut', [
'output_tokens',
'output_scores',
'attn',
'step',
'max_step',
'history'
])
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
            eos_penalty: if > 0.0, it penalizes early stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
retain_dropout: retaining dropout in the inference
adaptive: decoding with early stop
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None):
from fairseq.models.levenshtein_transformer import LevenshteinTransformerModel
from fairseq.models.nonautoregressive_ensembles import EnsembleLevT
if len(models) == 1:
# Keep this for other NAT models for which we have yet to implement ensemble wrappers. Later delete this.
model = models[0]
elif isinstance(models[0], LevenshteinTransformerModel):
model = EnsembleLevT(models)
else:
raise NotImplementedError
if not self.retain_dropout:
model.eval()
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
sent_idxs = torch.arange(bsz)
# encoding
encoder_out = model.forward_encoder([src_tokens, src_lengths])
# initialize buffers (very model specific, with length prediction or not)
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(decoder_out.output_tokens.size(0)).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None if decoder_out.attn is None else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]['history'] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]['history'].append(
finalized_hypos(
step,
finalized_history_tokens[j][i],
None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated] if decoder_out.attn is not None else None,
history=[h[not_terminated] for h in decoder_out.history] if decoder_out.history is not None else None
)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, not_terminated.nonzero().squeeze())
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
return finalized
|
oss_test/fileio/gen-classpath.py | venkattgg/venkey | 493 | 11165756 | '''
Copyright (C) 2016 Turi
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from graphlab.connect.server import _get_hadoop_class_path
if __name__ == '__main__':
classpath = _get_hadoop_class_path()
with open('classpath.sh', 'w') as f:
f.write('export CLASSPATH=%s\n' % classpath)
|
demo/text_spotting/text_perceptron_spot/configs/__base__.py | hikopensource/DAVAR-Lab-OCR | 387 | 11165806 | <gh_stars>100-1000
# """
# #########################################################################
# # Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved.
# # Filename : __base__.py
# # Abstract : Base model settings for text perceptron based text spotter.
# # Current Version: 1.0.0
# # Date : 2021-09-15
# #########################################################################
# """
character = '../../datalist/character_list.txt'
batch_max_length = 32
type='SPOTTER'
model = dict(
type='TextPerceptronSpot',
# Pre-trained model, can be downloaded in the model zoo of mmdetection
pretrained=None,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
style='pytorch',),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
add_extra_convs="on_input",
num_outs=4),
mask_head=dict(
type='TPHead',
in_channels=256,
conv_out_channels=256,
conv_cfg=None,
norm_cfg=None,
# All of the segmentation losses, including center text/ head/ tail/ top&bottom boundary
loss_seg=dict(type='DiceLoss', loss_weight=1.0),
# Corner regression in head region
loss_reg_head=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.1, reduction='sum'),
# Corner regression in tail region
loss_reg_tail=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.1, reduction='sum'),
# boundary offset regression in center text region
loss_reg_bond=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.01, reduction='sum'),),
# rcg
rcg_roi_extractor=dict(
type='TPSRoIExtractor',
in_channels=256,
out_channels=256,
point_num=14,
output_size=(8, 32),
featmap_strides=[4],),
rcg_transformation=None,
rcg_backbone=dict(
type='LightCRNN',
in_channels=256,
out_channels=256
),
rcg_neck=None,
rcg_sequence_module=dict(
type='CascadeRNN',
rnn_modules=[
dict(
type='BidirectionalLSTM',
input_size=256,
hidden_size=256,
output_size=256,
with_linear=True,
bidirectional=True,),
dict(
type='BidirectionalLSTM',
input_size=256,
hidden_size=256,
output_size=256,
with_linear=True,
bidirectional=True,), ]),
rcg_sequence_head=dict(
type='AttentionHead',
input_size=256,
hidden_size=256,
batch_max_length=batch_max_length,
converter=dict(
type='AttnLabelConverter',
character=character,
use_cha_eos=True,),
loss_att=dict(
type='StandardCrossEntropyLoss',
ignore_index=0,
reduction='mean',
loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
# rcg
keep_dim=False),
test_cfg=dict(
# rcg
keep_dim=False,
batch_max_length=batch_max_length,
postprocess=dict(
type='TPPointsGeneration',
            # Re-implemented in C++ (you can implement it in CUDA for further speed up); comment out to use the default one
# lib_name='tp_points_generate.so',
# lib_dir='/path/to/davarocr/davar_det/core/post_processing/lib/'),
# Parameters for points generating
filter_ratio=0.6,
thres_text=0.35,
thres_head=0.45,
thres_bond=0.35,
point_num=14
)),
)
# training and testing settings
train_cfg = dict()
test_cfg = dict()
# Training dataset load type
dataset_type = 'DavarMultiDataset'
# File prefix path of the traning dataset
img_prefixes = [
'/path/to/Image/'
]
# Dataset Name
ann_files = [
'/path/to/datalist/train_datalist.json'
]
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='DavarLoadImageFromFile'),
dict(type='DavarLoadAnnotations',
         with_poly_bbox=True, # bounding polygon
with_care=True, # Ignore or not
with_text=True, # Transcription
text_profile=dict(text_max_length=batch_max_length, sensitive='same', filtered=False)
),
dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DavarRandomCrop', instance_key='gt_poly_bboxes'),
dict(type='RandomRotate', angles=[-15, 15], borderValue=(0, 0, 0)),
dict(type='DavarResize', img_scale=[(736, 736)], multiscale_mode='value', keep_ratio=True),
dict(type='Pad', size_divisor=32),
# Ground truth generation
dict(type='TPDataGeneration',
# Comment to use default setting
# lib_name='tp_data.so',
# lib_dir='/path/to/davarocr/davar_det/datasets/pipelines/lib/'),
shrink_head_ratio=0.25,
shrink_bond_ratio=0.09,
ignore_ratio=0.6),
dict(type='SegFormatBundle'),
dict(type='DavarCollect', keys=['img', 'gt_texts', 'gt_masks', 'gt_poly_bboxes']),
]
test_pipeline = [
dict(type='DavarLoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1350, 950), # Testing scale for Total-Text
flip=False,
transforms=[
dict(type='DavarResize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='DavarCollect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=0,
sampler=dict(
type='DistBatchBalancedSampler', # BatchBalancedSampler and DistBatchBalancedSampler
mode=0,
        # mode 0: Balance in batch, calculate the epoch according to the first iterated dataset
        # mode 1: Balance in batch, calculate the epoch according to the last iterated dataset
        # mode 2: Balance in batch, record unused data
        # mode -1: Each dataset is directly concatenated and shuffled
),
train=dict(
type=dataset_type,
batch_ratios=['1.0'],
dataset=dict(
type='TextSpotDataset',
ann_file=ann_files,
img_prefix=img_prefixes,
test_mode=False,
pipeline=train_pipeline)
),
val=dict(
type='TextSpotDataset',
ann_file='/path/to/datalist/test_datalist.json',
img_prefix='/path/to/Image/',
pipeline=test_pipeline),
test=dict(
type='TextSpotDataset',
ann_file='/path/to/datalist/test_datalist.json',
img_prefix='/path/to/Image/',
pipeline=test_pipeline))
# optimizer
find_unused_parameters = True
optimizer = dict(type='AdamW', lr=1e-3, weight_decay=0)
optimizer_config = dict(grad_clip=dict(max_norm=5, norm_type=2))
lr_config = dict(
policy='step',
step=[2, 3]
)
runner = dict(type='EpochBasedRunner', max_epochs=4)
checkpoint_config = dict(type="DavarCheckpointHook", interval=1, filename_tmpl='checkpoint/checkpoint_name_epoch_{}.pth')
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
])
# yapf:enable
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '/path/to/workspace/log/'
load_from = '/path/to/Model_Zoo/tp_r50_tt-5b348520.pth'
resume_from = None
workflow = [('train', 1)]
# Online evaluation
evaluation = dict(
model_type=type,
type='DavarDistEvalHook',
interval=1,
eval_func_params=dict(
# SPECIAL_CHARACTERS='[]+-#$()@=_!?,:;/.%&'\">*|<`{~}^\ ',
IOU_CONSTRAINT=0.5,
AREA_PRECISION_CONSTRAINT=0.5,
WORD_SPOTTING=False
),
by_epoch=True,
eval_mode='general',
# eval_mode='lightweight',
save_best='hmean',
rule='greater',
)
|
eth/vm/forks/tangerine_whistle/state.py | ggs134/py-evm | 1,641 | 11165810 | <gh_stars>1000+
from eth.vm.forks.homestead.state import HomesteadState
from .computation import TangerineWhistleComputation
class TangerineWhistleState(HomesteadState):
computation_class = TangerineWhistleComputation
|
tencentcloud/mps/v20190612/errorcodes.py | PlasticMem/tencentcloud-sdk-python | 465 | 11165811 | <reponame>PlasticMem/tencentcloud-sdk-python
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Operation failed: the bucket already has a notification configured.
FAILEDOPERATION_BUCKETNOTIFYALREADYEXIST = 'FailedOperation.BucketNotifyAlreadyExist'
# Operation failed: the COS service has been disabled.
FAILEDOPERATION_COSSTATUSINAVLID = 'FailedOperation.CosStatusInavlid'
# Operation failed: invalid MPS user.
FAILEDOPERATION_INVALIDMPSUSER = 'FailedOperation.InvalidMpsUser'
# Operation failed: invalid user.
FAILEDOPERATION_INVALIDUSER = 'FailedOperation.InvalidUser'
# Internal error.
INTERNALERROR = 'InternalError'
# Internal error: failed to generate the template ID.
INTERNALERROR_GENDEFINITION = 'InternalError.GenDefinition'
# Internal error: failed to upload the watermark image.
INTERNALERROR_UPLOADWATERMARKERROR = 'InternalError.UploadWatermarkError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Parameter error: audio stream bitrate.
INVALIDPARAMETERVALUE_AUDIOBITRATE = 'InvalidParameterValue.AudioBitrate'
# Invalid parameter value: AudioChannel.
INVALIDPARAMETERVALUE_AUDIOCHANNEL = 'InvalidParameterValue.AudioChannel'
# Parameter error: audio stream codec.
INVALIDPARAMETERVALUE_AUDIOCODEC = 'InvalidParameterValue.AudioCodec'
# Parameter error: audio stream sample rate.
INVALIDPARAMETERVALUE_AUDIOSAMPLERATE = 'InvalidParameterValue.AudioSampleRate'
# Invalid audio/video bitrate.
INVALIDPARAMETERVALUE_BITRATE = 'InvalidParameterValue.Bitrate'
# Invalid parameter value: illegal BlockConfidence value.
INVALIDPARAMETERVALUE_BLOCKCONFIDENCE = 'InvalidParameterValue.BlockConfidence'
# Invalid parameter value: error in the intelligent classification control field.
INVALIDPARAMETERVALUE_CLASSIFCATIONCONFIGURE = 'InvalidParameterValue.ClassifcationConfigure'
# Invalid audio/video codec.
INVALIDPARAMETERVALUE_CODEC = 'InvalidParameterValue.Codec'
# Invalid parameter value: ColumnCount.
INVALIDPARAMETERVALUE_COLUMNCOUNT = 'InvalidParameterValue.ColumnCount'
# Parameter error: template description.
INVALIDPARAMETERVALUE_COMMENT = 'InvalidParameterValue.Comment'
# Parameter error: container format.
INVALIDPARAMETERVALUE_CONTAINER = 'InvalidParameterValue.Container'
# Invalid parameter value: ContainerType.
INVALIDPARAMETERVALUE_CONTAINERTYPE = 'InvalidParameterValue.ContainerType'
# Invalid parameter value: CoordinateOrigin.
INVALIDPARAMETERVALUE_COORDINATEORIGIN = 'InvalidParameterValue.CoordinateOrigin'
# Invalid parameter value: error in the intelligent cover control field.
INVALIDPARAMETERVALUE_COVERCONFIGURE = 'InvalidParameterValue.CoverConfigure'
# Invalid parameter value: illegal filter label for the default face library.
INVALIDPARAMETERVALUE_DEFAULTLIBRARYLABELSET = 'InvalidParameterValue.DefaultLibraryLabelSet'
# Parameter error: Definition.
INVALIDPARAMETERVALUE_DEFINITION = 'InvalidParameterValue.Definition'
# Parameter error: Definitions.
INVALIDPARAMETERVALUE_DEFINITIONS = 'InvalidParameterValue.Definitions'
# Invalid parameter value: the default template cannot be deleted.
INVALIDPARAMETERVALUE_DELETEDEFAULTTEMPLATE = 'InvalidParameterValue.DeleteDefaultTemplate'
# Invalid switch value for forbidding transcoding from low bitrate to high bitrate.
INVALIDPARAMETERVALUE_DISABLEHIGHERVIDEOBITRATE = 'InvalidParameterValue.DisableHigherVideoBitrate'
# Invalid switch value for forbidding transcoding from low resolution to high resolution.
INVALIDPARAMETERVALUE_DISABLEHIGHERVIDEORESOLUTION = 'InvalidParameterValue.DisableHigherVideoResolution'
# Invalid parameter value: duplicate face.
INVALIDPARAMETERVALUE_FACEDUPLICATE = 'InvalidParameterValue.FaceDuplicate'
# Invalid parameter value: illegal face library parameter.
INVALIDPARAMETERVALUE_FACELIBRARY = 'InvalidParameterValue.FaceLibrary'
# Invalid parameter value: illegal face score value.
INVALIDPARAMETERVALUE_FACESCORE = 'InvalidParameterValue.FaceScore'
# Parameter error: invalid fill type.
INVALIDPARAMETERVALUE_FILLTYPE = 'InvalidParameterValue.FillType'
# Invalid parameter value: Format.
INVALIDPARAMETERVALUE_FORMAT = 'InvalidParameterValue.Format'
# Invalid parameter value: when Format is webp, Width and Height are both empty.
INVALIDPARAMETERVALUE_FORMATWEBPLACKWIDTHANDHEIGHT = 'InvalidParameterValue.FormatWebpLackWidthAndHeight'
# Invalid parameter value: when Format is webp, Width and Height cannot both be 0.
INVALIDPARAMETERVALUE_FORMATWEBPWIDTHANDHEIGHTBOTHZERO = 'InvalidParameterValue.FormatWebpWidthAndHeightBothZero'
# Parameter error: video frame rate.
INVALIDPARAMETERVALUE_FPS = 'InvalidParameterValue.Fps'
# Invalid parameter value: error in the intelligent frame tag control field.
INVALIDPARAMETERVALUE_FRAMETAGCONFIGURE = 'InvalidParameterValue.FrameTagConfigure'
# Invalid parameter value: FunctionArg.
INVALIDPARAMETERVALUE_FUNCTIONARG = 'InvalidParameterValue.FunctionArg'
# Invalid parameter value: FunctionName.
INVALIDPARAMETERVALUE_FUNCTIONNAME = 'InvalidParameterValue.FunctionName'
# Invalid GOP value.
INVALIDPARAMETERVALUE_GOP = 'InvalidParameterValue.Gop'
# Parameter error: height.
INVALIDPARAMETERVALUE_HEIGHT = 'InvalidParameterValue.Height'
# Invalid ImageContent parameter value.
INVALIDPARAMETERVALUE_IMAGECONTENT = 'InvalidParameterValue.ImageContent'
# Parameter error: image watermark template.
INVALIDPARAMETERVALUE_IMAGETEMPLATE = 'InvalidParameterValue.ImageTemplate'
# The value of the parsed Content is illegal.
INVALIDPARAMETERVALUE_INVALIDCONTENT = 'InvalidParameterValue.InvalidContent'
# Invalid operation type.
INVALIDPARAMETERVALUE_INVALIDOPERATIONTYPE = 'InvalidParameterValue.InvalidOperationType'
# Invalid parameter value: illegal LabelSet value.
INVALIDPARAMETERVALUE_LABELSET = 'InvalidParameterValue.LabelSet'
# Parameter error: Limit.
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'
# Invalid parameter value: the default template cannot be modified.
INVALIDPARAMETERVALUE_MODIFYDEFAULTTEMPLATE = 'InvalidParameterValue.ModifyDefaultTemplate'
# Invalid parameter value: Name exceeds the length limit.
INVALIDPARAMETERVALUE_NAME = 'InvalidParameterValue.Name'
# Tasks whose status is not "processing" are not supported.
INVALIDPARAMETERVALUE_NOTPROCESSINGTASK = 'InvalidParameterValue.NotProcessingTask'
# Invalid parameter value: illegal object library parameter.
INVALIDPARAMETERVALUE_OBJECTLIBRARY = 'InvalidParameterValue.ObjectLibrary'
# Invalid parameter value: invalid face image format.
INVALIDPARAMETERVALUE_PICFORMATERROR = 'InvalidParameterValue.PicFormatError'
# Invalid parameter value: Quality.
INVALIDPARAMETERVALUE_QUALITY = 'InvalidParameterValue.Quality'
# Invalid parameter value: RemoveAudio.
INVALIDPARAMETERVALUE_REMOVEAUDIO = 'InvalidParameterValue.RemoveAudio'
# Invalid parameter value: RemoveVideo.
INVALIDPARAMETERVALUE_REMOVEVIDEO = 'InvalidParameterValue.RemoveVideo'
# Parameter error: invalid RepeatType.
INVALIDPARAMETERVALUE_REPEATTYPE = 'InvalidParameterValue.RepeatType'
# Parameter error: invalid resolution.
INVALIDPARAMETERVALUE_RESOLUTION = 'InvalidParameterValue.Resolution'
# Invalid ResolutionAdaptive.
INVALIDPARAMETERVALUE_RESOLUTIONADAPTIVE = 'InvalidParameterValue.ResolutionAdaptive'
# Invalid parameter value: illegal ReviewConfidence value.
INVALIDPARAMETERVALUE_REVIEWCONFIDENCE = 'InvalidParameterValue.ReviewConfidence'
# Invalid parameter value: RowCount.
INVALIDPARAMETERVALUE_ROWCOUNT = 'InvalidParameterValue.RowCount'
# Invalid parameter value: SampleInterval.
INVALIDPARAMETERVALUE_SAMPLEINTERVAL = 'InvalidParameterValue.SampleInterval'
# Invalid audio sample rate.
INVALIDPARAMETERVALUE_SAMPLERATE = 'InvalidParameterValue.SampleRate'
# Invalid parameter value: SampleType.
INVALIDPARAMETERVALUE_SAMPLETYPE = 'InvalidParameterValue.SampleType'
# SessionContext is too long.
INVALIDPARAMETERVALUE_SESSIONCONTEXTTOOLONG = 'InvalidParameterValue.SessionContextTooLong'
# Duplicate deduplication ID; the request has been deduplicated.
INVALIDPARAMETERVALUE_SESSIONID = 'InvalidParameterValue.SessionId'
# SessionId is too long.
INVALIDPARAMETERVALUE_SESSIONIDTOOLONG = 'InvalidParameterValue.SessionIdTooLong'
# Parameter error: audio channel mode.
INVALIDPARAMETERVALUE_SOUNDSYSTEM = 'InvalidParameterValue.SoundSystem'
# Source file error.
INVALIDPARAMETERVALUE_SRCFILE = 'InvalidParameterValue.SrcFile'
# Invalid parameter value: illegal SubtitleFormat.
INVALIDPARAMETERVALUE_SUBTITLEFORMAT = 'InvalidParameterValue.SubtitleFormat'
# Invalid parameter value: the SVG is empty.
INVALIDPARAMETERVALUE_SVGTEMPLATE = 'InvalidParameterValue.SvgTemplate'
# Invalid parameter value: SVG height.
INVALIDPARAMETERVALUE_SVGTEMPLATEHEIGHT = 'InvalidParameterValue.SvgTemplateHeight'
# Invalid parameter value: SVG width.
INVALIDPARAMETERVALUE_SVGTEMPLATEWIDTH = 'InvalidParameterValue.SvgTemplateWidth'
# Invalid parameter value: illegal Switch value.
INVALIDPARAMETERVALUE_SWITCH = 'InvalidParameterValue.Switch'
# Invalid parameter value: invalid TEHD Type.
INVALIDPARAMETERVALUE_TEHDTYPE = 'InvalidParameterValue.TEHDType'
# Invalid parameter value: error in the intelligent tag control field.
INVALIDPARAMETERVALUE_TAGCONFIGURE = 'InvalidParameterValue.TagConfigure'
# The task ID does not exist.
INVALIDPARAMETERVALUE_TASKID = 'InvalidParameterValue.TaskId'
# Parameter error: text transparency.
INVALIDPARAMETERVALUE_TEXTALPHA = 'InvalidParameterValue.TextAlpha'
# Parameter error: text template.
INVALIDPARAMETERVALUE_TEXTTEMPLATE = 'InvalidParameterValue.TextTemplate'
# Parameter error: invalid Type value.
INVALIDPARAMETERVALUE_TYPE = 'InvalidParameterValue.Type'
# Invalid parameter value: illegal filter label for the user-defined face library.
INVALIDPARAMETERVALUE_USERDEFINELIBRARYLABELSET = 'InvalidParameterValue.UserDefineLibraryLabelSet'
# Parameter error: video stream bitrate.
INVALIDPARAMETERVALUE_VIDEOBITRATE = 'InvalidParameterValue.VideoBitrate'
# Parameter error: video stream codec.
INVALIDPARAMETERVALUE_VIDEOCODEC = 'InvalidParameterValue.VideoCodec'
# Parameter error: width.
INVALIDPARAMETERVALUE_WIDTH = 'InvalidParameterValue.Width'
# Horizontal position of the watermark origin relative to the video image origin. Both % and px formats are supported.
INVALIDPARAMETERVALUE_XPOS = 'InvalidParameterValue.XPos'
# Vertical position of the watermark origin relative to the video image origin. Both % and px formats are supported.
INVALIDPARAMETERVALUE_YPOS = 'InvalidParameterValue.YPos'
# Limit exceeded: the number of templates exceeds the limit.
LIMITEXCEEDED_TOOMUCHTEMPLATE = 'LimitExceeded.TooMuchTemplate'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# Resource not found: invalid COS bucket name.
RESOURCENOTFOUND_COSBUCKETNAMEINVALID = 'ResourceNotFound.CosBucketNameInvalid'
# Resource not found: the COS bucket does not exist.
RESOURCENOTFOUND_COSBUCKETNOTEXIST = 'ResourceNotFound.CosBucketNotExist'
# Resource not found: person.
RESOURCENOTFOUND_PERSON = 'ResourceNotFound.Person'
# Resource not found: the template does not exist.
RESOURCENOTFOUND_TEMPLATENOTEXIST = 'ResourceNotFound.TemplateNotExist'
# Resource not found: keyword.
RESOURCENOTFOUND_WORD = 'ResourceNotFound.Word'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
|
lib/manalib.py | bcnoexceptions/mtgencode | 159 | 11165842 | <filename>lib/manalib.py<gh_stars>100-1000
# representation for mana costs and text with embedded mana costs
# data aggregating classes
import re
import random
import utils
class Manacost:
'''mana cost representation with data'''
# hardcoded to be dependent on the symbol structure... ah well
def get_colors(self):
colors = ''
for sym in self.symbols:
if self.symbols[sym] > 0:
symcolors = re.sub(r'2|P|S|X|C', '', sym)
for symcolor in symcolors:
if symcolor not in colors:
colors += symcolor
# sort so the order is always consistent
return ''.join(sorted(colors))
def check_colors(self, symbolstring):
for sym in symbolstring:
if not sym in self.colors:
return False
return True
def __init__(self, src, fmt = ''):
# source fields, exactly one will be set
self.raw = None
self.json = None
# flags
self.parsed = True
self.valid = True
self.none = False
# default values for all fields
self.inner = None
self.cmc = 0
self.colorless = 0
self.sequence = []
self.symbols = {sym : 0 for sym in utils.mana_syms}
self.allsymbols = {sym : 0 for sym in utils.mana_symall}
self.colors = ''
if fmt == 'json':
self.json = src
text = utils.mana_translate(self.json.upper())
else:
self.raw = src
text = self.raw
if text == '':
self.inner = ''
self.none = True
elif not (len(text) >= 2 and text[0] == '{' and text[-1] == '}'):
self.parsed = False
self.valid = False
else:
self.inner = text[1:-1]
# structure mirrors the decoding in utils, but we pull out different data here
idx = 0
while idx < len(self.inner):
# taking this branch is an infinite loop if unary_marker is empty
if (len(utils.mana_unary_marker) > 0 and
self.inner[idx:idx+len(utils.mana_unary_marker)] == utils.mana_unary_marker):
idx += len(utils.mana_unary_marker)
self.sequence += [utils.mana_unary_marker]
elif self.inner[idx:idx+len(utils.mana_unary_counter)] == utils.mana_unary_counter:
idx += len(utils.mana_unary_counter)
self.sequence += [utils.mana_unary_counter]
self.colorless += 1
self.cmc += 1
else:
old_idx = idx
for symlen in range(utils.mana_symlen_min, utils.mana_symlen_max + 1):
encoded_sym = self.inner[idx:idx+symlen]
if encoded_sym in utils.mana_symall_decode:
idx += symlen
# leave the sequence encoded for convenience
self.sequence += [encoded_sym]
sym = utils.mana_symall_decode[encoded_sym]
self.allsymbols[sym] += 1
if sym in utils.mana_symalt:
self.symbols[utils.mana_alt(sym)] += 1
else:
self.symbols[sym] += 1
if sym == utils.mana_X:
self.cmc += 0
elif utils.mana_2 in sym:
self.cmc += 2
else:
self.cmc += 1
break
# otherwise we'll go into an infinite loop if we see a symbol we don't know
if idx == old_idx:
idx += 1
self.valid = False
self.colors = self.get_colors()
def __str__(self):
if self.none:
return '_NOCOST_'
return utils.mana_untranslate(utils.mana_open_delimiter + ''.join(self.sequence)
+ utils.mana_close_delimiter)
def format(self, for_forum = False, for_html = False):
if self.none:
return '_NOCOST_'
else:
return utils.mana_untranslate(utils.mana_open_delimiter + ''.join(self.sequence)
+ utils.mana_close_delimiter, for_forum, for_html)
def encode(self, randomize = False):
if self.none:
return ''
elif randomize:
# so this won't work very well if mana_unary_marker isn't empty
return (utils.mana_open_delimiter
+ ''.join(random.sample(self.sequence, len(self.sequence)))
+ utils.mana_close_delimiter)
else:
return utils.mana_open_delimiter + ''.join(self.sequence) + utils.mana_close_delimiter
def vectorize(self, delimit = False):
if self.none:
return ''
elif delimit:
ld = '('
rd = ')'
else:
ld = ''
rd = ''
return ' '.join(map(lambda s: ld + s + rd, sorted(self.sequence)))
class Manatext:
'''text representation with embedded mana costs'''
def __init__(self, src, fmt = ''):
# source fields
self.raw = None
self.json = None
# flags
self.valid = True
# default values for all fields
self.text = src
self.costs = []
if fmt == 'json':
self.json = src
manastrs = re.findall(utils.mana_json_regex, src)
else:
self.raw = src
manastrs = re.findall(utils.mana_regex, src)
for manastr in manastrs:
cost = Manacost(manastr, fmt)
if not cost.valid:
self.valid = False
self.costs += [cost]
self.text = self.text.replace(manastr, utils.reserved_mana_marker, 1)
if (utils.mana_open_delimiter in self.text
or utils.mana_close_delimiter in self.text
or utils.mana_json_open_delimiter in self.text
or utils.mana_json_close_delimiter in self.text):
self.valid = False
def __str__(self):
text = self.text
for cost in self.costs:
text = text.replace(utils.reserved_mana_marker, str(cost), 1)
return text
def format(self, for_forum = False, for_html = False):
text = self.text
for cost in self.costs:
text = text.replace(utils.reserved_mana_marker, cost.format(for_forum=for_forum, for_html=for_html), 1)
if for_html:
text = text.replace('\n', '<br>\n')
return text
def encode(self, randomize = False):
text = self.text
for cost in self.costs:
text = text.replace(utils.reserved_mana_marker, cost.encode(randomize = randomize), 1)
return text
def vectorize(self):
text = self.text
special_chars = [utils.reserved_mana_marker,
utils.dash_marker,
utils.bullet_marker,
utils.this_marker,
utils.counter_marker,
utils.choice_open_delimiter,
utils.choice_close_delimiter,
utils.newline,
#utils.x_marker,
utils.tap_marker,
utils.untap_marker,
utils.newline,
';', ':', '"', ',', '.']
for char in special_chars:
text = text.replace(char, ' ' + char + ' ')
text = text.replace('/', '/ /')
for cost in self.costs:
text = text.replace(utils.reserved_mana_marker, cost.vectorize(), 1)
return ' '.join(text.split())
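# Minimal usage sketch, assuming the module is run alongside its sibling `utils`
# module; it only exercises paths that do not depend on the exact symbol encoding
# (an empty cost and a plain text line with no embedded mana costs).
if __name__ == '__main__':
    no_cost = Manacost('')
    print(no_cost.format())  # an empty source encodes to '_NOCOST_'
    plain = Manatext('Target creature gets +1/+1 until end of turn.')
    print(plain.format())    # no embedded costs, so the text comes back unchanged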
|
components/policy/tools/make_policy_zip.py | google-ar/chromium | 777 | 11165862 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a zip archive with policy template files. The list of input files is
extracted from a grd file with grit. This is to keep the length of input
arguments below the limit on Windows.
"""
import grd_helper
import optparse
import os
import sys
import zipfile
def add_files_to_zip(zip_file, base_dir, file_list):
"""Pack a list of files into a zip archive, that is already
opened for writing.
Args:
zip_file: An object representing the zip archive.
base_dir: Base path of all the files in the real file system.
    file_list: List of file paths to add, all relative to base_dir.
      The zip entries will only contain this component of the path.
"""
for file_path in file_list:
zip_file.write(base_dir + file_path, file_path)
return 0
def main(argv):
"""Pack a list of files into a zip archive.
  Command-line flags:
    --output: The file name of the zip archive.
    --basedir: Base path of the input files.
    Grit options (see grd_helper.add_options) select the generated files to
      include; --extra_input adds further files relative to --basedir.
"""
parser = optparse.OptionParser()
parser.add_option("--output", dest="output")
parser.add_option("--basedir", dest="basedir")
parser.add_option("--include_google_admx", action="store_true",
dest="include_google_admx", default=False)
parser.add_option("--extra_input", action="append", dest="extra_input",
default=[])
grd_helper.add_options(parser)
options, args = parser.parse_args(argv[1:])
if (options.basedir[-1] != '/'):
options.basedir += '/'
file_list = options.extra_input
file_list += grd_helper.get_grd_outputs(options)
# Pick up google.admx/adml files.
if (options.include_google_admx):
google_file_list = []
for path in file_list:
directory, filename = os.path.split(path)
filename, extension = os.path.splitext(filename)
if extension == ".admx" or extension == ".adml":
google_file_list.append(\
os.path.join(options.basedir, directory, "google" + extension))
file_list.extend(google_file_list)
zip_file = zipfile.ZipFile(options.output, 'w', zipfile.ZIP_DEFLATED)
try:
return add_files_to_zip(zip_file, options.basedir, file_list)
finally:
zip_file.close()
if '__main__' == __name__:
sys.exit(main(sys.argv))
|
alipay/aop/api/domain/AlipayEbppBillkeyUpgradeModel.py | antopen/alipay-sdk-python-all | 213 | 11165867 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppBillkeyUpgradeModel(object):
def __init__(self):
self._bill_key = None
self._biz_type = None
self._charge_inst = None
self._new_bill_key = None
self._operation_type = None
self._sub_biz_type = None
@property
def bill_key(self):
return self._bill_key
@bill_key.setter
def bill_key(self, value):
self._bill_key = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def charge_inst(self):
return self._charge_inst
@charge_inst.setter
def charge_inst(self, value):
self._charge_inst = value
@property
def new_bill_key(self):
return self._new_bill_key
@new_bill_key.setter
def new_bill_key(self, value):
self._new_bill_key = value
@property
def operation_type(self):
return self._operation_type
@operation_type.setter
def operation_type(self, value):
self._operation_type = value
@property
def sub_biz_type(self):
return self._sub_biz_type
@sub_biz_type.setter
def sub_biz_type(self, value):
self._sub_biz_type = value
def to_alipay_dict(self):
params = dict()
if self.bill_key:
if hasattr(self.bill_key, 'to_alipay_dict'):
params['bill_key'] = self.bill_key.to_alipay_dict()
else:
params['bill_key'] = self.bill_key
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.charge_inst:
if hasattr(self.charge_inst, 'to_alipay_dict'):
params['charge_inst'] = self.charge_inst.to_alipay_dict()
else:
params['charge_inst'] = self.charge_inst
if self.new_bill_key:
if hasattr(self.new_bill_key, 'to_alipay_dict'):
params['new_bill_key'] = self.new_bill_key.to_alipay_dict()
else:
params['new_bill_key'] = self.new_bill_key
if self.operation_type:
if hasattr(self.operation_type, 'to_alipay_dict'):
params['operation_type'] = self.operation_type.to_alipay_dict()
else:
params['operation_type'] = self.operation_type
if self.sub_biz_type:
if hasattr(self.sub_biz_type, 'to_alipay_dict'):
params['sub_biz_type'] = self.sub_biz_type.to_alipay_dict()
else:
params['sub_biz_type'] = self.sub_biz_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppBillkeyUpgradeModel()
if 'bill_key' in d:
o.bill_key = d['bill_key']
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'charge_inst' in d:
o.charge_inst = d['charge_inst']
if 'new_bill_key' in d:
o.new_bill_key = d['new_bill_key']
if 'operation_type' in d:
o.operation_type = d['operation_type']
if 'sub_biz_type' in d:
o.sub_biz_type = d['sub_biz_type']
return o
|
tests/test_home.py | matrixorz/firefly | 247 | 11165871 | <reponame>matrixorz/firefly<gh_stars>100-1000
from __future__ import absolute_import
# -*- coding: utf-8 -*-
from flask import url_for
import pytest
from firefly.models.topic import Category, Post
@pytest.mark.usefixtures('client_class')
class TestHome:
def setup(self):
c = Category.objects.create(
name=u'python', description=u'描述', _slug=u'python-slug'
)
for x in range(5):
Post.objects.create(
title=u'标题test%s' % x, content=u'内容test % x', category=c
)
def test_post_list(self):
posts = Post.objects.all()
rv = self.client.get(url_for('home.index'))
data = rv.get_data().decode('utf8')
for p in posts:
assert p.title in data
def test_keyboard(self):
assert self.client.get(url_for('keyboard.keyboard')).status_code == 200
|
ppdet/utils/stats.py | leakyH/PaddleDetection | 7,782 | 11165888 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numpy as np
__all__ = ['SmoothedValue', 'TrainingStats']
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({avg:.4f})"
self.deque = collections.deque(maxlen=window_size)
self.fmt = fmt
self.total = 0.
self.count = 0
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
@property
def median(self):
return np.median(self.deque)
@property
def avg(self):
return np.mean(self.deque)
@property
def max(self):
return np.max(self.deque)
@property
def value(self):
return self.deque[-1]
@property
def global_avg(self):
return self.total / self.count
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, max=self.max, value=self.value)
class TrainingStats(object):
def __init__(self, window_size, delimiter=' '):
self.meters = None
self.window_size = window_size
self.delimiter = delimiter
def update(self, stats):
if self.meters is None:
self.meters = {
k: SmoothedValue(self.window_size)
for k in stats.keys()
}
for k, v in self.meters.items():
v.update(stats[k].numpy())
def get(self, extras=None):
stats = collections.OrderedDict()
if extras:
for k, v in extras.items():
stats[k] = v
for k, v in self.meters.items():
stats[k] = format(v.median, '.6f')
return stats
def log(self, extras=None):
d = self.get(extras)
strs = []
for k, v in d.items():
strs.append("{}: {}".format(k, str(v)))
return self.delimiter.join(strs)
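if __name__ == '__main__':
    # Minimal sketch of SmoothedValue on plain floats. TrainingStats is not shown
    # here because it calls .numpy() on each value and therefore expects tensors.
    smoothed_loss = SmoothedValue(window_size=4)
    for value in [0.9, 0.8, 0.85, 0.7, 0.65, 0.6]:
        smoothed_loss.update(value)
    print(str(smoothed_loss))        # windowed median and average
    print(smoothed_loss.global_avg)  # running average over everything seen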
|
common/data_collector.py | iamlukewang/openpilot | 121 | 11165898 | <reponame>iamlukewang/openpilot
from common.travis_checker import travis
from selfdrive.swaglog import cloudlog
from common.realtime import sec_since_boot
from common.op_params import opParams
import threading
import os
op_params = opParams()
class DataCollector:
def __init__(self, file_path, keys, write_frequency=60, write_threshold=2, log_data=True):
"""
This class provides an easy way to set up your own custom data collector to gather custom data.
Parameters:
file_path (str): The path you want your custom data to be written to.
keys: (list): A string list containing the names of the values you want to collect.
Your data list needs to be in this order.
write_frequency (int/float): The rate at which to write data in seconds.
write_threshold (int): The length of the data list we need to collect before considering writing.
Example:
data_collector = DataCollector('/data/openpilot/custom_data', ['v_ego', 'a_ego', 'custom_dict'], write_frequency=120)
"""
self.log_data = log_data
self.file_path = file_path
self.keys = keys
self.write_frequency = write_frequency
self.write_threshold = write_threshold
self.data = []
self.last_write_time = sec_since_boot()
self.thread_running = False
self._initialize()
def _initialize(self): # add keys to top of data file
if not os.path.exists(self.file_path) and not travis:
with open(self.file_path, "w") as f:
f.write('{}\n'.format(self.keys))
def update(self, sample):
"""
Appends your sample to a central self.data variable that gets written to your specified file path every n seconds.
Parameters:
sample: Can be any type of data. List, dictionary, numbers, strings, etc.
Or a combination: dictionaries, booleans, and floats in a list
Continuing from the example above, we assume that the first value is your velocity, and the second
    is your acceleration. IMPORTANT: If your values and keys are not in the same order, you will have trouble figuring
    out what data is what when you want to process it later.
    Example:
      data_collector.update([17, 0.5, {'a': 1}])
"""
if self.log_data:
if len(sample) != len(self.keys):
raise Exception("You need the same amount of data as you specified in your keys")
self.data.append(sample)
self._check_if_can_write()
def _reset(self, reset_type=None):
if reset_type in ['data', 'all']:
self.data = []
if reset_type in ['time', 'all']:
self.last_write_time = sec_since_boot()
def _check_if_can_write(self):
"""
You shouldn't ever need to call this. It checks if we should write, then calls a thread to do so
with a copy of the current gathered data. Then it clears the self.data variable so that new data
can be added and it won't be duplicated in the next write.
If the thread is still writing by the time of the next write, which shouldn't ever happen unless
you set a low write frequency, it will skip creating another write thread. If this occurs,
something is wrong with writing.
"""
if (sec_since_boot() - self.last_write_time) >= self.write_frequency and len(self.data) >= self.write_threshold and not travis:
if not self.thread_running:
write_thread = threading.Thread(target=self._write, args=(self.data,))
write_thread.daemon = True
write_thread.start()
# self.write(self.data) # non threaded approach
self._reset(reset_type='all')
elif self.write_frequency > 30:
cloudlog.warning('DataCollector write thread is taking a while to write data.')
def _write(self, current_data):
"""
Only write data that has been added so far in background. self.data is still being appended to in
foreground so in the next write event, new data will be written. This eliminates lag causing openpilot
critical processes to pause while a lot of data is being written.
"""
self.thread_running = True
with open(self.file_path, "a") as f:
f.write('{}\n'.format('\n'.join(map(str, current_data)))) # json takes twice as long to write
self._reset(reset_type='time')
self.thread_running = False
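if __name__ == '__main__':
  # Minimal sketch mirroring the docstring example; it assumes an openpilot-style
  # environment where the imports above resolve and /data/openpilot is writable.
  collector = DataCollector('/data/openpilot/custom_data', ['v_ego', 'a_ego', 'custom_dict'],
                            write_frequency=1, write_threshold=1)
  collector.update([17.0, 0.5, {'a': 1}])  # flushed to disk by a later update once 1s has passed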
|
nogotofail/mitm/util/ip.py | kbfl0912/nogotofail | 1,594 | 11165939 | <reponame>kbfl0912/nogotofail
r'''
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import subprocess
import re
def get_interface_addresses():
"""Get all ip addresses assigned to interfaces.
Returns a tuple of (v4 addresses, v6 addresses)
"""
try:
output = subprocess.check_output("ifconfig")
except subprocess.CalledProcessError:
        # Couldn't call ifconfig. Fall back to a best guess.
return (["127.0.0.1"], [])
# Parse out the results.
v4 = re.findall("inet (addr:)?([^ ]*)", output)
v6 = re.findall("inet6 (addr: )?([^ ]*)", output)
v4 = [e[1] for e in v4]
v6 = [e[1] for e in v6]
return v4, v6
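if __name__ == '__main__':
    # Minimal sketch: print whatever addresses ifconfig reports (falls back to
    # 127.0.0.1 when ifconfig cannot be called, as handled above).
    v4, v6 = get_interface_addresses()
    print('IPv4: %s' % v4)
    print('IPv6: %s' % v6)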
|
learntools/computer_vision/ex4.py | roannav/learntools | 359 | 11165942 | from learntools.core import *
import tensorflow as tf
class Q1(ThoughtExperiment):
_hint = r"Stacking the second layer expanded the receptive field by one neuron on each side, giving $3+1+1=5$ for each dimension. If you expanded by one neuron again, what would you get?"
_solution = r"The third layer would have a $7 \times 7$ receptive field."
qvars = bind_exercises(globals(), [
Q1,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
django_dynamic_fixture/tests/test_ddf_copier.py | Kaniabi/django-dynamic-fixture | 190 | 11165953 | # -*- coding: utf-8 -*-
from django.test import TestCase
import pytest
from django_dynamic_fixture.models_test import *
from django_dynamic_fixture.ddf import *
from django_dynamic_fixture.fixture_algorithms.sequential_fixture import SequentialDataFixture
data_fixture = SequentialDataFixture()
class DDFTestCase(TestCase):
def setUp(self):
self.ddf = DynamicFixture(data_fixture)
class CopyTest(DDFTestCase):
def test_it_should_copy_from_model_fields(self):
instance = self.ddf.get(ModelForCopy, int_a=Copier('int_b'), int_b=3)
assert instance.int_a == 3
def test_simple_scenario(self):
instance = self.ddf.get(ModelForCopy, int_b=Copier('int_a'))
assert instance.int_b == instance.int_a
def test_order_of_attributes_must_be_superfluous(self):
instance = self.ddf.get(ModelForCopy, int_a=Copier('int_b'))
assert instance.int_a == instance.int_b
def test_it_should_deal_with_multiple_copiers(self):
instance = self.ddf.get(ModelForCopy, int_a=Copier('int_b'), int_c=Copier('int_d'))
assert instance.int_a == instance.int_b
assert instance.int_c == instance.int_d
def test_multiple_copiers_can_depend_of_one_field(self):
instance = self.ddf.get(ModelForCopy, int_a=Copier('int_c'), int_b=Copier('int_c'))
assert instance.int_a == instance.int_c
assert instance.int_b == instance.int_c
def test_it_should_deal_with_dependent_copiers(self):
instance = self.ddf.get(ModelForCopy, int_a=Copier('int_b'), int_b=Copier('int_c'))
assert instance.int_a == instance.int_b
assert instance.int_b == instance.int_c
def test_it_should_deal_with_relationships(self):
instance = self.ddf.get(ModelForCopy, int_a=Copier('e.int_e'))
assert instance.int_a == instance.e.int_e
instance = self.ddf.get(ModelForCopy, int_a=Copier('e.int_e'), e=DynamicFixture(data_fixture, int_e=5))
assert instance.int_a == 5
def test_it_should_raise_a_bad_data_error_if_value_is_invalid(self):
with pytest.raises(BadDataError):
self.ddf.get(ModelForCopy, int_a=Copier('int_b'), int_b=None)
def test_it_should_raise_a_invalid_configuration_error_if_expression_is_bugged(self):
with pytest.raises(InvalidConfigurationError):
self.ddf.get(ModelForCopy, int_a=Copier('invalid_field'))
with pytest.raises(InvalidConfigurationError):
self.ddf.get(ModelForCopy, int_a=Copier('int_b.invalid_field'))
def test_it_should_raise_a_invalid_configuration_error_if_copier_has_cyclic_dependency(self):
with pytest.raises(InvalidConfigurationError):
self.ddf.get(ModelForCopy, int_a=Copier('int_b'), int_b=Copier('int_a'))
def test_it_must_copy_generated_data_mask_too(self):
import re
instance = self.ddf.get(ModelWithStrings, string=Mask('- _ #'), text=Copier('string'))
assert re.match(r'[A-Z]{1} [a-z]{1} [0-9]{1}', instance.string)
assert re.match(r'[A-Z]{1} [a-z]{1} [0-9]{1}', instance.text)
|
src/anyconfig/processors/datatypes.py | Terrance-forks/python-anyconfig | 213 | 11165957 | #
# Copyright (C) 2018 - 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
#
r"""Common functions and variables.
"""
import typing
from ..models import processor
ProcT = typing.TypeVar('ProcT', bound=processor.Processor)
ProcsT = typing.List[ProcT]
ProcClsT = typing.Type[ProcT]
ProcClssT = typing.List[ProcClsT]
MaybeProcT = typing.Optional[typing.Union[str, ProcT, ProcClsT]]
# vim:sw=4:ts=4:et:
|
benchmarks/bench_string_similarity.py | joshlk/vtext | 127 | 11165965 | <gh_stars>100-1000
from time import time
from glob import glob
from pathlib import Path
import nltk
import nltk.metrics.distance
import vtext
base_dir = Path(__file__).parent.parent.resolve()
try:
import Levenshtein
except ImportError:
Levenshtein = None
if __name__ == "__main__":
input_files = list(glob(str(base_dir / "data" / "comp.graphics" / "*")))
data = []
for file_path in input_files:
with open(file_path, "rt") as fh:
data.append(fh.read())
tokens = []
for document in data:
for word in document.split():
if len(word) > 1:
tokens.append(word)
print("# vectorizing {} documents:".format(len(data)))
tokens = tokens[:20000]
db = [
("vtext dice_similarity", vtext.metrics.string.dice_similarity),
("vtext jaro_similarity", vtext.metrics.string.jaro_similarity),
(
"vtext jaro_winkler_similarity",
lambda x, y: vtext.metrics.string.jaro_winkler_similarity(x, y, 0.1, 4),
),
(
"vtext edit_distance",
lambda x, y: vtext.metrics.string.edit_distance(x, y, 1, False),
),
("NLTK edit_distance", nltk.edit_distance),
]
if Levenshtein is not None:
db.extend(
[
("python-Levenshtein Levenshtein", Levenshtein.distance),
("python-Levenshtein jaro", Levenshtein.jaro),
("python-Levenshtein jaro_winkler", Levenshtein.jaro_winkler),
]
)
for label, func in db:
t0 = time()
for x, y in zip(tokens, tokens[1:]):
func(x, y)
dt = time() - t0
print(
"{:>40}: {:.2f}s [ {:.1f} · 10³ tokens/s]".format(
label, dt, len(tokens) / (dt * 1e3)
)
)
|
examples/cairns_excel/setup/make_spatially_averaged_function.py | samcom12/anuga_core | 136 | 11165967 | <gh_stars>100-1000
"""
Make function to compute average of another function inside mesh triangles
<NAME>, Geoscience Australia 2014+
"""
import scipy
import anuga.utilities.spatialInputUtil as su
from anuga.geometry.polygon import inside_polygon
##############################################################################
def make_spatially_averaged_function(q_function,
domain,
approx_grid_spacing=[1., 1.],
chunk_size=1e+04,
averaging='mean',
polygons_for_averaging=None,
verbose=True):
"""
Given a function q_function (used to set quantities), return a function
which estimates the spatial-average of q_function in each mesh triangle.
It does this by generating a grid near the mesh triangle, with points
spaced ~ approx_grid_spacing, then finding those points inside the mesh
triangle, computing q_function at each, and averaging the result.
If averaging='mean', then the mean is returned. Alternatively 'min' or
'max' can be used.
The returned function only applies to domain centroids, since mesh
triangles are used to spatial average.
All the domain centroids must be passed to the returned function at the
same time, in the order that they appear in domain, as we use the ordering
to associate centroids and vertices.
INPUTS:
q_function -- the function that you wish to spatially
average over the mesh triangles
domain -- the ANUGA domain
approx_grid_spacing --
averaging is computed from points in each triangle, generated by
anuga.utilities.SpatialInputUtil.gridPointsInPolygon, with the
value of approx_grid_spacing passed there
chunk_size -- Number of mesh triangles to average in each call to
q_function. A suitably large chunk_size can reduce function call
overhead for some q_functions, but might have consume lots of
memory if there are many grid-points in each triangle
averaging -- character, what to do with the values inside the cell
'mean' / 'min' / 'max'
polygons_for_averaging -- list of polygons or None. If not None, then
we only apply the averaging to points inside the polygons. Each
polygon can be specified as either its filename (accepted by read_polygon)
or directly in the list/array formats accepted by anuga.
verbose -- print information
OUTPUTS:
function F(x,y) which can be passed as e.g.
domain.set_quantity('elevation', F, location='centroids')
"""
chunk_size = int(chunk_size)
averaging = averaging
def elevation_setter(xc, yc):
# Return scipy array of values
out = xc * 0.
# Get multiple elevation values in each triangle.
# Process triangles in chunks to reduce function call overhead
lx = len(xc)
lx_div_cs = scipy.ceil(lx * 1. / (1. * chunk_size)).astype(int)
# Crude check that xc/yc are the centroid values
#
erMess = ' Result of make_meanFun can ONLY be applied to a vector' +\
' of ALL centroid coordinates\n' +\
' (since mesh triangles are used to spatially average)'
assert scipy.all(xc == domain.centroid_coordinates[:, 0]), erMess
assert scipy.all(yc == domain.centroid_coordinates[:, 1]), erMess
# Find triangles in which we want to average
if polygons_for_averaging is not None:
averaging_flag = 0*xc
# Need georeferenced centroid coordinates to find which
# are in the polygon
xll = domain.geo_reference.xllcorner
yll = domain.geo_reference.yllcorner
centroid_coordinates_georef = scipy.vstack([xc + xll, yc + yll]).transpose()
for j in range(len(polygons_for_averaging)):
poly_j = polygons_for_averaging[j]
# poly_j can either be a polygon, or a filename
if type(poly_j) is str:
poly_j = su.read_polygon(poly_j)
points_in_poly_j = inside_polygon(centroid_coordinates_georef,
poly_j)
averaging_flag[points_in_poly_j] = 1
else:
averaging_flag = 1 + 0*xc
for i in range(lx_div_cs):
# Evaluate in triangles lb:ub
lb = i * chunk_size
ub = min((i + 1) * chunk_size, lx)
if verbose:
print 'Averaging in triangles ', lb, '-', ub - 1
# Store x,y,triangleIndex
px = scipy.array([])
py = scipy.array([])
p_indices = scipy.array([])
for j in range(lb, ub):
# If we average this cell, then get a grid
# of points in it. Otherwise just get the centroid
# coordinates.
if averaging_flag[j] == 1:
mesh_tri = \
domain.mesh.vertex_coordinates[
range(3 * j, 3 * j + 3), :].tolist()
pts = su.gridPointsInPolygon(
mesh_tri,
approx_grid_spacing=approx_grid_spacing)
else:
# Careful to keep this a 2D array
pts = domain.centroid_coordinates[j,:, None].transpose()
px = scipy.hstack([px, pts[:, 0]])
py = scipy.hstack([py, pts[:, 1]])
p_indices = scipy.hstack([p_indices,
scipy.repeat(j, len(pts[:, 0]))])
# Get function values at all px,py
if verbose:
print ' Evaluating function at ', len(px), ' points'
allTopo = q_function(px, py)
# Set output values in lb:ub
for j in range(lb, ub):
out_indices = (p_indices == j).nonzero()[0]
assert len(out_indices) > 0
if(averaging == 'mean'):
out[j] = allTopo[out_indices].mean()
elif(averaging == 'min'):
out[j] = allTopo[out_indices].min()
elif(averaging == 'max'):
out[j] = allTopo[out_indices].max()
else:
raise Exception('Unknown value of averaging')
return(out)
return elevation_setter
# Quick test
if __name__ == '__main__':
import anuga
domain = anuga.rectangular_cross_domain(10, 5, len1=10.0, len2=5.0)
# Define a topography function where the spatial scale of variation matches
# the scale of a mesh triangle
def topography(x, y):
return x%0.5
# Do 'averaging' where 2 <= y <= 3
polygon_for_averaging = [ [[0.0, 2.0], [0.0, 3.0], [10.0, 3.0], [10.0, 2.0]] ]
topography_smooth = make_spatially_averaged_function(topography, domain,
approx_grid_spacing = [0.1, 0.1], averaging = 'min',
polygons_for_averaging = polygon_for_averaging,
verbose=False)
domain.set_quantity('elevation', topography_smooth, location='centroids') # Use function for elevation
# Check that it worked
inpol = ((domain.centroid_coordinates[:,1] >= 2.0) *
(domain.centroid_coordinates[:,1] <= 3.0)).nonzero()
outpol = ((domain.centroid_coordinates[:,1] <= 2.0) +
(domain.centroid_coordinates[:,1] >= 3.0)).nonzero()
elv = domain.quantities['elevation'].centroid_values
# Check that the elevation in the 'averaging' band is very small
# (since we used 'min' averaging)
if elv[inpol].mean() < 1.0e-06:
print 'PASS'
else:
print 'FAIL'
# Check that no 'averaging' occurred outside the polygon
x = domain.centroid_coordinates[:,0]
if all(elv[outpol] - x[outpol]%0.5 == 0.0):
print 'PASS'
else:
print 'FAIL'
# Another test which can catch index errors
topography_smooth2 = make_spatially_averaged_function(topography, domain,
approx_grid_spacing = [0.1, 0.1], averaging = 'min',
verbose=False)
domain.set_quantity('elevation', topography_smooth2, location='centroids')
# If we get to here, then the above function did not hit an index error.
print 'PASS'
# Another test which can catch index errors
# Make the polygon entirely outside of the domain!
polygon_for_averaging3 = [ [[0.0, -2.0], [0.0, -3.0], [10.0, -3.0], [10.0, -2.0]] ]
topography_smooth3 = make_spatially_averaged_function(topography, domain,
approx_grid_spacing = [0.1, 0.1], averaging = 'min',
verbose=False)
domain.set_quantity('elevation', topography_smooth3, location='centroids')
# If we get to here, then the above function did not hit an index error.
print 'PASS'
|
fsf-server/modules/template.py | akniffe1/fsf | 259 | 11165969 | #!/usr/bin/env python
#
# Author:
# Description:
# Date:
'''
Copyright 2015 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
def MODULE_NAME(s, buff):
# Function must return a dictionary
MY_DICTIONARY = {}
return MY_DICTIONARY
if __name__ == '__main__':
# For testing, s object can be None type if unused in function
print MODULE_NAME(None, sys.stdin.read())
|
node_sampler/loss.py | maorp/NeuralGraph | 117 | 11165981 | import sys,os
import torch
import torch.nn as nn
import numpy as np
import math
import config as cfg
from utils.pcd_utils import *
from nnutils.node_proc import convert_embedding_to_explicit_params, compute_inverse_occupancy, \
sample_rbf_surface, sample_rbf_weights, bounding_box_error, extract_view_omegas_from_embedding
class SamplerLoss(torch.nn.Module):
def __init__(self):
super(SamplerLoss, self).__init__()
self.point_loss = PointLoss()
self.node_center_loss = NodeCenterLoss()
self.affinity_loss = AffinityLoss()
self.unique_neighbor_loss = UniqueNeighborLoss()
self.viewpoint_consistency_loss = ViewpointConsistencyLoss()
self.surface_consistency_loss = SurfaceConsistencyLoss()
def forward(self, embedding, uniform_samples, near_surface_samples, surface_samples, \
grid, world2grid, world2orig, rotated2gaps, bbox_lower, bbox_upper, \
source_idxs, target_idxs, pred_distances, pair_weights, affinity_matrix, evaluate=False
):
loss_total = torch.zeros((1), dtype=embedding.dtype, device=embedding.device)
view_omegas = extract_view_omegas_from_embedding(embedding, cfg.num_nodes)
constants, scales, rotations, centers = convert_embedding_to_explicit_params(embedding, rotated2gaps, cfg.num_nodes, cfg.scaling_type)
# Uniform sampling loss.
loss_uniform = None
if cfg.lambda_sampling_uniform is not None:
loss_uniform = self.point_loss(uniform_samples, constants, scales, centers)
loss_total += cfg.lambda_sampling_uniform * loss_uniform
# Near surface sampling loss.
loss_near_surface = None
if cfg.lambda_sampling_near_surface is not None:
loss_near_surface = self.point_loss(near_surface_samples, constants, scales, centers)
loss_total += cfg.lambda_sampling_near_surface * loss_near_surface
# Node center loss.
loss_node_center = None
if cfg.lambda_sampling_node_center is not None:
loss_node_center = self.node_center_loss(constants, scales, centers, grid, world2grid, bbox_lower, bbox_upper)
loss_total += cfg.lambda_sampling_node_center * loss_node_center
# Affinity loss.
loss_affinity_rel = None
loss_affinity_abs = None
if (cfg.lambda_affinity_rel_dist is not None) or (cfg.lambda_affinity_abs_dist is not None):
loss_affinity_rel, loss_affinity_abs = self.affinity_loss(centers, source_idxs, target_idxs, pred_distances, pair_weights)
if cfg.lambda_affinity_rel_dist is not None: loss_total += cfg.lambda_affinity_rel_dist * loss_affinity_rel
if cfg.lambda_affinity_abs_dist is not None: loss_total += cfg.lambda_affinity_abs_dist * loss_affinity_abs
# Unique neighbor loss.
loss_unique_neighbor = None
if cfg.lambda_unique_neighbor is not None and affinity_matrix is not None:
loss_unique_neighbor = self.unique_neighbor_loss(affinity_matrix)
loss_total += cfg.lambda_unique_neighbor * loss_unique_neighbor
# Viewpoint consistency loss.
loss_viewpoint_position = None
loss_viewpoint_scale = None
loss_viewpoint_constant = None
loss_viewpoint_rotation = None
if (cfg.lambda_viewpoint_position is not None) or (cfg.lambda_viewpoint_scale is not None) or \
(cfg.lambda_viewpoint_constant is not None) or (cfg.lambda_viewpoint_rotation is not None):
loss_viewpoint_position, loss_viewpoint_scale, loss_viewpoint_constant, loss_viewpoint_rotation = \
self.viewpoint_consistency_loss(constants, scales, rotations, centers)
if cfg.lambda_viewpoint_position is not None:
loss_total += cfg.lambda_viewpoint_position * loss_viewpoint_position
if cfg.lambda_viewpoint_scale is not None:
loss_total += cfg.lambda_viewpoint_scale * loss_viewpoint_scale
if cfg.lambda_viewpoint_constant is not None:
loss_total += cfg.lambda_viewpoint_constant * loss_viewpoint_constant
if cfg.lambda_viewpoint_rotation is not None:
loss_total += cfg.lambda_viewpoint_rotation * loss_viewpoint_rotation
# Surface consistency loss.
loss_surface_consistency = None
if cfg.lambda_surface_consistency is not None:
loss_surface_consistency = self.surface_consistency_loss(constants, scales, rotations, centers, surface_samples, grid, world2grid)
loss_total += cfg.lambda_surface_consistency * loss_surface_consistency
if evaluate:
return loss_total, {
"loss_uniform": loss_uniform,
"loss_near_surface": loss_near_surface,
"loss_node_center": loss_node_center,
"loss_affinity_rel": loss_affinity_rel,
"loss_affinity_abs": loss_affinity_abs,
"loss_unique_neighbor": loss_unique_neighbor,
"loss_viewpoint_position": loss_viewpoint_position,
"loss_viewpoint_scale": loss_viewpoint_scale,
"loss_viewpoint_constant": loss_viewpoint_constant,
"loss_viewpoint_rotation": loss_viewpoint_rotation,
"loss_surface_consistency": loss_surface_consistency
}
else:
return loss_total
class PointLoss(nn.Module):
def __init__(self):
super(PointLoss, self).__init__()
def forward(self, points_with_sdf, constants, scales, centers):
batch_size = points_with_sdf.shape[0]
points = points_with_sdf[:, :, :3]
is_outside = (points_with_sdf[:, :, 3] > 0.0)
class_gt = is_outside.float() # outside: 1, inside: 0
# Evaluate predicted class at given points.
sdf_pred = sample_rbf_surface(points, constants, scales, centers, cfg.use_constants, cfg.aggregate_coverage_with_max)
class_pred = compute_inverse_occupancy(sdf_pred, cfg.soft_transfer_scale, cfg.level_set)
# We apply weight scaling to interior points.
weights = is_outside.float() + cfg.interior_point_weight * (~is_outside).float()
# Compute weighted L2 loss.
diff = class_gt - class_pred
diff2 = diff * diff
weighted_diff2 = weights * diff2
loss = weighted_diff2.mean()
return loss
class NodeCenterLoss(nn.Module):
def __init__(self):
super(NodeCenterLoss, self).__init__()
def forward(self, constants, scales, centers, grid, world2grid, bbox_lower, bbox_upper):
batch_size = constants.shape[0]
# Check if centers are inside the bounding box.
# If not, we penalize them by using L2 distance to nearest bbox corner,
# since there would be no SDF gradients there.
bbox_error = bounding_box_error(centers, bbox_lower, bbox_upper) # (bs, num_nodes)
# Query SDF grid, to encourage centers to be inside the shape.
# Convert center positions to grid CS.
centers_grid_cs = centers.view(batch_size, cfg.num_nodes, 3, 1)
A_world2grid = world2grid[:, :3, :3].view(batch_size, 1, 3, 3).expand(-1, cfg.num_nodes, -1, -1)
t_world2grid = world2grid[:, :3, 3].view(batch_size, 1, 3, 1).expand(-1, cfg.num_nodes, -1, -1)
centers_grid_cs = torch.matmul(A_world2grid, centers_grid_cs) + t_world2grid
centers_grid_cs = centers_grid_cs.view(batch_size, -1, 3)
# Sample signed distance field.
dim_z = grid.shape[1]
dim_y = grid.shape[2]
dim_x = grid.shape[3]
grid = grid.view(batch_size, 1, dim_z, dim_y, dim_x)
centers_grid_cs[..., 0] /= float(dim_x - 1)
centers_grid_cs[..., 1] /= float(dim_y - 1)
centers_grid_cs[..., 2] /= float(dim_z - 1)
centers_grid_cs = 2.0 * centers_grid_cs - 1.0
centers_grid_cs = centers_grid_cs.view(batch_size, -1, 1, 1, 3)
# We use border values for out-of-the-box queries, to have gradient zero at boundaries.
centers_sdf_gt = torch.nn.functional.grid_sample(grid, centers_grid_cs, align_corners=True, padding_mode="border")
# If SDF value is higher than 0, we penalize it.
centers_sdf_gt = centers_sdf_gt.view(batch_size, cfg.num_nodes)
center_distance_error = torch.max(centers_sdf_gt, torch.zeros_like(centers_sdf_gt)) # (bs, num_nodes)
# Final loss is just a sum of both losses.
node_center_loss = bbox_error + center_distance_error
return torch.mean(node_center_loss)
class AffinityLoss(nn.Module):
def __init__(self):
super(AffinityLoss, self).__init__()
def forward(self, centers, source_idxs, target_idxs, pred_distances, pair_weights):
batch_size = centers.shape[0]
num_pairs = pred_distances.shape[1]
loss_rel = 0.0
loss_abs = 0.0
if num_pairs > 0:
source_positions = centers[:, source_idxs]
target_positions = centers[:, target_idxs]
diff = (source_positions - target_positions)
dist2 = torch.sum(diff*diff, 2) # (bs, num_pairs)
abs_distance2 = pair_weights * dist2
loss_abs = abs_distance2.mean()
pred_distances2 = pred_distances * pred_distances
pred_distances2 = pred_distances2 # (bs, num_pairs)
weights_dist = pair_weights * torch.abs(pred_distances2 - dist2) # (bs, num_pairs)
loss_rel = weights_dist.mean()
return loss_rel, loss_abs
class UniqueNeighborLoss(nn.Module):
def __init__(self):
super(UniqueNeighborLoss, self).__init__()
def forward(self, affinity_matrix):
assert affinity_matrix.shape[0] == cfg.num_neighbors and affinity_matrix.shape[1] == cfg.num_nodes and affinity_matrix.shape[2] == cfg.num_nodes
loss = 0.0
for source_idx in range(cfg.num_neighbors):
for target_idx in range(source_idx + 1, cfg.num_neighbors):
affinity_source = affinity_matrix[source_idx].view(cfg.num_nodes, cfg.num_nodes)
affinity_target = affinity_matrix[target_idx].view(cfg.num_nodes, cfg.num_nodes)
# We want rows of different neighbors to be unique.
affinity_dot = affinity_source * affinity_target
affinity_dist = torch.sum(affinity_dot, dim=1)
loss += affinity_dist.sum()
# Normalize the loss by dividing with the number of pairs.
num_pairs = (cfg.num_neighbors * (cfg.num_neighbors - 1)) / 2
loss = loss / float(num_pairs)
return loss
class ViewpointConsistencyLoss(nn.Module):
def __init__(self):
super(ViewpointConsistencyLoss, self).__init__()
def forward(self, constants, scales, rotations, centers):
batch_size = constants.shape[0]
assert batch_size % 2 == 0
# We expect every two consecutive samples are different viewpoints at same time step.
loss_viewpoint_position = 0.0
if cfg.lambda_viewpoint_position is not None:
centers_pairs = centers.view(batch_size // 2, 2, cfg.num_nodes, -1)
centers_diff = centers_pairs[:, 0, :, :] - centers_pairs[:, 1, :, :]
centers_dist2 = centers_diff * centers_diff
loss_viewpoint_position += centers_dist2.mean()
loss_viewpoint_scale = 0.0
if cfg.lambda_viewpoint_scale is not None:
scales_pairs = scales.view(batch_size // 2, 2, cfg.num_nodes, -1)
scales_diff = scales_pairs[:, 0, :, :] - scales_pairs[:, 1, :, :]
scales_dist2 = scales_diff * scales_diff
loss_viewpoint_scale += scales_dist2.mean()
loss_viewpoint_constant = 0.0
if cfg.lambda_viewpoint_constant is not None:
constants_pairs = constants.view(batch_size // 2, 2, cfg.num_nodes, -1)
constants_diff = constants_pairs[:, 0, :, :] - constants_pairs[:, 1, :, :]
constants_dist2 = constants_diff * constants_diff
loss_viewpoint_constant += constants_dist2.mean()
loss_viewpoint_rotation = 0.0
if cfg.lambda_viewpoint_rotation is not None:
rotations_pairs = rotations.view(batch_size // 2, 2, cfg.num_nodes, 3, 3)
rotations_diff = rotations_pairs[:, 0, :, :, :] - rotations_pairs[:, 1, :, :, :]
rotations_dist2 = rotations_diff * rotations_diff
loss_viewpoint_rotation += rotations_dist2.mean()
return loss_viewpoint_position, loss_viewpoint_scale, loss_viewpoint_constant, loss_viewpoint_rotation
class SurfaceConsistencyLoss(nn.Module):
def __init__(self):
super(SurfaceConsistencyLoss, self).__init__()
def forward(self, constants, scales, rotations, centers, surface_samples, grid, world2grid):
batch_size = constants.shape[0]
num_points = surface_samples.shape[1]
loss = 0.0
surface_points = surface_samples[:, :, :3]
# Compute skinning weights for sampled points.
skinning_weights = sample_rbf_weights(surface_points, constants, scales, centers, cfg.use_constants) # (bs, num_points, num_nodes)
# Compute loss for pairs of frames.
for source_idx in range(batch_size):
target_idx = source_idx + 1 if source_idx < batch_size - 1 else 0
# Get source points and target grid.
source_points = surface_points[source_idx] # (num_points, 3)
target_grid = grid[target_idx] # (grid_dim, grid_dim, grid_dim)
# Get source and target rotations.
R_source = rotations[source_idx] # (num_nodes, 3, 3)
R_target = rotations[target_idx] # (num_nodes, 3, 3)
# Compute relative frame-to-frame rotation and translation estimates.
t_source = centers[source_idx]
t_target = centers[target_idx]
R_source_inv = R_source.permute(0, 2, 1)
R_rel = torch.matmul(R_target, R_source_inv) # (num_nodes, 3, 3)
# Get correspondending skinning weights and normalize them to sum up to 1.
weights = skinning_weights[source_idx].view(num_points, cfg.num_nodes)
weights_sum = weights.sum(dim=1, keepdim=True)
weights = weights.div(weights_sum)
# Apply deformation to sampled points.
t_source = t_source.view(1, cfg.num_nodes, 3, 1).expand(num_points, -1, -1, -1) # (num_points, num_nodes, 3, 1)
t_target = t_target.view(1, cfg.num_nodes, 3, 1).expand(num_points, -1, -1, -1) # (num_points, num_nodes, 3, 1)
R_rel = R_rel.view(1, cfg.num_nodes, 3, 3).expand(num_points, -1, -1, -1) # (num_points, num_nodes, 3, 3)
source_points = source_points.view(num_points, 1, 3, 1).expand(-1, cfg.num_nodes, -1, -1) # (num_points, num_nodes, 3, 1)
weights = weights.view(num_points, cfg.num_nodes, 1, 1).expand(-1, -1, 3, -1) # (num_points, num_nodes, 3, 1)
transformed_points = torch.matmul(R_rel, (source_points - t_source)) + t_target # (num_points, num_nodes, 3, 1)
transformed_points = torch.sum(weights * transformed_points, dim=1).view(num_points, 3)
# Convert transformed points to grid CS.
transformed_points = transformed_points.view(num_points, 3, 1)
A_world2grid = world2grid[target_idx, :3, :3].view(1, 3, 3).expand(num_points, -1, -1)
t_world2grid = world2grid[target_idx, :3, 3].view(1, 3, 1).expand(num_points, -1, -1)
transformed_points_grid_cs = torch.matmul(A_world2grid, transformed_points) + t_world2grid
transformed_points_grid_cs = transformed_points_grid_cs.view(num_points, 3)
# Sample signed distance field.
dim_z = target_grid.shape[0]
dim_y = target_grid.shape[1]
dim_x = target_grid.shape[2]
target_grid = target_grid.view(1, 1, dim_z, dim_y, dim_x)
transformed_points_grid_cs[..., 0] /= float(dim_x - 1)
transformed_points_grid_cs[..., 1] /= float(dim_y - 1)
transformed_points_grid_cs[..., 2] /= float(dim_z - 1)
transformed_points_grid_cs = 2.0 * transformed_points_grid_cs - 1.0
transformed_points_grid_cs = transformed_points_grid_cs.view(1, -1, 1, 1, 3)
# We use border values for out-of-the-box queries, to have gradient zero at boundaries.
transformed_points_sdf_gt = torch.nn.functional.grid_sample(target_grid, transformed_points_grid_cs, align_corners=True, padding_mode="border")
# If SDF value is different than 0, we penalize it.
transformed_points_sdf_gt = transformed_points_sdf_gt.view(num_points)
df_error = torch.mean(transformed_points_sdf_gt * transformed_points_sdf_gt)
loss += df_error
return loss |
tests/unit2/test_isometric.py | yegarti/arcade | 824 | 11166011 | <reponame>yegarti/arcade<filename>tests/unit2/test_isometric.py
import arcade
def test_isometric_grid_to_screen(window):
tile_x = 0
tile_y = 0
width = 10
height = 10
tile_width = 64
tile_height = 64
x, y = arcade.isometric_grid_to_screen(tile_x, tile_y,
width, height,
tile_width, tile_height)
assert x == 320
assert y == 608
tile_x = 2
tile_y = 2
width = 10
height = 10
tile_width = 64
tile_height = 64
x, y = arcade.isometric_grid_to_screen(tile_x, tile_y,
width, height,
tile_width, tile_height)
assert x == 320
assert y == 480
def test_screen_to_isometric_grid(window):
screen_x = 0
screen_y = 0
width = 10
height = 10
tile_width = 64
tile_height = 64
x, y = arcade.screen_to_isometric_grid(screen_x, screen_y,
width, height,
tile_width, tile_height)
print(x, y)
assert x == 4
assert y == 14
def test_create_isometric_grid_lines(window):
width = 10
height = 10
tile_width = 64
tile_height = 64
lines = arcade.create_isometric_grid_lines(width, height,
tile_width, tile_height,
arcade.color.BLACK, 2)
assert lines
|
examples/map.py | artemigkh/cassiopeia | 437 | 11166020 | <gh_stars>100-1000
import cassiopeia as cass
from cassiopeia import Map, Maps
def get_maps():
maps = cass.get_maps(region="NA")
for map in maps:
print(map.name, map.id)
map = Map(name="<NAME>", region="NA")
print(map.id)
if __name__ == "__main__":
get_maps()
|
exercises/zh/exc_02_02_01.py | Jette16/spacy-course | 2,085 | 11166022 | <filename>exercises/zh/exc_02_02_01.py
import spacy
nlp = spacy.load("zh_core_web_sm")
doc = nlp("我养了一只猫。")
# Look up the hash for the word "猫"
cat_hash = ____.____.____[____]
print(cat_hash)
# Look up cat_hash to get the string back
cat_string = ____.____.____[____]
print(cat_string)
|
test/data/test_utils.py | parmeet/text | 3,172 | 11166026 | import io
from torchtext.data import get_tokenizer
from torchtext.utils import unicode_csv_reader
from ..common.torchtext_test_case import TorchtextTestCase
from ..common.assets import get_asset_path
class TestUtils(TorchtextTestCase):
TEST_STR = "A string, particularly one with slightly complex punctuation."
def test_get_tokenizer_split(self):
# Test the default case with str.split
assert get_tokenizer(str.split) == str.split
assert get_tokenizer(str.split)(self.TEST_STR) == str.split(self.TEST_STR)
def test_get_tokenizer_toktokt(self):
# Test Toktok option. Test strings taken from NLTK doctests.
# Note that internally, MosesTokenizer converts to unicode if applicable
toktok_tokenizer = get_tokenizer("toktok")
assert toktok_tokenizer(self.TEST_STR) == [
"A", "string", ",", "particularly", "one", "with", "slightly",
"complex", "punctuation", "."]
# Test that errors are raised for invalid input arguments.
with self.assertRaises(ValueError):
get_tokenizer(1)
with self.assertRaises(ValueError):
get_tokenizer("some other string")
def test_text_nomalize_function(self):
# Test text_nomalize function in torchtext.datasets.text_classification
ref_lines = []
test_lines = []
tokenizer = get_tokenizer("basic_english")
data_path = get_asset_path('text_normalization_ag_news_test.csv')
with io.open(data_path, encoding="utf8") as f:
reader = unicode_csv_reader(f)
for row in reader:
test_lines.append(tokenizer(' , '.join(row)))
data_path = get_asset_path('text_normalization_ag_news_ref_results.test')
with io.open(data_path, encoding="utf8") as ref_data:
for line in ref_data:
line = line.split()
self.assertEqual(line[0][:9], '__label__')
line[0] = line[0][9:] # remove '__label__'
ref_lines.append(line)
self.assertEqual(ref_lines, test_lines)
|
timeago/locales/guj_IN.py | s3q/andrunlook-apy | 220 | 11166028 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2020-10-04
@author : Aashrut
@Country : India
@Language : Gujarati
'''
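# Each entry is a [past, future] pair: "just now" first, then seconds, followed by
# singular and plural ("%s") forms for minutes, hours, days, weeks, months and years.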
LOCALE = [
['હમણાં', 'થોડી વાર'],
['%s સેકંડ પહેલા', '%s સેકંડમાં'],
['1 મિનિટ પહેલા', '1 મિનિટમાં'],
['%s મિનિટ પહેલા', '%s મિનિટમાં'] ,
['1 કલાક પહેલા', '1 કલાકમાં'] ,
['%s કલાક પહેલા', '%s કલાકમાં'] ,
['1 દિવસ પહેલા', '1 દિવસમાં'] ,
['%s દિવસ પહેલા', '%s દિવસમાં'] ,
['1 અઠવાડિયા પહેલા', '1 અઠવાડિયામાં'] ,
['%s અઠવાડિયા પહેલા', '%s અઠવાડિયામાં'] ,
['1 મહિના પહેલા', '1 મહિનામાં'] ,
['%s મહિના પહેલા', '%s મહિનામાં'] ,
['1 વર્ષ પહેલા', '1 વર્ષમાં'] ,
['%s વર્ષ પહેલા','%s વર્ષમાં'],
] |
esphome/components/ble_client/output/__init__.py | OttoWinter/esphomeyaml | 249 | 11166074 | <gh_stars>100-1000
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import ble_client, esp32_ble_tracker, output
from esphome.const import CONF_ID, CONF_SERVICE_UUID
from .. import ble_client_ns
DEPENDENCIES = ["ble_client"]
CONF_CHARACTERISTIC_UUID = "characteristic_uuid"
CONF_REQUIRE_RESPONSE = "require_response"
BLEBinaryOutput = ble_client_ns.class_(
"BLEBinaryOutput", output.BinaryOutput, ble_client.BLEClientNode, cg.Component
)
CONFIG_SCHEMA = cv.All(
output.BINARY_OUTPUT_SCHEMA.extend(
{
cv.Required(CONF_ID): cv.declare_id(BLEBinaryOutput),
cv.Required(CONF_SERVICE_UUID): esp32_ble_tracker.bt_uuid,
cv.Required(CONF_CHARACTERISTIC_UUID): esp32_ble_tracker.bt_uuid,
cv.Optional(CONF_REQUIRE_RESPONSE, default=False): cv.boolean,
}
)
.extend(cv.COMPONENT_SCHEMA)
.extend(ble_client.BLE_CLIENT_SCHEMA)
)
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
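    # The length of the configured UUID string tells us whether it is a 16-, 32-, or
    # 128-bit UUID, so dispatch to the matching setter on the generated C++ object.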
if len(config[CONF_SERVICE_UUID]) == len(esp32_ble_tracker.bt_uuid16_format):
cg.add(
var.set_service_uuid16(esp32_ble_tracker.as_hex(config[CONF_SERVICE_UUID]))
)
elif len(config[CONF_SERVICE_UUID]) == len(esp32_ble_tracker.bt_uuid32_format):
cg.add(
var.set_service_uuid32(esp32_ble_tracker.as_hex(config[CONF_SERVICE_UUID]))
)
elif len(config[CONF_SERVICE_UUID]) == len(esp32_ble_tracker.bt_uuid128_format):
uuid128 = esp32_ble_tracker.as_reversed_hex_array(config[CONF_SERVICE_UUID])
cg.add(var.set_service_uuid128(uuid128))
if len(config[CONF_CHARACTERISTIC_UUID]) == len(esp32_ble_tracker.bt_uuid16_format):
cg.add(
var.set_char_uuid16(
esp32_ble_tracker.as_hex(config[CONF_CHARACTERISTIC_UUID])
)
)
elif len(config[CONF_CHARACTERISTIC_UUID]) == len(
esp32_ble_tracker.bt_uuid32_format
):
cg.add(
var.set_char_uuid32(
esp32_ble_tracker.as_hex(config[CONF_CHARACTERISTIC_UUID])
)
)
elif len(config[CONF_CHARACTERISTIC_UUID]) == len(
esp32_ble_tracker.bt_uuid128_format
):
uuid128 = esp32_ble_tracker.as_reversed_hex_array(
config[CONF_CHARACTERISTIC_UUID]
)
cg.add(var.set_char_uuid128(uuid128))
cg.add(var.set_require_response(config[CONF_REQUIRE_RESPONSE]))
yield output.register_output(var, config)
yield ble_client.register_ble_node(var, config)
yield cg.register_component(var, config)
|
vel/launcher.py | galatolofederico/vel | 273 | 11166090 | <reponame>galatolofederico/vel<filename>vel/launcher.py<gh_stars>100-1000
#!/usr/bin/env python
import argparse
import multiprocessing
import sys
from vel.internals.model_config import ModelConfig
from vel.internals.parser import Parser
def main():
""" Paperboy entry point - parse the arguments and run a command """
parser = argparse.ArgumentParser(description='Paperboy deep learning launcher')
parser.add_argument('config', metavar='FILENAME', help='Configuration file for the run')
parser.add_argument('command', metavar='COMMAND', help='A command to run')
parser.add_argument('varargs', nargs='*', metavar='VARARGS', help='Extra options to the command')
parser.add_argument('-r', '--run_number', type=int, default=0, help="A run number")
parser.add_argument('-d', '--device', default='cuda', help="A device to run the model on")
parser.add_argument('-s', '--seed', type=int, default=None, help="Random seed for the project")
parser.add_argument(
'-p', '--param', type=str, metavar='NAME=VALUE', action='append', default=[],
help="Configuration parameters"
)
parser.add_argument(
'--continue', action='store_true', default=False, help="Continue previously started learning process"
)
parser.add_argument(
'--profile', type=str, default=None, help="Profiler output"
)
args = parser.parse_args()
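    # Build the run configuration; each -p NAME=VALUE flag is parsed into the params dict.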
model_config = ModelConfig.from_file(
args.config, args.run_number, continue_training=getattr(args, 'continue'), device=args.device, seed=args.seed,
params={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)}
)
if model_config.project_dir not in sys.path:
sys.path.append(model_config.project_dir)
multiprocessing_setting = model_config.provide_with_default('multiprocessing', default=None)
if multiprocessing_setting:
# This needs to be called before any of PyTorch module is imported
multiprocessing.set_start_method(multiprocessing_setting)
# Set seed already in the launcher
from vel.util.random import set_seed
set_seed(model_config.seed)
model_config.banner(args.command)
if args.profile:
print("[PROFILER] Running Vel in profiling mode, output filename={}".format(args.profile))
import cProfile
import pstats
profiler = cProfile.Profile()
profiler.enable()
model_config.run_command(args.command, args.varargs)
profiler.disable()
profiler.dump_stats(args.profile)
profiler.print_stats(sort='tottime')
print("======================================================================")
pstats.Stats(profiler).strip_dirs().sort_stats('tottime').print_stats(30)
print("======================================================================")
pstats.Stats(profiler).strip_dirs().sort_stats('cumtime').print_stats(30)
else:
model_config.run_command(args.command, args.varargs)
model_config.quit_banner()
if __name__ == '__main__':
main()
|
python/array/reorderarraybyindex.py | VinodKW/interview | 11,393 | 11166103 | # http://www.geeksforgeeks.org/reorder-a-array-according-to-given-indexes/
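# In-place reorder: move input[i] to position index[i] by following permutation cycles,
# swapping until every slot holds its final value (O(n) time, O(1) extra space).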
def reorder(input, index):
if len(input) != len(index):
raise ValueError
for i in range(len(index)):
while index[i] != i:
s_index = index[index[i]]
s_val = input[index[i]]
index[index[i]] = index[i]
input[index[i]] = input[i]
index[i] = s_index
input[i] = s_val
if __name__ == '__main__':
input = [50, 40, 70, 60, 90]
index = [3, 0, 4, 1, 2]
reorder(input, index)
print(input)
print(index)
|
tests/admin_scripts/broken_app/models.py | jpmallarino/django | 61,676 | 11166121 | <filename>tests/admin_scripts/broken_app/models.py
from django.db import modelz # NOQA
|
alipay/aop/api/domain/ItemSkuPropertyInfo.py | antopen/alipay-sdk-python-all | 213 | 11166149 | <filename>alipay/aop/api/domain/ItemSkuPropertyInfo.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ItemSkuPropertyInfo(object):
def __init__(self):
self._property_key = None
self._property_value = None
@property
def property_key(self):
return self._property_key
@property_key.setter
def property_key(self, value):
self._property_key = value
@property
def property_value(self):
return self._property_value
@property_value.setter
def property_value(self, value):
self._property_value = value
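    # Serialize to a plain dict, delegating to nested objects' to_alipay_dict() when available.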
def to_alipay_dict(self):
params = dict()
if self.property_key:
if hasattr(self.property_key, 'to_alipay_dict'):
params['property_key'] = self.property_key.to_alipay_dict()
else:
params['property_key'] = self.property_key
if self.property_value:
if hasattr(self.property_value, 'to_alipay_dict'):
params['property_value'] = self.property_value.to_alipay_dict()
else:
params['property_value'] = self.property_value
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ItemSkuPropertyInfo()
if 'property_key' in d:
o.property_key = d['property_key']
if 'property_value' in d:
o.property_value = d['property_value']
return o
|
char_cnn.py | PlayDeep/CharCNN | 253 | 11166151 | import tensorflow as tf
from textify.models import _Classifier
from textify.layers.embeddings import Embedding
class CharCNN(_Classifier):
def _get_embeddings(self, features, mode=tf.estimator.ModeKeys.TRAIN):
embedding = Embedding(self._params['embedding_specs'])
embedding.build(None)
return embedding.call(features)
def _encode(self, embeddings, lengths=None, mode=tf.estimator.ModeKeys.TRAIN):
conv_layers = self._params.get('conv_layers', None)
if conv_layers is None:
conv_layers = [
[256, 7, 3],
[256, 7, 3],
[256, 3, None],
[256, 3, None],
[256, 3, None],
[256, 3, 3]
]
x = embeddings
vec_dim = self._params.get('seq_len', 1014)
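        # Track how the sequence length shrinks through each stride-1 valid convolution and
        # max-pool stage, so the output can be flattened to a fixed-size feature vector.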
for i, cl in enumerate(conv_layers):
vec_dim -= (cl[1] - 1)
x = tf.layers.conv1d(x, cl[0], cl[1], activation=tf.nn.relu, name='Conv_%d' % i)
if not cl[2] is None:
vec_dim -= cl[2]
vec_dim //= cl[2]
vec_dim += 1
x = tf.layers.max_pooling1d(x, cl[2], cl[2], name='Pool_%d' % i)
vec_dim *= cl[0]
x = tf.reshape(x, [-1, vec_dim])
return x |
html/semantics/links/downloading-resources/resources/inspect-header.py | meyerweb/wpt | 14,668 | 11166155 | <filename>html/semantics/links/downloading-resources/resources/inspect-header.py
def main(request, response):
headers = [(b"Content-Type", b"text/plain")]
command = request.GET.first(b"cmd").lower()
test_id = request.GET.first(b"id")
header = request.GET.first(b"header")
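    # "put" stashes the value of the named request header under test_id; a later "get"
    # retrieves it and echoes it back as an "x-request-<header>" response header.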
if command == b"put":
request.server.stash.put(test_id, request.headers.get(header, b""))
elif command == b"get":
stashed_header = request.server.stash.take(test_id)
if stashed_header is not None:
headers.append((b"x-request-" + header, stashed_header))
else:
response.set_error(400, u"Bad Command")
return b"ERROR: Bad Command!"
return headers, b""
|
src/ostorlab/agent/message/proto/v2/report/event/start_process_agent_pb2.py | bbhunter/ostorlab | 113 | 11166160 | <reponame>bbhunter/ostorlab
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: start_process_agent.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='start_process_agent.proto',
package='',
syntax='proto2',
serialized_pb=_b(
'\n\x19start_process_agent.proto\"\x89\x01\n\x13start_process_agent\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12\x0e\n\x06source\x18\x02 \x02(\t\x12$\n\x07message\x18\x03 \x01(\t:\x13start_process_agent\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x16\n\x0e\x63orrelation_id\x18\x05 \x02(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_START_PROCESS_AGENT = _descriptor.Descriptor(
name='start_process_agent',
full_name='start_process_agent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scan_id', full_name='start_process_agent.scan_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='start_process_agent.source', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='start_process_agent.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("start_process_agent").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='start_process_agent.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='correlation_id', full_name='start_process_agent.correlation_id', index=4,
number=5, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=167,
)
DESCRIPTOR.message_types_by_name['start_process_agent'] = _START_PROCESS_AGENT
start_process_agent = _reflection.GeneratedProtocolMessageType('start_process_agent', (_message.Message,), dict(
DESCRIPTOR=_START_PROCESS_AGENT,
__module__='start_process_agent_pb2'
# @@protoc_insertion_point(class_scope:start_process_agent)
))
_sym_db.RegisterMessage(start_process_agent)
# @@protoc_insertion_point(module_scope)
|
pyxlib/xcomposite.py | helgeerbe/pi3d | 177 | 11166177 | <reponame>helgeerbe/pi3d
import ctypes
from xlib import Window, Display
libXcomposite = ctypes.CDLL('libXcomposite.so.1')
# void XCompositeRedirectSubwindows(Display *dpy, Window window, int update);
XCompositeRedirectSubwindows = libXcomposite.XCompositeRedirectSubwindows
XCompositeRedirectSubwindows.argtypes = [ctypes.POINTER(Display), Window, ctypes.c_int]
# Window XCompositeGetOverlayWindow(Display *dpy, Window window);
XCompositeGetOverlayWindow = libXcomposite.XCompositeGetOverlayWindow
XCompositeGetOverlayWindow.argtypes = [ctypes.POINTER(Display), Window]
XCompositeGetOverlayWindow.restype = Window
#define CompositeRedirectAutomatic 0
CompositeRedirectAutomatic = 0
#define CompositeRedirectManual 1
CompositeRedirectManual = 1
|
OnlineStudy/OnlineStudy/urls.py | NanRenTeam-9/MongoMicroCourse | 132 | 11166184 | <reponame>NanRenTeam-9/MongoMicroCourse
"""OnlineStudy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.static import serve
from django.conf import settings
from startX.serivce.v1 import site
from LoginAuth import views
urlpatterns = [
path('login/', views.login),
path('logout/', views.logout),
path('index/', views.index),
path('upload/',views.upload),
path('admin/', admin.site.urls),
    path('api/v1/', include(('generic.urls', 'generic'))),  # Core business APIs
    path('api/v1/auth/', include(('LoginAuth.urls', 'LoginAuth'))),  # Login authentication
    path('api/v1/pay/', include(('Alipay.urls', 'Alipay'))),  # Alipay payments
    path('blv/', include(('blwvideo.urls', 'blwvideo'))),  # Polyv encrypted video API
    path('backend/', site.urls),  # Backend admin management
re_path('media/(?P<path>.*)', serve, {'document_root': settings.MEDIA_ROOT}),
re_path(r'^account/trend/(?P<year>\d+)/$', views.account_trend, name='account_trend'),
re_path(r'^order/trend/(?P<year>\d+)/$', views.order_trend, name='order_trend'),
    re_path(r'^rbac/', include(('rbac.urls', 'rbac'))),  # RBAC permission configuration
]
|