max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
OpenCV/Colorization/colorize_image.py | AathmanT/cv-tricks.com | 326 | 11198601 | # Script is based on https://github.com/richzhang/colorization/blob/master/colorization/colorize.py
import numpy as np
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
    parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', default='colorization_deploy_v2.prototxt')
    parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', default='colorization_release_v2.caffemodel')
    parser.add_argument('--kernel', help='Path to pts_in_hull.npy', default='pts_in_hull.npy')
args = parser.parse_args()
return args
if __name__ == '__main__':
#Network input size
W_in = 224
H_in = 224
imshowSize = (640, 480)
args = parse_args()
# Create network graph and load weights
net = cv.dnn.readNetFromCaffe(args.prototxt, args.caffemodel)
# load cluster centers
pts_in_hull = np.load(args.kernel)
# populate cluster centers as 1x1 convolution kernel
pts_in_hull = pts_in_hull.transpose().reshape(2, 313, 1, 1)
net.getLayer(net.getLayerId('class8_ab')).blobs = [pts_in_hull.astype(np.float32)]
net.getLayer(net.getLayerId('conv8_313_rh')).blobs = [np.full([1, 313], 2.606, np.float32)]
# Read the input image in BGR format
frame=cv.imread(args.input)
#convert it to rgb format
frame= frame[:,:,[2, 1, 0]]
    # Normalize pixel values to the [0, 1] range
    img_rgb = (frame * 1.0 / 255).astype(np.float32)
#convert to Lab color space
img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)
# pull out L channel
img_l = img_lab[:,:,0]
(H_orig,W_orig) = img_rgb.shape[:2] # original image size
# resize image to network input size
img_rs = cv.resize(img_rgb, (W_in, H_in)) # resize image to network input size
img_lab_rs = cv.cvtColor(img_rs, cv.COLOR_RGB2Lab)
img_l_rs = img_lab_rs[:,:,0]
# subtract 50 for mean-centering
img_l_rs -= 50
# Set the input for forwarding through the openCV DNN module
net.setInput(cv.dnn.blobFromImage(img_l_rs))
#Inference on network
ab_dec = net.forward('class8_ab')[0,:,:,:].transpose((1,2,0)) # this is our result
    # Get the spatial dimensions of the network output
    (H_out,W_out) = ab_dec.shape[:2]
#Resize to original size
ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig))
# concatenate with original image i.e. L channel
img_lab_out = np.concatenate((img_l[:,:,np.newaxis],ab_dec_us),axis=2)
# convert to BGR space from Lab space
img_bgr_out = cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR)
# Clip and then rescale to 0-255
img_bgr_out = 255 * np.clip(img_bgr_out, 0, 1)
img_bgr_out = np.uint8(img_bgr_out)
    # convert the input frame back to BGR and concatenate it with the output for saving
    con = np.hstack([frame[:, :, [2, 1, 0]], img_bgr_out])
    cv.imwrite('out'+args.input, con)
|
janitor/functions/fill.py | vishalbelsare/pyjanitor | 225 | 11198604 | from enum import Enum
from operator import methodcaller
from typing import Hashable, Iterable, Union
import pandas as pd
import pandas_flavor as pf
from janitor.utils import check, check_column, deprecated_alias
from multipledispatch import dispatch
@pf.register_dataframe_method
def fill_direction(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
"""
Provide a method-chainable function for filling missing values
in selected columns.
It is a wrapper for `pd.Series.ffill` and `pd.Series.bfill`,
and pairs the column name with one of `up`, `down`, `updown`,
and `downup`.
Functional usage syntax:
```python
import pandas as pd
import janitor as jn
df = pd.DataFrame(...)
df = jn.fill_direction(
df = df,
column_1 = direction_1,
column_2 = direction_2,
)
```
Method-chaining usage syntax:
```python
import pandas as pd
import janitor as jn
df = pd.DataFrame(...)
.fill_direction(
column_1 = direction_1,
column_2 = direction_2,
)
```
:param df: A pandas DataFrame.
:param kwargs: Key - value pairs of columns and directions.
        Directions can be one of `down`, `up`, `updown`
        (fill up then down) or `downup` (fill down then up).
:returns: A pandas DataFrame with modified column(s).
:raises ValueError: if direction supplied is not one of `down`, `up`,
`updown`, or `downup`.
"""
if not kwargs:
return df
fill_types = {fill.name for fill in _FILLTYPE}
for column_name, fill_type in kwargs.items():
check("column_name", column_name, [str])
check("fill_type", fill_type, [str])
if fill_type.upper() not in fill_types:
raise ValueError(
"""
fill_type should be one of
up, down, updown, or downup.
"""
)
check_column(df, kwargs)
new_values = {}
for column_name, fill_type in kwargs.items():
direction = _FILLTYPE[f"{fill_type.upper()}"].value
if len(direction) == 1:
direction = methodcaller(direction[0])
output = direction(df[column_name])
else:
direction = [methodcaller(entry) for entry in direction]
output = _chain_func(df[column_name], *direction)
new_values[column_name] = output
return df.assign(**new_values)
def _chain_func(column: pd.Series, *funcs):
"""
Apply series of functions consecutively
to a Series.
https://blog.finxter.com/how-to-chain-multiple-function-calls-in-python/
"""
new_value = column.copy()
for func in funcs:
new_value = func(new_value)
return new_value
class _FILLTYPE(Enum):
"""List of fill types for fill_direction."""
UP = ("bfill",)
DOWN = ("ffill",)
UPDOWN = "bfill", "ffill"
DOWNUP = "ffill", "bfill"
@pf.register_dataframe_method
@deprecated_alias(columns="column_names")
def fill_empty(
df: pd.DataFrame, column_names: Union[str, Iterable[str], Hashable], value
) -> pd.DataFrame:
"""
Fill `NaN` values in specified columns with a given value.
Super sugary syntax that wraps `pandas.DataFrame.fillna`.
This method mutates the original DataFrame.
Functional usage syntax:
```python
df = fill_empty(df, column_names=[col1, col2], value=0)
```
Method chaining syntax:
```python
import pandas as pd
import janitor
df = pd.DataFrame(...).fill_empty(column_names=col1, value=0)
```
:param df: A pandas DataFrame.
    :param column_names: A column name or an iterable (list
or tuple) of column names. If a single column name is passed in, then
only that column will be filled; if a list or tuple is passed in, then
those columns will all be filled with the same value.
:param value: The value that replaces the `NaN` values.
:returns: A pandas DataFrame with `NaN` values filled.
"""
check_column(df, column_names)
return _fill_empty(df, column_names, value=value)
@dispatch(pd.DataFrame, (list, tuple))
def _fill_empty(df, column_names, value=None):
"""Fill empty function for the case that column_names is list or tuple."""
fill_mapping = {c: value for c in column_names}
return df.fillna(value=fill_mapping)
@dispatch(pd.DataFrame, str) # noqa: F811
def _fill_empty(df, column_names, value=None): # noqa: F811
"""Fill empty function for the case that column_names is a string."""
fill_mapping = {column_names: value}
return df.fillna(value=fill_mapping)
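# Illustrative sketch (not part of the original module): concrete calls to
# fill_direction and fill_empty. The frame below and its column names are
# invented purely for this demo.
if __name__ == "__main__":
    _df = pd.DataFrame({"a": [None, 1.0, None, 3.0], "b": [None, 20.0, 30.0, None]})
    print(_df.fill_direction(a="down", b="updown"))
    print(_df.fill_empty(column_names=["a", "b"], value=0))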
|
ev3dev2/wheel.py | TheVinhLuong102/ev3dev-lang-python | 306 | 11198660 |
#!/usr/bin/env python3
"""
Wheel and Rim classes
A great reference when adding new wheels is http://wheels.sariel.pl/
"""
from math import pi
class Wheel(object):
"""
A base class for various types of wheels, tires, etc. All units are in mm.
One scenario where one of the child classes below would be used is when the
user needs their robot to drive at a specific speed or drive for a specific
distance. Both of those calculations require the circumference of the wheel
of the robot.
Example:
.. code:: python
from ev3dev2.wheel import EV3Tire
tire = EV3Tire()
# calculate the number of rotations needed to travel forward 500 mm
rotations_for_500mm = 500 / tire.circumference_mm
"""
def __init__(self, diameter_mm, width_mm):
self.diameter_mm = float(diameter_mm)
self.width_mm = float(width_mm)
        self.circumference_mm = self.diameter_mm * pi
@property
def radius_mm(self):
return float(self.diameter_mm / 2)
class EV3Rim(Wheel):
"""
part number 56145
comes in set 31313
"""
def __init__(self):
Wheel.__init__(self, 30, 20)
class EV3Tire(Wheel):
"""
part number 44309
comes in set 31313
"""
def __init__(self):
Wheel.__init__(self, 43.2, 21)
class EV3EducationSetRim(Wheel):
"""
part number 56908
comes in set 45544
"""
def __init__(self):
Wheel.__init__(self, 43, 26)
class EV3EducationSetTire(Wheel):
"""
part number 41897
comes in set 45544
"""
def __init__(self):
Wheel.__init__(self, 56, 28)
|
research/setup.py | 873040/Abhishek | 153 | 11198707 | """Setup script for object_detection."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['Pillow>=1.0', 'Matplotlib>=2.1', 'Cython>=0.28.1']
setup(
name='object_detection',
version='0.1',
install_requires=REQUIRED_PACKAGES,
include_package_data=True,
packages=[p for p in find_packages() if p.startswith('object_detection')],
description='Tensorflow Object Detection Library',
)
|
src/ttkbootstrap/__init__.py | dmalves/ttkbootstrap | 406 | 11198729 | from ttkbootstrap.style import Style
from ttkbootstrap.style import Bootstyle
from ttkbootstrap.widgets import *
from ttkbootstrap.window import Window, Toplevel
from tkinter.scrolledtext import ScrolledText
from tkinter import Variable, StringVar, IntVar, BooleanVar, DoubleVar
from tkinter import Canvas, Menu, Text
from tkinter import PhotoImage
Bootstyle.setup_ttkbootstap_api()
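# Illustrative sketch (not part of the original package code): a minimal themed
# window. It assumes a display is available; the theme name and widget options
# are arbitrary choices for the demo.
if __name__ == "__main__":
    app = Window(themename="superhero")
    Button(app, text="Hello ttkbootstrap", bootstyle="success").pack(padx=20, pady=20)
    app.mainloop()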
|
falsy/loader/yaml.py | marco-souza/falsy | 127 | 11198743 |
import os
# import pprint
import yaml
class LoaderMeta(type):
def __new__(metacls, __name__, __bases__, __dict__):
"""Add include constructer to class."""
# register the include constructor on the class
cls = super().__new__(metacls, __name__, __bases__, __dict__)
cls.add_constructor('!include', cls.construct_include)
cls.add_constructor('include!', cls.construct_include)
return cls
class Loader(yaml.Loader, metaclass=LoaderMeta):
"""YAML Loader with `!include` constructor."""
def __init__(self, stream):
"""Initialise Loader."""
try:
self._root = os.path.split(stream.name)[0]
except AttributeError:
self._root = os.path.curdir
super().__init__(stream)
def construct_include(self, node):
"""Include file referenced at node."""
        filename = os.path.abspath(os.path.join(
            self._root, self.construct_scalar(node)
        ))
extension = os.path.splitext(filename)[1].lstrip('.')
with open(filename, 'r') as f:
if extension in ('yaml', 'yml'):
return yaml.load(f, Loader)
else:
return ''.join(f.readlines())
def load(filename):
with open(filename, 'r') as f:
data = yaml.load(f, Loader)
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(data)
return data
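# Illustrative sketch (not part of the original module): exercising the
# `!include` constructor. The directory and file contents are invented.
if __name__ == "__main__":
    import tempfile
    _tmpdir = tempfile.mkdtemp()
    with open(os.path.join(_tmpdir, "common.yml"), "w") as f:
        f.write("port: 8080\n")
    with open(os.path.join(_tmpdir, "main.yml"), "w") as f:
        f.write("server: !include common.yml\n")
    print(load(os.path.join(_tmpdir, "main.yml")))  # -> {'server': {'port': 8080}}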
|
src/yolo_utils.py | greck2908/BMW-YOLOv3-Training-Automation | 192 | 11198766 | import os
import re
import json
import subprocess
from pathlib import Path
from typing import Optional
from tensorboardX import SummaryWriter
current_map: Optional[float] = None
current_iteration: int = 0
tensorboard_writer: bool = False
working_dir: Optional[Path] = None
summaries_path: Optional[Path] = None
status_path: Optional[Path] = None
pid_path: Optional[Path] = None
# Create summary writer for tensorboard
writer: SummaryWriter = SummaryWriter(flush_secs=1)
def get_prediction(
darknet_path: str,
dot_data_path: str,
yolo_cfg_path: str,
weights_path: str,
image: str,
) -> None:
command: list = [
darknet_path,
"detector",
"test",
dot_data_path,
yolo_cfg_path,
weights_path,
image,
"-dont_show",
]
with open(os.devnull, "w") as DEVNULL:
subprocess.call(command, stderr=DEVNULL, stdout=DEVNULL)
def update_arg(tensorboard: bool, working_directory: str) -> None:
global tensorboard_writer
tensorboard_writer = tensorboard
global working_dir
working_dir = working_directory
global summaries_path
summaries_path = working_dir / "summaries.txt"
global status_path
status_path = working_dir / "status.txt"
global pid_path
pid_path = working_dir / "pid.txt"
def update_summary_map(summary_map: float) -> int:
global current_map
global current_iteration
current_map = summary_map
return current_iteration
# Define the summary of the yolo output in the needed format needed by the API
def define_summary(
current_iter: str,
total_loss: str,
avg_loss_error: str,
current_lr: str,
total_time: str,
nb_images: str,
) -> dict:
global current_map
global current_iteration
if current_iter:
current_iteration = int(current_iter)
else:
current_iteration += 1
result: dict = {
"current_training_iteration": str(current_iteration),
"total_loss": total_loss,
"average_loss_error": avg_loss_error,
"current_learning_rate": current_lr,
"total_time": total_time,
"number_of_images": nb_images,
"mAP": current_map,
}
return result
# Define the status of the yolo output in the needed format needed by the API
def define_status(
iou: str,
_cls: str,
region: str,
avg_iou: str,
avg_giou: str,
_class: str,
obj: str,
no_obj: str,
R5: str,
R75: str,
count: str,
) -> dict:
result: dict = {
"normalizer": {"iou": iou, "cls": _cls},
"region {}".format(region): {"avg": {"iou": avg_iou, "giou": avg_giou}},
"class": _class,
"obj": obj,
"no_obj": no_obj,
".5R": R5,
".75R": R75,
"count": count,
}
return result
def process_output(line):
    summary: list = re.findall(
        r"(\d+)(: )(\d+\.\d+)(, )(\d+\.\d+)( avg loss, )(\d+\.\d+)( rate, )(\d+\.\d+)( seconds, )(\d+)( images *\n*)",
        line,
    )
    summary_map: list = re.findall(
        r"(mean_average_precision \([email protected]\) *= *)([0-9]*\.[0-9]*)", line
    )
    status: list = re.findall(
        r"(.*iou: *)(\d+\.\d+)(, cls: )(\d+\.\d+)(\) Region *)(\d+)( Avg \(IOU: )(\d+\.\d+)(, *GIOU: )(\d+\.\d+)(\), Class: *)(\d+\.\d+)(, Obj: )(\d+\.\d+)(, No Obj: )(\d+\.\d+)(, .5R: )(\d+\.\d+)(, .75R: )(\d+\.\d+)(, count: )(\d+)",
        line,
    )
training_ended: list = re.findall("Saving weights to .*_final.weights", line)
global tensorboard_writer
global working_dir
global writer
if summary:
last_summary: list = summary[0]
result: dict = define_summary(
current_iter=last_summary[0],
total_loss=last_summary[2],
avg_loss_error=last_summary[4],
current_lr=last_summary[6],
total_time=last_summary[8],
nb_images=last_summary[10],
)
if tensorboard_writer:
writer.add_scalar(
"loss",
float(result["total_loss"]),
int(result["current_training_iteration"]),
)
with open(summaries_path, "a") as summaries_file:
summaries_file.write("{}\n".format(json.dumps(result)))
if status:
last_status: list = status[0]
result: dict = define_status(
iou=last_status[1],
_cls=last_status[3],
region=last_status[5],
avg_iou=last_status[7],
avg_giou=last_status[9],
_class=last_status[11],
obj=last_status[13],
no_obj=last_status[15],
R5=last_status[17],
R75=last_status[19],
count=last_status[21],
)
with open(status_path, "a") as status_file:
status_file.write("{}\n".format(json.dumps(result)))
if summary_map:
summary_map = summary_map[0][1]
iteration_nb: int = update_summary_map(summary_map)
if tensorboard_writer:
writer.add_scalar("mAP", float(summary_map), int(iteration_nb))
if training_ended:
with open(pid_path, "w") as pid:
pid.write("Done")
|
test_project/core/migrations/0003_book_similar_books.py | violuke/djangoql | 807 | 11198783 |
# Generated by Django 2.1.2 on 2018-11-11 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_book_genre'),
]
operations = [
migrations.AddField(
model_name='book',
name='similar_books',
field=models.ManyToManyField(to='core.Book', blank=True),
),
]
|
models/SelectionGAN/semantic_synthesis/models/networks/generator.py | xianjian-xie/pose-generation | 445 | 11198798 |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer
from models.networks.architecture import ResnetBlock as ResnetBlock
from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock
class SPADEGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
parser.add_argument('--num_upsampling_layers',
choices=('normal', 'more', 'most'), default='normal',
help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator")
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
nf = opt.ngf
self.sw, self.sh = self.compute_latent_vector_size(opt)
if opt.use_vae:
# In case of VAE, we will sample from random z vector
self.fc = nn.Linear(opt.z_dim, 16 * nf * self.sw * self.sh)
else:
# Otherwise, we make the network deterministic by starting with
# downsampled segmentation map instead of random z
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == 'most':
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3*10, 3, padding=1)
self.conv_att = nn.Conv2d(final_nc, 10, kernel_size=1, stride=1, padding=0)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == 'normal':
num_up_layers = 5
elif opt.num_upsampling_layers == 'more':
num_up_layers = 6
elif opt.num_upsampling_layers == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
opt.num_upsampling_layers)
sw = opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
x = self.head_0(x, seg)
x = self.up(x)
x = self.G_middle_0(x, seg)
if self.opt.num_upsampling_layers == 'more' or \
self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.G_middle_1(x, seg)
x = self.up(x)
x = self.up_0(x, seg)
x = self.up(x)
x = self.up_1(x, seg)
x = self.up(x)
x = self.up_2(x, seg)
x = self.up(x)
x = self.up_3(x, seg)
if self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.up_4(x, seg)
# x = self.conv_img(F.leaky_relu(x, 2e-1))
# x = F.tanh(x)
image = self.conv_img(F.leaky_relu(x, 2e-1))
image = F.tanh(image)
        attention = self.conv_att(F.leaky_relu(x, 2e-1))
        attention = torch.nn.Softmax(dim=1)(attention)
        # split the 30-channel output into ten 3-channel candidate images and the
        # 10-channel attention tensor into ten single-channel maps, then combine
        # them as an attention-weighted sum (same computation as the unrolled form)
        outputs = []
        for i in range(10):
            image_i = image[:, 3 * i:3 * (i + 1), :, :]
            attention_i = attention[:, i:i + 1, :, :].repeat(1, 3, 1, 1)
            outputs.append(image_i * attention_i)
        final = sum(outputs)
return final
class Pix2PixHDGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--resnet_n_downsample', type=int, default=4, help='number of downsampling layers in netG')
parser.add_argument('--resnet_n_blocks', type=int, default=9, help='number of residual blocks in the global generator network')
parser.add_argument('--resnet_kernel_size', type=int, default=3,
help='kernel size of the resnet block')
parser.add_argument('--resnet_initial_kernel_size', type=int, default=7,
help='kernel size of the first convolution')
parser.set_defaults(norm_G='instance')
return parser
def __init__(self, opt):
super().__init__()
input_nc = opt.label_nc + (1 if opt.contain_dontcare_label else 0) + (0 if opt.no_instance else 1)
norm_layer = get_nonspade_norm_layer(opt, opt.norm_G)
activation = nn.ReLU(False)
model = []
# initial conv
model += [nn.ReflectionPad2d(opt.resnet_initial_kernel_size // 2),
norm_layer(nn.Conv2d(input_nc, opt.ngf,
kernel_size=opt.resnet_initial_kernel_size,
padding=0)),
activation]
# downsample
mult = 1
for i in range(opt.resnet_n_downsample):
model += [norm_layer(nn.Conv2d(opt.ngf * mult, opt.ngf * mult * 2,
kernel_size=3, stride=2, padding=1)),
activation]
mult *= 2
# resnet blocks
for i in range(opt.resnet_n_blocks):
model += [ResnetBlock(opt.ngf * mult,
norm_layer=norm_layer,
activation=activation,
kernel_size=opt.resnet_kernel_size)]
# upsample
for i in range(opt.resnet_n_downsample):
nc_in = int(opt.ngf * mult)
nc_out = int((opt.ngf * mult) / 2)
model += [norm_layer(nn.ConvTranspose2d(nc_in, nc_out,
kernel_size=3, stride=2,
padding=1, output_padding=1)),
activation]
mult = mult // 2
# final output conv
model += [nn.ReflectionPad2d(3),
nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0),
nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, z=None):
return self.model(input)
|
dashlivesim/dashlib/sessionid.py | Dash-Industry-Forum/dash-live-source-simulator | 133 | 11198854 |
"""Session IDs to allow for tracing sessions."""
import random
MAX_NUMBER = 2**32 - 1
def generate_session_id():
"Generate a session ID as hex string."
return "%08x" % random.randint(0, MAX_NUMBER)
|
crossplane/errors.py | Triple-Z/crossplane | 525 | 11198865 | # -*- coding: utf-8 -*-
class NgxParserBaseException(Exception):
def __init__(self, strerror, filename, lineno):
self.args = (strerror, filename, lineno)
self.filename = filename
self.lineno = lineno
self.strerror = strerror
def __str__(self):
if self.lineno is not None:
return '%s in %s:%s' % self.args
        else:
            return '%s in %s' % self.args[:2]
class NgxParserSyntaxError(NgxParserBaseException):
pass
class NgxParserDirectiveError(NgxParserBaseException):
pass
class NgxParserDirectiveArgumentsError(NgxParserDirectiveError):
pass
class NgxParserDirectiveContextError(NgxParserDirectiveError):
pass
class NgxParserDirectiveUnknownError(NgxParserDirectiveError):
pass
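# Illustrative sketch (not part of the original module): how the base exception
# formats its message with and without a line number. The file name and
# messages are invented.
if __name__ == "__main__":
    print(NgxParserSyntaxError("unexpected end of file", "nginx.conf", 42))
    print(NgxParserDirectiveUnknownError('unknown directive "foo"', "nginx.conf", None))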
|
tests/test_imap_utf7.py | thepeshka/imap_tools | 344 | 11198903 | import unittest
from imap_tools import imap_utf7
class ImapUtf7Test(unittest.TestCase):
data = (
('Test', b'Test'),
('Test One more', b'Test One more'),
('Might & Magic', b'Might &- Magic'),
('Might & magic', b'Might &- magic'),
('Imap&\xffworld', b'Imap&-&AP8-world'),
('\xff\xfe\xfd\xfc', b'&AP8A,gD9APw-'),
('\x00', b'&AAA-'),
('hello, <NAME> 你好,成龙', b'hello, <NAME> &T2BZff8MYhCfmQ-'), # RFC-2060
('str \t\n\r\f\vwhitespace \t\n\r\f\v', b'str &AAkACgANAAwACw-whitespace &AAkACgANAAwACw-')
)
def test_encode(self):
for string, code in self.data:
self.assertEqual(imap_utf7.encode(string), code)
def test_decode(self):
for string, code in self.data:
self.assertEqual(string, imap_utf7.decode(code))
def test_printable_chars(self):
for code in range(32, 127):
if code == 38: # &
continue
self.assertEqual(bytes((code,)), imap_utf7.encode(chr(code)))
self.assertEqual(chr(code), imap_utf7.decode(bytes((code,))))
self.assertEqual(imap_utf7.encode('&'), b'&-')
self.assertEqual(imap_utf7.decode(b'&-'), '&')
|
example/with_keras.py | IcyW/stagesepx | 369 | 11198904 |
"""
classify with keras model
"""
from keras.models import Sequential
from stagesepx.cutter import VideoCutter
from stagesepx.classifier.keras import KerasClassifier
from stagesepx.reporter import Reporter
from stagesepx.video import VideoObject
video_path = "../demo.mp4"
video = VideoObject(video_path)
video.load_frames()
# --- cutter ---
cutter = VideoCutter()
res = cutter.cut(video)
stable, unstable = res.get_range()
data_home = res.pick_and_save(stable, 10)
# --- classify ---
# We recommend that you read the code (KerasClassifier) directly for better understanding
# and you can actually build your own Classifier based on it
class NewKerasClassifier(KerasClassifier):
def create_model(self) -> Sequential:
# overwrite this method to design your own model structure!
# model = Sequential()
# ...
pass
def train(self, data_path: str, *_, **__):
# ...
pass
# or use the default one
# and then init it
# epochs=1 is just a example
cl = KerasClassifier(epochs=1)
# train model and save weights
cl.train(data_home)
cl.save_model("keras_model.h5")
# you would better reuse the trained model for less time cost
# keras model takes much more time than SVM
# cl.load_model("keras_model.h5")
classify_result = cl.classify(video, stable, keep_data=True)
result_dict = classify_result.to_dict()
# --- draw ---
r = Reporter()
r.draw(classify_result)
|
wouso/core/ui.py | ruxandraS/wouso | 117 | 11198906 | from django.db import models
from wouso.core.game.models import Game
class BlockLibrary(object):
def __init__(self):
self.parts = {}
def get_blocks(self):
return self.parts.keys()
def get_block(self, key, context):
block = self.parts.get(key, '')
if callable(block):
return block(context)
return block
def add(self, key, callback):
self.parts[key] = callback
_libraries = {}
def get_library(library):
global _libraries
if not _libraries.get(library, None):
_libraries[library] = BlockLibrary()
return _libraries[library]
def get_sidebar():
return get_library('sidebar')
def get_header():
return get_library('header')
def get_footer():
return get_library('footer')
def register_block(library, name, callback):
lib = get_library(library)
lib.add(name, callback)
def register_sidebar_block(name, callback):
return register_block('sidebar', name, callback)
def register_header_link(name, callback):
return register_block('header', name, callback)
def register_footer_link(name, callback):
return register_block('footer', name, callback)
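# Illustrative sketch (not part of the original module): registering a sidebar
# block and rendering it. The block name and context are invented; running this
# file directly assumes a configured Django environment for the imports above.
if __name__ == "__main__":
    register_sidebar_block("greeting", lambda context: "Hello %s" % context.get("user", "world"))
    print(get_sidebar().get_blocks())  # dict_keys(['greeting'])
    print(get_sidebar().get_block("greeting", {"user": "wouso"}))  # Hello wouso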
|
server/gunicorn_config.py | huhansan666666/flask_reddit | 461 | 11198915 | # Refer to the following link for help:
# http://docs.gunicorn.org/en/latest/settings.html
command = '/home/lucas/www/reddit.lucasou.com/reddit-env/bin/gunicorn'
pythonpath = '/home/lucas/www/reddit.lucasou.com/reddit-env/flask_reddit'
bind = '127.0.0.1:8040'
workers = 1
user = 'lucas'
accesslog = '/home/lucas/logs/reddit.lucasou.com/gunicorn-access.log'
errorlog = '/home/lucas/logs/reddit.lucasou.com/gunicorn-error.log'
|
axcell/errors.py | Kabongosalomon/axcell | 335 | 11198959 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
class PipelineError(Exception):
pass
class UnpackError(PipelineError):
pass
class LatexConversionError(PipelineError):
pass
|
LeetCode_problems/Running Sum of 1d Array/solution.py | gbrls/CompetitiveCode | 165 | 11198964 |
from typing import List
class Solution:
def runningSum(self, nums: List[int]) -> List[int]:
# declare array to return
output = list()
# compute running sum for each index
for i in range(len(nums)):
output.append(sum(nums[:i+1]))
return output
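# Illustrative sketch (not part of the original solution): a quick local check.
if __name__ == "__main__":
    print(Solution().runningSum([1, 2, 3, 4]))  # [1, 3, 6, 10]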
|
exercises/fr/exc_01_09.py | Jette16/spacy-course | 2,085 | 11199003 | import spacy
nlp = spacy.load("fr_core_news_sm")
text = "Le constructeur Citröen présente la e-Méhari Courrèges au public."
# Process the text
doc = ____
# Iterate over the entities
for ____ in ____.____:
    # Print the entity text and its label
    print(____.____, ____.____)
# Get the span for "e-Méhari Courrèges"
e_mehari_courreges = ____
# Print the span text
print("Missing entity:", e_mehari_courreges.text)
|
numpyro/nn/masked_dense.py | karm-patel/numpyro | 1,394 | 11199012 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from jax import random
from jax.nn.initializers import glorot_normal, normal
import jax.numpy as jnp
def MaskedDense(mask, bias=True, W_init=glorot_normal(), b_init=normal()):
"""
    As in jax.example_libraries.stax, each layer constructor function returns
    an (init_fun, apply_fun) pair, where `init_fun` takes an rng key and
    an input shape and returns an (output_shape, params) pair, and
    `apply_fun` takes params, inputs, and an rng key and applies the layer.
    :param array mask: Mask of shape (input_dim, out_dim) applied to the weights of the layer.
    :param bool bias: whether to include bias term.
    :param array W_init: initialization method for the weights.
    :param array b_init: initialization method for the bias terms.
    :return: an (`init_fun`, `apply_fun`) pair.
"""
def init_fun(rng_key, input_shape):
k1, k2 = random.split(rng_key)
W = W_init(k1, mask.shape)
if bias:
b = b_init(k2, mask.shape[-1:])
params = (W, b)
else:
params = W
return input_shape[:-1] + mask.shape[-1:], params
def apply_fun(params, inputs, **kwargs):
if bias:
W, b = params
return jnp.dot(inputs, W * mask) + b
else:
W = params
return jnp.dot(inputs, W * mask)
return init_fun, apply_fun
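# Illustrative sketch (not part of the original module): using the returned
# (init_fun, apply_fun) pair. The mask, shapes and inputs are arbitrary.
if __name__ == "__main__":
    mask = jnp.tril(jnp.ones((3, 3)))  # lower-triangular, autoregressive-style mask
    init_fun, apply_fun = MaskedDense(mask)
    out_shape, params = init_fun(random.PRNGKey(0), (4, 3))
    y = apply_fun(params, jnp.ones((4, 3)))
    print(out_shape, y.shape)  # (4, 3) (4, 3)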
|
alipay/aop/api/domain/AlipayCommerceDataSendModel.py | snowxmas/alipay-sdk-python-all | 213 | 11199015 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceDataSendModel(object):
def __init__(self):
self._channel = None
self._op_code = None
self._op_data = None
self._scene_code = None
self._scene_data = None
self._target_id = None
self._target_id_type = None
self._version = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def op_code(self):
return self._op_code
@op_code.setter
def op_code(self, value):
self._op_code = value
@property
def op_data(self):
return self._op_data
@op_data.setter
def op_data(self, value):
self._op_data = value
@property
def scene_code(self):
return self._scene_code
@scene_code.setter
def scene_code(self, value):
self._scene_code = value
@property
def scene_data(self):
return self._scene_data
@scene_data.setter
def scene_data(self, value):
self._scene_data = value
@property
def target_id(self):
return self._target_id
@target_id.setter
def target_id(self, value):
self._target_id = value
@property
def target_id_type(self):
return self._target_id_type
@target_id_type.setter
def target_id_type(self, value):
self._target_id_type = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
def to_alipay_dict(self):
params = dict()
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.op_code:
if hasattr(self.op_code, 'to_alipay_dict'):
params['op_code'] = self.op_code.to_alipay_dict()
else:
params['op_code'] = self.op_code
if self.op_data:
if hasattr(self.op_data, 'to_alipay_dict'):
params['op_data'] = self.op_data.to_alipay_dict()
else:
params['op_data'] = self.op_data
if self.scene_code:
if hasattr(self.scene_code, 'to_alipay_dict'):
params['scene_code'] = self.scene_code.to_alipay_dict()
else:
params['scene_code'] = self.scene_code
if self.scene_data:
if hasattr(self.scene_data, 'to_alipay_dict'):
params['scene_data'] = self.scene_data.to_alipay_dict()
else:
params['scene_data'] = self.scene_data
if self.target_id:
if hasattr(self.target_id, 'to_alipay_dict'):
params['target_id'] = self.target_id.to_alipay_dict()
else:
params['target_id'] = self.target_id
if self.target_id_type:
if hasattr(self.target_id_type, 'to_alipay_dict'):
params['target_id_type'] = self.target_id_type.to_alipay_dict()
else:
params['target_id_type'] = self.target_id_type
if self.version:
if hasattr(self.version, 'to_alipay_dict'):
params['version'] = self.version.to_alipay_dict()
else:
params['version'] = self.version
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceDataSendModel()
if 'channel' in d:
o.channel = d['channel']
if 'op_code' in d:
o.op_code = d['op_code']
if 'op_data' in d:
o.op_data = d['op_data']
if 'scene_code' in d:
o.scene_code = d['scene_code']
if 'scene_data' in d:
o.scene_data = d['scene_data']
if 'target_id' in d:
o.target_id = d['target_id']
if 'target_id_type' in d:
o.target_id_type = d['target_id_type']
if 'version' in d:
o.version = d['version']
return o
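# Illustrative sketch (not part of the original module): round-tripping the
# model through its dict form. Field values are invented placeholders.
if __name__ == "__main__":
    model = AlipayCommerceDataSendModel()
    model.scene_code = "demo_scene"
    model.target_id = "demo_target"
    restored = AlipayCommerceDataSendModel.from_alipay_dict(model.to_alipay_dict())
    print(restored.scene_code, restored.target_id)  # demo_scene demo_target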
|
openbook_auth/views/users/serializers.py | TamaraAbells/okuna-api | 164 | 11199023 |
from rest_framework import serializers
from django.conf import settings
from openbook_auth.models import UserProfile, User
from openbook_auth.validators import username_characters_validator, user_username_exists
from openbook_circles.models import Circle
from openbook_common.models import Badge, Emoji
from openbook_common.serializers_fields.user import FollowersCountField, FollowingCountField, UserPostsCountField, \
IsFollowingField, IsConnectedField, IsFullyConnectedField, ConnectedCirclesField, FollowListsField, \
IsPendingConnectionConfirmation, IsBlockedField, IsUserReportedField, IsFollowedField, \
AreNewPostNotificationsEnabledForUserField, IsPendingFollowRequestApproval, IsFollowRequested
from openbook_lists.models import List
class GetUserUserProfileBadgeSerializer(serializers.ModelSerializer):
class Meta:
model = Badge
fields = (
'keyword',
'keyword_description'
)
class GetUserUserListEmojiSerializer(serializers.ModelSerializer):
class Meta:
model = Emoji
fields = (
'id',
'image',
'keyword'
)
class GetUserUserListSerializer(serializers.ModelSerializer):
emoji = GetUserUserListEmojiSerializer(many=False)
class Meta:
model = List
fields = (
'id',
'name',
'emoji'
)
class GetUserSerializer(serializers.Serializer):
username = serializers.CharField(max_length=settings.USERNAME_MAX_LENGTH,
allow_blank=False,
validators=[username_characters_validator, user_username_exists],
required=True)
class GetUserUserProfileSerializer(serializers.ModelSerializer):
badges = GetUserUserProfileBadgeSerializer(many=True)
class Meta:
model = UserProfile
fields = (
'name',
'avatar',
'location',
'cover',
'bio',
'url',
'badges'
)
class GetUserUserCircleSerializer(serializers.ModelSerializer):
class Meta:
model = Circle
fields = (
'id',
'name',
'color',
'users_count'
)
class GetUserUserListEmojiSerializer(serializers.ModelSerializer):
class Meta:
model = Emoji
fields = (
'id',
'image',
'keyword'
)
class GetUserUserListSerializer(serializers.ModelSerializer):
emoji = GetUserUserListEmojiSerializer(many=False)
class Meta:
model = List
fields = (
'id',
'name',
'emoji'
)
class GetUserUserSerializer(serializers.ModelSerializer):
profile = GetUserUserProfileSerializer(many=False)
followers_count = FollowersCountField()
following_count = FollowingCountField()
is_following = IsFollowingField()
is_followed = IsFollowedField()
are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForUserField()
is_connected = IsConnectedField()
is_fully_connected = IsFullyConnectedField()
connected_circles = ConnectedCirclesField(circle_serializer=GetUserUserCircleSerializer)
follow_lists = FollowListsField(list_serializer=GetUserUserListSerializer)
is_pending_connection_confirmation = IsPendingConnectionConfirmation()
is_pending_follow_request_approval = IsPendingFollowRequestApproval()
is_follow_requested = IsFollowRequested()
is_reported = IsUserReportedField()
class Meta:
model = User
fields = (
'id',
'username',
'profile',
'followers_count',
'following_count',
'is_following',
'is_followed',
'is_follow_requested',
'are_new_post_notifications_enabled',
'is_connected',
'is_reported',
'is_fully_connected',
'connected_circles',
'follow_lists',
'date_joined',
'is_pending_connection_confirmation',
'is_pending_follow_request_approval',
'visibility'
)
class LegacyGetUserUserSerializer(serializers.ModelSerializer):
profile = GetUserUserProfileSerializer(many=False)
followers_count = FollowersCountField()
following_count = FollowingCountField()
posts_count = UserPostsCountField()
is_following = IsFollowingField()
is_followed = IsFollowedField()
is_follow_requested = IsFollowRequested()
are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForUserField()
is_connected = IsConnectedField()
is_fully_connected = IsFullyConnectedField()
connected_circles = ConnectedCirclesField(circle_serializer=GetUserUserCircleSerializer)
follow_lists = FollowListsField(list_serializer=GetUserUserListSerializer)
is_pending_connection_confirmation = IsPendingConnectionConfirmation()
is_pending_follow_request_approval = IsPendingFollowRequestApproval()
is_reported = IsUserReportedField()
class Meta:
model = User
fields = (
'id',
'username',
'profile',
'followers_count',
'following_count',
'posts_count',
'is_following',
'is_followed',
'is_follow_requested',
'are_new_post_notifications_enabled',
'is_connected',
'is_reported',
'is_fully_connected',
'connected_circles',
'follow_lists',
'date_joined',
'is_pending_connection_confirmation',
'is_pending_follow_request_approval',
'visibility'
)
class GetBlockedUserSerializer(serializers.ModelSerializer):
is_blocked = IsBlockedField()
is_following = IsFollowingField()
are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForUserField()
is_connected = IsConnectedField()
is_fully_connected = IsFullyConnectedField()
class Meta:
model = User
fields = (
'id',
'is_blocked',
'is_following',
'are_new_post_notifications_enabled',
'is_connected',
'is_fully_connected'
)
class SearchUsersSerializer(serializers.Serializer):
query = serializers.CharField(max_length=settings.SEARCH_QUERIES_MAX_LENGTH, required=True)
count = serializers.IntegerField(
required=False,
max_value=10
)
class SearchUsersUserProfileBadgeSerializer(serializers.ModelSerializer):
class Meta:
model = Badge
fields = (
'keyword',
'keyword_description'
)
class SearchUsersUserProfileSerializer(serializers.ModelSerializer):
badges = SearchUsersUserProfileBadgeSerializer(many=True)
class Meta:
model = UserProfile
fields = (
'id',
'avatar',
'name',
'badges'
)
class SearchUsersUserSerializer(serializers.ModelSerializer):
profile = SearchUsersUserProfileSerializer(many=False)
is_following = IsFollowingField()
are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForUserField()
is_connected = IsConnectedField()
class Meta:
model = User
fields = (
'id',
'profile',
'username',
'is_following',
'are_new_post_notifications_enabled',
'is_connected',
'visibility',
)
class SubscribeToUserNewPostNotificationsUserSerializer(serializers.ModelSerializer):
are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForUserField()
class Meta:
model = User
fields = (
'id',
'are_new_post_notifications_enabled',
)
class GetUserPostsCountUserSerializer(serializers.ModelSerializer):
posts_count = UserPostsCountField()
class Meta:
model = User
fields = (
'id',
'posts_count',
)
|
data_structures/Queue/python/queue_using_stack.py | avi-pal/al-go-rithms | 1,253 | 11199024 | # Using the given stack implementation
# Done in the directory data_structures/Stack/Python/Stack.py
class Stack(object):
def __init__(self, limit = 10):
# Initialize stack as empty array
self.stack = []
self.limit = limit
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
if len(self.stack)<self.limit:
self.stack.append(element)
# Return the last element of the stack array (without removing it).
def peek(self):
return self.stack[-1]
# Return whether the stack is empty or not.
def __bool__(self):
return len(self.stack) != 0
# Return the size of the stack
def size(self):
return len(self.stack)
# Return a string representation for the stack
def __str__(self):
return str(self.stack)
# Return whether the stack is empty or not.
def is_empty(self):
return len(self.stack) == 0
class Queue:
"""Represents a Queue (FIFO) data structure"""
def __init__(self, limit = 10):
self.primary_stack = Stack(limit)
self.secondary_stack = Stack(limit)
self.limit = limit
def __bool__(self):
return bool(self.primary_stack)
def __str__(self):
return str(self.primary_stack)
    def enqueue(self, data):
        """Add an element to the end of queue."""
        if self.is_full():
            raise OverflowError("enqueue on a full queue")
        while not self.primary_stack.is_empty():
            self.secondary_stack.push(self.primary_stack.pop())
        self.primary_stack.push(data)
        while not self.secondary_stack.is_empty():
            self.primary_stack.push(self.secondary_stack.pop())
    def dequeue(self):
        """Remove and return the element at the front of the queue."""
        if self.is_empty():
            raise IndexError("dequeue from an empty queue")
        return self.primary_stack.pop()
def front(self):
"""Return the value of the element in front of the queue."""
return self.primary_stack.peek()
def is_empty(self):
"""Check if the queue is empty."""
return not bool(self.primary_stack)
def is_full(self):
return self.size() == self.limit
def size(self):
"""Return the size of the queue."""
return self.primary_stack.size()
    def __contains__(self, data: int):
        """Check if item is in the queue."""
        return data in self.primary_stack.stack
def test_queue() -> None:
queue = Queue(10)
assert queue.is_empty() is True
assert queue.is_full() is False
assert bool(queue) is False
assert str(queue) == "[]"
try:
queue.dequeue()
assert False # This should not happen
except:
assert True # This should happen
try:
queue.front()
assert False # This should not happen
except IndexError:
assert True # This should happen
for i in range(10):
assert queue.size() == i
queue.enqueue(i)
assert queue.is_full() is True
assert queue.is_empty() is False
assert queue.dequeue() == 0
assert queue.front() == 1
queue.enqueue(100)
assert str(queue) == str([100, 9, 8, 7, 6, 5, 4, 3, 2, 1])
try:
queue.enqueue(0)
assert False # This should not happen
except:
assert True # This should happen
assert queue.is_empty() is False
assert queue.size() == 10
if __name__ == "__main__":
test_queue() |
bcs-ui/backend/components/bcs_api.py | laodiu/bk-bcs | 599 | 11199033 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Dict
from django.conf import settings
from requests import PreparedRequest
from requests.auth import AuthBase
from .base import BaseHttpClient, BkApiClient, ComponentAuth, update_url_parameters
class BcsApiConfig:
"""BcsApi 系统配置对象,为 Client 提供地址等信息"""
def __init__(self, host: str):
self.host = host
        # BCS API endpoint URLs
self.query_cluster_id_url = f"{host}/{{env_name}}/rest/clusters/bcs/query_by_id/"
self.get_cluster_credentials_url = f"{host}/{{env_name}}/rest/clusters/{{bcs_cluster_id}}/client_credentials"
class BcsApiAuth(AuthBase):
"""用于调用 bcs-api 系统的鉴权对象"""
def __init__(self, access_token: str):
self.access_token = access_token
def __call__(self, r: PreparedRequest):
        # Read the service-wide access token from settings and put it into the request headers
auth_token = getattr(settings, "BCS_AUTH_TOKEN", "")
r.headers['Authorization'] = auth_token
r.headers['Content-Type'] = 'application/json'
        # Append access_token to the GET query parameters
r.url = update_url_parameters(r.url, {'access_token': self.access_token})
return r
class BcsApiClient(BkApiClient):
"""访问 BCS API 服务的 Client 对象
:param auth: 包含校验信息的对象
API 方法常用请求参数说明
===
:param env_name: 集群环境,比如 stag/prod
:param project_id: 项目 ID
:param cluster_id: 集群 ID
:param bcs_cluster_id: 集群在 BCS 系统中的唯一 ID
"""
def __init__(self, auth: ComponentAuth):
self._config = BcsApiConfig(host=settings.BCS_API_PRE_URL)
self._client = BaseHttpClient(BcsApiAuth(auth.access_token))
def query_cluster_id(self, env_name: str, project_id: str, cluster_id: str) -> str:
"""查询集群在 BCS-Api 中的 ID
:returns: 集群 ID 字符串
"""
url = self._config.query_cluster_id_url.format(env_name=env_name)
resp = self._client.request_json(
'GET', url, params={'project_id': project_id, 'cluster_id': cluster_id}, raise_for_status=False
)
return resp['id']
def get_cluster_credentials(self, env_name: str, bcs_cluster_id: str) -> Dict:
"""
        Get the credentials (e.g. certificate, user_token, server_address_path) needed to access the cluster apiserver
        :returns: dict containing the cluster credentials
"""
url = self._config.get_cluster_credentials_url.format(env_name=env_name, bcs_cluster_id=bcs_cluster_id)
return self._client.request_json('GET', url, raise_for_status=False)
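# Illustrative sketch (not part of the original module): a typical call flow.
# It assumes Django settings (BCS_API_PRE_URL, BCS_AUTH_TOKEN) are configured,
# that ComponentAuth accepts the access token as its first argument, and that
# the placeholder identifiers below are replaced with real values.
if __name__ == "__main__":
    client = BcsApiClient(ComponentAuth("fake-access-token"))
    bcs_cluster_id = client.query_cluster_id(
        env_name="prod", project_id="fake-project-id", cluster_id="BCS-K8S-00000"
    )
    credentials = client.get_cluster_credentials("prod", bcs_cluster_id)
    print(credentials)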
|
tests/python/twitter/common/dirutil/test_dirutil.py | zhouyijiaren/commons | 1,143 | 11199045 |
import atexit
import os
import tempfile
from twitter.common import dirutil
import mox
import pytest
def test_mkdtemp_setup_teardown():
m = mox.Mox()
def faux_cleaner():
pass
DIR1, DIR2 = 'fake_dir1__does_not_exist', 'fake_dir2__does_not_exist'
m.StubOutWithMock(atexit, 'register')
m.StubOutWithMock(os, 'getpid')
m.StubOutWithMock(tempfile, 'mkdtemp')
m.StubOutWithMock(dirutil, 'safe_rmtree')
atexit.register(faux_cleaner) # ensure only called once
tempfile.mkdtemp(dir='1').AndReturn(DIR1)
tempfile.mkdtemp(dir='2').AndReturn(DIR2)
os.getpid().MultipleTimes().AndReturn('unicorn')
dirutil.safe_rmtree(DIR1)
dirutil.safe_rmtree(DIR2)
# make sure other "pids" are not cleaned
dirutil._MKDTEMP_DIRS['fluffypants'].add('yoyo')
try:
m.ReplayAll()
assert dirutil.safe_mkdtemp(dir='1', cleaner=faux_cleaner) == DIR1
assert dirutil.safe_mkdtemp(dir='2', cleaner=faux_cleaner) == DIR2
assert 'unicorn' in dirutil._MKDTEMP_DIRS
assert dirutil._MKDTEMP_DIRS['unicorn'] == set([DIR1, DIR2])
dirutil._mkdtemp_atexit_cleaner()
assert 'unicorn' not in dirutil._MKDTEMP_DIRS
assert dirutil._MKDTEMP_DIRS['fluffypants'] == set(['yoyo'])
finally:
dirutil._MKDTEMP_DIRS.pop('unicorn', None)
dirutil._MKDTEMP_DIRS.pop('fluffypants', None)
dirutil._mkdtemp_unregister_cleaner()
m.UnsetStubs()
m.VerifyAll()
|
Training/MOOC Tensorflow 2.0/BeiDa/class2/p19_mse.py | church06/Pythons | 177 | 11199046 | import tensorflow as tf
import numpy as np
SEED = 23455
rdm = np.random.RandomState(seed=SEED)  # generates random numbers in [0, 1)
x = rdm.rand(32, 2)
y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in x]  # add noise: [0,1)/10=[0,0.1); [0,0.1)-0.05=[-0.05,0.05)
x = tf.cast(x, dtype=tf.float32)
w1 = tf.Variable(tf.random.normal([2, 1], stddev=1, seed=1))
epoch = 15000
lr = 0.002
for epoch in range(epoch):
with tf.GradientTape() as tape:
y = tf.matmul(x, w1)
loss_mse = tf.reduce_mean(tf.square(y_ - y))
grads = tape.gradient(loss_mse, w1)
w1.assign_sub(lr * grads)
if epoch % 500 == 0:
print("After %d training steps,w1 is " % (epoch))
print(w1.numpy(), "\n")
print("Final w1 is: ", w1.numpy())
|
tests/check_byte_order_marker_test.py | christhekeele/pre-commit-hooks | 2,864 | 11199055 |
from pre_commit_hooks import check_byte_order_marker
def test_failure(tmpdir):
f = tmpdir.join('f.txt')
f.write_text('ohai', encoding='utf-8-sig')
assert check_byte_order_marker.main((str(f),)) == 1
def test_success(tmpdir):
f = tmpdir.join('f.txt')
f.write_text('ohai', encoding='utf-8')
assert check_byte_order_marker.main((str(f),)) == 0
|
utils/CopyJniFile.py | RookieTerry/520apkhook | 390 | 11199072 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import shutil
def forceMergeFlatDir(srcDir, dstDir):
if not os.path.exists(dstDir):
os.makedirs(dstDir)
for item in os.listdir(srcDir):
srcFile = os.path.join(srcDir, item)
dstFile = os.path.join(dstDir, item)
forceCopyFile(srcFile, dstFile)
def forceCopyFile (sfile, dfile):
if os.path.isfile(sfile):
shutil.copy2(sfile, dfile)
def isAFlatDir(sDir):
for item in os.listdir(sDir):
sItem = os.path.join(sDir, item)
if os.path.isdir(sItem):
return False
return True
def CopyTree(src, dst):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isfile(s):
if not os.path.exists(dst):
os.makedirs(dst)
forceCopyFile(s,d)
if os.path.isdir(s):
isRecursive = not isAFlatDir(s)
if isRecursive:
CopyTree(s, d)
else:
forceMergeFlatDir(s, d)
if __name__ == '__main__':
CopyTree('../libs/ShellDexDir/jni_lib', '../WorkDir/apkfile/apk/lib') |
wolframclient/evaluation/cloud/__init__.py | jldohmann/WolframClientForPython | 358 | 11199099 |
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.evaluation.cloud.asynccloudsession import (
WolframAPICallAsync,
WolframCloudAsyncSession,
)
from wolframclient.evaluation.cloud.base import SecuredAuthenticationKey, UserIDPassword
from wolframclient.evaluation.cloud.cloudsession import WolframAPICall, WolframCloudSession
from wolframclient.evaluation.cloud.server import WolframServer
__all__ = [
"WolframServer",
"WolframCloudSession",
"WolframAPICall",
"SecuredAuthenticationKey",
"UserIDPassword",
"WolframAPICallAsync",
"WolframCloudAsyncSession",
]
|
physt/io/root.py | janpipek/physt | 123 | 11199100 | """ROOT format I/O
See also
--------
- https://github.com/scikit-hep/uproot
- https://root.cern.ch
"""
import os
from typing import Optional
import uproot3
from physt.histogram_base import HistogramBase
def write_root(histogram: HistogramBase, hfile: uproot3.write.TFile.TFileUpdate, name: str):
"""Write histogram to an open ROOT file.
Parameters
----------
histogram : Any histogram
hfile : Updateable uproot file object
name : The name of the histogram inside the file
"""
hfile[name] = histogram
def save_root(histogram: HistogramBase, path: str, name: Optional[str] = None):
"""Write histogram to a (new) ROOT file.
Parameters
----------
histogram : Any histogram
path: path for the output file (perhaps should not exist?)
name : The name of the histogram inside the file
"""
if name is None:
name = histogram.name or histogram.title or repr(histogram)
if os.path.isfile(path):
# TODO: Not supported currently
hfile = uproot3.write.TFile.TFileUpdate(path)
else:
hfile = uproot3.write.TFile.TFileCreate(path)
write_root(histogram, hfile, name)
hfile.close()
|
tests/test_app.py | smallwat3r/shhh | 243 | 11199105 | import json
import re
import unittest
from datetime import datetime, timedelta
from http import HTTPStatus
from types import SimpleNamespace
from urllib.parse import urlparse
import responses
from flask import url_for
from shhh.entrypoint import create_app
from shhh.extensions import db, scheduler
from shhh.models import Entries
from shhh.scheduler import tasks
class Parse(SimpleNamespace):
"""Nested dicts to use dot notation for clarity in tests."""
def __init__(self, dictionary, **kwargs):
super().__init__(**kwargs)
for k, v in dictionary.items():
if isinstance(v, dict):
self.__setattr__(k, Parse(v))
else:
self.__setattr__(k, v)
class TestApplication(unittest.TestCase):
"""Flask application testing."""
@classmethod
def setUpClass(cls):
cls.app = create_app(env="testing")
cls.db = db
cls.db.app = cls.app
cls.db.create_all()
cls.scheduler = scheduler
cls.scheduler.app = cls.app
@classmethod
def tearDownClass(cls):
cls.db.drop_all()
cls.scheduler.shutdown(wait=False)
def setUp(self):
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
for table in reversed(self.db.metadata.sorted_tables):
self.db.session.execute(table.delete())
# Mock responses from haveibeenpwned.
responses.add(
responses.GET,
re.compile(r"^(https:\/\/api\.pwnedpasswords\.com\/range\/836BA).*"),
body=(
"BDDC66080E01D52B8272AA9461C69EE0496:12145\n" # Hello123
"00d4f6e8fa6eecad2a3aa415eec418d38ec:2"
),
)
responses.add(
responses.GET,
re.compile(r"^(https:\/\/api\.pwnedpasswords\.com\/)(?!.*836BA).*"),
body=(
"BDDC66080E01D52B8272AA9461C69EE0496:12145\n" # Hello123
"00d4f6e8fa6eecad2a3aa415eec418d38ec:2"
),
)
def tearDown(self):
self.db.session.rollback()
self.app_context.pop()
def test_scheduler_setup(self):
jobs = self.scheduler.get_jobs()
# Test named scheduled task.
self.assertEqual(jobs[0].name, "delete_expired_links")
# Test task will run before next minute.
scheduled = jobs[0].next_run_time.strftime("%Y-%m-%d %H:%M:%S")
next_minute = (datetime.now() + timedelta(minutes=1)).strftime("%Y-%m-%d %H:%M:%S")
self.assertTrue(scheduled <= next_minute)
def test_scheduler_job(self):
# Temporarily pause the scheduler.
self.scheduler.pause_job("delete_expired_links")
# Add a dummy secret in database with an expired expiry date.
slug = "z6HNg2dCcvvaOXli1z3x"
encrypted_text = (
b"nKir73XhgyXxjwYyCG-QHQABhqCAAAAAAF6rPvPYX7OYFZRTzy"
b"PdIwvdo2SFwAN0VXrfosL54nGHr0MN1YtyoNjx4t5Y6058lFvDH"
b"zsnv_Q1KaGFL6adJgLLVreOZ9kt5HpwnEe_Lod5Or85Ig=="
)
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
expired_date = datetime.strptime(now, "%Y-%m-%d %H:%M:%S") - timedelta(days=1)
Entries.create(
slug_link=slug,
encrypted_text=encrypted_text,
date_created=now,
date_expires=expired_date,
)
# Run scheduler task.
tasks.delete_expired_links()
# Check that the secret has been deleted from the database.
link = Entries.query.filter_by(slug_link=slug).first()
self.assertIsNone(link)
# Resume the scheduler.
self.scheduler.resume_job("delete_expired_links")
def test_repr_entry(self):
slug = "z6HNg2dCcvvaOXli1z3x"
encrypted_text = (
b"nKir73XhgyXxjwYyCG-QHQABhqCAAAAAAF6rPvPYX7OYFZRTzy"
b"PdIwvdo2SFwAN0VXrfosL54nGHr0MN1YtyoNjx4t5Y6058lFvDH"
b"zsnv_Q1KaGFL6adJgLLVreOZ9kt5HpwnEe_Lod5Or85Ig=="
)
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
expired_date = datetime.strptime(now, "%Y-%m-%d %H:%M:%S") - timedelta(days=1)
entry = Entries.create(
slug_link=slug,
encrypted_text=encrypted_text,
date_created=now,
date_expires=expired_date,
)
self.assertEqual(repr(entry), f"<Entry {slug}>")
def test_views(self):
with self.app.test_request_context(), self.client as c:
# 200
self.assertEqual(c.get(url_for("views.create")).status_code, HTTPStatus.OK.value)
r = c.get("/robots.txt")
r.close() # avoids unclosed file warning.
self.assertEqual(r.status_code, HTTPStatus.OK.value)
self.assertEqual(
c.get(url_for("views.read", slug="fK6YTEVO2bvOln7pHOFi")).status_code,
HTTPStatus.OK.value,
)
self.assertEqual(
c.get(
f"{url_for('views.created')}?link=https://shhh-encrypt.herokuapp.com/r/"
"z6HNg2dCcvvaOXli1z3x&expires_on=2020-05-01%20"
"at%2022:28%20UTC"
).status_code,
HTTPStatus.OK.value,
)
# 302
self.assertEqual(
c.get(f"{url_for('views.created')}?link=only").status_code, HTTPStatus.FOUND.value
)
self.assertEqual(
c.get(f"{url_for('views.created')}?expires_on=only").status_code,
HTTPStatus.FOUND.value,
)
self.assertEqual(
c.get(f"{url_for('views.created')}?link=only&other=only").status_code,
HTTPStatus.FOUND.value,
)
# 404
self.assertEqual(c.get("/read").status_code, HTTPStatus.NOT_FOUND.value)
self.assertEqual(c.get("/donotexists").status_code, HTTPStatus.NOT_FOUND.value)
@responses.activate
def test_api_post_missing_all(self):
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret")).get_data())
# Test response request status and error details.
r = Parse(response)
self.assertEqual(r.response.status, "error")
@responses.activate
def test_api_post_too_much_days(self):
payload = {
"secret": "secret message",
"passphrase": "<PASSWORD>",
"expires": "12d",
}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Test response request status and error details.
r = Parse(response)
self.assertEqual(r.response.status, "error")
@responses.activate
def test_api_post_wrong_formats(self):
payload = {"secret": 1, "passphrase": 1, "expire": "do not exists"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Test response request status and error details.
r = Parse(response)
self.assertEqual(r.response.status, "error")
@responses.activate
def test_api_post_missing_passphrase(self):
payload = {"secret": "secret message"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Test response request status and error details.
r = Parse(response)
self.assertEqual(r.response.status, "error")
@responses.activate
def test_api_post_missing_secret(self):
payload = {"passphrase": "<PASSWORD>"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Test response request status and error details.
r = Parse(response)
self.assertEqual(r.response.status, "error")
@responses.activate
def test_api_post_passphrase_pwned(self):
payload = {"passphrase": "<PASSWORD>"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Test response request status and error details.
r = Parse(response)
self.assertEqual(r.response.status, "error")
def test_api_post_haveibeenpwned_not_reachable(self):
payload = {
"secret": "secret message",
"passphrase": "<PASSWORD>",
"haveibeenpwned": True,
}
with self.app.test_request_context(), self.client as c:
with responses.RequestsMock() as rsps:
rsps.add(
responses.GET,
re.compile(r"^(https:\/\/api\.pwnedpasswords\.com\/).*"),
body=Exception,
)
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# haveibeenpwned wasn't reachable, but secret still created if it has
# all mandatory requirements.
r = Parse(response)
self.assertEqual(r.response.status, "created")
@responses.activate
def test_api_post_dont_check_haveibeenpwned(self):
payload = {
"secret": "secret message",
"passphrase": "<PASSWORD>", # This passphrase has been pwned in the mock
"haveibeenpwned": False,
}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Secret created without check from haveibeenpwned.
r = Parse(response)
self.assertEqual(r.response.status, "created")
@responses.activate
def test_api_post_check_haveibeenpwned_success(self):
payload = {
"secret": "secret message",
"passphrase": "<PASSWORD>",
"haveibeenpwned": True,
}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Secret created with check from haveibeenpwned.
r = Parse(response)
self.assertEqual(r.response.status, "created")
@responses.activate
def test_api_post_check_haveibeenpwned_failed(self):
payload = {
"secret": "secret message",
"passphrase": "<PASSWORD>", # This passphrase has been pwned in the mock
"haveibeenpwned": True,
}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
# Secret not created.
r = Parse(response)
self.assertEqual(r.response.status, "error")
@responses.activate
def test_api_post_weak_passphrase(self):
# Weak passphrase.
payload = {"secret": "secret message", "passphrase": "<PASSWORD>"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
r = Parse(response)
self.assertEqual(r.response.status, "error")
# Long but all lowercase and no numbers.
payload = {"secret": "secret message", "passphrase": "<PASSWORD>"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
r = Parse(response)
self.assertEqual(r.response.status, "error")
# Uppercase, lowercase, numbers, but too short.
payload = {"secret": "secret message", "passphrase": "<PASSWORD>"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
r = Parse(response)
self.assertEqual(r.response.status, "error")
# Long with numbers, but no uppercase.
payload = {"secret": "secret message", "passphrase": "<PASSWORD>"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
r = Parse(response)
self.assertEqual(r.response.status, "error")
@responses.activate
def test_api_post_created(self):
payload = {"secret": "secret message", "passphrase": "<PASSWORD>", "expire": "3d"}
with self.app.test_request_context(), self.client as c:
response = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
r = Parse(response)
            # Test secret has been created and the expiry date is correct.
self.assertEqual(r.response.status, "created")
self.assertEqual(
r.response.expires_on.split(" at ")[0],
(datetime.now() + timedelta(days=3)).strftime("%B %d, %Y"),
)
# Test the link generated is using the test SHHH_HOST variable
hostname = urlparse(r.response.link).netloc
self.assertEqual(hostname, "test.test")
# Test all fields in the response are correct.
for field in ("status", "details", "slug", "link", "expires_on"):
self.assertIn(field, r.response.__dict__.keys())
# Test the slug link has been saved in the database.
slug = r.response.slug
link = Entries.query.filter_by(slug_link=slug).first()
self.assertEqual(link.slug_link, slug)
@responses.activate
def test_api_get_wrong_passphrase(self):
payload = {"secret": "secret message", "passphrase": "<PASSWORD>"}
with self.app.test_request_context(), self.client as c:
post = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
response = json.loads(
c.get(
f"{url_for('api.secret')}?slug={post['response']['slug']}&passphrase=wrong"
).get_data()
)
# Test passphrase is invalid.
r = Parse(response)
self.assertEqual(r.response.status, "invalid")
@responses.activate
def test_api_get_exceeded_tries(self):
payload = {
"secret": "secret message",
"passphrase": "<PASSWORD>",
"tries": 3,
}
with self.app.test_request_context(), self.client as c:
post = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
slug = post["response"]["slug"]
for _ in range(payload["tries"]):
response = json.loads(
c.get(
f"{url_for('api.secret')}?slug={post['response']['slug']}&passphrase=wrong"
).get_data()
)
r = Parse(response)
self.assertEqual(r.response.status, "invalid")
            # Secret has been deleted from the database as the number of tries has been exceeded.
link = Entries.query.filter_by(slug_link=slug).first()
self.assertIsNone(link)
def test_api_get_wrong_slug(self):
with self.app.test_request_context(), self.client as c:
response = json.loads(
c.get(f"{url_for('api.secret')}?slug=hello&passphrase=wrong").get_data()
)
            # Test slug doesn't exist.
r = Parse(response)
self.assertEqual(r.response.status, "expired")
@responses.activate
def test_api_get_decrypt_secret(self):
message, passphrase = "<PASSWORD>", "<PASSWORD>"
payload = {"secret": message, "passphrase": passphrase}
with self.app.test_request_context(), self.client as c:
post = json.loads(c.post(url_for("api.secret"), json=payload).get_data())
slug = post["response"]["slug"]
response = json.loads(
c.get(f"{url_for('api.secret')}?slug={slug}&passphrase={passphrase}").get_data()
)
r = Parse(response)
# Test if status of the request is correct.
self.assertEqual(r.response.status, "success")
# Test if message has been decrypted correctly.
self.assertEqual(r.response.msg, message)
# Test if secret has been deleted in database.
link = Entries.query.filter_by(slug_link=slug).first()
self.assertIsNone(link)
if __name__ == "__main__":
unittest.main()
|
tests/test_gltf.py | thachdo/trimesh | 1,882 | 11199107 | <reponame>thachdo/trimesh
try:
from . import generic as g
except BaseException:
import generic as g
# Khronos' official file validator
# can be installed with the helper script:
# `trimesh/docker/builds/gltf_validator.bash`
_gltf_validator = g.find_executable('gltf_validator')
def validate_glb(data):
"""
Run the Khronos validator on GLB files using
subprocess.
Parameters
------------
data : bytes
GLB export
Raises
------------
ValueError
If Khronos validator reports errors.
"""
# subprocess options not in old python
if g.PY_VER < (3, 7):
return
if _gltf_validator is None:
g.log.warning('no gltf_validator!')
return
with g.tempfile.NamedTemporaryFile(suffix='.glb') as f:
f.write(data)
f.flush()
# run the khronos gltf-validator
report = g.subprocess.run(
[_gltf_validator, f.name, '-o'],
capture_output=True)
# -o prints JSON to stdout
content = report.stdout.decode('utf-8')
if report.returncode != 0:
# log the whole error report
g.log.error(content)
raise ValueError('Khronos GLTF validator error!')
# log the GLTF validator report if
# there are any warnings or hints
decode = g.json.loads(content)
if any(decode['issues'][i] > 0 for i in
['numWarnings', 'numInfos', 'numHints']):
g.log.warning(content)
class GLTFTest(g.unittest.TestCase):
def test_duck(self):
scene = g.get_mesh('Duck.glb', process=False)
# should have one mesh
assert len(scene.geometry) == 1
# get the mesh
geom = next(iter(scene.geometry.values()))
# vertex normals should have been loaded
assert 'vertex_normals' in geom._cache.cache
# should not be watertight
assert not geom.is_volume
# make sure export doesn't crash
export = scene.export(file_type='glb')
validate_glb(export)
# check a roundtrip
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(scene, reloaded)
# if we merge ugly it should now be watertight
geom.merge_vertices(
merge_tex=True, merge_norm=True)
assert geom.is_volume
def test_buffer_dedupe(self):
scene = g.trimesh.Scene()
box_1 = g.trimesh.creation.box()
box_2 = g.trimesh.creation.box()
box_3 = g.trimesh.creation.box()
box_3.visual.face_colors = [0, 255, 0, 255]
tm = g.trimesh.transformations.translation_matrix
scene.add_geometry(
box_1, 'box_1',
transform=tm((1, 1, 1)))
scene.add_geometry(
box_2, 'box_2',
transform=tm((-1, -1, -1)))
scene.add_geometry(
box_3, 'box_3',
transform=tm((-1, 20, -1)))
a = g.json.loads(scene.export(
file_type='gltf')['model.gltf'].decode('utf-8'))
assert len(a['buffers']) <= 3
def test_tex_export(self):
# load textured PLY
mesh = g.get_mesh('fuze.ply')
assert hasattr(mesh.visual, 'uv')
# make sure export as GLB doesn't crash on scenes
export = mesh.scene().export(file_type='glb')
validate_glb(export)
# make sure it works on meshes
export = mesh.export(file_type='glb')
validate_glb(export)
def test_cesium(self):
        # A GLTF with a multi-primitive mesh
s = g.get_mesh('CesiumMilkTruck.glb')
# should be one Trimesh object per GLTF "primitive"
assert len(s.geometry) == 4
# every geometry displayed once, except wheels twice
assert len(s.graph.nodes_geometry) == 5
# make sure export doesn't crash
export = s.export(file_type='glb')
validate_glb(export)
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(s, reloaded)
def test_units(self):
# Trimesh will store units as a GLTF extra if they
        # are defined, so check that.
original = g.get_mesh('pins.glb')
        # export it as a GLB file
export = original.export(file_type='glb')
validate_glb(export)
kwargs = g.trimesh.exchange.gltf.load_glb(
g.trimesh.util.wrap_as_stream(export))
# roundtrip it
reloaded = g.trimesh.exchange.load.load_kwargs(kwargs)
# make basic assertions
g.scene_equal(original, reloaded)
# make assertions on original and reloaded
for scene in [original, reloaded]:
# units should be stored as an extra
assert scene.units == 'mm'
# make sure we have two unique geometries
assert len(scene.geometry) == 2
# that should have seven instances
assert len(scene.graph.nodes_geometry) == 7
# all meshes should be well constructed
assert all(m.is_volume for m in
scene.geometry.values())
# check unit conversions for fun
extents = scene.extents.copy()
as_in = scene.convert_units('in')
# should all be exactly mm -> in conversion factor
assert g.np.allclose(
extents / as_in.extents, 25.4, atol=.001)
m = g.get_mesh('testplate.glb')
assert m.units == 'meters'
def test_basic(self):
# split a multibody mesh into a scene
scene = g.trimesh.scene.split_scene(
g.get_mesh('cycloidal.ply'))
        # should be at least 117 geometries
assert len(scene.geometry) >= 117
# a dict with {file name: str}
export = scene.export(file_type='gltf')
# load from just resolver
r = g.trimesh.load(file_obj=None,
file_type='gltf',
resolver=export)
# will assert round trip is roughly equal
g.scene_equal(r, scene)
# try loading from a ZIP archive
zipped = g.trimesh.util.compress(export)
r = g.trimesh.load(
file_obj=g.trimesh.util.wrap_as_stream(zipped),
file_type='zip')
# try loading from a file name
# will require a file path resolver
with g.TemporaryDirectory() as d:
for file_name, data in export.items():
with open(g.os.path.join(d, file_name), 'wb') as f:
f.write(data)
# load from file path of header GLTF
rd = g.trimesh.load(
g.os.path.join(d, 'model.gltf'))
# will assert round trip is roughly equal
g.scene_equal(rd, scene)
def test_merge_buffers(self):
# split a multibody mesh into a scene
scene = g.trimesh.scene.split_scene(
g.get_mesh('cycloidal.ply'))
# export a gltf with the merge_buffers option set to true
export = scene.export(file_type='gltf', merge_buffers=True)
# We should end up with a single .bin and scene.gltf
assert len(export.keys()) == 2
# reload the export
reloaded = g.trimesh.exchange.load.load_kwargs(
g.trimesh.exchange.gltf.load_gltf(
file_obj=None,
resolver=g.trimesh.visual.resolvers.ZipResolver(export)))
# check to make sure the geometry keys are the same
assert set(reloaded.geometry.keys()) == set(scene.geometry.keys())
def test_merge_primitives(self):
# test to see if the `merge_primitives` logic is working
a = g.get_mesh('CesiumMilkTruck.glb')
assert len(a.geometry) == 4
# should combine the multiple primitives into a single mesh
b = g.get_mesh(
'CesiumMilkTruck.glb', merge_primitives=True)
assert len(b.geometry) == 2
def test_write_dir(self):
# try loading from a file name
# will require a file path resolver
original = g.get_mesh('fuze.obj')
assert isinstance(original, g.trimesh.Trimesh)
s = original.scene()
with g.TemporaryDirectory() as d:
path = g.os.path.join(d, 'heyy.gltf')
s.export(file_obj=path)
r = g.trimesh.load(path)
assert isinstance(r, g.trimesh.Scene)
assert len(r.geometry) == 1
m = next(iter(r.geometry.values()))
assert g.np.isclose(original.area, m.area)
def test_merge_primitives_materials(self):
# test to see if the `merge_primitives` logic is working
a = g.get_mesh('rgb_cube_with_primitives.gltf',
merge_primitives=True)
assert len(a.geometry['Cube'].visual.material) == 3
# what the face materials should be
truth = [0, 0, 0, 0, 1, 1,
1, 1, 2, 2, 2, 2]
assert g.np.allclose(
a.geometry['Cube'].visual.face_materials,
truth)
# make sure copying did the things correctly
c = a.copy()
assert g.np.allclose(
c.geometry['Cube'].visual.face_materials,
truth)
def test_merge_primitives_materials_roundtrip(self):
# test to see if gltf loaded with `merge_primitives`
# and then exported back
# to gltf, produces a valid gltf.
a = g.get_mesh('rgb_cube_with_primitives.gltf',
merge_primitives=True)
result = a.export(file_type='gltf', merge_buffers=True)
with g.TemporaryDirectory() as d:
for file_name, data in result.items():
with open(g.os.path.join(d, file_name), 'wb') as f:
f.write(data)
rd = g.trimesh.load(
g.os.path.join(d, 'model.gltf'), merge_primitives=True)
assert isinstance(rd, g.trimesh.Scene)
# will assert round trip is roughly equal
# TODO : restore
# g.scene_equal(rd, a)
def test_optional_camera(self):
gltf_cameras_key = 'cameras'
# if there's no camera in the scene, then it shouldn't be added to the gltf
box = g.trimesh.creation.box([1, 1, 1])
scene = g.trimesh.Scene(box)
export = scene.export(file_type='gltf')
assert gltf_cameras_key not in g.json.loads(export['model.gltf'].decode('utf8'))
# `scene.camera` creates a camera if it does not exist.
# once in the scene, it should be added to the gltf.
box = g.trimesh.creation.box([1, 1, 1])
scene = g.trimesh.Scene(box)
scene.set_camera()
export = scene.export(file_type='gltf')
assert gltf_cameras_key in g.json.loads(export['model.gltf'].decode('utf8'))
def test_gltf_pole(self):
scene = g.get_mesh('simple_pole.glb')
# should have multiple primitives
assert len(scene.geometry) == 11
export = scene.export(file_type='glb')
validate_glb(export)
# check a roundtrip
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make basic assertions
g.scene_equal(scene, reloaded)
def test_material_hash(self):
# load mesh twice independently
a = g.get_mesh('fuze.obj')
b = g.get_mesh('fuze.obj')
# move one of the meshes away from the other
a.apply_translation([a.scale, 0, 0])
# materials should not be the same object
assert id(a.visual.material) != id(b.visual.material)
# despite being loaded separately material hash should match
assert hash(a.visual.material) == hash(b.visual.material)
# create a scene with two meshes
scene = g.trimesh.Scene([a, b])
# get the exported GLTF header of a scene with both meshes
header = g.json.loads(scene.export(
file_type='gltf')['model.gltf'].decode('utf-8'))
# header should contain exactly one material
assert len(header['materials']) == 1
# both meshes should be contained in the export
assert len(header['meshes']) == 2
# get a reloaded version
export = scene.export(file_type='glb')
validate_glb(export)
reloaded = g.trimesh.load(
file_obj=g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# meshes should have survived
assert len(reloaded.geometry) == 2
# get meshes back
ar, br = reloaded.geometry.values()
# should have been loaded as a PBR material
assert isinstance(ar.visual.material,
g.trimesh.visual.material.PBRMaterial)
# materials should have the same memory location
assert id(ar.visual.material) == id(br.visual.material)
# make sure hash is returning something
ahash = hash(ar.visual.material)
# should be returning valid material hashes
assert isinstance(ahash, int)
assert ahash != 0
def test_node_name(self):
# Test to see if node names generally survive
# an export-import cycle.
# a scene
s = g.get_mesh('cycloidal.3DXML')
# export as GLB then re-load
export = s.export(file_type='glb')
validate_glb(export)
r = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make sure we have the same geometries before and after
assert set(s.geometry.keys()) == set(r.geometry.keys())
# make sure the node names are the same before and after
assert (set(s.graph.nodes_geometry) ==
set(r.graph.nodes_geometry))
def test_nested_scale(self):
# nested transforms with scale
s = g.get_mesh('nested.glb')
assert len(s.graph.nodes_geometry) == 3
assert g.np.allclose(
[[-1.16701, -2.3366, -0.26938],
[0.26938, 1., 0.26938]],
s.bounds, atol=1e-4)
def test_schema(self):
# get a copy of the GLTF schema and do simple checks
s = g.trimesh.exchange.gltf.get_schema()
# make sure it has at least the keys we expect
assert set(s['properties'].keys()).issuperset(
{'accessors',
'animations',
'asset',
'buffers',
'bufferViews',
'cameras',
'images',
'materials',
'meshes',
'nodes',
'samplers',
'scene',
'scenes',
'skins',
'textures',
'extensions',
'extras'})
# lightly check to see that no references exist
assert '$ref' not in g.json.dumps(s)
def test_export_custom_attributes(self):
# Write and read custom vertex attributes to gltf
sphere = g.trimesh.primitives.Sphere()
v_count, _ = sphere.vertices.shape
sphere.vertex_attributes['_CustomFloat32Scalar'] = g.np.random.rand(
v_count, 1).astype(
g.np.float32)
sphere.vertex_attributes['_CustomUIntScalar'] = g.np.random.randint(
0, 1000, size=(v_count, 1)
).astype(g.np.uintc)
sphere.vertex_attributes['_CustomFloat32Vec3'] = g.np.random.rand(
v_count, 3).astype(g.np.float32)
sphere.vertex_attributes['_CustomFloat32Mat4'] = g.np.random.rand(
v_count, 4, 4).astype(g.np.float32)
# export as GLB then re-load
export = sphere.export(file_type='glb')
validate_glb(export)
r = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
for _, val in r.geometry.items():
assert set(
val.vertex_attributes.keys()) == set(
sphere.vertex_attributes.keys())
for key in val.vertex_attributes:
is_same = g.np.array_equal(
val.vertex_attributes[key],
sphere.vertex_attributes[key])
assert is_same is True
def test_extras(self):
# if GLTF extras are defined, make sure they survive a round trip
s = g.get_mesh('cycloidal.3DXML')
# some dummy data
dummy = {'who': 'likes cheese', 'potatoes': 25}
# export as GLB with extras passed to the exporter then re-load
s.metadata = dummy
export = s.export(file_type='glb')
validate_glb(export)
r = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make sure extras survived a round trip
assert all(r.metadata[k] == v
for k, v in dummy.items())
def test_extras_nodes(self):
test_metadata = {
'test_str': 'test_value',
'test_int': 1,
'test_float': 0.123456789,
'test_bool': True,
'test_array': [1, 2, 3],
'test_dict': {'a': 1, 'b': 2}}
sphere1 = g.trimesh.primitives.Sphere(radius=1.0)
sphere1.metadata.update(test_metadata)
sphere2 = g.trimesh.primitives.Sphere(radius=2.0)
sphere2.metadata.update(test_metadata)
tf1 = g.trimesh.transformations.translation_matrix([0, 0, -2])
tf2 = g.trimesh.transformations.translation_matrix([5, 5, 5])
s = g.trimesh.scene.Scene()
s.add_geometry(
sphere1,
node_name="Sphere1",
geom_name="Geom Sphere1",
transform=tf1)
s.add_geometry(sphere2,
node_name="Sphere2",
geom_name="Geom Sphere2",
parent_node_name="Sphere1",
transform=tf2)
# Test extras appear in the exported model nodes
files = s.export(None, "gltf")
gltf_data = files["model.gltf"]
assert 'test_value' in gltf_data.decode('utf8')
# Check node extras survive a round trip
export = s.export(file_type='glb')
validate_glb(export)
r = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
files = r.export(None, "gltf")
gltf_data = files["model.gltf"]
assert 'test_value' in gltf_data.decode('utf8')
edge = r.graph.transforms.edge_data[("world", "Sphere1")]
assert g.np.allclose(edge['matrix'], tf1)
# all geometry should be the same
assert set(r.geometry.keys()) == set(s.geometry.keys())
for mesh in r.geometry.values():
# metadata should have all survived
assert all(mesh.metadata[k] == v
for k, v in test_metadata.items())
def test_read_scene_extras(self):
# loads a glb with scene extras
scene = g.get_mesh('monkey.glb', process=False)
# expected data
check = {'name': 'monkey', 'age': 32, 'height': 0.987}
meta = scene.metadata
for key in check:
            # check key existence and value
assert key in meta
assert meta[key] == check[key]
def test_load_empty_nodes(self):
# loads a glb with no meshes
scene = g.get_mesh('empty_nodes.glb', process=False)
# expected data
check = {"parent": [[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]],
"children_1": [[1.0, 0.0, 0.0, -5.0],
[0.0, 1.0, 0.0, 5.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]],
"children_2": [[1.0, 0.0, 0.0, 5.0],
[0.0, 1.0, 0.0, 5.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]]}
# get the scene nodes
objs = scene.graph.to_flattened()
# check number
assert len(objs) == 3
for key in check:
assert key in objs
assert objs[key]["transform"] == check[key]
def test_same_name(self):
s = g.get_mesh('TestScene.gltf')
# hardcode correct bounds to check against
bounds = g.np.array([[-5., -1.82578002, -5.],
[5., 1.86791301, 5.]])
# icosahedrons have two primitives each
print(len(s.geometry), len(s.graph.nodes_geometry))
assert len(s.graph.nodes_geometry) == 9
assert len(s.geometry) == 7
assert g.np.allclose(s.bounds, bounds, atol=1e-3)
# if merged should have combined the icosahedrons
s = g.get_mesh('TestScene.gltf', merge_primitives=True)
assert len(s.graph.nodes_geometry) == 7
assert len(s.geometry) == 6
assert g.np.allclose(s.bounds, bounds, atol=1e-3)
def test_vertex_colors(self):
# get a mesh with face colors
m = g.get_mesh('machinist.XAML')
# export as GLB then re-import
export = m.export(file_type='glb')
validate_glb(export)
r = next(iter(
g.trimesh.load(g.trimesh.util.wrap_as_stream(
export),
file_type='glb').geometry.values()))
# original mesh should have vertex colors
assert m.visual.kind == 'face'
assert m.visual.vertex_colors.ptp(axis=0).ptp() > 0
# vertex colors should have survived import-export
assert g.np.allclose(m.visual.vertex_colors,
r.visual.vertex_colors)
def test_vertex_attrib(self):
# test concatenation with texture
m = g.get_mesh('fuze.obj')
colors = (g.np.random.random(
(len(m.vertices), 4)) * 255).astype(g.np.uint8)
# set the color vertex attribute
m.visual.vertex_attributes['color'] = colors
export = m.export(file_type='glb')
validate_glb(export)
r = next(iter(
g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb').geometry.values()))
# make sure the color vertex attributes survived the roundtrip
assert g.np.allclose(
r.visual.vertex_attributes['color'], colors)
def test_export_postprocess(self):
scene = g.trimesh.Scene()
sphere = g.trimesh.primitives.Sphere()
sphere.visual.material = g.trimesh.visual.material.PBRMaterial(name='unlit_test')
scene.add_geometry(sphere)
def add_unlit(gltf_tree):
for material_dict in gltf_tree['materials']:
if 'unlit' in material_dict.get('name', '').lower():
material_dict["extensions"] = {
"KHR_materials_unlit": {}
}
gltf_tree["extensionsUsed"] = ["KHR_materials_unlit"]
gltf_1 = g.trimesh.exchange.gltf.export_gltf(scene)
gltf_2 = g.trimesh.exchange.gltf.export_gltf(scene, tree_postprocessor=add_unlit)
def extract_materials(gltf_files):
return g.json.loads(gltf_files['model.gltf'].decode('utf8'))['materials']
assert "extensions" not in extract_materials(gltf_1)[-1]
assert "extensions" in extract_materials(gltf_2)[-1]
def test_primitive_geometry_meta(self):
# Model with primitives
s = g.get_mesh('CesiumMilkTruck.glb')
# Assert that primitive geometries are marked as such
assert s.geometry['Cesium_Milk_Truck_0'].metadata['from_gltf_primitive']
# Assert that geometries that are not primitives are not marked as such
assert not s.geometry['Wheels'].metadata['from_gltf_primitive']
def test_points(self):
# test a simple pointcloud export-import cycle
points = g.np.arange(30).reshape((-1, 3))
export = g.trimesh.Scene(
g.trimesh.PointCloud(points)).export(file_type='glb')
validate_glb(export)
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb')
# make sure points survived export and reload
assert g.np.allclose(next(iter(reloaded.geometry.values())).vertices, points)
def test_bulk(self):
# Try exporting every loadable model to GLTF and checking
# the generated header against the schema.
# strict mode runs a schema header validation
assert g.trimesh.tol.strict
# check mesh, path, pointcloud exports
for root in [g.dir_models, g.os.path.join(g.dir_models, '2D')]:
for fn in g.os.listdir(root):
path_in = g.os.path.join(root, fn)
try:
geom = g.trimesh.load(path_in)
if isinstance(geom, g.trimesh.path.path.Path):
geom = g.trimesh.Scene(geom)
except BaseException as E:
print(E)
continue
# voxels don't have an export to gltf mode
if not hasattr(geom, 'export'):
continue
elif hasattr(geom, 'vertices') and len(geom.vertices) == 0:
continue
elif hasattr(geom, 'geometry') and len(geom.geometry) == 0:
continue
g.log.info('Testing: {}'.format(fn))
# check a roundtrip which will validate on export
                # and crash on reload if we've done anything screwy
export = geom.export(file_type='glb')
validate_glb(export)
# todo : importer breaks on `models/empty*` as it
# doesn't know what to do with empty meshes
# reloaded = g.trimesh.load(
# g.trimesh.util.wrap_as_stream(export),
# file_type='glb')
def test_equal_by_default(self):
# all things being equal we shouldn't be moving things
# for the usual load-export loop
s = g.get_mesh('fuze.obj')
# export as GLB then re-load
export = s.export(file_type='glb')
validate_glb(export)
reloaded = g.trimesh.load(
g.trimesh.util.wrap_as_stream(export),
file_type='glb', process=False)
assert len(reloaded.geometry) == 1
m = next(iter(reloaded.geometry.values()))
assert g.np.allclose(m.visual.uv,
s.visual.uv)
assert g.np.allclose(m.vertices,
s.vertices)
assert g.np.allclose(m.faces,
s.faces)
# will run a kdtree check
g.texture_equal(s, m)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
modules/face/samples/landmarks_demo.py | ptelang/opencv_contrib | 7,158 | 11199108 | import random
import numpy as np
import cv2 as cv
frame1 = cv.imread(cv.samples.findFile('lena.jpg'))
if frame1 is None:
print("image not found")
exit()
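# Stack two copies of the image vertically so the cascade will detect more than one face.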
frame = np.vstack((frame1,frame1))
facemark = cv.face.createFacemarkLBF()
try:
facemark.loadModel(cv.samples.findFile('lbfmodel.yaml'))
except cv.error:
    print("Model not found\nlbfmodel.yaml can be downloaded at")
    print("https://raw.githubusercontent.com/kurnianggoro/GSOC2017/master/data/lbfmodel.yaml")
    exit()
cascade = cv.CascadeClassifier(cv.samples.findFile('lbpcascade_frontalface_improved.xml'))
if cascade.empty() :
print("cascade not found")
exit()
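# Detect faces with the LBP cascade, then fit the LBF facemark model to get landmark points for each face.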
faces = cascade.detectMultiScale(frame, 1.05, 3, cv.CASCADE_SCALE_IMAGE, (30, 30))
ok, landmarks = facemark.fit(frame, faces=faces)
cv.imshow("Image", frame)
for marks in landmarks:
couleur = (random.randint(0,255),
random.randint(0,255),
random.randint(0,255))
cv.face.drawFacemarks(frame, marks, couleur)
cv.imshow("Image Landmarks", frame)
cv.waitKey()
|
middleware/BaseMiddleWare.py | AxueWong/django-restfulapi | 242 | 11199116 | <reponame>AxueWong/django-restfulapi
from rest_framework.response import Response
from rest_framework import utils
import json, os, copy, re, jwt, time
from django.shortcuts import render
from django.utils.deprecation import MiddlewareMixin
from rest_framework import status
import urllib
from django.http import QueryDict, HttpResponse, JsonResponse
from django.conf import settings
from django.core.cache import cache
from utils.utils import jwt_decode_handler,jwt_encode_handler,jwt_payload_handler,jwt_payload_handler,jwt_response_payload_handler, jwt_get_user_id_from_payload_handler
from user.models import User
from utils.ECB import ECBCipher
from django.db import connection
from utils.logger import logger
'''
Error code conventions:
0  no error
1  unknown error -- for this code the production frontend pops up a generic message such as "network error"
2  frontend popup error (includes: field validation errors, custom errors, missing account/data, prompt errors)
'''
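# Illustrative examples of the response envelope described above (the
# {"message", "errorCode", "data"} shape comes from the JsonResponse calls in this
# module; the concrete message values shown here are hypothetical):
#   {"message": "ok", "errorCode": 0, "data": {...}}                         # success
#   {"message": "<unexpected error>", "errorCode": 1, "data": {}}            # unknown error
#   {"message": "<validation or detail error>", "errorCode": 2, "data": {}}  # user-facing error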
# Middleware that converts PUT requests to PATCH requests
class PUTtoPATCHMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.method == 'PUT':
request.method = 'PATCH'
# Logging middleware
class LogMiddleware(MiddlewareMixin):
def process_request(self, request):
try:
logger.info('************************************************* 下面是新的一条日志 ***************************************************')
logger.info('拦截请求的地址:%s;请求的方法:%s' % (request.path, request.method))
logger.info('==================================== headers 头信息 ====================================================')
for key in request.META:
if key[:5] == 'HTTP_':
logger.debug('%s %s' % (str(key), str(request.META[key])))
logger.info('代理IP:%s' % request.META.get('REMOTE_ADDR'))
logger.info('真实IP:%s' % request.META.get('HTTP_X_FORWARDED_FOR')) # HTTP_X_REAL_IP
logger.info('==================================== request body信息 ==================================================')
logger.info('params参数:%s' % request.GET)
if request.path == '/uploadfile/':
logger.info('body参数:文件类型')
else:
logger.info('body参数:%s' % request.body.decode())
# if 'application/x-www-form-urlencoded' in request.META['CONTENT_TYPE']:
# print('body参数:', urllib.parse.unquote(request.body.decode()))
logger.info('================================== View视图函数内部信息 ================================================')
except Exception as e:
logger.error('发生错误:已预知的是上传文件导致,非预知错误见下:')
logger.error('未知错误:%s' % str(e))
return JsonResponse({"message": "出现了无法预料的错误:%s" % e, "errorCode": 1, "data": {}})
def process_exception(self, request, exception):
logger.error('发生错误的请求地址:%s;错误原因:%s' % (request.path, str(exception)))
return JsonResponse({"message": "出现了无法预料的view视图错误:%s" % exception.__str__(), "errorCode": 1, "data": {}})
def process_response(self,request,response):
if settings.SHOWSQL:
for sql in connection.queries:
logger.debug(sql)
if type(response) == Response:
if type(response.data) != utils.serializer_helpers.ReturnList:
if type(response.data) == dict and (response.data.get('errorCode') and response.data.get('errorCode') != 0):
logger.error('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 出现异常的日志 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
logger.error(response.data)
logger.error('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 异常日志结束 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
if type(response) == JsonResponse:
logger.error('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 出现异常的日志 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
logger.error(json.loads(response.content.decode()))
logger.error('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 异常日志结束 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
return response
# Permission / interface-key encryption middleware
class PermissionMiddleware(MiddlewareMixin):
def process_request(self, request):
white_paths = ['/wechat/wxnotifyurl', '/', '/__debug__/', '/__debug__', '/favicon.ico']
if request.path not in white_paths and not re.match(r'/swagger.*', request.path, re.I) and not re.match(r'/redoc/.*', request.path, re.I) and not re.match(r'/export.*', request.path, re.I):
# print('查看authkey',request.META.get('HTTP_INTERFACEKEY'))
            auth_key = request.META.get('HTTP_INTERFACEKEY')  # key must be ordered as: millisecond timestamp + backend-assigned key + 32-char random string (a uuid is preferred)
if auth_key:
# print('查看秘钥:', cache.get(auth_key))
if cache.get(auth_key):
logger.info('发现秘钥被多次使用,应当记录ip加入预备黑名单。')
return JsonResponse({"message": "非法访问!已禁止操作!" , "errorCode": 10, "data": {}})
                # Decrypt first
target_obj = ECBCipher(settings.INTERFACE_KEY)
target_key = target_obj.decrypted(auth_key)
# print('明文:', target_key)
                # Deny access outright if decryption fails
if not target_key:
return JsonResponse({"message": "非法访问!已禁止操作!" , "errorCode": 10, "data": {}})
                # After successful decryption,
                # set a redis entry recording the current timestamp
                time_int = int(time.time())  # in seconds
target_time, backend_key, random_str = target_key.split('+')
if backend_key not in settings.DISPATCH_KEYS:
return JsonResponse({"message": "非法访问!已禁止操作!" , "errorCode": 10, "data": {}})
if (time_int - int(int(target_time) / 1000)) > settings.INTERFACE_TIMEOUT:
logger.info('发现秘钥被多次使用,应当记录ip加入预备黑名单。')
return JsonResponse({"message": "非法访问!已禁止操作!" , "errorCode": 10, "data": {}})
cache.set(auth_key, "true", timeout=settings.INTERFACE_TIMEOUT)
pass
else:
return JsonResponse({"message": "接口秘钥未找到!禁止访问!" , "errorCode": 10, "data": {}})
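# Illustrative helper (an assumption, not part of the original project): builds the plaintext
# that PermissionMiddleware expects before it is encrypted with the shared INTERFACE_KEY.
# Only the "millisecond timestamp + backend-assigned key + random string" layout is taken from
# the split('+') parsing above; the helper name and its usage are hypothetical.
def _example_build_interface_key_plaintext(backend_key):
    import uuid
    # `time` is already imported at the top of this module
    return '%d+%s+%s' % (int(time.time() * 1000), backend_key, uuid.uuid4().hex)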
# Middleware that formats the returned JSON
class FormatReturnJsonMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if not type(response) == HttpResponse:
try:
if hasattr(self, 'process_request'):
response = self.process_request(request)
if not response:
response = self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
if request.method == 'DELETE':
# print(response.data)
if response.status_code == 204:
response.data = {"message": '删除成功', "errorCode": 0, "data": {}}
else:
if response.data.get('detail'):
data = {"message": response.data.get('detail'), "errorCode": 2, "data": {}}
elif response.data.get('message'):
data = response.data
else:
data = {"message": 'error', "errorCode": 2, "data": response.data}
response.data = data
response.status_code = 200
response._is_rendered = False
response.render()
else:
white_list = ['/wxnotifyurl', '/alinotifyurl']
if request.path != '/' and request.path not in white_list and request.path != '/wechat/wxnotifyurl' and not re.match(r'/swagger.*', request.path, re.I) and not re.match(r'/redoc/.*', request.path, re.I) and not re.match(r'/export.*', request.path, re.I):
                        # Handle formatting of non-paginated response data
if type(response.data) == utils.serializer_helpers.ReturnList:
data = {"message": 'ok', "errorCode": 0,"data": response.data}
response.data = data
if response.data.get('detail'):
data = {"message": response.data.get('detail'), "errorCode": 2, "data": {}}
response.data = data
elif response.status_code > 200 and response.status_code <= 299:
data = {"message": 'ok', "errorCode": 0,"data": response.data}
response.data = data
elif response.status_code >= 400 and response.status_code <= 499:
                            if response.data.get('message'):  # compatible with data set directly by APIView
pass
else:
data = {"message": str(response.data), "errorCode": 2,"data": response.data}
response.data = data
else:
                            if response.data.get('message'):  # compatible with data set directly by APIView
pass
                            elif response.data.get('count') != None:  # compatible with paginated response data
response.data['errorCode'] = 0
response.data['message'] = 'ok'
else:
data = {"message": 'ok', "errorCode": 0,
"data": response.data}
response.data = data
response.status_code = 200
response._is_rendered = False
response.render()
except Exception as e:
logger.error('发生错误:%s' % str(e))
if e.__str__() == "'HttpResponseNotFound' object has no attribute 'data'":
return JsonResponse({"message": '路径/页面未找到。', "errorCode": 2,"data": {}})
if e.__str__() == "'JsonResponse' object has no attribute 'data'":
return response
return JsonResponse({"message": "出现了无法预料的view视图错误:%s" % e.__str__(), "errorCode": 1, "data": {}})
return response
# Middleware that blocks frozen user accounts
class BlockUserMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.META.get('HTTP_AUTHORIZATION'):
if ' ' not in request.META.get('HTTP_AUTHORIZATION'):
return JsonResponse({"message": 'Token不合法' , "errorCode": 2, "data": {}})
token = (request.META.get('HTTP_AUTHORIZATION').split(' '))[1]
try:
payload = jwt_decode_handler(token)
user_id = jwt_get_user_id_from_payload_handler(payload)
if not user_id:
return JsonResponse({"message": "用户不存在!" , "errorCode": 2, "data": {}})
now_user = User.objects.values('id', 'is_freeze').filter(id=user_id).first()
if not now_user:
return JsonResponse({"message": "用户不存在!" , "errorCode": 2, "data": {}})
if now_user.get('is_freeze'):
return JsonResponse({"message": "账户被冻结!", "errorCode": 2, "data": {}})
except jwt.ExpiredSignature:
return JsonResponse({"message": 'Token过期' , "errorCode": 2, "data": {}})
except jwt.DecodeError:
return JsonResponse({"message": 'Token不合法' , "errorCode": 2, "data": {}})
except jwt.InvalidTokenError as e:
return JsonResponse({"message": "出现了无法预料的view视图错误:%s" % e, "errorCode": 1, "data": {}}) |
pgmpy/factors/FactorSet.py | echoyi/pgmpy | 2,144 | 11199126 | #!/usr/bin/env python3
from functools import reduce
from pgmpy.factors.base import BaseFactor
class FactorSet(object):
r"""
Base class of *DiscreteFactor Sets*.
A factor set provides a compact representation of higher dimensional factor
:math:`\phi_1\cdot\phi_2\cdots\phi_n`
For example the factor set corresponding to factor :math:`\phi_1\cdot\phi_2` would be the union of the factors
:math:`\phi_1` and :math:`\phi_2` i.e. factor set :math:`\vec\phi = \phi_1 \cup \phi_2`.
"""
def __init__(self, *factors_list):
"""
Initialize the factor set class.
Parameters
----------
factors_list: Factor1, Factor2, ....
All the factors whose product is represented by the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set = FactorSet(phi1, phi2)
>>> factor_set
<pgmpy.factors.FactorSet.FactorSet at 0x7f8e32af6d50>
>>> print(factor_set)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4c2d0>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4c710>])
"""
if not all(isinstance(phi, BaseFactor) for phi in factors_list):
raise TypeError("Input parameters must be child classes of BaseFactor")
self.factors = set([factor.copy() for factor in factors_list])
def add_factors(self, *factors):
"""
Adds factors to the factor set.
Parameters
----------
factors: Factor1, Factor2, ...., Factorn
factors to be added into the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3, phi4)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4ca10>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e4c393690>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b4c750>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4cb50>])
"""
self.factors.update(factors)
def remove_factors(self, *factors):
"""
Removes factors from the factor set.
Parameters
----------
factors: Factor1, Factor2, ...., Factorn
factors to be removed from the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b250>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b5b150>])
>>> factor_set1.remove_factors(phi1, phi2)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b4cb10>])
"""
for factor in factors:
self.factors.remove(factor)
def get_factors(self):
"""
Returns all the factors present in factor set.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3)
>>> factor_set1.get_factors()
{<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f827c0a23c8>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f827c0a2358>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f825243f9e8>}
"""
return self.factors
def product(self, factorset, inplace=True):
r"""
        Returns the product of this factor set with the given factor set.
        Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets, then their product is another factor
        set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.
Parameters
----------
        factorset: FactorSet
            The factor set to be multiplied with this factor set.
        inplace: A boolean (Default value True)
            If inplace = True, then it will modify the FactorSet object; if False, it will
            return a new FactorSet object.
        Returns
        --------
        If inplace = False, will return a new FactorSet object, which is the product of the two factor sets.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2.product(factor_set1)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4c910>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4cc50>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factor_set2.product(factor_set1, inplace=False)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b060>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b790>])
"""
factor_set = self if inplace else self.copy()
factor_set1 = factorset.copy()
factor_set.add_factors(*factor_set1.factors)
if not inplace:
return factor_set
def divide(self, factorset, inplace=True):
r"""
Returns a new factor set instance after division by the factor set
        Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` basically translates to union of all the
        factors present in :math:`\vec\phi_1` and :math:`\frac{1}{\phi_i}` of all the factors present in
        :math:`\vec\phi_2`.
Parameters
----------
factorset: FactorSet
The divisor
inplace: A boolean (Default value True)
            If inplace = True, then it will modify the FactorSet object; if False, it will
            return a new FactorSet object.
Returns
--------
        If inplace = False, will return a new FactorSet object which is the division of the
        two factor sets.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factor_set2.divide(factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b5ba10>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b650>,
<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b8d0>])
"""
factor_set = self if inplace else self.copy()
factor_set1 = factorset.copy()
factor_set.add_factors(
*[phi.identity_factor() / phi for phi in factor_set1.factors]
)
if not inplace:
return factor_set
def marginalize(self, variables, inplace=True):
"""
Marginalizes the factors present in the factor sets with respect to the given variables.
Parameters
----------
variables: list, array-like
List of the variables to be marginalized.
inplace: boolean (Default value True)
            If inplace=True it will modify the factor set itself, otherwise it will create a new factor set.
Returns
-------
If inplace = False, will return a new marginalized FactorSet object.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> factor_set1.marginalize('x1')
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x2:3, x3:2) at 0x7f8e32b4cc10>,
<DiscreteFactor representing phi(x3:2, x4:2) at 0x7f8e32b4cf90>])
"""
if isinstance(variables, str):
raise TypeError("Expected list or array-like type got type str")
factor_set = self if inplace else self.copy()
factors_to_be_marginalized = set(
filter(lambda x: set(x.scope()).intersection(variables), factor_set.factors)
)
for factor in factors_to_be_marginalized:
variables_to_be_marginalized = list(
set(factor.scope()).intersection(variables)
)
if inplace:
factor.marginalize(variables_to_be_marginalized, inplace=True)
else:
factor_set.remove_factors(factor)
factor_set.add_factors(
factor.marginalize(variables_to_be_marginalized, inplace=False)
)
if not inplace:
return factor_set
def __mul__(self, other):
return self.product(other)
def __truediv__(self, other):
return self.divide(other)
def __str__(self):
return self.factors.__str__()
def copy(self):
"""
Create a copy of factor set.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set = FactorSet(phi1, phi2)
>>> factor_set
<pgmpy.factors.FactorSet.FactorSet at 0x7fa68f390320>
>>> factor_set_copy = factor_set.copy()
>>> factor_set_copy
<pgmpy.factors.FactorSet.FactorSet at 0x7f91a0031160>
"""
# No need to have copies of factors as argument because __init__ method creates copies.
return FactorSet(*self.factors)
def factorset_product(*factorsets_list):
r"""
Base method used for product of factor sets.
    Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets, then their product is another factor set
:math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.
Parameters
----------
factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn
All the factor sets to be multiplied
Returns
-------
    Product of the factor sets in factorsets_list
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factorset_product
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factorset_product(factor_set1, factor_set2)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7fb3a1933e90>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7fb3a1933f10>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7fb3a1933f90>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7fb3a1933e10>])
"""
if not all(isinstance(factorset, FactorSet) for factorset in factorsets_list):
raise TypeError("Input parameters must be FactorSet instances")
return reduce(lambda x, y: x.product(y, inplace=False), factorsets_list)
def factorset_divide(factorset1, factorset2):
r"""
Base method for dividing two factor sets.
Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` basically translates to union of all the factors
    present in :math:`\vec\phi_1` and :math:`\frac{1}{\phi_i}` of all the factors present in :math:`\vec\phi_2`.
Parameters
----------
factorset1: FactorSet
The dividend
factorset2: FactorSet
The divisor
Returns
-------
The division of factorset1 and factorset2
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factorset_divide
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factorset_divide(factor_set2, factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f119ad78f90>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f119ad78e50>,
<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f119ad78ed0>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f119ad78e90>])
"""
if not isinstance(factorset1, FactorSet) or not isinstance(factorset2, FactorSet):
raise TypeError("factorset1 and factorset2 must be FactorSet instances")
return factorset1.divide(factorset2, inplace=False)
|
tensorflow_federated/python/tests/backend_accelerators_test.py | zhihansh/federated-oss | 1,918 | 11199137 | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
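# The two factory helpers below build the same federated computation; one reduces each
# client dataset with tf.data.Dataset.reduce and the other with a Python for-loop over
# iter(ds), so the multi-TPU test below exercises both dataset-reduction code paths.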
def _create_tff_parallel_clients_with_dataset_reduce():
@tf.function
def reduce_fn(x, y):
return x + y
@tf.function
def dataset_reduce_fn(ds, initial_val):
return ds.reduce(initial_val, reduce_fn)
@tff.tf_computation(tff.SequenceType(tf.int64))
def dataset_reduce_fn_wrapper(ds):
initial_val = tf.Variable(np.int64(1.0))
return dataset_reduce_fn(ds, initial_val)
@tff.federated_computation(tff.at_clients(tff.SequenceType(tf.int64)))
def parallel_client_run(client_datasets):
return tff.federated_map(dataset_reduce_fn_wrapper, client_datasets)
return parallel_client_run
def _create_tff_parallel_clients_with_iter_dataset():
@tf.function
def reduce_fn(x, y):
return x + y
@tf.function
def dataset_reduce_fn(ds, initial_val):
for batch in iter(ds):
initial_val = reduce_fn(initial_val, batch)
return initial_val
@tff.tf_computation(tff.SequenceType(tf.int64))
def dataset_reduce_fn_wrapper(ds):
initial_val = tf.Variable(np.int64(1.0))
return dataset_reduce_fn(ds, initial_val)
@tff.federated_computation(tff.at_clients(tff.SequenceType(tf.int64)))
def parallel_client_run(client_datasets):
return tff.federated_map(dataset_reduce_fn_wrapper, client_datasets)
return parallel_client_run
class LocalExecutorMultiTPUTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
tpu_devices = tf.config.list_logical_devices('TPU')
if len(tpu_devices) < 2:
self.skipTest('Skip multi-tpu tests when {} tpus are provided'.format(
len(tpu_devices)))
@parameterized.named_parameters(
('iter_server_on_cpu', 'CPU',
_create_tff_parallel_clients_with_iter_dataset),
('iter_server_on_tpu', 'TPU',
_create_tff_parallel_clients_with_iter_dataset),
('reduce_server_on_cpu', 'CPU',
_create_tff_parallel_clients_with_dataset_reduce),
('reduce_server_on_tpu', 'TPU',
_create_tff_parallel_clients_with_dataset_reduce),
)
def test_local_executor_multi_tpus(self, tf_device,
create_tff_parallel_clients_fn):
self.skipTest('b/157625321')
tf_devices = tf.config.list_logical_devices(tf_device)
server_tf_device = None if not tf_devices else tf_devices[0]
client_devices = tf.config.list_logical_devices('TPU')
tff.backends.native.set_local_python_execution_context(
server_tf_device=server_tf_device, client_tf_devices=client_devices)
parallel_client_run = create_tff_parallel_clients_fn()
client_data = [
tf.data.Dataset.range(10),
tf.data.Dataset.range(10).map(lambda x: x + 1)
]
client_results = parallel_client_run(client_data)
self.assertEqual(client_results, [np.int64(46), np.int64(56)])
if __name__ == '__main__':
absltest.main()
|
insights/tests/test_file_permissions.py | lhuett/insights-core | 121 | 11199149 | import pytest
from insights.util.file_permissions import FilePermissions
from insights.core import FileListing
from insights.tests import test_file_listing, context_wrap
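# Each vector is (an ls-style permissions line, whether FilePermissions should raise ValueError for it).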
PERMISSIONS_TEST_EXCEPTION_VECTORS = [
('-rw------ 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', True),
('bash: ls: command not found', True),
('-rw------ 1 root root 762 Se', True),
('-rw------- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False),
('-rw-------. 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False),
('-rw-------@ 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False),
('-rw-------+ 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False),
('-rw-------* 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False),
('-rw-------asdf 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False),
('-rw------- 1 ro:t root 762 Sep 23 002 /etc/ssh/sshd_config', True),
('-rw------- 1 root r:ot 762 Sep 23 002 /etc/ssh/sshd_config', True),
('-rwasdfas- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', True),
('-rwx/----- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', True),
('/usr/bin/ls: cannot access /boot/grub2/grub.cfg: No such file or directory', True),
('cannot access /boot/grub2/grub.cfg: No such file or directory', True),
('No such file or directory', True),
('adsf', True),
]
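# Each vector is: (ls-style line, whether the root group is also allowed to read/write,
# owner/group/other permission triplets, owner, group, path, owned-by-root-user,
# owned-by-root-user-and-group, only-root-can-read, only-root-can-write), matching the
# unpacking in test_permissions() below.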
PERMISSIONS_TEST_VECTORS = [
('-rw------- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False,
'rw-', '---', '---', 'root', 'root', '/etc/ssh/sshd_config',
True, True,
True, True),
('-rw------- 1 root root 762 Sep 23 002 /a path/with/spaces everywhere', False,
'rw-', '---', '---', 'root', 'root', '/a path/with/spaces everywhere',
True, True,
True, True),
('-rw------- 1 root root 762 Sep 23 002 no_slash_here', False,
'rw-', '---', '---', 'root', 'root', 'no_slash_here',
True, True,
True, True),
('-rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config', False,
'rw-', '---', '---', 'root', 'root', '/etc/ssh/sshd_config',
True, True,
True, True),
('-rw-rw-rw-. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config', False,
'rw-', 'rw-', 'rw-', 'root', 'root', '/etc/ssh/sshd_config',
True, True,
False, False),
('-rw-rw---- 1 root user 762 Sep 23 002 /etc/ssh/sshd_config', True,
'rw-', 'rw-', '---', 'root', 'user', '/etc/ssh/sshd_config',
True, False,
False, False),
('-rw------- 1 root user 762 Sep 23 002 /etc/ssh/sshd_config', False,
'rw-', '---', '---', 'root', 'user', '/etc/ssh/sshd_config',
True, False,
True, True),
('-rw------- 1 user root 762 Sep 23 002 /etc/ssh/sshd_config', False,
'rw-', '---', '---', 'user', 'root', '/etc/ssh/sshd_config',
False, False,
False, False),
('-rw-rw---- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', True,
'rw-', 'rw-', '---', 'root', 'root', '/etc/ssh/sshd_config',
True, True,
True, True),
('-rw-rw-r-- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', True,
'rw-', 'rw-', 'r--', 'root', 'root', '/etc/ssh/sshd_config',
True, True,
False, True),
('-rw-rw--w- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', True,
'rw-', 'rw-', '-w-', 'root', 'root', '/etc/ssh/sshd_config',
True, True,
True, False),
('---------- 1 root root 762 Sep 23 002 /etc/ssh/sshd_config', False,
'---', '---', '---', 'root', 'root', '/etc/ssh/sshd_config',
True, True,
True, True),
]
def test_permissions():
for vector in PERMISSIONS_TEST_VECTORS:
(line, with_group,
permissions_owner, permissions_group, permissions_other, owner, group, path,
owned_by_root_user, owned_by_root_user_and_group,
only_root_can_read, only_root_can_write) = vector
p = FilePermissions(line)
assert p.perms_owner == permissions_owner
assert p.perms_group == permissions_group
assert p.perms_other == permissions_other
assert p.owner == owner
assert p.group == group
assert p.owned_by('root', also_check_group=False) == owned_by_root_user
assert p.owned_by('root', also_check_group=True) == owned_by_root_user_and_group
assert p.only_root_can_read(root_group_can_read=with_group) == only_root_can_read
assert p.only_root_can_write(root_group_can_write=with_group) == only_root_can_write
assert p.all_zero() == all((p.perms_owner == '---', p.perms_group == '---',
p.perms_other == '---'))
assert p.owner_can_read() == ('r' in p.perms_owner)
assert p.owner_can_write() == ('w' in p.perms_owner)
assert p.owner_can_only_read() == ('r--' == p.perms_owner)
assert p.group_can_read() == ('r' in p.perms_group)
assert p.group_can_write() == ('w' in p.perms_group)
assert p.group_can_only_read() == ('r--' == p.perms_group)
assert p.others_can_read() == ('r' in p.perms_other)
assert p.others_can_write() == ('w' in p.perms_other)
assert p.others_can_only_read() == ('r--' == p.perms_other)
def test_permissions_invalid():
for vector in PERMISSIONS_TEST_EXCEPTION_VECTORS:
garbage, should_raise = vector
if should_raise:
with pytest.raises(ValueError):
FilePermissions(garbage)
else:
# shouldn't raise an exception
FilePermissions(garbage)
def test_multiple_directories():
dirs = FileListing(context_wrap(test_file_listing.MULTIPLE_DIRECTORIES))
assert '/etc/sysconfig' in dirs
assert 'cbq' in dirs.dirs_of('/etc/sysconfig')
# drwxr-xr-x. 2 0 0 41 Jul 6 23:32 cbq
obj = FilePermissions.from_dict(dirs.path_entry('/etc/sysconfig/cbq'))
assert hasattr(obj, 'name')
assert obj.name == 'cbq'
assert obj.perms_owner == 'rwx'
assert obj.perms_group == 'r-x'
assert obj.perms_other == 'r-x'
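def test_usage_sketch():
    # A minimal, self-contained illustration added as a sketch: it exercises only the
    # public FilePermissions API already covered by the vectors above, and the sample
    # ls line is illustrative, not taken from a real system.
    p = FilePermissions('-rw-r--r--. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config')
    assert p.owner == 'root' and p.group == 'root'
    assert p.owner_can_read() and not p.others_can_write()
    assert p.only_root_can_write(root_group_can_write=False)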
|
examples/python/tsa_dates.py | CCHiggins/statsmodels | 6,931 | 11199159 |
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook tsa_dates.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Dates in timeseries models
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
plt.rc("figure", figsize=(16, 8))
plt.rc("font", size=14)
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the
# year.
from datetime import datetime
dates = pd.date_range("1700-1-1", periods=len(data.endog), freq="A-DEC")
# ## Using Pandas
#
# Make a pandas TimeSeries or DataFrame
data.endog.index = dates
endog = data.endog
endog
# Instantiate the model
selection_res = ar_select_order(endog,
9,
old_names=False,
seasonal=True,
period=11)
pandas_ar_res = selection_res.model.fit()
# Out-of-sample prediction
pred = pandas_ar_res.predict(start="2005", end="2027")
print(pred)
fig = pandas_ar_res.plot_predict(start="2005", end="2027")
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ElementType.py | htlcnn/ironpython-stubs | 182 | 11199164 | class ElementType(Element,IDisposable):
""" Base class for all Types within Autodesk Revit. """
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def Duplicate(self,name):
"""
Duplicate(self: ElementType,name: str) -> ElementType
Duplicates an existing element type and assigns it a new name.
name: The new name of the element type.
Returns: The duplicated element type.
"""
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetPreviewImage(self,size):
"""
GetPreviewImage(self: ElementType,size: Size) -> Bitmap
Get the preview image of an element. This image is similar to what is seen in
the Revit UI when selecting the type of an element.
size: The width and height of the preview image in pixels.
   Returns: System::Drawing::Bitmap that represents the preview image, or null if there is no
      preview image.
"""
pass
def GetSimilarTypes(self):
"""
GetSimilarTypes(self: ElementType) -> ICollection[ElementId]
Obtains a set of types that are similar to this type.
Returns: A set of element IDs of types that are similar to this type.
"""
pass
def IsSimilarType(self,typeId):
"""
IsSimilarType(self: ElementType,typeId: ElementId) -> bool
Checks if given type is similar to this type.
typeId: ElementId of the type to check.
    Returns: True if the given type is similar to this type, false otherwise.
"""
pass
def IsValidDefaultFamilyType(self,familyCategoryId):
"""
IsValidDefaultFamilyType(self: ElementType,familyCategoryId: ElementId) -> bool
Identifies if this type is a valid default family type for the given family
category id.
familyCategoryId: The family category id.
Returns: True if this type is a valid default family type for the given family category
id.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
CanBeCopied=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determine if this ElementType can create a copy
Get: CanBeCopied(self: ElementType) -> bool
"""
CanBeDeleted=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determine if this ElementType can be deleted
Get: CanBeDeleted(self: ElementType) -> bool
"""
CanBeRenamed=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determine if this ElementType can be renamed
Get: CanBeRenamed(self: ElementType) -> bool
"""
FamilyName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the family name of this element type.
Get: FamilyName(self: ElementType) -> str
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Set the name for the ElementType.
Set: Name(self: ElementType)=value
"""
|
src/robomaster/servo.py | yukaryote/RoboMaster-SDK | 204 | 11199175 |
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import module
from . import protocol
from . import logger
from . import dds
from . import action
import struct
__all__ = ['Servo']
class ServoSubject(dds.Subject):
name = dds.DDS_SERVO
uid = dds.SUB_UID_MAP[name]
def __init__(self):
self._valid = [0] * 4
self._recv = [0]
self._speed = [0] * 4
self._angle = [0] * 4
def servo_data(self):
return self._valid, self._speed, self._angle
def data_info(self):
return self._valid, self._speed, self._angle
def decode(self, buf):
self._valid[0] = buf[0] & 0x01
self._valid[1] = (buf[0] >> 1) & 0x01
self._valid[2] = (buf[0] >> 2) & 0x01
self._valid[3] = (buf[0] >> 3) & 0x01
[self._recv, self._speed[0], self._speed[1], self._speed[2], self._speed[3],
self._angle[0], self._angle[1], self._angle[2], self._angle[3]] = struct.unpack('<Bhhhhhhhh', buf)
class ServoSetAngleAction(action.Action):
_action_proto_cls = protocol.ProtoServoCtrlSet
_push_proto_cls = protocol.ProtoServoCtrlPush
_target = protocol.host2byte(3, 6)
def __init__(self, index=0, angle=0, **kw):
super().__init__(**kw)
self._id = index
self._value = angle
self._angle = 0
def __repr__(self):
return "action_id:{0}, state:{1}, percent:{2}, value:{3}".format(
self._action_id, self._state, self._percent, self._angle)
def encode(self):
proto = protocol.ProtoServoCtrlSet()
proto._id = self._id
proto._value = (self._value+180)*10
return proto
def update_from_push(self, proto):
if proto.__class__ is not self._push_proto_cls:
return
self._percent = proto._percent
self._update_action_state(proto._action_state)
self._angle = proto._value
logger.info("{0} update_from_push: {1}".format(self.__class__.__name__, self))
class Servo(module.Module):
""" EP 舵机模块 """
_host = protocol.host2byte(3, 5)
def __init__(self, robot):
super().__init__(robot)
self._action_dispatcher = robot.action_dispatcher
def moveto(self, index=0, angle=0):
""" 舵机绝对位置移动
:param index: int [1, 3],舵机编号
:param angle: int: [-180, 180],舵机旋转角度,单位(°)
:return: action对象
"""
action = ServoSetAngleAction(index, angle)
self._action_dispatcher.send_action(action)
return action
def drive_speed(self, index=0, speed=0):
proto = protocol.ProtoServoModeSet()
proto._id = (index << 5) + 0x19
proto._mode = 1
msg = protocol.Msg(self._client.hostbyte, self._host, proto)
try:
resp_msg = self._client.send_sync_msg(msg)
if resp_msg:
prot = resp_msg.get_proto()
if prot._retcode == 0:
proto = protocol.ProtoServoControl()
proto._id = (index << 5) + 0x19
proto._enable = 1
proto._value = int(((speed + 49) * 900) / 98)
msg = protocol.Msg(self._client.hostbyte, self._host, proto)
try:
resp_msg = self._client.send_sync_msg(msg)
if resp_msg:
prot = resp_msg.get_proto()
if prot._retcode == 0:
return True
else:
return False
else:
return False
except Exception as e:
logger.warning("Servo: drive_speed, e {0}".format(e))
return False
else:
return False
else:
return False
except Exception as e:
logger.warning("Servo: drive_speed, send_sync_msg e {0}".format(e))
return False
def pause(self, index=0):
""" 停止
:param index: int: [1, 3],舵机编号
:return bool: 调用结果
"""
proto = protocol.ProtoServoControl()
proto._id = (index << 5) + 0x19
proto._enable = 0
proto._value = 0
msg = protocol.Msg(self._client.hostbyte, self._host, proto)
try:
resp_msg = self._client.send_sync_msg(msg)
if resp_msg:
prot = resp_msg.get_proto()
if prot._retcode == 0:
return True
else:
return False
else:
return False
except Exception as e:
logger.warning("Servo: pause, send_sync_msg e {0}".format(e))
return False
def get_angle(self, index=1):
""" 获取舵机角度值
:param index: int: [1,3],舵机编号
:return: int 舵机角度
"""
proto = protocol.ProtoServoGetAngle()
proto._id = (index << 5) + 0x19
msg = protocol.Msg(self._client.hostbyte, self._host, proto)
print(proto)
try:
resp_msg = self._client.send_sync_msg(msg)
if resp_msg:
prot = resp_msg.get_proto()
angle = prot._angle
return angle
else:
return False
except Exception as e:
logger.warning("Servo: get_angle, send_sync_msg e {0}".format(e))
return False
def sub_servo_info(self, freq=5, callback=None, *args, **kw):
""" 订阅舵机角度信息
:param freq: enum: (1, 5, 10, 20, 50) 设置数据订阅数据的推送频率,单位 Hz
:param callback: 回调函数,返回数据 (valid[4], speed[4], angle[4]):
:valid[4]: 4个舵机在线状态
:speed[4]: 4个舵机的速度值
:angle[4]: 4个舵机的角度值
:param args: 可变参数
:param kw: 关键字参数
:return: bool: 数据订阅结果
"""
sub = self._robot.dds
subject = ServoSubject()
subject.freq = freq
return sub.add_subject_info(subject, callback, args, kw)
def unsub_servo_info(self):
""" 取消订阅舵机的角度信息
:return: bool: 调用结果
"""
sub_dds = self._robot.dds
return sub_dds.del_subject_info(dds.DDS_SERVO)
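# A minimal usage sketch (an assumption, not part of this module): it mirrors the public
# RoboMaster SDK entry point `robomaster.robot.Robot`; the connection type and servo index
# below are illustrative values only.
if __name__ == '__main__':
    from robomaster import robot as rm_robot
    ep_robot = rm_robot.Robot()
    ep_robot.initialize(conn_type="ap")
    move_action = ep_robot.servo.moveto(index=1, angle=30)
    move_action.wait_for_completed()
    print(ep_robot.servo.get_angle(index=1))
    ep_robot.close()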
|
observations/r/political_knowledge.py | hajime9652/observations | 199 | 11199181 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def political_knowledge(path):
"""Political knowledge in the US and Europe
Data from McChesney and Nichols (2010) on domestic and international
knowledge in Denmark, Finland, the UK and the US among college
graduates, people with some college, and roughly 12th grade only.
A `data.frame` containing 12 columns and 4 rows.
country
a character vector of Denmark, Finland, UK, and US, being the four
     countries compared in this data set.
DomesticKnowledge.hs, DomesticKnowledge.sc, DomesticKnowledge.c
percent correct answers to calibrated questions regarding knowledge
of prominent items in domestic news in a survey of residents of the
four countries among college graduates (ending ".c"), some college
(".sc") and high school ("hs"). Source: McChesney and Nichols (2010,
chapter 1, chart 8).
InternationalKnowledge.hs, InternationalKnowledge.sc,
InternationalKnowledge.c
percent correct answers to calibrated questions regarding knowledge
of prominent items in international news in a survey of residents of
the four countries by education level as for DomesticKnowledge.
Source: McChesney and Nichols (2010, chapter 1, chart 7).
PoliticalKnowledge.hs, PoliticalKnowledge.sc, PoliticalKnowledge.c
average of domestic and international knowledge
PublicMediaPerCapita
     Per capita spending on public media in 2007 in US dollars from
McChesney and Nichols (2010, chapter 4, chart 1)
PublicMediaRel2US
Spending on public media relative to the US, being
`PublicMediaPerCapita / PublicMediaPerCapita[4]`.
Author(s)
~~~~~~~~~
<NAME>
<NAME> and John Nichols (2010) *The Death and Life of
American Journalism* (Nation Books)
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `political_knowledge.csv`.
Returns:
Tuple of np.ndarray `x_train` with 4 rows and 12 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'political_knowledge.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/politicalKnowledge.csv'
maybe_download_and_extract(path, url,
save_file_name='political_knowledge.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
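# A minimal usage sketch (assumption: '~/data' is an example cache directory, not a
# project default); it relies only on the documented return values above.
if __name__ == '__main__':
  x_train, metadata = political_knowledge('~/data')
  print(x_train.shape)  # 4 rows, 12 columns per the docstring above
  print(list(metadata['columns']))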
|
tests/unit/test_html_conversion.py | joelostblom/dash-docs | 379 | 11199185 | import pytest
import sys
from dash_docs.convert_to_html import convert_to_html
@pytest.mark.skipif(
sys.version_info < (3, 7),
reason="skip non-essential, potentially flaky tests"
)
def test_html_conversion():
exceptions = []
success = []
dcc_link = []
from dash_docs.chapter_index import URL_TO_CONTENT_MAP
for url in URL_TO_CONTENT_MAP:
try:
ssr_content = convert_to_html(URL_TO_CONTENT_MAP[url])
if '<dccLink' in ssr_content:
dcc_link.append(url)
else:
success.append(url)
except Exception as e:
exceptions.append([url, e])
error_message = ''
if len(exceptions) > 0:
for i in exceptions:
error_message += '\n===============\nIssue with ' + i[0] + '\n' + str(i[1]) + '\n\n\n'
error_message += '\n\n\nThese URLs were OK:\n{}'.format(
'-' + '\n-'.join(success)
)
error_message += '\n\n\nThese URLs had exceptions:\n{}'.format(
'-' + '\n-'.join([i[0] for i in exceptions])
)
error_message += '\n\n\nThese URLs still had dccLink in their content:\n{}'.format(
'-' + '\n-'.join(dcc_link)
)
error_message += '\n\n{} OK, {} need to be fixed, {} need dccLink fixed'.format(
len(success), len(exceptions), len(dcc_link))
assert len(exceptions) == 0 and len(dcc_link) == 0, error_message
|
hatsploit/core/utils/update.py | EntySec/HatSploit | 139 | 11199186 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import requests
import shutil
import subprocess
from packaging import version
from hatsploit.core.cli.badges import Badges
from hatsploit.lib.config import Config
class Update:
config = Config()
badges = Badges()
def check_update(self):
try:
remote_config = requests.get(
'https://raw.githubusercontent.com/EntySec/HatSploit/main/hatsploit/config/core_config.yml',
stream=True).content
except Exception:
remote_config = None
if remote_config:
remote_version = self.config.get_config_file(remote_config)['details']['version']
local_version = self.config.core_config['details']['version']
return version.parse(local_version) < version.parse(remote_version)
return remote_config
def update(self):
if self.check_update():
self.badges.print_process("Updating HatSploit Framework...")
shutil.rmtree(os.path.abspath(self.config.path_config['root_path']))
subprocess.call(['pip3', 'install', 'git+https://github.com/EntySec/HatSploit', '--ignore-installed'],
shell=False)
self.badges.print_success("HatSploit updated successfully!")
return
self.badges.print_warning("Your HatSploit is up-to-date.")
|
deps/src/boost_1_65_1/libs/mpi/test/python/ring_test.py | shreyasvj25/turicreate | 11,356 | 11199192 |
# Copyright (C) 2006 <NAME> <<EMAIL>g.gregor -at- gmail.com>.
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test basic communication.
import boost.parallel.mpi as mpi
def ring_test(comm, value, kind, root):
next_peer = (comm.rank + 1) % comm.size;
prior_peer = (comm.rank + comm.size - 1) % comm.size;
if comm.rank == root:
print ("Passing %s around a ring from root %d..." % (kind, root)),
comm.send(next_peer, 0, value)
(other_value, stat) = comm.recv(return_status = True)
assert value == other_value
assert stat.source == prior_peer
assert stat.tag == 0
else:
msg = comm.probe()
other_value = comm.recv(msg.source, msg.tag)
assert value == other_value
comm.send(next_peer, 0, other_value)
comm.barrier()
if comm.rank == root:
print "OK"
pass
if mpi.world.size < 2:
print "ERROR: ring_test.py must be executed with more than one process"
mpi.world.abort(-1);
ring_test(mpi.world, 17, 'integers', 0)
ring_test(mpi.world, 17, 'integers', 1)
ring_test(mpi.world, 'Hello, World!', 'string', 0)
ring_test(mpi.world, 'Hello, World!', 'string', 1)
ring_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], 'list of strings', 0)
ring_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], 'list of strings', 1)
|
hypernets/dispatchers/process/grpc/process_broker_service.py | Enpen/Hypernets | 1,080 | 11199272 | import queue
import subprocess
import time
from threading import Thread
from grpc import RpcError
from hypernets.dispatchers.process.grpc.proto import proc_pb2_grpc
from hypernets.dispatchers.process.grpc.proto.proc_pb2 import DataChunk
from hypernets.utils import logging
logger = logging.get_logger(__name__)
class ProcessBrokerService(proc_pb2_grpc.ProcessBrokerServicer):
def __init__(self):
super(ProcessBrokerService, self).__init__()
@staticmethod
def _read_data(f, q, buffer_size, encoding, data_kind):
try:
data = f.read(buffer_size)
while data and len(data) > 0:
if encoding:
chunk = DataChunk(kind=data_kind, data=data.encode(encoding))
else:
chunk = DataChunk(kind=data_kind, data=data)
q.put(chunk)
data = f.read(buffer_size)
except ValueError as e:
logger.error(e)
def run(self, request_iterator, context):
it = iter(request_iterator)
request = next(it)
program = request.program
args = request.args
cwd = request.cwd
buffer_size = request.buffer_size
encoding = request.encoding
if encoding is None or len(encoding) == 0:
encoding = None
with subprocess.Popen(args, buffer_size,
program if len(program) > 0 else None,
cwd=cwd if len(cwd) > 0 else None,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding=encoding,
shell=False) as p:
pid = p.pid
start_at = time.time()
peer = context.peer()
if logger.is_info_enabled():
logger.info(f'[{pid}] started, peer: {peer}, cmd:' + ' '.join(args), )
data_queue = queue.Queue()
t_out = Thread(target=self._read_data,
args=(p.stdout, data_queue, buffer_size, encoding, DataChunk.OUT))
t_err = Thread(target=self._read_data,
args=(p.stderr, data_queue, buffer_size, encoding, DataChunk.ERR))
t_out.start()
t_err.start()
# report pid to client
yield DataChunk(kind=DataChunk.ERR, data=f'pid: {pid}\n'.encode())
try:
while next(it):
chunk = None
while context.is_active() and \
(t_out.is_alive() or t_err.is_alive() or not data_queue.empty()):
try:
chunk = data_queue.get(False)
yield chunk
break
except queue.Empty:
time.sleep(0.1)
if not context.is_active():
p.kill()
code = 'killed (peer shutdown)'
break
elif chunk is None: # process exit and no more output
code = p.poll()
yield DataChunk(kind=DataChunk.END, data=str(code).encode())
# break
except StopIteration as e:
pass
except RpcError as e:
logger.error(e)
code = 'rpc error'
except Exception:
import traceback
traceback.print_exc()
code = 'exception'
if logger.is_info_enabled():
logger.info('[%s] done with code %s, elapsed %.3f seconds.'
% (pid, code, time.time() - start_at))
def download(self, request, context):
try:
peer = request.peer
path = request.path
encoding = request.encoding
buffer_size = request.buffer_size
if buffer_size is None or buffer_size <= 0:
buffer_size = 4096
# check peer here
start_at = time.time()
total = 0
if encoding:
with open(path, 'r', encoding=encoding) as f:
data = f.read(buffer_size)
while data and len(data) > 0:
if not context.is_active():
break
encoded_data = data.encode(encoding)
chunk = DataChunk(kind=DataChunk.DATA, data=encoded_data)
total += len(encoded_data)
yield chunk
data = f.read(buffer_size)
else:
with open(path, 'rb') as f:
data = f.read(buffer_size)
while data and len(data) > 0:
if not context.is_active():
break
chunk = DataChunk(kind=DataChunk.DATA, data=data)
total += len(data)
yield chunk
data = f.read(buffer_size)
if not context.is_active():
if logger.is_info_enabled():
logger.info('download %s broke (peer shutdown), %s bytes sent, elapsed %.3f seconds' %
(path, total, time.time() - start_at))
else:
yield DataChunk(kind=DataChunk.END, data=b'')
if logger.is_info_enabled():
logger.info('download %s (%s bytes) in %.3f seconds, encoding=%s' %
(path, total, time.time() - start_at, encoding))
except Exception as e:
import traceback
import sys
msg = f'{e.__class__.__name__}:\n'
msg += traceback.format_exc()
logger.error(msg)
yield DataChunk(kind=DataChunk.EXCEPTION, data=msg.encode())
def serve(addr, max_workers=10):
import grpc
from concurrent import futures
if logger.is_info_enabled():
logger.info(f'start broker at {addr}')
service = ProcessBrokerService()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
proc_pb2_grpc.add_ProcessBrokerServicer_to_server(service, server)
server.add_insecure_port(addr)
server.start()
return server, service
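# A minimal standalone usage sketch (assumption: the listen address and worker count are
# example values, not project defaults). serve() returns the started gRPC server and the
# service instance; the caller keeps the process alive (wait_for_termination needs a
# reasonably recent grpcio).
if __name__ == '__main__':
    _server, _service = serve('0.0.0.0:50051', max_workers=4)
    _server.wait_for_termination()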
|
recipes/msdfgen/all/conanfile.py | rockandsalt/conan-center-index | 562 | 11199276 |
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class MsdfgenConan(ConanFile):
name = "msdfgen"
description = "Multi-channel signed distance field generator"
license = "MIT"
topics = ("conan", "msdfgen", "msdf", "shape", "glyph", "font")
homepage = "https://github.com/Chlumsky/msdfgen"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_openmp": [True, False],
"with_skia": [True, False],
"utility": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_openmp": False,
"with_skia": False,
"utility": True,
}
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake", "cmake_find_package"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("freetype/2.10.4")
self.requires("lodepng/cci.20200615")
self.requires("tinyxml2/8.0.0")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
if self.settings.compiler == "Visual Studio" and self.options.shared:
raise ConanInvalidConfiguration("msdfgen shared not supported by Visual Studio")
if self.options.with_skia:
raise ConanInvalidConfiguration("skia recipe not available yet in CCI")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmakelists = os.path.join(self._source_subfolder, "CMakeLists.txt")
# unvendor lodepng & tinyxml2
tools.rmdir(os.path.join(self._source_subfolder, "lib"))
tools.replace_in_file(cmakelists, "\"lib/*.cpp\"", "")
tools.replace_in_file(cmakelists,
"target_link_libraries(msdfgen-ext PUBLIC msdfgen::msdfgen Freetype::Freetype)",
"target_link_libraries(msdfgen-ext PUBLIC msdfgen::msdfgen ${CONAN_LIBS})")
# very weird but required for Visual Studio when libs are unvendored (at least for Ninja generator)
if self.settings.compiler == "Visual Studio":
tools.replace_in_file(cmakelists,
"set_target_properties(msdfgen-standalone PROPERTIES ARCHIVE_OUTPUT_DIRECTORY archive OUTPUT_NAME msdfgen)",
"set_target_properties(msdfgen-standalone PROPERTIES OUTPUT_NAME msdfgen IMPORT_PREFIX foo)")
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["MSDFGEN_BUILD_MSDFGEN_STANDALONE"] = self.options.utility
self._cmake.definitions["MSDFGEN_USE_OPENMP"] = self.options.with_openmp
self._cmake.definitions["MSDFGEN_USE_CPP11"] = True
self._cmake.definitions["MSDFGEN_USE_SKIA"] = self.options.with_skia
self._cmake.definitions["MSDFGEN_INSTALL"] = True
self._cmake.configure()
return self._cmake
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "msdfgen"
self.cpp_info.names["cmake_find_package_multi"] = "msdfgen"
includedir = os.path.join("include", "msdfgen")
self.cpp_info.components["_msdfgen"].names["cmake_find_package"] = "msdfgen"
self.cpp_info.components["_msdfgen"].names["cmake_find_package_multi"] = "msdfgen"
self.cpp_info.components["_msdfgen"].includedirs.append(includedir)
self.cpp_info.components["_msdfgen"].libs = ["msdfgen"]
self.cpp_info.components["_msdfgen"].defines = ["MSDFGEN_USE_CPP11"]
self.cpp_info.components["msdfgen-ext"].names["cmake_find_package"] = "msdfgen-ext"
self.cpp_info.components["msdfgen-ext"].names["cmake_find_package_multi"] = "msdfgen-ext"
self.cpp_info.components["msdfgen-ext"].includedirs.append(includedir)
self.cpp_info.components["msdfgen-ext"].libs = ["msdfgen-ext"]
self.cpp_info.components["msdfgen-ext"].requires = [
"_msdfgen", "freetype::freetype",
"lodepng::lodepng", "tinyxml2::tinyxml2",
]
if self.options.with_skia:
self.cpp_info.components["msdfgen-ext"].defines.append("MSDFGEN_USE_SKIA")
if self.options.utility:
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
|
tests/exceptions/source/backtrace/nested_wrapping.py | ponponon/loguru | 11,391 | 11199301 | import sys
from loguru import logger
logger.remove()
logger.add(sys.stderr, format="", colorize=False, backtrace=True, diagnose=False)
def f(i):
1 / i
@logger.catch
@logger.catch()
def a(x):
f(x)
a(0)
with logger.catch():
with logger.catch():
f(0)
try:
try:
f(0)
except ZeroDivisionError:
logger.exception("")
except Exception:
logger.exception("")
|
tests/refinement_test.py | ericwxia/SpectralCluster | 327 | 11199326 | import unittest
import numpy as np
from spectralcluster import refinement
ThresholdType = refinement.ThresholdType
SymmetrizeType = refinement.SymmetrizeType
class TestCropDiagonal(unittest.TestCase):
"""Tests for the CropDiagonal class."""
def test_3by3_matrix(self):
matrix = np.array([[1, 2, 3], [3, 4, 5], [4, 2, 1]])
adjusted_matrix = refinement.CropDiagonal().refine(matrix)
expected = np.array([[3, 2, 3], [3, 5, 5], [4, 2, 4]])
self.assertTrue(np.array_equal(expected, adjusted_matrix))
class TestGaussianBlur(unittest.TestCase):
"""Tests for the GaussianBlur class."""
def test_3by3_matrix(self):
matrix = np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
adjusted_matrix = refinement.GaussianBlur(sigma=1).refine(matrix)
expected = np.array([[2.12, 2.61, 3.10], [2.76, 2.90, 3.06],
[3.16, 2.78, 2.46]])
self.assertTrue(np.allclose(expected, adjusted_matrix, atol=0.01))
class TestRowWiseThreshold(unittest.TestCase):
"""Tests for the RowWiseThreshold class."""
def test_3by3_matrix_percentile(self):
matrix = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
adjusted_matrix = refinement.RowWiseThreshold(
p_percentile=0.5,
thresholding_soft_multiplier=0.01,
thresholding_type=ThresholdType.Percentile).refine(matrix)
expected = np.array([[0.005, 2.0, 3.0], [0.03, 4.0, 5.0], [4.0, 2.0, 0.01]])
self.assertTrue(np.allclose(expected, adjusted_matrix, atol=0.001))
def test_3by3_matrix_row_max(self):
matrix = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
adjusted_matrix = refinement.RowWiseThreshold(
p_percentile=0.5,
thresholding_soft_multiplier=0.01,
thresholding_type=ThresholdType.RowMax).refine(matrix)
expected = np.array([[0.005, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 0.01]])
self.assertTrue(np.allclose(expected, adjusted_matrix, atol=0.001))
def test_3by3_matrix_binarization(self):
matrix = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
adjusted_matrix = refinement.RowWiseThreshold(
p_percentile=0.5,
thresholding_soft_multiplier=0.01,
thresholding_type=ThresholdType.RowMax,
thresholding_with_binarization=True).refine(matrix)
expected = np.array([[0.005, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 0.01]])
self.assertTrue(np.allclose(expected, adjusted_matrix, atol=0.001))
def test_3by3_matrix_preserve_diagonal(self):
matrix = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
adjusted_matrix = refinement.RowWiseThreshold(
p_percentile=0.5,
thresholding_soft_multiplier=0.01,
thresholding_type=ThresholdType.RowMax,
thresholding_with_binarization=True,
thresholding_preserve_diagonal=True).refine(matrix)
expected = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
self.assertTrue(np.allclose(expected, adjusted_matrix, atol=0.001))
class TestSymmetrize(unittest.TestCase):
"""Tests for the Symmetrize class."""
def test_3by3_matrix(self):
matrix = np.array([[1, 2, 3], [3, 4, 5], [4, 2, 1]])
adjusted_matrix = refinement.Symmetrize().refine(matrix)
expected = np.array([[1, 3, 4], [3, 4, 5], [4, 5, 1]])
self.assertTrue(np.array_equal(expected, adjusted_matrix))
def test_3by3_matrix_symmetrize_average(self):
matrix = np.array([[1, 2, 3], [3, 4, 5], [4, 2, 1]])
adjusted_matrix = refinement.Symmetrize(
symmetrize_type=SymmetrizeType.Average).refine(matrix)
expected = np.array([[1, 2.5, 3.5], [2.5, 4, 3.5], [3.5, 3.5, 1]])
self.assertTrue(np.array_equal(expected, adjusted_matrix))
class TestDiffuse(unittest.TestCase):
"""Tests for the Diffuse class."""
def test_2by2_matrix(self):
matrix = np.array([[1, 2], [3, 4]])
adjusted_matrix = refinement.Diffuse().refine(matrix)
expected = np.array([[5, 11], [11, 25]])
self.assertTrue(np.array_equal(expected, adjusted_matrix))
class TestRowWiseNormalize(unittest.TestCase):
"""Tests for the RowWiseNormalize class."""
def test_3by3_matrix(self):
matrix = np.array([[0.5, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
adjusted_matrix = refinement.RowWiseNormalize().refine(matrix)
expected = np.array([[0.167, 0.667, 1.0], [0.6, 0.8, 1.0], [1.0, 0.5,
0.25]])
self.assertTrue(np.allclose(expected, adjusted_matrix, atol=0.001))
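class TestRefinementChainSketch(unittest.TestCase):
  """A chaining sketch added for illustration: it only combines refinement operations
  already exercised above and checks the output shape."""

  def test_chain(self):
    matrix = np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0], [4.0, 2.0, 1.0]])
    refined = refinement.CropDiagonal().refine(matrix)
    refined = refinement.Symmetrize().refine(refined)
    refined = refinement.RowWiseNormalize().refine(refined)
    self.assertEqual(refined.shape, (3, 3))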
if __name__ == "__main__":
unittest.main()
|
installer/core/terraform/resources/aws/cloudwatch.py | jonico/pacbot | 1,165 | 11199330 | from core.terraform.resources import TerraformResource
from core.config import Settings
from core.providers.aws.boto3 import cloudwatch_log
from core.providers.aws.boto3 import cloudwatch_event
class CloudWatchEventRuleResource(TerraformResource):
"""
Base resource class for Terraform AWS Cloudwatch event rule resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_cloudwatch_event_rule"
available_args = {
'name': {'required': True, 'prefix': True, 'sep': '-'},
'schedule_expression': {'required': True},
'event_pattern': {'required': False},
'role_arn ': {'required': False},
'is_enabled ': {'required': False},
'description': {'required': False}
}
description = Settings.RESOURCE_DESCRIPTION
def check_exists_before(self, input, tf_outputs):
"""
Check if the resource is already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = cloudwatch_event.check_rule_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
class CloudWatchEventTargetResource(TerraformResource):
resource_instance_name = "aws_cloudwatch_event_target"
available_args = {
'rule': {'required': True},
'target_id': {'required': True},
'arn': {'required': False},
'target_input': {'required': False, 'tf_arg_key': 'input'},
'run_command_targets': {'required': False}
}
class CloudWatchLogGroupResource(TerraformResource):
"""
Base resource class for Terraform AWS Cloudwatch log group resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_cloudwatch_log_group"
available_args = {
'name': {'required': True, 'prefix': True, 'sep': '/'},
'name_prefix': {'required': False},
'retention_in_days': {'required': False},
'tags': {'required': False}
}
def check_exists_before(self, input, tf_outputs):
"""
Check if the resource is already exists in AWS
Args:
input (instance): input object
tf_outputs (dict): Terraform output dictionary
Returns:
exists (boolean): True if already exists in AWS else False
checked_details (dict): Status of the existence check
"""
checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
exists = False
if not self.resource_in_tf_output(tf_outputs):
exists = cloudwatch_log.check_log_group_exists(
checked_details['value'],
input.AWS_AUTH_CRED)
return exists, checked_details
class CloudWatchLogResourcePolicy(TerraformResource):
"""
Base resource class for Terraform AWS Cloudwatch log policy resource
Attributes:
resource_instance_name (str): Type of resource instance
available_args (dict): Instance configurations
"""
resource_instance_name = "aws_cloudwatch_log_resource_policy"
available_args = {
'policy_name': {'required': True, 'prefix': True, 'sep': '/'},
'policy_document': {'required': True}
}
|
tests/st/ops/test_sqrt.py | tianjiashuo/akg | 286 | 11199338 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
sqrt test cast
"""
import os
import pytest
import akg.utils as utils
from tests.common.base import TestBase
from tests.common.test_run import sqrt_run
class TestSqrt(TestBase):
def setup(self):
case_name = "test_akg_sqrt_001"
case_path = os.getcwd()
# params init
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag, opfuncname, testRunArgs, dimArgs
("sqrt_01", sqrt_run, ((1, 128), "float16"), ((128, 128), (128, 128))),
("sqrt_02", sqrt_run, ((128, 128), "float16"), ((0, 0), (128, 128))),
("sqrt_03", sqrt_run, ((128, 256), "float16"), ((0, 0), (128, 128))),
# ("sqrt_04", sqrt_run, ((2, 1024), "float16"), ((2, 2), (1024, 1024)) ),
# ("sqrt_05", sqrt_run, ((30522,), "float16"), ((15261, 15261),) ),
# ("sqrt_06", sqrt_run, ((4096, 1024), "float16"), ((16, 16), (1024, 1024))),
("sqrt_07", sqrt_run, ((1,), "float16"), ((1, 1),)),
# ("sqrt_08", sqrt_run, ((1024, 4096), "float16"), ((4, 4),(4096, 4096)) ),
# ("sqrt_09", sqrt_run, ((4096,), "float16"), ((4096, 4096),) ),
# ("sqrt_10", sqrt_run, ((30522, 1024), "float16"), ((16, 16), (1024, 1024))),
("sqrt_11", sqrt_run, ((1024,), "float16"), ((1024, 1024),)),
("sqrt_12", sqrt_run, ((2,), "float16"), ((2, 2),)),
("sqrt_13", sqrt_run, ((512, 1024), "float16"), ((16, 16), (1024, 1024))),
("sqrt_14", sqrt_run, ((1024, 1024), "float16"), ((16, 16), (1024, 1024))),
]
self.testarg_cloud = [
("sqrt_01", sqrt_run, ((1, 128), "float32"), ((128, 128), (128, 128))),
]
self.testarg_level1 = [
# testflag, opfuncname, testRunArgs, dimArgs
# ("sqrt_01", sqrt_run, ((1, 128), "float16"), ((128, 128), (128, 128))),
# ("sqrt_02", sqrt_run, ((128, 128), "float16"), ((0, 0), (128, 128)) ),
# ("sqrt_03", sqrt_run, ((128, 256), "float16"), ((0, 0), (128, 128)) ),
("sqrt_04", sqrt_run, ((2, 1024), "float16"), ((2, 2), (1024, 1024))),
("sqrt_05", sqrt_run, ((30522,), "float16"), ((15261, 15261),)),
("sqrt_06", sqrt_run, ((4096, 1024), "float16"), ((16, 16), (1024, 1024))),
# run fail("sqrt_07", sqrt_run, ((1,), "float16"), ((1, 1),) ),
("sqrt_08", sqrt_run, ((1024, 4096), "float16"), ((4, 4), (4096, 4096))),
("sqrt_09", sqrt_run, ((4096,), "float16"), ((4096, 4096),)),
("sqrt_10", sqrt_run, ((30522, 1024), "float16"), ((16, 16), (1024, 1024))),
# ("sqrt_11", sqrt_run, ((1024,), "float16"), ((1024, 1024),) ),
# ("sqrt_12", sqrt_run, ((2,), "float16"), ((2, 2),) ),
# ("sqrt_13", sqrt_run, ((512, 1024), "float16"), ((16, 16), (1024, 1024))),
# ("sqrt_14", sqrt_run, ((1024, 1024), "float16"), ((16, 16), (1024, 1024))),
# ("sqrt_15", sqrt_run, ((128, 1024), "float16"), ((16, 16), (1024, 1024))),
]
self.args_default = [
("000_case", sqrt_run, ((512, 1), 'float32'), ["level0"]),
("001_case", sqrt_run, ((1024, 2), 'float32'), ["level0"]),
("001_case", sqrt_run, ((2, 1024), 'float32'), ["level0"]),
("001_case", sqrt_run, ((1024, 1024), 'float32'), ["level0"]),
]
return True
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_level0(self):
return self.run_cases(self.args_default, utils.CUDA, "level0")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_level0(self):
return self.run_cases(self.args_default, utils.LLVM, "level0")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def test_run_cloud(self):
self.common_run(self.testarg_cloud)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run_level1(self):
self.common_run(self.testarg_level1)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= {0} Teardown============".format(self.casename))
return
|
GPU-Re-Ranking/extension/adjacency_matrix/setup.py | TxuanYu/Person_reID_baseline_pytorch | 3,358 | 11199346 |
"""
Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
Paper: https://arxiv.org/abs/2012.07620v2
======================================================================
On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
that our method achieves comparable or even better retrieval results on the other four
image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
with limited time cost.
"""
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='build_adjacency_matrix',
ext_modules=[
CUDAExtension('build_adjacency_matrix', [
'build_adjacency_matrix.cpp',
'build_adjacency_matrix_kernel.cu',
]),
],
cmdclass={
'build_ext':BuildExtension
})
|
torchdistill/models/classification/__init__.py | AhmedHussKhalifa/torchdistill | 576 | 11199353 |
from torchdistill.models.classification import densenet, resnet, wide_resnet
from torchdistill.models.registry import MODEL_FUNC_DICT
CLASSIFICATION_MODEL_FUNC_DICT = dict()
CLASSIFICATION_MODEL_FUNC_DICT.update(MODEL_FUNC_DICT)
|
models/match/multiview-simnet/data/preprocess.py | ziyoujiyi/PaddleRec | 2,739 | 11199363 | #encoding=utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import io
import jieba
import numpy as np
import random
f = io.open("./raw_data.txt", mode="r", encoding='utf-8')
lines = f.readlines()
f.close()
# Build the vocabulary dictionary
word_dict = {}
for line in lines:
line = line.strip().split("\t")
text = line[0].strip("") + line[1].strip("")
text = jieba.cut(text)
for word in text:
if word in word_dict:
continue
else:
word_dict[word] = len(word_dict) + 1
f = io.open("./raw_data.txt", mode="r", encoding='utf-8')
lines = f.readlines()
f.close()
lines = [line.strip().split("\t") for line in lines]
# Build a dict keyed by query, with negative examples as values
neg_dict = {}
for line in lines:
if line[2] == "0":
if line[0] in neg_dict:
neg_dict[line[0]].append(line[1])
else:
neg_dict[line[0]] = [line[1]]
# Build a dict keyed by query, with positive examples as values
pos_dict = {}
for line in lines:
if line[2] == "1":
if line[0] in pos_dict:
pos_dict[line[0]].append(line[1])
else:
pos_dict[line[0]] = [line[1]]
# Split into training and test sets
query_list = list(pos_dict.keys())
print(len(query_list))
random.shuffle(query_list)
train_query = query_list[:11600]
test_query = query_list[11600:]
# Build the training set
train_set = []
for query in train_query:
for pos in pos_dict[query]:
if query not in neg_dict:
continue
for neg in neg_dict[query]:
train_set.append([query, pos, neg])
random.shuffle(train_set)
# Build the test set
test_set = []
for query in test_query:
for pos in pos_dict[query]:
test_set.append([query, pos, 1])
if query not in neg_dict:
continue
for neg in neg_dict[query]:
test_set.append([query, neg, 0])
random.shuffle(test_set)
# Convert the query, pos and neg of the training set to the id format
_pad_ = 0
f = open("train.txt", "w")
for line in train_set:
query = jieba.cut(line[0].strip())
pos = jieba.cut(line[1].strip())
neg = jieba.cut(line[2].strip())
query_list = []
for word in query:
query_list.append(word_dict[word])
for i in range(79 - len(query_list)):
query_list.append(_pad_)
pos_list = []
for word in pos:
pos_list.append(word_dict[word])
for i in range(99 - len(pos_list)):
pos_list.append(_pad_)
neg_list = []
for word in neg:
neg_list.append(word_dict[word])
for i in range(90 - len(neg_list)):
neg_list.append(_pad_)
f.write(' '.join(["0:" + str(x) for x in query_list]) + " " + ' '.join([
"1:" + str(x) for x in pos_list
]) + " " + ' '.join(["2:" + str(x) for x in neg_list]) + "\n")
f.close()
# Convert the query and pos of the test set to the id format
_pad_ = 0
f = open("test.txt", "w")
fa = open("label.txt", "w")
fb = open("testquery.txt", "w")
for line in test_set:
query = jieba.cut(line[0].strip())
pos = jieba.cut(line[1].strip())
label = line[2]
query_list = []
for word in query:
query_list.append(word_dict[word])
for i in range(79 - len(query_list)):
query_list.append(_pad_)
pos_list = []
for word in pos:
pos_list.append(word_dict[word])
for i in range(99 - len(pos_list)):
pos_list.append(_pad_)
f.write(' '.join(["0:" + str(x) for x in query_list]) + " " + ' '.join(
["1:" + str(x) for x in pos_list]) + "\n")
fa.write(str(label) + "\n")
fb.write(','.join([str(x) for x in query_list]) + "\n")
f.close()
fa.close()
fb.close()
|
twitchio/message.py | rn4n/TwitchIO | 514 | 11199392 |
"""
The MIT License (MIT)
Copyright (c) 2017-2021 TwitchIO
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime
import time
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
from .channel import Channel
from .chatter import Chatter, PartialChatter
class Message:
__slots__ = ("_raw_data", "content", "_author", "echo", "_timestamp", "_channel", "_tags", "_id")
def __init__(self, **kwargs):
self._raw_data = kwargs.get("raw_data")
self.content = kwargs.get("content")
self._author = kwargs.get("author")
self._channel = kwargs.get("channel")
self._tags = kwargs.get("tags")
self.echo = kwargs.get("echo", False)
try:
self._id = self._tags["id"]
self._timestamp = self._tags["tmi-sent-ts"]
except KeyError:
self._id = None
self._timestamp = time.time()
@property
def id(self) -> str:
"""The Message ID."""
return self._id
@property
def author(self) -> Union["Chatter", "PartialChatter"]:
"""The User object associated with the Message."""
return self._author
@property
def channel(self) -> "Channel":
"""The Channel object associated with the Message."""
return self._channel
@property
def raw_data(self) -> str:
"""The raw data received from Twitch for this Message."""
return self._raw_data
@property
def tags(self) -> dict:
"""The tags associated with the Message.
Could be None.
"""
return self._tags
@property
def timestamp(self) -> datetime.datetime:
"""The Twitch timestamp for this Message.
Returns
---------
timestamp:
UTC datetime object of the Twitch timestamp.
"""
return datetime.datetime.utcfromtimestamp(int(self._timestamp) / 1000)
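# A minimal construction sketch (an assumption for illustration: the library normally
# builds Message objects from parsed IRC data, and the tag values below are made up).
if __name__ == "__main__":
    msg = Message(
        content="hello chat",
        author=None,
        channel=None,
        tags={"id": "1234", "tmi-sent-ts": "1609459200000"},
    )
    print(msg.id, msg.timestamp.isoformat())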
|
tests/save_delete_hooks/models.py | bpeschier/django | 5,079 | 11199399 |
"""
Adding hooks before/after saving and deleting
To execute arbitrary code around ``save()`` and ``delete()``, just subclass
the methods.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.data = []
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
def save(self, *args, **kwargs):
self.data.append("Before save")
# Call the "real" save() method
super(Person, self).save(*args, **kwargs)
self.data.append("After save")
def delete(self):
self.data.append("Before deletion")
# Call the "real" delete() method
super(Person, self).delete()
self.data.append("After deletion")
|
ivy/array/device.py | saurbhc/ivy | 161 | 11199400 | # global
import abc
# ToDo: implement all device methods here as public class methods
class ArrayWithDevice(abc.ABC):
pass
|
seahub/file_participants/migrations/0001_initial.py | MJochim/seahub | 420 | 11199411 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-28 02:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import seahub.base.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('tags', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FileParticipant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', seahub.base.fields.LowerCaseCharField(max_length=255)),
('uuid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tags.FileUUIDMap')),
],
),
migrations.AlterUniqueTogether(
name='fileparticipant',
unique_together=set([('uuid', 'username')]),
),
]
|
examples/pandocfilters/abc.py | jacobwhall/panflute | 361 | 11199437 | #!/usr/bin/env python
"""
Pandoc filter to process code blocks with class "abc" containing
ABC notation into images. Assumes that abcm2ps and ImageMagick's
convert are in the path. Images are put in the abc-images directory.
"""
import hashlib
import os
import sys
from pandocfilters import toJSONFilter, Para, Image
from subprocess import Popen, PIPE, call
imagedir = "abc-images"
def sha1(x):
return hashlib.sha1(x.encode(sys.getfilesystemencoding())).hexdigest()
def abc2eps(abc, filetype, outfile):
p = Popen(["abcm2ps", "-O", outfile + '.eps', "-"], stdin=PIPE)
p.stdin.write(abc)
p.communicate()
p.stdin.close()
call(["convert", outfile + '.eps', outfile + '.' + filetype])
def abc(key, value, format, meta):
if key == 'CodeBlock':
[[ident, classes, keyvals], code] = value
if "abc" in classes:
outfile = imagedir + '/' + sha1(code)
if format == "html":
filetype = "png"
elif format == "latex":
filetype = "pdf"
else:
filetype = "png"
src = outfile + '.' + filetype
if not os.path.isfile(src):
try:
os.mkdir(imagedir)
sys.stderr.write('Created directory ' + imagedir + '\n')
except OSError:
pass
abc2eps(code.encode("utf-8"), filetype, outfile)
sys.stderr.write('Created image ' + src + '\n')
return Para([Image(['', [], []], [], [src, ""])])
if __name__ == "__main__":
toJSONFilter(abc)
|
test/geometry/test_homography.py | Ishticode/kornia | 418 | 11199442 | import random
import pytest
import torch
from torch.autograd import gradcheck
import kornia
import kornia.testing as utils
from kornia.geometry.homography import (
find_homography_dlt,
find_homography_dlt_iterated,
oneway_transfer_error,
sample_is_valid_for_homography,
symmetric_transfer_error,
)
from kornia.testing import assert_close
class TestSampleValidation:
def test_good(self, device, dtype):
pts1 = torch.tensor([[0.0, 0.0],
[0.0, 1.0],
[1.0, 1.0],
[1.0, 0.0]], device=device, dtype=dtype)[None]
mask = sample_is_valid_for_homography(pts1, pts1)
expected = torch.tensor([True], device=device, dtype=torch.bool)
assert torch.equal(mask, expected)
def test_bad(self, device, dtype):
pts1 = torch.tensor([[0.0, 0.0],
[1.0, 0.0],
[0.0, 1.0],
[1.0, 1.0]], device=device, dtype=dtype)[None]
pts2 = torch.tensor([[0.0, 0.0],
[0.0, 1.0],
[1.0, 0.0],
[1.0, 1.0]], device=device, dtype=dtype)[None]
mask = sample_is_valid_for_homography(pts1, pts2)
expected = torch.tensor([False], device=device, dtype=torch.bool)
assert torch.equal(mask, expected)
def test_batch(self, device, dtype):
batch_size = 5
pts1 = torch.rand(batch_size, 4, 2, device=device, dtype=dtype)
pts2 = torch.rand(batch_size, 4, 2, device=device, dtype=dtype)
mask = sample_is_valid_for_homography(pts1, pts2)
assert (mask.shape == torch.Size([batch_size]))
class TestOneWayError:
def test_smoke(self, device, dtype):
pts1 = torch.rand(1, 6, 2, device=device, dtype=dtype)
pts2 = torch.rand(1, 6, 2, device=device, dtype=dtype)
H = utils.create_random_homography(1, 3).type_as(pts1).to(device)
assert oneway_transfer_error(pts1, pts2, H).shape == (1, 6)
def test_batch(self, device, dtype):
batch_size = 5
pts1 = torch.rand(batch_size, 3, 2, device=device, dtype=dtype)
pts2 = torch.rand(batch_size, 3, 2, device=device, dtype=dtype)
H = utils.create_random_homography(1, 3).type_as(pts1).to(device)
assert oneway_transfer_error(pts1, pts2, H).shape == (batch_size, 3)
def test_gradcheck(self, device):
# generate input data
batch_size, num_points, num_dims = 2, 3, 2
points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)
points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)
H = utils.create_random_homography(batch_size, 3).type_as(points1).to(device)
assert gradcheck(oneway_transfer_error, (points1, points2, H), raise_exception=True)
def test_shift(self, device, dtype):
pts1 = torch.zeros(3, 2, device=device, dtype=dtype)[None]
pts2 = torch.tensor([[1.0, 0.0], [2.0, 0.0], [2.0, 2.0]], device=device, dtype=dtype)[None]
H = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=dtype, device=device)[None]
expected = torch.tensor([0.0, 1.0, 5.0], device=device, dtype=dtype)[None]
assert_close(oneway_transfer_error(pts1, pts2, H), expected, atol=1e-4, rtol=1e-4)
class TestSymmetricTransferError:
def test_smoke(self, device, dtype):
pts1 = torch.rand(1, 6, 2, device=device, dtype=dtype)
pts2 = torch.rand(1, 6, 2, device=device, dtype=dtype)
H = utils.create_random_homography(1, 3).type_as(pts1).to(device)
assert symmetric_transfer_error(pts1, pts2, H).shape == (1, 6)
def test_batch(self, device, dtype):
batch_size = 5
pts1 = torch.rand(batch_size, 3, 2, device=device, dtype=dtype)
pts2 = torch.rand(batch_size, 3, 2, device=device, dtype=dtype)
H = utils.create_random_homography(1, 3).type_as(pts1).to(device)
assert symmetric_transfer_error(pts1, pts2, H).shape == (batch_size, 3)
def test_gradcheck(self, device):
# generate input data
batch_size, num_points, num_dims = 2, 3, 2
points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)
points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)
H = utils.create_random_homography(batch_size, 3).type_as(points1).to(device)
assert gradcheck(symmetric_transfer_error, (points1, points2, H), raise_exception=True)
def test_shift(self, device, dtype):
pts1 = torch.zeros(3, 2, device=device, dtype=dtype)[None]
pts2 = torch.tensor([[1.0, 0.0], [2.0, 0.0], [2.0, 2.0]], device=device, dtype=dtype)[None]
H = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=dtype, device=device)[None]
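        # Same shift as above, but the errors of both directions are summed,
        # doubling the squared distances to 0, 2 and 10.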
expected = torch.tensor([0.0, 2.0, 10.0], device=device, dtype=dtype)[None]
assert_close(symmetric_transfer_error(pts1, pts2, H), expected, atol=1e-4, rtol=1e-4)
class TestFindHomographyDLT:
def test_smoke(self, device, dtype):
points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
weights = torch.ones(1, 4, device=device, dtype=dtype)
H = find_homography_dlt(points1, points2, weights)
assert H.shape == (1, 3, 3)
def test_nocrash(self, device, dtype):
points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
weights = torch.ones(1, 4, device=device, dtype=dtype)
points1[0, 0, 0] = float('nan')
H = find_homography_dlt(points1, points2, weights)
assert H.shape == (1, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_shape(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
weights = torch.ones(B, N, device=device, dtype=dtype)
H = find_homography_dlt(points1, points2, weights)
assert H.shape == (B, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_shape_noweights(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
H = find_homography_dlt(points1, points2, None)
assert H.shape == (B, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_points_noweights(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
weights = torch.ones(B, N, device=device, dtype=dtype)
H_noweights = find_homography_dlt(points1, points2, None)
H_withweights = find_homography_dlt(points1, points2, weights)
assert H_noweights.shape == (B, 3, 3) and H_withweights.shape == (B, 3, 3)
assert_close(H_noweights, H_withweights, rtol=1e-3, atol=1e-4)
@pytest.mark.parametrize("batch_size", [1, 2, 5])
def test_clean_points(self, batch_size, device, dtype):
# generate input data
points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
H = kornia.eye_like(3, points_src)
H = H * 0.3 * torch.rand_like(H)
H = H / H[:, 2:3, 2:3]
points_dst = kornia.geometry.transform_points(H, points_src)
weights = torch.ones(batch_size, 10, device=device, dtype=dtype)
# compute transform from source to target
dst_homo_src = find_homography_dlt(points_src, points_dst, weights)
assert_close(kornia.geometry.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)
@pytest.mark.grad
    @pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incompatible types: #33546 fixed in v1.7")
def test_gradcheck(self, device):
# Save initial seed
initial_seed = torch.random.initial_seed()
max_number_of_checks = 10
# Test gradients for a max_number_of_checks times
current_seed = initial_seed
for i in range(max_number_of_checks):
torch.manual_seed(current_seed)
points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
points_dst = torch.rand_like(points_src)
weights = torch.ones_like(points_src)[..., 0]
try:
gradcheck(
find_homography_dlt, (points_src, points_dst, weights), rtol=1e-6, atol=1e-6, raise_exception=True
)
# Gradcheck failed
except RuntimeError:
# All iterations failed
if i == max_number_of_checks - 1:
assert gradcheck(
find_homography_dlt,
(points_src, points_dst, weights),
rtol=1e-6,
atol=1e-6,
raise_exception=True,
)
# Next iteration
else:
current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
continue
# Gradcheck succeed
torch.manual_seed(initial_seed)
return
class TestFindHomographyDLTIter:
def test_smoke(self, device, dtype):
points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
weights = torch.ones(1, 4, device=device, dtype=dtype)
H = find_homography_dlt_iterated(points1, points2, weights, 5)
assert H.shape == (1, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_shape(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
weights = torch.ones(B, N, device=device, dtype=dtype)
H = find_homography_dlt_iterated(points1, points2, weights, 5)
assert H.shape == (B, 3, 3)
@pytest.mark.parametrize("batch_size", [1, 2])
def test_clean_points(self, batch_size, device, dtype):
# generate input data
points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
H = kornia.eye_like(3, points_src)
H = H * 0.3 * torch.rand_like(H)
H = H / H[:, 2:3, 2:3]
points_dst = kornia.geometry.transform_points(H, points_src)
weights = torch.ones(batch_size, 10, device=device, dtype=dtype)
# compute transform from source to target
dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 10)
assert_close(kornia.geometry.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)
@pytest.mark.grad
    @pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incompatible types: #33546 fixed in v1.7")
def test_gradcheck(self, device):
# Save initial seed
initial_seed = torch.random.initial_seed()
max_number_of_checks = 10
# Test gradients for a max_number_of_checks times
current_seed = initial_seed
for i in range(max_number_of_checks):
torch.manual_seed(current_seed)
points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
points_dst = torch.rand_like(points_src)
weights = torch.ones_like(points_src)[..., 0]
try:
gradcheck(
find_homography_dlt_iterated,
(points_src, points_dst, weights),
rtol=1e-6,
atol=1e-6,
raise_exception=True,
)
# Gradcheck failed
except RuntimeError:
# All iterations failed
if i == max_number_of_checks - 1:
assert gradcheck(
find_homography_dlt_iterated,
(points_src, points_dst, weights),
rtol=1e-6,
atol=1e-6,
raise_exception=True,
)
# Next iteration
else:
current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
continue
# Gradcheck succeed
torch.manual_seed(initial_seed)
return
@pytest.mark.grad
@pytest.mark.parametrize("batch_size", [1, 2])
def test_dirty_points_and_gradcheck(self, batch_size, device, dtype):
# generate input data
points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
H = kornia.eye_like(3, points_src)
H = H * (1 + torch.rand_like(H))
H = H / H[:, 2:3, 2:3]
points_src = 100.0 * torch.rand(batch_size, 20, 2, device=device, dtype=dtype)
points_dst = kornia.geometry.transform_points(H, points_src)
# making last point an outlier
points_dst[:, -1, :] += 20
weights = torch.ones(batch_size, 20, device=device, dtype=dtype)
# compute transform from source to target
dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 0.5, 10)
assert_close(
kornia.geometry.transform_points(dst_homo_src, points_src[:, :-1]), points_dst[:, :-1], rtol=1e-3, atol=1e-3
)
|
core/dbt/config/renderer.py | tomasfarias/dbt-core | 799 | 11199443 | <filename>core/dbt/config/renderer.py<gh_stars>100-1000
from typing import Dict, Any, Tuple, Optional, Union, Callable
from dbt.clients.jinja import get_rendered, catch_jinja
from dbt.context.target import TargetContext
from dbt.context.secret import SecretContext
from dbt.context.base import BaseContext
from dbt.contracts.connection import HasCredentials
from dbt.exceptions import DbtProjectError, CompilationException, RecursionException
from dbt.utils import deep_map_render
Keypath = Tuple[Union[str, int], ...]
class BaseRenderer:
def __init__(self, context: Dict[str, Any]) -> None:
self.context = context
@property
def name(self):
return "Rendering"
def should_render_keypath(self, keypath: Keypath) -> bool:
return True
def render_entry(self, value: Any, keypath: Keypath) -> Any:
if not self.should_render_keypath(keypath):
return value
return self.render_value(value, keypath)
def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any:
# keypath is ignored.
# if it wasn't read as a string, ignore it
if not isinstance(value, str):
return value
try:
with catch_jinja():
return get_rendered(value, self.context, native=True)
except CompilationException as exc:
msg = f"Could not render {value}: {exc.msg}"
raise CompilationException(msg) from exc
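    # Illustrative example (not from the original source): with a context of
    # {"x": 1}, BaseRenderer({"x": 1}).render_value("{{ x + 1 }}") should
    # render to the native integer 2, since rendering uses native=True.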
def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
try:
return deep_map_render(self.render_entry, data)
except RecursionException:
raise DbtProjectError(
f"Cycle detected: {self.name} input has a reference to itself", project=data
)
def _list_if_none(value):
if value is None:
value = []
return value
def _dict_if_none(value):
if value is None:
value = {}
return value
def _list_if_none_or_string(value):
value = _list_if_none(value)
if isinstance(value, str):
return [value]
return value
class ProjectPostprocessor(Dict[Keypath, Callable[[Any], Any]]):
def __init__(self):
super().__init__()
self[("on-run-start",)] = _list_if_none_or_string
self[("on-run-end",)] = _list_if_none_or_string
for k in ("models", "seeds", "snapshots"):
self[(k,)] = _dict_if_none
self[(k, "vars")] = _dict_if_none
self[(k, "pre-hook")] = _list_if_none_or_string
self[(k, "post-hook")] = _list_if_none_or_string
self[("seeds", "column_types")] = _dict_if_none
def postprocess(self, value: Any, key: Keypath) -> Any:
if key in self:
handler = self[key]
return handler(value)
return value
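    # Illustrative behaviour (not from the original source):
    #   ProjectPostprocessor().postprocess(None, ("on-run-start",)) -> []
    #   ProjectPostprocessor().postprocess("select 1", ("on-run-start",)) -> ["select 1"]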
class DbtProjectYamlRenderer(BaseRenderer):
_KEYPATH_HANDLERS = ProjectPostprocessor()
def __init__(
self, profile: Optional[HasCredentials] = None, cli_vars: Optional[Dict[str, Any]] = None
) -> None:
# Generate contexts here because we want to save the context
# object in order to retrieve the env_vars. This is almost always
# a TargetContext, but in the debug task we want a project
# even when we don't have a profile.
if cli_vars is None:
cli_vars = {}
if profile:
self.ctx_obj = TargetContext(profile, cli_vars)
else:
self.ctx_obj = BaseContext(cli_vars) # type:ignore
context = self.ctx_obj.to_dict()
super().__init__(context)
@property
    def name(self):
        return "Project config"
# Uses SecretRenderer
def get_package_renderer(self) -> BaseRenderer:
return PackageRenderer(self.ctx_obj.cli_vars)
def render_project(
self,
project: Dict[str, Any],
project_root: str,
) -> Dict[str, Any]:
"""Render the project and insert the project root after rendering."""
rendered_project = self.render_data(project)
rendered_project["project-root"] = project_root
return rendered_project
def render_packages(self, packages: Dict[str, Any]):
"""Render the given packages dict"""
package_renderer = self.get_package_renderer()
return package_renderer.render_data(packages)
def render_selectors(self, selectors: Dict[str, Any]):
return self.render_data(selectors)
def render_entry(self, value: Any, keypath: Keypath) -> Any:
result = super().render_entry(value, keypath)
return self._KEYPATH_HANDLERS.postprocess(result, keypath)
def should_render_keypath(self, keypath: Keypath) -> bool:
if not keypath:
return True
first = keypath[0]
# run hooks are not rendered
if first in {"on-run-start", "on-run-end", "query-comment"}:
return False
# don't render vars blocks until runtime
if first == "vars":
return False
if first in {"seeds", "models", "snapshots", "tests"}:
keypath_parts = {(k.lstrip("+ ") if isinstance(k, str) else k) for k in keypath}
# model-level hooks
if "pre-hook" in keypath_parts or "post-hook" in keypath_parts:
return False
return True
class SecretRenderer(BaseRenderer):
def __init__(self, cli_vars: Dict[str, Any] = {}) -> None:
# Generate contexts here because we want to save the context
# object in order to retrieve the env_vars.
self.ctx_obj = SecretContext(cli_vars)
context = self.ctx_obj.to_dict()
super().__init__(context)
@property
def name(self):
return "Secret"
class ProfileRenderer(SecretRenderer):
@property
def name(self):
return "Profile"
class PackageRenderer(SecretRenderer):
@property
def name(self):
return "Packages config"
|
megatron/data/gpt_dataset.py | adammoody/Megatron-DeepSpeed | 2,869 | 11199447 | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT style dataset."""
import os
import time
import numpy as np
import torch
from megatron import mpu, print_rank_0
from megatron.data.blendable_dataset import BlendableDataset
from megatron.data.dataset_utils import get_datasets_weights_and_num_samples
from megatron.data.dataset_utils import get_train_valid_test_split_
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup):
"""Build train, valid, and test datasets."""
# Single dataset.
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(data_prefix[0],
data_impl, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix,
train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
prefixes[i], data_impl, splits_string,
datasets_train_valid_test_num_samples[i],
seq_length, seed, skip_warmup)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights)
return (blending_train_dataset, blending_valid_dataset,
blending_test_dataset)
def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup):
"""Build train, valid, and test datasets."""
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix,
data_impl,
skip_warmup)
total_num_of_documents = indexed_dataset.sizes.shape[0]
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
print_rank_0(' > dataset split:')
def print_split_stats(name, index):
print_rank_0(' {}:'.format(name))
print_rank_0(' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1],
splits[index + 1] - splits[index]))
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
documents = np.arange(start=splits[index], stop=splits[index + 1],
step=1, dtype=np.int32)
dataset = GPTDataset(name, data_prefix,
documents, indexed_dataset,
train_valid_test_num_samples[index],
seq_length, seed)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
"""Build indexed dataset."""
print_rank_0(' > building dataset index ...')
start_time = time.time()
indexed_dataset = make_indexed_dataset(data_prefix,
data_impl,
skip_warmup)
print_rank_0(' > finished creating indexed dataset in {:4f} '
'seconds'.format(time.time() - start_time))
print_rank_0(' number of documents: {}'.format(
indexed_dataset.sizes.shape[0]))
return indexed_dataset
class GPTDataset(torch.utils.data.Dataset):
def __init__(self, name, data_prefix, documents, indexed_dataset,
num_samples, seq_length, seed):
self.name = name
self.indexed_dataset = indexed_dataset
# Checks
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
# Build index mappings.
self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
self.name, data_prefix, documents, self.indexed_dataset.sizes,
num_samples, seq_length, seed)
def __len__(self):
        # -1 is due to data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
return self.sample_idx.shape[0] - 1
def __getitem__(self, idx):
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
if doc_index_f == doc_index_l:
sample = self.indexed_dataset.get(self.doc_idx[doc_index_f],
offset=offset_f,
length=offset_l - offset_f + 1)
else:
# Otherwise, get the rest of the initial document.
sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f],
offset=offset_f)]
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
# And finally add the relevant portion of last document.
sample_list.append(self.indexed_dataset.get(
self.doc_idx[doc_index_l],
length=offset_l + 1))
sample = np.concatenate(sample_list)
return {'text': np.array(sample, dtype=np.int64)}
def _build_index_mappings(name, data_prefix, documents, sizes,
num_samples, seq_length, seed):
"""Build doc-idx, sample-idx, and shuffle-idx.
doc-idx: is an array (ordered) of documents to be used in training.
sample-idx: is the start document index and document offset for each
training sample.
shuffle-idx: maps the sample index into a random index into sample-idx.
"""
# Number of tokens in each epoch and number of required epochs.
tokens_per_epoch = _num_tokens(documents, sizes)
num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
# rng state
np_rng = np.random.RandomState(seed=seed)
# Filename of the index mappings.
_filename = data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_length)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
# Build the indexed mapping if not exist.
if torch.distributed.get_rank() == 0:
if (not os.path.isfile(doc_idx_filename)) or \
(not os.path.isfile(sample_idx_filename)) or \
(not os.path.isfile(shuffle_idx_filename)):
print_rank_0(' > WARNING: could not find index map files, building '
'the indices on rank 0 ...')
# For the last epoch, decide whether include the entire epoch
# in the global shuffle or not.
# If we need only one epoch, then separating last epoch does
# not mean anything.
if num_epochs == 1:
separate_last_epoch = False
print(' > only one epoch required, setting '
'separate_last_epoch to False', flush=True)
else:
# Get the number of samples for the last epoch
num_samples_from_epochs_minus_one = (
(num_epochs - 1) * tokens_per_epoch - 1) // seq_length
last_epoch_num_samples = num_samples - \
num_samples_from_epochs_minus_one
assert last_epoch_num_samples >= 0, \
'last epoch number of samples should be non-negative.'
num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
assert last_epoch_num_samples < (num_samples_per_epoch + 1), \
'last epoch number of samples exceeded max value.'
# If we have less than 80% of the samples for the last epoch,
                # separate out the epoch and treat it differently.
# Note: the 80% number is just based on common sense and can
# be adjusted if needed.
separate_last_epoch = (last_epoch_num_samples <
int(0.80 * num_samples_per_epoch))
if separate_last_epoch:
string = ' > last epoch number of samples ({}) is smaller '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to True'
else:
string = ' > last epoch number of samples ({}) is larger '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to False'
print(string.format(last_epoch_num_samples,
num_samples_per_epoch), flush=True)
# doc-idx.
start_time = time.time()
doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
separate_last_epoch)
np.save(doc_idx_filename, doc_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save doc-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# sample-idx.
start_time = time.time()
# Use C++ implementation for speed.
# First compile and then import.
from megatron.data import helpers
assert doc_idx.dtype == np.int32
assert sizes.dtype == np.int32
sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch)
# sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
# num_epochs, tokens_per_epoch)
np.save(sample_idx_filename, sample_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save sample-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# shuffle-idx.
start_time = time.time()
            # -1 is due to data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
if separate_last_epoch:
num_samples_ = num_samples_from_epochs_minus_one
else:
num_samples_ = sample_idx.shape[0] - 1
shuffle_idx = _build_shuffle_idx(num_samples_,
sample_idx.shape[0] - 1, np_rng)
np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save shuffle-idx mapping'
' (seconds): {:4f}'.format(time.time() - start_time))
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
assert counts[0].item() == (
torch.distributed.get_world_size() //
torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()))
# Load mappings.
start_time = time.time()
print_rank_0(' > loading doc-idx mapping from {}'.format(
doc_idx_filename))
doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading sample-idx mapping from {}'.format(
sample_idx_filename))
sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading shuffle-idx mapping from {}'.format(
shuffle_idx_filename))
shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
sample_idx.shape[0]))
print_rank_0(' total number of epochs: {}'.format(num_epochs))
return doc_idx, sample_idx, shuffle_idx
def _num_tokens(documents, sizes):
"""Total number of tokens in the dataset."""
return np.sum(sizes[documents])
def _num_epochs(tokens_per_epoch, seq_length, num_samples):
    """Based on number of samples and sequence length, calculate how many
epochs will be needed."""
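    # Illustrative example: with tokens_per_epoch=8, seq_length=4 and
    # num_samples=3, one epoch yields only (8 - 1) // 4 = 1 sample, while two
    # epochs yield (16 - 1) // 4 = 3 samples, so this returns 2.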
num_epochs = 0
total_tokens = 0
while True:
num_epochs += 1
total_tokens += tokens_per_epoch
# -1 is because we need to retrieve seq_length + 1 token each time
# but the last token will overlap with the first token of the next
# sample except for the last sample.
if ((total_tokens - 1) // seq_length) >= num_samples:
return num_epochs
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
    """Build an array with length = number-of-epochs * number-of-documents.
Each index is mapped to a corresponding document."""
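    # Illustrative example: with documents=[7, 8, 9] and num_epochs=2 (no
    # separate last epoch), the mgrid below yields [[0, 1, 2], [0, 1, 2]],
    # each row is overwritten with [7, 8, 9], and the flattened, shuffled
    # result is a random permutation of [7, 8, 9, 7, 8, 9].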
if not separate_last_epoch or num_epochs == 1:
doc_idx = np.mgrid[0:num_epochs, 0:len(documents)][1]
doc_idx[:] = documents
doc_idx = doc_idx.reshape(-1)
doc_idx = doc_idx.astype(np.int32)
np_rng.shuffle(doc_idx)
return doc_idx
doc_idx_first = _build_doc_idx(documents, num_epochs-1, np_rng, False)
doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
return np.concatenate((doc_idx_first, doc_idx_last))
def _build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch):
"""Sample index mapping is a 2D array with sizes
[number-of-samples + 1, 2] where [..., 0] contains
the index into `doc_idx` and [..., 1] is the
starting offset in that document."""
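    # Worked example (illustrative): with doc_idx=[0, 1], sizes=[5, 3],
    # seq_length=4 and num_samples=1, a sample needs seq_length + 1 = 5 tokens,
    # so sample_idx == [[0, 0], [0, 4]]: the single sample spans tokens 0..4
    # of doc_idx[0].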
# Total number of samples. For -1 see comments in `_num_epochs`.
num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length
sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32)
# Index into sample_idx.
sample_index = 0
# Index into doc_idx.
doc_idx_index = 0
    # Beginning offset for each document.
doc_offset = 0
# Start with first document and no offset.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
while sample_index <= num_samples:
# Start with a fresh sequence.
remaining_seq_length = seq_length + 1
while remaining_seq_length != 0:
# Get the document length.
doc_id = doc_idx[doc_idx_index]
doc_length = sizes[doc_id] - doc_offset
# And add it to the current sequence.
remaining_seq_length -= doc_length
# If we have more than a full sequence, adjust offset and set
# remaining length to zero so we return from the while loop.
# Note that -1 here is for the same reason we have -1 in
# `_num_epochs` calculations.
if remaining_seq_length <= 0:
doc_offset += (remaining_seq_length + doc_length - 1)
remaining_seq_length = 0
else:
                # Otherwise, start from the beginning of the next document.
doc_idx_index += 1
doc_offset = 0
# Record the sequence.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
return sample_idx
def _build_shuffle_idx(num_samples, total_size, np_rng):
"""Build the range [0, size) and shuffle."""
print(' > building shuffle index with split [0, {}) and [{}, {}) '
'...'.format(num_samples, num_samples, total_size), flush=True)
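    # Illustrative example: with num_samples=3 and total_size=5 this returns a
    # random permutation of [0, 1, 2] followed by a random permutation of
    # [3, 4], so samples of a separated last epoch stay in the tail.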
dtype_ = np.uint32
if total_size >= (np.iinfo(np.uint32).max - 1):
dtype_ = np.int64
shuffle_idx_first = np.arange(start=0, stop=num_samples,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_first)
if num_samples == total_size:
return shuffle_idx_first
shuffle_idx_last = np.arange(start=num_samples, stop=total_size,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_last)
return np.concatenate((shuffle_idx_first, shuffle_idx_last))
|
compiler_gym/views/observation.py | sahirgomez1/CompilerGym | 562 | 11199490 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, List
from deprecated.sphinx import deprecated
from compiler_gym.service.connection import ServiceError
from compiler_gym.service.proto import ObservationSpace
from compiler_gym.util.gym_type_hints import (
ActionType,
ObservationType,
RewardType,
StepType,
)
from compiler_gym.views.observation_space_spec import ObservationSpaceSpec
class ObservationView:
"""A view into the available observation spaces of a service.
Example usage:
>>> env = gym.make("llvm-v0")
>>> env.reset()
>>> env.observation.spaces.keys()
["Autophase", "Ir"]
>>> env.observation.spaces["Autophase"].space
Box(56,)
>>> env.observation["Autophase"]
[0, 1, ..., 2]
    >>> env.observation["Ir"]
int main() {...}
"""
def __init__(
self,
raw_step: Callable[
[List[ActionType], List[ObservationType], List[RewardType]], StepType
],
spaces: List[ObservationSpace],
):
if not spaces:
raise ValueError("No observation spaces")
self.spaces: Dict[str, ObservationSpaceSpec] = {}
self._raw_step = raw_step
for i, s in enumerate(spaces):
self._add_space(ObservationSpaceSpec.from_proto(i, s))
def __getitem__(self, observation_space: str) -> ObservationType:
"""Request an observation from the given space.
:param observation_space: The observation space to query.
:return: An observation.
:raises KeyError: If the requested observation space does not exist.
:raises SessionNotFound: If :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` has not been called.
:raises ServiceError: If the backend service fails to compute the
observation, or reports that a terminal state has been reached.
"""
observation_space: ObservationSpaceSpec = self.spaces[observation_space]
observations, _, done, info = self._raw_step(
actions=[], observations=[observation_space], rewards=[]
)
if done:
# Computing an observation should never cause a terminal state since
# no action has been applied.
msg = f"Failed to compute observation '{observation_space.id}'"
if info.get("error_details"):
msg += f": {info['error_details']}"
raise ServiceError(msg)
if len(observations) != 1:
raise ServiceError(
f"Expected 1 '{observation_space.id}' observation "
f"but the service returned {len(observations)}"
)
return observations[0]
def _add_space(self, space: ObservationSpaceSpec):
"""Register a new space."""
self.spaces[space.id] = space
# Bind a new method to this class that is a callback to compute the
# given observation space. E.g. if a new space is added with ID
# `FooBar`, this observation can be computed using
# env.observation.FooBar().
setattr(self, space.id, lambda: self[space.id])
@deprecated(
version="0.2.1",
reason=(
"Use the derived_observation_spaces argument to CompilerEnv constructor. "
"See <https://github.com/facebookresearch/CompilerGym/issues/461>."
),
)
def add_derived_space(
self,
id: str,
base_id: str,
**kwargs,
) -> None:
"""Alias to :func:`ObservationSpaceSpec.make_derived_space()
<compiler_gym.views.ObservationSpaceSpec.make_derived_space>` that adds
the derived space to the observation view.
Example usage:
>>> env.observation.add_derived_space(
id="src_len",
base_id="src",
translate=lambda src: np.array([len(src)], dtype=np.int32),
shape=Box(shape=(1,), dtype=np.int32),
)
>>> env.observation["src_len"]
1029
:param id: The name of the new observation space.
:param base_id: The name of the observation space that this is derived
from.
:param \\**kwargs: Arguments passed to
:func:`ObservationSpaceSpec.make_derived_space
<compiler_gym.views.ObservationSpaceSpec.make_derived_space>`.
"""
base_space = self.spaces[base_id]
self._add_space(base_space.make_derived_space(id=id, **kwargs))
# NOTE(github.com/facebookresearch/CompilerGym/issues/461): This method will
# be renamed to add_derived_space() once the current method with that name
# is removed.
def add_derived_space_internal(
self,
id: str,
base_id: str,
**kwargs,
) -> None:
"""Internal API for adding a new observation space."""
base_space = self.spaces[base_id]
self._add_space(base_space.make_derived_space(id=id, **kwargs))
def __repr__(self):
return f"ObservationView[{', '.join(sorted(self.spaces.keys()))}]"
|
tests/auths/test_jwt.py | abersheeran/asgi-ratelimit | 136 | 11199492 | import jwt
import pytest
from ratelimit.auths import EmptyInformation
from ratelimit.auths.jwt import create_jwt_auth
@pytest.mark.parametrize(
"scope, user, group",
[
(
{
"headers": (
(
b"authorization",
b"Bearer "
+ jwt.encode(
{"user": "user", "group": "group"},
"test-key",
"HS256",
),
),
),
},
"user",
"group",
),
(
{
"headers": (
(
b"authorization",
b"Bearer "
+ jwt.encode(
{"user": "user", "group": "group"},
"test-key",
"HS512",
),
),
),
},
"user",
"group",
),
(
{
"headers": (
(
b"authorization",
b"Bearer " + jwt.encode({"user": "user"}, "test-key", "HS256"),
),
),
},
"user",
"default",
),
],
)
@pytest.mark.asyncio
async def test_jwt_auth(scope, user, group):
assert (await create_jwt_auth("test-key", ["HS256", "HS512"])(scope)) == (
user,
group,
)
@pytest.mark.parametrize(
"scope, user, group",
[
(
{
"headers": (
(
b"authorization",
b"Bearer "
+ jwt.encode({"user_id": "user"}, "test-key", "HS256"),
),
),
},
"user",
"default",
),
],
)
@pytest.mark.asyncio
async def test_jwt_auth_other_user_key(scope, user, group):
val = await create_jwt_auth("test-key", ["HS256", "HS512"], user_key="user_id")(
scope
)
assert val == (
user,
group,
)
@pytest.mark.parametrize(
"scope",
[
{"headers": ()},
{
"headers": (
(
b"wrongkey",
b"Bearer " + jwt.encode({"username": "user"}, "test-key", "HS256"),
),
),
},
{
"headers": (
(
b"authorization",
b"Bearer " + jwt.encode({"username": "user"}, "test-key", "HS256"),
),
),
},
],
)
@pytest.mark.asyncio
async def test_error(scope):
with pytest.raises(EmptyInformation):
await create_jwt_auth("test-key", ["HS256", "HS512"])(scope)
|
watchmen/database/table/oracle_table_definition.py | Insurance-Metrics-Measure-Advisory/watchman-data-connector | 125 | 11199523 | from sqlalchemy import MetaData, Table, Column, String, CLOB, Date, DateTime, Integer
from watchmen_boot.config.config import settings
metadata = MetaData()
def get_primary_key(table_name):
return get_pid(table_name)
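# For example, get_primary_key("topics") returns "topicId" and
# get_primary_key("pipelines") returns "pipelineId".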
def get_pid(table_name):
if table_name == 'topics':
return 'topicId'
elif table_name == 'console_space_subjects':
return 'subjectId'
elif table_name == 'pipelines':
return 'pipelineId'
elif table_name == 'users':
return 'userId'
elif table_name == 'console_dashboards':
return 'dashboardId'
elif table_name == 'pipeline_graph':
return 'pipelineGraphId'
elif table_name == 'console_spaces':
return 'connectId'
elif table_name == 'console_space_favorites':
return 'userId'
elif table_name == 'spaces':
return 'spaceId'
elif table_name == 'console_reports':
return 'reportId'
elif table_name == 'user_groups':
return 'userGroupId'
elif table_name == 'enums':
return 'enumId'
elif table_name == "console_space_last_snapshot":
return "userId"
elif table_name == "tenants":
return "tenantId"
elif table_name == "pats":
return "patId"
elif table_name == "data_sources":
return "dataSourceId"
elif table_name == "external_writer":
return "writerId"
elif table_name == "factor_index":
return "factorindexid"
else:
raise Exception("table_name does not exist {0}".format(table_name))
users_table = Table("users", metadata,
Column('userid', String(60), primary_key=True),
Column('name', String(45), nullable=False),
Column('nickname', String(45), nullable=True),
Column('password', String(100), nullable=True),
Column('is_active', String(5), nullable=True),
Column('groupids', CLOB, nullable=True),
Column('role', String(45), nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
Column('lastmodified', Date, nullable=True)
)
user_groups_table = Table("user_groups", metadata,
Column('usergroupid', String(60), primary_key=True),
Column('name', String(45), nullable=False),
Column('description', String(45), nullable=True),
Column('userids', CLOB, nullable=True),
Column('spaceids', CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
Column('lastmodified', Date, nullable=True)
)
external_writer_table = Table("external_writer", metadata,
Column('writerid', String(50), primary_key=True),
Column('writercode', String(50), nullable=False),
Column('type', String(50), nullable=False),
Column('pat', String(50), nullable=False),
Column('url', String(50), nullable=False),
Column('tenantid', String(60), nullable=True),
Column('createtime', String(50), nullable=True),
Column('lastmodified', Date, nullable=True)
)
console_space_last_snapshot_table = Table("console_space_last_snapshot", metadata,
Column('userid', String(60), primary_key=True),
Column('language', String(5), nullable=True),
Column('lastdashboardid', String(25), nullable=True),
Column('admindashboardid', String(25), nullable=True),
Column('favoritepin', String(5), nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
Column('lastmodified', Date, nullable=True)
)
console_dashboards_table = Table("console_dashboards", metadata,
Column('dashboardid', String(60), primary_key=True),
Column('name', String(25), nullable=False),
Column('reports', CLOB, nullable=True),
Column('paragraphs', CLOB, nullable=True),
Column('lastvisittime', String(25), nullable=False),
Column('userid', String(60), nullable=False),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
topics_table = Table("topics", metadata,
Column("topicid", String(60), primary_key=True),
Column("name", String(25), nullable=False),
Column("kind", String(10), nullable=True),
Column("type", String(10), nullable=True),
Column("description", String(250), nullable=True),
Column("factors", CLOB, nullable=True),
Column("datasourceid", String(60), nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
# Column('last_modified', DateTime, nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
enums_table = Table("enums", metadata,
Column("enumid", String(60), primary_key=True),
Column("name", String(25), nullable=False),
Column("description", String(25), nullable=True),
Column("parentenumid", String(60), nullable=True),
Column("items", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
spaces_table = Table("spaces", metadata,
Column("spaceid", String(60), primary_key=True),
Column("topicids", CLOB, nullable=True),
Column("groupids", CLOB, nullable=True),
Column("name", String(25), nullable=False),
Column("description", String(25), nullable=True),
Column("filters", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
# Column('last_modified', DateTime, nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
console_space_favorites_table = Table("console_space_favorites", metadata,
Column("userid", String(60), primary_key=True),
Column("connectedspaceids", CLOB, nullable=True),
Column("dashboardids", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
# Column('last_modified', DateTime, nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
console_space_graph_table = Table("console_space_graph", metadata,
Column("connectid", String(60), primary_key=True),
Column("topics", CLOB, nullable=True),
Column("subjects", CLOB, nullable=True),
Column("userid", String(60), nullable=False),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
# Column('last_modified', DateTime, nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
console_spaces_table = Table("console_spaces", metadata,
Column("spaceid", String(60), primary_key=True),
Column("topics", CLOB, nullable=True),
Column("groupids", CLOB, nullable=True),
Column("name", String(25), nullable=False),
Column("connectid", String(25), nullable=False),
Column("type", String(10), nullable=True),
Column('lastvisittime', DateTime, nullable=True),
Column("userid", String(60), nullable=True),
Column("subjectids", CLOB, nullable=True),
Column("istemplate", String(5), default=False),
Column("subjects", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
pipelines_table = Table("pipelines", metadata,
Column("pipelineid", String(60), primary_key=True),
Column("topicid", String(60), nullable=False),
Column("name", String(25), nullable=False),
Column("type", String(10), nullable=True),
Column("stages", CLOB, nullable=True),
Column("conditional", String(5), nullable=True),
Column("enabled", String(5), nullable=True),
Column("on", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column('createtime', String(50), nullable=True),
# Column('last_modified', DateTime, nullable=True),
Column('lastmodified', DateTime, nullable=True)
)
pipeline_graph_table = Table("pipeline_graph", metadata,
Column("pipelinegraphid", String(60), nullable=False),
Column("name", String(50), nullable=True),
Column("userid", String(60), nullable=False),
Column("topics", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(50), nullable=True)
)
console_space_subjects_table = Table("console_space_subjects", metadata,
Column("subjectid", String(60), primary_key=True),
Column("name", String(50), nullable=False),
Column("topiccount", Integer, nullable=True),
Column("graphicscount", Integer, nullable=True),
Column("reports", CLOB, nullable=True),
Column("reportids", CLOB, nullable=True),
Column("dataset", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column("lastvisittime", DateTime, nullable=True),
Column("createdat", String(50), nullable=True),
# Column('lastmodifytime', DateTime, nullable=True),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(50), nullable=True)
)
console_reports_table = Table("reports", metadata,
Column("reportid", String(60), primary_key=True),
Column("name", String(50), nullable=False),
Column("indicators", CLOB, nullable=True),
Column("dimensions", CLOB, nullable=True),
Column("funnels", CLOB, nullable=True),
Column("filters", CLOB, nullable=True),
Column("description", String(50), nullable=True),
Column("rect", CLOB, nullable=True),
Column("chart", CLOB, nullable=True),
Column('tenantid', String(60), nullable=False),
Column("createdat", String(50), nullable=True),
Column("lastvisittime", String(50), nullable=True),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(50), nullable=True),
Column('simulating', String(5), default=False),
Column('simulatedata', CLOB, nullable=True),
Column('simulatethumbnail', CLOB, nullable=True)
)
pats_table = Table("pats", metadata,
Column("patid", String(60), primary_key=True),
Column("tokenid", String(50), nullable=False),
Column("userid", String(50), nullable=False),
Column("username", String(50), nullable=False),
Column("tenantid", String(60), nullable=False),
Column("note", String(50), nullable=False),
Column("expired", Date, nullable=True),
Column("permissions", CLOB, nullable=True),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(50), nullable=True)
)
tenants_table = Table("tenants", metadata,
Column("tenantid", String(60), primary_key=True),
Column("name", String(50), nullable=True),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(50), nullable=True)
)
key_store_table = Table("key_stores", metadata,
Column("tenantid", String(50), primary_key=True),
Column("keyType", String(50), nullable=True),
Column("params", CLOB, nullable=True),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(50), nullable=True)
)
data_sources_table = Table("data_sources", metadata,
Column("datasourceid", String(60), primary_key=True),
Column("datasourcecode", String(50), nullable=False),
Column("datasourcetype", String(50), nullable=False),
Column("host", String(50), nullable=True),
Column("port", String(50), nullable=True),
Column("username", String(60), nullable=True),
Column("password", String(50), nullable=True),
Column("name", String(50), nullable=True),
Column("url", String(60), nullable=True),
Column("tenantid", String(50), nullable=False),
Column("params", CLOB, nullable=True),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(50), nullable=True)
)
if settings.INDEX_ON:
factor_index_table = Table("factor_index", metadata,
Column("factorindexid", String(50), primary_key=True),
Column("factorid", String(50), nullable=True),
Column("tenantid", String(50), nullable=True),
Column("topicid", String(50), nullable=True),
Column("name", String(50), nullable=True),
Column("label", String(50), nullable=True),
Column("topicname", String(50), nullable=True),
Column("description", String(50)),
Column("type", String(50)),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(60), nullable=True)
)
pipeline_index_table = Table("pipeline_index", metadata,
Column("pipelineindexid", String(50), primary_key=True),
Column("factorid", String(50)),
Column("pipelineid", String(50)),
Column("topicid", String(50)),
Column("stageid", String(50), nullable=True),
Column("unitid", String(50), nullable=True),
Column("actionid", String(50), nullable=True),
Column("mappingtofactorid", String(50), nullable=True),
Column("mappingtotopicid", String(50), nullable=True),
Column("sourcefromfactorid", String(50), nullable=True),
Column("sourcefromtopicid", String(50), nullable=True),
Column("pipelinename", String(60)),
Column("stagename", String(60)),
Column("unitname", String(60)),
Column("reftype", String(50), nullable=True),
Column("tenantid", String(50), nullable=True),
Column('lastmodified', DateTime, nullable=True),
Column('createtime', String(60), nullable=True)
)
def get_table_by_name(table_name):
if table_name == "users":
return users_table
elif table_name == "console_space_last_snapshot":
return console_space_last_snapshot_table
elif table_name == "console_dashboards":
return console_dashboards_table
elif table_name == "topics":
return topics_table
elif table_name == "enums":
return enums_table
elif table_name == "spaces":
return spaces_table
elif table_name == "console_space_favorites":
return console_space_favorites_table
elif table_name == "console_space_graph":
return console_space_graph_table
elif table_name == "console_spaces":
return console_spaces_table
elif table_name == "user_groups":
return user_groups_table
elif table_name == "pipelines":
return pipelines_table
elif table_name == "pipeline_graph":
return pipeline_graph_table
elif table_name == "console_space_subjects":
return console_space_subjects_table
elif table_name == "console_reports":
return console_reports_table
elif table_name == "pats":
return pats_table
elif table_name == "tenants":
return tenants_table
elif table_name == "data_sources":
return data_sources_table
elif table_name == "external_writer":
return external_writer_table
elif table_name == "key_stores":
return key_store_table
elif table_name == "factor_index":
return factor_index_table
elif table_name == "pipeline_index":
return pipeline_index_table
else:
raise Exception("table_name does not exist {}".format(table_name))
|
s3prl/downstream/sv_voxceleb1/dataset.py | hhhaaahhhaa/s3prl | 856 | 11199526 | <filename>s3prl/downstream/sv_voxceleb1/dataset.py
import os
import re
import sys
import time
import random
import pickle
import tqdm
import torch
import torchaudio
import numpy as np
from torch import nn
from pathlib import Path
from sox import Transformer
from torchaudio import load
from librosa.util import find_files
from joblib.parallel import Parallel, delayed
from torch.utils.data import DataLoader, Dataset
from torchaudio.sox_effects import apply_effects_file
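# Sox preprocessing applied to every utterance: downmix to mono, resample to
# 16 kHz, apply a -3 dB gain and trim leading/trailing silence.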
EFFECTS = [
["channels", "1"],
["rate", "16000"],
["gain", "-3.0"],
["silence", "1", "0.1", "0.1%", "-1", "0.1", "0.1%"],
]
# Voxceleb 2 Speaker verification
class SpeakerVerifi_train(Dataset):
def __init__(self, vad_config, key_list, file_path, meta_data, max_timestep=None, n_jobs=12):
self.roots = file_path
self.root_key = key_list
self.max_timestep = max_timestep
self.vad_c = vad_config
self.dataset = []
self.all_speakers = []
for index in range(len(self.root_key)):
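            # Trimmed waveform lengths are expensive to compute, so they are
            # cached per dataset root under .wav_lengths/ and reused afterwards.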
cache_path = Path(os.path.dirname(__file__)) / '.wav_lengths' / f'{self.root_key[index]}_length.pt'
cache_path.parent.mkdir(exist_ok=True)
root = Path(self.roots[index])
if not cache_path.is_file():
def trimmed_length(path):
wav_sample, _ = apply_effects_file(path, EFFECTS)
wav_sample = wav_sample.squeeze(0)
length = wav_sample.shape[0]
return length
wav_paths = find_files(root)
wav_lengths = Parallel(n_jobs=n_jobs)(delayed(trimmed_length)(path) for path in tqdm.tqdm(wav_paths, desc="Preprocessing"))
wav_tags = [Path(path).parts[-3:] for path in wav_paths]
torch.save([wav_tags, wav_lengths], str(cache_path))
else:
wav_tags, wav_lengths = torch.load(str(cache_path))
wav_paths = [root.joinpath(*tag) for tag in wav_tags]
speaker_dirs = ([f.stem for f in root.iterdir() if f.is_dir()])
self.all_speakers.extend(speaker_dirs)
for path, length in zip(wav_paths, wav_lengths):
if length > self.vad_c['min_sec']:
self.dataset.append(path)
self.all_speakers.sort()
self.speaker_num = len(self.all_speakers)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
path = self.dataset[idx]
wav, _ = apply_effects_file(str(path), EFFECTS)
wav = wav.squeeze(0)
length = wav.shape[0]
if self.max_timestep != None:
if length > self.max_timestep:
start = random.randint(0, int(length - self.max_timestep))
wav = wav[start : start + self.max_timestep]
tags = Path(path).parts[-3:]
utterance_id = "-".join(tags).replace(".wav", "")
label = self.all_speakers.index(tags[0])
return wav.numpy(), utterance_id, label
def collate_fn(self, samples):
return zip(*samples)
class SpeakerVerifi_test(Dataset):
def __init__(self, vad_config, file_path, meta_data):
self.root = file_path
self.meta_data = meta_data
self.necessary_dict = self.processing()
self.vad_c = vad_config
self.dataset = self.necessary_dict['pair_table']
def processing(self):
pair_table = []
with open(self.meta_data, "r") as f:
usage_list = f.readlines()
for pair in usage_list:
list_pair = pair.split()
            pair_1 = os.path.join(self.root, list_pair[1])
            pair_2 = os.path.join(self.root, list_pair[2])
            one_pair = [list_pair[0], pair_1, pair_2]
pair_table.append(one_pair)
return {
"spk_paths": None,
"total_spk_num": None,
"pair_table": pair_table
}
def __len__(self):
return len(self.necessary_dict['pair_table'])
def __getitem__(self, idx):
y_label, x1_path, x2_path = self.dataset[idx]
def path2name(path):
return Path("-".join((Path(path).parts)[-3:])).stem
x1_name = path2name(x1_path)
x2_name = path2name(x2_path)
wav1, _ = apply_effects_file(x1_path, EFFECTS)
wav2, _ = apply_effects_file(x2_path, EFFECTS)
wav1 = wav1.squeeze(0)
wav2 = wav2.squeeze(0)
return wav1.numpy(), wav2.numpy(), x1_name, x2_name, int(y_label[0])
def collate_fn(self, data_sample):
wavs1, wavs2, x1_names, x2_names, ylabels = zip(*data_sample)
all_wavs = wavs1 + wavs2
all_names = x1_names + x2_names
return all_wavs, all_names, ylabels
|
desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/terminal.py | kokosing/hue | 5,079 | 11199528 | <reponame>kokosing/hue
# -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('lightgray', 'darkgray'),
Comment: ('lightgray', 'darkgray'),
Comment.Preproc: ('teal', 'turquoise'),
Keyword: ('darkblue', 'blue'),
Keyword.Type: ('teal', 'turquoise'),
Operator.Word: ('purple', 'fuchsia'),
Name.Builtin: ('teal', 'turquoise'),
Name.Function: ('darkgreen', 'green'),
Name.Namespace: ('_teal_', '_turquoise_'),
Name.Class: ('_darkgreen_', '_green_'),
Name.Exception: ('teal', 'turquoise'),
Name.Decorator: ('darkgray', 'lightgray'),
Name.Variable: ('darkred', 'red'),
Name.Constant: ('darkred', 'red'),
Name.Attribute: ('teal', 'turquoise'),
Name.Tag: ('blue', 'blue'),
String: ('brown', 'brown'),
Number: ('darkblue', 'blue'),
Generic.Deleted: ('red', 'red'),
Generic.Inserted: ('darkgreen', 'green'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*purple*', '*fuchsia*'),
Generic.Error: ('red', 'red'),
Error: ('_red_', '_red_'),
}
class TerminalFormatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
"""
name = 'Terminal'
aliases = ['terminal', 'console']
filenames = []
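    # Illustrative usage (relies on pygments.highlight and a lexer):
    #   from pygments import highlight
    #   from pygments.lexers import PythonLexer
    #   print(highlight("print('hi')", PythonLexer(), TerminalFormatter(bg="dark")))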
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty():
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ansiformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ansiformat(color, spl[-1]))
else:
outfile.write(value)
|
grr/client/grr_response_client/client_actions/file_finder_utils/__init__.py | nkrios/grr | 4,238 | 11199530 | <gh_stars>1000+
#!/usr/bin/env python
"""Implementation of utilities used in the client-side file-finder."""
|
robot-server/robot_server/service/session/session_types/__init__.py | knownmed/opentrons | 235 | 11199534 | <filename>robot-server/robot_server/service/session/session_types/__init__.py
from .check_session import CheckSession # noqa: F401
from .tip_length_calibration import TipLengthCalibration # noqa: F401
from .deck_calibration_session import DeckCalibrationSession # noqa: F401
from .pipette_offset_calibration import PipetteOffsetCalibrationSession # noqa: F401
from .base_session import BaseSession, SessionMetaData # noqa: F401
from .protocol.session import ProtocolSession # noqa: F401
from .live_protocol.session import LiveProtocolSession # noqa: F401
|
pythran/graph.py | jeanlaroche/pythran | 1,647 | 11199573 | '''
Minimal directed graph replacement for networkx.DiGraph
This has the sole advantage of being a standalone file that doesn't bring any
dependency with it.
'''
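# Example usage (illustrative) of the small networkx subset implemented below:
#   g = DiGraph()
#   g.add_edge("a", "b"); g.add_edge("b", "c")
#   assert has_path(g, "a", "c")
#   assert list(all_simple_paths(g, "a", "c")) == [["a", "b", "c"]]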
class DiGraph(object):
def __init__(self):
# adjacency[i][j] = True means j is a successor of i
self._adjacency = {}
self._edges = {}
def successors(self, node):
return (n for n in self._adjacency[node])
def predecessors(self, node):
return (k for k, v in self._adjacency.items() if node in v)
def add_node(self, node):
self._adjacency.setdefault(node, set())
def add_edge(self, src, dest, **props):
self.add_node(dest)
self._adjacency.setdefault(src, set()).add(dest)
self._edges[(src, dest)] = props
@property
def edges(self):
return self._edges
def remove_edge(self, src, dest):
self._adjacency[src].remove(dest)
del self._edges[(src, dest)]
def __len__(self):
return len(self._adjacency)
def __iter__(self):
return iter(self._adjacency.keys())
def __contains__(self, value):
return value in self._adjacency
def __getitem__(self, node):
return self._adjacency[node]
class Unfeasible(RuntimeError):
pass
def has_path(graph, src, dest):
visited = set()
worklist = [src]
while worklist:
current = worklist.pop()
if current in visited:
continue
visited.add(current)
if dest in graph.successors(current):
return True
worklist.extend(graph.successors(current))
return False
# Copied verbatim from NetworkX 2.6.1
#
# NetworkX is distributed with the 3-clause BSD license.
#
# ::
#
# Copyright (C) 2004-2021, NetworkX Developers
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NetworkX Developers nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _all_simple_paths_graph(G, source, targets, cutoff):
visited = dict.fromkeys([source])
stack = [iter(G[source])]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.popitem()
elif len(visited) < cutoff:
if child in visited:
continue
if child in targets:
yield list(visited) + [child]
visited[child] = None
if targets - set(visited.keys()): # expand stack until find all targets
stack.append(iter(G[child]))
else:
visited.popitem() # maybe other ways to child
else: # len(visited) == cutoff:
for target in (targets & (set(children) | {child})) - set(visited.keys()):
yield list(visited) + [target]
stack.pop()
visited.popitem()
def all_simple_paths(graph, src, target):
return _all_simple_paths_graph(graph, src, {target}, len(graph) - 1)
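# Hedged usage sketch (not part of the original module): exercising the minimal
# DiGraph API above the same way networkx-based callers would.
if __name__ == '__main__':
    g = DiGraph()
    g.add_edge('a', 'b')
    g.add_edge('b', 'c')
    g.add_edge('a', 'c')
    assert has_path(g, 'a', 'c')
    print(sorted(all_simple_paths(g, 'a', 'c')))  # [['a', 'b', 'c'], ['a', 'c']]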
|
pypiserver/__init__.py | NoOneSurvives/docker-pypi-server | 1,257 | 11199584 |
import functools
import pathlib
import re as _re
import sys
import typing as t
from pypiserver.bottle import Bottle
from pypiserver.config import Config, RunConfig, strtobool
version = __version__ = "2.0.0dev1"
__version_info__ = tuple(_re.split("[.-]", __version__))
__updated__ = "2020-10-11 11:23:15"
__title__ = "pypiserver"
__summary__ = "A minimal PyPI server for use with pip/easy_install."
__uri__ = "https://github.com/pypiserver/pypiserver"
identity = lambda x: x
def backwards_compat_kwargs(kwargs: dict, warn: bool = True) -> dict:
"""Return a dict with deprecated kwargs converted to new kwargs.
:param kwargs: the incoming kwargs to convert
:param warn: whether to output a warning to stderr if there are deprecated
kwargs found in the incoming kwargs
"""
# A mapping of deprecated kwargs to a 2-tuple of their corresponding updated
# kwarg and a function to convert the value of the deprecated kwarg to a
# value for the new kwarg. `identity` is just a function that returns
# whatever it is passed and is used in cases where the only change from
# a legacy kwarg is its name.
backwards_compat = {
"authenticated": ("authenticate", identity),
"passwords": ("<PASSWORD>", identity),
# `root` could be a string or an array of strings. Handle both cases,
# converting strings to Path instances.
"root": (
"roots",
lambda root: [
# Convert strings to absolute Path instances
pathlib.Path(r).expanduser().resolve()
for r in ([root] if isinstance(root, str) else root)
],
),
# `redirect_to_fallback` was changed to `disable_fallback` for clearer
# use as a flag to disable the default behavior. Since its behavior
# is the opposite, we negate it.
"redirect_to_fallback": (
"disable_fallback",
lambda redirect: not redirect,
),
"server": ("server_method", identity),
# `welcome_msg` now is just provided as text, so that anyone using
# pypiserver as a library doesn't need to worry about distributing
# files if they don't need to. If we're still passed an old-style
# `welcome_file` argument, we go ahead and resolve it to an absolute
# path and read the text.
"welcome_file": (
"welcome_msg",
lambda p: pathlib.Path(p).expanduser().resolve().read_text(),
),
}
# Warn the user if they're using any deprecated arguments
if warn and any(k in backwards_compat for k in kwargs):
# Make nice instructions like `Please replace the following:
# 'authenticated' with 'authenticate'` and print to stderr.
replacement_strs = (
f"'{k}' with '{backwards_compat[k][0]}'"
for k in filter(lambda k: k in kwargs, backwards_compat)
)
warn_str = (
"You are using deprecated arguments. Please replace the following: \n"
f" {', '.join(replacement_strs)}"
)
print(warn_str, file=sys.stderr)
    # Create an iterable of 2-tuples to collect into the updated dictionary. Each
# item will either be the existing key-value pair from kwargs, or, if the
# keyword is a legacy keyword, the new key and potentially adjusted value
    # for that keyword. Note that depending on the order the arguments are
# specified, this _could_ mean an updated legacy keyword could override
# a new argument if that argument is also specified. However, in that
# case, our updated kwargs dictionary would have a different number of
# keys compared to our incoming dictionary, so we check for that case
# below.
rv_iter = (
(
(k, v)
if k not in backwards_compat
else (backwards_compat[k][0], backwards_compat[k][1](v))
)
for k, v in kwargs.items()
)
updated_kwargs = dict(rv_iter)
# If our dictionaries have different lengths, we must have gotten duplicate
# legacy/modern keys. Figure out which keys were dupes and throw an error.
if len(updated_kwargs) != len(kwargs):
legacy_to_modern = {k: v[0] for k, v in backwards_compat.items()}
dupes = [
(k, v)
for k, v in legacy_to_modern.items()
if k in kwargs and v in kwargs
]
raise ValueError(
"Keyword arguments for pypiserver app() constructor contained "
"duplicate legacy and modern keys. Duplicates are shown below, in "
"the form (legacy_key, modern_key):\n"
f"{dupes}"
)
return updated_kwargs
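# Hedged illustration (not part of pypiserver) of the conversion performed by
# backwards_compat_kwargs; the concrete values are invented for the example:
#
#     >>> backwards_compat_kwargs(
#     ...     {"redirect_to_fallback": False, "authenticated": ["update"]}, warn=False)
#     {'disable_fallback': True, 'authenticate': ['update']}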
def app(**kwargs: t.Any) -> Bottle:
"""Construct a bottle app running pypiserver.
:param kwds: Any overrides for defaults. Any property of RunConfig
(or its base), defined in `pypiserver.config`, may be overridden.
"""
config = Config.default_with_overrides(**backwards_compat_kwargs(kwargs))
return app_from_config(config)
def app_from_config(config: RunConfig) -> Bottle:
"""Construct a bottle app from the provided RunConfig."""
# The _app module instantiates a Bottle instance directly when it is
# imported. That is `_app.app`. We directly mutate some global variables
# on the imported `_app` module so that its endpoints will behave as
# we expect.
_app = __import__("_app", globals(), locals(), ["."], 1)
# Because we're about to mutate our import, we pop it out of the imported
# modules map, so that any future imports do not receive our mutated version
sys.modules.pop("pypiserver._app", None)
_app.config = config
# Add a reference to our config on the Bottle app for easy access in testing
# and other contexts.
_app.app._pypiserver_config = config
return _app.app
T = t.TypeVar("T")
def paste_app_factory(_global_config, **local_conf):
"""Parse a paste config and return an app.
The paste config is entirely strings, so we need to parse those
strings into values usable for the config, if they're present.
"""
def to_bool(val: t.Optional[str]) -> t.Optional[bool]:
"""Convert a string value, if provided, to a bool."""
return val if val is None else strtobool(val)
def to_int(val: t.Optional[str]) -> t.Optional[int]:
"""Convert a string value, if provided, to an int."""
return val if val is None else int(val)
def to_list(
val: t.Optional[str],
sep: str = " ",
transform: t.Callable[[str], T] = str.strip,
) -> t.Optional[t.List[T]]:
"""Convert a string value, if provided, to a list.
:param sep: the separator between items in the string representation
of the list
:param transform: an optional function to call on each string item of
the list
"""
if val is None:
return val
return list(filter(None, map(transform, val.split(sep))))
def _make_root(root: str) -> pathlib.Path:
"""Convert a specified string root into an absolute Path instance."""
return pathlib.Path(root.strip()).expanduser().resolve()
# A map of config keys we expect in the paste config to the appropriate
# function to parse the string config value. This map includes both
# current and legacy keys.
maps = {
"cache_control": to_int,
"roots": functools.partial(to_list, sep="\n", transform=_make_root),
# root is a deprecated argument for roots
"root": functools.partial(to_list, sep="\n", transform=_make_root),
"disable_fallback": to_bool,
# redirect_to_fallback is a deprecated argument for disable_fallback
"redirect_to_fallback": to_bool,
"overwrite": to_bool,
"authenticate": functools.partial(to_list, sep=" "),
# authenticated is a deprecated argument for authenticate
"authenticated": functools.partial(to_list, sep=" "),
"verbosity": to_int,
}
# First, convert values from strings to whatever types we need, or leave
# them as strings if there's no mapping function available for them.
mapped_conf = {k: maps.get(k, identity)(v) for k, v in local_conf.items()}
# Convert any legacy key/value pairs into their modern form.
updated_conf = backwards_compat_kwargs(mapped_conf)
return app(**updated_conf)
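# Hedged illustration (not part of pypiserver): with a paste .ini section along
# the lines of
#
#     [app:main]
#     roots = /data/packages
#         /data/more-packages
#     authenticate = update download
#     overwrite = true
#
# the string values are parsed via `maps` above into a list of absolute Paths, a
# list of actions and a bool before being handed to `app()`.  The section body
# and paths are made up for the example.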
|
server.py | yutiansut/paperbroker | 227 | 11199593 | from flask import Flask, request, send_from_directory
from paperbroker import PaperBroker
from paperbroker.orders import Order
import ujson
# initialize a PaperBroker with defaults
broker = PaperBroker()
# set the project root directory as the static folder, you can set others.
app = Flask(__name__, static_url_path='')
# helper function to return json
def json(data):
global app
response = app.response_class(
response=ujson.dumps(data).replace('NaN', 'null'),
status=200,
mimetype='application/json'
)
return response
# begin routes
@app.route("/quotes/<asset>", methods=['GET'])
def get_quote(asset:str):
return json(broker.get_quote(asset))
@app.route("/quotes/<asset>/options/<expiration_date>", methods=['GET'])
def get_options(asset=None, expiration_date=None, only_priceable=True):
if only_priceable:
return json([_ for _ in broker.get_options(asset, expiration_date) if _.is_priceable()])
else:
return json([_ for _ in broker.get_options(asset, expiration_date)])
@app.route("/expirations/<asset>", methods=['GET'])
def get_expiration_dates(asset=None):
return json(broker.get_expiration_dates(asset))
@app.route("/accounts", methods=['POST'])
@app.route("/accounts/create", methods=['GET'])
def open_account():
return json(broker.open_account())
@app.route("/accounts/<account_id>", methods=['GET'])
def get_account(account_id: str = None):
return json(broker.get_account(account_id=account_id))
@app.route("/accounts/<account_id>/orders/buy_to_open/<asset>", methods=['POST'])
def buy_to_open(account_id:str = None, asset:str = None):
quantity = int(request.args.get('quantity', 1))
simulate = not ( not (request.args.get('simulate', False) ) )
return json(broker.buy_to_open(account = broker.get_account(account_id=account_id), asset=asset, quantity=quantity, simulate=simulate))
@app.route("/accounts/<account_id>/orders/sell_to_open/<asset>", methods=['POST'])
def sell_to_open(account_id:str = None, asset:str = None):
quantity = int(request.args.get('quantity', 1))
simulate = not ( not (request.args.get('simulate', False) ) )
return json(broker.sell_to_open(account = broker.get_account(account_id=account_id), asset=asset, quantity=quantity, simulate=simulate))
@app.route("/accounts/<account_id>/orders/buy_to_close/<asset>", methods=['POST'])
def buy_to_close(account_id:str = None, asset:str = None):
quantity = int(request.args.get('quantity', 1))
simulate = not ( not (request.args.get('simulate', False) ) )
return json(broker.buy_to_close(account = broker.get_account(account_id=account_id), asset=asset, quantity=quantity, simulate=simulate))
@app.route("/accounts/<account_id>/orders/sell_to_close/<asset>", methods=['POST'])
def sell_to_close(account_id:str = None, asset:str = None):
quantity = int(request.args.get('quantity', 1))
simulate = not ( not (request.args.get('simulate', False) ) )
return json(broker.sell_to_close(account = broker.get_account(account_id=account_id), asset=asset, quantity=quantity, simulate=simulate))
@app.route("/accounts/<account_id>/positions/liquidate", methods=['POST'])
def liquidate_account_positions(account_id:str, positions=None, simulate=None):
simulate = (not ( not (request.args.get('simulate', False) ) )) if simulate is not None else simulate
account = broker.get_account(account_id=account_id)
return json(broker.close_positions(account=account, positions=account.positions, simulate=simulate))
@app.route("/accounts/<account_id>/orders/simulate", methods=['POST'])
@app.route("/accounts/<account_id>/orders/create/simulate", methods=['GET'])
def simulate_order(account_id: str):
return enter_order(account_id, simulate=True)
@app.route("/accounts/<account_id>/orders", methods=['POST'])
@app.route("/accounts/<account_id>/orders/create", methods=['GET'])
def enter_order(account_id: str, simulate=None):
simulate = not(not(request.args.get('simulate', False))) if simulate is None else simulate
order = Order()
for x in range(4):
if request.args.get('legs[{}][asset]'.format(x-1), None) is not None:
asset = request.args.get('legs[{}][asset]'.format(x-1), None)
order_type = request.args.get('legs[{}][order_type]'.format(x-1), None)
quantity = request.args.get('legs[{}][quantity]'.format(x-1), None)
if order_type is None:
raise Exception('order_type is a required field')
if quantity is None:
raise Exception('quantity is a required field')
order.add_leg(asset=asset, order_type=order_type, quantity=quantity)
account = broker.get_account(account_id=account_id)
return json(broker.enter_order(account = account, order=order, simulate=simulate))
@app.route('/<path:path>')
def send_static(path):
return send_from_directory('static', path or 'index.html')
@app.route('/')
def send_index():
return send_from_directory('static', 'index.html')
if __name__ == "__main__":
port = 8231
app.debug = False
print("PaperBroker Flask Server is starting on localhost:{}".format(port))
app.run(host = "127.0.0.1", port = port, debug=False)
|
py/torch_tensorrt/fx/test/passes/test_fuse_permute_linear_trt.py | NVIDIA/Torch-TensorRT | 430 | 11199628 |
# Owner(s): ["oncall: gpu_enablement"]
import torch
import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt.fx.passes.lower_basic_pass import (
fuse_permute_linear,
trt_transposed_linear,
)
from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase
class TestFusePermuteLinear(AccTestCase):
def test_fuse_permute_linear(self):
class TestModule(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
return self.linear(x.permute(0, 2, 1))
inputs = [torch.randn(6, 10, 20)]
a = TestModule(10, 30)
self.run_test(
TestModule(10, 30),
inputs,
{trt_transposed_linear},
apply_passes=[fuse_permute_linear],
)
def test_fuse_permute_linear_keep_permute(self):
"""
        Fusion while keeping the permute node, since the permute has more than one consumer
"""
class TestModule(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
y = x.permute(0, 2, 1)
return self.linear(y), y
inputs = [torch.randn(6, 10, 20)]
a = TestModule(10, 30)
self.run_test(
TestModule(10, 30),
inputs,
{acc_ops.permute, trt_transposed_linear},
apply_passes=[fuse_permute_linear],
)
def test_multi_fuse_permute_linear(self):
"""
Fusion when permute output is shared by multiple linears
"""
class TestModule(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear1 = torch.nn.Linear(in_features, out_features)
self.linear2 = torch.nn.Linear(in_features, out_features)
def forward(self, x):
y = x.permute(0, 2, 1)
return self.linear1(y) + self.linear2(y)
inputs = [torch.randn(8, 10, 20)]
a = TestModule(10, 30)
self.run_test(
TestModule(10, 30),
inputs,
{trt_transposed_linear},
apply_passes=[fuse_permute_linear],
)
if __name__ == "__main__":
run_tests()
|
recipes/sota/2019/lm_analysis/filter_segmentations.py | Zilv1128/test1 | 5,921 | 11199718 |
import sys
from collections import defaultdict
def count(MIN_SIL_LENGTH, align_file):
lines = []
with open(align_file) as fin:
lines = fin.readlines()
res = {}
res["word_counter"] = [0] * 100 # number of word in each small chunk
res["chunk_counter"] = [0] * 100 # number of small chunk per audio
stat = defaultdict(list)
good_samples = []
for line in lines:
sp = line.split("\t")
# filename = sp[0]
alignments = sp[1].strip().split("\\n")
# Parse the alignments
chunk_starts = [0]
chunk_ends = []
words = []
cur_words = []
cur_end = 0
for i, alignment in enumerate(alignments):
sp = alignment.split()
begin = float(sp[2])
length = float(sp[3])
word = sp[4]
cur_end = begin + length
if i == 0:
continue
if word == "$":
if length > MIN_SIL_LENGTH:
chunk_ends.append(cur_end)
chunk_starts.append(cur_end)
words.append(" ".join(cur_words))
cur_words = []
continue
cur_words.append(word)
if len(cur_words) > 0:
chunk_ends.append(cur_end)
words.append(" ".join(cur_words))
else:
chunk_starts.pop()
# res
good = True
n_chunk = len(words)
# filter if n_segments == 1
if n_chunk < 2:
good = False
res["chunk_counter"][n_chunk] += 1
for word_chunk in words:
n_word = len(word_chunk.split())
res["word_counter"][n_word] += 1
stat[n_chunk].append(n_word)
# filter if number of words in a segment > 6
if n_word > 6:
good = False
if good:
good_samples.append(line)
print(len(good_samples))
return res, stat, good_samples
if __name__ == "__main__":
align_file = sys.argv[1]
original_file = sys.argv[2]
res, data, samples = count(0.13, align_file)
print(res)
fnames = set([line.strip().split("\t")[0].split("/")[-1] for line in samples])
# prepare original filtered file
with open(original_file, "r") as f, open(
"original.filtered_chunk_g1_ngrams_le6.lst", "w"
) as fout:
for line in f:
if line.split(" ")[1].split("/")[-1] in fnames:
fout.write(line)
with open(align_file + ".filtered_chunk_g1_ngrams_le6", "w") as f:
for sample in samples:
f.write(sample)
|
netutils_linux_monitoring/layout.py | henkaru/netutils-linux | 749 | 11199720 |
# coding=utf-8
""" Everything about console output's layout """
from prettytable import PrettyTable
from six import print_
def make_table(header, align_map=None, rows=None):
""" Wrapper for pretty table """
table = PrettyTable()
table.horizontal_char = table.vertical_char = table.junction_char = ' '
try:
table.field_names = header
except Exception as err:
print_(header)
raise err
if align_map:
for field, align in zip(header, align_map):
table.align[field] = align
if rows:
for row in rows:
if len(row) < len(table.field_names):
continue
try:
table.add_row(row)
except Exception as err:
print_('fields:', table.field_names)
print_('row:', row)
print_('rows:', rows)
raise err
return table
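# Hedged usage sketch (not part of the original module); the interface names and
# numbers are made up for the example.
if __name__ == '__main__':
    demo = make_table(['interface', 'rx', 'tx'], ['l', 'r', 'r'],
                      [['eth0', '1024', '2048'], ['eth1', '0', '0']])
    print_(demo)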
|
doc/en/example/fixtures/test_fixtures_order_scope.py | markshao/pytest | 9,225 | 11199781 | import pytest
@pytest.fixture(scope="session")
def order():
return []
@pytest.fixture
def func(order):
order.append("function")
@pytest.fixture(scope="class")
def cls(order):
order.append("class")
@pytest.fixture(scope="module")
def mod(order):
order.append("module")
@pytest.fixture(scope="package")
def pack(order):
order.append("package")
@pytest.fixture(scope="session")
def sess(order):
order.append("session")
class TestClass:
def test_order(self, func, cls, mod, pack, sess, order):
assert order == ["session", "package", "module", "class", "function"]
|
dizoo/multiagent_particle/config/cooperative_navigation_qtran_config.py | sailxjx/DI-engine | 464 | 11199790 | from copy import deepcopy
from ding.entry import serial_pipeline
from easydict import EasyDict
n_agent = 5
collector_env_num = 4
evaluator_env_num = 2
num_landmarks = n_agent
main_config = dict(
env=dict(
num_landmarks=num_landmarks,
max_step=100,
n_agent=n_agent,
collector_env_num=collector_env_num,
evaluator_env_num=evaluator_env_num,
manager=dict(shared_memory=False, ),
n_evaluator_episode=5,
stop_value=0,
),
policy=dict(
model=dict(
agent_num=n_agent,
obs_shape=2 + 2 + (n_agent - 1) * 2 + num_landmarks * 2,
global_obs_shape=n_agent * 2 + num_landmarks * 2 + n_agent * 2,
action_shape=5,
hidden_size_list=[128],
embedding_size=64,
lstm_type='gru',
dueling=False,
),
learn=dict(
update_per_collect=100,
batch_size=32,
learning_rate=0.0005,
double_q=True,
target_update_theta=0.001,
discount_factor=0.99,
td_weight=1,
opt_weight=0.1,
nopt_min_weight=0.0001,
),
collect=dict(
n_sample=600,
unroll_len=16,
env_num=collector_env_num,
),
eval=dict(env_num=evaluator_env_num, ),
other=dict(
eps=dict(
type='exp',
start=1.0,
end=0.05,
decay=100000,
),
replay_buffer=dict(
replay_buffer_size=15000,
# (int) The maximum reuse times of each data
max_reuse=1e+9,
max_staleness=1e+9,
),
),
),
)
main_config = EasyDict(main_config)
create_config = dict(
env=dict(
import_names=['dizoo.multiagent_particle.envs.particle_env'],
type='cooperative_navigation',
),
env_manager=dict(type='subprocess'),
policy=dict(type='qtran'),
)
create_config = EasyDict(create_config)
cooperative_navigation_qtran_config = main_config
cooperative_navigation_qtran_create_config = create_config
def train(args):
config = [main_config, create_config]
serial_pipeline(config, seed=args.seed)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', '-s', type=int, default=0)
args = parser.parse_args()
train(args)
|
neptune/new/attributes/series/series.py | Raalsky/neptune-client | 254 | 11199813 |
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import time
from typing import List, Optional, TypeVar, Generic, Union, Iterable
from neptune.new.internal.operation import Operation
from neptune.new.internal.utils import verify_type, is_collection
from neptune.new.types.series.series import Series as SeriesVal
from neptune.new.attributes.attribute import Attribute
Val = TypeVar("Val", bound=SeriesVal)
Data = TypeVar("Data")
class Series(Attribute, Generic[Val, Data]):
def clear(self, wait: bool = False) -> None:
self._clear_impl(wait)
@abc.abstractmethod
def _get_log_operations_from_value(
self, value: Val, step: Optional[float], timestamp: float
) -> List[Operation]:
pass
# pylint: disable=unused-argument
# pylint: disable=assignment-from-none
def _get_config_operation_from_value(self, value: Val) -> Optional[Operation]:
return None
@abc.abstractmethod
def _get_clear_operation(self) -> Operation:
pass
@abc.abstractmethod
def _data_to_value(self, values: Iterable, **kwargs) -> Val:
pass
@abc.abstractmethod
def _is_value_type(self, value) -> bool:
pass
def assign(self, value, wait: bool = False) -> None:
if not self._is_value_type(value):
value = self._data_to_value(value)
clear_op = self._get_clear_operation()
config_op = self._get_config_operation_from_value(value)
with self._container.lock():
if config_op:
self._enqueue_operation(config_op, wait=False)
if not value.values:
self._enqueue_operation(clear_op, wait=wait)
else:
self._enqueue_operation(clear_op, wait=False)
ts = time.time()
ops = self._get_log_operations_from_value(value, None, ts)
for op in ops:
self._enqueue_operation(op, wait=wait)
def log(
self,
value: Union[Data, Iterable[Data]],
step: Optional[float] = None,
timestamp: Optional[float] = None,
wait: bool = False,
**kwargs
) -> None:
if is_collection(value):
if step is not None and len(value) > 1:
raise ValueError(
"Collection of values are not supported for explicitly defined 'step'."
)
value = self._data_to_value(value, **kwargs)
else:
value = self._data_to_value([value], **kwargs)
if step is not None:
verify_type("step", step, (float, int))
if timestamp is not None:
verify_type("timestamp", timestamp, (float, int))
if not timestamp:
timestamp = time.time()
ops = self._get_log_operations_from_value(value, step, timestamp)
with self._container.lock():
for op in ops:
self._enqueue_operation(op, wait)
def _clear_impl(self, wait: bool = False) -> None:
op = self._get_clear_operation()
with self._container.lock():
self._enqueue_operation(op, wait)
|
scripts/ltr_msmarco-passage/append_d2q_to_collection_jsonl.py | keleog/pyserini | 451 | 11199828 |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
from pyserini.analysis import Analyzer, get_lucene_analyzer
"""
append d2q prediction as an extra field to collection jsonl
"""
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Appends d2q predictions as an extra field to an Anserini jsonl collection.')
    parser.add_argument('--collection_path', required=True, help='Anserini jsonl collection file')
parser.add_argument('--predictions', required=True, help='File containing predicted queries.')
parser.add_argument('--output_folder', required=True, help='output folder')
parser.add_argument('--max_docs_per_file', default=1000000, type=int,
help='maximum number of documents in each jsonl file.')
args = parser.parse_args()
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder)
analyzer = Analyzer(get_lucene_analyzer())
print('Converting collection...')
file_index = 0
new_words = 0
total_words = 0
with open(args.collection_path) as f_corpus, open(args.predictions) as f_pred:
for i, (line_doc, line_pred) in enumerate(zip(f_corpus, f_pred)):
# Write to a new file when the current one reaches maximum capacity.
if i % args.max_docs_per_file == 0:
if i > 0:
output_jsonl_file.close()
output_path = os.path.join(args.output_folder, f'docs{file_index:02d}.json')
output_jsonl_file = open(output_path, 'w')
file_index += 1
doc_json = json.loads(line_doc)
pred_text = line_pred.rstrip()
predict_text = pred_text + ' '
analyzed = analyzer.analyze(predict_text)
for token in analyzed:
assert ' ' not in token
predict = ' '.join(analyzed)
doc_json['predict'] = predict
output_jsonl_file.write(json.dumps(doc_json) + '\n')
if i % 100000 == 0:
print('Converted {} docs in {} files'.format(i, file_index))
output_jsonl_file.close()
print('Done!') |
aws-stackreference-architecture/application/src/backend/helloworld.py | spara/examples | 1,628 | 11199871 |
from flask import Flask, request, render_template, jsonify
import pg8000
import random
import os
import requests
hash = random.getrandbits(128)
app = Flask(__name__)
@app.route('/')
def helloworld():
return jsonify(
message="hello world",
hash=str(hash),
postgres="please use the /postgres endpoint"
)
@app.route('/postgres')
def postgres():
conn = pg8000.connect(
host=os.environ['DB_HOST'],
port=int(os.environ['DB_PORT']),
user=os.environ['DB_USERNAME'],
password=os.environ['DB_PASSWORD'],
database=os.environ['DB_NAME']
)
cursor = conn.cursor()
cursor.execute("SELECT version();")
results = cursor.fetchall()
return jsonify(
hash=str(hash),
postgres=results
)
@app.route('/internet')
def connect_to_internet():
hostname = "google.com" # example
response = os.system("ping -c 1 " + hostname)
# and then check the response...
if response == 0:
return ("I CAN connect to " + hostname)
else:
return ("I CAN'T connect to " + hostname)
@app.route('/env-variables')
def list_all_env_variables():
html = ''
for key in os.environ.keys():
html = html + '<p>' + key + ": " + os.environ[key] + '<p>'
return html
@app.route('/public-ip')
def get_my_public_ip():
html = ''
external_ip = requests.get('https://jsonip.com/').json()['ip']
html = '<p>my external IP is: ' + \
external_ip + ' (source: jsonip.com) </p>'
return html
@app.route('/killme')
def kill_app():
shutdown_server()
return '<p>you have killed me :(<p><p>' + str(hash) + '<p>'
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=80)
|
chapter2_training/cifar10/train/src/configurations.py | sudabon/ml-system-in-actions | 133 | 11199898 | import os
from logging import getLogger
from src.constants import CONSTANTS, PLATFORM_ENUM
logger = getLogger(__name__)
class PlatformConfigurations:
platform = os.getenv("PLATFORM", PLATFORM_ENUM.DOCKER.value)
if not PLATFORM_ENUM.has_value(platform):
raise ValueError(f"PLATFORM must be one of {[v.value for v in PLATFORM_ENUM.__members__.values()]}")
class PreprocessConfigurations:
train_files = [
"data_batch_1",
"data_batch_2",
"data_batch_3",
"data_batch_4",
"data_batch_5",
]
test_files = ["test_batch"]
classes = {
0: "plane",
1: "car",
2: "bird",
3: "cat",
4: "deer",
5: "dog",
6: "frog",
7: "horse",
8: "ship",
9: "truck",
}
class ModelConfigurations:
pass
logger.info(f"{PlatformConfigurations.__name__}: {PlatformConfigurations.__dict__}")
logger.info(f"{PreprocessConfigurations.__name__}: {PreprocessConfigurations.__dict__}")
logger.info(f"{ModelConfigurations.__name__}: {ModelConfigurations.__dict__}")
|
agentnet/objective/base.py | mraihan19/AgentNet | 337 | 11199900 | from __future__ import division, print_function, absolute_import
import theano.tensor as T
import theano
from ..utils.format import check_list, unpack_list
class BaseObjective(object):
"""
    Instance that:
    - determines the reward for each action the agent takes, given the environment state and the agent's action.
"""
def reset(self, batch_size):
"""Performs this action each time a new session [batch] is loaded
batch size: size of the new batch
"""
pass
def get_reward(self, last_environment_states, agent_actions, batch_id):
"""WARNING! This function is computed on a single session, not on a batch!
Reward given for taking the action in current environment state.
:param last_environment_states: Environment state before taking action.
:type last_environment_states: float[time_i, memory_id]
:param agent_actions: Agent action at this tick.
:type agent_actions: int[time_i]
:param batch_id: Session id.
:type batch_id: int
:return: Reward for taking action.
:rtype: float[time_i]
"""
raise NotImplementedError
def get_reward_sequences(self, env_state_sessions, agent_action_sessions):
"""Computes the rewards given to agent at each time step for each batch.
:param env_state_sessions: Environment state [batch_i,seq_i,state_units] history for all sessions.
:type env_state_sessions: theano tensor [batch_i,seq_i,state_units]
:param agent_action_sessions: Actions chosen by agent at each tick for all sessions.
:type agent_action_sessions: int[batch_i,seq_i]
:return rewards: What reward was given to an agent for corresponding action from state in that batch.
:rtype: float[batch_i,seq_i]
"""
env_state_sessions = check_list(env_state_sessions)
n_states = len(env_state_sessions)
agent_action_sessions = check_list(agent_action_sessions)
n_actions = len(agent_action_sessions)
def compute_reward(batch_i, *args):
session_states, session_actions = unpack_list(args, [n_states, n_actions])
return self.get_reward(session_states, session_actions, batch_i)
sequences = [T.arange(agent_action_sessions[0].shape[0], ), ] + env_state_sessions + agent_action_sessions
rewards, updates = theano.map(compute_reward, sequences=sequences)
assert len(updates) == 0
return rewards.reshape(agent_action_sessions[0].shape) # reshape bach to original
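# Hedged sketch (not part of AgentNet): a toy objective that pays a reward of 1
# at every tick where the agent picked action 0 and ignores the environment
# state.  As in get_reward_sequences above, states and actions arrive as lists
# with one tensor per state/action variable.
class RewardForActionZero(BaseObjective):
    def get_reward(self, last_environment_states, agent_actions, batch_id):
        actions = agent_actions[0]  # assumes a single int[time_i] action variable
        return T.cast(T.eq(actions, 0), "float32")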
|
corehq/motech/fhir/tests/test_models.py | akashkj/commcare-hq | 471 | 11199904 | import doctest
from contextlib import contextmanager
from django.core.exceptions import ValidationError
from django.db import IntegrityError, transaction
from django.db.models import ProtectedError
from django.test import TestCase
from nose.tools import assert_in
from corehq.apps.data_dictionary.models import CaseProperty, CaseType
from corehq.apps.users.models import CommCareUser
from corehq.motech.const import IMPORT_FREQUENCY_DAILY
from corehq.motech.exceptions import ConfigurationError
from corehq.motech.fhir import models
from corehq.motech.models import ConnectionSettings
from corehq.motech.value_source import CaseProperty as CasePropertyValueSource
from corehq.motech.value_source import ValueSource
from ..const import (
FHIR_VERSION_4_0_1,
OWNER_TYPE_GROUP,
OWNER_TYPE_LOCATION,
OWNER_TYPE_USER,
)
from ..models import (
FHIRImportConfig,
FHIRImportResourceProperty,
FHIRImportResourceType,
FHIRResourceProperty,
FHIRResourceType,
ResourceTypeRelationship,
)
DOMAIN = 'test-domain'
class TestCaseWithConnectionSettings(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.conn = ConnectionSettings.objects.create(
domain=DOMAIN,
name='Test ConnectionSettings',
url='https://example.com/api/',
)
@classmethod
def tearDownClass(cls):
cls.conn.delete()
super().tearDownClass()
class TestFHIRImportConfig(TestCaseWithConnectionSettings):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = CommCareUser(
domain=DOMAIN,
username=f'<EMAIL>',
)
cls.user.save()
@classmethod
def tearDownClass(cls):
cls.user.delete(DOMAIN, deleted_by=None)
super().tearDownClass()
def test_connection_settings_null(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
with self.assertRaises(IntegrityError), \
transaction.atomic():
import_config.save()
def test_connection_settings_protected(self):
import_config = FHIRImportConfig.objects.create(
domain=DOMAIN,
connection_settings=self.conn,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
self.addCleanup(import_config.delete)
with self.assertRaises(ProtectedError):
self.conn.delete()
def test_fhir_version_good(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
fhir_version=FHIR_VERSION_4_0_1,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
import_config.full_clean()
def test_fhir_version_bad(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
fhir_version='1.0.2',
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
def test_frequency_good(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
frequency=IMPORT_FREQUENCY_DAILY,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
import_config.full_clean()
def test_frequency_bad(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
frequency='annually',
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
def test_owner_id_missing(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
def test_owner_id_too_long(self):
uuid = '4d4e6255-2139-49e0-98e9-9418e83a4944'
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id=uuid + 'X',
owner_type=OWNER_TYPE_USER,
)
try:
import_config.full_clean()
except ValidationError as err:
errors = err.message_dict['owner_id']
self.assertEqual(
errors,
['Ensure this value has at most 36 characters (it has 37).'],
)
class TestFHIRImportConfigGetOwner(TestCaseWithConnectionSettings):
def test_owner_type_missing(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0b',
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_owner_type_bad(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0b',
owner_type='0rgunit',
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_user_does_not_exist(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0b',
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_group_does_not_exist(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='the-clan-mcb0b',
owner_type=OWNER_TYPE_GROUP,
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_location_does_not_exist(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0bton',
owner_type=OWNER_TYPE_LOCATION,
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
class TestCaseWithReferral(TestCaseWithConnectionSettings):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.import_config = FHIRImportConfig.objects.create(
domain=DOMAIN,
connection_settings=cls.conn,
owner_id='b0b',
)
cls.referral = CaseType.objects.create(
domain=DOMAIN,
name='referral',
)
@classmethod
def tearDownClass(cls):
cls.referral.delete()
cls.import_config.delete()
super().tearDownClass()
class TestFHIRImportResourceType(TestCaseWithReferral):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.mother = CaseType.objects.create(
domain=DOMAIN,
name='mother',
)
@classmethod
def tearDownClass(cls):
cls.mother.delete()
super().tearDownClass()
def test_search_params_empty(self):
service_request = FHIRImportResourceType.objects.create(
import_config=self.import_config,
name='ServiceRequest',
case_type=self.referral,
)
self.assertEqual(service_request.search_params, {})
def test_related_resource_types(self):
service_request = FHIRImportResourceType.objects.create(
import_config=self.import_config,
name='ServiceRequest',
case_type=self.referral,
)
patient = FHIRImportResourceType.objects.create(
import_config=self.import_config,
name='Patient',
case_type=self.mother,
)
ResourceTypeRelationship.objects.create(
resource_type=service_request,
jsonpath='$.subject.reference',
related_resource_type=patient,
)
related = service_request.jsonpaths_to_related_resource_types.all()
self.assertEqual(len(related), 1)
self.assertEqual(related[0].related_resource_type.name, 'Patient')
case_type = related[0].related_resource_type.case_type
self.assertEqual(case_type.name, 'mother')
def test_domain(self):
service_request = FHIRImportResourceType.objects.create(
import_config=self.import_config,
name='ServiceRequest',
case_type=self.referral,
)
self.assertEqual(service_request.domain, DOMAIN)
class TestFHIRImportResourceProperty(TestCaseWithReferral):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.service_request = FHIRImportResourceType.objects.create(
import_config=cls.import_config,
name='ServiceRequest',
case_type=cls.referral,
)
cls.status_property = FHIRImportResourceProperty.objects.create(
resource_type=cls.service_request,
value_source_config={
'jsonpath': '$.status',
'case_property': 'fhir_status',
}
)
cls.intent_property = FHIRImportResourceProperty.objects.create(
resource_type=cls.service_request,
value_source_config={
'jsonpath': '$.intent',
'case_property': 'fhir_intent',
}
)
cls.subject_property = FHIRImportResourceProperty.objects.create(
resource_type=cls.service_request,
value_source_config={
'jsonpath': '$.subject.reference', # e.g. "Patient/12345"
'case_property': 'fhir_subject',
}
)
@classmethod
def tearDownClass(cls):
cls.subject_property.delete()
cls.intent_property.delete()
cls.status_property.delete()
cls.service_request.delete()
super().tearDownClass()
def test_related_name(self):
properties = self.service_request.properties.all()
names = sorted([str(p) for p in properties])
self.assertEqual(names, [
'ServiceRequest.intent',
'ServiceRequest.status',
'ServiceRequest.subject.reference',
])
def test_case_type(self):
properties = self.service_request.properties.all()
case_types = set(([str(p.case_type) for p in properties]))
self.assertEqual(case_types, {'referral'})
def test_jsonpath_set(self):
self.assertEqual(
self.subject_property.value_source_jsonpath,
'$.subject.reference',
)
def test_jsonpath_notset(self):
priority = FHIRImportResourceProperty(
resource_type=self.service_request,
value_source_config={
'case_property': 'fhir_priority',
}
)
self.assertEqual(priority.value_source_jsonpath, '')
def test_value_source_good(self):
value_source = self.subject_property.get_value_source()
self.assertIsInstance(value_source, ValueSource)
self.assertIsInstance(value_source, CasePropertyValueSource)
def test_value_source_bad(self):
priority = FHIRImportResourceProperty(
resource_type=self.service_request,
)
with self.assertRaises(ConfigurationError):
priority.save()
def test_iter_case_property_value_sources(self):
FHIRImportResourceProperty.objects.create(
resource_type=self.service_request,
value_source_config={
'jsonpath': "$.extension[?url='https://example.com/commcare/case_type'].value",
'case_property': 'case_type',
}
)
FHIRImportResourceProperty.objects.create(
resource_type=self.service_request,
value_source_config={
'jsonpath': "$.extension[?url='https://example.com/commcare/type'].value",
'case_property': 'type',
}
)
FHIRImportResourceProperty.objects.create(
resource_type=self.service_request,
value_source_config={
'jsonpath': "$.extension[?url='https://example.com/commcare/user_id'].value",
'case_property': 'user_id',
}
)
FHIRImportResourceProperty.objects.create(
resource_type=self.service_request,
value_source_config={
'jsonpath': "$.extension[?url='https://example.com/commcare/owner_id'].value",
'case_property': 'owner_id',
}
)
FHIRImportResourceProperty.objects.create(
resource_type=self.service_request,
value_source_config={
'jsonpath': "$.extension[?url='https://example.com/commcare/opened_on'].value",
'case_property': 'opened_on',
}
)
FHIRImportResourceProperty.objects.create(
resource_type=self.service_request,
value_source_config={
'jsonpath': "$.extension[?url='https://example.com/commcare/this_is_fine'].value",
'case_property': 'this_is_fine',
}
)
props = [
vs.case_property
for vs in self.service_request.iter_case_property_value_sources()
]
self.assertEqual(props, [
'fhir_status', 'fhir_intent', 'fhir_subject', 'this_is_fine',
])
class TestConfigurationErrors(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.case_type = CaseType.objects.create(
domain=DOMAIN,
name='person',
)
cls.case_property = CaseProperty.objects.create(
case_type=cls.case_type,
name='name',
)
cls.resource_type = models.FHIRResourceType.objects.create(
domain=DOMAIN,
case_type=cls.case_type,
)
@classmethod
def tearDownClass(cls):
cls.resource_type.delete()
cls.case_property.delete()
cls.case_type.delete()
super().tearDownClass()
def setUp(self):
self.resource_type.name = 'Patient'
def test_resource_type_name(self):
self.resource_type.name = 'Patinet'
with self.assertRaisesRegex(
ConfigurationError,
"^Unknown resource type 'Patinet' for FHIR version 4.0.1$"
):
self.resource_type.get_json_schema()
def test_case_types_dont_match(self):
with case_type_context('child') as child:
with case_property_context(child, 'name') as child_name:
prop = FHIRResourceProperty(
resource_type=self.resource_type,
case_property=child_name,
jsonpath='name[0].text',
)
with self.assertRaisesRegex(
ConfigurationError,
"^Invalid FHIRResourceProperty: case_property case "
"type 'child' does not match resource_type case type "
"'person'.$"
):
prop.save()
def test_value_source_config(self):
prop = FHIRResourceProperty(
resource_type=self.resource_type,
case_property=self.case_property,
jsonpath='name[0].text',
value_source_config={
'case_property': 'name',
'jsonpath': 'name[0].text',
}
)
with self.assertRaisesRegex(
ConfigurationError,
"^Invalid FHIRResourceProperty: Unable to set "
"'value_source_config' when 'case_property', 'jsonpath' or "
"'value_map' are set.$"
):
prop.save()
def test_no_jsonpath(self):
prop = FHIRResourceProperty(
resource_type=self.resource_type,
case_property=self.case_property,
)
with self.assertRaisesRegex(
ConfigurationError,
'^Unable to set FHIR resource property value without case '
'property and JSONPath.$'
):
prop.get_value_source()
def test_ok(self):
prop = FHIRResourceProperty(
resource_type=self.resource_type,
case_property=self.case_property,
jsonpath='name[0].text',
)
prop.save()
self.assertIsNotNone(prop.id)
value_source = prop.get_value_source()
self.assertEqual(value_source.__class__.__name__, 'CaseProperty')
self.assertEqual(value_source.case_property, 'name')
self.assertEqual(value_source.jsonpath, 'name[0].text')
class TestModelIntegrity(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.mother = CaseType.objects.create(
domain=DOMAIN,
name='mother',
)
cls.patient = models.FHIRResourceType.objects.create(
domain=DOMAIN,
case_type=cls.mother,
name='Patient'
)
@classmethod
def tearDownClass(cls):
cls.patient.delete()
cls.mother.delete()
super().tearDownClass()
def test_two_resource_types_one_case_type_bad(self):
"""
Case type "mother" can't be mapped to both "Patient" and "Person"
"""
with self.assertRaises(IntegrityError):
models.FHIRResourceType.objects.create(
domain=DOMAIN,
case_type=self.mother,
name='Person'
)
def test_two_case_types_one_resource_type_ok(self):
"""
Case types "mother" and "child" can both be mapped to "Patient"
"""
child = CaseType.objects.create(
domain=DOMAIN,
name='child',
)
self.addCleanup(child.delete)
patient_again = models.FHIRResourceType.objects.create(
domain=DOMAIN,
case_type=child,
name='Patient'
)
self.addCleanup(patient_again.delete)
class TestResourceValidation(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.case_type = CaseType.objects.create(
domain=DOMAIN,
name='person',
)
cls.resource_type = models.FHIRResourceType.objects.create(
domain=DOMAIN,
case_type=cls.case_type,
name='Patient'
)
@classmethod
def tearDownClass(cls):
cls.resource_type.delete()
cls.case_type.delete()
super().tearDownClass()
def test_minimal(self):
patient = {'resourceType': 'Patient'}
self.resource_type.validate_resource(patient)
def test_required_property(self):
patient = {}
with self.assertRaisesRegex(ConfigurationError,
"'resourceType' is a required property"):
self.resource_type.validate_resource(patient)
def test_bad_data_type(self):
patient = {
'birthDate': 1,
'resourceType': 'Patient',
}
with self.assertRaisesRegex(ConfigurationError,
"1 is not of type 'string'"):
self.resource_type.validate_resource(patient)
def test_bad_format(self):
patient = {
'birthDate': '05/05/43',
'resourceType': 'Patient',
}
with self.assertRaisesRegex(ConfigurationError,
"'05/05/43' does not match "):
self.resource_type.validate_resource(patient)
def test_bad_scalar(self):
patient = {
'name': '<NAME>',
'resourceType': 'Patient',
}
with self.assertRaisesRegex(ConfigurationError,
"'<NAME>' is not of type 'array'"):
self.resource_type.validate_resource(patient)
def test_bad_vector(self):
patient = {
'name': [{'family': ['Palin']}],
'resourceType': 'Patient'
}
with self.assertRaisesRegex(ConfigurationError,
r"\['Palin'\] is not of type 'string'"):
self.resource_type.validate_resource(patient)
def test_names():
names = FHIRResourceType.get_names()
assert_in('Patient', names)
def test_doctests():
results = doctest.testmod(models, optionflags=doctest.ELLIPSIS)
assert results.failed == 0
@contextmanager
def case_type_context(name):
case_type = CaseType.objects.create(
domain=DOMAIN,
name=name,
)
try:
yield case_type
finally:
case_type.delete()
@contextmanager
def case_property_context(case_type, name):
case_property = CaseProperty.objects.create(
case_type=case_type,
name=name,
)
try:
yield case_property
finally:
case_property.delete()
|
examples/delete_vowels.py | timmahrt/praatIO | 208 | 11199942 |
"""
Praatio example for deleting the vowels from the textgrids and audio files
"""
import os
from os.path import join
import copy
from praatio import textgrid
from praatio import praatio_scripts
from praatio import audio
from praatio.utilities import utils
def isVowel(label):
return any([vowel in label.lower() for vowel in ["a", "e", "i", "o", "u"]])
def deleteVowels(inputTGFN, inputWavFN, outputPath, doShrink, atZeroCrossing=True):
utils.makeDir(outputPath)
wavFN = os.path.split(inputWavFN)[1]
tgFN = os.path.split(inputTGFN)[1]
outputWavFN = join(outputPath, wavFN)
outputTGFN = join(outputPath, tgFN)
if atZeroCrossing is True:
zeroCrossingTGPath = join(outputPath, "zero_crossing_tgs")
zeroCrossingTGFN = join(zeroCrossingTGPath, tgFN)
utils.makeDir(zeroCrossingTGPath)
tg = textgrid.openTextgrid(inputTGFN, False)
wavObj = audio.WavQueryObj(inputWavFN)
praatio_scripts.tgBoundariesToZeroCrossings(tg, wavObj, zeroCrossingTGFN)
else:
tg = textgrid.openTextgrid(inputTGFN, False)
keepList = tg.tierDict["phone"].entryList
keepList = [entry for entry in keepList if not isVowel(entry[2])]
deleteList = utils.invertIntervalList(keepList, 0, tg.maxTimestamp)
wavObj = audio.openAudioFile(inputWavFN, keepList=keepList, doShrink=doShrink)
wavObj.save(outputWavFN)
shrunkTG = copy.deepcopy(tg)
for start, end in sorted(deleteList, reverse=True):
shrunkTG = shrunkTG.eraseRegion(start, end, doShrink=doShrink)
shrunkTG.save(outputTGFN, "short_textgrid", True)
# Shrink files
root = join(".", "files")
inputTGFN = join(root, "bobby_phones.TextGrid")
inputWavFN = join(root, "bobby.wav")
outputPath = join(root, "deleted_test")
deleteVowels(inputTGFN, inputWavFN, outputPath, True, True)
inputTGFN = join(root, "mary.TextGrid")
inputWavFN = join(root, "mary.wav")
outputPath = join(root, "deleted_test")
deleteVowels(inputTGFN, inputWavFN, outputPath, True, True)
# Maintain original duration of files
inputTGFN = join(root, "bobby_phones.TextGrid")
inputWavFN = join(root, "bobby.wav")
outputPath = join(root, "deleted_test_no_shrinking")
deleteVowels(inputTGFN, inputWavFN, outputPath, False, True)
inputTGFN = join(root, "mary.TextGrid")
inputWavFN = join(root, "mary.wav")
outputPath = join(root, "deleted_test_no_shrinking")
deleteVowels(inputTGFN, inputWavFN, outputPath, False, True)
|
library/mmap.py | creativemindplus/skybison | 278 | 11199952 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
# $builtin-init-module$
from _builtins import _builtin, _int_guard, _type_subclass_guard, _unimplemented
def _mmap_new(cls, fileno, length, flags, prot, offset):
_builtin()
ACCESS_READ = 1
ACCESS_WRITE = 2
ACCESS_COPY = 3
class mmap(bootstrap=True):
@staticmethod
def __new__(cls, fileno, length, flags=1, prot=3, access=0, offset=0):
"""
Creates a new mmap object.
Values for flags are ints:
* MAP_SHARED = 1
* MAP_PRIVATE = 2
Values for prot are ints:
* PROT_READ = 1
* PROT_WRITE = 2
* PROT_EXEC = 4
Some operating systems / file systems could provide additional values.
"""
_type_subclass_guard(cls, mmap)
_int_guard(fileno)
_int_guard(length)
_int_guard(flags)
_int_guard(prot)
_int_guard(access)
_int_guard(offset)
if length < 0:
raise OverflowError("memory mapped length must be positive")
if offset < 0:
raise OverflowError("memory mapped offset must be positive")
if access != 0 and (flags != 1 or prot != 3):
raise ValueError("mmap can't specify both access and flags, prot.")
if access == ACCESS_READ:
flags = 1
prot = 1
elif access == ACCESS_WRITE:
flags = 1
prot = 3
elif access == ACCESS_COPY:
flags = 2
prot = 3
elif access != 0:
raise ValueError("mmap invalid access parameter.")
return _mmap_new(cls, fileno, length, flags, prot, offset)
def close(self):
_builtin()
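# Hedged usage sketch (not part of this module): the intended call pattern
# mirrors CPython's mmap API, e.g.
#
#     with open("data.bin", "rb") as fp:
#         view = mmap(fp.fileno(), 4096, access=ACCESS_READ)
#         ...
#         view.close()
#
# "data.bin" and the 4096-byte length are made up for the illustration.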
|
pahelix/utils/language_model_tools.py | agave233/PaddleHelix | 454 | 11199973 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
| Tools for language models.
"""
from copy import copy
import numpy as np
import random
def apply_bert_mask(inputs, pad_mask, tokenizer):
"""
Apply BERT mask to the token_ids.
Args:
token_ids: The list of token ids.
Returns:
masked_token_ids: The list of masked token ids.
labels: The labels for traininig BERT.
"""
vocab_size = len(tokenizer.vocab)
bert_mask = np.random.uniform(size=inputs.shape) < 0.15
bert_mask &= pad_mask
masked_inputs = inputs * ~bert_mask
random_uniform = np.random.uniform(size=inputs.shape)
token_bert_mask = random_uniform < 0.8
random_bert_mask = random_uniform > 0.9
true_bert_mask = ~token_bert_mask & ~random_bert_mask
token_bert_mask = token_bert_mask & bert_mask
random_bert_mask = random_bert_mask & bert_mask
true_bert_mask = true_bert_mask & bert_mask
masked_inputs += tokenizer.mask_token_id * token_bert_mask
masked_inputs += np.random.randint(0, vocab_size, size=(inputs.shape)) * random_bert_mask
masked_inputs += inputs * true_bert_mask
labels = np.where(bert_mask, inputs, -1)
return masked_inputs, labels |
RecoHI/HiTracking/python/hiTobTecStep_cff.py | malbouis/cmssw | 852 | 11199989 | from __future__ import absolute_import
import FWCore.ParameterSet.Config as cms
import RecoTracker.IterativeTracking.iterativeTkConfig as _cfg
from RecoTracker.IterativeTracking.TobTecStep_cff import tobTecStepSeedLayersPair,tobTecStepSeedLayersTripl,tobTecStepHitDoubletsPair,tobTecStepHitDoubletsTripl,tobTecStepHitTripletsTripl,tobTecStepTracks,tobTecStepSeedsPair,tobTecStepSeedsTripl,tobTecStepSeeds,tobTecStepTrajectoryBuilder,tobTecStepInOutTrajectoryFilter,tobTecStepClusterShapeHitFilter,tobTecStepTrajectoryCleanerBySharedHits,tobTecStepChi2Est,tobTecFlexibleKFFittingSmoother,tobTecStepFitterSmoother,tobTecStepRKTrajectorySmoother,tobTecStepRKTrajectoryFitter,tobTecStepFitterSmootherForLoopers,tobTecStepRKTrajectorySmootherForLoopers,tobTecStepRKTrajectoryFitterForLoopers
from .HIPixelTripletSeeds_cff import *
from .HIPixel3PrimTracks_cfi import *
#######################################################################
# Very large impact parameter tracking using TOB + TEC ring 5 seeding #
#######################################################################
from RecoHI.HiTracking.hiPixelLessStep_cff import hiPixelLessStepClusters
hiTobTecStepClusters = hiPixelLessStepClusters.clone(
trajectories = "hiPixelLessStepTracks",
overrideTrkQuals = 'hiPixelLessStepSelector:hiPixelLessStep'
)
# TRIPLET SEEDING LAYERS
tobTecStepSeedLayersTripl.TOB.skipClusters = 'hiTobTecStepClusters'
tobTecStepSeedLayersTripl.MTOB.skipClusters = 'hiTobTecStepClusters'
tobTecStepSeedLayersTripl.MTEC.skipClusters = 'hiTobTecStepClusters'
# Triplet TrackingRegion
from RecoHI.HiTracking.hiMixedTripletStep_cff import hiMixedTripletStepTrackingRegionsA as _hiMixedTripletStepTrackingRegionsA
hiTobTecStepTrackingRegionsTripl = _hiMixedTripletStepTrackingRegionsA.clone(RegionPSet=dict(
fixedError = 5.0,#20.0
ptMin = 0.9,#0.55
originRadius = 3.5,
maxPtMin = 1.2,#0.85
))
# Triplet seeding
tobTecStepHitDoubletsTripl.clusterCheck = ""
tobTecStepHitDoubletsTripl.trackingRegions = "hiTobTecStepTrackingRegionsTripl"
tobTecStepSeedLayersPair.TOB.skipClusters = 'hiTobTecStepClusters'
tobTecStepSeedLayersPair.TEC.skipClusters = 'hiTobTecStepClusters'
# Pair TrackingRegion
hiTobTecStepTrackingRegionsPair = hiTobTecStepTrackingRegionsTripl.clone(RegionPSet=dict(
fixedError = 7.5,#30.0
originRadius = 6.0,
maxPtMin = 1.5,#0.9
))
# Pair seeds
tobTecStepHitDoubletsPair.clusterCheck = ""
tobTecStepHitDoubletsPair.trackingRegions = "hiTobTecStepTrackingRegionsPair"
# QUALITY CUTS DURING TRACK BUILDING (for inwards and outwards track building steps)
from RecoTracker.IterativeTracking.TobTecStep_cff import tobTecStepTrajectoryFilter
tobTecStepTrajectoryFilter.minimumNumberOfHits = 5
tobTecStepTrajectoryFilter.minPt = 0.85
# MAKING OF TRACK CANDIDATES
from RecoTracker.IterativeTracking.TobTecStep_cff import _tobTecStepTrackCandidatesCkf
tobTecStepTrackCandidates = _tobTecStepTrackCandidatesCkf.clone(clustersToSkip = 'hiTobTecStepClusters')
# TRACK FITTING
hiTobTecStepTracks = tobTecStepTracks.clone()
# Final selection
import RecoHI.HiTracking.hiMultiTrackSelector_cfi
hiTobTecStepSelector = RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiMultiTrackSelector.clone(
src = 'hiTobTecStepTracks',
useAnyMVA = False,
GBRForestLabel = 'HIMVASelectorIter13',
GBRForestVars = ['chi2perdofperlayer', 'nhits', 'nlayers', 'eta'],
trackSelectors = cms.VPSet(
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiLooseMTS.clone(
name = 'hiTobTecStepLoose',
applyAdaptedPVCuts = False,
useMVA = False,
), #end of pset
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiTightMTS.clone(
name = 'hiTobTecStepTight',
preFilterName = 'hiTobTecStepLoose',
applyAdaptedPVCuts = False,
useMVA = False,
minMVA = -0.2
),
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiHighpurityMTS.clone(
name = 'hiTobTecStep',
preFilterName = 'hiTobTecStepTight',
applyAdaptedPVCuts = False,
useMVA = False,
minMVA = -0.09
),
) #end of vpset
) #end of clone
import RecoTracker.FinalTrackSelectors.trackListMerger_cfi
hiTobTecStepQual = RecoTracker.FinalTrackSelectors.trackListMerger_cfi.trackListMerger.clone(
TrackProducers = ['hiTobTecStepTracks'],
hasSelector = [1],
selectedTrackQuals = ["hiTobTecStepSelector:hiTobTecStep"],
copyExtras = True,
makeReKeyedSeeds = cms.untracked.bool(False),
)
hiTobTecStepTask = cms.Task(hiTobTecStepClusters,
tobTecStepSeedLayersTripl,
hiTobTecStepTrackingRegionsTripl,
tobTecStepHitDoubletsTripl,
tobTecStepHitTripletsTripl,
tobTecStepSeedsTripl,
tobTecStepSeedLayersPair,
hiTobTecStepTrackingRegionsPair,
tobTecStepHitDoubletsPair,
tobTecStepSeedsPair,
tobTecStepSeeds,
tobTecStepTrackCandidates,
hiTobTecStepTracks,
hiTobTecStepSelector,
hiTobTecStepQual
)
hiTobTecStep = cms.Sequence(hiTobTecStepTask)
|
notebook/pandas_str_num_conversion.py | vhn0912/python-snippets | 174 | 12600048 | <filename>notebook/pandas_str_num_conversion.py
import pandas as pd
print(pd.__version__)
# 0.23.0
df = pd.DataFrame({'i': [0, 10, 200], 'f': [0, 0.9, 0.09],
's_i': ['0', '10', '200'], 's_f': ['0', '0.9', '0.09']})
print(df)
# i f s_i s_f
# 0 0 0.00 0 0
# 1 10 0.90 10 0.9
# 2 200 0.09 200 0.09
print(df.dtypes)
# i int64
# f float64
# s_i object
# s_f object
# dtype: object
print(df['i'].astype(str))
# 0 0
# 1 10
# 2 200
# Name: i, dtype: object
print(df['f'].astype(str))
# 0 0.0
# 1 0.9
# 2 0.09
# Name: f, dtype: object
print(df.astype(str))
# i f s_i s_f
# 0 0 0.0 0 0
# 1 10 0.9 10 0.9
# 2 200 0.09 200 0.09
print(df.astype(str).dtypes)
# i object
# f object
# s_i object
# s_f object
# dtype: object
print(df['i'].astype(float))
# 0 0.0
# 1 10.0
# 2 200.0
# Name: i, dtype: float64
print(df['f'].astype(int))
# 0 0
# 1 0
# 2 0
# Name: f, dtype: int64
print(df['s_i'].astype(int))
# 0 0
# 1 10
# 2 200
# Name: s_i, dtype: int64
print(df['s_i'].astype(float))
# 0 0.0
# 1 10.0
# 2 200.0
# Name: s_i, dtype: float64
print(df['s_f'].astype(float))
# 0 0.00
# 1 0.90
# 2 0.09
# Name: s_f, dtype: float64
# print(df['s_f'].astype(int))
# ValueError: invalid literal for int() with base 10: '0.9'
print(df['s_f'].astype(float).astype(int))
# 0 0
# 1 0
# 2 0
# Name: s_f, dtype: int64
df['i'] = df['i'].astype(str)
print(df)
# i f s_i s_f
# 0 0 0.00 0 0
# 1 10 0.90 10 0.9
# 2 200 0.09 200 0.09
df['f_s'] = df['f'].astype(str)
print(df)
# i f s_i s_f f_s
# 0 0 0.00 0 0 0.0
# 1 10 0.90 10 0.9 0.9
# 2 200 0.09 200 0.09 0.09
print(df.dtypes)
# i object
# f float64
# s_i object
# s_f object
# f_s object
# dtype: object
s_int = pd.Series([0xff, 0o77, 0b11])
print(s_int)
# 0 255
# 1 63
# 2 3
# dtype: int64
print(s_int.map(bin))
# 0 0b11111111
# 1 0b111111
# 2 0b11
# dtype: object
print(s_int.map(oct))
# 0 0o377
# 1 0o77
# 2 0o3
# dtype: object
print(s_int.map(hex))
# 0 0xff
# 1 0x3f
# 2 0x3
# dtype: object
print(s_int.map('{:b}'.format))
# 0 11111111
# 1 111111
# 2 11
# dtype: object
print(s_int.map('{:#b}'.format))
# 0 0b11111111
# 1 0b111111
# 2 0b11
# dtype: object
print(s_int.map('{:#010b}'.format))
# 0 0b11111111
# 1 0b00111111
# 2 0b00000011
# dtype: object
df_str = pd.DataFrame({'bin': ['0b01', '0b10', '0b11'],
'oct': ['0o07', '0o70', '0o77'],
'hex': ['0x0f', '0xf0', '0xff'],
'dec': ['1', '10', '11']})
print(df_str)
# bin oct hex dec
# 0 0b01 0o07 0x0f 1
# 1 0b10 0o70 0xf0 10
# 2 0b11 0o77 0xff 11
print(df_str.dtypes)
# bin object
# oct object
# hex object
# dec object
# dtype: object
# print(df_str['bin'].astype(int))
# ValueError: invalid literal for int() with base 10: '0b01'
print(df_str['bin'].map(lambda x: int(x, 2)))
# 0 1
# 1 2
# 2 3
# Name: bin, dtype: int64
print(df_str['oct'].map(lambda x: int(x, 8)))
# 0 7
# 1 56
# 2 63
# Name: oct, dtype: int64
print(df_str['hex'].map(lambda x: int(x, 16)))
# 0 15
# 1 240
# 2 255
# Name: hex, dtype: int64
print(df_str.applymap(lambda x: int(x, 0)))
# bin oct hex dec
# 0 1 7 15 1
# 1 2 56 240 10
# 2 3 63 255 11
print(df_str['dec'].map(lambda x: int(x, 2)))
# 0 1
# 1 2
# 2 3
# Name: dec, dtype: int64
s_str_dec = pd.Series(['01', '10', '11'])
print(s_str_dec)
# 0 01
# 1 10
# 2 11
# dtype: object
print(s_str_dec.astype(int))
# 0 1
# 1 10
# 2 11
# dtype: int64
# print(s_str_dec.map(lambda x: int(x, 0)))
# ValueError: invalid literal for int() with base 0: '01'
print(df_str['oct'].map(lambda x: int(x, 8)).map(hex))
# 0 0x7
# 1 0x38
# 2 0x3f
# Name: oct, dtype: object
s_str = pd.Series(['0', '10', 'xxx'])
print(s_str)
# 0 0
# 1 10
# 2 xxx
# dtype: object
print(s_str.str.zfill(8))
# 0 00000000
# 1 00000010
# 2 00000xxx
# dtype: object
print(s_str.str.rjust(8))
# 0 0
# 1 10
# 2 xxx
# dtype: object
print(s_str.str.rjust(8, '_'))
# 0 _______0
# 1 ______10
# 2 _____xxx
# dtype: object
print(s_str.str.center(8))
# 0 0
# 1 10
# 2 xxx
# dtype: object
print(s_str.str.center(8, '_'))
# 0 ___0____
# 1 ___10___
# 2 __xxx___
# dtype: object
print(s_str.str.ljust(8))
# 0 0
# 1 10
# 2 xxx
# dtype: object
print(s_str.str.ljust(8, '_'))
# 0 0_______
# 1 10______
# 2 xxx_____
# dtype: object
s_num = pd.Series([0, 10, 100])
# print(s_num.str.rjust(8, '_'))
# AttributeError: Can only use .str accessor with string values, which use np.object_ dtype in pandas
print(s_num.astype(str).str.rjust(8, '_'))
# 0 _______0
# 1 ______10
# 2 _____100
# dtype: object
df = pd.DataFrame({'i': [0, 10, 100],
'f': [0.1234, 1.234, 12.34],
'round': [0.4, 0.5, 0.6]})
print(df)
# i f round
# 0 0 0.1234 0.4
# 1 10 1.2340 0.5
# 2 100 12.3400 0.6
print(df.dtypes)
# i int64
# f float64
# round float64
# dtype: object
print(df['i'].map('{:08}'.format))
# 0 00000000
# 1 00000010
# 2 00000100
# Name: i, dtype: object
print(df['i'].map('{:_<8}'.format))
# 0 0_______
# 1 10______
# 2 100_____
# Name: i, dtype: object
print(df['i'].map('{:x}'.format))
# 0 0
# 1 a
# 2 64
# Name: i, dtype: object
print(df['i'].map('{:#010b}'.format))
# 0 0b00000000
# 1 0b00001010
# 2 0b01100100
# Name: i, dtype: object
print(df['f'].map('{:.2f}'.format))
# 0 0.12
# 1 1.23
# 2 12.34
# Name: f, dtype: object
print(df['f'].map('{:.2g}'.format))
# 0 0.12
# 1 1.2
# 2 12
# Name: f, dtype: object
print(df['f'].map('{:.2e}'.format))
# 0 1.23e-01
# 1 1.23e+00
# 2 1.23e+01
# Name: f, dtype: object
print(df['f'].map('{:.2%}'.format))
# 0 12.34%
# 1 123.40%
# 2 1234.00%
# Name: f, dtype: object
print(df['round'].map('{:.0f}'.format))
# 0 0
# 1 0
# 2 1
# Name: round, dtype: object
|
pnacl/driver/tests/driver_env_test.py | cohortfsllc/cohort-cocl2-sandbox | 2,151 | 12600053 | <filename>pnacl/driver/tests/driver_env_test.py
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests of the pnacl driver.
This tests the driver_env functionality.
"""
import unittest
import driver_env
class TestDriverEnv(unittest.TestCase):
def test_EnvSetMany(self):
myenv = driver_env.Environment()
self.assertFalse(myenv.has('foo_str'))
self.assertFalse(myenv.has('foo_unicode'))
self.assertFalse(myenv.has('foo_list'))
myenv.setmany(foo_str='string',
foo_unicode=u'unicode',
foo_list=['a', 'b', 'c'])
self.assertEqual('string', myenv.getone('foo_str'))
self.assertEqual(u'unicode', myenv.getone('foo_unicode'))
self.assertEqual(['a', 'b', 'c'], myenv.get('foo_list'))
if __name__ == '__main__':
unittest.main()
|
backend/django/test/core_utils/test_core_utils_annotate.py | RTIInternational/SMART | 185 | 12600061 | <filename>backend/django/test/core_utils/test_core_utils_annotate.py
from test.conftest import TEST_QUEUE_LEN
from test.util import assert_obj_exists
from core.models import AssignedData, Data, DataLabel, DataQueue, Label
from core.utils.utils_annotate import (
assign_datum,
get_assignments,
label_data,
move_skipped_to_admin_queue,
unassign_datum,
)
from core.utils.utils_queue import fill_queue
def test_assign_datum_project_queue_returns_datum(
db, test_queue, test_profile, test_redis
):
"""Assign a datum from a project-wide queue (null profile ID)."""
fill_queue(test_queue, orderby="random")
datum = assign_datum(test_profile, test_queue.project)
# Make sure we got the datum
assert isinstance(datum, Data)
def test_assign_datum_project_queue_correct_assignment(
db, test_queue, test_profile, test_redis
):
fill_queue(test_queue, orderby="random")
datum = assign_datum(test_profile, test_queue.project)
# Make sure the assignment is correct
assignment = AssignedData.objects.filter(data=datum)
assert len(assignment) == 1
assert assignment[0].profile == test_profile
assert assignment[0].queue == test_queue
assert assignment[0].assigned_timestamp is not None
def test_assign_datum_project_queue_pops_queues(
db, test_queue, test_profile, test_redis
):
fill_queue(test_queue, orderby="random")
datum = assign_datum(test_profile, test_queue.project)
# Make sure the datum was removed from queues but not set
assert test_redis.llen("queue:" + str(test_queue.pk)) == test_queue.length - 1
assert test_redis.scard("set:" + str(test_queue.pk)) == test_queue.length
# but not from the db queue
assert test_queue.data.count() == test_queue.length
assert datum in test_queue.data.all()
def test_assign_datum_profile_queue_returns_correct_datum(
db, test_profile_queue, test_profile, test_profile_queue2, test_profile2, test_redis
):
fill_queue(test_profile_queue, orderby="random")
fill_queue(test_profile_queue2, orderby="random")
datum = assign_datum(test_profile, test_profile_queue.project)
assert isinstance(datum, Data)
def test_assign_datum_profile_queue_correct_assignment(
db, test_profile_queue, test_profile, test_profile_queue2, test_profile2, test_redis
):
fill_queue(test_profile_queue, orderby="random")
fill_queue(test_profile_queue2, orderby="random")
datum = assign_datum(test_profile, test_profile_queue.project)
assignment = AssignedData.objects.filter(data=datum)
assert len(assignment) == 1
assert assignment[0].profile == test_profile
assert assignment[0].queue == test_profile_queue
assert assignment[0].assigned_timestamp is not None
def test_assign_datum_profile_queue_pops_queues(
db, test_profile_queue, test_profile, test_profile_queue2, test_profile2, test_redis
):
fill_queue(test_profile_queue, orderby="random")
fill_queue(test_profile_queue2, orderby="random")
datum = assign_datum(test_profile, test_profile_queue.project)
# Make sure the datum was removed from the correct queues but not sets
assert (
test_redis.llen("queue:" + str(test_profile_queue.pk))
== test_profile_queue.length - 1
)
assert (
test_redis.scard("set:" + str(test_profile_queue.pk))
== test_profile_queue.length
)
# ...but not the other queues
assert test_profile_queue.data.count() == test_profile_queue.length
assert datum in test_profile_queue.data.all()
assert (
test_redis.llen("queue:" + str(test_profile_queue2.pk))
== test_profile_queue2.length
)
assert (
test_redis.scard("set:" + str(test_profile_queue2.pk))
== test_profile_queue2.length
)
assert test_profile_queue2.data.count() == test_profile_queue2.length
def test_label_data(db, test_profile, test_queue, test_redis):
fill_queue(test_queue, orderby="random")
datum = assign_datum(test_profile, test_queue.project)
test_label = Label.objects.create(name="test", project=test_queue.project)
label_data(test_label, datum, test_profile, 3)
# Make sure the label was properly recorded
assert datum in test_profile.labeled_data.all()
assert_obj_exists(
DataLabel,
{
"data": datum,
"profile": test_profile,
"label": test_label,
"time_to_label": 3,
},
)
# Make sure the assignment was removed
assert not AssignedData.objects.filter(
profile=test_profile, data=datum, queue=test_queue
).exists()
def test_get_assignments_no_existing_assignment_one_assignment(
db, test_profile, test_project_data, test_queue, test_redis
):
fill_queue(test_queue, orderby="random")
assert AssignedData.objects.count() == 0
data = get_assignments(test_profile, test_project_data, 1)
assert len(data) == 1
assert isinstance(data[0], Data)
assert_obj_exists(AssignedData, {"data": data[0], "profile": test_profile})
def test_get_assignments_no_existing_assignment_half_max_queue_length(
db, test_profile, test_project_data, test_queue, test_redis
):
fill_queue(test_queue, orderby="random")
assert AssignedData.objects.count() == 0
data = get_assignments(test_profile, test_project_data, TEST_QUEUE_LEN // 2)
assert len(data) == TEST_QUEUE_LEN // 2
for datum in data:
assert isinstance(datum, Data)
assert_obj_exists(AssignedData, {"data": datum, "profile": test_profile})
def test_get_assignments_no_existing_assignment_max_queue_length(
db, test_profile, test_project_data, test_queue, test_redis
):
fill_queue(test_queue, orderby="random")
assert AssignedData.objects.count() == 0
data = get_assignments(test_profile, test_project_data, TEST_QUEUE_LEN)
assert len(data) == TEST_QUEUE_LEN
for datum in data:
assert isinstance(datum, Data)
assert_obj_exists(AssignedData, {"data": datum, "profile": test_profile})
def test_get_assignments_no_existing_assignment_over_max_queue_length(
db, test_profile, test_project_data, test_queue, test_redis
):
fill_queue(test_queue, orderby="random")
assert AssignedData.objects.count() == 0
data = get_assignments(test_profile, test_project_data, TEST_QUEUE_LEN + 10)
assert len(data) == TEST_QUEUE_LEN
for datum in data:
assert isinstance(datum, Data)
assert_obj_exists(AssignedData, {"data": datum, "profile": test_profile})
def test_get_assignments_one_existing_assignment(
db, test_profile, test_project_data, test_queue, test_redis
):
fill_queue(test_queue, orderby="random")
assigned_datum = assign_datum(test_profile, test_project_data)
data = get_assignments(test_profile, test_project_data, 1)
assert isinstance(data[0], Data)
# We should just get the datum that was already assigned
assert data[0] == assigned_datum
def test_get_assignments_multiple_existing_assignments(
db, test_profile, test_project_data, test_queue, test_redis
):
fill_queue(test_queue, orderby="random")
assigned_data = []
for i in range(5):
assigned_data.append(assign_datum(test_profile, test_project_data))
data = get_assignments(test_profile, test_project_data, 5)
assert len(data) == 5
assert len(data) == len(assigned_data)
for datum, assigned_datum in zip(data, assigned_data):
assert isinstance(datum, Data)
# We should just get the data that was already assigned
assert len(data) == len(assigned_data)
def test_unassign(db, test_profile, test_project_data, test_queue, test_redis):
fill_queue(test_queue, orderby="random")
assert test_redis.llen("queue:" + str(test_queue.pk)) == test_queue.length
assert test_redis.scard("set:" + str(test_queue.pk)) == test_queue.length
datum = get_assignments(test_profile, test_project_data, 1)[0]
assert test_redis.llen("queue:" + str(test_queue.pk)) == (test_queue.length - 1)
assert test_redis.scard("set:" + str(test_queue.pk)) == test_queue.length
assert AssignedData.objects.filter(data=datum, profile=test_profile).exists()
unassign_datum(datum, test_profile)
assert test_redis.llen("queue:" + str(test_queue.pk)) == test_queue.length
assert test_redis.scard("set:" + str(test_queue.pk)) == test_queue.length
assert not AssignedData.objects.filter(data=datum, profile=test_profile).exists()
# The unassigned datum should be the next to be assigned
reassigned_datum = get_assignments(test_profile, test_project_data, 1)[0]
assert reassigned_datum == datum
def test_unassign_after_fillqueue(
db, test_profile, test_project_data, test_queue, test_labels, test_redis
):
fill_queue(test_queue, "random")
assert test_redis.llen("queue:" + str(test_queue.pk)) == test_queue.length
assert test_redis.scard("set:" + str(test_queue.pk)) == test_queue.length
data = get_assignments(test_profile, test_project_data, 10)
assert test_redis.llen("queue:" + str(test_queue.pk)) == (test_queue.length - 10)
assert test_redis.scard("set:" + str(test_queue.pk)) == test_queue.length
test_label = test_labels[0]
for i in range(5):
label_data(test_label, data[i], test_profile, 3)
assert test_redis.llen("queue:" + str(test_queue.pk)) == (test_queue.length - 10)
assert test_redis.scard("set:" + str(test_queue.pk)) == (test_queue.length - 5)
fill_queue(test_queue, "random")
assert test_redis.llen("queue:" + str(test_queue.pk)) == test_queue.length - 5
assert test_redis.scard("set:" + str(test_queue.pk)) == test_queue.length
def test_skip_data(db, test_profile, test_queue, test_admin_queue, test_redis):
fill_queue(test_queue, orderby="random")
project = test_queue.project
datum = assign_datum(test_profile, project)
move_skipped_to_admin_queue(datum, test_profile, project)
# Make sure the assignment was removed
assert not AssignedData.objects.filter(
profile=test_profile, data=datum, queue=test_queue
).exists()
# make sure the item was re-assigned to the admin queue
assert DataQueue.objects.filter(data=datum, queue=test_admin_queue).exists()
# make sure not in normal queue
assert not DataQueue.objects.filter(data=datum, queue=test_queue).exists()
|
examples/gdb_api.py | tkmikan/pwntools | 8,966 | 12600067 | <filename>examples/gdb_api.py
#!/usr/bin/env python3
"""An example of using GDB Python API with Pwntools."""
from pwn import *
def check_write(gdb, exp_buf):
"""Check that write() was called with the expected arguments."""
fd = gdb.parse_and_eval('$rdi').cast(gdb.lookup_type('int'))
assert fd == 1, fd
buf_addr = gdb.parse_and_eval('$rsi').cast(gdb.lookup_type('long'))
count = gdb.parse_and_eval('$rdx').cast(gdb.lookup_type('long'))
buf = gdb.selected_inferior().read_memory(buf_addr, count).tobytes()
assert buf == exp_buf, buf
def demo_sync_breakpoint(cat, gdb, txt):
"""Demonstrate a synchronous breakpoint."""
# set the synchronous breakpoint on ``write``
gdb.Breakpoint('write', temporary=True)
# resume the program
gdb.continue_nowait()
# send the line
cat.sendline(txt)
# wait until we hit the breakpoint
gdb.wait()
# inspect program state
check_write(gdb, (txt + '\n').encode())
# resume the program
gdb.continue_nowait()
# expect to observe the line we just sent
cat.recvuntil(txt)
def demo_async_breakpoint(cat, gdb, txt):
"""Demonstrate asynchronous breakpoint."""
# set the asynchronous breakpoint on ``write``
class WriteBp(gdb.Breakpoint):
def __init__(self):
super().__init__('write')
self.count = 0
def stop(self):
# called in a separate thread
check_write(gdb, (txt + '\n').encode())
self.count += 1
bp = WriteBp()
# resume the program
gdb.continue_nowait()
# send the line and immediately expect to observe it
cat.sendline(txt)
cat.recvuntil(txt)
# check that we hit the breakpoint
assert bp.count == 1, bp.count
# interrupt the program
gdb.interrupt_and_wait()
# delete the breakpoint
bp.delete()
# resume the program
gdb.continue_nowait()
def main():
# start ``cat`` under GDB
with gdb.debug('cat', gdbscript='''
set logging on
set pagination off
''', api=True) as cat:
# the process is stopped
# set the synchronous breakpoint on ``read``
cat.gdb.Breakpoint('read', temporary=True)
# resume and wait until we hit it
cat.gdb.continue_and_wait()
# demonstrate a more interesting synchronous breakpoint
demo_sync_breakpoint(cat, cat.gdb, 'foo')
# terminate GDB
cat.gdb.quit()
# now start ``cat`` normally
with process('cat') as cat:
# attach GDB
_, cat_gdb = gdb.attach(cat, gdbscript='''
set logging on
set pagination off
''', api=True)
# the process is stopped
# demonstrate asynchronous breakpoint
demo_async_breakpoint(cat, cat_gdb, 'bar')
# terminate GDB
cat_gdb.quit()
if __name__ == '__main__':
main()
|
src/sage/tests/books/computational-mathematics-with-sagemath/sol/nonlinear_doctest.py | fchapoton/sage | 1,742 | 12600078 | <reponame>fchapoton/sage<gh_stars>1000+
## -*- encoding: utf-8 -*-
"""
This file (./sol/nonlinear_doctest.sage) was *autogenerated* from ./sol/nonlinear.tex,
with sagetex.sty version 2011/05/27 v2.3.1.
It contains the contents of all the sageexample environments from this file.
You should be able to doctest this file with:
sage -t ./sol/nonlinear_doctest.sage
It is always safe to delete this file; it is not used in typesetting your
document.
Sage example in ./sol/nonlinear.tex, line 17::
sage: def intervalgen(f, phi, s, t):
....: assert (f(s) * f(t) < 0), \
....: 'Wrong arguments: f(%s) * f(%s) >= 0)'%(s, t)
....: yield s
....: yield t
....: while 1:
....: u = phi(s, t)
....: yield u
....: fu = f(u)
....: if fu == 0:
....: return
....: if fu * f(s) < 0:
....: t = u
....: else:
....: s = u
Sage example in ./sol/nonlinear.tex, line 40::
sage: f(x) = 4 * x - 1
sage: a, b = 0, 1
sage: phi(s, t) = (s + t) / 2
sage: list(intervalgen(f, phi, a, b))
[0, 1, 1/2, 1/4]
Sage example in ./sol/nonlinear.tex, line 49::
sage: from types import GeneratorType, FunctionType
sage: def checklength(u, v, w, prec):
....: return abs(v - u) < 2 * prec
sage: def iterate(series,check=checklength,prec=10^-5,maxit=100):
....: assert isinstance(series, GeneratorType)
....: assert isinstance(check, FunctionType)
....: niter = 2
....: v, w = next(series), next(series)
....: while (niter <= maxit):
....: niter += 1
....: u, v, w = v, w, next(series)
....: if check(u, v, w, prec):
....: print('After {0} iterations: {1}'.format(niter, w))
....: return
....: print('Failed after {0} iterations'.format(maxit))
Sage example in ./sol/nonlinear.tex, line 76::
sage: f(x) = 4 * sin(x) - exp(x) / 2 + 1
sage: a, b = RR(-pi), RR(pi)
sage: def phi(s, t): return RR.random_element(s, t)
sage: random = intervalgen(f, phi, a, b)
sage: iterate(random, maxit=10000) # random
After 19 iterations: 2.15848379485564
Sage example in ./sol/nonlinear.tex, line 93::
sage: basering.<x> = PolynomialRing(SR, 'x')
sage: p = x^2 + x
sage: p.roots(multiplicities=False)
[-1, 0]
Sage example in ./sol/nonlinear.tex, line 101::
sage: from collections import deque
sage: basering = PolynomialRing(SR, 'x')
sage: q, method = None, None
sage: def quadraticgen(f, r, s):
....: global q, method
....: t = r - f(r) / f.derivative()(r)
....: method = 'newton'
....: yield t
....: pts = deque([(p, f(p)) for p in (r, s, t)], maxlen=3)
....: while True:
....: q = basering.lagrange_polynomial(pts)
....: roots = [r for r in q.roots(multiplicities=False) \
....: if CC(r).is_real()]
....: approx = None
....: for root in roots:
....: if (root - pts[2][0]) * (root - pts[1][0]) < 0:
....: approx = root
....: break
....: elif (root - pts[0][0]) * (root - pts[1][0]) < 0:
....: pts.pop()
....: approx = root
....: break
....: if approx:
....: method = 'quadratic'
....: else:
....: method = 'dichotomy'
....: approx = (pts[1][0] + pts[2][0]) / 2
....: pts.append((approx, f(approx)))
....: yield pts[2][0]
Sage example in ./sol/nonlinear.tex, line 141::
sage: basering = PolynomialRing(SR, 'x')
sage: a, b = pi/2, pi
sage: f(x) = 4 * sin(x) - exp(x) / 2 + 1
sage: generator = quadraticgen(f, a, b)
sage: next(generator)
1/2*pi - (e^(1/2*pi) - 10)*e^(-1/2*pi)
"""
|
ding/interaction/base/common.py | sailxjx/DI-engine | 464 | 12600102 | import random
import string
from abc import ABCMeta, abstractmethod
from typing import Optional, Callable, Mapping, Any, Dict
_LENGTH_OF_RANDOM_TOKEN = 64
def random_token(length: Optional[int] = None) -> str:
"""
Overview:
Generate random hex token
Arguments:
- length (:obj:`Optional[int]`): Length of the random token (`None` means `64`)
Returns:
- token (:obj:`str`): Generated random token
Example:
>>> random_token() # '4eAbd5218e3d0da5e7AAFcBF48Ea0Df2dadED1bdDF0B8724FdE1569AA78F24A7'
>>> random_token(24) # 'Cd1CdD98caAb8602ac6501aC'
"""
return ''.join([random.choice(string.hexdigits) for _ in range(length or _LENGTH_OF_RANDOM_TOKEN)])
class ControllableContext(metaclass=ABCMeta):
"""
Overview:
Basic context-supported class structure
Example:
- Common usage
        >>> c = MyControllableContext() # One of the superclasses is ControllableContext
>>> c.start()
>>> try:
>>> pass # do anything you like
>>> finally:
>>> c.close()
- Use with keyword (the same as code above)
        >>> c = MyControllableContext() # One of the superclasses is ControllableContext
        >>> with c as cc: # cc is c; they have the same id
>>> pass # do anything you like
"""
@abstractmethod
def start(self):
"""
Overview:
Start the context
"""
raise NotImplementedError # pragma: no cover
@abstractmethod
def close(self):
"""
Overview:
Close the context
"""
raise NotImplementedError # pragma: no cover
def __enter__(self):
"""
Overview:
Enter the context
Returns:
- self (:obj:`ControllableContext`): Context object itself
"""
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Overview:
Exit the context
"""
self.close()
class ControllableService(ControllableContext, metaclass=ABCMeta):
"""
Overview:
        Controllable service with context support, usually with concurrent features.
Example:
- A common usage
>>> c = MyControllableService() # One of its superclasses is ControllableService
>>> c.start()
>>> try:
>>> pass # do anything you like
>>> finally:
>>> c.shutdown() # shutdown the service
>>> c.join() # wait until service is down
- Use with keyword (the same as code above)
>>> c = MyControllableService() # One of its superclasses is ControllableService
        >>> with c as cc: # cc is c; they have the same id
>>> pass # do anything you like
"""
@abstractmethod
def start(self):
"""
Overview:
Start the service
"""
raise NotImplementedError # pragma: no cover
@abstractmethod
def shutdown(self):
"""
Overview:
Shutdown the service (but service will not down immediately)
"""
raise NotImplementedError # pragma: no cover
@abstractmethod
def join(self):
"""
Overview:
Wait until the service is completely down
"""
raise NotImplementedError # pragma: no cover
def close(self):
"""
Overview:
Close the service, wait until the service is down.
"""
self.shutdown()
self.join()
def translate_dict_func(d: Mapping[str, Callable[..., Any]]) -> Callable[..., Dict[str, Any]]:
"""
Overview:
Transform dict with funcs to function generating dict.
Arguments:
- d (:obj:`Mapping[str, Callable[..., Any]]`): Dict with funcs
Returns:
- func (:obj:`Callable[..., Dict[str, Any]]`): Function generating dict
Example:
>>> f1 = lambda x, y: x + y
>>> f2 = lambda x, y: x - y
>>> f3 = lambda x, y: x * y
>>> fx = translate_dict_func({'a': f1, 'b': f2, 'c': f3})
>>> fx(2, 3) # {'a': 5, 'b': -1, 'c': 6}
>>> fx(5, 11) # ('a': 16, 'b': -6, 'c': 55}
"""
def _func(*args, **kwargs) -> Dict[str, Any]:
return {k: f(*args, **kwargs) for k, f in d.items()}
return _func
def default_func(return_value=None) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""
Overview:
Transform optional function (maybe `None`) to function with default value
Argument:
- return_value (:obj:): Return value of the default function
Returns:
- decorator (:obj:`Callable[[Callable[..., Any]], Callable[..., Any]]`): A decorator function \
            that turns an optional function into a real function (never None)
Example:
>>> f1 = None
>>> f2 = lambda x, y: x + y
>>> ff1 = default_func()(f1)
>>> ft1 = default_func(0)(f1)
>>> ff2 = default_func()(f2)
>>> ff1(2, 3) # None
>>> ft1(2, 3) # 0
>>> ff2(2, 3) # 5
"""
def _decorator(func: Callable[..., Any]) -> Callable[..., Any]:
# noinspection PyUnusedLocal
def _func(*args, **kwargs):
return return_value
return func or _func
return _decorator
|
libsaas/services/twilio/numbers.py | MidtownFellowship/libsaas | 155 | 12600109 | <gh_stars>100-1000
from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource
class AvailablePhoneNumbersBase(resource.TwilioResource):
path = '{0}'
def get_url(self):
path = self.path.format(self.object_id)
return '{0}/{1}'.format(self.parent.get_url(), path)
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class AvailablePhoneNumbersLocal(AvailablePhoneNumbersBase):
path = '{0}/Local'
@base.apimethod
def get(self, AreaCode=None, Contains=None, InRegion=None,
InPostalCode=None, NearLatLong=None, NearNumber=None, InLata=None,
InRateCenter=None, Distance=None):
"""
Fetch available local phone numbers for an account.
:var AreaCode: Find phone numbers in the specified area code.
:vartype AreaCode: str
:var Contains: A pattern to match phone numbers on.
Valid characters are `*` and [0-9a-zA-Z].
The `*` character will match any single digit.
:vartype Contains: str
:var InRegion: Limit results to a particular region (State/Province).
Given a phone number, search within the same Region as that number.
(US and Canada only)
:vartype InRegion: str
:var InPostalCode: Limit results to a particular postal code.
Given a phone number, search within the same postal code as
that number. (US and Canada only)
:vartype InPostalCode: str
:var NearLatLong: Given a latitude/longitude pair lat,long find
geographically close numbers within Distance miles.
(US and Canada only)
:vartype NearLatLong: str
:var NearNumber: Given a phone number, find a geographically close
number within Distance miles. Distance defaults to 25 miles.
(US and Canada only)
:vartype NearNumber: str
:var InLata: Limit results to a specific Local access and transport
area (LATA). Given a phone number, search within the same LATA
as that number.
(US and Canada only)
:vartype InLata: str
:var InRateCenter: Limit results to a specific rate center,
or given a phone number search within the same rate center as
that number. Requires InLata to be set as well.
(US and Canada only)
:vartype InRateCenter: str
        :var Distance: Specifies the search radius for a Near- query in miles.
            If not specified this defaults to 25 miles.
            (US and Canada only)
        :vartype Distance: int
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class AvailablePhoneNumbersTollFree(AvailablePhoneNumbersBase):
path = '{0}/TollFree'
@base.apimethod
def get(self, AreaCode=None, Contains=None):
"""
Fetch available toll-free phone numbers for an account.
:var AreaCode: Find phone numbers in the specified area code.
:vartype AreaCode: str
:var Contains: A pattern to match phone numbers on.
Valid characters are `*` and [0-9a-zA-Z].
The `*` character will match any single digit.
:vartype Contains: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class AvailablePhoneNumbers(AvailablePhoneNumbersBase):
path = 'AvailablePhoneNumbers'
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
@base.resource(AvailablePhoneNumbersLocal)
def local(self, country_code):
"""
Return a list of local AvailablePhoneNumber resource representations
that match the specified filters, each representing a phone number
that is currently available for provisioning within this account.
"""
return AvailablePhoneNumbersLocal(self, country_code)
@base.resource(AvailablePhoneNumbersTollFree)
def toll_free(self, country_code):
"""
Return a list of toll-free AvailablePhoneNumber resource
representations that match the specified filters, each representing
a phone number that is currently available for provisioning within
this account.
"""
return AvailablePhoneNumbersTollFree(self, country_code)
class IncomingPhoneNumbersBase(resource.TwilioResource):
path = 'IncomingPhoneNumbers'
class IncomingPhoneNumber(IncomingPhoneNumbersBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class IncomingPhoneNumbersMixin(IncomingPhoneNumbersBase):
@base.apimethod
def get(self, PhoneNumber=None, FriendlyName=None,
Page=None, PageSize=None, AfterSid=None):
"""
Fetch incoming phone numbers list for an account.
:var PhoneNumber: Only show the incoming phone number resources
that match this pattern. You can specify partial numbers and
use `*` as a wildcard for any digit.
:vartype PhoneNumber: str
:var FriendlyName: Only show the incoming phone number resources
with friendly names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class IncomingPhoneNumbersLocal(IncomingPhoneNumbersMixin):
path = 'Local'
class IncomingPhoneNumbersTollFree(IncomingPhoneNumbersMixin):
path = 'TollFree'
class IncomingPhoneNumbers(IncomingPhoneNumbersMixin):
@base.resource(IncomingPhoneNumbersLocal)
def local(self):
return IncomingPhoneNumbersLocal(self)
@base.resource(IncomingPhoneNumbersTollFree)
def toll_free(self):
return IncomingPhoneNumbersTollFree(self)
class OutgoingCallerIdsBase(resource.TwilioResource):
path = 'OutgoingCallerIds'
class OutgoingCallerId(OutgoingCallerIdsBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class OutgoingCallerIds(OutgoingCallerIdsBase):
@base.apimethod
def get(self, PhoneNumber=None, FriendlyName=None,
Page=None, PageSize=None, AfterSid=None):
"""
Fetch outgoing caller ids for an account.
:var PhoneNumber: Only show the incoming phone number resources
that match this pattern. You can specify partial numbers and
use `*` as a wildcard for any digit.
:vartype PhoneNumber: str
:var FriendlyName: Only show the incoming phone number resources
with friendly names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
|
baselines/baseline_simple.py | Qin-Folks/graph-generation | 532 | 12600126 | <gh_stars>100-1000
from main import *
from scipy.linalg import toeplitz
import pyemd
import scipy.optimize as opt
def Graph_generator_baseline_train_rulebased(graphs,generator='BA'):
graph_nodes = [graphs[i].number_of_nodes() for i in range(len(graphs))]
graph_edges = [graphs[i].number_of_edges() for i in range(len(graphs))]
parameter = {}
for i in range(len(graph_nodes)):
nodes = graph_nodes[i]
edges = graph_edges[i]
# based on rule, calculate optimal parameter
if generator=='BA':
# BA optimal: nodes = n; edges = (n-m)*m
n = nodes
m = (n - np.sqrt(n**2-4*edges))/2
parameter_temp = [n,m,1]
if generator=='Gnp':
# Gnp optimal: nodes = n; edges = ((n-1)*n/2)*p
n = nodes
p = float(edges)/((n-1)*n/2)
parameter_temp = [n,p,1]
# update parameter list
if nodes not in parameter.keys():
parameter[nodes] = parameter_temp
else:
count = parameter[nodes][-1]
parameter[nodes] = [(parameter[nodes][i]*count+parameter_temp[i])/(count+1) for i in range(len(parameter[nodes]))]
parameter[nodes][-1] = count+1
# print(parameter)
return parameter
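# Worked example of the rule above (numbers are illustrative, not from a dataset):
# a graph with n = 100 nodes and 196 edges gives
#   m = (100 - sqrt(100**2 - 4*196)) / 2 = (100 - 96) / 2 = 2,
# which is consistent with edges = (n - m) * m = 98 * 2 = 196.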
def Graph_generator_baseline(graph_train, pred_num=1000, generator='BA'):
graph_nodes = [graph_train[i].number_of_nodes() for i in range(len(graph_train))]
graph_edges = [graph_train[i].number_of_edges() for i in range(len(graph_train))]
repeat = pred_num//len(graph_train)
graph_pred = []
for i in range(len(graph_nodes)):
nodes = graph_nodes[i]
edges = graph_edges[i]
# based on rule, calculate optimal parameter
if generator=='BA':
# BA optimal: nodes = n; edges = (n-m)*m
n = nodes
m = int((n - np.sqrt(n**2-4*edges))/2)
for j in range(repeat):
graph_pred.append(nx.barabasi_albert_graph(n,m))
if generator=='Gnp':
# Gnp optimal: nodes = n; edges = ((n-1)*n/2)*p
n = nodes
p = float(edges)/((n-1)*n/2)
for j in range(repeat):
graph_pred.append(nx.fast_gnp_random_graph(n, p))
return graph_pred
def emd_distance(x, y, distance_scaling=1.0):
support_size = max(len(x), len(y))
d_mat = toeplitz(range(support_size)).astype(np.float)
distance_mat = d_mat / distance_scaling
# convert histogram values x and y to float, and make them equal len
x = x.astype(np.float)
y = y.astype(np.float)
if len(x) < len(y):
x = np.hstack((x, [0.0] * (support_size - len(x))))
elif len(y) < len(x):
y = np.hstack((y, [0.0] * (support_size - len(y))))
emd = pyemd.emd(x, y, distance_mat)
return emd
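# Usage sketch (histograms are illustrative, not project data): emd_distance
# zero-pads the shorter histogram, so inputs of different lengths are accepted.
# d = emd_distance(np.array([0.5, 0.5]), np.array([0.25, 0.5, 0.25]))
# assert d >= 0.0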
# def Loss(x,args):
# '''
#
# :param x: 1-D array, parameters to be optimized
# :param args: tuple (n, G, generator, metric).
# n: n for pred graph;
# G: real graph in networkx format;
# generator: 'BA', 'Gnp', 'Powerlaw';
# metric: 'degree', 'clustering'
# :return: Loss: emd distance
# '''
# # get argument
# generator = args[2]
# metric = args[3]
#
# # get real and pred graphs
# G_real = args[1]
# if generator=='BA':
# G_pred = nx.barabasi_albert_graph(args[0],int(np.rint(x)))
# if generator=='Gnp':
# G_pred = nx.fast_gnp_random_graph(args[0],x)
#
# # define metric
# if metric == 'degree':
# G_real_hist = np.array(nx.degree_histogram(G_real))
# G_real_hist = G_real_hist / np.sum(G_real_hist)
# G_pred_hist = np.array(nx.degree_histogram(G_pred))
# G_pred_hist = G_pred_hist/np.sum(G_pred_hist)
# if metric == 'clustering':
# G_real_hist, _ = np.histogram(
# np.array(list(nx.clustering(G_real).values())), bins=50, range=(0.0, 1.0), density=False)
# G_real_hist = G_real_hist / np.sum(G_real_hist)
# G_pred_hist, _ = np.histogram(
# np.array(list(nx.clustering(G_pred).values())), bins=50, range=(0.0, 1.0), density=False)
# G_pred_hist = G_pred_hist / np.sum(G_pred_hist)
#
# loss = emd_distance(G_real_hist,G_pred_hist)
# return loss
def Loss(x,n,G_real,generator,metric):
'''
:param x: 1-D array, parameters to be optimized
:param
n: n for pred graph;
G: real graph in networkx format;
generator: 'BA', 'Gnp', 'Powerlaw';
metric: 'degree', 'clustering'
:return: Loss: emd distance
'''
# get argument
# get real and pred graphs
if generator=='BA':
G_pred = nx.barabasi_albert_graph(n,int(np.rint(x)))
if generator=='Gnp':
G_pred = nx.fast_gnp_random_graph(n,x)
# define metric
if metric == 'degree':
G_real_hist = np.array(nx.degree_histogram(G_real))
G_real_hist = G_real_hist / np.sum(G_real_hist)
G_pred_hist = np.array(nx.degree_histogram(G_pred))
G_pred_hist = G_pred_hist/np.sum(G_pred_hist)
if metric == 'clustering':
G_real_hist, _ = np.histogram(
np.array(list(nx.clustering(G_real).values())), bins=50, range=(0.0, 1.0), density=False)
G_real_hist = G_real_hist / np.sum(G_real_hist)
G_pred_hist, _ = np.histogram(
np.array(list(nx.clustering(G_pred).values())), bins=50, range=(0.0, 1.0), density=False)
G_pred_hist = G_pred_hist / np.sum(G_pred_hist)
loss = emd_distance(G_real_hist,G_pred_hist)
return loss
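# Usage sketch (G_ref is a hypothetical reference graph): score how well a BA
# graph with m = 2 matches the reference degree distribution; lower is better.
# G_ref = nx.barabasi_albert_graph(100, 3)
# loss_m2 = Loss(2, G_ref.number_of_nodes(), G_ref, 'BA', 'degree')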
def optimizer_brute(x_min, x_max, x_step, n, G_real, generator, metric):
loss_all = []
x_list = np.arange(x_min,x_max,x_step)
for x_test in x_list:
loss_all.append(Loss(x_test,n,G_real,generator,metric))
x_optim = x_list[np.argmin(np.array(loss_all))]
return x_optim
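# Usage sketch (same bounds as used for 'BA' below): brute-force the attachment
# parameter m over [1, 10) with step 1 against a hypothetical reference graph.
# m_best = optimizer_brute(1, 10, 1, G_ref.number_of_nodes(), G_ref, 'BA', 'degree')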
def Graph_generator_baseline_train_optimizationbased(graphs,generator='BA',metric='degree'):
graph_nodes = [graphs[i].number_of_nodes() for i in range(len(graphs))]
parameter = {}
for i in range(len(graph_nodes)):
print('graph ',i)
nodes = graph_nodes[i]
if generator=='BA':
n = nodes
m = optimizer_brute(1,10,1, nodes, graphs[i], generator, metric)
parameter_temp = [n,m,1]
elif generator=='Gnp':
n = nodes
p = optimizer_brute(1e-6,1,0.01, nodes, graphs[i], generator, metric)
## if use evolution
# result = opt.differential_evolution(Loss,bounds=[(0,1)],args=(nodes, graphs[i], generator, metric),maxiter=1000)
# p = result.x
parameter_temp = [n, p, 1]
# update parameter list
if nodes not in parameter.keys():
parameter[nodes] = parameter_temp
else:
count = parameter[nodes][2]
parameter[nodes] = [(parameter[nodes][i]*count+parameter_temp[i])/(count+1) for i in range(len(parameter[nodes]))]
parameter[nodes][2] = count+1
print(parameter)
return parameter
def Graph_generator_baseline_test(graph_nodes, parameter, generator='BA'):
graphs = []
for i in range(len(graph_nodes)):
nodes = graph_nodes[i]
if not nodes in parameter.keys():
nodes = min(parameter.keys(), key=lambda k: abs(k - nodes))
if generator=='BA':
n = int(parameter[nodes][0])
m = int(np.rint(parameter[nodes][1]))
print(n,m)
graph = nx.barabasi_albert_graph(n,m)
if generator=='Gnp':
n = int(parameter[nodes][0])
p = parameter[nodes][1]
print(n,p)
graph = nx.fast_gnp_random_graph(n,p)
graphs.append(graph)
return graphs
if __name__ == '__main__':
args = Args()
print('File name prefix', args.fname)
### load datasets
graphs = []
# synthetic graphs
if args.graph_type=='ladder':
graphs = []
for i in range(100, 201):
graphs.append(nx.ladder_graph(i))
args.max_prev_node = 10
if args.graph_type=='tree':
graphs = []
for i in range(2,5):
for j in range(3,5):
graphs.append(nx.balanced_tree(i,j))
args.max_prev_node = 256
if args.graph_type=='caveman':
graphs = []
for i in range(5,10):
for j in range(5,25):
graphs.append(nx.connected_caveman_graph(i, j))
args.max_prev_node = 50
if args.graph_type=='grid':
graphs = []
for i in range(10,20):
for j in range(10,20):
graphs.append(nx.grid_2d_graph(i,j))
args.max_prev_node = 40
if args.graph_type=='barabasi':
graphs = []
for i in range(100,200):
graphs.append(nx.barabasi_albert_graph(i,2))
args.max_prev_node = 130
# real graphs
if args.graph_type == 'enzymes':
graphs= Graph_load_batch(min_num_nodes=10, name='ENZYMES')
args.max_prev_node = 25
if args.graph_type == 'protein':
graphs = Graph_load_batch(min_num_nodes=20, name='PROTEINS_full')
args.max_prev_node = 80
if args.graph_type == 'DD':
graphs = Graph_load_batch(min_num_nodes=100, max_num_nodes=500, name='DD',node_attributes=False,graph_labels=True)
args.max_prev_node = 230
graph_nodes = [graphs[i].number_of_nodes() for i in range(len(graphs))]
graph_edges = [graphs[i].number_of_edges() for i in range(len(graphs))]
args.max_num_node = max(graph_nodes)
# show graphs statistics
print('total graph num: {}'.format(len(graphs)))
print('max number node: {}'.format(args.max_num_node))
print('max previous node: {}'.format(args.max_prev_node))
# start baseline generation method
generator = args.generator_baseline
metric = args.metric_baseline
print(args.fname_baseline + '.dat')
if metric=='general':
parameter = Graph_generator_baseline_train_rulebased(graphs,generator=generator)
else:
parameter = Graph_generator_baseline_train_optimizationbased(graphs,generator=generator,metric=metric)
graphs_generated = Graph_generator_baseline_test(graph_nodes, parameter,generator)
    save_graph_list(graphs_generated,args.fname_baseline + '.dat')
 |
tests/test_config.py | upwork/python-upwork | 150 | 12600138 | <reponame>upwork/python-upwork
from upwork import config
def test_config_initialization():
cfg = config.Config(
{
"consumer_key": "keyxxxxxxxxxxxxxxxxxxxx",
"consumer_secret": "<KEY>",
"access_token": "<KEY>",
"access_token_secret": "<KEY>",
}
)
assert cfg.consumer_key == "keyxxxxxxxxxxxxxxxxxxxx"
assert cfg.consumer_secret == "<KEY>"
assert cfg.access_token == "<KEY>"
assert cfg.access_token_secret == "<KEY>"
|
parsing/sym2id.py | mfernezir/YellowFin | 455 | 12600151 | from __future__ import print_function
from utils import _build_vocab
import sys
if __name__ == '__main__':
if len(sys.argv) != 2:
print('usage: python sym2id.py train.gz')
sys.exit(0)
vocabs = _build_vocab(sys.argv[1])
for word, i in vocabs.iteritems():
print(word, i)
|
main_extract_TrainingSet_P.py | ryanxingql/MFQEv2.0 | 117 | 12600153 | """Extract training set.
Randomly select frames to patch.
Patches are stored in several npys.
Each npy contains several batches.
So there are n x batch_size patches in each npy.
Return: a few npy with shape (n x height_patch x width_patch x 1), dtype=np.float32 \in [0,1]."""
import os, glob, gc, h5py
import numpy as np
import random, math
def y_import(video_path, height_frame, width_frame, nfs, startfrm, bar=True, opt_clear=True):
"""Import Y channel from a yuv video.
startfrm: start from 0
return: (nfs * height * width), dtype=uint8"""
fp = open(video_path, 'rb')
# target at startfrm
blk_size = int(height_frame * width_frame * 3 / 2)
fp.seek(blk_size * startfrm, 0)
d0 = height_frame // 2
d1 = width_frame // 2
Yt = np.zeros((height_frame, width_frame), dtype=np.uint8) # 0-255
for ite_frame in range(nfs):
for m in range(height_frame):
for n in range(width_frame):
Yt[m,n] = ord(fp.read(1))
for m in range(d0):
for n in range(d1):
fp.read(1)
for m in range(d0):
for n in range(d1):
fp.read(1)
if ite_frame == 0:
Y = Yt[np.newaxis, :, :]
else:
Y = np.vstack((Y, Yt[np.newaxis, :, :]))
if bar:
print("\r%4d | %4d" % (ite_frame + 1, nfs), end="", flush=True)
if opt_clear:
print("\r ", end="\r")
fp.close()
return Y
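# Usage sketch (file name and dimensions are illustrative, not from this project):
# read 3 consecutive luma frames of a 1920x1080 YUV420p video, starting at frame 10.
# Y = y_import('example_1920x1080.yuv', 1080, 1920, nfs=3, startfrm=10)
# Y.shape  # -> (3, 1080, 1920), dtype=uint8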
def func_PatchFrame(info_patch, num_patch, ite_npy, mode):
"""Patch and store four npys with a same index.
Shuffle the patches inside these four npys before saving."""
order_FirstFrame, order_FirstPatch, order_LastFrame, order_LastPatch, list_CmpVideo, \
VideoIndex_list_list, MidIndex_list_list, PreIndex_list_list, SubIndex_list_list, dir_save_stack = info_patch[:]
### Init stack
stack_pre = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
stack_cmp = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
stack_sub = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
stack_raw = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
### Extract patches
cal_patch_total = 0
num_frame_total = order_LastFrame - order_FirstFrame + 1
for ite_frame, order_frame in enumerate(range(order_FirstFrame, order_LastFrame + 1)):
print("\rframe %d | %d" % (ite_frame + 1, num_frame_total), end="")
cal_patch_frame = 0
### Extract basic information
index_video = VideoIndex_list_list[order_frame]
index_Mid = MidIndex_list_list[order_frame]
index_Pre = PreIndex_list_list[order_frame]
index_Sub = SubIndex_list_list[order_frame]
cmp_path = list_CmpVideo[index_video]
cmp_name = cmp_path.split("/")[-1].split(".")[0]
raw_name = cmp_name
raw_name = raw_name + ".yuv"
raw_path = os.path.join(dir_raw, raw_name)
dims_str = raw_name.split("_")[1]
width_frame = int(dims_str.split("x")[0])
height_frame = int(dims_str.split("x")[1])
### Cal step
step_height = int((height_frame - height_patch) / (num_patch_height - 1))
step_width = int((width_frame - width_patch) / (num_patch_width - 1))
### Load frames
Y_raw = np.squeeze(y_import(raw_path, height_frame, width_frame, 1, index_Mid, bar=False, opt_clear=False))
Y_cmp = np.squeeze(y_import(cmp_path, height_frame, width_frame, 1, index_Mid, bar=False, opt_clear=False))
Y_pre = np.squeeze(y_import(cmp_path, height_frame, width_frame, 1, index_Pre, bar=False, opt_clear=False))
Y_sub = np.squeeze(y_import(cmp_path, height_frame, width_frame, 1, index_Sub, bar=False, opt_clear=False))
### Patch
for ite_patch_height in range(num_patch_height):
start_height = ite_patch_height * step_height
for ite_patch_width in range(num_patch_width):
if (order_frame == order_FirstFrame) and (cal_patch_frame < order_FirstPatch):
cal_patch_frame += 1
continue
if (order_frame == order_LastFrame) and (cal_patch_frame > order_LastPatch):
cal_patch_frame += 1
continue
start_width = ite_patch_width * step_width
stack_pre[cal_patch_total, 0:height_patch, 0:width_patch, 0] = Y_pre[start_height:(start_height+height_patch), start_width:(start_width+width_patch)] / 255.0
stack_cmp[cal_patch_total, 0:height_patch, 0:width_patch, 0] = Y_cmp[start_height:(start_height+height_patch), start_width:(start_width+width_patch)] / 255.0
stack_sub[cal_patch_total, 0:height_patch, 0:width_patch, 0] = Y_sub[start_height:(start_height+height_patch), start_width:(start_width+width_patch)] / 255.0
stack_raw[cal_patch_total, 0:height_patch, 0:width_patch, 0] = Y_raw[start_height:(start_height+height_patch), start_width:(start_width+width_patch)] / 255.0
cal_patch_total += 1
cal_patch_frame += 1
### Shuffle and save npy
print("\nsaving 1/4...", end="")
random.seed(100)
random.shuffle(stack_pre)
save_path = os.path.join(dir_save_stack, "stack_" + mode + "_pre_" + str(ite_npy) + ".hdf5")
f = h5py.File(save_path, "w")
f.create_dataset('stack_pre', data=stack_pre)
f.close()
stack_pre = []
gc.collect()
print("\rsaving 2/4...", end="")
random.seed(100)
random.shuffle(stack_cmp)
save_path = os.path.join(dir_save_stack, "stack_" + mode + "_cmp_" + str(ite_npy) + ".hdf5")
f = h5py.File(save_path, "w")
f.create_dataset('stack_cmp', data=stack_cmp)
f.close()
stack_cmp = []
gc.collect()
print("\rsaving 3/4...", end="")
random.seed(100)
random.shuffle(stack_sub)
save_path = os.path.join(dir_save_stack, "stack_" + mode + "_sub_" + str(ite_npy) + ".hdf5")
f = h5py.File(save_path, "w")
f.create_dataset('stack_sub', data=stack_sub)
f.close()
stack_sub = []
gc.collect()
print("\rsaving 4/4...", end="")
random.seed(100)
random.shuffle(stack_raw)
save_path = os.path.join(dir_save_stack, "stack_" + mode + "_raw_" + str(ite_npy) + ".hdf5")
f = h5py.File(save_path, "w")
f.create_dataset('stack_raw', data=stack_raw)
f.close()
stack_raw = []
gc.collect()
print("\r ", end="\r") # clear bar
def main_extract_TrainingSet():
"""Extract training setself.
Select a non-PQF between each pair of PQFs.
Randomly select up to 20 non-PQFs each video."""
for QP in QP_list:
dir_cmp = dir_cmp_pre + str(QP)
dir_PQFLabel = dir_PQFLabel_pre + str(QP)
### List all cmp video
list_CmpVideo = glob.glob(os.path.join(dir_cmp, "*.yuv"))
num_CmpVideo = len(list_CmpVideo)
### Init dir_save_stack for this QP
dir_save_stack = dir_save_stack_pre + str(QP)
if not os.path.exists(dir_save_stack):
os.makedirs(dir_save_stack)
### List all randomly selected non-PQFs with their pre/sub PQFs and calculate the num of patches
VideoIndex_list_list = []
MidIndex_list_list = []
PreIndex_list_list = []
SubIndex_list_list = []
cal_frame = 0
for ite_CmpVideo in range(num_CmpVideo): # video by video
cmp_name = list_CmpVideo[ite_CmpVideo].split("/")[-1].split(".")[0]
# load PQF label
PQFLabel_path = os.path.join(dir_PQFLabel, "PQFLabel_" + cmp_name + PQFLabel_sub)
PQF_label = h5py.File(PQFLabel_path,'r')['PQF_label'][:]
# locate PQFs
PQFIndex_list = [i for i in range(len(PQF_label)) if PQF_label[i] == 1]
num_PQF = len(PQFIndex_list)
#
MidIndex_list = PQFIndex_list[1: (num_PQF - 1)]
PreIndex_list = PQFIndex_list[0: (num_PQF - 2)]
SubIndex_list = PQFIndex_list[2: num_PQF]
# randomly select maximum allowable pairs
random.seed(666)
random.shuffle(PreIndex_list)
random.seed(666)
random.shuffle(SubIndex_list)
random.seed(666)
random.shuffle(MidIndex_list)
num_pairs = len(PreIndex_list)
if num_pairs > max_NonPQF_OneVideo:
PreIndex_list = PreIndex_list[0: max_NonPQF_OneVideo]
SubIndex_list = SubIndex_list[0: max_NonPQF_OneVideo]
MidIndex_list = MidIndex_list[0: max_NonPQF_OneVideo]
# record
cal_frame += len(PreIndex_list)
VideoIndex_list_list += [ite_CmpVideo] * len(PreIndex_list) # video index for all selected non-PQFs
PreIndex_list_list += PreIndex_list
MidIndex_list_list += MidIndex_list
SubIndex_list_list += SubIndex_list
num_patch_available = cal_frame * num_patch_PerFrame
print("Available frames: %d - patches: %d" % (cal_frame, num_patch_available))
### Shuffle the numbering of all frames
random.seed(888)
random.shuffle(VideoIndex_list_list)
random.seed(888)
random.shuffle(MidIndex_list_list)
random.seed(888)
random.shuffle(PreIndex_list_list)
random.seed(888)
random.shuffle(SubIndex_list_list)
### Cut down the num of frames
max_patch_total = int(num_patch_available / batch_size) * batch_size
max_frame_total = math.ceil(max_patch_total / num_patch_PerFrame) # may need one more frame to patch
VideoIndex_list_list = VideoIndex_list_list[0: max_frame_total]
MidIndex_list_list = MidIndex_list_list[0: max_frame_total]
PreIndex_list_list = PreIndex_list_list[0: max_frame_total]
SubIndex_list_list = SubIndex_list_list[0: max_frame_total]
### Cal num of batch for each npy, including training and validation
num_patch_val = int(int((1 - ratio_training) * max_patch_total) / batch_size) * batch_size
num_patch_tra = max_patch_total - num_patch_val # we can make sure that it is a multiple of batch size
num_batch_tra = int(num_patch_tra / batch_size)
num_batch_val = int(num_patch_val / batch_size)
num_npy_tra = int(num_batch_tra / max_batch_PerNpy)
num_batch_PerNpy_list_tra = [max_batch_PerNpy] * num_npy_tra
if (num_batch_tra % max_batch_PerNpy) > 0:
num_batch_PerNpy_list_tra.append(num_batch_tra - max_batch_PerNpy * num_npy_tra)
num_npy_val = int(num_batch_val / max_batch_PerNpy)
num_batch_PerNpy_list_val = [max_batch_PerNpy] * num_npy_val
if (num_batch_val % max_batch_PerNpy) > 0:
num_batch_PerNpy_list_val.append(num_batch_val - max_batch_PerNpy * num_npy_val)
### Patch and stack
# some frames may be partly patched.
for ite_npy_tra in range(len(num_batch_PerNpy_list_tra)):
print("stacking tra npy %d / %d..." % (ite_npy_tra + 1, len(num_batch_PerNpy_list_tra)))
# Cal the position of the first patch and the last patch of this npy
first_patch_cal = sum(num_batch_PerNpy_list_tra[0: ite_npy_tra]) * batch_size + 1
order_FirstFrame = math.ceil(first_patch_cal / num_patch_PerFrame) - 1
order_FirstPatch = first_patch_cal - order_FirstFrame * num_patch_PerFrame - 1
last_patch_cal = sum(num_batch_PerNpy_list_tra[0: ite_npy_tra + 1]) * batch_size
order_LastFrame = math.ceil(last_patch_cal / num_patch_PerFrame) - 1
order_LastPatch = last_patch_cal - order_LastFrame * num_patch_PerFrame - 1
# patch
num_patch = num_batch_PerNpy_list_tra[ite_npy_tra] * batch_size
info_patch = (order_FirstFrame, order_FirstPatch, order_LastFrame, order_LastPatch, list_CmpVideo, \
VideoIndex_list_list, MidIndex_list_list, PreIndex_list_list, SubIndex_list_list, dir_save_stack)
func_PatchFrame(info_patch, num_patch=num_patch, ite_npy=ite_npy_tra, mode="tra")
for ite_npy_val in range(len(num_batch_PerNpy_list_val)):
print("stacking val npy %d / %d..." % (ite_npy_val + 1, len(num_batch_PerNpy_list_val)))
# Cal the position of the first patch and the last patch of this npy
first_patch_cal = (sum(num_batch_PerNpy_list_tra) + sum(num_batch_PerNpy_list_val[0: ite_npy_val])) * batch_size + 1
order_FirstFrame = math.ceil(first_patch_cal / num_patch_PerFrame) - 1
order_FirstPatch = first_patch_cal - order_FirstFrame * num_patch_PerFrame - 1
last_patch_cal = (sum(num_batch_PerNpy_list_tra) + sum(num_batch_PerNpy_list_val[0: ite_npy_val + 1])) * batch_size
order_LastFrame = math.ceil(last_patch_cal / num_patch_PerFrame) - 1
order_LastPatch = last_patch_cal - order_LastFrame * num_patch_PerFrame - 1
# patch
num_patch = num_batch_PerNpy_list_val[ite_npy_val] * batch_size
info_patch = (order_FirstFrame, order_FirstPatch, order_LastFrame, order_LastPatch, list_CmpVideo, \
VideoIndex_list_list, MidIndex_list_list, PreIndex_list_list, SubIndex_list_list, dir_save_stack)
func_PatchFrame(info_patch, num_patch=num_patch, ite_npy=ite_npy_val, mode="val")
if __name__ == '__main__':
QP_list = [32,42]
### Settings
num_patch_width = 26
num_patch_height = 16
height_patch = 64
width_patch = 64
num_patch_PerFrame = num_patch_width * num_patch_height
dir_database = "/home/x/SCI_1/Database/"
dir_raw = os.path.join(dir_database, "train_108/raw")
dir_cmp_pre = os.path.join(dir_database, "train_108/LDP_HM16.5/QP")
dir_PQFLabel_pre = "/home/x/SCI_1/MFQEv2.0/Database/PQF_label/ground_truth/train_108/QP"
dir_save_stack_pre = "/home/x/SCI_1/MFQEv2.0/Database/PQF_enhancement/QP"
PQFLabel_sub = "_MaxNfs_300.hdf5"
batch_size = 64
max_batch_PerNpy = 14500
ratio_training = 1.0 # use all extracted frames for training; a small part of the test set is used for validation instead
max_NonPQF_OneVideo = 20
main_extract_TrainingSet()
|
Chapter18/SQLite/insert_data.py | add54/ADMIN_SYS_PYTHON | 116 | 12600158 | import sqlite3
con_obj = sqlite3.connect("test.db")
with con_obj:
cur_obj = con_obj.cursor()
cur_obj.execute("INSERT INTO books VALUES ('Pride and Prejudice', '<NAME>')")
cur_obj.execute("INSERT INTO books VALUES ('<NAME>', '<NAME>')")
cur_obj.execute("INSERT INTO books VALUES ('The Lord of the Rings', '<NAME>')")
cur_obj.execute("INSERT INTO books VALUES ('Murder on the Orient Express', '<NAME>')")
cur_obj.execute("INSERT INTO books VALUES ('A Study in Scarlet', '<NAME>')")
con_obj.commit()
print("Data inserted Successfully !!")
|
lib/model/pcl/pcl.py | BarneyQiao/pcl.pytorch | 233 | 12600240 | from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.boxes as box_utils
import utils.blob as blob_utils
import utils.net as net_utils
from core.config import cfg
import numpy as np
from sklearn.cluster import KMeans
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def PCL(boxes, cls_prob, im_labels, cls_prob_new):
cls_prob = cls_prob.data.cpu().numpy()
cls_prob_new = cls_prob_new.data.cpu().numpy()
if cls_prob.shape[1] != im_labels.shape[1]:
cls_prob = cls_prob[:, 1:]
eps = 1e-9
cls_prob[cls_prob < eps] = eps
cls_prob[cls_prob > 1 - eps] = 1 - eps
cls_prob_new[cls_prob_new < eps] = eps
cls_prob_new[cls_prob_new > 1 - eps] = 1 - eps
proposals = _get_graph_centers(boxes.copy(), cls_prob.copy(),
im_labels.copy())
labels, cls_loss_weights, gt_assignment, bbox_targets, bbox_inside_weights, bbox_outside_weights \
= get_proposal_clusters(boxes.copy(), proposals, im_labels.copy())
return {'labels' : labels.reshape(1, -1).astype(np.int64).copy(),
'cls_loss_weights' : cls_loss_weights.reshape(1, -1).astype(np.float32).copy(),
'gt_assignment' : gt_assignment.reshape(1, -1).astype(np.float32).copy(),
'bbox_targets' : bbox_targets.astype(np.float32).copy(),
'bbox_inside_weights' : bbox_inside_weights.astype(np.float32).copy(),
'bbox_outside_weights' : bbox_outside_weights.astype(np.float32).copy()}
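# The returned arrays (labels, cls_loss_weights, gt_assignment and the bbox targets /
# weights) are the supervision consumed by the refinement losses defined further below
# (PCLLosses / OICRLosses and, when cfg.MODEL.WITH_FRCNN is set, the box-regression loss).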
def OICR(boxes, cls_prob, im_labels, cls_prob_new):
cls_prob = cls_prob.data.cpu().numpy()
cls_prob_new = cls_prob_new.data.cpu().numpy()
if cls_prob.shape[1] != im_labels.shape[1]:
cls_prob = cls_prob[:, 1:]
eps = 1e-9
cls_prob[cls_prob < eps] = eps
cls_prob[cls_prob > 1 - eps] = 1 - eps
proposals = _get_highest_score_proposals(boxes, cls_prob, im_labels)
labels, cls_loss_weights, gt_assignment, bbox_targets, bbox_inside_weights, bbox_outside_weights \
= get_proposal_clusters(boxes.copy(), proposals, im_labels.copy())
return {'labels' : labels.reshape(1, -1).astype(np.int64).copy(),
'cls_loss_weights' : cls_loss_weights.reshape(1, -1).astype(np.float32).copy(),
'gt_assignment' : gt_assignment.reshape(1, -1).astype(np.float32).copy(),
'bbox_targets' : bbox_targets.astype(np.float32).copy(),
'bbox_inside_weights' : bbox_inside_weights.astype(np.float32).copy(),
'bbox_outside_weights' : bbox_outside_weights.astype(np.float32).copy()}
def _get_highest_score_proposals(boxes, cls_prob, im_labels):
"""Get proposals with highest score."""
num_images, num_classes = im_labels.shape
assert num_images == 1, 'batch size should be equal to 1'
im_labels_tmp = im_labels[0, :]
gt_boxes = np.zeros((0, 4), dtype=np.float32)
gt_classes = np.zeros((0, 1), dtype=np.int32)
gt_scores = np.zeros((0, 1), dtype=np.float32)
for i in xrange(num_classes):
if im_labels_tmp[i] == 1:
cls_prob_tmp = cls_prob[:, i].copy()
max_index = np.argmax(cls_prob_tmp)
gt_boxes = np.vstack((gt_boxes, boxes[max_index, :].reshape(1, -1)))
gt_classes = np.vstack((gt_classes, (i + 1) * np.ones((1, 1), dtype=np.int32)))
gt_scores = np.vstack((gt_scores,
cls_prob_tmp[max_index] * np.ones((1, 1), dtype=np.float32)))
cls_prob[max_index, :] = 0
proposals = {'gt_boxes' : gt_boxes,
'gt_classes': gt_classes,
'gt_scores': gt_scores}
return proposals
def _get_top_ranking_proposals(probs):
"""Get top ranking proposals by k-means"""
kmeans = KMeans(n_clusters=cfg.TRAIN.NUM_KMEANS_CLUSTER,
random_state=cfg.RNG_SEED).fit(probs)
high_score_label = np.argmax(kmeans.cluster_centers_)
index = np.where(kmeans.labels_ == high_score_label)[0]
if len(index) == 0:
index = np.array([np.argmax(probs)])
return index
def _build_graph(boxes, iou_threshold):
"""Build graph based on box IoU"""
overlaps = box_utils.bbox_overlaps(
boxes.astype(dtype=np.float32, copy=False),
boxes.astype(dtype=np.float32, copy=False))
return (overlaps > iou_threshold).astype(np.float32)
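# Illustration with assumed boxes: two proposals with IoU 0.6 under an IoU threshold of
# 0.4 give graph[i, j] = graph[j, i] = 1.0; the diagonal is always 1.0 because every box
# overlaps itself completely.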
def _get_graph_centers(boxes, cls_prob, im_labels):
"""Get graph centers."""
num_images, num_classes = im_labels.shape
assert num_images == 1, 'batch size should be equal to 1'
im_labels_tmp = im_labels[0, :].copy()
gt_boxes = np.zeros((0, 4), dtype=np.float32)
gt_classes = np.zeros((0, 1), dtype=np.int32)
gt_scores = np.zeros((0, 1), dtype=np.float32)
for i in xrange(num_classes):
if im_labels_tmp[i] == 1:
cls_prob_tmp = cls_prob[:, i].copy()
idxs = np.where(cls_prob_tmp >= 0)[0]
idxs_tmp = _get_top_ranking_proposals(cls_prob_tmp[idxs].reshape(-1, 1))
idxs = idxs[idxs_tmp]
boxes_tmp = boxes[idxs, :].copy()
cls_prob_tmp = cls_prob_tmp[idxs]
graph = _build_graph(boxes_tmp, cfg.TRAIN.GRAPH_IOU_THRESHOLD)
keep_idxs = []
gt_scores_tmp = []
count = cls_prob_tmp.size
while True:
order = np.sum(graph, axis=1).argsort()[::-1]
tmp = order[0]
keep_idxs.append(tmp)
inds = np.where(graph[tmp, :] > 0)[0]
gt_scores_tmp.append(np.max(cls_prob_tmp[inds]))
graph[:, inds] = 0
graph[inds, :] = 0
count = count - len(inds)
if count <= 5:
break
gt_boxes_tmp = boxes_tmp[keep_idxs, :].copy()
gt_scores_tmp = np.array(gt_scores_tmp).copy()
keep_idxs_new = np.argsort(gt_scores_tmp)\
[-1:(-1 - min(len(gt_scores_tmp), cfg.TRAIN.MAX_PC_NUM)):-1]
gt_boxes = np.vstack((gt_boxes, gt_boxes_tmp[keep_idxs_new, :]))
gt_scores = np.vstack((gt_scores,
gt_scores_tmp[keep_idxs_new].reshape(-1, 1)))
gt_classes = np.vstack((gt_classes,
(i + 1) * np.ones((len(keep_idxs_new), 1), dtype=np.int32)))
# If a proposal is chosen as a cluster center,
# we simply delete it from the candidate proposal pool,
# because we found that the results of different strategies are similar and this strategy is more efficient
cls_prob = np.delete(cls_prob.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)
boxes = np.delete(boxes.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)
proposals = {'gt_boxes' : gt_boxes,
'gt_classes': gt_classes,
'gt_scores': gt_scores}
return proposals
def _compute_targets(ex_rois, gt_rois, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = box_utils.bbox_transform_inv(ex_rois, gt_rois,
cfg.MODEL.BBOX_REG_WEIGHTS)
return np.hstack((labels[:, np.newaxis], targets)).astype(
np.float32, copy=False)
def _expand_bbox_targets(bbox_target_data):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
num_bbox_reg_classes = cfg.MODEL.NUM_CLASSES + 1
clss = bbox_target_data[:, 0]
bbox_targets = blob_utils.zeros((clss.size, 4 * num_bbox_reg_classes))
bbox_inside_weights = blob_utils.zeros(bbox_targets.shape)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = int(clss[ind])
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = (1.0, 1.0, 1.0, 1.0)
return bbox_targets, bbox_inside_weights
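# Illustration with an assumed cfg.MODEL.NUM_CLASSES = 20: num_bbox_reg_classes = 21,
# so a proposal labelled class 3 gets its four regression targets written into columns
# 12:16 of an 84-wide row, with matching inside weights of 1.0 and zeros everywhere else.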
def get_proposal_clusters(all_rois, proposals, im_labels):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
num_images, num_classes = im_labels.shape
assert num_images == 1, 'batch size should be equal to 1'
# overlaps: (rois x gt_boxes)
gt_boxes = proposals['gt_boxes']
gt_labels = proposals['gt_classes']
gt_scores = proposals['gt_scores']
overlaps = box_utils.bbox_overlaps(
all_rois.astype(dtype=np.float32, copy=False),
gt_boxes.astype(dtype=np.float32, copy=False))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_labels[gt_assignment, 0]
cls_loss_weights = gt_scores[gt_assignment, 0]
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those with < FG_THRESH overlap
bg_inds = np.where(max_overlaps < cfg.TRAIN.FG_THRESH)[0]
ig_inds = np.where(max_overlaps < cfg.TRAIN.BG_THRESH)[0]
cls_loss_weights[ig_inds] = 0.0
labels[bg_inds] = 0
if cfg.MODEL.WITH_FRCNN:
bbox_targets = _compute_targets(all_rois, gt_boxes[gt_assignment, :],
labels)
bbox_targets, bbox_inside_weights = _expand_bbox_targets(bbox_targets)
bbox_outside_weights = np.array(
bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype) \
* cls_loss_weights.reshape(-1, 1)
else:
bbox_targets, bbox_inside_weights, bbox_outside_weights = np.array([0]), np.array([0]), np.array([0])
gt_assignment[bg_inds] = -1
return labels, cls_loss_weights, gt_assignment, bbox_targets, bbox_inside_weights, bbox_outside_weights
class PCLLosses(nn.Module):
def forward(self, pcl_probs, labels, cls_loss_weights, gt_assignments):
cls_loss = 0.0
weight = cls_loss_weights.view(-1).float()
labels = labels.view(-1)
gt_assignments = gt_assignments.view(-1)
for gt_assignment in gt_assignments.unique():
inds = torch.nonzero(gt_assignment == gt_assignments,
as_tuple=False).view(-1)
if gt_assignment == -1:
assert labels[inds].sum() == 0
cls_loss -= (torch.log(pcl_probs[inds, 0].clamp(1e-9, 10000))
* weight[inds]).sum()
else:
assert labels[inds].unique().size(0) == 1
label_cur = labels[inds[0]]
cls_loss -= torch.log(
pcl_probs[inds, label_cur].clamp(1e-9, 10000).mean()
) * weight[inds].sum()
return cls_loss / max(float(pcl_probs.size(0)), 1.)
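# Hedged usage sketch (tensor names are illustrative): the dictionary returned by PCL()
# above supplies the supervision, e.g.
#   criterion = PCLLosses()
#   loss = criterion(refine_prob, labels, cls_loss_weights, gt_assignments)
# where refine_prob is a (num_rois, num_classes + 1) softmax output with background at column 0.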
class OICRLosses(nn.Module):
def __init__(self):
super(OICRLosses, self).__init__()
def forward(self, prob, labels, cls_loss_weights, gt_assignments, eps = 1e-6):
loss = torch.log(prob + eps)[range(prob.size(0)), labels]
loss *= -cls_loss_weights
ret = loss.mean()
return ret
|
Stereo_Online_Adaptation.py | wtyuan96/Real-time-self-adaptive-deep-stereo | 385 | 12600252 | import tensorflow as tf
import numpy as np
import argparse
import Nets
import os
import sys
import time
import cv2
import json
import datetime
import shutil
from matplotlib import pyplot as plt
from Data_utils import data_reader,weights_utils,preprocessing
from Losses import loss_factory
from Sampler import sampler_factory
#static params
MAX_DISP=256
PIXEL_TH = 3
def scale_tensor(tensor,scale):
return preprocessing.rescale_image(tensor,[tf.shape(tensor)[1]//scale,tf.shape(tensor)[2]//scale])
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
return np.exp(x) / np.sum(np.exp(x), axis=0)
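# Example: softmax(np.array([0.0, 0.0])) -> [0.5, 0.5]. This plain form has no
# max-subtraction for numerical stability; the block-sampling scores it is applied to
# below stay small, so overflow is unlikely in practice.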
def main(args):
#load json file config
with open(args.blockConfig) as json_data:
train_config = json.load(json_data)
#read input data
with tf.variable_scope('input_reader'):
data_set = data_reader.dataset(
args.list,
batch_size = 1,
crop_shape=args.imageShape,
num_epochs=1,
augment=False,
is_training=False,
shuffle=False
)
left_img_batch, right_img_batch, gt_image_batch = data_set.get_batch()
inputs={
'left':left_img_batch,
'right':right_img_batch,
'target':gt_image_batch
}
#build inference network
with tf.variable_scope('model'):
net_args = {}
net_args['left_img'] = left_img_batch
net_args['right_img'] = right_img_batch
net_args['split_layers'] = [None]
net_args['sequence'] = True
net_args['train_portion'] = 'BEGIN'
net_args['bulkhead'] = True if args.mode=='MAD' else False
stereo_net = Nets.get_stereo_net(args.modelName, net_args)
print('Stereo Prediction Model:\n', stereo_net)
predictions = stereo_net.get_disparities()
full_res_disp = predictions[-1]
#build real full resolution loss
with tf.variable_scope('full_res_loss'):
# reconstruction loss between warped right image and original left image
full_reconstruction_loss = loss_factory.get_reprojection_loss('mean_SSIM_l1',reduced=True)(predictions,inputs)
#build validation ops
with tf.variable_scope('validation_error'):
# compute error against gt
abs_err = tf.abs(full_res_disp - gt_image_batch)
valid_map = tf.where(tf.equal(gt_image_batch, 0), tf.zeros_like(gt_image_batch, dtype=tf.float32), tf.ones_like(gt_image_batch, dtype=tf.float32))
filtered_error = abs_err * valid_map
abs_err = tf.reduce_sum(filtered_error) / tf.reduce_sum(valid_map)
bad_pixel_abs = tf.where(tf.greater(filtered_error, PIXEL_TH), tf.ones_like(filtered_error, dtype=tf.float32), tf.zeros_like(filtered_error, dtype=tf.float32))
bad_pixel_perc = tf.reduce_sum(bad_pixel_abs) / tf.reduce_sum(valid_map)
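# abs_err is the mean end-point error (EPE) over valid ground-truth pixels, and
# bad_pixel_perc ("bad3") is the fraction of valid pixels whose absolute disparity
# error exceeds PIXEL_TH = 3.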
#build train ops
disparity_trainer = tf.train.MomentumOptimizer(args.lr,0.9)
train_ops = []
if args.mode == 'MAD':
#build train ops for separate portion of the network
predictions = predictions[:-1] #remove full res disp
inputs_modules = {
'left':scale_tensor(left_img_batch,args.reprojectionScale),
'right':scale_tensor(right_img_batch,args.reprojectionScale),
'target':scale_tensor(gt_image_batch,args.reprojectionScale)/args.reprojectionScale
}
assert(len(predictions)==len(train_config))
for counter,p in enumerate(predictions):
print('Build train ops for disparity {}'.format(counter))
#rescale predictions to proper resolution
multiplier = tf.cast(tf.shape(left_img_batch)[1]//tf.shape(p)[1],tf.float32)
p = preprocessing.resize_to_prediction(p,inputs_modules['left'])*multiplier
#compute reprojection error
with tf.variable_scope('reprojection_'+str(counter)):
reconstruction_loss = loss_factory.get_reprojection_loss('mean_SSIM_l1',reduced=True)([p],inputs_modules)
#build train op
layer_to_train = train_config[counter]
print('Going to train on {}'.format(layer_to_train))
var_accumulator=[]
for name in layer_to_train:
var_accumulator+=stereo_net.get_variables(name)
print('Number of variable to train: {}'.format(len(var_accumulator)))
#add new training op
train_ops.append(disparity_trainer.minimize(reconstruction_loss,var_list=var_accumulator))
print('Done')
print('='*50)
#create Sampler to fetch portions to train
sampler = sampler_factory.get_sampler(args.sampleMode,args.numBlocks,args.fixedID)
elif args.mode=='FULL':
#build single train op for the full network
train_ops.append(disparity_trainer.minimize(full_reconstruction_loss))
if args.summary:
#add summaries
tf.summary.scalar('EPE',abs_err)
tf.summary.scalar('bad3',bad_pixel_perc)
tf.summary.image('full_res_disp',preprocessing.colorize_img(full_res_disp,cmap='jet'),max_outputs=1)
tf.summary.image('gt_disp',preprocessing.colorize_img(gt_image_batch,cmap='jet'),max_outputs=1)
#create summary logger
summary_op = tf.summary.merge_all()
logger = tf.summary.FileWriter(args.output)
#start session
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
#init stuff
sess.run([tf.global_variables_initializer(),tf.local_variables_initializer()])
#restore disparity inference weights
var_to_restore = weights_utils.get_var_to_restore_list(args.weights, [])
assert(len(var_to_restore)>0)
restorer = tf.train.Saver(var_list=var_to_restore)
restorer.restore(sess,args.weights)
print('Disparity Net restored, number of restored variables: {}'.format(len(var_to_restore)))
num_actions=len(train_ops)
if args.mode=='FULL':
selected_train_ops = train_ops
else:
selected_train_ops = [tf.no_op()]
epe_accumulator = []
bad3_accumulator = []
time_accumulator = []
exec_time = 0
fetch_counter=[0]*num_actions
sample_distribution=np.zeros(shape=[num_actions])
temp_score = np.zeros(shape=[num_actions])
loss_t_2 = 0
loss_t_1 = 0
expected_loss = 0
last_trained_blocks = []
reset_counter=0
step=0
max_steps=data_set.get_max_steps()
try:
start_time = time.time()
while True:
#fetch new network portion to train
if step%args.sampleFrequency==0 and args.mode=='MAD':
#Sample
distribution = softmax(sample_distribution)
blocks_to_train = sampler.sample(distribution)
selected_train_ops = [train_ops[i] for i in blocks_to_train]
#accumulate sampling statistics
for l in blocks_to_train:
fetch_counter[l]+=1
#build list of tensorflow operations that needs to be executed
#errors and full resolution loss
tf_fetches = [abs_err,bad_pixel_perc,full_reconstruction_loss]
if args.summary and step%100==0:
#summaries
tf_fetches = tf_fetches + [summary_op]
#update ops
tf_fetches = tf_fetches+selected_train_ops
if args.logDispStep!=-1 and step%args.logDispStep==0:
#prediction for serialization to disk
tf_fetches=tf_fetches + [full_res_disp]
#run network
fetches = sess.run(tf_fetches)
new_loss = fetches[2]
if args.mode == 'MAD':
#update sampling probabilities
if step==0:
loss_t_2 = new_loss
loss_t_1 = new_loss
expected_loss = 2*loss_t_1-loss_t_2
gain_loss=expected_loss-new_loss
sample_distribution = 0.99*sample_distribution
for i in last_trained_blocks:
sample_distribution[i] += 0.01*gain_loss
last_trained_blocks=blocks_to_train
loss_t_2 = loss_t_1
loss_t_1 = new_loss
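# MAD heuristic recap: expected_loss linearly extrapolates the two previous losses, so
# gain_loss > 0 means the blocks trained at the last step reduced the loss more than the
# trend predicted; their scores in sample_distribution rise and the softmax above makes
# them more likely to be sampled again.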
#accumulate performance metrics
epe_accumulator.append(fetches[0])
bad3_accumulator.append(fetches[1])
if step%100==0:
#log on terminal
fbTime = (time.time()-start_time)
exec_time += fbTime
fbTime = fbTime/100
if args.summary:
logger.add_summary(fetches[3],global_step=step)
missing_time=(max_steps-step)*fbTime
print('Step:{:4d}\tbad3:{:.2f}\tEPE:{:.2f}\tSSIM:{:.2f}\tf/b time:{:3f}\tMissing time:{}'.format(step,fetches[1], fetches[0],new_loss,fbTime,datetime.timedelta(seconds=missing_time)))
start_time = time.time()
#reset network if necessary
if new_loss>args.SSIMTh:
restorer.restore(sess,args.weights)
reset_counter+=1
#save disparity if requested
if args.logDispStep!=-1 and step%args.logDispStep==0:
dispy=fetches[-1]
dispy_to_save = np.clip(dispy[0], 0, MAX_DISP)
dispy_to_save = (dispy_to_save*256.0).astype(np.uint16)
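# Disparities are saved in the common 16-bit PNG convention (value = disparity * 256),
# clipped to MAX_DISP, so the files can be read back with standard KITTI-style tooling.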
cv2.imwrite(os.path.join(args.output, 'disparities/disparity_{}.png'.format(step)), dispy_to_save)
step+=1
except tf.errors.OutOfRangeError:
pass
finally:
epe_array = epe_accumulator
bad3_array = bad3_accumulator
epe_accumulator = np.sum(epe_accumulator)
bad3_accumulator = np.sum(bad3_accumulator)
with open(os.path.join(args.output, 'stats.csv'), 'w+') as f_out:
# report series
f_out.write('Metrics,cumulative,average\n')
f_out.write('EPE,{},{}\n'.format(epe_accumulator,epe_accumulator/step))
f_out.write('bad3,{},{}\n'.format(bad3_accumulator,bad3_accumulator/step))
f_out.write('time,{},{}\n'.format(exec_time,exec_time/step))
f_out.write('FPS,{}\n'.format(1/(exec_time/step)))
f_out.write('#resets,{}\n'.format(reset_counter))
f_out.write('Blocks')
for n in range(len(predictions)):
f_out.write(',{}'.format(n))
f_out.write(',final\n')
f_out.write('fetch_counter')
for c in fetch_counter:
f_out.write(',{}'.format(c))
f_out.write('\n')
f_out.write('sample_distribution')
for c in sample_distribution:
f_out.write(',{}'.format(c))
f_out.write('\n')
step_time = exec_time/step
time_array = [str(x*step_time) for x in range(len(epe_array))]
with open(os.path.join(args.output,'series.csv'),'w+') as f_out:
f_out.write('Iteration,Time,EPE,bad3\n')
for i,(t,e,b) in enumerate(zip(time_array,epe_array,bad3_array)):
f_out.write('{},{},{},{}\n'.format(i,t,e,b))
print('Result saved in {}'.format(args.output))
print('All Done, Bye Bye!')
if __name__=='__main__':
parser=argparse.ArgumentParser(description='Script for online Adaptation of a Deep Stereo Network')
parser.add_argument("-l","--list", help='path to the list file with frames to be processed', required=True)
parser.add_argument("-o","--output", help="path to the output folder where the results will be saved", required=True)
parser.add_argument("--weights",help="path to the initial weights for the disparity estimation network",required=True)
parser.add_argument("--modelName", help="name of the stereo model to be used", default="Dispnet", choices=Nets.STEREO_FACTORY.keys())
parser.add_argument("--numBlocks", help="number of CNN portions to train at each iteration",type=int,default=1)
parser.add_argument("--lr", help="value for learning rate",default=0.0001, type=float)
parser.add_argument("--blockConfig",help="path to the block_config json file",required=True)
parser.add_argument("--sampleMode",help="choose the sampling heuristic to use",choices=sampler_factory.AVAILABLE_SAMPLER,default='SAMPLE')
parser.add_argument("--fixedID",help="index of the portions of network to train, used only if sampleMode=FIXED",type=int,nargs='+',default=[0])
parser.add_argument("--reprojectionScale",help="compute all loss function at 1/reprojectionScale",default=1,type=int)
parser.add_argument("--summary",help='flag to enable tensorboard summaries',action='store_true')
parser.add_argument("--imageShape", help='two int for the size of the crop extracted from each image [height,width]', nargs='+', type=int, default=[320,1216])
parser.add_argument("--SSIMTh",help="reset network to initial configuration if loss is above this value",type=float,default=0.5)
parser.add_argument("--sampleFrequency",help="sample new network portions to train every K frame",type=int,default=1)
parser.add_argument("--mode",help="online adaptation mode: NONE - perform only inference, FULL - full online backprop, MAD - backprop only on portions of the network", choices=['NONE','FULL','MAD'], default='MAD')
parser.add_argument("--logDispStep", help="save disparity every K step, -1 to disable", default=-1, type=int)
args=parser.parse_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
if args.logDispStep!=-1 and not os.path.exists(os.path.join(args.output, 'disparities')):
os.makedirs(os.path.join(args.output, 'disparities'))
shutil.copy(args.blockConfig,os.path.join(args.output,'config.json'))
with open(os.path.join(args.output, 'params.sh'), 'w+') as out:
sys.argv[0] = os.path.join(os.getcwd(), sys.argv[0])
out.write('#!/bin/bash\n')
out.write('python3 ')
out.write(' '.join(sys.argv))
out.write('\n')
main(args)
|
osp/citations/validate_config.py | davidmcclure/open-syllabus-project | 220 | 12600260 | import inflect
from osp.citations.utils import tokenize_field
from osp.common.utils import read_yaml
class Validate_Config:
def __init__(self, package='osp.citations', path='config/validate.yml'):
"""
Read the config file.
"""
self.config = read_yaml(package, path)
@property
def max_fuzz(self):
"""
Get the max fuzz value.
Returns: float
"""
return self.config.get('max_fuzz', float('inf'))
@property
def blacklisted_titles(self):
"""
Pluralize the blacklisted titles.
Returns: list
"""
p = inflect.engine()
singulars = self.config.get('blacklisted_titles', [])
return map(
tokenize_field,
singulars + [p.plural(s) for s in singulars],
)
@property
def blacklisted_surnames(self):
"""
Pluralize the blacklisted surnames.
Returns: list
"""
return map(
tokenize_field,
self.config.get('blacklisted_surnames', [])
)
@property
def whitelist(self):
"""
Get the set of whitelisted ids.
Returns: list
"""
return self.config.get('whitelist', [])
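# Hedged usage sketch (the fuzz value and tokenised fields are assumed inputs from the
# citation-matching pipeline; attribute names follow the properties above):
#   config = Validate_Config()
#   titles = set(config.blacklisted_titles)
#   surnames = set(config.blacklisted_surnames)
#   ok = fuzz <= config.max_fuzz and title not in titles and surname not in surnames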
|