file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---
__init__.py | from .recognizer import Recognizer
from .distance_checker import DistanceChecker
from .time_checker import TimeChecker | from .pass_checker import PassChecker
from .stop_recognizer import StopRecognizer |
|
ClusterRole.py | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from ... import _utilities, _tables
from . import outputs
from ... import meta as _meta
from ._inputs import *
__all__ = ['ClusterRole']
class ClusterRole(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aggregation_rule: Optional[pulumi.Input[pulumi.InputType['AggregationRuleArgs']]] = None,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
rules: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['PolicyRuleArgs']]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.22.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AggregationRuleArgs']] aggregation_rule: AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata.
:param pulumi.Input[List[pulumi.Input[pulumi.InputType['PolicyRuleArgs']]]] rules: Rules holds all the PolicyRules for this ClusterRole
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['aggregation_rule'] = aggregation_rule
__props__['api_version'] = 'rbac.authorization.k8s.io/v1beta1'
__props__['kind'] = 'ClusterRole'
__props__['metadata'] = metadata
__props__['rules'] = rules
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1:ClusterRole"), pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1alpha1:ClusterRole")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ClusterRole, __self__).__init__(
'kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRole',
resource_name,
__props__,
opts)
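# A minimal usage sketch (hypothetical; it assumes the generated pulumi_kubernetes
# package layout, so the module path and argument shapes below are illustrative only):
#
#   import pulumi_kubernetes as k8s
#   pod_viewer = k8s.rbac.v1beta1.ClusterRole(
#       "pod-viewer",
#       rules=[k8s.rbac.v1beta1.PolicyRuleArgs(
#           api_groups=[""],
#           resources=["pods"],
#           verbs=["get", "list", "watch"])])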
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ClusterRole': | properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ClusterRole(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="aggregationRule")
def aggregation_rule(self) -> pulumi.Output[Optional['outputs.AggregationRule']]:
"""
AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.
"""
return pulumi.get(self, "aggregation_rule")
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> pulumi.Output[Optional[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]:
"""
Standard object's metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[Optional[List['outputs.PolicyRule']]]:
"""
Rules holds all the PolicyRules for this ClusterRole
"""
return pulumi.get(self, "rules")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | """
Get an existing ClusterRole resource's state with the given name, id, and optional extra |
doc.js | $(() => {
let iPage = 1; // current page number
let iTotalPage = 1; // total number of pages
let bIsLoadData = false; // whether data is currently being loaded
fn_load_docs(); // load the document list
// load more content on page scroll
$(window).scroll(function () {
// browser window height
let showHeigtht = $(window).height();
// total page height
let pageHeight = $(document).height();
// distance the page can be scrolled
let canScrollHeight = pageHeight - showHeigtht;
// how far the page has scrolled; this updates continuously as the page scrolls
let nowScroll = $(document).scrollTop();
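// e.g. with a 3000px document and an 800px window, canScrollHeight is 2200,
// so the next page is requested once scrollTop passes 2100 (the 100px threshold below)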
if ((canScrollHeight - nowScroll) < 100){
if(!bIsLoadData){
bIsLoadData = true;
// check the page number and only fetch more while it is below the total
if(iPage < iTotalPage){
iPage += 1;
fn_load_docs();
}else {
message.showInfo('已全部加载,没有更多内容!');
$('a.btn-more').html('已全部加载,没有更多内容!')
}
}
| // fetch docs info
function fn_load_docs() {
$
.ajax({
url: '/doc/docs/',
type: 'GET',
data: {page: iPage},
dataType: 'json'
})
.done((res) => {
if (res.errno === '0') {
iTotalPage = res.data.total_page;
res.data.docs.forEach((doc) => {
let content = `<li class="pay-item">
<div class="pay-img doc"></div>
<img src="${ doc.image_url }" alt="" class="pay-img doc">
<div class="d-contain">
<p class="doc-title">${ doc.title }</p>
<p class="doc-desc">${ doc.desc }</p>
<!-- /www/?xxx -->
<a href="${ doc.file_url }" class="pay-price" download="${ doc.file_name }">下载</a>
</div>
</li>`;
$('.pay-list').append(content);
bIsLoadData = false;
$('a.btn-more').html('滚动加载更多');
})
} else {
message.showError(res.errmsg)
}
})
.fail(() => {
message.showError('服务器超时,请重试!')
})
}
}); | }
});
|
S25.4.4.3_A7.1_T3.js | // Copyright 2014 Cubane Canada, Inc. All rights reserved.
// See LICENSE for details.
/*---
es6id: S25.4.4.3_A7.1_T3
author: Sam Mikes
description: Promise.race([p1, p2]) settles when first settles
includes: [promiseHelper.js]
flags: [async]
---*/
var sequence = [];
var p1 = new Promise(function() {}),
p2 = Promise.resolve(2),
p = Promise.race([p1, p2]);
| p.then(function(result) {
assert.sameValue(result, 2, 'The value of result is expected to be 2');
sequence.push(4);
assert.sameValue(sequence.length, 4, 'The value of sequence.length is expected to be 4');
checkSequence(sequence, "This happens second");
}).catch($DONE);
Promise.resolve().then(function() {
sequence.push(3);
assert.sameValue(sequence.length, 3, 'The value of sequence.length is expected to be 3');
checkSequence(sequence, "This happens first");
}).then(function() {
sequence.push(5);
assert.sameValue(sequence.length, 5, 'The value of sequence.length is expected to be 5');
checkSequence(sequence, "This happens third");
}).then($DONE, $DONE);
sequence.push(2); | sequence.push(1);
|
standalone_build_tests.py | import sys
import argparse
from .yamato_utils import get_base_path, run_standalone_build
def main(scene_path):
base_path = get_base_path()
print(f"Running in base path {base_path}")
executable_name = None
if scene_path is not None:
# str.strip() removes a set of characters rather than a suffix; drop the ".unity" extension explicitly
executable_name = scene_path[: -len(".unity")] if scene_path.endswith(".unity") else scene_path
executable_name = executable_name.split("/")[-1]
executable_name = "testPlayer-" + executable_name |
if returncode == 0:
print("Test run SUCCEEDED!")
else:
print("Test run FAILED!")
sys.exit(returncode)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scene", default=None)
args = parser.parse_args()
main(args.scene) |
returncode = run_standalone_build(
base_path, verbose=True, output_path=executable_name, scene_path=scene_path
) |
matchers.go | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package filter
import (
"regexp"
"strings"
"github.com/pingcap/errors"
)
// rule of a filter, consisting of a schema and table pattern, and may be an
// accept-list (positive) or deny-list (negative).
type rule struct {
schema matcher
table matcher
positive bool
}
// matcher matches a name against a pattern.
type matcher interface {
matchString(name string) bool
matchAllStrings() bool
toLower() matcher
}
// stringMatcher is a matcher with a literal string.
type stringMatcher string
func (m stringMatcher) matchString(name string) bool {
return string(m) == name
}
func (stringMatcher) matchAllStrings() bool {
return false
}
func (m stringMatcher) toLower() matcher {
return stringMatcher(strings.ToLower(string(m)))
}
// trueMatcher is a matcher which matches everything. The `*` pattern.
type trueMatcher struct{}
func (trueMatcher) matchString(string) bool {
return true
}
func (trueMatcher) matchAllStrings() bool {
return true
}
func (m trueMatcher) toLower() matcher {
return m
}
// regexpMatcher is a matcher based on a regular expression.
type regexpMatcher struct {
pattern *regexp.Regexp
}
func | (pat string) (matcher, error) {
if pat == "(?s)^.*$" {
// special case for '*'
return trueMatcher{}, nil
}
pattern, err := regexp.Compile(pat)
if err != nil {
return nil, errors.Trace(err)
}
return regexpMatcher{pattern: pattern}, nil
}
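// A rough usage sketch of the constructor above (hypothetical call site; the
// surrounding filter package is assumed to translate a bare '*' pattern into
// "(?s)^.*$" before calling it, which is why that string is special-cased):
//
//	m, err := newRegexpMatcher("(?s)^test_.*$")
//	if err == nil && m.matchString("test_users") {
//		// pattern matched
//	}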
func (m regexpMatcher) matchString(name string) bool {
return m.pattern.MatchString(name)
}
func (regexpMatcher) matchAllStrings() bool {
return false
}
func (m regexpMatcher) toLower() matcher {
pattern := regexp.MustCompile("(?i)" + m.pattern.String())
return regexpMatcher{pattern: pattern}
}
| newRegexpMatcher |
main.go | package main
// Trickster - Reverse Proxy Cache for Prometheus HTTP API
import (
"fmt"
"net/http"
"os"
"github.com/go-kit/kit/log/level"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
const progversion = "0.0.11"
// main function to boot up everything
func main() | {
t := &TricksterHandler{}
t.ResponseChannels = make(map[string]chan *ClientRequestContext)
t.Config = NewConfig()
if err := loadConfiguration(t.Config, os.Args[1:]); err != nil {
// using fmt.Println because logger can't be instantiated without the config loaded
// to know the log path, and the config load just failed, so we just abort.
fmt.Println("Could not load trickster configuration: ", err.Error())
os.Exit(1)
}
if t.Config.Main.InstanceID > 0 {
t.Logger = newLogger(t.Config.Logging, fmt.Sprint(t.Config.Main.InstanceID))
} else {
t.Logger = newLogger(t.Config.Logging, "")
}
level.Info(t.Logger).Log("event", "application startup", "version", progversion)
t.Metrics = NewApplicationMetrics(t.Config, t.Logger)
t.Cacher = getCache(t)
if err := t.Cacher.Connect(); err != nil {
level.Error(t.Logger).Log("event", "Unable to connect to Cache", "detail", err.Error())
os.Exit(1)
}
defer t.Cacher.Close()
router := mux.NewRouter()
// API Version 1 Support
const apiV1Path = "/api/v1/"
// Health Check Paths
router.HandleFunc("/{originMoniker}/"+mnHealth, t.promHealthCheckHandler).Methods("GET")
router.HandleFunc("/"+mnHealth, t.promHealthCheckHandler).Methods("GET")
// Path-based multi-origin support - no support for full proxy of the prometheus UI, only querying
router.HandleFunc("/{originMoniker}"+apiV1Path+mnQueryRange, t.promQueryRangeHandler).Methods("GET")
router.HandleFunc("/{originMoniker}"+apiV1Path+mnQuery, t.promQueryHandler).Methods("GET")
router.HandleFunc("/{originMoniker}"+apiV1Path+mnLabels, t.promAPIProxyHandler).Methods("GET")
router.HandleFunc(apiV1Path+mnQueryRange, t.promQueryRangeHandler).Methods("GET")
router.HandleFunc(apiV1Path+mnQuery, t.promQueryHandler).Methods("GET")
router.HandleFunc(apiV1Path+mnLabels, t.promAPIProxyHandler).Methods("GET")
// Catch All for Single-Origin proxy
router.PathPrefix("/").HandlerFunc(t.promFullProxyHandler).Methods("GET")
level.Info(t.Logger).Log("event", "proxy http endpoint starting", "port", t.Config.ProxyServer.ListenPort)
// Start the Server
err := http.ListenAndServe(fmt.Sprintf(":%d", t.Config.ProxyServer.ListenPort), handlers.CompressHandler(router))
level.Error(t.Logger).Log("event", "exiting", "err", err)
} |
|
test_kitti_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import os
import tempfile
import numpy as np
import pytest
import torch
from mmdet3d.core.bbox import LiDARInstance3DBoxes, limit_period
from mmdet3d.datasets import KittiDataset
def _generate_kitti_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1.0, 1.0],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
]
modality = dict(use_lidar=True, use_camera=False)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def _generate_kitti_multi_modality_dataset_config():
data_root = 'tests/data/kitti'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
classes = ['Pedestrian', 'Cyclist', 'Car']
pts_prefix = 'velodyne_reduced'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(type='Resize', multiscale_mode='value', keep_ratio=True),
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points', 'img'])
])
]
modality = dict(use_lidar=True, use_camera=True)
split = 'training'
return data_root, ann_file, classes, pts_prefix, pipeline, modality, split
def test_getitem():
np.random.seed(0)
data_root, ann_file, classes, pts_prefix, \
_, modality, split = _generate_kitti_dataset_config()
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4,
file_client_args=dict(backend='disk')),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
file_client_args=dict(backend='disk')),
dict(
type='ObjectSample',
db_sampler=dict(
data_root='tests/data/kitti/',
# in coordinate system refactor, this test file is modified
info_path='tests/data/kitti/kitti_dbinfos_train.pkl',
rate=1.0,
prepare=dict(
filter_by_difficulty=[-1],
filter_by_min_points=dict(Pedestrian=10)),
classes=['Pedestrian', 'Cyclist', 'Car'],
sample_groups=dict(Pedestrian=6))),
dict(
type='ObjectNoise',
num_try=100,
translation_std=[1.0, 1.0, 0.5],
global_rot_range=[0.0, 0.0],
rot_range=[-0.78539816, 0.78539816]),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05]),
dict(
type='PointsRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(
type='ObjectRangeFilter',
point_cloud_range=[0, -40, -3, 70.4, 40, 1]),
dict(type='PointShuffle'),
dict(
type='DefaultFormatBundle3D',
class_names=['Pedestrian', 'Cyclist', 'Car']),
dict(
type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
pipeline, classes, modality)
data = kitti_dataset[0]
points = data['points']._data
gt_bboxes_3d = data['gt_bboxes_3d']._data
gt_labels_3d = data['gt_labels_3d']._data
expected_gt_bboxes_3d = torch.tensor(
[[9.5081, -5.2269, -1.1370, 1.2288, 0.4915, 1.9353, 1.9988]])
expected_gt_labels_3d = torch.tensor([0])
rot_matrix = data['img_metas']._data['pcd_rotation']
rot_angle = data['img_metas']._data['pcd_rotation_angle']
horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']
vertical_flip = data['img_metas']._data['pcd_vertical_flip']
expected_rot_matrix = torch.tensor([[0.8018, 0.5976, 0.0000],
[-0.5976, 0.8018, 0.0000],
[0.0000, 0.0000, 1.0000]])
expected_rot_angle = 0.6404654291602163
noise_angle = 0.20247319
assert torch.allclose(expected_rot_matrix, rot_matrix, atol=1e-4)
assert math.isclose(expected_rot_angle, rot_angle, abs_tol=1e-4)
assert horizontal_flip is True
assert vertical_flip is False
# after coord system refactor
expected_gt_bboxes_3d[:, :3] = \
expected_gt_bboxes_3d[:, :3] @ rot_matrix @ rot_matrix
expected_gt_bboxes_3d[:, -1:] = -np.pi - expected_gt_bboxes_3d[:, -1:] \
+ 2 * rot_angle - 2 * noise_angle
expected_gt_bboxes_3d[:, -1:] = limit_period(
expected_gt_bboxes_3d[:, -1:], period=np.pi * 2)
assert points.shape == (780, 4)
assert torch.allclose(
gt_bboxes_3d.tensor, expected_gt_bboxes_3d, atol=1e-4)
assert torch.all(gt_labels_3d == expected_gt_labels_3d)
# test multi-modality KITTI dataset
np.random.seed(0)
point_cloud_range = [0, -40, -3, 70.4, 40, 1]
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
multi_modality_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4),
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
dict(
type='Resize',
img_scale=[(640, 192), (2560, 768)],
multiscale_mode='range',
keep_ratio=True),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05],
translation_std=[0.2, 0.2, 0.2]),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle3D', class_names=classes),
dict(
type='Collect3D',
keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']),
]
modality = dict(use_lidar=True, use_camera=True)
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
multi_modality_pipeline, classes, modality)
data = kitti_dataset[0]
img = data['img']._data
lidar2img = data['img_metas']._data['lidar2img']
expected_lidar2img = np.array(
[[6.02943726e+02, -7.07913330e+02, -1.22748432e+01, -1.70942719e+02],
[1.76777252e+02, 8.80879879e+00, -7.07936157e+02, -1.02568634e+02],
[9.99984801e-01, -1.52826728e-03, -5.29071223e-03, -3.27567995e-01],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
assert img.shape[:] == (3, 416, 1344)
assert np.allclose(lidar2img, expected_lidar2img)
def test_evaluate():
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
data_root, ann_file, classes, pts_prefix, \
pipeline, modality, split = _generate_kitti_dataset_config()
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
pipeline, classes, modality)
boxes_3d = LiDARInstance3DBoxes(
torch.tensor(
[[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
labels_3d = torch.tensor([
0,
])
scores_3d = torch.tensor([0.5])
metric = ['mAP']
result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
ap_dict = kitti_dataset.evaluate([result], metric)
assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_easy'],
3.0303030303030307)
assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_moderate'],
3.0303030303030307)
assert np.isclose(ap_dict['KITTI/Overall_3D_AP11_hard'],
3.0303030303030307)
def test_show():
from os import path as osp
import mmcv
from mmdet3d.core.bbox import LiDARInstance3DBoxes
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
data_root, ann_file, classes, pts_prefix, \
pipeline, modality, split = _generate_kitti_dataset_config()
kitti_dataset = KittiDataset(
data_root, ann_file, split=split, modality=modality, pipeline=pipeline)
boxes_3d = LiDARInstance3DBoxes(
torch.tensor(
[[46.1218, -4.6496, -0.9275, 0.5316, 1.4442, 1.7450, 1.1749],
[33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723],
[46.1366, -4.6404, -0.9510, 0.5162, 1.6501, 1.7540, 1.3778],
[33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.5430],
[58.9079, 16.6272, -1.5829, 1.5656, 3.9313, 1.4899, 1.5505]]))
scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.2780])
labels_3d = torch.tensor([0, 0, 1, 1, 2])
result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
results = [result]
kitti_dataset.show(results, temp_dir, show=False)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
tmp_dir.cleanup()
# test show with pipeline
eval_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
tmp_dir.cleanup()
# test multi-modality show
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
_, _, _, _, multi_modality_pipeline, modality, _ = \
_generate_kitti_multi_modality_dataset_config()
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
multi_modality_pipeline, classes, modality)
kitti_dataset.show(results, temp_dir, show=False)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
mmcv.check_file_exist(img_file_path)
mmcv.check_file_exist(img_pred_path)
mmcv.check_file_exist(img_gt_file)
tmp_dir.cleanup()
# test multi-modality show with pipeline
eval_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=4,
use_dim=4),
dict(type='LoadImageFromFile'),
dict(
type='DefaultFormatBundle3D',
class_names=classes,
with_label=False),
dict(type='Collect3D', keys=['points', 'img'])
]
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
mmcv.check_file_exist(img_file_path)
mmcv.check_file_exist(img_pred_path)
mmcv.check_file_exist(img_gt_file)
tmp_dir.cleanup()
def | ():
from mmdet3d.core.bbox import LiDARInstance3DBoxes
data_root, ann_file, classes, pts_prefix, \
pipeline, modality, split = _generate_kitti_dataset_config()
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
pipeline, classes, modality)
# coord system refactor
boxes_3d = LiDARInstance3DBoxes(
torch.tensor(
[[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
labels_3d = torch.tensor([
0,
])
scores_3d = torch.tensor([0.5])
result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
results = [result]
result_files, tmp_dir = kitti_dataset.format_results(results)
expected_name = np.array(['Pedestrian'])
expected_truncated = np.array([0.])
expected_occluded = np.array([0])
# coord sys refactor
expected_alpha = np.array(-3.3410306 + np.pi)
expected_bbox = np.array([[710.443, 144.00221, 820.29114, 307.58667]])
expected_dimensions = np.array([[1.2, 1.89, 0.48]])
expected_location = np.array([[1.8399826, 1.4700007, 8.410018]])
expected_rotation_y = np.array([0.0100])
expected_score = np.array([0.5])
expected_sample_idx = np.array([0])
assert np.all(result_files[0]['name'] == expected_name)
assert np.allclose(result_files[0]['truncated'], expected_truncated)
assert np.all(result_files[0]['occluded'] == expected_occluded)
assert np.allclose(result_files[0]['alpha'], expected_alpha, 1e-3)
assert np.allclose(result_files[0]['bbox'], expected_bbox)
assert np.allclose(result_files[0]['dimensions'], expected_dimensions)
assert np.allclose(result_files[0]['location'], expected_location)
assert np.allclose(result_files[0]['rotation_y'], expected_rotation_y,
1e-3)
assert np.allclose(result_files[0]['score'], expected_score)
assert np.allclose(result_files[0]['sample_idx'], expected_sample_idx)
tmp_dir.cleanup()
def test_bbox2result_kitti():
data_root, ann_file, classes, pts_prefix, \
pipeline, modality, split = _generate_kitti_dataset_config()
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
pipeline, classes, modality)
boxes_3d = LiDARInstance3DBoxes(
torch.tensor(
[[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900, -1.5808]]))
labels_3d = torch.tensor([
0,
])
scores_3d = torch.tensor([0.5])
result = dict(boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
results = [result]
tmp_dir = tempfile.TemporaryDirectory()
temp_kitti_result_dir = tmp_dir.name
det_annos = kitti_dataset.bbox2result_kitti(
results, classes, submission_prefix=temp_kitti_result_dir)
expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
expected_name = np.array(['Pedestrian'])
expected_dimensions = np.array([1.2000, 1.8900, 0.4800])
# coord system refactor (reverse sign)
expected_rotation_y = 0.0100
expected_score = np.array([0.5])
assert np.all(det_annos[0]['name'] == expected_name)
assert np.allclose(det_annos[0]['rotation_y'], expected_rotation_y, 1e-3)
assert np.allclose(det_annos[0]['score'], expected_score)
assert np.allclose(det_annos[0]['dimensions'], expected_dimensions)
assert os.path.exists(expected_file_path)
tmp_dir.cleanup()
tmp_dir = tempfile.TemporaryDirectory()
temp_kitti_result_dir = tmp_dir.name
boxes_3d = LiDARInstance3DBoxes(torch.tensor([]))
labels_3d = torch.tensor([])
scores_3d = torch.tensor([])
empty_result = dict(
boxes_3d=boxes_3d, labels_3d=labels_3d, scores_3d=scores_3d)
results = [empty_result]
det_annos = kitti_dataset.bbox2result_kitti(
results, classes, submission_prefix=temp_kitti_result_dir)
expected_file_path = os.path.join(temp_kitti_result_dir, '000000.txt')
assert os.path.exists(expected_file_path)
tmp_dir.cleanup()
def test_bbox2result_kitti2d():
data_root, ann_file, classes, pts_prefix, \
pipeline, modality, split = _generate_kitti_dataset_config()
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix,
pipeline, classes, modality)
bboxes = np.array([[[46.1218, -4.6496, -0.9275, 0.5316, 0.5],
[33.3189, 0.1981, 0.3136, 0.5656, 0.5]],
[[46.1366, -4.6404, -0.9510, 0.5162, 0.5],
[33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])
det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)
expected_name = np.array(
['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])
expected_bbox = np.array([[46.1218, -4.6496, -0.9275, 0.5316],
[33.3189, 0.1981, 0.3136, 0.5656],
[46.1366, -4.6404, -0.951, 0.5162],
[33.2646, 0.2297, 0.3446, 0.5746]])
expected_score = np.array([0.5, 0.5, 0.5, 0.5])
assert np.all(det_annos[0]['name'] == expected_name)
assert np.allclose(det_annos[0]['bbox'], expected_bbox)
assert np.allclose(det_annos[0]['score'], expected_score)
| test_format_results |
points.rs | use crate::{
geometry::{Dimensions, Point},
primitives::{
rectangle::{self, Rectangle},
rounded_rectangle::{
ellipse_quadrant::{self, Quadrant},
RoundedRectangle,
},
ContainsPoint, Primitive,
},
};
/// Iterator over all points inside the rounded rectangle.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub struct Points {
rect_iter: rectangle::Points,
top_left_corner: Rectangle,
top_right_corner: Rectangle,
bottom_right_corner: Rectangle,
bottom_left_corner: Rectangle,
top_left_iter: ellipse_quadrant::Points,
top_right_iter: ellipse_quadrant::Points,
bottom_right_iter: ellipse_quadrant::Points,
bottom_left_iter: ellipse_quadrant::Points,
}
impl Points {
pub(in crate::primitives) fn | (shape: &RoundedRectangle) -> Self {
let top_left_ellipse = shape.get_confined_corner_quadrant(Quadrant::TopLeft);
let top_right_ellipse = shape.get_confined_corner_quadrant(Quadrant::TopRight);
let bottom_right_ellipse = shape.get_confined_corner_quadrant(Quadrant::BottomRight);
let bottom_left_ellipse = shape.get_confined_corner_quadrant(Quadrant::BottomLeft);
Self {
rect_iter: shape.rectangle.points(),
top_left_iter: top_left_ellipse.points(),
top_right_iter: top_right_ellipse.points(),
bottom_right_iter: bottom_right_ellipse.points(),
bottom_left_iter: bottom_left_ellipse.points(),
top_left_corner: top_left_ellipse.bounding_box(),
top_right_corner: top_right_ellipse.bounding_box(),
bottom_right_corner: bottom_right_ellipse.bounding_box(),
bottom_left_corner: bottom_left_ellipse.bounding_box(),
}
}
pub(in crate::primitives) fn empty() -> Self {
Self {
rect_iter: rectangle::Points::empty(),
top_left_iter: ellipse_quadrant::Points::empty(),
top_right_iter: ellipse_quadrant::Points::empty(),
bottom_right_iter: ellipse_quadrant::Points::empty(),
bottom_left_iter: ellipse_quadrant::Points::empty(),
top_left_corner: Rectangle::zero(),
top_right_corner: Rectangle::zero(),
bottom_right_corner: Rectangle::zero(),
bottom_left_corner: Rectangle::zero(),
}
}
}
impl Iterator for Points {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
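// Strategy: drain the inner rectangle iterator first, skipping points that fall
// inside any of the four corner bounding boxes, then yield the points of each
// ellipse-quadrant corner iterator in turn.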
let Self {
top_left_corner,
top_right_corner,
bottom_right_corner,
bottom_left_corner,
..
} = self;
self.rect_iter
.find(|p| {
!top_left_corner.contains(*p)
&& !top_right_corner.contains(*p)
&& !bottom_right_corner.contains(*p)
&& !bottom_left_corner.contains(*p)
})
.or_else(|| self.top_left_iter.next())
.or_else(|| self.top_right_iter.next())
.or_else(|| self.bottom_right_iter.next())
.or_else(|| self.bottom_left_iter.next())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
geometry::Size, iterator::IntoPixels, pixelcolor::BinaryColor, style::PrimitiveStyle,
};
#[test]
fn points_equals_filled() {
let rounded_rect = RoundedRectangle::with_equal_corners(
Rectangle::new(Point::zero(), Size::new(10, 20)),
Size::new(4, 8),
);
assert!(rounded_rect.points().eq(rounded_rect
.into_styled(PrimitiveStyle::with_fill(BinaryColor::On))
.into_pixels()
.map(|pixel| pixel.0)));
}
}
| new |
networking_client.go | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
v1beta1 "k8s.io/api/networking/v1beta1"
"github.com/Angus-F/client-go/kubernetes/scheme"
rest "github.com/Angus-F/client-go/rest"
)
type NetworkingV1beta1Interface interface {
RESTClient() rest.Interface
IngressesGetter
IngressClassesGetter
}
// NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group.
type NetworkingV1beta1Client struct {
restClient rest.Interface
}
func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface {
return newIngresses(c, namespace)
}
func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface {
return newIngressClasses(c)
}
// NewForConfig creates a new NetworkingV1beta1Client for the given config.
func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &NetworkingV1beta1Client{client}, nil
}
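// A minimal usage sketch (assumes a *rest.Config obtained elsewhere, e.g. from an
// in-cluster or kubeconfig loader; the List call shape follows the usual client-go
// convention and is illustrative only):
//
//	nc, err := NewForConfig(cfg)
//	if err != nil {
//		// handle error
//	}
//	ingresses, err := nc.Ingresses("default").List(ctx, metav1.ListOptions{})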
// NewForConfigOrDie creates a new NetworkingV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *NetworkingV1beta1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new NetworkingV1beta1Client for the given RESTClient.
func New(c rest.Interface) *NetworkingV1beta1Client {
return &NetworkingV1beta1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *NetworkingV1beta1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient | } |
|
groups.rs | use super::{HasResult, Instruction, Name, Operand, Type, Typed};
use std::convert::TryFrom;
/// Just the BinaryOps. This ends up being better than a `&dyn `[`BinaryOp`](../trait.BinaryOp.html) for various reasons.
#[derive(PartialEq, Clone, Debug)]
pub enum BinaryOp {
// Integer binary ops
Add(super::Add),
Sub(super::Sub),
Mul(super::Mul),
UDiv(super::UDiv),
SDiv(super::SDiv),
URem(super::URem),
SRem(super::SRem),
// Bitwise binary ops
And(super::And),
Or(super::Or),
Xor(super::Xor),
Shl(super::Shl),
LShr(super::LShr),
AShr(super::AShr),
// Floating-point binary ops
FAdd(super::FAdd),
FSub(super::FSub),
FMul(super::FMul),
FDiv(super::FDiv),
FRem(super::FRem),
}
/// Just the UnaryOps. This ends up being better than a `&dyn `[`UnaryOp`](../trait.UnaryOp.html) for various reasons.
#[derive(PartialEq, Clone, Debug)]
pub enum UnaryOp {
// listed alphabetically
AddrSpaceCast(super::AddrSpaceCast),
BitCast(super::BitCast),
FNeg(super::FNeg),
FPExt(super::FPExt),
FPToSI(super::FPToSI),
FPToUI(super::FPToUI),
FPTrunc(super::FPTrunc),
IntToPtr(super::IntToPtr),
PtrToInt(super::PtrToInt),
SExt(super::SExt),
SIToFP(super::SIToFP),
Trunc(super::Trunc),
UIToFP(super::UIToFP),
ZExt(super::ZExt),
}
impl From<BinaryOp> for Instruction {
fn from(bo: BinaryOp) -> Instruction |
}
impl From<UnaryOp> for Instruction {
fn from(uo: UnaryOp) -> Instruction {
match uo {
UnaryOp::AddrSpaceCast(i) => i.into(),
UnaryOp::BitCast(i) => i.into(),
UnaryOp::FNeg(i) => i.into(),
UnaryOp::FPExt(i) => i.into(),
UnaryOp::FPToSI(i) => i.into(),
UnaryOp::FPToUI(i) => i.into(),
UnaryOp::FPTrunc(i) => i.into(),
UnaryOp::IntToPtr(i) => i.into(),
UnaryOp::PtrToInt(i) => i.into(),
UnaryOp::SExt(i) => i.into(),
UnaryOp::SIToFP(i) => i.into(),
UnaryOp::Trunc(i) => i.into(),
UnaryOp::UIToFP(i) => i.into(),
UnaryOp::ZExt(i) => i.into(),
}
}
}
impl TryFrom<Instruction> for BinaryOp {
type Error = &'static str;
fn try_from(inst: Instruction) -> Result<Self, Self::Error> {
match inst {
Instruction::Add(i) => Ok(BinaryOp::Add(i)),
Instruction::Sub(i) => Ok(BinaryOp::Sub(i)),
Instruction::Mul(i) => Ok(BinaryOp::Mul(i)),
Instruction::UDiv(i) => Ok(BinaryOp::UDiv(i)),
Instruction::SDiv(i) => Ok(BinaryOp::SDiv(i)),
Instruction::URem(i) => Ok(BinaryOp::URem(i)),
Instruction::SRem(i) => Ok(BinaryOp::SRem(i)),
Instruction::And(i) => Ok(BinaryOp::And(i)),
Instruction::Or(i) => Ok(BinaryOp::Or(i)),
Instruction::Xor(i) => Ok(BinaryOp::Xor(i)),
Instruction::Shl(i) => Ok(BinaryOp::Shl(i)),
Instruction::LShr(i) => Ok(BinaryOp::LShr(i)),
Instruction::AShr(i) => Ok(BinaryOp::AShr(i)),
Instruction::FAdd(i) => Ok(BinaryOp::FAdd(i)),
Instruction::FSub(i) => Ok(BinaryOp::FSub(i)),
Instruction::FMul(i) => Ok(BinaryOp::FMul(i)),
Instruction::FDiv(i) => Ok(BinaryOp::FDiv(i)),
Instruction::FRem(i) => Ok(BinaryOp::FRem(i)),
_ => Err("Not a binary op"),
}
}
}
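// A minimal usage sketch (assumes an `inst: Instruction` obtained from a parsed
// module; the `Typed` and `HasResult` impls below provide `get_type`/`get_result`):
//
//     use std::convert::TryFrom;
//     if let Ok(bop) = BinaryOp::try_from(inst) {
//         let _result_name = bop.get_result();
//         let _ty = bop.get_type();
//     }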
impl TryFrom<Instruction> for UnaryOp {
type Error = &'static str;
fn try_from(inst: Instruction) -> Result<Self, Self::Error> {
match inst {
Instruction::AddrSpaceCast(i) => Ok(UnaryOp::AddrSpaceCast(i)),
Instruction::BitCast(i) => Ok(UnaryOp::BitCast(i)),
Instruction::FNeg(i) => Ok(UnaryOp::FNeg(i)),
Instruction::FPExt(i) => Ok(UnaryOp::FPExt(i)),
Instruction::FPToSI(i) => Ok(UnaryOp::FPToSI(i)),
Instruction::FPToUI(i) => Ok(UnaryOp::FPToUI(i)),
Instruction::FPTrunc(i) => Ok(UnaryOp::FPTrunc(i)),
Instruction::IntToPtr(i) => Ok(UnaryOp::IntToPtr(i)),
Instruction::PtrToInt(i) => Ok(UnaryOp::PtrToInt(i)),
Instruction::SExt(i) => Ok(UnaryOp::SExt(i)),
Instruction::SIToFP(i) => Ok(UnaryOp::SIToFP(i)),
Instruction::Trunc(i) => Ok(UnaryOp::Trunc(i)),
Instruction::UIToFP(i) => Ok(UnaryOp::UIToFP(i)),
Instruction::ZExt(i) => Ok(UnaryOp::ZExt(i)),
_ => Err("Not a unary op"),
}
}
}
impl Typed for BinaryOp {
fn get_type(&self) -> Type {
match self {
BinaryOp::Add(i) => i.get_type(),
BinaryOp::Sub(i) => i.get_type(),
BinaryOp::Mul(i) => i.get_type(),
BinaryOp::UDiv(i) => i.get_type(),
BinaryOp::SDiv(i) => i.get_type(),
BinaryOp::URem(i) => i.get_type(),
BinaryOp::SRem(i) => i.get_type(),
BinaryOp::And(i) => i.get_type(),
BinaryOp::Or(i) => i.get_type(),
BinaryOp::Xor(i) => i.get_type(),
BinaryOp::Shl(i) => i.get_type(),
BinaryOp::LShr(i) => i.get_type(),
BinaryOp::AShr(i) => i.get_type(),
BinaryOp::FAdd(i) => i.get_type(),
BinaryOp::FSub(i) => i.get_type(),
BinaryOp::FMul(i) => i.get_type(),
BinaryOp::FDiv(i) => i.get_type(),
BinaryOp::FRem(i) => i.get_type(),
}
}
}
impl Typed for UnaryOp {
fn get_type(&self) -> Type {
match self {
UnaryOp::AddrSpaceCast(i) => i.get_type(),
UnaryOp::BitCast(i) => i.get_type(),
UnaryOp::FNeg(i) => i.get_type(),
UnaryOp::FPExt(i) => i.get_type(),
UnaryOp::FPToSI(i) => i.get_type(),
UnaryOp::FPToUI(i) => i.get_type(),
UnaryOp::FPTrunc(i) => i.get_type(),
UnaryOp::IntToPtr(i) => i.get_type(),
UnaryOp::PtrToInt(i) => i.get_type(),
UnaryOp::SExt(i) => i.get_type(),
UnaryOp::SIToFP(i) => i.get_type(),
UnaryOp::Trunc(i) => i.get_type(),
UnaryOp::UIToFP(i) => i.get_type(),
UnaryOp::ZExt(i) => i.get_type(),
}
}
}
/* --TODO not yet implemented: metadata
impl HasMetadata for BinaryOp {
fn get_metadata(&self) -> &InstructionMetadata {
match self {
BinaryOp::Add(i) => i.get_metadata(),
BinaryOp::Sub(i) => i.get_metadata(),
BinaryOp::Mul(i) => i.get_metadata(),
BinaryOp::UDiv(i) => i.get_metadata(),
BinaryOp::SDiv(i) => i.get_metadata(),
BinaryOp::URem(i) => i.get_metadata(),
BinaryOp::SRem(i) => i.get_metadata(),
BinaryOp::And(i) => i.get_metadata(),
BinaryOp::Or(i) => i.get_metadata(),
BinaryOp::Xor(i) => i.get_metadata(),
BinaryOp::Shl(i) => i.get_metadata(),
BinaryOp::LShr(i) => i.get_metadata(),
BinaryOp::AShr(i) => i.get_metadata(),
BinaryOp::FAdd(i) => i.get_metadata(),
BinaryOp::FSub(i) => i.get_metadata(),
BinaryOp::FMul(i) => i.get_metadata(),
BinaryOp::FDiv(i) => i.get_metadata(),
BinaryOp::FRem(i) => i.get_metadata(),
}
}
}
impl HasMetadata for UnaryOp {
fn get_metadata(&self) -> &InstructionMetadata {
match self {
UnaryOp::AddrSpaceCast(i) => i.get_metadata(),
UnaryOp::BitCast(i) => i.get_metadata(),
UnaryOp::FNeg(i) => i.get_metadata(),
UnaryOp::FPExt(i) => i.get_metadata(),
UnaryOp::FPToSI(i) => i.get_metadata(),
UnaryOp::FPToUI(i) => i.get_metadata(),
UnaryOp::FPTrunc(i) => i.get_metadata(),
UnaryOp::IntToPtr(i) => i.get_metadata(),
UnaryOp::PtrToInt(i) => i.get_metadata(),
UnaryOp::SExt(i) => i.get_metadata(),
UnaryOp::SIToFP(i) => i.get_metadata(),
UnaryOp::Trunc(i) => i.get_metadata(),
UnaryOp::UIToFP(i) => i.get_metadata(),
UnaryOp::ZExt(i) => i.get_metadata(),
}
}
}
*/
impl HasResult for BinaryOp {
fn get_result(&self) -> &Name {
match self {
BinaryOp::Add(i) => i.get_result(),
BinaryOp::Sub(i) => i.get_result(),
BinaryOp::Mul(i) => i.get_result(),
BinaryOp::UDiv(i) => i.get_result(),
BinaryOp::SDiv(i) => i.get_result(),
BinaryOp::URem(i) => i.get_result(),
BinaryOp::SRem(i) => i.get_result(),
BinaryOp::And(i) => i.get_result(),
BinaryOp::Or(i) => i.get_result(),
BinaryOp::Xor(i) => i.get_result(),
BinaryOp::Shl(i) => i.get_result(),
BinaryOp::LShr(i) => i.get_result(),
BinaryOp::AShr(i) => i.get_result(),
BinaryOp::FAdd(i) => i.get_result(),
BinaryOp::FSub(i) => i.get_result(),
BinaryOp::FMul(i) => i.get_result(),
BinaryOp::FDiv(i) => i.get_result(),
BinaryOp::FRem(i) => i.get_result(),
}
}
}
impl HasResult for UnaryOp {
fn get_result(&self) -> &Name {
match self {
UnaryOp::AddrSpaceCast(i) => i.get_result(),
UnaryOp::BitCast(i) => i.get_result(),
UnaryOp::FNeg(i) => i.get_result(),
UnaryOp::FPExt(i) => i.get_result(),
UnaryOp::FPToSI(i) => i.get_result(),
UnaryOp::FPToUI(i) => i.get_result(),
UnaryOp::FPTrunc(i) => i.get_result(),
UnaryOp::IntToPtr(i) => i.get_result(),
UnaryOp::PtrToInt(i) => i.get_result(),
UnaryOp::SExt(i) => i.get_result(),
UnaryOp::SIToFP(i) => i.get_result(),
UnaryOp::Trunc(i) => i.get_result(),
UnaryOp::UIToFP(i) => i.get_result(),
UnaryOp::ZExt(i) => i.get_result(),
}
}
}
impl super::BinaryOp for BinaryOp {
fn get_operand0(&self) -> &Operand {
match self {
BinaryOp::Add(i) => i.get_operand0(),
BinaryOp::Sub(i) => i.get_operand0(),
BinaryOp::Mul(i) => i.get_operand0(),
BinaryOp::UDiv(i) => i.get_operand0(),
BinaryOp::SDiv(i) => i.get_operand0(),
BinaryOp::URem(i) => i.get_operand0(),
BinaryOp::SRem(i) => i.get_operand0(),
BinaryOp::And(i) => i.get_operand0(),
BinaryOp::Or(i) => i.get_operand0(),
BinaryOp::Xor(i) => i.get_operand0(),
BinaryOp::Shl(i) => i.get_operand0(),
BinaryOp::LShr(i) => i.get_operand0(),
BinaryOp::AShr(i) => i.get_operand0(),
BinaryOp::FAdd(i) => i.get_operand0(),
BinaryOp::FSub(i) => i.get_operand0(),
BinaryOp::FMul(i) => i.get_operand0(),
BinaryOp::FDiv(i) => i.get_operand0(),
BinaryOp::FRem(i) => i.get_operand0(),
}
}
fn get_operand1(&self) -> &Operand {
match self {
BinaryOp::Add(i) => i.get_operand1(),
BinaryOp::Sub(i) => i.get_operand1(),
BinaryOp::Mul(i) => i.get_operand1(),
BinaryOp::UDiv(i) => i.get_operand1(),
BinaryOp::SDiv(i) => i.get_operand1(),
BinaryOp::URem(i) => i.get_operand1(),
BinaryOp::SRem(i) => i.get_operand1(),
BinaryOp::And(i) => i.get_operand1(),
BinaryOp::Or(i) => i.get_operand1(),
BinaryOp::Xor(i) => i.get_operand1(),
BinaryOp::Shl(i) => i.get_operand1(),
BinaryOp::LShr(i) => i.get_operand1(),
BinaryOp::AShr(i) => i.get_operand1(),
BinaryOp::FAdd(i) => i.get_operand1(),
BinaryOp::FSub(i) => i.get_operand1(),
BinaryOp::FMul(i) => i.get_operand1(),
BinaryOp::FDiv(i) => i.get_operand1(),
BinaryOp::FRem(i) => i.get_operand1(),
}
}
}
impl super::UnaryOp for UnaryOp {
fn get_operand(&self) -> &Operand {
match self {
UnaryOp::AddrSpaceCast(i) => i.get_operand(),
UnaryOp::BitCast(i) => i.get_operand(),
UnaryOp::FNeg(i) => i.get_operand(),
UnaryOp::FPExt(i) => i.get_operand(),
UnaryOp::FPToSI(i) => i.get_operand(),
UnaryOp::FPToUI(i) => i.get_operand(),
UnaryOp::FPTrunc(i) => i.get_operand(),
UnaryOp::IntToPtr(i) => i.get_operand(),
UnaryOp::PtrToInt(i) => i.get_operand(),
UnaryOp::SExt(i) => i.get_operand(),
UnaryOp::SIToFP(i) => i.get_operand(),
UnaryOp::Trunc(i) => i.get_operand(),
UnaryOp::UIToFP(i) => i.get_operand(),
UnaryOp::ZExt(i) => i.get_operand(),
}
}
}
| {
match bo {
BinaryOp::Add(i) => i.into(),
BinaryOp::Sub(i) => i.into(),
BinaryOp::Mul(i) => i.into(),
BinaryOp::UDiv(i) => i.into(),
BinaryOp::SDiv(i) => i.into(),
BinaryOp::URem(i) => i.into(),
BinaryOp::SRem(i) => i.into(),
BinaryOp::And(i) => i.into(),
BinaryOp::Or(i) => i.into(),
BinaryOp::Xor(i) => i.into(),
BinaryOp::Shl(i) => i.into(),
BinaryOp::LShr(i) => i.into(),
BinaryOp::AShr(i) => i.into(),
BinaryOp::FAdd(i) => i.into(),
BinaryOp::FSub(i) => i.into(),
BinaryOp::FMul(i) => i.into(),
BinaryOp::FDiv(i) => i.into(),
BinaryOp::FRem(i) => i.into(),
}
} |
go1_16_unicode.go | // Code generated by 'yaegi extract unicode'. DO NOT EDIT.
// +build go1.16
package stdlib
import (
"go/constant"
"go/token"
"reflect"
"unicode"
)
func init() | {
Symbols["unicode"] = map[string]reflect.Value{
// function, constant and variable definitions
"ASCII_Hex_Digit": reflect.ValueOf(&unicode.ASCII_Hex_Digit).Elem(),
"Adlam": reflect.ValueOf(&unicode.Adlam).Elem(),
"Ahom": reflect.ValueOf(&unicode.Ahom).Elem(),
"Anatolian_Hieroglyphs": reflect.ValueOf(&unicode.Anatolian_Hieroglyphs).Elem(),
"Arabic": reflect.ValueOf(&unicode.Arabic).Elem(),
"Armenian": reflect.ValueOf(&unicode.Armenian).Elem(),
"Avestan": reflect.ValueOf(&unicode.Avestan).Elem(),
"AzeriCase": reflect.ValueOf(&unicode.AzeriCase).Elem(),
"Balinese": reflect.ValueOf(&unicode.Balinese).Elem(),
"Bamum": reflect.ValueOf(&unicode.Bamum).Elem(),
"Bassa_Vah": reflect.ValueOf(&unicode.Bassa_Vah).Elem(),
"Batak": reflect.ValueOf(&unicode.Batak).Elem(),
"Bengali": reflect.ValueOf(&unicode.Bengali).Elem(),
"Bhaiksuki": reflect.ValueOf(&unicode.Bhaiksuki).Elem(),
"Bidi_Control": reflect.ValueOf(&unicode.Bidi_Control).Elem(),
"Bopomofo": reflect.ValueOf(&unicode.Bopomofo).Elem(),
"Brahmi": reflect.ValueOf(&unicode.Brahmi).Elem(),
"Braille": reflect.ValueOf(&unicode.Braille).Elem(),
"Buginese": reflect.ValueOf(&unicode.Buginese).Elem(),
"Buhid": reflect.ValueOf(&unicode.Buhid).Elem(),
"C": reflect.ValueOf(&unicode.C).Elem(),
"Canadian_Aboriginal": reflect.ValueOf(&unicode.Canadian_Aboriginal).Elem(),
"Carian": reflect.ValueOf(&unicode.Carian).Elem(),
"CaseRanges": reflect.ValueOf(&unicode.CaseRanges).Elem(),
"Categories": reflect.ValueOf(&unicode.Categories).Elem(),
"Caucasian_Albanian": reflect.ValueOf(&unicode.Caucasian_Albanian).Elem(),
"Cc": reflect.ValueOf(&unicode.Cc).Elem(),
"Cf": reflect.ValueOf(&unicode.Cf).Elem(),
"Chakma": reflect.ValueOf(&unicode.Chakma).Elem(),
"Cham": reflect.ValueOf(&unicode.Cham).Elem(),
"Cherokee": reflect.ValueOf(&unicode.Cherokee).Elem(),
"Chorasmian": reflect.ValueOf(&unicode.Chorasmian).Elem(),
"Co": reflect.ValueOf(&unicode.Co).Elem(),
"Common": reflect.ValueOf(&unicode.Common).Elem(),
"Coptic": reflect.ValueOf(&unicode.Coptic).Elem(),
"Cs": reflect.ValueOf(&unicode.Cs).Elem(),
"Cuneiform": reflect.ValueOf(&unicode.Cuneiform).Elem(),
"Cypriot": reflect.ValueOf(&unicode.Cypriot).Elem(),
"Cyrillic": reflect.ValueOf(&unicode.Cyrillic).Elem(),
"Dash": reflect.ValueOf(&unicode.Dash).Elem(),
"Deprecated": reflect.ValueOf(&unicode.Deprecated).Elem(),
"Deseret": reflect.ValueOf(&unicode.Deseret).Elem(),
"Devanagari": reflect.ValueOf(&unicode.Devanagari).Elem(),
"Diacritic": reflect.ValueOf(&unicode.Diacritic).Elem(),
"Digit": reflect.ValueOf(&unicode.Digit).Elem(),
"Dives_Akuru": reflect.ValueOf(&unicode.Dives_Akuru).Elem(),
"Dogra": reflect.ValueOf(&unicode.Dogra).Elem(),
"Duployan": reflect.ValueOf(&unicode.Duployan).Elem(),
"Egyptian_Hieroglyphs": reflect.ValueOf(&unicode.Egyptian_Hieroglyphs).Elem(),
"Elbasan": reflect.ValueOf(&unicode.Elbasan).Elem(),
"Elymaic": reflect.ValueOf(&unicode.Elymaic).Elem(),
"Ethiopic": reflect.ValueOf(&unicode.Ethiopic).Elem(),
"Extender": reflect.ValueOf(&unicode.Extender).Elem(),
"FoldCategory": reflect.ValueOf(&unicode.FoldCategory).Elem(),
"FoldScript": reflect.ValueOf(&unicode.FoldScript).Elem(),
"Georgian": reflect.ValueOf(&unicode.Georgian).Elem(),
"Glagolitic": reflect.ValueOf(&unicode.Glagolitic).Elem(),
"Gothic": reflect.ValueOf(&unicode.Gothic).Elem(),
"Grantha": reflect.ValueOf(&unicode.Grantha).Elem(),
"GraphicRanges": reflect.ValueOf(&unicode.GraphicRanges).Elem(),
"Greek": reflect.ValueOf(&unicode.Greek).Elem(),
"Gujarati": reflect.ValueOf(&unicode.Gujarati).Elem(),
"Gunjala_Gondi": reflect.ValueOf(&unicode.Gunjala_Gondi).Elem(),
"Gurmukhi": reflect.ValueOf(&unicode.Gurmukhi).Elem(),
"Han": reflect.ValueOf(&unicode.Han).Elem(),
"Hangul": reflect.ValueOf(&unicode.Hangul).Elem(),
"Hanifi_Rohingya": reflect.ValueOf(&unicode.Hanifi_Rohingya).Elem(),
"Hanunoo": reflect.ValueOf(&unicode.Hanunoo).Elem(),
"Hatran": reflect.ValueOf(&unicode.Hatran).Elem(),
"Hebrew": reflect.ValueOf(&unicode.Hebrew).Elem(),
"Hex_Digit": reflect.ValueOf(&unicode.Hex_Digit).Elem(),
"Hiragana": reflect.ValueOf(&unicode.Hiragana).Elem(),
"Hyphen": reflect.ValueOf(&unicode.Hyphen).Elem(),
"IDS_Binary_Operator": reflect.ValueOf(&unicode.IDS_Binary_Operator).Elem(),
"IDS_Trinary_Operator": reflect.ValueOf(&unicode.IDS_Trinary_Operator).Elem(),
"Ideographic": reflect.ValueOf(&unicode.Ideographic).Elem(),
"Imperial_Aramaic": reflect.ValueOf(&unicode.Imperial_Aramaic).Elem(),
"In": reflect.ValueOf(unicode.In),
"Inherited": reflect.ValueOf(&unicode.Inherited).Elem(),
"Inscriptional_Pahlavi": reflect.ValueOf(&unicode.Inscriptional_Pahlavi).Elem(),
"Inscriptional_Parthian": reflect.ValueOf(&unicode.Inscriptional_Parthian).Elem(),
"Is": reflect.ValueOf(unicode.Is),
"IsControl": reflect.ValueOf(unicode.IsControl),
"IsDigit": reflect.ValueOf(unicode.IsDigit),
"IsGraphic": reflect.ValueOf(unicode.IsGraphic),
"IsLetter": reflect.ValueOf(unicode.IsLetter),
"IsLower": reflect.ValueOf(unicode.IsLower),
"IsMark": reflect.ValueOf(unicode.IsMark),
"IsNumber": reflect.ValueOf(unicode.IsNumber),
"IsOneOf": reflect.ValueOf(unicode.IsOneOf),
"IsPrint": reflect.ValueOf(unicode.IsPrint),
"IsPunct": reflect.ValueOf(unicode.IsPunct),
"IsSpace": reflect.ValueOf(unicode.IsSpace),
"IsSymbol": reflect.ValueOf(unicode.IsSymbol),
"IsTitle": reflect.ValueOf(unicode.IsTitle),
"IsUpper": reflect.ValueOf(unicode.IsUpper),
"Javanese": reflect.ValueOf(&unicode.Javanese).Elem(),
"Join_Control": reflect.ValueOf(&unicode.Join_Control).Elem(),
"Kaithi": reflect.ValueOf(&unicode.Kaithi).Elem(),
"Kannada": reflect.ValueOf(&unicode.Kannada).Elem(),
"Katakana": reflect.ValueOf(&unicode.Katakana).Elem(),
"Kayah_Li": reflect.ValueOf(&unicode.Kayah_Li).Elem(),
"Kharoshthi": reflect.ValueOf(&unicode.Kharoshthi).Elem(),
"Khitan_Small_Script": reflect.ValueOf(&unicode.Khitan_Small_Script).Elem(),
"Khmer": reflect.ValueOf(&unicode.Khmer).Elem(),
"Khojki": reflect.ValueOf(&unicode.Khojki).Elem(),
"Khudawadi": reflect.ValueOf(&unicode.Khudawadi).Elem(),
"L": reflect.ValueOf(&unicode.L).Elem(),
"Lao": reflect.ValueOf(&unicode.Lao).Elem(),
"Latin": reflect.ValueOf(&unicode.Latin).Elem(),
"Lepcha": reflect.ValueOf(&unicode.Lepcha).Elem(),
"Letter": reflect.ValueOf(&unicode.Letter).Elem(),
"Limbu": reflect.ValueOf(&unicode.Limbu).Elem(),
"Linear_A": reflect.ValueOf(&unicode.Linear_A).Elem(),
"Linear_B": reflect.ValueOf(&unicode.Linear_B).Elem(),
"Lisu": reflect.ValueOf(&unicode.Lisu).Elem(),
"Ll": reflect.ValueOf(&unicode.Ll).Elem(),
"Lm": reflect.ValueOf(&unicode.Lm).Elem(),
"Lo": reflect.ValueOf(&unicode.Lo).Elem(),
"Logical_Order_Exception": reflect.ValueOf(&unicode.Logical_Order_Exception).Elem(),
"Lower": reflect.ValueOf(&unicode.Lower).Elem(),
"LowerCase": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"Lt": reflect.ValueOf(&unicode.Lt).Elem(),
"Lu": reflect.ValueOf(&unicode.Lu).Elem(),
"Lycian": reflect.ValueOf(&unicode.Lycian).Elem(),
"Lydian": reflect.ValueOf(&unicode.Lydian).Elem(),
"M": reflect.ValueOf(&unicode.M).Elem(),
"Mahajani": reflect.ValueOf(&unicode.Mahajani).Elem(),
"Makasar": reflect.ValueOf(&unicode.Makasar).Elem(),
"Malayalam": reflect.ValueOf(&unicode.Malayalam).Elem(),
"Mandaic": reflect.ValueOf(&unicode.Mandaic).Elem(),
"Manichaean": reflect.ValueOf(&unicode.Manichaean).Elem(),
"Marchen": reflect.ValueOf(&unicode.Marchen).Elem(),
"Mark": reflect.ValueOf(&unicode.Mark).Elem(),
"Masaram_Gondi": reflect.ValueOf(&unicode.Masaram_Gondi).Elem(),
"MaxASCII": reflect.ValueOf(constant.MakeFromLiteral("127", token.INT, 0)),
"MaxCase": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"MaxLatin1": reflect.ValueOf(constant.MakeFromLiteral("255", token.INT, 0)),
"MaxRune": reflect.ValueOf(constant.MakeFromLiteral("1114111", token.INT, 0)),
"Mc": reflect.ValueOf(&unicode.Mc).Elem(),
"Me": reflect.ValueOf(&unicode.Me).Elem(),
"Medefaidrin": reflect.ValueOf(&unicode.Medefaidrin).Elem(),
"Meetei_Mayek": reflect.ValueOf(&unicode.Meetei_Mayek).Elem(),
"Mende_Kikakui": reflect.ValueOf(&unicode.Mende_Kikakui).Elem(),
"Meroitic_Cursive": reflect.ValueOf(&unicode.Meroitic_Cursive).Elem(),
"Meroitic_Hieroglyphs": reflect.ValueOf(&unicode.Meroitic_Hieroglyphs).Elem(),
"Miao": reflect.ValueOf(&unicode.Miao).Elem(),
"Mn": reflect.ValueOf(&unicode.Mn).Elem(),
"Modi": reflect.ValueOf(&unicode.Modi).Elem(),
"Mongolian": reflect.ValueOf(&unicode.Mongolian).Elem(),
"Mro": reflect.ValueOf(&unicode.Mro).Elem(),
"Multani": reflect.ValueOf(&unicode.Multani).Elem(),
"Myanmar": reflect.ValueOf(&unicode.Myanmar).Elem(),
"N": reflect.ValueOf(&unicode.N).Elem(),
"Nabataean": reflect.ValueOf(&unicode.Nabataean).Elem(),
"Nandinagari": reflect.ValueOf(&unicode.Nandinagari).Elem(),
"Nd": reflect.ValueOf(&unicode.Nd).Elem(),
"New_Tai_Lue": reflect.ValueOf(&unicode.New_Tai_Lue).Elem(),
"Newa": reflect.ValueOf(&unicode.Newa).Elem(),
"Nko": reflect.ValueOf(&unicode.Nko).Elem(),
"Nl": reflect.ValueOf(&unicode.Nl).Elem(),
"No": reflect.ValueOf(&unicode.No).Elem(),
"Noncharacter_Code_Point": reflect.ValueOf(&unicode.Noncharacter_Code_Point).Elem(),
"Number": reflect.ValueOf(&unicode.Number).Elem(),
"Nushu": reflect.ValueOf(&unicode.Nushu).Elem(),
"Nyiakeng_Puachue_Hmong": reflect.ValueOf(&unicode.Nyiakeng_Puachue_Hmong).Elem(),
"Ogham": reflect.ValueOf(&unicode.Ogham).Elem(),
"Ol_Chiki": reflect.ValueOf(&unicode.Ol_Chiki).Elem(),
"Old_Hungarian": reflect.ValueOf(&unicode.Old_Hungarian).Elem(),
"Old_Italic": reflect.ValueOf(&unicode.Old_Italic).Elem(),
"Old_North_Arabian": reflect.ValueOf(&unicode.Old_North_Arabian).Elem(),
"Old_Permic": reflect.ValueOf(&unicode.Old_Permic).Elem(),
"Old_Persian": reflect.ValueOf(&unicode.Old_Persian).Elem(),
"Old_Sogdian": reflect.ValueOf(&unicode.Old_Sogdian).Elem(),
"Old_South_Arabian": reflect.ValueOf(&unicode.Old_South_Arabian).Elem(),
"Old_Turkic": reflect.ValueOf(&unicode.Old_Turkic).Elem(),
"Oriya": reflect.ValueOf(&unicode.Oriya).Elem(),
"Osage": reflect.ValueOf(&unicode.Osage).Elem(),
"Osmanya": reflect.ValueOf(&unicode.Osmanya).Elem(),
"Other": reflect.ValueOf(&unicode.Other).Elem(),
"Other_Alphabetic": reflect.ValueOf(&unicode.Other_Alphabetic).Elem(),
"Other_Default_Ignorable_Code_Point": reflect.ValueOf(&unicode.Other_Default_Ignorable_Code_Point).Elem(),
"Other_Grapheme_Extend": reflect.ValueOf(&unicode.Other_Grapheme_Extend).Elem(),
"Other_ID_Continue": reflect.ValueOf(&unicode.Other_ID_Continue).Elem(),
"Other_ID_Start": reflect.ValueOf(&unicode.Other_ID_Start).Elem(),
"Other_Lowercase": reflect.ValueOf(&unicode.Other_Lowercase).Elem(),
"Other_Math": reflect.ValueOf(&unicode.Other_Math).Elem(),
"Other_Uppercase": reflect.ValueOf(&unicode.Other_Uppercase).Elem(),
"P": reflect.ValueOf(&unicode.P).Elem(),
"Pahawh_Hmong": reflect.ValueOf(&unicode.Pahawh_Hmong).Elem(),
"Palmyrene": reflect.ValueOf(&unicode.Palmyrene).Elem(),
"Pattern_Syntax": reflect.ValueOf(&unicode.Pattern_Syntax).Elem(),
"Pattern_White_Space": reflect.ValueOf(&unicode.Pattern_White_Space).Elem(),
"Pau_Cin_Hau": reflect.ValueOf(&unicode.Pau_Cin_Hau).Elem(),
"Pc": reflect.ValueOf(&unicode.Pc).Elem(),
"Pd": reflect.ValueOf(&unicode.Pd).Elem(),
"Pe": reflect.ValueOf(&unicode.Pe).Elem(),
"Pf": reflect.ValueOf(&unicode.Pf).Elem(),
"Phags_Pa": reflect.ValueOf(&unicode.Phags_Pa).Elem(),
"Phoenician": reflect.ValueOf(&unicode.Phoenician).Elem(),
"Pi": reflect.ValueOf(&unicode.Pi).Elem(),
"Po": reflect.ValueOf(&unicode.Po).Elem(),
"Prepended_Concatenation_Mark": reflect.ValueOf(&unicode.Prepended_Concatenation_Mark).Elem(),
"PrintRanges": reflect.ValueOf(&unicode.PrintRanges).Elem(),
"Properties": reflect.ValueOf(&unicode.Properties).Elem(),
"Ps": reflect.ValueOf(&unicode.Ps).Elem(),
"Psalter_Pahlavi": reflect.ValueOf(&unicode.Psalter_Pahlavi).Elem(),
"Punct": reflect.ValueOf(&unicode.Punct).Elem(),
"Quotation_Mark": reflect.ValueOf(&unicode.Quotation_Mark).Elem(),
"Radical": reflect.ValueOf(&unicode.Radical).Elem(),
"Regional_Indicator": reflect.ValueOf(&unicode.Regional_Indicator).Elem(),
"Rejang": reflect.ValueOf(&unicode.Rejang).Elem(),
"ReplacementChar": reflect.ValueOf(constant.MakeFromLiteral("65533", token.INT, 0)),
"Runic": reflect.ValueOf(&unicode.Runic).Elem(),
"S": reflect.ValueOf(&unicode.S).Elem(),
"STerm": reflect.ValueOf(&unicode.STerm).Elem(),
"Samaritan": reflect.ValueOf(&unicode.Samaritan).Elem(),
"Saurashtra": reflect.ValueOf(&unicode.Saurashtra).Elem(),
"Sc": reflect.ValueOf(&unicode.Sc).Elem(),
"Scripts": reflect.ValueOf(&unicode.Scripts).Elem(),
"Sentence_Terminal": reflect.ValueOf(&unicode.Sentence_Terminal).Elem(),
"Sharada": reflect.ValueOf(&unicode.Sharada).Elem(),
"Shavian": reflect.ValueOf(&unicode.Shavian).Elem(),
"Siddham": reflect.ValueOf(&unicode.Siddham).Elem(),
"SignWriting": reflect.ValueOf(&unicode.SignWriting).Elem(),
"SimpleFold": reflect.ValueOf(unicode.SimpleFold),
"Sinhala": reflect.ValueOf(&unicode.Sinhala).Elem(),
"Sk": reflect.ValueOf(&unicode.Sk).Elem(),
"Sm": reflect.ValueOf(&unicode.Sm).Elem(),
"So": reflect.ValueOf(&unicode.So).Elem(),
"Soft_Dotted": reflect.ValueOf(&unicode.Soft_Dotted).Elem(),
"Sogdian": reflect.ValueOf(&unicode.Sogdian).Elem(),
"Sora_Sompeng": reflect.ValueOf(&unicode.Sora_Sompeng).Elem(),
"Soyombo": reflect.ValueOf(&unicode.Soyombo).Elem(),
"Space": reflect.ValueOf(&unicode.Space).Elem(),
"Sundanese": reflect.ValueOf(&unicode.Sundanese).Elem(),
"Syloti_Nagri": reflect.ValueOf(&unicode.Syloti_Nagri).Elem(),
"Symbol": reflect.ValueOf(&unicode.Symbol).Elem(),
"Syriac": reflect.ValueOf(&unicode.Syriac).Elem(),
"Tagalog": reflect.ValueOf(&unicode.Tagalog).Elem(),
"Tagbanwa": reflect.ValueOf(&unicode.Tagbanwa).Elem(),
"Tai_Le": reflect.ValueOf(&unicode.Tai_Le).Elem(),
"Tai_Tham": reflect.ValueOf(&unicode.Tai_Tham).Elem(),
"Tai_Viet": reflect.ValueOf(&unicode.Tai_Viet).Elem(),
"Takri": reflect.ValueOf(&unicode.Takri).Elem(),
"Tamil": reflect.ValueOf(&unicode.Tamil).Elem(),
"Tangut": reflect.ValueOf(&unicode.Tangut).Elem(),
"Telugu": reflect.ValueOf(&unicode.Telugu).Elem(),
"Terminal_Punctuation": reflect.ValueOf(&unicode.Terminal_Punctuation).Elem(),
"Thaana": reflect.ValueOf(&unicode.Thaana).Elem(),
"Thai": reflect.ValueOf(&unicode.Thai).Elem(),
"Tibetan": reflect.ValueOf(&unicode.Tibetan).Elem(),
"Tifinagh": reflect.ValueOf(&unicode.Tifinagh).Elem(),
"Tirhuta": reflect.ValueOf(&unicode.Tirhuta).Elem(),
"Title": reflect.ValueOf(&unicode.Title).Elem(),
"TitleCase": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"To": reflect.ValueOf(unicode.To),
"ToLower": reflect.ValueOf(unicode.ToLower),
"ToTitle": reflect.ValueOf(unicode.ToTitle),
"ToUpper": reflect.ValueOf(unicode.ToUpper),
"TurkishCase": reflect.ValueOf(&unicode.TurkishCase).Elem(),
"Ugaritic": reflect.ValueOf(&unicode.Ugaritic).Elem(),
"Unified_Ideograph": reflect.ValueOf(&unicode.Unified_Ideograph).Elem(),
"Upper": reflect.ValueOf(&unicode.Upper).Elem(),
"UpperCase": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"UpperLower": reflect.ValueOf(constant.MakeFromLiteral("1114112", token.INT, 0)),
"Vai": reflect.ValueOf(&unicode.Vai).Elem(),
"Variation_Selector": reflect.ValueOf(&unicode.Variation_Selector).Elem(),
"Version": reflect.ValueOf(constant.MakeFromLiteral("\"13.0.0\"", token.STRING, 0)),
"Wancho": reflect.ValueOf(&unicode.Wancho).Elem(),
"Warang_Citi": reflect.ValueOf(&unicode.Warang_Citi).Elem(),
"White_Space": reflect.ValueOf(&unicode.White_Space).Elem(),
"Yezidi": reflect.ValueOf(&unicode.Yezidi).Elem(),
"Yi": reflect.ValueOf(&unicode.Yi).Elem(),
"Z": reflect.ValueOf(&unicode.Z).Elem(),
"Zanabazar_Square": reflect.ValueOf(&unicode.Zanabazar_Square).Elem(),
"Zl": reflect.ValueOf(&unicode.Zl).Elem(),
"Zp": reflect.ValueOf(&unicode.Zp).Elem(),
"Zs": reflect.ValueOf(&unicode.Zs).Elem(),
// type definitions
"CaseRange": reflect.ValueOf((*unicode.CaseRange)(nil)),
"Range16": reflect.ValueOf((*unicode.Range16)(nil)),
"Range32": reflect.ValueOf((*unicode.Range32)(nil)),
"RangeTable": reflect.ValueOf((*unicode.RangeTable)(nil)),
"SpecialCase": reflect.ValueOf((*unicode.SpecialCase)(nil)),
}
} |
|
0001_initial.py | # Generated by Django 1.11.2 on 2017-06-22 10:22
import bitfield.models
import django.contrib.auth.models
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchVectorField
from django.db import migrations, models
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models.functions import Upper
from zerver.models import generate_email_token_for_stream
def | (
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
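    # Backfill each attachment's realm from its owner, and mark it realm-public when
    # any message the owner sent with it went to a public, non-Zephyr-mirror stream.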
Attachment = apps.get_model("zerver", "Attachment")
Recipient = apps.get_model("zerver", "Recipient")
Stream = apps.get_model("zerver", "Stream")
attachments = Attachment.objects.all()
for entry in attachments:
owner = entry.owner
entry.realm = owner.realm
for message in entry.messages.all():
if owner == message.sender:
if message.recipient.type == Recipient.STREAM:
stream = Stream.objects.get(id=message.recipient.type_id)
is_realm_public = (
not stream.realm.is_zephyr_mirror_realm and not stream.invite_only
)
entry.is_realm_public = entry.is_realm_public or is_realm_public
entry.save()
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0001_initial"),
]
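    # Postgres full-text search setup: build a hunspell-backed dictionary when the
    # server has the dictionary files, otherwise copy the stock english configuration.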
if settings.POSTGRESQL_MISSING_DICTIONARIES:
fts_sql = """
CREATE TEXT SEARCH CONFIGURATION zulip.english_us_search (COPY=pg_catalog.english);
"""
else:
fts_sql = """
CREATE TEXT SEARCH DICTIONARY english_us_hunspell
(template = ispell, DictFile = en_us, AffFile = en_us, StopWords = zulip_english);
CREATE TEXT SEARCH CONFIGURATION zulip.english_us_search (COPY=pg_catalog.english);
ALTER TEXT SEARCH CONFIGURATION zulip.english_us_search
ALTER MAPPING FOR asciiword, asciihword, hword_asciipart, word, hword, hword_part
WITH english_us_hunspell, english_stem;
"""
fts_sql += """
CREATE FUNCTION escape_html(text) RETURNS text IMMUTABLE LANGUAGE 'sql' AS $$
    SELECT replace(replace(replace(replace(replace($1, '&', '&amp;'), '<', '&lt;'),
    '>', '&gt;'), '"', '&quot;'), '''', '&#39;');
$$ ;
CREATE TABLE fts_update_log (id SERIAL PRIMARY KEY, message_id INTEGER NOT NULL);
CREATE FUNCTION do_notify_fts_update_log() RETURNS trigger LANGUAGE plpgsql AS
$$ BEGIN NOTIFY fts_update_log; RETURN NEW; END $$;
CREATE TRIGGER fts_update_log_notify AFTER INSERT ON fts_update_log
FOR EACH STATEMENT EXECUTE PROCEDURE do_notify_fts_update_log();
CREATE FUNCTION append_to_fts_update_log() RETURNS trigger LANGUAGE plpgsql AS
$$ BEGIN INSERT INTO fts_update_log (message_id) VALUES (NEW.id); RETURN NEW; END $$;
CREATE TRIGGER zerver_message_update_search_tsvector_async
BEFORE INSERT OR UPDATE OF subject, rendered_content ON zerver_message
FOR EACH ROW EXECUTE PROCEDURE append_to_fts_update_log();
"""
operations = [
migrations.CreateModel(
name="UserProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
("email", models.EmailField(db_index=True, max_length=75, unique=True)),
("is_staff", models.BooleanField(default=False)),
("is_active", models.BooleanField(default=True)),
("is_bot", models.BooleanField(default=False)),
("date_joined", models.DateTimeField(default=django.utils.timezone.now)),
("is_mirror_dummy", models.BooleanField(default=False)),
("full_name", models.CharField(max_length=100)),
("short_name", models.CharField(max_length=100)),
("pointer", models.IntegerField()),
("last_pointer_updater", models.CharField(max_length=64)),
("api_key", models.CharField(max_length=32)),
("enable_stream_desktop_notifications", models.BooleanField(default=True)),
("enable_stream_sounds", models.BooleanField(default=True)),
("enable_desktop_notifications", models.BooleanField(default=True)),
("enable_sounds", models.BooleanField(default=True)),
("enable_offline_email_notifications", models.BooleanField(default=True)),
("enable_offline_push_notifications", models.BooleanField(default=True)),
("enable_digest_emails", models.BooleanField(default=True)),
("default_desktop_notifications", models.BooleanField(default=True)),
(
"last_reminder",
models.DateTimeField(default=django.utils.timezone.now, null=True),
),
("rate_limits", models.CharField(default="", max_length=100)),
("default_all_public_streams", models.BooleanField(default=False)),
("enter_sends", models.NullBooleanField(default=True)),
("autoscroll_forever", models.BooleanField(default=False)),
("twenty_four_hour_time", models.BooleanField(default=False)),
(
"avatar_source",
models.CharField(
choices=[
("G", "Hosted by Gravatar"),
("U", "Uploaded by user"),
("S", "System generated"),
],
default="G",
max_length=1,
),
),
(
"tutorial_status",
models.CharField(
choices=[("W", "Waiting"), ("S", "Started"), ("F", "Finished")],
default="W",
max_length=1,
),
),
("onboarding_steps", models.TextField(default="[]")),
("invites_granted", models.IntegerField(default=0)),
("invites_used", models.IntegerField(default=0)),
("alert_words", models.TextField(default="[]")),
("muted_topics", models.TextField(default="[]")),
(
"bot_owner",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="Client",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(db_index=True, max_length=30, unique=True)),
],
),
migrations.CreateModel(
name="DefaultStream",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
],
),
migrations.CreateModel(
name="Huddle",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("huddle_hash", models.CharField(db_index=True, max_length=40, unique=True)),
],
),
migrations.CreateModel(
name="Message",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("subject", models.CharField(db_index=True, max_length=60)),
("content", models.TextField()),
("rendered_content", models.TextField(null=True)),
("rendered_content_version", models.IntegerField(null=True)),
("pub_date", models.DateTimeField(db_index=True, verbose_name="date published")),
("last_edit_time", models.DateTimeField(null=True)),
("edit_history", models.TextField(null=True)),
("has_attachment", models.BooleanField(db_index=True, default=False)),
("has_image", models.BooleanField(db_index=True, default=False)),
("has_link", models.BooleanField(db_index=True, default=False)),
],
),
migrations.CreateModel(
name="PreregistrationUser",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("email", models.EmailField(max_length=75)),
("invited_at", models.DateTimeField(auto_now=True)),
("status", models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name="PushDeviceToken",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("kind", models.PositiveSmallIntegerField(choices=[(1, "apns"), (2, "gcm")])),
("token", models.CharField(max_length=4096, unique=True)),
(
"last_updated",
models.DateTimeField(auto_now=True, default=django.utils.timezone.now),
),
("ios_app_id", models.TextField(null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="Realm",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("domain", models.CharField(db_index=True, max_length=40, unique=True)),
("name", models.CharField(max_length=40, null=True)),
("restricted_to_domain", models.BooleanField(default=True)),
("invite_required", models.BooleanField(default=False)),
("invite_by_admins_only", models.BooleanField(default=False)),
("mandatory_topics", models.BooleanField(default=False)),
("show_digest_email", models.BooleanField(default=True)),
("name_changes_disabled", models.BooleanField(default=False)),
("date_created", models.DateTimeField(default=django.utils.timezone.now)),
("deactivated", models.BooleanField(default=False)),
],
options={
"permissions": (("administer", "Administer a realm"),),
},
),
migrations.CreateModel(
name="RealmAlias",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("domain", models.CharField(db_index=True, max_length=80, unique=True)),
(
"realm",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="RealmEmoji",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.TextField()),
("img_url", models.TextField()),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="RealmFilter",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("pattern", models.TextField()),
("url_format_string", models.TextField()),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="Recipient",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("type_id", models.IntegerField(db_index=True)),
("type", models.PositiveSmallIntegerField(db_index=True)),
],
),
migrations.CreateModel(
name="Referral",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("email", models.EmailField(max_length=75)),
("timestamp", models.DateTimeField(auto_now_add=True)),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="ScheduledJob",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("scheduled_timestamp", models.DateTimeField()),
("type", models.PositiveSmallIntegerField()),
("data", models.TextField()),
("filter_id", models.IntegerField(null=True)),
("filter_string", models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name="Stream",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(db_index=True, max_length=60)),
("invite_only", models.NullBooleanField(default=False)),
(
"email_token",
models.CharField(default=generate_email_token_for_stream, max_length=32),
),
("description", models.CharField(default="", max_length=1024)),
("date_created", models.DateTimeField(default=django.utils.timezone.now)),
("deactivated", models.BooleanField(default=False)),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
],
),
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("active", models.BooleanField(default=True)),
("in_home_view", models.NullBooleanField(default=True)),
("color", models.CharField(default="#c2c2c2", max_length=10)),
("desktop_notifications", models.BooleanField(default=True)),
("audible_notifications", models.BooleanField(default=True)),
("notifications", models.BooleanField(default=False)),
(
"recipient",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Recipient"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserActivity",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("query", models.CharField(db_index=True, max_length=50)),
("count", models.IntegerField()),
("last_visit", models.DateTimeField(verbose_name="last visit")),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserActivityInterval",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("start", models.DateTimeField(db_index=True, verbose_name="start time")),
("end", models.DateTimeField(db_index=True, verbose_name="end time")),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserMessage",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"flags",
bitfield.models.BitField(
[
"read",
"starred",
"collapsed",
"mentioned",
"wildcard_mentioned",
"summarize_in_home",
"summarize_in_stream",
"force_expand",
"force_collapse",
"has_alert_word",
"historical",
"is_me_message",
],
default=0,
),
),
(
"message",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Message"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="UserPresence",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("timestamp", models.DateTimeField(verbose_name="presence changed")),
("status", models.PositiveSmallIntegerField(default=1)),
(
"client",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.AlterUniqueTogether(
name="userpresence",
unique_together={("user_profile", "client")},
),
migrations.AlterUniqueTogether(
name="usermessage",
unique_together={("user_profile", "message")},
),
migrations.AlterUniqueTogether(
name="useractivity",
unique_together={("user_profile", "client", "query")},
),
migrations.AlterUniqueTogether(
name="subscription",
unique_together={("user_profile", "recipient")},
),
migrations.AlterUniqueTogether(
name="stream",
unique_together={("name", "realm")},
),
migrations.AlterUniqueTogether(
name="recipient",
unique_together={("type", "type_id")},
),
migrations.AlterUniqueTogether(
name="realmfilter",
unique_together={("realm", "pattern")},
),
migrations.AlterUniqueTogether(
name="realmemoji",
unique_together={("realm", "name")},
),
migrations.AddField(
model_name="realm",
name="notifications_stream",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="preregistrationuser",
name="realm",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
),
),
migrations.AddField(
model_name="preregistrationuser",
name="referred_by",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="preregistrationuser",
name="streams",
field=models.ManyToManyField(null=True, to="zerver.Stream"),
),
migrations.AddField(
model_name="message",
name="recipient",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Recipient"
),
),
migrations.AddField(
model_name="message",
name="sender",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="message",
name="sending_client",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Client"
),
),
migrations.AddField(
model_name="defaultstream",
name="realm",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"),
),
migrations.AddField(
model_name="defaultstream",
name="stream",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Stream"
),
),
migrations.AlterUniqueTogether(
name="defaultstream",
unique_together={("realm", "stream")},
),
migrations.AddField(
model_name="userprofile",
name="default_events_register_stream",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="userprofile",
name="default_sending_stream",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="zerver.Stream",
),
),
migrations.AddField(
model_name="userprofile",
name="groups",
field=models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
migrations.AddField(
model_name="userprofile",
name="realm",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"),
),
migrations.AddField(
model_name="userprofile",
name="user_permissions",
field=models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
migrations.AddField(
model_name="message",
name="search_tsvector",
field=SearchVectorField(null=True),
),
migrations.AddIndex(
model_name="message",
index=GinIndex(
"search_tsvector", fastupdate=False, name="zerver_message_search_tsvector"
),
),
migrations.RunSQL(
sql=fts_sql,
),
migrations.AlterModelManagers(
name="userprofile",
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name="preregistrationuser",
name="email",
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name="preregistrationuser",
name="streams",
field=models.ManyToManyField(to="zerver.Stream"),
),
migrations.AlterField(
model_name="pushdevicetoken",
name="last_updated",
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name="referral",
name="email",
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name="userprofile",
name="email",
field=models.EmailField(db_index=True, max_length=254, unique=True),
),
migrations.AlterField(
model_name="userprofile",
name="last_login",
field=models.DateTimeField(blank=True, null=True, verbose_name="last login"),
),
migrations.AddIndex(
model_name="message",
index=models.Index(Upper("subject"), name="upper_subject_idx"),
),
migrations.AddIndex(
model_name="stream",
index=models.Index(Upper("name"), name="upper_stream_name_idx"),
),
migrations.AddField(
model_name="userprofile",
name="left_side_userlist",
field=models.BooleanField(default=False),
),
migrations.AlterModelOptions(
name="realm",
options={
"permissions": (
("administer", "Administer a realm"),
("api_super_user", "Can send messages as other users for mirroring"),
)
},
),
migrations.AddIndex(
model_name="userprofile",
index=models.Index(Upper("email"), name="upper_userprofile_email_idx"),
),
migrations.AlterField(
model_name="userprofile",
name="is_active",
field=models.BooleanField(db_index=True, default=True),
),
migrations.AlterField(
model_name="userprofile",
name="is_bot",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AddIndex(
model_name="preregistrationuser",
index=models.Index(Upper("email"), name="upper_preregistration_email_idx"),
),
migrations.AlterField(
model_name="userprofile",
name="enable_stream_desktop_notifications",
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name="userprofile",
name="enable_stream_sounds",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="is_api_super_user",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AddField(
model_name="userprofile",
name="is_realm_admin",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name="realmemoji",
name="img_url",
field=models.URLField(),
),
migrations.AlterField(
model_name="realmemoji",
name="name",
field=models.TextField(
validators=[
django.core.validators.MinLengthValidator(1),
django.core.validators.RegexValidator(regex="^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$"),
]
),
),
migrations.AlterField(
model_name="realmemoji",
name="img_url",
field=models.URLField(max_length=1000),
),
migrations.CreateModel(
name="Attachment",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("file_name", models.CharField(db_index=True, max_length=100)),
("path_id", models.TextField(db_index=True)),
(
"create_time",
models.DateTimeField(db_index=True, default=django.utils.timezone.now),
),
("messages", models.ManyToManyField(to="zerver.Message")),
(
"owner",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
("is_realm_public", models.BooleanField(default=False)),
],
),
migrations.AddField(
model_name="realm",
name="create_stream_by_admins_only",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="bot_type",
field=models.PositiveSmallIntegerField(db_index=True, null=True),
),
migrations.AlterField(
model_name="realmemoji",
name="name",
field=models.TextField(
validators=[
django.core.validators.MinLengthValidator(1),
django.core.validators.RegexValidator(
message="Invalid characters in emoji name",
regex="^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$",
),
]
),
),
migrations.AddField(
model_name="preregistrationuser",
name="realm_creation",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="attachment",
name="realm",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="zerver.Realm",
),
),
migrations.RunPython(
code=migrate_existing_attachment_data,
elidable=True,
),
migrations.AddField(
model_name="subscription",
name="pin_to_top",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="default_language",
field=models.CharField(default="en", max_length=50),
),
migrations.AddField(
model_name="realm",
name="allow_message_editing",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="realm",
name="message_content_edit_limit_seconds",
field=models.IntegerField(default=600),
),
migrations.AddField(
model_name="realm",
name="default_language",
field=models.CharField(default="en", max_length=50),
),
migrations.AddField(
model_name="userprofile",
name="tos_version",
field=models.CharField(max_length=10, null=True),
),
]
| migrate_existing_attachment_data |
main.py | # -*- coding: utf-8 -*-
from screws.freeze.main import FrozenOnly
from objects.CSCG._2d.mesh.trace.visualize import _2dCSCG_Trace_Visualize
from objects.CSCG._2d.mesh.trace.elements.main import _2dCSCG_Trace_Elements
class _2dCSCG_Trace(FrozenOnly):
def __init__(self, mesh):
self._mesh_ = mesh
self._elements_ = _2dCSCG_Trace_Elements(self)
self._visualize_ = _2dCSCG_Trace_Visualize(self) | self.___PRIVATE_reset_cache___()
self._freeze_self_()
def ___PRIVATE_reset_cache___(self):
self.elements.___PRIVATE_reset_cache___()
@property
def elements(self):
return self._elements_
@property
def visualize(self):
return self._visualize_ | |
api_op_ListEndpoints.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package eventbridge
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// List the global endpoints associated with this account. For more information
// about global endpoints, see Making applications Regional-fault tolerant with
// global endpoints and event replication
// (https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-global-endpoints.html)
// in the Amazon EventBridge User Guide.
func (c *Client) ListEndpoints(ctx context.Context, params *ListEndpointsInput, optFns ...func(*Options)) (*ListEndpointsOutput, error) {
if params == nil {
params = &ListEndpointsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListEndpoints", params, optFns, c.addOperationListEndpointsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListEndpointsOutput)
out.ResultMetadata = metadata
return out, nil
}
type ListEndpointsInput struct {
// The primary Region of the endpoints associated with this account. For example
// "HomeRegion": "us-east-1".
HomeRegion *string
// The maximum number of results returned by the call.
MaxResults *int32
// A value that will return a subset of the endpoints associated with this account.
// For example, "NamePrefix": "ABC" will return all endpoints with "ABC" in the
// name.
NamePrefix *string
// If nextToken is returned, there are more results available. The value of
// nextToken is a unique pagination token for each page. Make the call again using
// the returned token to retrieve the next page. Keep all other arguments
// unchanged. Each pagination token expires after 24 hours. Using an expired
// pagination token will return an HTTP 400 InvalidToken error.
NextToken *string
noSmithyDocumentSerde
}
type ListEndpointsOutput struct {
// The endpoints returned by the call.
Endpoints []types.Endpoint
// If nextToken is returned, there are more results available. The value of
// nextToken is a unique pagination token for each page. Make the call again using
// the returned token to retrieve the next page. Keep all other arguments
// unchanged. Each pagination token expires after 24 hours. Using an expired
// pagination token will return an HTTP 400 InvalidToken error.
NextToken *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationListEndpointsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpListEndpoints{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListEndpoints{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListEndpoints(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil |
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opListEndpoints(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "events",
OperationName: "ListEndpoints",
}
}
| {
return err
} |
layers-counter.component.ts | import { Component, HostBinding, Input, OnChanges, Optional, SimpleChanges } from '@angular/core';
import { DomSanitizer, SafeHtml } from '@angular/platform-browser';
import { counter, CounterParams, Styles } from '@fortawesome/fontawesome-svg-core';
import { faWarnIfParentNotExist } from '../shared/errors/warn-if-parent-not-exist';
import { FaLayersComponent } from './layers.component';
@Component({
selector: 'fa-layers-counter',
template: '',
host: {
class: 'ng-fa-layers-counter',
},
})
export class | implements OnChanges {
@Input() content: string;
@Input() title?: string;
@Input() styles?: Styles;
@Input() classes?: string[] = [];
@HostBinding('innerHTML') renderedHTML: SafeHtml;
constructor(@Optional() private parent: FaLayersComponent, private sanitizer: DomSanitizer) {
faWarnIfParentNotExist(this.parent, 'FaLayersComponent', this.constructor.name);
}
ngOnChanges(changes: SimpleChanges) {
if (changes) {
const params = this.buildParams();
this.updateContent(params);
}
}
protected buildParams(): CounterParams {
return {
title: this.title,
classes: this.classes,
styles: this.styles,
};
}
private updateContent(params: CounterParams) {
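    // Render the counter markup via font-awesome and mark it as trusted HTML so it
    // can be bound to innerHTML.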
this.renderedHTML = this.sanitizer.bypassSecurityTrustHtml(counter(this.content || '', params).html.join(''));
}
}
| FaLayersCounterComponent |
app.component.ts | import { NgModule, Component, enableProdMode } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';
import { DxTreeListModule } from 'devextreme-angular';
import { Task, Employee, Service } from './app.service';
if(!/localhost/.test(document.location.host)) {
enableProdMode();
}
@Component({
selector: 'demo-app',
templateUrl: 'app/app.component.html',
providers: [Service]
})
export class AppComponent {
tasksData: Task[];
statusesData: string[];
employeesData: Employee[];
constructor(service: Service) {
this.tasksData = service.getTasksData();
this.employeesData = service.getEmployeesData();
this.statusesData = [
"Not Started",
"Need Assistance",
"In Progress",
"Deferred",
"Completed"
];
}
}
@NgModule({
imports: [
BrowserModule,
DxTreeListModule
],
declarations: [AppComponent],
bootstrap: [AppComponent]
})
export class | { }
platformBrowserDynamic().bootstrapModule(AppModule); | AppModule |
diffractometers.py | from cohere import Diffractometer
class Diffractometer_34idc(Diffractometer):
"""
Subclass of Diffractometer. Encapsulates "34idc" diffractometer.
"""
name = "34idc"
sampleaxes = ('y+', 'z-', 'y+') # in xrayutilities notation
detectoraxes = ('y+', 'x-')
incidentaxis = (0, 0, 1)
sampleaxes_name = ('th', 'chi', 'phi') # using the spec mnemonics for scan id.
detectoraxes_name = ('delta', 'gamma')
def __init__(self):
super(Diffractometer_34idc, self).__init__('34idc')
def create_diffractometer(diff_name):
if diff_name == '34idc':
return Diffractometer_34idc()
else:
print ('diffractometer ' + diff_name + ' not defined.')
def | (diff_name):
if diff_name == '34idc':
return True
else:
return False
| verify_diffractometer |
main.rs | #![type_length_limit = "2861949"]
use clap::{crate_version, App, Arg};
use tifs::mount_tifs;
use tifs::MountOption;
use tracing_libatrace as tracing_atrace;
use tracing_subscriber::{layer::SubscriberExt, registry::Registry};
#[async_std::main]
async fn main() {
let matches = App::new("TiFS") | .arg(
Arg::with_name("pd")
.long("pd-endpoints")
.short("p")
.multiple(true)
.value_name("ENDPOINTS")
.default_value("127.0.0.1:2379")
.help("set all pd endpoints of the tikv cluster")
.takes_value(true),
)
.arg(
Arg::with_name("mount-point")
.long("mount-point")
.short("m")
.value_name("MOUNT_POINT")
.required(true)
.help("Act as a client, and mount FUSE at given path")
.takes_value(true),
)
.arg(
Arg::with_name("options")
.value_name("OPTION")
.long("option")
.short("o")
.multiple(true)
.help("filesystem mount options"),
)
.get_matches();
setup_global_subscriber();
let endpoints: Vec<&str> = matches
.values_of("pd")
.unwrap_or_default()
.to_owned()
.collect();
let mountpoint: String = matches.value_of("mount-point").unwrap().to_string();
let options = MountOption::to_vec(matches.values_of("options").unwrap_or_default());
mount_tifs(mountpoint, endpoints, options).await.unwrap();
}
fn setup_global_subscriber() {
let layer = tracing_atrace::layer()
.unwrap()
.with_data_field(Option::Some("data".to_string()));
let subscriber = Registry::default().with(layer);
tracing::subscriber::set_global_default(subscriber).unwrap();
} | .version(crate_version!())
.author("Hexi Lee") |
switch.py | import logging
from homeassistant.components import persistent_notification
from homeassistant.helpers.entity import ToggleEntity
from . import DOMAIN
from .core.gateway3 import Gateway3
from .core.helpers import XiaomiEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
def setup(gateway: Gateway3, device: dict, attr: str):
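        # Pick the entity class: the special firmware-lock switch, a mesh switch,
        # or a plain Zigbee switch.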
if attr == 'firmware lock':
async_add_entities([FirmwareLock(gateway, device, attr)])
elif device['type'] == 'mesh':
async_add_entities([XiaomiMeshSwitch(gateway, device, attr)])
else:
async_add_entities([XiaomiZigbeeSwitch(gateway, device, attr)])
gw: Gateway3 = hass.data[DOMAIN][config_entry.entry_id]
gw.add_setup('switch', setup)
class XiaomiZigbeeSwitch(XiaomiEntity, ToggleEntity):
@property
def is_on(self):
return self._state
async def async_update(self, data: dict = None):
# thread.run > mqtt.loop_forever > ... > thread.on_message
# > entity.update
# > entity.schedule_update_ha_state *
# > hass.add_job *
# > loop.call_soon_threadsafe *
# > hass.async_add_job *
# > hass.async_add_hass_job *
# > loop.create_task *
# > entity.async_update_ha_state *
        # > entity._async_write_ha_state
self._state = bool(data[self.attr])
self.async_write_ha_state()
async def async_turn_on(self):
await self.gw.send_zigbee(self.device, {self.attr: 1})
async def async_turn_off(self):
await self.gw.send_zigbee(self.device, {self.attr: 0})
class XiaomiMeshSwitch(XiaomiEntity, ToggleEntity):
@property
def should_poll(self):
return False
@property
def is_on(self):
return self._state
async def async_update(self, data: dict = None):
if data is None:
self.gw.mesh_force_update()
return
if self.attr in data:
# handle main attribute as online state
if data[self.attr] is not None:
self._state = bool(data[self.attr])
self.device['online'] = True
else:
self.device['online'] = False
self.async_write_ha_state()
async def async_turn_on(self, **kwargs):
self._state = True
await self.gw.send_mesh(self.device, {self.attr: True})
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
self._state = False
await self.gw.send_mesh(self.device, {self.attr: False})
self.async_write_ha_state()
class FirmwareLock(XiaomiZigbeeSwitch):
@property
def icon(self):
return 'mdi:cloud-lock'
async def async_turn_on(self):
if await self.gw.lock_firmware(enable=True):
self._state = True
self.async_write_ha_state()
persistent_notification.async_create(
self.hass, "Firmware update is locked. You can sleep well.",
"Xiaomi Gateway 3"
)
async def async_turn_off(self):
if await self.gw.lock_firmware(enable=False):
self._state = False
self.async_write_ha_state() | # > hass.states.async_set
# > bus.async_fire
# > hass.async_add_hass_job
# > loop.call_soon |
main.go | package main
import(
"time"
"flag"
"strings"
"log"
"fmt"
"gopkg.in/libgit2/git2go.v22"
"github.com/searchspring/repo-tsar/tsar"
"github.com/searchspring/repo-tsar/config"
)
var configFileName string
var branch string
var repos string
var version bool
const (
versioninfo = "v0.1.4"
)
func main() {
// Parse commandline
flag.StringVar(&configFileName, "config", "repotsar.yml", "YAML config file")
flag.StringVar(&branch, "branch", "", "Create branch in repos")
	flag.StringVar(&repos, "repos", "", "Comma-separated list of repos, no spaces (defaults to all)")
flag.BoolVar(&version, "version",false,"RepoTsar version")
flag.Parse()
if version == true {
fmt.Printf("RepoTsar version %s\n", versioninfo)
return
}
config,err := config.ReadConfig(configFileName)
if err != nil { | signature := &git.Signature{
Name: config.Signature.Name,
Email: config.Signature.Email,
When: time.Now(),
}
tsar := &tsar.RepoTsar{
Config: config,
Branch: branch,
ReposList: reposlist,
Signature: signature,
}
err = tsar.Run()
if err != nil {
log.Fatal(err)
}
} | log.Fatal(err)
}
reposlist := strings.Split(repos, ",")
// Git Signature |
trampoline.rs | use crate::convert::implicitly_convert_to_int;
use crate::extn::core::integer::Integer;
use crate::extn::prelude::*;
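// Trampolines bridging interpreter `Value`s to the Rust `Integer` implementation:
// each function converts its receiver and arguments, then delegates to the
// corresponding `Integer` method.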
pub fn chr(interp: &mut Artichoke, value: Value, encoding: Option<Value>) -> Result<Value, Error> {
let value = value.try_convert_into::<Integer>(interp)?;
let s = value.chr(interp, encoding)?;
interp.try_convert_mut(s)
}
pub fn element_reference(interp: &mut Artichoke, value: Value, bit: Value) -> Result<Value, Error> {
let value = value.try_convert_into::<Integer>(interp)?;
let bit = implicitly_convert_to_int(interp, bit)?;
let bit = value.bit(bit)?;
Ok(interp.convert(bit))
}
pub fn div(interp: &mut Artichoke, value: Value, denominator: Value) -> Result<Value, Error> {
let value = value.try_convert_into::<Integer>(interp)?;
let quotient = value.div(interp, denominator)?;
Ok(interp.convert_mut(quotient))
}
pub fn is_allbits(interp: &mut Artichoke, value: Value, mask: Value) -> Result<Value, Error> {
let value = value.try_convert_into::<Integer>(interp)?;
let mask = implicitly_convert_to_int(interp, mask)?;
let result = value.is_allbits(mask);
Ok(interp.convert(result))
} | let mask = implicitly_convert_to_int(interp, mask)?;
let result = value.is_anybits(mask);
Ok(interp.convert(result))
}
pub fn is_nobits(interp: &mut Artichoke, value: Value, mask: Value) -> Result<Value, Error> {
let value = value.try_convert_into::<Integer>(interp)?;
let mask = implicitly_convert_to_int(interp, mask)?;
let result = value.is_nobits(mask);
Ok(interp.convert(result))
}
pub fn size(interp: &Artichoke) -> Result<Value, Error> {
// This `as` cast is lossless because `size_of::<i64>` is guaranteed to be
// less than `i64::MAX`.
const SIZE: i64 = Integer::size() as i64;
Ok(interp.convert(SIZE))
} |
pub fn is_anybits(interp: &mut Artichoke, value: Value, mask: Value) -> Result<Value, Error> {
let value = value.try_convert_into::<Integer>(interp)?; |
opcode32.rs | use std::convert::TryFrom;
use crate::emulator::*;
use crate::emulator::access::register::*;
use crate::emulator::instruction::opcode::*;
pub struct Opcode32 (pub super::OpcodeArr);
impl Opcode32 {
pub fn new(op: super::OpcodeArr) -> Self {
Self (op)
}
}
impl super::OpcodeTrait for Opcode32 {
fn init_opcode(&mut self) -> () {
macro_rules! setop {
($n:expr, $fnc:ident, $flg:expr) => { self.0[$n & 0x1ff] = OpcodeType{func:Self::$fnc, flag:$flg} }
}
// 0x00 : add_rm8_r8
setop!(0x01, add_rm32_r32, OpFlags::MODRM);
// 0x02 : add_r8_rm8
setop!(0x03, add_r32_rm32, OpFlags::MODRM);
// 0x04 : add_al_imm8
setop!(0x05, add_eax_imm32, OpFlags::IMM32);
// 0x08 : or_rm8_r8
setop!(0x09, or_rm32_r32, OpFlags::MODRM);
// 0x0a : or_r8_rm8
setop!(0x0b, or_r32_rm32, OpFlags::MODRM);
// 0x0c : or_al_imm8
setop!(0x0d, or_eax_imm32, OpFlags::IMM32);
// 0x10 : adc_rm8_r8
setop!(0x11, adc_rm32_r32, OpFlags::MODRM);
// 0x12 : adc_r8_rm8
setop!(0x13, adc_r32_rm32, OpFlags::MODRM);
// 0x14 : adc_al_imm8
setop!(0x15, adc_eax_imm32, OpFlags::IMM32);
// 0x18 : sbb_rm8_r8
setop!(0x19, sbb_rm32_r32, OpFlags::MODRM);
// 0x1a : sbb_r8_rm8
setop!(0x1b, sbb_r32_rm32, OpFlags::MODRM);
// 0x1c : sbb_al_imm8
setop!(0x1d, sbb_eax_imm32, OpFlags::IMM32);
// 0x20 : and_rm8_r8
setop!(0x21, and_rm32_r32, OpFlags::MODRM);
// 0x22 : and_r8_rm8
setop!(0x23, and_r32_rm32, OpFlags::MODRM);
// 0x24 : and_al_imm8
setop!(0x25, and_eax_imm32, OpFlags::IMM32);
// 0x28 : sub_rm8_r8
setop!(0x29, sub_rm32_r32, OpFlags::MODRM);
// 0x2a : sub_r8_rm8
setop!(0x2b, sub_r32_rm32, OpFlags::MODRM);
// 0x2c : sub_al_imm8
setop!(0x2d, sub_eax_imm32, OpFlags::IMM32);
// 0x30 : xor_rm8_r8
setop!(0x31, xor_rm32_r32, OpFlags::MODRM);
// 0x32 : xor_r8_rm8
setop!(0x33, xor_r32_rm32, OpFlags::MODRM);
// 0x34 : xor_al_imm8
setop!(0x35, xor_eax_imm32, OpFlags::IMM32);
// 0x38 : cmp_rm8_r8
setop!(0x39, cmp_rm32_r32, OpFlags::MODRM);
// 0x3a : cmp_r8_rm8
setop!(0x3b, cmp_r32_rm32, OpFlags::MODRM);
// 0x3c : cmp_al_imm8
setop!(0x3d, cmp_eax_imm32, OpFlags::IMM32);
for i in 0..8 {
setop!(0x40+i, inc_opr32, OpFlags::NONE);
setop!(0x48+i, dec_opr32, OpFlags::NONE);
setop!(0x50+i, push_opr32, OpFlags::NONE);
setop!(0x58+i, pop_opr32, OpFlags::NONE);
}
setop!(0x60, pushad, OpFlags::NONE);
setop!(0x61, popad, OpFlags::NONE);
setop!(0x68, push_imm32, OpFlags::IMM32);
setop!(0x69, imul_r32_rm32_imm32, OpFlags::MODRM | OpFlags::IMM32);
setop!(0x6a, push_imm8, OpFlags::IMM8);
setop!(0x6b, imul_r32_rm32_imm8, OpFlags::MODRM | OpFlags::IMM8);
// 0x70-0x7f : jcc
// 0x84 : test_rm8_r8
setop!(0x85, test_rm32_r32, OpFlags::MODRM);
// 0x86 : xchg_r8_rm8
setop!(0x87, xchg_r32_rm32, OpFlags::MODRM);
// 0x88 : mov_rm8_r8
setop!(0x89, mov_rm32_r32, OpFlags::MODRM);
// 0x8a : mov_r8_rm8
setop!(0x8b, mov_r32_rm32, OpFlags::MODRM);
setop!(0x8c, mov_rm32_sreg, OpFlags::MODRM);
setop!(0x8d, lea_r32_m32, OpFlags::MODRM);
// 0x8e : mov_sreg_rm32
// 0x90 : nop
for i in 0..8 {
setop!(0x90+i, xchg_eax_opr32, OpFlags::NONE);
}
setop!(0x98, cwde, OpFlags::NONE);
setop!(0x99, cdq, OpFlags::NONE);
setop!(0x9a, callf_ptr16_imm32, OpFlags::PTR16 | OpFlags::IMM32);
setop!(0x9c, pushf, OpFlags::NONE);
setop!(0x9d, popf, OpFlags::NONE);
// 0xa0 : mov_al_moffs8
setop!(0xa1, mov_eax_moffs32, OpFlags::MOFFS);
// 0xa2 : mov_moffs8_al
setop!(0xa3, mov_moffs32_eax, OpFlags::MOFFS);
setop!(0xa4, movs_m8, OpFlags::NONE);
setop!(0xa5, movs_m32, OpFlags::NONE);
setop!(0xa6, cmps_m8, OpFlags::NONE);
setop!(0xa7, cmps_m32, OpFlags::NONE);
// 0xa8 : test_al_imm8
setop!(0xa9, test_eax_imm32, OpFlags::IMM32);
setop!(0xaa, stos_m8, OpFlags::NONE);
setop!(0xab, stos_m32, OpFlags::NONE);
setop!(0xac, lods_m8, OpFlags::NONE);
setop!(0xad, lods_m32, OpFlags::NONE);
setop!(0xae, scas_m8, OpFlags::NONE);
setop!(0xaf, scas_m32, OpFlags::NONE);
// 0xb0-0xb7 : mov_r8_imm
for i in 0..8 {
setop!(0xb8+i, mov_opr32_imm32, OpFlags::IMM32);
}
setop!(0xc3, ret, OpFlags::NONE);
setop!(0xc7, mov_rm32_imm32, OpFlags::MODRM | OpFlags::IMM32);
setop!(0xc9, leave, OpFlags::NONE);
setop!(0xcb, retf, OpFlags::NONE);
// 0xcc : int3
// 0xcd : int_imm8
setop!(0xcf, iret, OpFlags::NONE);
// 0xe4 : in_al_imm8
setop!(0xe5, in_eax_imm8, OpFlags::IMM8);
// 0xe6 : out_imm8_al
setop!(0xe7, out_imm8_eax, OpFlags::IMM8);
setop!(0xe8, call_imm32, OpFlags::IMM32);
setop!(0xe9, jmp_imm32, OpFlags::IMM32);
setop!(0xea, jmpf_ptr16_imm32, OpFlags::PTR16 | OpFlags::IMM32);
// 0xeb : jmp_imm8
// 0xec : in_al_dx
setop!(0xed, in_eax_dx, OpFlags::NONE);
// 0xee : out_dx_al
setop!(0xef, out_dx_eax, OpFlags::NONE);
setop!(0x0f80, jo_imm32, OpFlags::IMM32);
setop!(0x0f81, jno_imm32, OpFlags::IMM32);
setop!(0x0f82, jb_imm32, OpFlags::IMM32);
setop!(0x0f83, jnb_imm32, OpFlags::IMM32);
setop!(0x0f84, jz_imm32, OpFlags::IMM32);
setop!(0x0f85, jnz_imm32, OpFlags::IMM32);
setop!(0x0f86, jbe_imm32, OpFlags::IMM32);
setop!(0x0f87, jnbe_imm32, OpFlags::IMM32);
setop!(0x0f88, js_imm32, OpFlags::IMM32);
setop!(0x0f89, jns_imm32, OpFlags::IMM32);
setop!(0x0f8a, jp_imm32, OpFlags::IMM32);
setop!(0x0f8b, jnp_imm32, OpFlags::IMM32);
setop!(0x0f8c, jl_imm32, OpFlags::IMM32);
setop!(0x0f8d, jnl_imm32, OpFlags::IMM32);
setop!(0x0f8e, jle_imm32, OpFlags::IMM32);
setop!(0x0f8f, jnle_imm32, OpFlags::IMM32);
setop!(0x0faf, imul_r32_rm32, OpFlags::MODRM);
setop!(0x0fb6, movzx_r32_rm8, OpFlags::MODRM);
setop!(0x0fb7, movzx_r32_rm32, OpFlags::MODRM);
setop!(0x0fbe, movsx_r32_rm8, OpFlags::MODRM);
setop!(0x0fbf, movsx_r32_rm32, OpFlags::MODRM);
// 0x80 : code_80
setop!(0x81, code_81, OpFlags::MODRM | OpFlags::IMM32);
setop!(0x82, code_82, OpFlags::MODRM | OpFlags::IMM8);
setop!(0x83, code_83, OpFlags::MODRM | OpFlags::IMM8);
// 0xc0 : code_c0
setop!(0xc1, code_c1, OpFlags::MODRM | OpFlags::IMM8);
// 0xd2 : code_d2
setop!(0xd3, code_d3, OpFlags::MODRM);
// 0xf6 : code_f6
setop!(0xf7, code_f7, OpFlags::MODRM | OpFlags::IMM32);
// 0xfe : code_fe
setop!(0xff, code_ff, OpFlags::MODRM);
// 0x0f00 : code_0f00
setop!(0x0f01, code_0f01, OpFlags::MODRM);
}
fn exec(&self, exec: &mut exec::Exec) -> Result<(), EmuException> {
(self.0[exec.idata.opcode as usize].func)(exec)
}
fn flag(&self, opcode: u16) -> OpFlags { self.0[opcode as usize].flag }
}
impl Opcode32 {
add_dst_src!(32, rm32, r32);
add_dst_src!(32, r32, rm32);
add_dst_src!(32, eax, imm32);
or_dst_src!(32, rm32, r32);
or_dst_src!(32, r32, rm32);
or_dst_src!(32, eax, imm32);
adc_dst_src!(32, rm32, r32);
adc_dst_src!(32, r32, rm32);
adc_dst_src!(32, eax, imm32);
sbb_dst_src!(32, rm32, r32);
sbb_dst_src!(32, r32, rm32);
sbb_dst_src!(32, eax, imm32);
and_dst_src!(32, rm32, r32);
and_dst_src!(32, r32, rm32);
and_dst_src!(32, eax, imm32);
sub_dst_src!(32, rm32, r32);
sub_dst_src!(32, r32, rm32);
sub_dst_src!(32, eax, imm32);
xor_dst_src!(32, rm32, r32);
xor_dst_src!(32, r32, rm32);
xor_dst_src!(32, eax, imm32);
cmp_dst_src!(32, rm32, r32);
cmp_dst_src!(32, r32, rm32);
cmp_dst_src!(32, eax, imm32);
inc_dst!(opr32);
dec_dst!(opr32);
push_src!(32, opr32);
pop_dst!(32, opr32);
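    // pushad/popad push or pop all eight 32-bit general-purpose registers; the slot
    // for ESP holds the value it had before the first push.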
fn pushad(exec: &mut exec::Exec) -> Result<(), EmuException> {
debug!("pushad");
let sp = exec.ac.get_gpreg(GpReg32::ESP)?;
for i in 0..4 {
exec.ac.push_u32(exec.ac.get_gpreg(GpReg32::try_from(i).unwrap())?)?;
}
exec.ac.push_u32(sp)?;
for i in 5..8 {
exec.ac.push_u32(exec.ac.get_gpreg(GpReg32::try_from(i).unwrap())?)?;
}
Ok(())
}
fn popad(exec: &mut exec::Exec) -> Result<(), EmuException> {
debug!("popad");
for i in (5..8).rev() {
let v = exec.ac.pop_u32()?;
exec.ac.set_gpreg(GpReg32::try_from(i).unwrap(), v)?;
}
let sp = exec.ac.pop_u32()?;
for i in (0..4).rev() {
let v = exec.ac.pop_u32()?;
exec.ac.set_gpreg(GpReg32::try_from(i).unwrap(), v)?;
}
exec.ac.set_gpreg(GpReg32::ESP, sp)
}
push_src!(32, imm8);
imul_dst_src1_src2!(32, r32, rm32, imm32);
push_src!(32, imm32);
imul_dst_src1_src2!(32, r32, rm32, imm8);
test_dst_src!(32, rm32, r32);
xchg_dst_src!(32, r32, rm32);
mov_dst_src!(32, rm32, r32);
mov_dst_src!(32, r32, rm32);
mov_dst_src!(32, rm32, sreg);
lea_dst_src!(32, r32, m32);
xchg_dst_src!(32, eax, opr32);
fn cwde(exec: &mut exec::Exec) -> Result<(), EmuException> {
let ax = exec.ac.get_gpreg(GpReg16::AX)? as i16;
exec.ac.set_gpreg(GpReg32::EAX, ax as u32)
}
fn cdq(exec: &mut exec::Exec) -> Result<(), EmuException> {
let eax = exec.ac.get_gpreg(GpReg32::EAX)? as i32;
exec.ac.set_gpreg(GpReg32::EDX, if eax < 0 { u32::MAX } else { 0 })
}
callf_abs!(32, ptr16, imm32);
pushf!(32);
popf!(32);
mov_dst_src!(32, eax, moffs32);
mov_dst_src!(32, moffs32, eax);
movs_dst_src!(32, 8);
movs_dst_src!(32, 32);
cmps_src_dst!(32, 8);
cmps_src_dst!(32, 32);
test_dst_src!(32, eax, imm32);
stos_dst_src!(32, 8);
stos_dst_src!(32, 32);
lods_dst_src!(32, 8);
lods_dst_src!(32, 32);
scas_src_dst!(32, 8);
scas_src_dst!(32, 32);
mov_dst_src!(32, opr32, imm32);
ret!(32);
mov_dst_src!(32, rm32, imm32);
fn leave(exec: &mut exec::Exec) -> Result<(), EmuException> {
let ebp = exec.ac.get_gpreg(GpReg32::EBP)?;
exec.ac.set_gpreg(GpReg32::ESP, ebp)?;
let new_ebp = exec.ac.pop_u32()?;
debug!("leave: esp <- 0x{:08x}, ebp <- 0x{:08x}", ebp, new_ebp);
exec.ac.set_gpreg(GpReg32::EBP, new_ebp)
}
retf!(32);
iret!(32);
in_reg_port!(32, eax, imm8);
out_port_reg!(32, imm8, eax);
call_rel!(32, imm32);
jmp_rel!(32, imm32);
jmpf_abs!(32, ptr16, imm32);
in_reg_port!(32, eax, dx);
out_port_reg!(32, dx, eax);
jcc_rel!(32, o, imm32);
jcc_rel!(32, b, imm32);
jcc_rel!(32, z, imm32);
jcc_rel!(32, be, imm32);
jcc_rel!(32, s, imm32);
jcc_rel!(32, p, imm32);
jcc_rel!(32, l, imm32);
jcc_rel!(32, le, imm32);
imul_dst_src!(32, r32, rm32);
movzx_dst_src!(32, r32, 8, rm8);
movzx_dst_src!(32, r32, 32, rm32);
movsx_dst_src!(32, r32, 8, rm8);
movsx_dst_src!(32, r32, 32, rm32);
fn code_81(exec: &mut exec::Exec) -> Result<(), EmuException> {
match exec.idata.modrm.reg as u8 {
0 => Opcode32::add_rm32_imm32(exec)?,
1 => Opcode32::or_rm32_imm32(exec)?,
2 => Opcode32::adc_rm32_imm32(exec)?,
3 => Opcode32::sbb_rm32_imm32(exec)?,
4 => Opcode32::and_rm32_imm32(exec)?,
5 => Opcode32::sub_rm32_imm32(exec)?,
6 => Opcode32::xor_rm32_imm32(exec)?,
7 => Opcode32::cmp_rm32_imm32(exec)?,
_ => { return Err(EmuException::UnexpectedError); },
}
Ok(())
}
add_dst_src!(32, rm32, imm32);
or_dst_src!(32, rm32, imm32);
adc_dst_src!(32, rm32, imm32);
sbb_dst_src!(32, rm32, imm32);
and_dst_src!(32, rm32, imm32);
sub_dst_src!(32, rm32, imm32);
xor_dst_src!(32, rm32, imm32);
cmp_dst_src!(32, rm32, imm32);
fn code_82(exec: &mut exec::Exec) -> Result<(), EmuException> {
super::common::code_82(exec)
}
fn code_83(exec: &mut exec::Exec) -> Result<(), EmuException> {
match exec.idata.modrm.reg as u8 {
0 => Opcode32::add_rm32_imm8(exec)?,
1 => Opcode32::or_rm32_imm8(exec)?,
2 => Opcode32::adc_rm32_imm8(exec)?,
3 => Opcode32::sbb_rm32_imm8(exec)?,
4 => Opcode32::and_rm32_imm8(exec)?,
5 => Opcode32::sub_rm32_imm8(exec)?,
6 => Opcode32::xor_rm32_imm8(exec)?,
7 => Opcode32::cmp_rm32_imm8(exec)?,
_ => { return Err(EmuException::UnexpectedError); },
}
Ok(())
}
add_dst_src!(32, rm32, imm8);
or_dst_src!(32, rm32, imm8);
adc_dst_src!(32, rm32, imm8);
sbb_dst_src!(32, rm32, imm8);
and_dst_src!(32, rm32, imm8);
sub_dst_src!(32, rm32, imm8);
xor_dst_src!(32, rm32, imm8);
cmp_dst_src!(32, rm32, imm8);
fn code_c1(exec: &mut exec::Exec) -> Result<(), EmuException> {
match exec.idata.modrm.reg as u8 {
/*
0 => Opcode32::rol_rm32_imm8(exec)?,
1 => Opcode32::ror_rm32_imm8(exec)?,
2 => Opcode32::rcl_rm32_imm8(exec)?,
3 => Opcode32::rcr_rm32_imm8(exec)?,
*/
4 => Opcode32::shl_rm32_imm8(exec)?,
5 => Opcode32::shr_rm32_imm8(exec)?,
6 => Opcode32::sal_rm32_imm8(exec)?,
7 => Opcode32::sar_rm32_imm8(exec)?,
_ => { return Err(EmuException::UnexpectedError); },
}
Ok(())
}
/*
rol_dst_src!(32, rm32, imm8);
ror_dst_src!(32, rm32, imm8);
rcl_dst_src!(32, rm32, imm8);
rcr_dst_src!(32, rm32, imm8);
*/
shl_dst_src!(32, rm32, imm8);
shr_dst_src!(32, rm32, imm8);
sal_dst_src!(32, rm32, imm8);
sar_dst_src!(32, rm32, imm8);
fn code_d3(exec: &mut exec::Exec) -> Result<(), EmuException> {
match exec.idata.modrm.reg as u8 {
/*
0 => Opcode32::rol_rm32_cl(exec)?,
1 => Opcode32::ror_rm32_cl(exec)?,
2 => Opcode32::rcl_rm32_cl(exec)?,
3 => Opcode32::rcr_rm32_cl(exec)?,
*/
4 => Opcode32::shl_rm32_cl(exec)?,
5 => Opcode32::shr_rm32_cl(exec)?,
6 => Opcode32::sal_rm32_cl(exec)?,
7 => Opcode32::sar_rm32_cl(exec)?,
_ => { return Err(EmuException::UnexpectedError); },
}
Ok(())
}
/*
rol_dst_src!(32, rm32, cl);
ror_dst_src!(32, rm32, cl);
rcl_dst_src!(32, rm32, cl);
rcr_dst_src!(32, rm32, cl);
*/
shl_dst_src!(32, rm32, cl);
shr_dst_src!(32, rm32, cl);
sal_dst_src!(32, rm32, cl);
sar_dst_src!(32, rm32, cl);
fn code_f7(exec: &mut exec::Exec) -> Result<(), EmuException> {
let back = match exec.idata.modrm.reg as u8 {
0 => { Opcode32::test_rm32_imm32(exec)?; 0},
2 => | ,
3 => { Opcode32::neg_rm32(exec)?; -4},
4 => { Opcode32::mul_edx_eax_rm32(exec)?; -4},
5 => { Opcode32::imul_edx_eax_rm32(exec)?; -4},
6 => { Opcode32::div_eax_edx_rm32(exec)?; -4},
7 => { Opcode32::idiv_eax_edx_rm32(exec)?; -4},
_ => { return Err(EmuException::UnexpectedError); },
};
exec.ac.update_ip(back)
}
test_dst_src!(32, rm32, imm32);
not_dst!(32, rm32);
neg_dst!(32, rm32);
mul_high_low_src!(32, edx, eax, rm32);
imul_high_low_src!(32, edx, eax, rm32);
div_quot_rem_src!(32, eax, edx, rm32);
idiv_quot_rem_src!(32, eax, edx, rm32);
fn code_ff(exec: &mut exec::Exec) -> Result<(), EmuException> {
match exec.idata.modrm.reg as u8 {
0 => Opcode32::inc_rm32(exec)?,
1 => Opcode32::dec_rm32(exec)?,
_ => { return Err(EmuException::UnexpectedError); },
}
Ok(())
}
inc_dst!(rm32);
dec_dst!(rm32);
fn code_0f01(exec: &mut exec::Exec) -> Result<(), EmuException> {
match exec.idata.modrm.reg as u8 {
2 => Opcode32::lgdt_m16_32(exec)?,
3 => Opcode32::lidt_m16_32(exec)?,
_ => { return Err(EmuException::NotImplementedOpcode); },
}
Ok(())
}
fn lgdt_m16_32(exec: &mut exec::Exec) -> Result<(), EmuException> {
let (sg, adr) = exec.get_m()?;
if exec.ac.get_cpl()? > 0 {
return Err(EmuException::CPUException(CPUException::GP(None)));
}
let limit = exec.ac.get_data16((sg,adr))?;
let base = exec.ac.get_data32((sg,adr+2))?;
debug!("lgdt: base = {:08x}, limit = {:04x}", base, limit);
exec.ac.set_gdtr(base as u64, limit)
}
fn lidt_m16_32(exec: &mut exec::Exec) -> Result<(), EmuException> {
let (sg, adr) = exec.get_m()?;
if exec.ac.get_cpl()? > 0 {
return Err(EmuException::CPUException(CPUException::GP(None)));
}
let limit = exec.ac.get_data16((sg,adr))?;
let base = exec.ac.get_data32((sg,adr+2))?;
debug!("lidt: base = {:08x}, limit = {:04x}", base, limit);
exec.ac.set_idtr(base as u64, limit)
}
}
| { Opcode32::not_rm32(exec)?; -4} |
smart.py | from pydocmd.preprocessors.rst import Preprocessor as RSTPreprocessor
from pydocmd.preprocessors.google import Preprocessor as GooglePreprocessor
class Preprocessor(object):
"""
    This class implements the preprocessor for reStructuredText and Google-style docstrings.
"""
def __init__(self, config=None):
|
def is_google_format(self, docstring):
"""
Check if `docstring` is written in Google docstring format
https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html
"""
lines = [line.strip() for line in docstring.split('\n')]
google_section_names = self._google_preprocessor.get_section_names()
for section_name in google_section_names:
if section_name in lines:
return True
return False
def preprocess_section(self, section):
"""
        Preprocesses a given section into its components.
"""
if self.is_google_format(section.content):
return self._google_preprocessor.preprocess_section(section)
return self._rst_preprocessor.preprocess_section(section)
@staticmethod
def _append_section(lines, key, sections):
section = sections.get(key)
if not section:
return
if lines and lines[-1]:
lines.append('')
# add an extra line because of markdown syntax
lines.extend(['**{}**:'.format(key), ''])
lines.extend(section)
| self.config = config
self._google_preprocessor = GooglePreprocessor(config)
self._rst_preprocessor = RSTPreprocessor(config) |
main.rs | #![type_length_limit = "2000000"]
extern crate configure_me;
#[macro_use]
extern crate serde;
use anyhow::Error;
use btc_rpc_proxy;
// use block_filters;
mod create_state;
#[tokio::main]
async fn main() -> Result<(), Error> | {
let state = create_state::create_state()?.arc();
btc_rpc_proxy::main(state).await
// block_filters::main();
} |
|
BRF.py | import collections
from supriya import CalculationRate
from supriya.ugens.Filter import Filter
class | (Filter):
"""
A 2nd order Butterworth band-reject filter.
::
>>> source = supriya.ugens.In.ar(bus=0)
        >>> b_r_f = supriya.ugens.BRF.ar(source=source)
>>> b_r_f
BRF.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Filter UGens"
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 440.0), ("reciprocal_of_q", 1.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| BRF |
arm_build_with_custom_config.py |
from kraken import plugins
from kraken.core.maths import Xfo, Vec3, Quat
from kraken.core.configs.config import Config
from kraken_components.biped.arm_component import ArmComponentGuide, ArmComponentRig
class CustomConfig(Config):
"""Base Configuration for Kraken builders."""
def | (self):
super(CustomConfig, self).__init__()
# ======================
# Name Template Methods
# ======================
def initNameTemplate(self):
"""Initializes the name template.
Returns:
dict: name template.
"""
nameTemplate = super(CustomConfig, self).initNameTemplate()
nameTemplate["formats"] = {
"Container": ["name"],
"Layer": ["container", "sep", "name"],
"ComponentGroup": ["location", "sep", "name", "sep", "type"],
"default": ["location", "sep", "component", "sep", "name", "sep", "type"],
}
return nameTemplate
armGuide = ArmComponentGuide("arm")
armGuide.loadData({
"name": "Arm",
"location": "L",
"bicepXfo": Xfo(Vec3(2.27, 15.295, -0.753)),
"forearmXfo": Xfo(Vec3(5.039, 13.56, -0.859)),
"wristXfo": Xfo(Vec3(7.1886, 12.2819, 0.4906)),
"handXfo": Xfo(tr=Vec3(7.1886, 12.2819, 0.4906),
ori=Quat(Vec3(-0.0865, -0.2301, -0.2623), 0.9331)),
"bicepFKCtrlSize": 1.75,
"forearmFKCtrlSize": 1.5
})
armGuideData = armGuide.getRigBuildData()
arm = ArmComponentRig()
arm.loadData(armGuideData)
builder = plugins.getBuilder()
builder.build(arm)
# Set the custom config as the new singleton value.
CustomConfig.makeCurrent()
builder = plugins.getBuilder()
builder.build(arm)
| __init__ |
wechat.go | package service
import (
"encoding/json"
"github.com/gin-gonic/gin"
wechatSdk "github.com/silenceper/wechat/v2"
"github.com/silenceper/wechat/v2/cache"
offConfig "github.com/silenceper/wechat/v2/officialaccount/config"
"github.com/silenceper/wechat/v2/officialaccount/message"
"github.com/spf13/viper"
"lime/pkg/api/front/dao"
"lime/pkg/api/front/domain/wechat"
)
var ConfigDao = dao.ConfigDao{}
type WechatService struct{}
type WechatSetting struct {
Url string
Token string
Encodingaeskey string
}
type WechatBaseSetting struct {
Appid string
AppSecret string
IpWhitelist string
}
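// Both settings structs above are populated from JSON config blobs that are
// fetched through ConfigDao.GetByCode in Callback below.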
func (ws WechatService) Callback(c *gin.Context) {
wc := wechatSdk.NewWechat()
redisOpts := &cache.RedisOpts{
Host: viper.GetString("redis.host"),
Database: viper.GetInt("redis.db"),
MaxActive: 10,
MaxIdle: 10,
IdleTimeout: 60, //second
}
redisCache := cache.NewRedis(redisOpts)
config := ConfigDao.GetByCode("wechatSetting")
setting := &WechatSetting{}
errSetting := json.Unmarshal(config,&setting)
if errSetting != nil |
wechatBaseSetting := ConfigDao.GetByCode("wechatBaseSetting")
settingBase := &WechatBaseSetting{}
errBaseSetting := json.Unmarshal(wechatBaseSetting,&settingBase)
if errBaseSetting != nil {
return
}
cfg := &offConfig.Config{
AppID: settingBase.Appid,
AppSecret: settingBase.AppSecret,
Token: setting.Token,
EncodingAESKey: setting.Encodingaeskey,
Cache: redisCache,
}
officialAccount := wc.GetOfficialAccount(cfg)
server := officialAccount.GetServer(c.Request, c.Writer)
	// Set the handler for incoming messages
server.SetMessageHandler(func(msg message.MixMessage) *message.Reply {
switch msg.Event {
		case message.EventSubscribe: // official account subscribe event
return wechat.Subscribe()
case message.EventUnsubscribe:
break
default:
break
}
text := message.NewText("欢迎使用lime soft!")
return &message.Reply{MsgType: message.MsgTypeText, MsgData: text}
})
	// Handle the received message and build the reply
err := server.Serve()
if err != nil {
return
}
	// Send the reply message
server.Send()
return
}
| {
return
} |
softsigner.rs | //! A signer atop the OpenSSL library.
//!
//! Because this adds a dependency to openssl libs this is disabled by
//! default and should only be used by implementations that need to use
//! software keys to sign things, such as an RPKI Certificate Authority or
//! Publication Server. In particular, this is not required when validating.
use std::io;
use std::sync::{Arc, RwLock};
use openssl::rsa::Rsa;
use openssl::pkey::{PKey, Private};
use openssl::hash::MessageDigest;
use ring::rand;
use ring::rand::SecureRandom;
use super::keys::{PublicKey, PublicKeyFormat};
use super::signature::{Signature, SignatureAlgorithm};
use super::signer::{KeyError, Signer, SigningError};
//------------ OpenSslSigner -------------------------------------------------
/// An OpenSSL based signer.
///
/// Keeps the keys in memory (for now).
pub struct OpenSslSigner {
keys: RwLock<Vec<Option<Arc<KeyPair>>>>,
rng: rand::SystemRandom,
}
impl OpenSslSigner {
pub fn new() -> OpenSslSigner {
OpenSslSigner {
keys: Default::default(),
rng: rand::SystemRandom::new(),
}
}
pub fn key_from_der(&self, der: &[u8]) -> Result<KeyId, io::Error> {
Ok(self.insert_key(KeyPair::from_der(der)?))
}
pub fn key_from_pem(&self, pem: &[u8]) -> Result<KeyId, io::Error> {
Ok(self.insert_key(KeyPair::from_pem(pem)?))
}
fn insert_key(&self, key: KeyPair) -> KeyId {
let mut keys = self.keys.write().unwrap();
let res = keys.len();
keys.push(Some(key.into()));
KeyId(res)
}
fn get_key(&self, id: KeyId) -> Result<Arc<KeyPair>, KeyError<io::Error>> {
self.keys.read().unwrap().get(id.0).and_then(|key| {
key.as_ref().cloned()
}).ok_or(KeyError::KeyNotFound)
}
fn delete_key(&self, key: KeyId) -> Result<(), KeyError<io::Error>> {
let mut keys = self.keys.write().unwrap();
match keys.get_mut(key.0) {
Some(key) => {
if key.is_some() {
*key = None;
Ok(())
}
else {
Err(KeyError::KeyNotFound)
}
}
None => Err(KeyError::KeyNotFound)
}
}
}
impl Signer for OpenSslSigner {
type KeyId = KeyId;
type Error = io::Error;
fn create_key(
&self, algorithm: PublicKeyFormat
) -> Result<Self::KeyId, Self::Error> {
Ok(self.insert_key(KeyPair::new(algorithm)?))
}
fn get_key_info(
&self,
id: &Self::KeyId
) -> Result<PublicKey, KeyError<Self::Error>> {
self.get_key(*id)?.get_key_info().map_err(KeyError::Signer)
}
fn destroy_key(
&self, key: &Self::KeyId
) -> Result<(), KeyError<Self::Error>> {
self.delete_key(*key)
}
fn sign<D: AsRef<[u8]> + ?Sized>(
&self,
key: &Self::KeyId,
algorithm: SignatureAlgorithm,
data: &D
) -> Result<Signature, SigningError<Self::Error>> {
self.get_key(*key)?.sign(algorithm, data.as_ref()).map_err(Into::into)
}
fn sign_one_off<D: AsRef<[u8]> + ?Sized>(
&self,
algorithm: SignatureAlgorithm,
data: &D
) -> Result<(Signature, PublicKey), Self::Error> {
let key = KeyPair::new(algorithm.public_key_format())?;
let info = key.get_key_info()?;
let sig = key.sign(algorithm, data.as_ref())?;
Ok((sig, info))
}
fn rand(&self, target: &mut [u8]) -> Result<(), Self::Error> {
self.rng.fill(target).map_err(|_|
io::Error::new(io::ErrorKind::Other, "rng error")
)
}
}
impl Default for OpenSslSigner {
fn default() -> Self {
Self::new()
}
}
//------------ KeyId ---------------------------------------------------------
/// This signer’s key identifier.
//
// We wrap this in a newtype so that people won’t start mucking about with
// the integers.
#[derive(Clone, Copy, Debug)]
pub struct KeyId(usize);
//------------ KeyPair -------------------------------------------------------
/// A key pair kept by the signer.
struct KeyPair(PKey<Private>);
impl KeyPair {
fn new(algorithm: PublicKeyFormat) -> Result<Self, io::Error> {
if algorithm != PublicKeyFormat::Rsa {
return Err(io::Error::new(
io::ErrorKind::Other, "invalid algorithm"
));
}
// Issues unwrapping this indicate a bug in the openssl library.
// So, there is no way to recover.
let rsa = Rsa::generate(2048)?;
let pkey = PKey::from_rsa(rsa)?;
Ok(KeyPair(pkey))
}
fn from_der(der: &[u8]) -> Result<Self, io::Error> {
let res = PKey::private_key_from_der(der)?;
if res.bits() != 2048 {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("invalid key length {}", res.bits())
))
}
Ok(KeyPair(res))
}
fn from | : &[u8]) -> Result<Self, io::Error> {
let res = PKey::private_key_from_pem(pem)?;
if res.bits() != 2048 {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("invalid key length {}", res.bits())
))
}
Ok(KeyPair(res))
}
fn get_key_info(&self) -> Result<PublicKey, io::Error>
{
// Issues unwrapping this indicate a bug in the openssl
// library. So, there is no way to recover.
let der = self.0.rsa().unwrap().public_key_to_der()?;
Ok(PublicKey::decode(der.as_ref()).unwrap())
}
fn sign(
&self,
_algorithm: SignatureAlgorithm,
data: &[u8]
) -> Result<Signature, io::Error> {
let mut signer = ::openssl::sign::Signer::new(
MessageDigest::sha256(), &self.0
)?;
signer.update(data)?;
Ok(Signature::new(
SignatureAlgorithm::default(),
signer.sign_to_vec()?.into()
))
}
}
//------------ Tests ---------------------------------------------------------
#[cfg(test)]
pub mod tests {
use super::*;
#[test]
fn info_sign_delete() {
let s = OpenSslSigner::new();
let ki = s.create_key(PublicKeyFormat::Rsa).unwrap();
let data = b"foobar";
let _ = s.get_key_info(&ki).unwrap();
let _ = s.sign(&ki, SignatureAlgorithm::default(), data).unwrap();
s.destroy_key(&ki).unwrap();
}
#[test]
fn one_off() {
let s = OpenSslSigner::new();
s.sign_one_off(SignatureAlgorithm::default(), b"foobar").unwrap();
}
}
| _pem(pem |
controller.py | """
This module holds functionality that connects the models to the views
"""
from flask import session
from app.models import db
from app import utilities
def process_form_data(dict_form_data, *args):
"""
    After casting form data to a dict, each value is a single-item list.
    Flatten those lists to their first element and validate the mandatory
    fields named in *args.
"""
new_dict = {}
try:
for key in dict_form_data.keys():
new_dict[key] = dict_form_data[key][0]
except AttributeError:
raise AttributeError('The input should be a dictionary')
# check for mandatory fields as directed by args
for arg in args:
try:
value = new_dict[arg]
if isinstance(value, str):
if len(value.strip()) == 0:
raise ValueError('%s should not be an empty string' % str(arg))
except KeyError:
raise ValueError('%s is an expected key' % str(arg))
return new_dict
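# Illustrative example (not part of the original module), assuming the form data
# was cast with something like ImmutableMultiDict.to_dict(flat=False):
#   process_form_data({'email': ['a@b.c'], 'name': ['Ann']}, 'email')
#   -> {'email': 'a@b.c', 'name': 'Ann'}
# A missing or blank 'email' would raise ValueError instead.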
def process_args_data(dict_args_data, *args):
"""
Raise ValueError if mandatory values are empty strings or
non-existent
"""
if utilities.check_type(dict_args_data, dict):
for arg in args:
try:
value = dict_args_data[arg]
if isinstance(value, str):
if len(value.strip()) == 0:
raise ValueError('%s should not be an empty string' % str(arg))
except KeyError:
raise ValueError('%s is an expected key' % str(arg))
return dict_args_data
def get_logged_in_user_key():
"""
This checks the session and gets the logged in user's key
"""
if 'user_key' in session.keys():
return session['user_key']
else:
return None
def remove_user_from_session():
"""
Removes the session variable user_key
from the session to logout the user
"""
if 'user_key' in session.keys():
session.pop('user_key')
session.modified = True
else:
raise KeyError('User does not exist in the session')
def | (user_key):
"""
Adds the session variable user_key for
logged in user
"""
user = db.get_user(user_key)
if user is None:
raise KeyError('User does not exist')
session['user_key'] = user_key
session.modified = True
| add_user_to_session |
test.ts | /// <reference path="../out/main.d.ts" />
/// <reference path="../typings/main.d.ts" />
| import test = require('blue-tape'); |
|
utils.py | """Utilities for measuring frame rate, and reading frames in a separate thread.
This code was mostly taken from:
http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
"""
import cv2
import datetime
import time
from threading import Thread
class FPS:
"""Helper class to track number of frames and time elapsed."""
def | (self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
self._last_update_time = time.time()
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
self._last_update_time = time.time()
def elapsed_since_last_update(self):
return (time.time() - self._last_update_time)
def elapsed(self):
# return the total number of seconds between the start and
# end interval, or if that's missing, then till now
end_time = self._end
if not end_time:
end_time = datetime.datetime.now()
return (end_time - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
class WebcamVideoStream:
"""Helper class that replaces the standard OpenCV usb camera reading methods, with a threaded version."""
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
if not self.grabbed:
raise ValueError("Unable to read from camera device.")
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
if not self.grabbed:
raise ValueError("Unable to read from camera device.")
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
| __init__ |
shortest_paths.rs | use std::cmp::{max, Reverse};
use std::collections::{hash_map::Entry, BinaryHeap, HashSet};
use std::hash::BuildHasherDefault;
use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
use crate::index::{EdgeIndex, IndexType, VertexIndex, Virtual};
use crate::infra::VisitSet;
use crate::marker::{EdgeType, Outgoing};
use crate::traits::*;
use crate::weight::Weighted;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub enum Algo {
Dijkstra,
BellmanFord,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
NegativeWeight,
NegativeCycle,
}
#[derive(Debug)]
pub struct ShortestPaths<W> {
start: VertexIndex,
// Using HashMaps because the algorithm supports early termination when
// reaching given goal. It is likely that reaching goal means visiting a
// subgraph which is significantly smaller than the original graph.
dist: FxHashMap<VertexIndex, W>,
pred: FxHashMap<VertexIndex, VertexIndex>,
}
impl<W> ShortestPaths<W>
where
W: Weight,
{
pub fn run_algo<V, E, Ty: EdgeType, G, F>(
graph: &G,
start: VertexIndex,
goal: Option<VertexIndex>,
edge_weight: F,
algo: Option<Algo>,
) -> Result<Self, Error>
where
G: Vertices<V>
+ Edges<E, Ty>
+ VerticesWeak<V>
+ EdgesWeak<E, Ty, EdgeIndex = EdgeIndex>
+ Neighbors,
F: Fn(&E) -> W,
{
let algo = algo.unwrap_or_else(|| {
if !W::is_unsigned() {
// There is a possibility that a negative weight is encountered,
// so we conservatively use Bellman-Ford.
Algo::BellmanFord
} else if goal.is_some() {
// If the goal is specified, Dijkstra's algorithm likely
// finishes without the need of traversing the entire graph.
Algo::Dijkstra
} else {
let v = graph.vertex_count();
let e = graph.edge_count();
                // Compare the worst-case bounds. This will result in choosing
                // Dijkstra in the vast majority of cases.
if v * e < max(e, v * (v as f64).log2() as usize) {
Algo::BellmanFord
} else {
Algo::Dijkstra
}
}
});
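        // Illustrative numbers (not from the original source): with v = 1_000
        // and e = 10_000, Bellman-Ford's bound is v * e = 10^7 while Dijkstra's
        // is roughly max(e, v * log2(v)) ≈ 10^4, so the comparison above selects
        // Dijkstra.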
match algo {
Algo::Dijkstra => dijkstra(graph, start, goal, edge_weight),
Algo::BellmanFord => bellman_ford(graph, start, edge_weight),
}
}
pub fn run<V, E, Ty: EdgeType, G, F>(
graph: &G,
start: VertexIndex,
goal: Option<VertexIndex>,
edge_weight: F,
) -> Result<Self, Error>
where
G: Vertices<V>
+ Edges<E, Ty>
+ VerticesWeak<V>
+ EdgesWeak<E, Ty, EdgeIndex = EdgeIndex>
+ Neighbors,
F: Fn(&E) -> W,
{
Self::run_algo(graph, start, goal, edge_weight, None)
}
pub fn run_dijkstra<V, E, Ty: EdgeType, G, F>(
graph: &G,
start: VertexIndex,
goal: Option<VertexIndex>,
edge_weight: F,
) -> Result<Self, Error>
where
G: VerticesWeak<V> + EdgesWeak<E, Ty, EdgeIndex = EdgeIndex> + Neighbors,
F: Fn(&E) -> W,
{
dijkstra(graph, start, goal, edge_weight)
}
pub fn run_bellman_ford<V, E, Ty: EdgeType, G, F>(
graph: &G,
start: VertexIndex,
edge_weight: F,
) -> Result<Self, Error>
where
G: Vertices<V> + Edges<E, Ty>,
F: Fn(&E) -> W,
{
bellman_ford(graph, start, edge_weight)
}
pub fn start(&self) -> VertexIndex {
self.start
}
pub fn dist(&self, from: VertexIndex) -> Option<&W> {
self.dist.get(&from)
}
pub fn reconstruct(&self, from: VertexIndex) -> PathReconstruction<'_> {
PathReconstruction {
curr: from,
pred: &self.pred,
}
}
}
pub struct PathReconstruction<'a> {
curr: VertexIndex,
pred: &'a FxHashMap<VertexIndex, VertexIndex>,
}
impl<'a> Iterator for PathReconstruction<'a> {
type Item = VertexIndex;
fn next(&mut self) -> Option<Self::Item> {
self.curr = self.pred.get(&self.curr).copied()?;
Some(self.curr)
}
}
pub fn identity<E: Clone>(edge: &E) -> E {
edge.clone()
}
pub fn unit<E>(_edge: &E) -> usize {
1
}
fn dijkstra<'a, V, E, Ty: EdgeType, G, W, F>(
graph: &G,
start: VertexIndex,
goal: Option<VertexIndex>,
edge_weight: F,
) -> Result<ShortestPaths<W>, Error>
where
G: VerticesWeak<V> + EdgesWeak<E, Ty, EdgeIndex = EdgeIndex> + Neighbors,
W: Weight,
F: Fn(&E) -> W,
{
// Not using FixedBitSet with CompactIndexMap because the algorithm supports
// early termination when reaching given goal. It is likely that reaching
// goal means visiting a subgraph which is significantly smaller than the
// original graph.
let mut visited: FxHashSet<_> = HashSet::with_capacity_and_hasher(
graph.vertex_count_hint().unwrap_or(32),
BuildHasherDefault::<FxHasher>::default(),
);
let mut dist = FxHashMap::default();
let mut pred = FxHashMap::default();
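    // `BinaryHeap` is a max-heap, so items are wrapped in `Reverse` below to
    // always pop the vertex with the smallest tentative distance first.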
let mut queue = BinaryHeap::new();
dist.insert(start, W::zero());
queue.push(Reverse(Weighted(start, W::zero())));
while let Some(Reverse(Weighted(vertex, vertex_dist))) = queue.pop() {
// This can happen due to duplication of vertices when doing relaxation
// in our implementation.
if visited.is_visited(vertex) {
continue;
}
if goal.as_ref() == Some(&vertex) {
break;
}
for neighbor in graph.neighbors_directed(vertex, Outgoing) {
let edge = graph.edge_weak(neighbor.edge()).unwrap();
let next = neighbor.index();
if visited.is_visited(next) {
continue;
}
let edge_dist = edge_weight(edge.as_ref());
            // The unsignedness check should let the compiler eliminate the
            // negative-weight check, because the implementation of the
            // `is_unsigned` method is a constant boolean in practice.
if !W::is_unsigned() && edge_dist < W::zero() {
return Err(Error::NegativeWeight);
}
let next_dist = vertex_dist.clone() + edge_dist;
match dist.entry(next) {
Entry::Occupied(curr_dist) => {
// Relaxation operation. If the distance is better than what
// we had so far, update it.
if next_dist < *curr_dist.get() {
*curr_dist.into_mut() = next_dist.clone();
                        // A textbook version of the algorithm would update the
                        // priority of `next` in place. Pushing it as a new item
                        // instead causes duplicates, which is unfortunate for
                        // dense graphs, but should be fine in practice.
queue.push(Reverse(Weighted(next, next_dist)));
pred.insert(next, vertex);
}
}
Entry::Vacant(slot) => {
slot.insert(next_dist.clone());
queue.push(Reverse(Weighted(next, next_dist)));
pred.insert(next, vertex);
}
}
// The vertex is finished.
visited.visit(vertex);
}
}
Ok(ShortestPaths { start, dist, pred })
}
fn bellman_ford<'a, V, E, Ty: EdgeType, G, W, F>(
graph: &G,
start: VertexIndex,
edge_weight: F,
) -> Result<ShortestPaths<W>, Error>
where
G: Vertices<V> + Edges<E, Ty>,
W: Weight,
F: Fn(&E) -> W,
{
let vertex_map = graph.vertex_index_map();
let mut dist = vec![W::inf(); vertex_map.len()];
let mut pred = vec![Virtual::null(); vertex_map.len()];
dist[vertex_map.virt(start).to_usize()] = W::zero();
let mut terminated_early = false;
// Try to relax edges |V| - 1 times.
for _ in 1..graph.vertex_count() {
let mut relaxed = false;
for edge in graph.edges() {
let u = vertex_map.virt(edge.src());
let v = vertex_map.virt(edge.dst());
let edge_dist = edge_weight(edge.data());
let next_dist = dist[u.to_usize()].clone() + edge_dist;
// Relax if better.
if next_dist < dist[v.to_usize()] {
dist[v.to_usize()] = next_dist;
pred[v.to_usize()] = u;
relaxed = true;
}
}
        // If no distance was improved, then subsequent iterations would not
        // improve either, so we can terminate early.
if !relaxed {
terminated_early = true;
break;
}
}
    // Check for negative cycles. If the main loop terminated early, then
    // the absence of a negative cycle is guaranteed.
if !terminated_early {
for edge in graph.edges() {
let u = vertex_map.virt(edge.src());
let v = vertex_map.virt(edge.dst());
let edge_dist = edge_weight(edge.data());
if dist[u.to_usize()].clone() + edge_dist < dist[v.to_usize()] {
return Err(Error::NegativeCycle);
}
}
}
let dist = dist
.into_iter()
.enumerate()
.filter_map(|(i, d)| {
if d != W::inf() {
Some((vertex_map.real(Virtual::new(i)), d))
} else {
None
}
})
.collect();
let pred = pred
.into_iter()
.enumerate()
.filter_map(|(i, p)| {
if !p.is_null() {
Some((vertex_map.real(Virtual::new(i)), vertex_map.real(p)))
} else {
None
}
})
.collect();
Ok(ShortestPaths { start, dist, pred })
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use super::*;
use crate::marker::{Directed, Undirected};
use crate::storage::AdjList;
fn create_basic_graph() -> AdjList<(), i32, Undirected> |
#[test]
fn dijkstra_basic() {
let graph = create_basic_graph();
let shortest_paths = ShortestPaths::run_dijkstra(&graph, 0.into(), None, identity).unwrap();
assert_eq!(shortest_paths.dist(4.into()), Some(&8));
assert_eq!(
shortest_paths.reconstruct(4.into()).collect::<Vec<_>>(),
vec![3.into(), 1.into(), 0.into()]
);
assert_eq!(shortest_paths.dist(2.into()), Some(&2));
}
#[test]
fn dijkstra_early_termination() {
let graph = create_basic_graph();
let shortest_paths =
ShortestPaths::run_dijkstra(&graph, 0.into(), Some(4.into()), identity).unwrap();
assert!(shortest_paths.dist(5.into()).is_none());
}
#[test]
fn dijkstra_negative_edge() {
let mut graph = create_basic_graph();
graph.replace_edge(2.into(), -1);
let shortest_paths =
ShortestPaths::run_dijkstra(&graph, 0.into(), Some(4.into()), identity);
assert_matches!(shortest_paths, Err(Error::NegativeWeight));
}
#[test]
fn bellman_ford_basic() {
let graph = create_basic_graph();
let shortest_paths = ShortestPaths::run_bellman_ford(&graph, 0.into(), identity).unwrap();
assert_eq!(shortest_paths.dist(4.into()), Some(&8));
assert_eq!(
shortest_paths.reconstruct(4.into()).collect::<Vec<_>>(),
vec![3.into(), 1.into(), 0.into()]
);
assert_eq!(shortest_paths.dist(2.into()), Some(&2));
}
#[test]
fn bellman_ford_negative_edge() {
let mut graph = create_basic_graph();
graph.replace_edge(2.into(), -1);
let shortest_paths = ShortestPaths::run_bellman_ford(&graph, 0.into(), identity).unwrap();
assert_eq!(shortest_paths.dist(4.into()), Some(&8));
assert_eq!(
shortest_paths.reconstruct(4.into()).collect::<Vec<_>>(),
vec![3.into(), 1.into(), 0.into()]
);
assert_eq!(shortest_paths.dist(2.into()), Some(&2));
}
#[test]
fn bellman_ford_negative_cycle() {
let mut graph = AdjList::<(), i32, Directed>::new();
let v0 = graph.add_vertex(());
let v1 = graph.add_vertex(());
let v2 = graph.add_vertex(());
let v3 = graph.add_vertex(());
let v4 = graph.add_vertex(());
graph.add_edge(v0, v1, 3);
graph.add_edge(v1, v2, -2);
graph.add_edge(v2, v3, 2);
graph.add_edge(v2, v1, -2);
graph.add_edge(v2, v4, 3);
let shortest_paths = ShortestPaths::run_bellman_ford(&graph, 0.into(), identity);
assert_matches!(shortest_paths, Err(Error::NegativeCycle));
}
}
| {
let mut graph = AdjList::default();
let v0 = graph.add_vertex(());
let v1 = graph.add_vertex(());
let v2 = graph.add_vertex(());
let v3 = graph.add_vertex(());
let v4 = graph.add_vertex(());
let v5 = graph.add_vertex(());
graph.add_edge(v0, v1, 3);
graph.add_edge(v0, v2, 2);
graph.add_edge(v1, v2, 2);
graph.add_edge(v1, v3, 2);
graph.add_edge(v1, v4, 7);
graph.add_edge(v2, v3, 5);
graph.add_edge(v3, v4, 3);
graph.add_edge(v4, v5, 10);
graph
} |
imitation_learning.py | import gym
import time
import pickle
import argparse
import numpy as np
import tensorflow as tf
from typing import Callable, Union, Tuple, List
from models.models import actor_fc_discrete_network, actor_critic_fc_discrete_network
from algorithms.imitation.utils import plot_training_results
from util.replay_buffer import ReplayBuffer
# Set up
GAMMA = 0.99
LEARNING_RATE = 0.0001
class ImitationAgent:
def __init__(self,
environment: gym.Env,
model_fn: Callable[..., tf.keras.Model],
optimizer: tf.keras.optimizers,
run_dagger: bool,
expert_policy,
expert_data_path,
replay_buffer: ReplayBuffer,
model_kwargs: dict = None,
train_kwargs: dict = None,
save_dir: str = None) -> None:
# Env vars
self.env = environment
self.state_dims = model_kwargs.get('state_dims')
self.num_actions = model_kwargs.get('num_actions')
num_hidden_layers = model_kwargs.get("num_hidden_layers")
hidden_size = model_kwargs.get("hidden_size")
# Algorithm
self.run_dagger = run_dagger
# Expert
self.expert_policy = expert_policy
self.expert_data = ImitationAgent.load_expert_data(expert_data_path)
# Actor model
self.model = model_fn(state_dims=self.state_dims,
num_actions=self.num_actions,
num_hidden_layers=num_hidden_layers,
hidden_size=hidden_size)
self.optimizer = optimizer
self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) # Discrete action space only
# Replay buffer
self.replay_buffer = replay_buffer
# Training vars
self.cur_episode = 0
self.total_steps = 0
self.max_ep_len = train_kwargs.get("max_ep_len")
self.batch_size = train_kwargs.get("batch_size") # Batch size of data collection from buffer
self.train_batch_size = train_kwargs.get('train_batch_size') # Batch size for training models
self.eval_batch_size = train_kwargs.get('eval_batch_size') # Batch size for eval
self.num_agent_train_steps_per_iter = train_kwargs.get('num_agent_train_steps_per_iter') # Grad updates per run
# Save directories
self.save_dir = save_dir
def save_models(self) -> None:
self.model.save(self.save_dir)
def load_models(self) -> tf.keras.Model:
self.model = tf.keras.models.load_model(self.save_dir)
return self.model
@staticmethod
def load_expert_data(path):
with open(path, 'rb') as f:
expert_data = pickle.load(f)
return expert_data
def sample_random_trajectory(self) -> Tuple[List[Tuple], Union[int, float]]:
"""
        Sample a single trajectory with the current policy, capped at `self.max_ep_len` steps.
        :return: the list of transition tuples (s, a, r, s', d) and the total reward
"""
state = tf.expand_dims(tf.convert_to_tensor(self.env.reset()), 0)
num_steps = 0
total_rewards = 0
transitions = [] # transition tuples (s,a,r,s',d)
while True:
num_steps += 1
action_prob = self.model(state)
action = np.random.choice(self.num_actions, p=np.squeeze(action_prob))
next_state, reward, done, _ = self.env.step(action)
next_state = tf.reshape(next_state, [1, self.state_dims])
total_rewards += reward
if done or num_steps > self.max_ep_len:
transitions.append((state, action, reward, next_state, 1))
break
transitions.append((state, action, reward, next_state, 0))
state = next_state
return transitions, total_rewards
def sample_n_trajectories(self) -> Tuple[List, List, int]:
"""
Sample `self.batch_size` trajectories. Each trajectory should be no longer than
`max_path_length` steps/transitions. Note that transitions are different than trajectories!
A transition is a tuple (s,a,r,s',d) and a trajectory is made up of 1 to `max_path_length` transitions.
:param batch_size: The number of transitions to sample.
:param max_path_length: The maximum steps/transitions per trajectory | num_steps_this_batch = 0
trajectory_rewards = []
transitions = []
while num_steps_this_batch < self.batch_size:
traj, rews = self.sample_random_trajectory()
num_steps_this_batch += len(traj)
trajectory_rewards.append(rews)
# Note that we're extending, not appending, because we don't care about trajectories, we care about
# the transitions. If we appended, it would be ([[(tran 1), (tran 2)], ..., [(tran n), (tran n+1)]],
# where each sublist is a trajectory. But by extending, it's instead ([(tran 1), ..., (tran n)]
transitions.extend(traj)
return transitions, trajectory_rewards, num_steps_this_batch
def relabel_actions_with_expert(self, transitions: List[Tuple]) -> List[Tuple]:
"""
Given a batch of transition tuples, query the Expert Policy and update the action based on
the Expert. This is the key difference between vanilla behavioral cloning and DAgger. This
        step is equivalent to asking a human expert to label our dataset with the correct actions.
"""
updated_transitions = []
for transition in transitions:
state, action, reward, next_state, done = transition
action_prob, _ = self.expert_policy(state)
expert_action = np.argmax(np.squeeze(action_prob))
updated_transitions.append((state, expert_action, reward, next_state, done))
return updated_transitions
def train_episode(self) -> List:
# Step 1: Sample trajectories
if self.cur_episode == 0:
# Load expert_data
transitions = self.expert_data
else:
# Or sample trajectories using current policy
transitions, _, _ = self.sample_n_trajectories()
# Step 2: For DAgger only, ask expert policy to label data with actions
if self.run_dagger and self.cur_episode > 0:
transitions = self.relabel_actions_with_expert(transitions)
# Step 3: Store the sampled transitions in the replay buffer
self.replay_buffer.store_transitions_batch(transitions)
# Step 4: Train model!
losses = []
for train_step in range(self.num_agent_train_steps_per_iter):
# Sample a random batch of data from the replay buffer
states, actions, _, _, _ = self.replay_buffer.sample(batch_size=self.train_batch_size)
with tf.GradientTape() as tape:
action_prob = self.model(states)
loss = self.loss_fn(actions, action_prob)
grads = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
losses.append(loss)
self.cur_episode += 1
return losses
def run_agent(self, render=False) -> Tuple[float, int]:
total_reward, total_steps = 0, 0
state = self.env.reset()
done = False
while not done:
if render:
self.env.render()
# Select action
action_prob = self.model(tf.expand_dims(state, axis=0))
action = np.argmax(np.squeeze(action_prob))
# Interact with environment
state, reward, done, _ = self.env.step(action)
# Bookkeeping
total_reward += reward
total_steps += 1
return total_reward, total_steps
def main() -> None:
# Check input params
if args.run_dagger:
        assert args.epochs > 1, "DAgger needs more than 1 iteration of training, where in each iter " \
                                "we query the expert and train"
else:
        assert args.epochs == 1, "Vanilla behavior cloning collects expert data only once and does traditional " \
                                 "supervised learning on that dataset."
# Create environment
env = gym.make(args.env)
# Set seeds
if args.seed:
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
env.seed(args.seed)
# Create helper vars for model creation
_state_dims = len(env.observation_space.high)
_action_dims = 1
_num_actions = env.action_space.n
# Create Replay Buffer
buffer = ReplayBuffer(state_dims=_state_dims, action_dims=_action_dims)
# Instantiate optimizer
opt = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
# Instantiate expert policy from file
# TODO >> I think it's a bit cleaner to load the entire model instead of just the weights
# but I'm getting a TF error that I think was fixed in a later version. I should probably
# try updating the version and seeing if it fixes itself.
expert = actor_critic_fc_discrete_network(state_dims=_state_dims,
num_actions=_num_actions,
num_hidden_layers=2,
hidden_size=128)
expert.load_weights(args.expert_policy_file)
# Create agent
agent = ImitationAgent(environment=env,
model_fn=actor_fc_discrete_network,
optimizer=opt,
replay_buffer=buffer,
run_dagger=args.run_dagger,
expert_policy=expert,
expert_data_path=args.expert_data,
model_kwargs=dict(state_dims=_state_dims,
num_actions=_num_actions,
num_hidden_layers=2,
hidden_size=256),
train_kwargs=dict(max_ep_len=args.max_ep_len,
batch_size=args.batch_size,
train_batch_size=args.train_batch_size,
eval_batch_size=args.eval_batch_size,
num_agent_train_steps_per_iter=args.num_agent_train_steps_per_iter)
)
# Run training
ep_mean_rewards_history, ep_max_rewards_history, ep_min_rewards_history = [], [], []
ep_mean_loss_history, ep_max_loss_history, ep_min_loss_history = [], [], []
ep_steps_history = []
ep_wallclock_history = []
start = time.time()
for e in range(args.epochs):
# Run one episode
ep_loss = agent.train_episode()
ep_rew, ep_steps = agent.run_agent()
# Prepare for logging
mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew = np.mean(ep_rew), np.max(ep_rew), np.min(ep_rew), np.std(ep_rew)
mean_ep_loss, max_ep_loss, min_ep_loss = np.mean(ep_loss), np.max(ep_loss), np.min(ep_loss)
ep_wallclock_history.append(time.time() - start)
ep_mean_rewards_history.append(mean_ep_rew)
ep_max_rewards_history.append(max_ep_rew)
ep_min_rewards_history.append(min_ep_rew)
ep_mean_loss_history.append(mean_ep_loss)
ep_max_loss_history.append(max_ep_loss)
ep_min_loss_history.append(min_ep_loss)
ep_steps_history.append(ep_steps)
template = "EPISODE {} | mean ep reward: {:.2f} - max ep reward: {:.2f}" \
" - min ep reward: {:.2f} - std ep reward: {:.2f} - mean ep loss {:.2f}"
print(template.format(e, mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew, mean_ep_loss))
# Now that we've completed training, let's plot the results
print(f"Training time elapsed (sec): {round(time.time() - start, 2)}")
# Let's evaluate the performance of the trained agent
print("Beginning evaluation of trained agent!")
eval_rew = []
for i in range(50):
ep_rew, ep_steps = agent.run_agent()
eval_rew.append(ep_rew)
print(f"Evaluation rewards: mean - {np.mean(eval_rew)} | min - {np.min(eval_rew)} | max - {np.max(eval_rew)}")
# Plot summary of results
plot_training_results(mean_rewards_history=ep_mean_rewards_history,
max_rew_history=ep_max_rewards_history,
min_rew_history=ep_min_rewards_history,
mean_loss_history=ep_mean_loss_history,
max_loss_history=ep_max_loss_history,
min_loss_history=ep_min_loss_history,
steps_history=ep_steps_history,
wallclock_history=ep_wallclock_history,
save_dir="./results.png")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--env", type=str, default="CartPole-v0")
parser.add_argument('--expert_policy_file', type=str, default='./checkpoints/expert_model_weights')
parser.add_argument('--expert_data', type=str, default='expert_data.pkl')
# parser.add_argument("--run_dagger", action="store_false")
parser.add_argument("--run_dagger", type=bool, default=False)
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument('--max_ep_len', type=int, default=100) # max trajectory length
parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=20) # number of grad updates per iter
parser.add_argument('--batch_size', type=int, default=1000) # num steps/transitions to sample for itr 1+
parser.add_argument('--train_batch_size', type=int, default=512) # training batch size per model
parser.add_argument('--eval_batch_size', type=int, default=400) # steps collected per eval iteration
args = parser.parse_args()
main() | :param random: Boolean to indicate whether or not to sample actions randomly or via MPC
:return:
""" |
issue-4446.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io::println;
pub fn | () {
let (tx, rx) = channel();
spawn(proc() {
println(rx.recv());
});
tx.send("hello, world");
}
| main |
program.py | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from collections import OrderedDict
import paddle.fluid as fluid
from ppcls.optimizer import LearningRateBuilder
from ppcls.optimizer import OptimizerBuilder
from ppcls.modeling import architectures
from ppcls.modeling.loss import CELoss
from ppcls.modeling.loss import MixCELoss
from ppcls.modeling.loss import JSDivLoss
from ppcls.modeling.loss import GoogLeNetLoss
from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.collective import DistributedStrategy
from ema import ExponentialMovingAverage
def create_feeds(image_shape, use_mix=None):
"""
Create feeds as model input
Args:
image_shape(list[int]): model input shape, such as [3, 224, 224]
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
feeds(dict): dict of model input variables
"""
feeds = OrderedDict()
feeds['image'] = fluid.data(
name="feed_image", shape=[None] + image_shape, dtype="float32")
if use_mix:
feeds['feed_y_a'] = fluid.data(
name="feed_y_a", shape=[None, 1], dtype="int64")
feeds['feed_y_b'] = fluid.data(
name="feed_y_b", shape=[None, 1], dtype="int64")
feeds['feed_lam'] = fluid.data(
name="feed_lam", shape=[None, 1], dtype="float32")
else:
feeds['label'] = fluid.data(
name="feed_label", shape=[None, 1], dtype="int64")
return feeds
def create_dataloader(feeds):
"""
Create a dataloader with model input variables
Args:
feeds(dict): dict of model input variables
Returns:
dataloader(fluid dataloader):
"""
trainer_num = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
capacity = 64 if trainer_num <= 1 else 8
dataloader = fluid.io.DataLoader.from_generator(
feed_list=feeds,
capacity=capacity,
use_double_buffer=True,
iterable=True)
return dataloader
def create_model(architecture, image, classes_num, is_train):
"""
Create a model
Args:
architecture(dict): architecture information,
name(such as ResNet50) is needed
image(variable): model input variable
classes_num(int): num of classes
Returns:
out(variable): model output variable
"""
name = architecture["name"]
params = architecture.get("params", {})
if "is_test" in params:
params['is_test'] = not is_train
model = architectures.__dict__[name](**params)
out = model.net(input=image, class_dim=classes_num)
return out
def create_loss(out,
feeds,
architecture,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
"""
Create a loss for optimization, such as:
        1. CrossEntropy loss
        2. CrossEntropy loss with label smoothing
        3. CrossEntropy loss with mix(mixup, cutmix, fmix)
        4. CrossEntropy loss with label smoothing and (mixup, cutmix, fmix)
5. GoogLeNet loss
Args:
out(variable): model output variable
feeds(dict): dict of model input variables
architecture(dict): architecture information,
name(such as ResNet50) is needed
classes_num(int): num of classes
epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
loss(variable): loss variable
"""
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
loss = GoogLeNetLoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out[0], out[1], out[2], target)
if use_distillation:
assert len(out) == 2, ("distillation output length must be 2, "
"but got {}".format(len(out)))
loss = JSDivLoss(class_dim=classes_num, epsilon=epsilon)
return loss(out[1], out[0])
if use_mix:
loss = MixCELoss(class_dim=classes_num, epsilon=epsilon)
feed_y_a = feeds['feed_y_a']
feed_y_b = feeds['feed_y_b']
feed_lam = feeds['feed_lam']
return loss(out, feed_y_a, feed_y_b, feed_lam)
else:
loss = CELoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out, target)
def create_metric(out,
feeds,
architecture,
topk=5,
classes_num=1000,
use_distillation=False):
"""
Create measures of model accuracy, such as top1 and top5
Args:
out(variable): model output variable
feeds(dict): dict of model input variables(included label)
topk(int): usually top5
classes_num(int): num of classes
Returns:
fetchs(dict): dict of measures
"""
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
softmax_out = out[0]
else:
# just need student label to get metrics
if use_distillation:
out = out[1]
softmax_out = fluid.layers.softmax(out, use_cudnn=False)
fetchs = OrderedDict()
# set top1 to fetchs
top1 = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=1)
fetchs['top1'] = (top1, AverageMeter('top1', '.4f', need_avg=True))
# set topk to fetchs
k = min(topk, classes_num)
topk = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=k)
topk_name = 'top{}'.format(k)
fetchs[topk_name] = (topk, AverageMeter(topk_name, '.4f', need_avg=True))
return fetchs
def create_fetchs(out,
feeds,
architecture,
topk=5,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
"""
    Create fetchs as model outputs (including loss and metrics);
    will call create_loss and, unless use_mix is set, create_metric.
Args:
out(variable): model output variable
feeds(dict): dict of model input variables.
If use mix_up, it will not include label.
architecture(dict): architecture information,
name(such as ResNet50) is needed
topk(int): usually top5
classes_num(int): num of classes
epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
fetchs(dict): dict of model outputs(included loss and measures)
"""
fetchs = OrderedDict()
loss = create_loss(out, feeds, architecture, classes_num, epsilon, use_mix,
use_distillation)
fetchs['loss'] = (loss, AverageMeter('loss', '7.4f', need_avg=True))
if not use_mix:
metric = create_metric(out, feeds, architecture, topk, classes_num,
use_distillation)
fetchs.update(metric)
return fetchs
def create_optimizer(config):
"""
Create an optimizer using config, usually including
learning rate and regularization.
Args:
config(dict): such as
{
'LEARNING_RATE':
{'function': 'Cosine',
'params': {'lr': 0.1}
},
'OPTIMIZER':
{'function': 'Momentum',
'params':{'momentum': 0.9},
'regularizer':
{'function': 'L2', 'factor': 0.0001}
}
}
Returns:
an optimizer instance
"""
# create learning_rate instance
lr_config = config['LEARNING_RATE']
lr_config['params'].update({
'epochs': config['epochs'],
'step_each_epoch':
config['total_images'] // config['TRAIN']['batch_size'],
})
lr = LearningRateBuilder(**lr_config)()
# create optimizer instance
opt_config = config['OPTIMIZER']
opt = OptimizerBuilder(**opt_config)
return opt(lr)
def dist_optimizer(config, optimizer):
"""
Create a distributed optimizer based on a normal optimizer
Args:
config(dict):
optimizer(): a normal optimizer
Returns:
optimizer: a distributed optimizer
"""
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 3
exec_strategy.num_iteration_per_drop_scope = 10
dist_strategy = DistributedStrategy()
dist_strategy.nccl_comm_num = 1
dist_strategy.fuse_all_reduce_ops = True
dist_strategy.exec_strategy = exec_strategy
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer
def mixed_precision_optimizer(config, optimizer):
use_fp16 = config.get('use_fp16', False)
amp_scale_loss = config.get('amp_scale_loss', 1.0)
use_dynamic_loss_scaling = config.get('use_dynamic_loss_scaling', False)
if use_fp16:
optimizer = fluid.contrib.mixed_precision.decorate( | init_loss_scaling=amp_scale_loss,
use_dynamic_loss_scaling=use_dynamic_loss_scaling)
return optimizer
def build(config, main_prog, startup_prog, is_train=True):
"""
Build a program using a model and an optimizer
1. create feeds
2. create a dataloader
3. create a model
4. create fetchs
5. create an optimizer
Args:
config(dict): config
main_prog(): main program
startup_prog(): startup program
is_train(bool): train or valid
Returns:
dataloader(): a bridge between the model and the data
fetchs(dict): dict of model outputs(included loss and measures)
"""
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
use_mix = config.get('use_mix') and is_train
use_distillation = config.get('use_distillation')
feeds = create_feeds(config.image_shape, use_mix=use_mix)
dataloader = create_dataloader(feeds.values())
out = create_model(config.ARCHITECTURE, feeds['image'],
config.classes_num, is_train)
fetchs = create_fetchs(
out,
feeds,
config.ARCHITECTURE,
config.topk,
config.classes_num,
epsilon=config.get('ls_epsilon'),
use_mix=use_mix,
use_distillation=use_distillation)
if is_train:
optimizer = create_optimizer(config)
lr = optimizer._global_learning_rate()
fetchs['lr'] = (lr, AverageMeter('lr', 'f', need_avg=False))
optimizer = mixed_precision_optimizer(config, optimizer)
optimizer = dist_optimizer(config, optimizer)
optimizer.minimize(fetchs['loss'][0])
if config.get('use_ema'):
global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter(
)
ema = ExponentialMovingAverage(
config.get('ema_decay'), thres_steps=global_steps)
ema.update()
return dataloader, fetchs, ema
return dataloader, fetchs
def compile(config, program, loss_name=None):
"""
Compile the program
Args:
config(dict): config
        program(): the program to be wrapped into a compiled program
loss_name(str): loss name
Returns:
compiled_program(): a compiled program
"""
build_strategy = fluid.compiler.BuildStrategy()
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10
compiled_program = fluid.CompiledProgram(program).with_data_parallel(
loss_name=loss_name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
return compiled_program
total_step = 0
def run(dataloader,
exe,
program,
fetchs,
epoch=0,
mode='train',
vdl_writer=None):
"""
Feed data to the model and fetch the measures and loss
Args:
dataloader(fluid dataloader):
exe():
program():
fetchs(dict): dict of measures and the loss
epoch(int): epoch of training or validation
        mode(str): 'train', 'valid' or 'eval'; used only for logging
Returns:
"""
fetch_list = [f[0] for f in fetchs.values()]
metric_list = [f[1] for f in fetchs.values()]
for m in metric_list:
m.reset()
batch_time = AverageMeter('elapse', '.3f')
tic = time.time()
for idx, batch in enumerate(dataloader()):
metrics = exe.run(program=program, feed=batch, fetch_list=fetch_list)
batch_time.update(time.time() - tic)
tic = time.time()
for i, m in enumerate(metrics):
metric_list[i].update(m[0], len(batch[0]))
fetchs_str = ''.join([str(m.value) + ' '
for m in metric_list] + [batch_time.value]) + 's'
if vdl_writer:
global total_step
logger.scaler('loss', metrics[0][0], total_step, vdl_writer)
total_step += 1
if mode == 'eval':
logger.info("{:s} step:{:<4d} {:s}s".format(mode, idx, fetchs_str))
else:
epoch_str = "epoch:{:<3d}".format(epoch)
step_str = "{:s} step:{:<4d}".format(mode, idx)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(epoch_str, "HEADER")
if idx == 0 else epoch_str,
logger.coloring(step_str, "PURPLE"),
logger.coloring(fetchs_str, 'OKGREEN')))
end_str = ''.join([str(m.mean) + ' '
for m in metric_list] + [batch_time.total]) + 's'
if mode == 'eval':
logger.info("END {:s} {:s}s".format(mode, end_str))
else:
end_epoch_str = "END epoch:{:<3d}".format(epoch)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(end_epoch_str, "RED"),
logger.coloring(mode, "PURPLE"),
logger.coloring(end_str, "OKGREEN")))
# return top1_acc in order to save the best model
if mode == 'valid':
return fetchs["top1"][1].avg | optimizer, |
generic_scheduler_test.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
"fmt"
"math"
"reflect"
"strconv"
"strings"
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
algorithmpredicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
algorithmpriorities "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
)
var (
errPrioritize = fmt.Errorf("priority map encounters an error")
order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred}
)
func falsePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
}
func truePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
return true, nil, nil
}
func matchesPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
}
if pod.Name == node.Name {
return true, nil, nil
}
return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
}
func hasNoPodsPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
if len(nodeInfo.Pods()) == 0 {
return true, nil, nil
}
return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
}
func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := []schedulerapi.HostPriority{}
for _, node := range nodes {
score, err := strconv.Atoi(node.Name)
if err != nil {
return nil, err
}
result = append(result, schedulerapi.HostPriority{
Host: node.Name,
Score: score,
})
}
return result, nil
}
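// reverseNumericPriority mirrors numericPriority but flips each score to
// max + min - score, so the numerically smallest node name ends up highest.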
func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
var maxScore float64
minScore := math.MaxFloat64
reverseResult := []schedulerapi.HostPriority{}
result, err := numericPriority(pod, nodeNameToInfo, nodes)
if err != nil {
return nil, err
}
for _, hostPriority := range result {
maxScore = math.Max(maxScore, float64(hostPriority.Score))
minScore = math.Min(minScore, float64(hostPriority.Score))
}
for _, hostPriority := range result {
reverseResult = append(reverseResult, schedulerapi.HostPriority{
Host: hostPriority.Host,
Score: int(maxScore + minScore - float64(hostPriority.Score)),
})
}
return reverseResult, nil
}
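// trueMapPriority gives every node a score of 1.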
func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
return schedulerapi.HostPriority{
Host: nodeInfo.Node().Name,
Score: 1,
}, nil
}
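// falseMapPriority always fails with errPrioritize.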
func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
return schedulerapi.HostPriority{}, errPrioritize
}
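// getNodeReducePriority verifies that every scored host has a non-empty name.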
func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
for _, host := range result {
if host.Host == "" {
return fmt.Errorf("unexpected empty host name")
}
}
return nil
}
// EmptyPluginSet is a no-op plugin set used by these scheduler tests.
type EmptyPluginSet struct{}
var _ plugins.PluginSet = EmptyPluginSet{}
// ReservePlugins returns an empty slice of reserve plugins.
func (r EmptyPluginSet) ReservePlugins() []plugins.ReservePlugin {
return []plugins.ReservePlugin{}
}
// PrebindPlugins returns an empty slice of prebind plugins.
func (r EmptyPluginSet) PrebindPlugins() []plugins.PrebindPlugin {
return []plugins.PrebindPlugin{}
}
// Data returns a pointer to PluginData.
func (r EmptyPluginSet) Data() *plugins.PluginData {
return &plugins.PluginData{}
}
var emptyPluginSet = &EmptyPluginSet{}
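// makeNodeList builds bare Node objects from the given node names.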
func makeNodeList(nodeNames []string) []*v1.Node {
result := make([]*v1.Node, 0, len(nodeNames))
for _, nodeName := range nodeNames {
result = append(result, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
}
return result
}
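// TestSelectHost verifies that selectHost picks one of the highest-scoring hosts and returns an error for an empty list.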
func TestSelectHost(t *testing.T) {
scheduler := genericScheduler{}
tests := []struct {
name string
list schedulerapi.HostPriorityList
possibleHosts sets.String
expectsErr bool
}{
{
name: "unique properly ordered scores",
list: []schedulerapi.HostPriority{
{Host: "machine1.1", Score: 1},
{Host: "machine2.1", Score: 2},
},
possibleHosts: sets.NewString("machine2.1"),
expectsErr: false,
},
{
name: "equal scores",
list: []schedulerapi.HostPriority{
{Host: "machine1.1", Score: 1},
{Host: "machine1.2", Score: 2},
{Host: "machine1.3", Score: 2},
{Host: "machine2.1", Score: 2},
},
possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"),
expectsErr: false,
},
{
name: "out of order scores",
list: []schedulerapi.HostPriority{
{Host: "machine1.1", Score: 3},
{Host: "machine1.2", Score: 3},
{Host: "machine2.1", Score: 2},
{Host: "machine3.1", Score: 1},
{Host: "machine1.3", Score: 3},
},
possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"),
expectsErr: false,
},
{
name: "empty priority list",
list: []schedulerapi.HostPriority{},
possibleHosts: sets.NewString(),
expectsErr: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// Run selectHost multiple times to exercise its randomized tie-breaking.
for i := 0; i < 10; i++ {
got, err := scheduler.selectHost(test.list)
if test.expectsErr {
if err == nil {
t.Error("Unexpected non-error")
}
} else {
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !test.possibleHosts.Has(got) {
t.Errorf("got %s is not in the possible map %v", got, test.possibleHosts)
}
}
}
})
}
}
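// TestGenericScheduler exercises Schedule end to end with fake predicates, priority functions, and PVC listers.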
func TestGenericScheduler(t *testing.T) {
algorithmpredicates.SetPredicatesOrdering(order)
tests := []struct {
name string
predicates map[string]algorithmpredicates.FitPredicate
prioritizers []algorithm.PriorityConfig
alwaysCheckAllPredicates bool
nodes []string
pvcs []*v1.PersistentVolumeClaim
pod *v1.Pod
pods []*v1.Pod
expectedHosts sets.String
expectsErr bool
wErr error
}{
{
predicates: map[string]algorithmpredicates.FitPredicate{"false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"},
expectsErr: true,
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
name: "test 1",
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 2,
FailedPredicates: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
}},
},
{
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
expectedHosts: sets.NewString("machine1", "machine2"),
name: "test 2",
wErr: nil,
},
{
// Fits on a machine where the pod name matches the machine name
predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine2", UID: types.UID("machine2")}},
expectedHosts: sets.NewString("machine2"),
name: "test 3",
wErr: nil,
},
{
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
expectedHosts: sets.NewString("3"),
name: "test 4",
wErr: nil,
},
{
predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
expectedHosts: sets.NewString("2"),
name: "test 5",
wErr: nil,
},
{
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
expectedHosts: sets.NewString("1"),
name: "test 6",
wErr: nil,
},
{
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
expectsErr: true,
name: "test 7",
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 3,
FailedPredicates: FailedPredicateMap{
"3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
},
},
},
{
predicates: map[string]algorithmpredicates.FitPredicate{
"nopods": hasNoPodsPredicate,
"matches": matchesPredicate,
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")},
Spec: v1.PodSpec{
NodeName: "2",
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"1", "2"},
expectsErr: true,
name: "test 8",
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 2,
FailedPredicates: FailedPredicateMap{
"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
},
},
},
{
// Pod with existing PVC
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"},
pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "existingPVC",
},
},
},
},
},
},
expectedHosts: sets.NewString("machine1", "machine2"),
name: "existing PVC",
wErr: nil,
},
{
// Pod with a non-existent PVC
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "unknownPVC",
},
},
},
},
},
},
name: "unknown PVC",
expectsErr: true,
wErr: fmt.Errorf("persistentvolumeclaim \"unknownPVC\" not found"),
},
{
// Pod with a PVC that is being deleted
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"},
pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "existingPVC",
},
},
},
},
},
},
name: "deleted PVC",
expectsErr: true,
wErr: fmt.Errorf("persistentvolumeclaim \"existingPVC\" is being deleted"),
},
{
// alwaysCheckAllPredicates is true
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate, "false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
alwaysCheckAllPredicates: true,
nodes: []string{"1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
name: "test alwaysCheckAllPredicates is true",
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 1,
FailedPredicates: FailedPredicateMap{
"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate, algorithmpredicates.ErrFakePredicate},
},
},
},
{
predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: falseMapPriority, Weight: 1}, {Map: trueMapPriority, Reduce: getNodeReducePriority, Weight: 2}},
nodes: []string{"2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
name: "test error with priority map",
wErr: errors.NewAggregate([]error{errPrioritize, errPrioritize}),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
for _, pod := range test.pods {
cache.AddPod(pod)
}
for _, name := range test.nodes {
cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}})
}
pvcs := []*v1.PersistentVolumeClaim{}
pvcs = append(pvcs, test.pvcs...)
pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs)
scheduler := NewGenericScheduler(
cache,
internalqueue.NewSchedulingQueue(nil),
test.predicates,
algorithmpredicates.EmptyPredicateMetadataProducer,
test.prioritizers,
algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet,
[]algorithm.SchedulerExtender{},
nil,
pvcLister,
schedulertesting.FakePDBLister{},
test.alwaysCheckAllPredicates,
false,
schedulerapi.DefaultPercentageOfNodesToScore)
result, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes)))
if !reflect.DeepEqual(err, test.wErr) {
t.Errorf("Unexpected error: %v, expected: %v", err, test.wErr)
}
if test.expectedHosts != nil && !test.expectedHosts.Has(result.SuggestedHost) {
t.Errorf("Expected: %s, got: %s", test.expectedHosts, result.SuggestedHost)
}
})
}
}
// makeScheduler makes a simple genericScheduler for testing.
func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes []*v1.Node) *genericScheduler {
algorithmpredicates.SetPredicatesOrdering(order)
cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
for _, n := range nodes {
cache.AddNode(n)
}
prioritizers := []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}
s := NewGenericScheduler(
cache,
internalqueue.NewSchedulingQueue(nil),
predicates,
algorithmpredicates.EmptyPredicateMetadataProducer,
prioritizers,
algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet,
nil, nil, nil, nil, false, false,
schedulerapi.DefaultPercentageOfNodesToScore)
cache.UpdateNodeNameToInfoMap(s.(*genericScheduler).cachedNodeInfoMap)
return s.(*genericScheduler)
}
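// TestFindFitAllError verifies that findNodesThatFit records a failure reason for every node when no node fits the pod.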
func TestFindFitAllError(t *testing.T) {
predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
nodes := makeNodeList([]string{"3", "2", "1"})
scheduler := makeScheduler(predicates, nodes)
_, predicateMap, err := scheduler.findNodesThatFit(&v1.Pod{}, nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(predicateMap) != len(nodes) {
t.Errorf("unexpected failed predicate map: %v", predicateMap)
}
for _, node := range nodes {
t.Run(node.Name, func(t *testing.T) {
failures, found := predicateMap[node.Name]
if !found {
t.Errorf("failed to find node in %v", predicateMap)
}
if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
t.Errorf("unexpected failures: %v", failures)
}
})
}
}
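// TestFindFitSomeError verifies that findNodesThatFit records failures for every node except the one whose name matches the pod.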
func TestFindFitSomeError(t *testing.T) {
predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
nodes := makeNodeList([]string{"3", "2", "1"})
scheduler := makeScheduler(predicates, nodes)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}}
_, predicateMap, err := scheduler.findNodesThatFit(pod, nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(predicateMap) != (len(nodes) - 1) {
t.Errorf("unexpected failed predicate map: %v", predicateMap)
}
for _, node := range nodes {
if node.Name == pod.Name {
continue
}
t.Run(node.Name, func(t *testing.T) {
failures, found := predicateMap[node.Name]
if !found {
t.Errorf("failed to find node in %v", predicateMap)
}
if len(failures) != 1 || failures[0] != algorithmpredicates.ErrFakePredicate {
t.Errorf("unexpected failures: %v", failures)
}
})
}
}
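// makeNode builds a Node with the given CPU (in millicores) and memory capacity, plus room for 100 pods.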
func makeNode(node string, milliCPU, memory int64) *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
"pods": *resource.NewQuantity(100, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
"pods": *resource.NewQuantity(100, resource.DecimalSI),
},
},
}
}
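// TestHumanReadableFitError verifies that FitError aggregates per-node failure reasons into a readable message.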
func TestHumanReadableFitError(t *testing.T) {
err := &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 3,
FailedPredicates: FailedPredicateMap{
"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
"3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
},
}
if strings.Contains(err.Error(), "0/3 nodes are available") {
if strings.Contains(err.Error(), "2 node(s) had disk pressure") && strings.Contains(err.Error(), "1 node(s) had memory pressure") {
return
}
}
t.Errorf("Error message doesn't have all the information content: [" + err.Error() + "]")
}
// The point of this test is to show that you:
// - get the same priority for a zero-request pod as for a pod with the default requests,
//   both when the zero-request pod is already on the machine and when the zero-request pod
//   is the one being scheduled.
// - don't just get the same score no matter what is scheduled.
func TestZeroRequest(t *testing.T) {
// A pod with no resources. We expect spreading to count it as having the default resources.
noResources := v1.PodSpec{
Containers: []v1.Container{
{},
},
}
noResources1 := noResources
noResources1.NodeName = "machine1"
// A pod that requests exactly the resources a zero-request pod is assumed to use by default (for spreading).
small := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
v1.ResourceMemory: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
},
},
},
},
}
small2 := small
small2.NodeName = "machine2"
// A larger pod.
large := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
v1.ResourceMemory: resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
},
},
},
},
}
large1 := large
large1.NodeName = "machine1"
large2 := large
large2.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
name string
expectedScore int
}{
// The point of these next two tests is to show that you get the same priority for a zero-request pod
// as for a pod with the default requests, both when the zero-request pod is already on the machine
// and when the zero-request pod is the one being scheduled.
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
name: "test priority of zero-request pod with machine with zero-request pod",
pods: []*v1.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 25,
},
{
pod: &v1.Pod{Spec: small},
nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
name: "test priority of nonzero-request pod with machine with zero-request pod",
pods: []*v1.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 25,
},
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
{
pod: &v1.Pod{Spec: large},
nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
name: "test priority of larger pod with machine with zero-request pod",
pods: []*v1.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 23,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// This should match the configuration in defaultPriorities() in
// pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
// to test what's actually in production.
priorityConfigs := []algorithm.PriorityConfig{
{Map: algorithmpriorities.LeastRequestedPriorityMap, Weight: 1},
{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
}
selectorSpreadPriorityMap, selectorSpreadPriorityReduce := algorithmpriorities.NewSelectorSpreadPriority(
schedulertesting.FakeServiceLister([]*v1.Service{}),
schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
schedulertesting.FakeReplicaSetLister([]*apps.ReplicaSet{}),
schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
pc := algorithm.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1}
priorityConfigs = append(priorityConfigs, pc)
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
metaDataProducer := algorithmpriorities.NewPriorityMetadataFactory(
schedulertesting.FakeServiceLister([]*v1.Service{}),
schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
schedulertesting.FakeReplicaSetLister([]*apps.ReplicaSet{}),
schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
metaData := metaDataProducer(test.pod, nodeNameToInfo)
list, err := PrioritizeNodes(
test.pod, nodeNameToInfo, metaData, priorityConfigs,
schedulertesting.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
for _, hp := range list {
if hp.Score != test.expectedScore {
t.Errorf("expected %d for all priorities, got list %#v", test.expectedScore, list)
}
}
})
}
}
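// printNodeToVictims renders a node-to-victims map as a short string for error messages.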
func printNodeToVictims(nodeToVictims map[*v1.Node]*schedulerapi.Victims) string {
var output string
for node, victims := range nodeToVictims {
output += node.Name + ": ["
for _, pod := range victims.Pods {
output += pod.Name + ", "
}
output += "]"
}
return output
}
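// checkPreemptionVictims verifies that the selected victims match the expected pods per node and are ordered by non-increasing priority.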
func checkPreemptionVictims(expected map[string]map[string]bool, nodeToPods map[*v1.Node]*schedulerapi.Victims) error {
if len(expected) == len(nodeToPods) {
for k, victims := range nodeToPods {
if expPods, ok := expected[k.Name]; ok {
if len(victims.Pods) != len(expPods) {
return fmt.Errorf("unexpected number of pods. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
}
prevPriority := int32(math.MaxInt32)
for _, p := range victims.Pods {
// Check that pods are sorted by their priority.
if *p.Spec.Priority > prevPriority {
return fmt.Errorf("pod %v of node %v was not sorted by priority", p.Name, k)
}
prevPriority = *p.Spec.Priority
if _, ok := expPods[p.Name]; !ok {
return fmt.Errorf("pod %v was not expected. Expected: %v", p.Name, expPods)
}
}
} else {
return fmt.Errorf("unexpected machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
}
}
} else {
return fmt.Errorf("unexpected number of machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
}
return nil
}
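// FakeNodeInfo is a node getter that always returns the node it wraps, regardless of the requested name.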
type FakeNodeInfo v1.Node
func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
node := v1.Node(n)
return &node, nil
}
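// PredicateMetadata builds predicate metadata for the given pod using a fake pod lister that contains only that pod.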
func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithmpredicates.PredicateMetadata {
return algorithmpredicates.NewPredicateMetadataFactory(schedulertesting.FakePodLister{p})(p, nodeInfo)
}
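// The container fixtures below request 1x, 2x, 3x, and 5x the default CPU and memory requests, respectively.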
var smallContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
},
},
},
}
var mediumContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*2, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*2, 10)),
},
},
},
}
var largeContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
},
},
},
}
var veryLargeContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCPURequest*5, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*5, 10)),
},
},
},
}
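// Priority values assigned to pods throughout the preemption tests.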
var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int32(-100), int32(0), int32(100), int32(1000), int32(10000)
// TestSelectNodesForPreemption tests selectNodesForPreemption. This test assumes
// that podFitsOnNode works correctly and is tested separately.
func TestSelectNodesForPreemption(t *testing.T) {
algorithmpredicates.SetPredicatesOrdering(order)
tests := []struct {
name string
predicates map[string]algorithmpredicates.FitPredicate
nodes []string
pod *v1.Pod
pods []*v1.Pod
expected map[string]map[string]bool // Map from node name to the set of pod names that should be preempted.
addAffinityPredicate bool
}{
{
name: "a pod that does not fit on any machine",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": falsePredicate},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{},
},
{
name: "a pod that fits with no preemption",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": truePredicate},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {}, "machine2": {}},
},
{
name: "a pod that fits on one machine with no preemption",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {}},
},
{
name: "a pod that fits on both machines when lower priority pods are preempted",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"a": true}, "machine2": {"b": true}},
},
{
name: "a pod that would fit on the machines, but other pods running are higher priority",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &lowPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{},
},
{
name: "medium priority pod is preempted, but lower priority one stays as it is small",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", UID: types.UID("c")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"b": true}, "machine2": {"c": true}},
},
{
name: "mixed priority pods are preempted",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", UID: types.UID("c")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", UID: types.UID("d")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &highPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "e", UID: types.UID("e")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"b": true, "c": true}},
},
{
name: "pod with anti-affinity is preempted",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{
Name: "machine1",
Labels: map[string]string{"pod": "preemptor"}}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a"), Labels: map[string]string{"service": "securityscan"}}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1", Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "pod",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"preemptor", "value2"},
},
},
},
TopologyKey: "hostname",
},
},
}}}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", UID: types.UID("d")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &highPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "e", UID: types.UID("e")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"a": true}, "machine2": {}},
addAffinityPredicate: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodes := []*v1.Node{}
for _, n := range test.nodes {
node := makeNode(n, 1000*5, priorityutil.DefaultMemoryRequest*5)
node.ObjectMeta.Labels = map[string]string{"hostname": node.Name}
nodes = append(nodes, node)
}
if test.addAffinityPredicate {
test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
}
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
// newnode simulates the case where a new node has been added to the cluster but
// nodeNameToInfo does not know about it yet.
newnode := makeNode("newnode", 1000*5, priorityutil.DefaultMemoryRequest*5)
newnode.ObjectMeta.Labels = map[string]string{"hostname": "newnode"}
nodes = append(nodes, newnode)
nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
if err != nil {
t.Error(err)
}
if err := checkPreemptionVictims(test.expected, nodeToPods); err != nil {
t.Error(err)
}
})
}
}
// TestPickOneNodeForPreemption tests pickOneNodeForPreemption.
func TestPickOneNodeForPreemption(t *testing.T) {
algorithmpredicates.SetPredicatesOrdering(order)
tests := []struct {
name string
predicates map[string]algorithmpredicates.FitPredicate
nodes []string
pod *v1.Pod
pods []*v1.Pod
expected []string // any of the items is valid
}{
{
name: "No node needs preemption",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}}},
expected: []string{"machine1"},
},
{
name: "a pod that fits on both machines when lower priority pods are preempted",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: []string{"machine1", "machine2"},
},
{
name: "a pod that fits on a machine with no preemption",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: []string{"machine3"},
},
{
name: "machine with min highest priority pod is picked",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine3"}},
},
expected: []string{"machine3"},
},
{
name: "when highest priorities are the same, minimum sum of priorities is picked",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}},
},
expected: []string{"machine2"},
},
{
name: "when highest priority and sum are the same, minimum number of pods is picked",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.3", UID: types.UID("m1.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.4", UID: types.UID("m1.4")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &negPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.3", UID: types.UID("m3.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}},
},
expected: []string{"machine2"},
},
{
// pickOneNodeForPreemption adjusts pod priorities when finding the sum of the victims. This
// test ensures that the logic works correctly.
name: "sum of adjusted priorities is considered",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.3", UID: types.UID("m1.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &negPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.3", UID: types.UID("m3.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}},
},
expected: []string{"machine2"},
},
{
name: "non-overlapping lowest high priority, sum priorities, and number of pods",
predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3", "machine4"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &veryHighPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.3", UID: types.UID("m1.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.3", UID: types.UID("m3.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.4", UID: types.UID("m3.4")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine3"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.1", UID: types.UID("m4.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine4"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.2", UID: types.UID("m4.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine4"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.3", UID: types.UID("m4.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine4"}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.4", UID: types.UID("m4.4")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine4"}},
},
expected: []string{"machine1"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodes := []*v1.Node{}
for _, n := range test.nodes {
nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
}
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
node := pickOneNodeForPreemption(candidateNodes)
found := false
for _, nodeName := range test.expected {
if node.Name == nodeName {
found = true
break
}
}
if !found {
t.Errorf("unexpected node: %v", node)
}
})
}
}
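// TestNodesWherePreemptionMightHelp verifies which nodes remain candidates for preemption based on their predicate failure reasons.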
func TestNodesWherePreemptionMightHelp(t *testing.T) {
// Prepare 4 node names.
nodeNames := []string{}
for i := 1; i < 5; i++ {
nodeNames = append(nodeNames, fmt.Sprintf("machine%d", i))
}
tests := []struct {
name string
failedPredMap FailedPredicateMap
expected map[string]bool // set of expected node names. Value is ignored.
}{
{
name: "No node should be attempted",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch},
"machine4": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated},
},
expected: map[string]bool{},
},
{
name: "ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable},
},
expected: map[string]bool{"machine1": true, "machine4": true},
},
{
name: "pod with both pod affinity and anti-affinity should be tried",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
},
expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
},
{
name: "ErrPodAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityRulesNotMatch},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
},
expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
},
{
name: "Mix of failed predicates works fine",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeUnderDiskPressure, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict},
"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
"machine4": []algorithmpredicates.PredicateFailureReason{},
},
expected: map[string]bool{"machine3": true, "machine4": true},
},
{
name: "Node condition errors should be considered unresolvable",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderPIDPressure},
"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
},
expected: map[string]bool{"machine4": true},
},
{
name: "Node condition errors and ErrNodeUnknownCondition should be considered unresolvable",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNotReady},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNetworkUnavailable},
"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnknownCondition},
},
expected: map[string]bool{"machine4": true},
},
{
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
failedPredMap: FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeZoneConflict},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeNodeConflict},
"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeBindConflict},
},
expected: map[string]bool{"machine4": true},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodes := nodesWherePreemptionMightHelp(makeNodeList(nodeNames), test.failedPredMap)
if len(test.expected) != len(nodes) {
t.Errorf("number of nodes is not the same as expected. exptectd: %d, got: %d. Nodes: %v", len(test.expected), len(nodes), nodes)
}
for _, node := range nodes {
if _, found := test.expected[node.Name]; !found {
t.Errorf("node %v is not expected.", node.Name)
}
}
})
}
}
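// TestPreempt exercises the generic scheduler's Preempt path, including scheduler extenders, and checks the chosen node and victim pods.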
func TestPreempt(t *testing.T) {
failedPredMap := FailedPredicateMap{
"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrDiskConflict},
"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
}
// Prepare 3 node names.
nodeNames := []string{}
for i := 1; i < 4; i++ {
nodeNames = append(nodeNames, fmt.Sprintf("machine%d", i))
}
tests := []struct {
name string
pod *v1.Pod
pods []*v1.Pod
extenders []*FakeExtender
expectedNode string
expectedPods []string // list of preempted pods
}{
{
name: "basic preemption logic",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
expectedNode: "machine1",
expectedPods: []string{"m1.1", "m1.2"},
},
{
name: "One node doesn't need any preemption",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
expectedNode: "machine3",
expectedPods: []string{},
},
{
name: "Scheduler extenders allow only machine1, otherwise machine3 would have been chosen",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},
},
{
predicates: []fitPredicate{machine1PredicateExtender},
},
},
expectedNode: "machine1",
expectedPods: []string{"m1.1", "m1.2"},
},
{
name: "Scheduler extenders do not allow any preemption",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{falsePredicateExtender},
},
},
expectedNode: "",
expectedPods: []string{},
},
{
name: "One scheduler extender allows only machine1, the other returns error but ignorable. Only machine1 would be chosen",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{errorPredicateExtender},
ignorable: true,
},
{
predicates: []fitPredicate{machine1PredicateExtender},
},
},
expectedNode: "machine1",
expectedPods: []string{"m1.1", "m1.2"},
},
{
name: "One scheduler extender allows only machine1, but it is not interested in given pod, otherwise machine1 would have been chosen",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{machine1PredicateExtender},
unInterested: true,
},
{
predicates: []fitPredicate{truePredicateExtender},
},
},
expectedNode: "machine3",
expectedPods: []string{},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
stop := make(chan struct{})
cache := schedulerinternalcache.New(time.Duration(0), stop)
for _, pod := range test.pods {
cache.AddPod(pod)
}
cachedNodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{}
for _, name := range nodeNames {
node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5)
cache.AddNode(node)
// Set nodeInfo to extenders to mock extenders' cache for preemption.
cachedNodeInfo := schedulernodeinfo.NewNodeInfo()
cachedNodeInfo.SetNode(node)
cachedNodeInfoMap[name] = cachedNodeInfo
}
extenders := []algorithm.SchedulerExtender{}
for _, extender := range test.extenders {
// Set nodeInfoMap as extenders cached node information.
extender.cachedNodeNameToInfo = cachedNodeInfoMap
extenders = append(extenders, extender)
}
scheduler := NewGenericScheduler(
cache,
internalqueue.NewSchedulingQueue(nil),
map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
algorithmpredicates.EmptyPredicateMetadataProducer,
[]algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet,
extenders,
nil,
schedulertesting.FakePersistentVolumeClaimLister{},
schedulertesting.FakePDBLister{},
false,
false,
schedulerapi.DefaultPercentageOfNodesToScore)
// Call Preempt and check the expected results.
node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
if err != nil {
t.Errorf("unexpected error in preemption: %v", err)
}
if (node != nil && node.Name != test.expectedNode) || (node == nil && len(test.expectedNode) != 0) {
t.Errorf("expected node: %v, got: %v", test.expectedNode, node.GetName())
}
if len(victims) != len(test.expectedPods) {
t.Errorf("expected %v pods, got %v.", len(test.expectedPods), len(victims))
}
for _, victim := range victims {
found := false
for _, expPod := range test.expectedPods {
if expPod == victim.Name {
found = true
break
}
}
if !found {
t.Errorf("pod %v is not expected to be a victim.", victim.Name)
}
// Mark the victims for deletion and record the preemptor's nominated node name.
now := metav1.Now()
victim.DeletionTimestamp = &now
test.pod.Status.NominatedNodeName = node.Name
}
// Call preempt again and make sure it doesn't preempt any more pods.
node, victims, _, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
if err != nil {
t.Errorf("unexpected error in preemption: %v", err)
}
if node != nil && len(victims) > 0 {
t.Errorf("didn't expect any more preemption. Node %v is selected for preemption.", node)
}
close(stop)
})
}
}
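// NOTE: the expected values in the cases below are consistent with an adaptive
// percentage of roughly (50 - numAllNodes/125) percent, floored at 5%, applied
// only when the cluster has more than 50 nodes and percentageOfNodesToScore is
// unset. This is inferred from the test data, not from the implementation itself.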
func TestNumFeasibleNodesToFind(t *testing.T) {
tests := []struct {
name string
percentageOfNodesToScore int32
numAllNodes int32
wantNumNodes int32
}{
{
name: "not set percentageOfNodesToScore and nodes number not more than 50",
numAllNodes: 10,
wantNumNodes: 10,
},
{
name: "set percentageOfNodesToScore and nodes number not more than 50",
percentageOfNodesToScore: 40,
numAllNodes: 10,
wantNumNodes: 10,
},
{
name: "not set percentageOfNodesToScore and nodes number more than 50",
numAllNodes: 1000,
wantNumNodes: 420,
},
{
name: "set percentageOfNodesToScore and nodes number more than 50",
percentageOfNodesToScore: 40,
numAllNodes: 1000,
wantNumNodes: 400,
},
{
name: "not set percentageOfNodesToScore and nodes number more than 50*125",
numAllNodes: 6000,
wantNumNodes: 300,
},
{
name: "set percentageOfNodesToScore and nodes number more than 50*125",
percentageOfNodesToScore: 40,
numAllNodes: 6000,
wantNumNodes: 2400,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := &genericScheduler{
percentageOfNodesToScore: tt.percentageOfNodesToScore,
}
if gotNumNodes := g.numFeasibleNodesToFind(tt.numAllNodes); gotNumNodes != tt.wantNumNodes {
t.Errorf("genericScheduler.numFeasibleNodesToFind() = %v, want %v", gotNumNodes, tt.wantNumNodes)
}
})
}
}
| TestZeroRequest |
set_flag.rs | use crate::types::*;
use specs::*;
use crate::systems::specials::config::*;
use crate::component::flag::IsBoosting;
use crate::protocol::PlaneType;
use crate::systems::handlers::packet::KeyHandler;
use crate::SystemInfo;
pub struct SetBoostingFlag;
#[derive(SystemData)]
pub struct | <'a> {
pub config: Read<'a, Config>,
pub entities: Entities<'a>,
pub plane: ReadStorage<'a, Plane>,
pub energy: ReadStorage<'a, Energy>,
pub boosting: WriteStorage<'a, IsBoosting>,
pub keystate: WriteStorage<'a, KeyState>,
pub energy_regen: WriteStorage<'a, EnergyRegen>,
}
impl<'a> System<'a> for SetBoostingFlag {
type SystemData = SetBoostingFlagData<'a>;
fn run(&mut self, mut data: Self::SystemData) {
let ref info = data.config.planes[PlaneType::Predator];
let mut boosting = data.boosting;
let mut clears = vec![];
let mut keystate = data.keystate;
(
&data.plane,
&data.energy,
keystate.mask(),
&mut data.energy_regen,
&*data.entities,
)
.join()
.filter(|(plane, _, _, _, _)| **plane == PlaneType::Predator)
.for_each(|(_, energy, _, energy_regen, ent)| {
let keystate = try_get!(ent, keystate);
if *energy == Energy::new(0.0) || !keystate.special {
if boosting.get(ent).is_some() {
clears.push(ent);
*energy_regen = info.energy_regen;
boosting.remove(ent);
}
} else if keystate.special && (keystate.up || keystate.down) {
*energy_regen = *PREDATOR_SPECIAL_REGEN;
// Only insert when there is no value there
// already, to prevent multiple change
// flags from being set
if boosting.get(ent).is_none() {
boosting.insert(ent, IsBoosting).unwrap();
}
}
});
// Clear specific keys without iterating over
// all key states mutably
for ent in clears {
try_get!(ent, mut keystate).special = false;
}
}
}
impl SystemInfo for SetBoostingFlag {
type Dependencies = (KeyHandler);
fn name() -> &'static str {
concat!(module_path!(), "::", line!())
}
fn new() -> Self {
Self {}
}
}
| SetBoostingFlagData |
projects.client.service.js | // Projects service used to communicate Projects REST endpoints
(function () {
'use strict';
angular
.module('projects')
.factory('ProjectsService', ProjectsService);
ProjectsService.$inject = ['$resource', '$log'];
function ProjectsService($resource, $log) {
var Project = $resource('/api/projects/:projectId', {
projectId: '@_id'
}, {
update: {
method: 'PUT'
},
forProgram: {
method: 'GET',
url: '/api/projects/for/program/:programId',
isArray: true
},
makeRequest: {
method: 'GET',
url :'/api/request/project/:projectId'
},
my: {
method: 'GET',
url: '/api/my/projects',
isArray: true
},
myadmin: {
method: 'GET',
url: '/api/myadmin/projects',
isArray: true
},
getRequests: {
method: 'GET',
url :'/api/projects/requests/:projectId',
isArray: true
},
getMembers: {
method: 'GET',
url :'/api/projects/members/:projectId',
isArray: true
},
confirmMember: {
method: 'GET',
url : '/api/projects/requests/confirm/:projectId/:userId'
},
denyMember: {
method: 'GET',
url : '/api/projects/requests/deny/:projectId/:userId'
}
});
angular.extend(Project.prototype, {
createOrUpdate: function () {
var project = this;
return createOrUpdate(project);
}
});
return Project;
function createOrUpdate(project) {
if (project._id) {
return project.$update(onSuccess, onError);
} else {
return project.$save(onSuccess, onError);
}
// Handle successful response
function | () {
// Any required internal processing from inside the service, goes here.
}
// Handle error response
function onError(errorResponse) {
var error = errorResponse.data;
// Handle error internally
handleError(error);
}
function handleError(error) {
// Log error
$log.error(error);
}
}
}
}());
| onSuccess |
merkle_blocks.py | #!/usr/bin/env python2
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Copyright (c) 2014-2020 The Skicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import SkicoinTestFramework
from test_framework.util import *
class | (SkicoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print "Mining blocks..."
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
# Doesn't apply to Skicoin Core - we have txindex always on
# assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
| MerkleBlockTest |
test_router.py | """
Unit tests for the router class. Please don't add any test that will involve
the controller or the actual replica wrapper; use a mock if necessary.
"""
import asyncio
import pytest
import ray
from ray.serve.common import RunningReplicaInfo
from ray.serve.router import Query, ReplicaSet, RequestMetadata
from ray._private.test_utils import SignalActor
pytestmark = pytest.mark.asyncio
@pytest.fixture
def ray_instance():
# Note(simon):
# This line should not be turned on on master because it leads to very
# spammy and unhelpful logs in case of a failure in CI.
# To run locally, please use this instead.
# SERVE_LOG_DEBUG=1 pytest -v -s test_api.py
# os.environ["SERVE_LOG_DEBUG"] = "1" <- Do not uncomment this.
ray.init(num_cpus=16)
yield
ray.shutdown()
def mock_task_runner():
|
@pytest.fixture
def task_runner_mock_actor():
yield mock_task_runner()
async def test_replica_set(ray_instance):
signal = SignalActor.remote()
@ray.remote(num_cpus=0)
class MockWorker:
_num_queries = 0
@ray.method(num_returns=2)
async def handle_request(self, request):
self._num_queries += 1
await signal.wait.remote()
return b"", "DONE"
async def num_queries(self):
return self._num_queries
# We will test a scenario with two replicas in the replica set.
rs = ReplicaSet(
"my_deployment",
asyncio.get_event_loop(),
)
replicas = [
RunningReplicaInfo(
deployment_name="my_deployment",
replica_tag=str(i),
actor_handle=MockWorker.remote(),
max_concurrent_queries=1,
)
for i in range(2)
]
rs.update_running_replicas(replicas)
# Send two queries. They should go through the router but be blocked by the signal
# actors.
query = Query([], {}, RequestMetadata("request-id", "endpoint"))
first_ref = await rs.assign_replica(query)
second_ref = await rs.assign_replica(query)
# These should be blocked by signal actor.
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get([first_ref, second_ref], timeout=1)
# Each replica should have exactly one inflight query. Let's make sure the
# queries arrived there.
for replica in replicas:
while await replica.actor_handle.num_queries.remote() != 1:
await asyncio.sleep(1)
# Let's try to send another query.
third_ref_pending_task = asyncio.get_event_loop().create_task(
rs.assign_replica(query)
)
# We should fail to assign a replica, so this coroutine should still be
# pending after some time.
await asyncio.sleep(0.2)
assert not third_ref_pending_task.done()
# Let's unblock the two replicas
await signal.send.remote()
assert await first_ref == "DONE"
assert await second_ref == "DONE"
# The third request should be unblocked and sent to the first replica.
# This means we should be able to get the object ref.
third_ref = await third_ref_pending_task
# Now that we have the object ref, let's get its result.
await signal.send.remote()
assert await third_ref == "DONE"
# Finally, make sure that one of the replica processed the third query.
num_queries_set = {
(await replica.actor_handle.num_queries.remote()) for replica in replicas
}
assert num_queries_set == {2, 1}
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| @ray.remote(num_cpus=0)
class TaskRunnerMock:
def __init__(self):
self.query = None
self.queries = []
@ray.method(num_returns=2)
async def handle_request(self, request_metadata, *args, **kwargs):
self.query = Query(args, kwargs, request_metadata)
self.queries.append(self.query)
return b"", "DONE"
def get_recent_call(self):
return self.query
def get_all_calls(self):
return self.queries
def clear_calls(self):
self.queries = []
async def reconfigure(self, user_config):
return
return TaskRunnerMock.remote() |
future.rs | use futures::{Future, Poll};
use crate::errors::Error;
/// Represent a future that resolves into Telegram API response.
#[must_use = "futures do nothing unless polled"]
pub struct TelegramFuture<T> {
inner: Box<Future<Item = T, Error = Error>>, | fn new(inner: Box<Future<Item = T, Error = Error>>) -> Self;
}
impl<T> NewTelegramFuture<T> for TelegramFuture<T> {
fn new(inner: Box<Future<Item = T, Error = Error>>) -> Self {
Self { inner: inner }
}
}
impl<T> Future for TelegramFuture<T> {
type Item = T;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.inner.poll()
}
} | }
pub trait NewTelegramFuture<T> { |
sliding_alignment.py | # 2) Write a script that generates all the possible ungapped alignments of two sequences, scores them and identifies
# the best scoring ones.
#
# These are all the possible ungapped alignments of the two sequences: TCA and GA:
#
# --TCA -TCA TCA TCA TCA- TCA--
# GA--- GA-- GA- -GA --GA ---GA
#
# Using the following scoring scheme:
matrix = {'AA': 2, 'AC': -1, 'AT': -1, 'AG': -2, 'CC': 2, 'CT': 0, 'CG': -1,
'TT': 2, 'TG': -1, 'GG': 2, 'CA': -1, 'TA': -1, 'GA': -2, 'TC': 0,
'GC': -1, 'GT': -1, }
human = open('./data/titin_hu.txt', 'r')
mouse = open('./data/titin_mo.txt', 'r')
seq1 = ''
seq2 = ''
for line in human:
line = line.rstrip()
seq1 += line
for line in mouse:
line = line.rstrip()
seq2 += line
# seq1 = 'TT'
# seq2 = 'GTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT'
len_seq1 = len(seq1)
len_seq2 = len(seq2)
iters = len_seq1 + len_seq2
same_size = False
if len_seq1 < len_seq2:
short = seq1
long = seq2
elif len_seq1 > len_seq2:
short = seq2
long = seq1
else:
same_size = True
short = seq1
long = seq2
len_short = len(short)
len_long = len(long)
long = long + '-' * len_short
short = '-' * len_long + short
short = list(short)
long = list(long)
highest = False
best_seq1 = ''
best_seq2 = ''
def score_fun(s1, s2, scoring_matrix):
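# Sum pairwise substitution scores from the matrix for each aligned position;
# a gap ('-') in either sequence contributes a fixed gap penalty instead.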
score = 0
gap_penalty = -2
for base1, base2 in zip(s1, s2):
if base1 == '-' or base2 == '-':
score += gap_penalty
else:
score += scoring_matrix[base1 + base2]
print(''.join(s1), ''.join(s2), score, sep = '\n')
return score
for i in range(iters - 1):
score = score_fun(long, short, matrix)
if long[-1] == '-' and short[0] == '-':
del short[0]
del long[-1]
score = score_fun(long, short, matrix)
elif long[-1] != '-' and short[0] == '-':
short.append('-')
del short[0] | short.append('-')
score = score_fun(long, short, matrix)
if highest == False:
highest = score
if score > highest:
best_seq1 = ''.join(long)
best_seq2 = ''.join(short)
highest = score
print(highest)
comp = ''
for base1, base2 in zip(best_seq1, best_seq2):
if base1 == base2:
comp += '|'
else:
comp += ' '
print(best_seq1, comp, best_seq2,sep = '\n')
print('The best alignment score is:', highest) | score = score_fun(long, short, matrix)
else:
long.insert(0, '-') |
is_buildable_nd.rs | /*
Copyright 2017 Martin Buck
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall
be included all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
//! IsBuildableND trait used for types which are positioned in n-dimensional space and can be constructed
use crate::*;
//------------------------------------------------------------------------------
/// IsBuildableND is a trait used for types which are positioned in n-dimensional space and can be constructed
pub trait IsBuildableND: Sized + IsND {
/// Should build an object from the correct number of coordinates
fn new_nd(coords: &[f64]) -> Result<Self>;
/// Should use the coordinates of another as its own
fn from_nd<P>(&mut self, other: P) -> Result<()>
where
P: IsBuildableND;
/// Returns a new object with 0 for all coordinates
#[inline(always)]
fn zero_nd() -> Self {
// Safe if the implementation is correct
Self::new_nd(&vec![0.0; Self::n_dimensions()]).unwrap()
}
/// Returns the center between this and other
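/// The provided `buffer` is cleared and reused to avoid per-call allocations;
/// both points must have the same number of dimensions.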
fn | <P>(&self, other: &P, buffer: &mut Vec<f64>) -> Result<Self>
where
P: IsND,
{
let n = Self::n_dimensions();
if n != P::n_dimensions() {
return Err(ErrorKind::IncorrectDimension);
}
buffer.clear();
for i in 0..n {
buffer.push(
0.5 * (self.position_nd(i).ok_or(ErrorKind::IncorrectDimension)?
+ other.position_nd(i).ok_or(ErrorKind::IncorrectDimension)?),
);
}
Self::new_nd(&buffer)
}
}
| center_nd |
extract_reference_case.py | """
Extract the reference case (``cea/examples/reference-case-open.zip``).
"""
from __future__ import division
import os
import zipfile
import cea.examples
import cea.config
import cea.inputlocator
# list the sections in the configuration file that are used by this script
# this value is used to generate the help menu for the command-line interface
CEA_CONFIG_SECTIONS = ['extract-reference-case']
def main(config):
|
if __name__ == '__main__':
main(cea.config.Configuration())
| """
Extract the reference case in ``reference-case-open.zip`` to the destination folder.
:param config: Contains the PathParameter ``config.extract_reference_case.destination``
:type config: cea.config.Configuration
:return:
"""
reference_case = 'reference-case-{case}.zip'.format(case=config.extract_reference_case.case)
archive = zipfile.ZipFile(os.path.join(os.path.dirname(cea.examples.__file__), reference_case))
archive.extractall(config.extract_reference_case.destination) |
runner.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import getpass
import time
import http
import logging
from random import seed
import infra.network
import infra.proc
import infra.remote_client
import infra.rates
import cimetrics.upload
from loguru import logger as LOG
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.getLogger("paramiko").setLevel(logging.WARNING)
def minimum_number_of_local_nodes(args):
"""
If we are using bft then we need to have 3 nodes. CFT will run with 1 node, unless it expects a backup
"""
if args.consensus == "bft":
return 3
if args.send_tx_to == "backups":
return 2
return 1
def get_command_args(args, get_command):
command_args = []
return get_command(*command_args)
def filter_nodes(primary, backups, filter_type):
|
def configure_remote_client(args, client_id, client_host, node, command_args):
if client_host == "localhost":
client_host = infra.net.expand_localhost()
remote_impl = infra.remote.LocalRemote
else:
remote_impl = infra.remote.SSHRemote
try:
remote_client = infra.remote_client.CCFRemoteClient(
"client_" + str(client_id),
client_host,
args.client,
node.host,
node.rpc_port,
args.workspace,
args.label,
args.config,
command_args,
remote_impl,
)
remote_client.setup()
return remote_client
except Exception:
LOG.exception("Failed to start client {}".format(client_host))
raise
def run(get_command, args):
if args.fixed_seed:
seed(getpass.getuser())
hosts = args.nodes
if not hosts:
hosts = ["local://localhost"] * minimum_number_of_local_nodes(args)
LOG.info("Starting nodes on {}".format(hosts))
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
network.start_and_join(args)
primary, backups = network.find_nodes()
command_args = get_command_args(args, get_command)
nodes_to_send_to = filter_nodes(primary, backups, args.send_tx_to)
clients = []
client_hosts = []
if args.one_client_per_backup:
if not backups:
raise Exception(
"--one-client-per-backup was set but no backup was found"
)
client_hosts = ["localhost"] * len(backups)
else:
if args.client_nodes:
client_hosts.extend(args.client_nodes)
if args.num_localhost_clients:
client_hosts.extend(["localhost"] * int(args.num_localhost_clients))
if not client_hosts:
client_hosts = ["localhost"]
for client_id, client_host in enumerate(client_hosts):
node = nodes_to_send_to[client_id % len(nodes_to_send_to)]
remote_client = configure_remote_client(
args, client_id, client_host, node, command_args
)
clients.append(remote_client)
if args.network_only:
for remote_client in clients:
LOG.info(f"Client can be run with: {remote_client.remote.get_cmd()}")
while True:
time.sleep(60)
else:
for remote_client in clients:
remote_client.start()
hard_stop_timeout = 90
try:
with cimetrics.upload.metrics(complete=False) as metrics:
tx_rates = infra.rates.TxRates(primary)
start_time = time.time()
while True:
stop_waiting = True
for i, remote_client in enumerate(clients):
done = remote_client.check_done()
# all the clients need to be done
LOG.info(
f"Client {i} has {'completed' if done else 'not completed'} running ({time.time() - start_time:.2f}s / {hard_stop_timeout}s)"
)
stop_waiting = stop_waiting and done
if stop_waiting:
break
if time.time() > start_time + hard_stop_timeout:
raise TimeoutError(
f"Client still running after {hard_stop_timeout}s"
)
time.sleep(5)
tx_rates.get_metrics()
for remote_client in clients:
perf_result = remote_client.get_result()
LOG.success(f"{args.label}/{remote_client.name}: {perf_result}")
# TODO: Only results for first client are uploaded
# https://github.com/microsoft/CCF/issues/1046
if remote_client == clients[0]:
LOG.success(f"Uploading results for {remote_client.name}")
metrics.put(args.label, perf_result)
else:
LOG.warning(f"Skipping upload for {remote_client.name}")
primary, _ = network.find_primary()
with primary.client() as nc:
r = nc.get("/node/memory")
assert r.status_code == http.HTTPStatus.OK.value
results = r.body.json()
tx_rates.insert_metrics(**results)
# Construct name for heap metric, removing ^ suffix if present
heap_peak_metric = f"Mem_{args.label}"
if heap_peak_metric.endswith("^"):
heap_peak_metric = heap_peak_metric[:-1]
peak_value = results["peak_allocated_heap_size"]
metrics.put(heap_peak_metric, peak_value)
LOG.info(f"Rates:\n{tx_rates}")
tx_rates.save_results(args.metrics_file)
for remote_client in clients:
remote_client.stop()
except Exception:
LOG.error("Stopping clients due to exception")
for remote_client in clients:
remote_client.stop()
raise
| if filter_type == "primary":
return [primary]
elif filter_type == "backups":
if not backups:
raise Exception("--send-tx-to backups but no backup was found")
return backups
else:
return [primary] + backups |
_impl_fn_update.rs | use crate::bdd_params::{BddParameterEncoder, BddParams};
use crate::biodivine_std::structs::IdState;
use crate::{BinaryOp, FnUpdate};
use biodivine_lib_bdd::Bdd;
impl FnUpdate {
/// **(internal)** Evaluate this `FnUpdate` into symbolic `BddParams` that represent all parameter
/// valuations for which this function evaluates to `true`.
pub(crate) fn symbolic_eval_true(
&self,
state: IdState,
encoder: &BddParameterEncoder,
) -> BddParams |
pub(super) fn _symbolic_eval(&self, state: IdState, encoder: &BddParameterEncoder) -> Bdd {
match self {
FnUpdate::Const(value) => {
if *value {
encoder.bdd_variables.mk_true()
} else {
encoder.bdd_variables.mk_false()
}
}
FnUpdate::Not(inner) => inner._symbolic_eval(state, encoder).not(),
FnUpdate::Var(id) => {
if state.get_bit(id.0) {
encoder.bdd_variables.mk_true()
} else {
encoder.bdd_variables.mk_false()
}
}
FnUpdate::Param(id, args) => {
let var = encoder.get_explicit(state, *id, args);
encoder.bdd_variables.mk_var(var)
}
FnUpdate::Binary(op, l, r) => {
let l = l._symbolic_eval(state, encoder);
let r = r._symbolic_eval(state, encoder);
match op {
BinaryOp::And => l.and(&r),
BinaryOp::Or => l.or(&r),
BinaryOp::Xor => l.xor(&r),
BinaryOp::Imp => l.imp(&r),
BinaryOp::Iff => l.iff(&r),
}
}
}
}
}
| {
BddParams(self._symbolic_eval(state, encoder))
} |
encryption.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use std::io::Result;
use engine_traits::{EncryptionKeyManager, EncryptionMethod, FileEncryptionInfo};
use rocksdb::{
DBEncryptionMethod, EncryptionKeyManager as DBEncryptionKeyManager,
FileEncryptionInfo as DBFileEncryptionInfo,
};
pub struct WrappedEncryptionKeyManager<T: EncryptionKeyManager> {
manager: T,
}
impl<T: EncryptionKeyManager> DBEncryptionKeyManager for WrappedEncryptionKeyManager<T> {
fn get_file(&self, fname: &str) -> Result<DBFileEncryptionInfo> {
self.manager
.get_file(fname)
.map(convert_file_encryption_info)
}
fn new_file(&self, fname: &str) -> Result<DBFileEncryptionInfo> {
self.manager
.new_file(fname)
.map(convert_file_encryption_info)
}
fn delete_file(&self, fname: &str) -> Result<()> {
self.manager.delete_file(fname)
}
fn link_file(&self, src_fname: &str, dst_fname: &str) -> Result<()> {
self.manager.link_file(src_fname, dst_fname)
}
fn rename_file(&self, src_fname: &str, dst_fname: &str) -> Result<()> {
self.manager.rename_file(src_fname, dst_fname)
}
}
fn convert_file_encryption_info(input: FileEncryptionInfo) -> DBFileEncryptionInfo {
DBFileEncryptionInfo {
method: convert_encryption_method(input.method),
key: input.key,
iv: input.iv,
}
}
fn convert_encryption_method(input: EncryptionMethod) -> DBEncryptionMethod | {
match input {
EncryptionMethod::Plaintext => DBEncryptionMethod::Plaintext,
EncryptionMethod::Aes128Ctr => DBEncryptionMethod::Aes128Ctr,
EncryptionMethod::Aes192Ctr => DBEncryptionMethod::Aes192Ctr,
EncryptionMethod::Aes256Ctr => DBEncryptionMethod::Aes256Ctr,
EncryptionMethod::Unknown => DBEncryptionMethod::Unknown,
}
} |
|
interface.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package publicipclient
import (
"context"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
const (
// APIVersion is the API version for network.
APIVersion = "2020-08-01"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2018-11-01"
// ComputeAPIVersion is the API version for compute. It is required to get VMSS public IP.
ComputeAPIVersion = "2017-03-30"
// AzureStackComputeAPIVersion is the API version for compute for Azure Stack. It is required to get VMSS network interface.
AzureStackComputeAPIVersion = "2018-11-01"
// AzureStackCloudName is the cloud name of Azure Stack
AzureStackCloudName = "AZURESTACKCLOUD"
)
// Interface is the client interface for PublicIPAddress.
// Don't forget to run the following command to generate the mock client:
// mockgen -source=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/interface.go -package=mockpublicipclient Interface > $GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go
type Interface interface {
// Get gets a PublicIPAddress.
Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, rerr *retry.Error)
// GetVirtualMachineScaleSetPublicIPAddress gets a PublicIPAddress for VMSS VM.
GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, rerr *retry.Error) | // List gets a list of PublicIPAddress in the resource group.
List(ctx context.Context, resourceGroupName string) (result []network.PublicIPAddress, rerr *retry.Error)
// CreateOrUpdate creates or updates a PublicIPAddress.
CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error
// Delete deletes a PublicIPAddress by name.
Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) *retry.Error
} | |
fake_role.go | /*
Copyright The Kubernetes Authors.
Copyright 2020 Authors of Arktos - file modified.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
rbacv1 "k8s.io/api/rbac/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeRoles implements RoleInterface
type FakeRoles struct {
Fake *FakeRbacV1
ns string
te string
}
var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"}
var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"}
// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *rbacv1.Role, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetActionWithMultiTenancy(rolesResource, c.ns, name, c.te), &rbacv1.Role{})
if obj == nil {
return nil, err
}
return obj.(*rbacv1.Role), err
}
// List takes label and field selectors, and returns the list of Roles that match those selectors.
func (c *FakeRoles) List(opts v1.ListOptions) (result *rbacv1.RoleList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListActionWithMultiTenancy(rolesResource, rolesKind, c.ns, opts, c.te), &rbacv1.RoleList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &rbacv1.RoleList{ListMeta: obj.(*rbacv1.RoleList).ListMeta}
for _, item := range obj.(*rbacv1.RoleList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.AggregatedWatchInterface that watches the requested roles.
func (c *FakeRoles) Watch(opts v1.ListOptions) watch.AggregatedWatchInterface {
aggWatch := watch.NewAggregatedWatcher()
watcher, err := c.Fake.
InvokesWatch(testing.NewWatchActionWithMultiTenancy(rolesResource, c.ns, opts, c.te))
aggWatch.AddWatchInterface(watcher, err)
return aggWatch
}
// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
func (c *FakeRoles) Create(role *rbacv1.Role) (result *rbacv1.Role, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateActionWithMultiTenancy(rolesResource, c.ns, role, c.te), &rbacv1.Role{})
if obj == nil {
return nil, err
}
return obj.(*rbacv1.Role), err
}
// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
func (c *FakeRoles) Update(role *rbacv1.Role) (result *rbacv1.Role, err error) { | return nil, err
}
return obj.(*rbacv1.Role), err
}
// Delete takes name of the role and deletes it. Returns an error if one occurs.
func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteActionWithMultiTenancy(rolesResource, c.ns, name, c.te), &rbacv1.Role{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionActionWithMultiTenancy(rolesResource, c.ns, listOptions, c.te)
_, err := c.Fake.Invokes(action, &rbacv1.RoleList{})
return err
}
// Patch applies the patch and returns the patched role.
func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.Role, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceActionWithMultiTenancy(rolesResource, c.te, c.ns, name, pt, data, subresources...), &rbacv1.Role{})
if obj == nil {
return nil, err
}
return obj.(*rbacv1.Role), err
} | obj, err := c.Fake.
Invokes(testing.NewUpdateActionWithMultiTenancy(rolesResource, c.ns, role, c.te), &rbacv1.Role{})
if obj == nil { |
core.py | """
Defines `Core`.
Instantiates the module-level logger with the appropriate naming
convention.
"""
import logging
from abc import ABC
from interface.interface import Interface
from astparser.astparser import AstParser
from record.record import Record
from exception.exception import NoFilesSpecifiedError
LOGGER = logging.getLogger(__name__)
class Core(ABC):
"""
Define the object responsible for the project's three main features.
`Core` is also responsible for administrating the `Interface` and
`AstParser` objects but mainly exists as a straightforward and
moderated view inside the complex internal functionality of IDA-CFP.
While the instances of `Interface` and `AstParser` can be explicitly
accessed by other third-party code, this is not recommended as both
objects contain no (strict) immutable state.
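Typical usage (illustrative sketch; the file handles normally come from the
argparse layer, so `parsed_args` below is only a placeholder name):
core = Core()
core.process_files(parsed_args.files)
core.generate_bundle()
core.export()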
"""
def __init__(self) -> None:
"""
Initialize the `Core` object.
Unlike the __init__ of `AstParser`, the internal state of _intr
and _astp persists between files specified.
`self._intr` contains an instance of the `Interface` object and
is responsible for providing access to high level file I/O
functionality.
`self._astp` contains an instance of the `AstParser` object and
is responsible for processing and understanding the abstract
syntax tree (AST) that PycParser generates.
:return: returns nothing
"""
self._intr = Interface()
self._astp = AstParser()
def | (self, files: list) -> None:
"""
Process a list of file I/O objects.
For each file specified in the `files` list, its AST
is loaded and properly processed before it is added
to the module-level `Record`.
:param files: list of argparser IO wrappers
:return: returns nothing
"""
# If the `files` list is found to be empty or improperly
# populated then a `NoFileSpecifiedError` is raised
if not files:
raise NoFilesSpecifiedError()
for f_str in files:
ast = self._intr.load_new_ast(f_str.name)
self._astp.process_ast(ast)
# Rather than attempting to integrate the list and dict after
# every file, it is much cheaper to condense the operation
# and do it only once per run
Record.integrate_list_to_dict()
def generate_bundle(self) -> None:
"""
Generate the bundle interface for disk I/O.
Utilize the `Interface`-based conversion functionality to
convert from the master `Record` dictionary of string: function
pairs to a `json` string dump.
:return: returns nothing
"""
self._intr.convert_dict_to_json(Record.str_func_dict)
def export(self) -> None:
"""
Export the final bundle to disk.
Utilize the `Interface`-based file-I/O system to drop the
converted json string data to out/bundle.json.
:return: returns nothing
"""
self._intr.drop_bundle_to_disk(self._intr.json_data)
| process_files |
index.ts | import { Executor } from '../../types'
export function | (options: Executor.Options): Executor {
const { dontFakeInteractive, spawn } = options
if (dontFakeInteractive) return spawn
const shQtMdl = import('shell-quote')
return async (cmd, args) => {
const { quote } = await shQtMdl
const shCmd = quote([cmd, ...args])
return spawn(
'script',
['--quiet', '--append', '/dev/null', '--command', shCmd],
)
}
}
export default executor
| executor |
byobnet.py | """ Bring-Your-Own-Blocks Network
A flexible network w/ dataclass based config for stacking those NN blocks.
This model is currently used to implement the following networks:
GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)).
Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0
RepVGG - repvgg_*
Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT
In all cases the models have been modified to fit within the design of ByobNet. I've remapped
the original weights and verified accuracies.
For GPU Efficient nets, I used the original names for the blocks since they were for the most part
the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some
changes introduced in RegNet were also present in the stem and bottleneck blocks for this model.
A significant number of different network archs can be implemented here, including variants of the
above nets that include attention.
Hacked together by / copyright Ross Wightman, 2021.
"""
import math
from dataclasses import dataclass, field, replace
from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg, named_apply
from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \
create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple
from .registry import register_model
__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = {
# GPU-Efficient (ResNet) weights
'gernet_s': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'),
'gernet_m': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'),
'gernet_l': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
# RepVGG weights
'repvgg_a2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b0': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
# experimental configs
'resnet51q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',
first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0),
'resnet61q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0, interpolation='bicubic'),
'resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'bat_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic',
min_input_size=(3, 256, 256)),
'resnet32ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet50t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext50ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
}
@dataclass
class ByoBlockCfg:
type: Union[str, nn.Module]
d: int # block depth (number of block repeats in stage)
c: int # number of output channels for each block in stage
s: int = 2 # stride of stage (first block)
gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1
br: float = 1. # bottleneck-ratio of blocks in stage
# NOTE: these config items override the model cfgs that are applied to all blocks by default
attn_layer: Optional[str] = None
attn_kwargs: Optional[Dict[str, Any]] = None
self_attn_layer: Optional[str] = None
self_attn_kwargs: Optional[Dict[str, Any]] = None
block_kwargs: Optional[Dict[str, Any]] = None
@dataclass
class ByoModelCfg:
blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]
downsample: str = 'conv1x1'
stem_type: str = '3x3'
stem_pool: Optional[str] = 'maxpool'
stem_chs: int = 32
width_factor: float = 1.0
num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation
act_layer: str = 'relu'
norm_layer: str = 'batchnorm'
# NOTE: these config items will be overridden by the block cfg (per-block) if they are set there
attn_layer: Optional[str] = None
attn_kwargs: dict = field(default_factory=lambda: dict())
self_attn_layer: Optional[str] = None
self_attn_kwargs: dict = field(default_factory=lambda: dict())
block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())
def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):
c = (64, 128, 256, 512)
group_size = 0
if groups > 0:
group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0
bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])
return bcfg
def interleave_blocks(
types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs
) -> Tuple[ByoBlockCfg]:
""" interleave 2 block types in stack
"""
assert len(types) == 2
if isinstance(every, int):
every = list(range(0 if first else every, d, every + 1))
if not every:
every = [d - 1]
set(every)
blocks = []
for i in range(d):
block_type = types[1] if i in every else types[0]
blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]
return tuple(blocks)
model_cfgs = dict(
gernet_l=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_m=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_s=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),
),
stem_chs=13,
stem_pool=None,
num_features=1920,
),
repvgg_a2=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),
stem_type='rep',
stem_chs=64,
),
repvgg_b0=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b2=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b2g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b3=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b3g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
# 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks
# DW convs in last block, 2048 pre-FC, silu act
resnet51q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad2',
stem_pool=None,
num_features=2048,
act_layer='silu',
),
# 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks
# DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act
resnet61q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad',
stem_pool=None,
num_features=2048,
act_layer='silu',
block_kwargs=dict(extra_conv=True),
),
# A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act,
# and a tiered stem w/ maxpool
resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
| ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
),
gcresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
seresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='se',
),
eca_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
),
bat_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='bat',
attn_kwargs=dict(block_size=8)
),
# ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool
resnet32ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=0,
act_layer='silu',
),
# ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool
resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
),
# A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat
# and a tiered stem w/ no maxpool
gcresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='gca',
),
seresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='se',
),
eca_resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='eca',
),
gcresnet50t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
attn_layer='gca',
),
gcresnext50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
# stem_pool=None,
act_layer='silu',
attn_layer='gca',
),
)
@register_model
def gernet_l(pretrained=False, **kwargs):
""" GEResNet-Large (GENet-Large from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)
@register_model
def gernet_m(pretrained=False, **kwargs):
""" GEResNet-Medium (GENet-Normal from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)
@register_model
def gernet_s(pretrained=False, **kwargs):
""" EResNet-Small (GENet-Small from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)
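# Usage sketch (illustrative): assuming this module lives inside timm's model registry
# (the @register_model decorator above), registered names can be built via
#   model = timm.create_model('gernet_s', pretrained=False)
# or by calling the factory functions directly, e.g. gernet_s(pretrained=False).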
@register_model
def repvgg_a2(pretrained=False, **kwargs):
""" RepVGG-A2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b0(pretrained=False, **kwargs):
""" RepVGG-B0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1(pretrained=False, **kwargs):
""" RepVGG-B1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1g4(pretrained=False, **kwargs):
""" RepVGG-B1g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2(pretrained=False, **kwargs):
""" RepVGG-B2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2g4(pretrained=False, **kwargs):
""" RepVGG-B2g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3(pretrained=False, **kwargs):
""" RepVGG-B3
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3g4(pretrained=False, **kwargs):
""" RepVGG-B3g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)
@register_model
def resnet51q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)
@register_model
def resnet61q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)
@register_model
def resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def seresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def bat_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def resnet32ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)
@register_model
def resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def seresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet50t(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)
@register_model
def gcresnext50ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)
def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:
if not isinstance(stage_blocks_cfg, Sequence):
stage_blocks_cfg = (stage_blocks_cfg,)
block_cfgs = []
for i, cfg in enumerate(stage_blocks_cfg):
block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]
return block_cfgs
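# Illustrative behavior (field values assumed, not taken from a real config): a stage cfg with
# depth d=3 expands into three per-block cfgs of depth 1, e.g.
#   expand_blocks_cfg(ByoBlockCfg(type='bottle', d=3, c=256, s=1))
# returns a list of three ByoBlockCfg entries, each with d=1 and the other fields unchanged.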
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
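# Illustrative sanity checks for num_groups (derived from the logic above):
#   num_groups(None, 256) -> 1    # no group size set, ordinary conv
#   num_groups(32, 256)   -> 8    # 256 channels split into groups of 32
#   num_groups(1, 256)    -> 256  # group size 1 == depthwise conv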
@dataclass
class LayerFn:
conv_norm_act: Callable = ConvBnAct
norm_act: Callable = BatchNormAct2d
act: Callable = nn.ReLU
attn: Optional[Callable] = None
self_attn: Optional[Callable] = None
class DownsampleAvg(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None):
""" AvgPool Downsampling as in 'D' ResNet variants."""
super(DownsampleAvg, self).__init__()
layers = layers or LayerFn()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)
def forward(self, x):
return self.conv(self.pool(x))
def create_downsample(downsample_type, layers: LayerFn, **kwargs):
if downsample_type == 'avg':
return DownsampleAvg(**kwargs)
else:
return layers.conv_norm_act(kwargs.pop('in_chs'), kwargs.pop('out_chs'), kernel_size=1, **kwargs)
class BasicBlock(nn.Module):
""" ResNet Basic Block - kxk + kxk
"""
def __init__(
self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(BasicBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
# residual path
x = self.conv1_kxk(x)
x = self.conv2_kxk(x)
x = self.attn(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class BottleneckBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - kxk - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', attn_last=False, linear_out=False, extra_conv=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(BottleneckBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
if extra_conv:
self.conv2b_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block)
else:
self.conv2b_kxk = nn.Identity()
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.conv2b_kxk(x)
x = self.attn(x)
x = self.conv3_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class DarkBlock(nn.Module):
""" DarkNet-like (1x1 + 3x3 w/ stride) block
The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models.
This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet nor DenseNet
uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).
If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 w/ stride + 1x1)
for more efficient compute.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(DarkBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class EdgeBlock(nn.Module):
""" EdgeResidual-like (3x3 + 1x1) block
A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.
Very similar to the EfficientNet Edge-Residual block, but this block ends with activations, is
intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.
FIXME is there a more common 3x3 + 1x1 conv block to name this after?
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(EdgeBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(
in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class RepVggBlock(nn.Module):
""" RepVGG Block.
Adapted from impl at https://github.com/DingXiaoH/RepVGG
This version does not currently support the deploy optimization. It is currently fixed in 'train' mode.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(RepVggBlock, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
self.conv_kxk = layers.conv_norm_act(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.act = layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
# NOTE this init overrides the base model init with specific changes for the block type
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight, .1, .1)
nn.init.normal_(m.bias, 0, .1)
if hasattr(self.attn, 'reset_parameters'):
self.attn.reset_parameters()
def forward(self, x):
if self.identity is None:
x = self.conv_1x1(x) + self.conv_kxk(x)
else:
identity = self.identity(x)
x = self.conv_1x1(x) + self.conv_kxk(x)
x = self.drop_path(x) # not in the paper / official impl, experimental
x = x + identity
x = self.attn(x) # no attn in the paper / official impl, experimental
x = self.act(x)
return x
class SelfAttnBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', extra_conv=False, linear_out=False, post_attn_na=True, feat_size=None,
layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(SelfAttnBlock, self).__init__()
assert layers is not None
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
if extra_conv:
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
stride = 1 # striding done via conv if enabled
else:
self.conv2_kxk = nn.Identity()
opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
# FIXME need to dilate self attn to have dilated network support, moop moop
self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
if hasattr(self.self_attn, 'reset_parameters'):
self.self_attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.self_attn(x)
x = self.post_attn(x)
x = self.conv3_1x1(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
_block_registry = dict(
basic=BasicBlock,
bottle=BottleneckBlock,
dark=DarkBlock,
edge=EdgeBlock,
rep=RepVggBlock,
self_attn=SelfAttnBlock,
)
def register_block(block_type:str, block_fn: nn.Module):
_block_registry[block_type] = block_fn
def create_block(block: Union[str, nn.Module], **kwargs):
if isinstance(block, (nn.Module, partial)):
return block(**kwargs)
assert block in _block_registry, f'Unknown block type ({block})'
return _block_registry[block](**kwargs)
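# Illustrative extension point (MyCustomBlock is hypothetical, not defined in this file):
#   register_block('my_block', MyCustomBlock)
#   blk = create_block('my_block', in_chs=64, out_chs=128, stride=2, layers=LayerFn())
# After registration, 'my_block' can also be used as the `type` of a ByoBlockCfg.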
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool',
num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None):
super().__init__()
assert stride in (2, 4)
layers = layers or LayerFn()
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
self.stride = stride
self.feature_info = [] # track intermediate features
prev_feat = ''
stem_strides = [2] + [1] * (num_rep - 1)
if stride == 4 and not pool:
# set last conv in stack to be strided if stride == 4 and no pooling layer
stem_strides[-1] = 2
num_act = num_rep if num_act is None else num_act
# if num_act < num_rep, first convs in stack won't have bn + act
stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
prev_chs = in_chs
curr_stride = 1
for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = layers.conv_norm_act if na else create_conv2d
conv_name = f'conv{i + 1}'
if i > 0 and s > 1:
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if pool and 'max' in pool.lower():
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module('pool', nn.MaxPool2d(3, 2, 1))
curr_stride *= 2
prev_feat = 'pool'
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
assert curr_stride == stride
def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None):
layers = layers or LayerFn()
assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3')
if 'quad' in stem_type:
# based on NFNet stem, stack of 4 3x3 convs
num_act = 2 if 'quad2' in stem_type else None
stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)
elif 'tiered' in stem_type:
# 3x3 stack of 3 convs as in my ResNet-T
stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers)
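# worked example: for out_chs=64 the tiered channels above are (24, 32, 64)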
elif 'deep' in stem_type:
# 3x3 stack of 3 convs as in ResNet-D
stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)
elif 'rep' in stem_type:
stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)
elif '7x7' in stem_type:
# 7x7 stem conv as in ResNet
if pool_type:
stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)
else:
# 3x3 stem conv as in RegNet is the default
if pool_type:
stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)
if isinstance(stem, Stem):
feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]
else:
feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)]
return stem, feature_info
def reduce_feat_size(feat_size, stride=2):
return None if feat_size is None else tuple([s // stride for s in feat_size])
def override_kwargs(block_kwargs, model_kwargs):
""" Override model level attn/self-attn/block kwargs w/ block level
NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs
for the block if set to anything that isn't None.
i.e. an empty block_kwargs dict will remove kwargs set at model level for that block
"""
out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs
return out_kwargs or {} # make sure None isn't returned
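# Illustrative behavior of the rules documented above ('rd_ratio' is just an example key):
#   override_kwargs(dict(rd_ratio=0.25), dict(rd_ratio=0.5)) -> {'rd_ratio': 0.25}  # block-level wins
#   override_kwargs(None, dict(rd_ratio=0.5))                -> {'rd_ratio': 0.5}   # fall back to model-level
#   override_kwargs({}, dict(rd_ratio=0.5))                  -> {}                  # empty dict clears model kwargs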
def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg):
layer_fns = block_kwargs['layers']
# override attn layer / args with block local config
attn_set = block_cfg.attn_layer is not None
if attn_set or block_cfg.attn_kwargs is not None:
# override attn layer config
if attn_set and not block_cfg.attn_layer:
# empty string for attn_layer type will disable attn for this block
attn_layer = None
else:
attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
layer_fns = replace(layer_fns, attn=attn_layer)
# override self-attn layer / args with block local cfg
self_attn_set = block_cfg.self_attn_layer is not None
if self_attn_set or block_cfg.self_attn_kwargs is not None:
# override attn layer config
if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == ''
# empty string for self_attn_layer type will disable attn for this block
self_attn_layer = None
else:
self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
if self_attn_layer is not None else None
layer_fns = replace(layer_fns, self_attn=self_attn_layer)
block_kwargs['layers'] = layer_fns
# add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))
def create_byob_stages(
cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any],
feat_size: Optional[int] = None,
layers: Optional[LayerFn] = None,
block_kwargs_fn: Optional[Callable] = update_block_kwargs):
layers = layers or LayerFn()
feature_info = []
block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
stages = []
for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
stride = stage_block_cfgs[0].s
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx, block_cfg in enumerate(stage_block_cfgs):
out_chs = make_divisible(block_cfg.c * cfg.width_factor)
group_size = block_cfg.gs
if isinstance(group_size, Callable):
group_size = group_size(out_chs, block_idx)
block_kwargs = dict( # Blocks used in this model must accept these arguments
in_chs=prev_chs,
out_chs=out_chs,
stride=stride if block_idx == 0 else 1,
dilation=(first_dilation, dilation),
group_size=group_size,
bottle_ratio=block_cfg.br,
downsample=cfg.downsample,
drop_path_rate=dpr[stage_idx][block_idx],
layers=layers,
)
if block_cfg.type in ('self_attn',):
# add feat_size arg for blocks that support/need it
block_kwargs['feat_size'] = feat_size
block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
blocks += [create_block(block_cfg.type, **block_kwargs)]
first_dilation = dilation
prev_chs = out_chs
if stride > 1 and block_idx == 0:
feat_size = reduce_feat_size(feat_size, stride)
stages += [nn.Sequential(*blocks)]
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
def get_layer_fns(cfg: ByoModelCfg):
act = get_act_layer(cfg.act_layer)
norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act)
conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act)
attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
return layer_fn
class ByobNet(nn.Module):
""" 'Bring-your-own-blocks' Net
A flexible network backbone that allows building model stem + blocks via
dataclass cfg definition w/ factory functions for module instantiation.
Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).
"""
def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,
zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
layers = get_layer_fns(cfg)
if cfg.fixed_input_size:
assert img_size is not None, 'img_size argument is required for fixed input size model'
feat_size = to_2tuple(img_size) if img_size is not None else None
self.feature_info = []
stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers)
self.feature_info.extend(stem_feat[:-1])
feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])
self.stages, stage_feat = create_byob_stages(
cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size)
self.feature_info.extend(stage_feat[:-1])
prev_chs = stage_feat[-1]['num_chs']
if cfg.num_features:
self.num_features = int(round(cfg.width_factor * cfg.num_features))
self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1)
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.feature_info += [
dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')]
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
# init weights
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.final_conv(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights(zero_init_last=zero_init_last)
def _create_byobnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
default_cfg=default_cfgs[variant],
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs) | |
gen_HttpConnDict.rs | #![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "wasm-bindgen" {
# [wasm_bindgen (extends = :: js_sys :: Object , js_name = HttpConnDict)]
#[derive(Debug, Clone, PartialEq, Eq)]
#[doc = "The `HttpConnDict` dictionary."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `HttpConnDict`*"]
pub type HttpConnDict;
}
impl HttpConnDict {
#[doc = "Construct a new `HttpConnDict`."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `HttpConnDict`*"]
pub fn new() -> Self {
#[allow(unused_mut)]
let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new());
ret
}
#[doc = "Change the `connections` field of this object."]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `HttpConnDict`*"]
pub fn connections(&mut self, val: &::wasm_bindgen::JsValue) -> &mut Self |
}
| {
use wasm_bindgen::JsValue;
let r = ::js_sys::Reflect::set(
self.as_ref(),
&JsValue::from("connections"),
&JsValue::from(val),
);
debug_assert!(
r.is_ok(),
"setting properties should never fail on our dictionary objects"
);
let _ = r;
self
} |
stream.py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
# | #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_sdk_model.interfaces.audioplayer.caption_data import CaptionData as CaptionData_e119f120
class Stream(object):
"""
:param expected_previous_token:
:type expected_previous_token: (optional) str
:param token:
:type token: (optional) str
:param url:
:type url: (optional) str
:param offset_in_milliseconds:
:type offset_in_milliseconds: (optional) int
:param caption_data:
:type caption_data: (optional) ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData
"""
deserialized_types = {
'expected_previous_token': 'str',
'token': 'str',
'url': 'str',
'offset_in_milliseconds': 'int',
'caption_data': 'ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData'
} # type: Dict
attribute_map = {
'expected_previous_token': 'expectedPreviousToken',
'token': 'token',
'url': 'url',
'offset_in_milliseconds': 'offsetInMilliseconds',
'caption_data': 'captionData'
} # type: Dict
supports_multiple_types = False
def __init__(self, expected_previous_token=None, token=None, url=None, offset_in_milliseconds=None, caption_data=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[int], Optional[CaptionData_e119f120]) -> None
"""
:param expected_previous_token:
:type expected_previous_token: (optional) str
:param token:
:type token: (optional) str
:param url:
:type url: (optional) str
:param offset_in_milliseconds:
:type offset_in_milliseconds: (optional) int
:param caption_data:
:type caption_data: (optional) ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData
"""
self.__discriminator_value = None # type: str
self.expected_previous_token = expected_previous_token
self.token = token
self.url = url
self.offset_in_milliseconds = offset_in_milliseconds
self.caption_data = caption_data
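# Illustrative usage (values are made up, not from the SDK docs):
#   Stream(token='track-1', url='https://example.com/a.mp3', offset_in_milliseconds=0).to_dict()
# returns a dict keyed by the python attribute names, e.g.
#   {'expected_previous_token': None, 'token': 'track-1',
#    'url': 'https://example.com/a.mp3', 'offset_in_milliseconds': 0, 'caption_data': None}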
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Stream):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other | # http://aws.amazon.com/apache2.0/ |
southxchange.py | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class southxchange (Exchange):
def describe(self):
return self.deep_extend(super(southxchange, self).describe(), {
'id': 'southxchange',
'name': 'SouthXchange',
'countries': 'AR', # Argentina
'rateLimit': 1000,
'hasFetchTickers': True,
'hasCORS': False,
'hasWithdraw': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27838912-4f94ec8a-60f6-11e7-9e5d-bbf9bd50a559.jpg',
'api': 'https://www.southxchange.com/api',
'www': 'https://www.southxchange.com',
'doc': 'https://www.southxchange.com/Home/Api',
},
'api': {
'public': {
'get': [
'markets',
'price/{symbol}',
'prices',
'book/{symbol}',
'trades/{symbol}',
],
},
'private': {
'post': [
'cancelMarketOrders',
'cancelOrder',
'generatenewaddress',
'listOrders',
'listBalances',
'placeOrder',
'withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
},
})
def fetch_markets(self):
markets = self.publicGetMarkets()
result = []
for p in range(0, len(markets)):
market = markets[p]
base = market[0]
quote = market[1]
symbol = base + '/' + quote
id = symbol
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
balances = self.privatePostListBalances()
if not balances:
raise ExchangeError(self.id + ' fetchBalance got an unrecognized response')
result = {'info': balances}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['Currency']
uppercase = currency.upper()
free = float(balance['Available'])
used = float(balance['Unconfirmed'])
total = self.sum(free, used)
account = {
'free': free,
'used': used,
'total': total,
}
result[uppercase] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
self.load_markets()
orderbook = self.publicGetBookSymbol(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook, None, 'BuyOrders', 'SellOrders', 'Price', 'Amount')
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_float(ticker, 'Bid'),
'ask': self.safe_float(ticker, 'Ask'),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': self.safe_float(ticker, 'Last'),
'change': self.safe_float(ticker, 'Variation24Hr'),
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'Volume24Hr'),
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetPrices(params)
tickers = self.index_by(response, 'Market')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
ticker = self.publicGetPriceSymbol(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def | (self, trade, market):
timestamp = trade['At'] * 1000
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': None,
'order': None,
'type': None,
'side': trade['Type'],
'price': trade['Price'],
'amount': trade['Amount'],
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTradesSymbol(self.extend({
'symbol': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
order = {
'listingCurrency': market['base'],
'referenceCurrency': market['quote'],
'type': side,
'amount': amount,
}
if type == 'limit':
order['limitPrice'] = price
response = self.privatePostPlaceOrder(self.extend(order, params))
return {
'info': response,
'id': str(response),
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
return self.privatePostCancelOrder(self.extend({
'orderCode': id,
}, params))
def withdraw(self, currency, amount, address, tag=None, params={}):
response = self.privatePostWithdraw(self.extend({
'currency': currency,
'address': address,
'amount': amount,
}, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({
'key': self.apiKey,
'nonce': nonce,
}, query)
body = self.json(query)
headers = {
'Content-Type': 'application/json',
'Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
return response
| parse_trade |
extract_strings_qt.py | #!/usr/bin/env python3
# Copyright (c) 2012-2019 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/vadercoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
|
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','--from-code=utf-8','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *vadercoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("vadercoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("vadercoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| msgid.append(line) |
permissions.js | export const SYSADMIN = 'SYSADMIN'; | ||
context.go | // Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"context"
)
type cfgMgrKey struct{}
// FromContext returns CfgManager from context
func FromContext(ctx context.Context) (*CfgManager, bool) {
m, ok := ctx.Value(cfgMgrKey{}).(*CfgManager)
return m, ok
}
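// Usage sketch (illustrative, not part of this package):
//
//	ctx = NewContext(ctx, mgr)
//	if m, ok := FromContext(ctx); ok {
//		_ = m // the *CfgManager attached to the context
//	}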
// NewContext returns context with CfgManager
func NewContext(ctx context.Context, m *CfgManager) context.Context | {
return context.WithValue(ctx, cfgMgrKey{}, m)
} |
|
MaximumTradingProfit.py | # coding=utf-8
"""Maximum trade profit problem dynamic programming solution Python implementation."""
def mx_profit(prices):
|
if __name__ == "__main__":
prices = [2, 30, 15, 10, 8, 25, 80]
print(mx_profit(prices))
| n = len(prices)
profit = [0] * n
mxp = prices[n - 1]
for i in range(n - 2, -1, -1):
mxp = max(mxp, prices[i])
profit[i] = max(profit[i + 1], mxp - prices[i])
mnp = prices[0]
for i in range(1, n):
mnp = min(mnp, prices[i])
profit[i] = max(profit[i - 1], profit[i] + (prices[i] - mnp))
return profit[n - 1] |
test58.js | var callbackArguments = [];
var argument1 = function callback(a,b,c,d) {
callbackArguments.push(JSON.stringify(arguments))
argument3[1] = [843,595,823,0,607,823]
base_0 = 82
argument3[0] = false
return a*b*c/d
};
var argument2 = function callback(a,b,c,d) {
callbackArguments.push(JSON.stringify(arguments))
argument4[1] = 7.335461004710814e+307
argument4[0] = {"r":"iT","T":1.3571612592489743e+308,"8.717758929396265e+307":"","":""}
argument4['!j'] = true
return a-b*c+d
};
var argument3 = function callback(a,b,c,d) {
callbackArguments.push(JSON.stringify(arguments))
base_2[0] = [627,"ZX"]
argument4['w'] = 5e-324
argument5[6] = ""
return a*b+c*d
};
var argument4 = 126;
var argument5 = function callback(a,b,c,d) {
callbackArguments.push(JSON.stringify(arguments))
base_3[5] = null
base_3[4][3] = null
return a-b*c-d
};
var base_0 = ["zm","c","#","%VWC","<"]
var r_0= undefined
try {
r_0 = base_0.reduceRight(argument1)
}
catch(e) {
r_0= "Error"
}
var base_1 = ["zm","c","#","%VWC","<"]
var r_1= undefined
try {
r_1 = base_1.reduceRight(argument2)
}
catch(e) {
r_1= "Error"
}
var base_2 = ["zm","c","#","%VWC","<"]
var r_2= undefined
try {
r_2 = base_2.reduceRight(argument3,argument4)
}
catch(e) {
r_2= "Error"
}
var base_3 = ["zm","c","#","%VWC","<"]
var r_3= undefined
try {
r_3 = base_3.reduceRight(argument5) | }
catch(e) {
r_3= "Error"
}
function serialize(array){
return array.map(function(a){
if (a === null || a == undefined) return a;
var name = a.constructor.name;
if (name==='Object' || name=='Boolean'|| name=='Array'||name=='Number'||name=='String')
return JSON.stringify(a);
return name;
});
}
setTimeout(function(){
require("fs").writeFileSync("./experiments/reduceRight/reduceRightGen/test58.json",JSON.stringify({"baseObjects":serialize([base_0,base_1,base_2,base_3]),"returnObjects":serialize([r_0,r_1,r_2,r_3]),"callbackArgs":callbackArguments}))
},300) | |
opt.rs | use std::path::PathBuf;
use clap::{ArgEnum, Args, Parser, Subcommand};
/// Encrypt, decrypt and solve classical ciphers.
#[derive(Parser, Debug)]
#[clap(name = "cipher", author = "Tom Thorogood <[email protected]>")]
pub enum Opt {
/// Generate completion scripts
Completions {
/// Output file to write completion to, if unspecified then writes to
/// stdout
#[clap(short, long, parse(from_os_str))]
output: Option<PathBuf>,
/// Shell to generate completions for
#[clap(arg_enum)]
shell: Shell,
}, | #[clap(subcommand)]
sub: LangCmd,
},
/// Perform statistical analysis on a ciphertext
Stats {
/// If present, overrides the selected lang and uses the value given
#[clap(global = true, short, long)]
lang: Option<String>,
/// Text to analyse. If not present then read from stdin
#[clap(global = true, short, long)]
text: Option<String>,
#[clap(subcommand)]
cmd: StatsCmd,
},
/// Encrypt a plaintext with a cipher. Ciphers are specified with
/// the submodules
Encrypt(CryptCmd),
/// Decrypt a ciphertext with a cipher. Ciphers are specified with
/// the submodules
Decrypt(CryptCmd),
/// Solve a ciphertext. Use submodules to solve a specific cipher.
/// If no cipher is specified, the input will be solved by analysing
/// the text and trying likely ciphers
Solve {
/// The cipher to solve as. If not specified, the message will be
/// automatically solved
#[clap(subcommand)]
cipher: Option<CipherSolveCmd>,
/// A crib to aid in solving. This may not always be used
#[clap(global = true, short, long)]
crib: Option<String>,
/// The position of the crib within the ciphertext
#[clap(global = true, long, short = 'p', requires("crib"))]
crib_pos: Option<usize>,
/// Display the key once solved
#[clap(global = true, short = 'k', long)]
show_key: bool,
/// Hide the plaintext once solved
#[clap(global = true, short = 'T', long)]
no_plain: bool,
/// Control the scoring statistics used to break the cipher
#[clap(global = true, arg_enum, short = 's', long = "size")]
stats_size: Option<StatsSizeOpt>,
/// If present, overrides the selected lang and uses the value given
#[clap(global = true, short, long)]
lang: Option<String>,
/// The text to solve, if not specified then read from stdin
#[clap(global = true, short, long)]
text: Option<String>,
},
}
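// Illustrative invocations of the CLI defined above (flag values are made up):
//   cipher encrypt caesar --shift 3 --text "attack at dawn"
//   cipher solve --show-key --text "dwwdfn dw gdzq"
//   cipher stats ioc --text "some ciphertext"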
#[derive(ArgEnum, Clone, Copy, Debug)]
pub enum StatsSizeOpt {
Unigrams,
Bigrams,
Trigrams,
Quadgrams,
}
#[derive(ArgEnum, Clone, Copy, Debug)]
pub enum Shell {
Bash,
PowerShell,
}
#[derive(Subcommand, Debug)]
pub enum LangCmd {
/// List all languages
List,
/// Select a language
Set {
/// Name of the language
#[clap(short, long)]
lang: String,
},
/// Select an alphabet. You can view the current selection with `lang list`
SetAlph {
/// Language to select alphabet for
#[clap(short, long)]
lang: Option<String>,
/// Length of alphabet to select
#[clap(short, long)]
length: usize,
},
/// Remove a language
#[clap(name = "rm")]
Remove {
/// Name of the language to remove
#[clap(short, long)]
name: String,
/// Remove without asking for confirmation
#[clap(short, long)]
force: bool,
},
/// Add a new language
New {
/// Name of the language to add
#[clap(short, long)]
name: String,
/// Uppercase alphabet
#[clap(short, long)]
upper: String,
/// Lowercase alphabet
#[clap(short, long)]
lower: String,
/// Text corpus, if not present then read from stdin
#[clap(short, long)]
corpus: Option<String>,
},
/// Change an existing language, adding or overwriting a cipher
/// alphabet
Alphabet {
/// Name of the language to add the alphabet to
#[clap(short, long)]
name: String,
/// Uppercase alphabet
#[clap(short, long)]
upper: String,
/// Lowercase alphabet
#[clap(short, long)]
lower: String,
/// Uppercase letters which should be removed from the alphabet
/// when scoring
#[clap(long)]
discard_upper: String,
/// Lowercase letters which should be removed from the alphabet
/// when scoring
#[clap(long)]
discard_lower: String,
/// Text corpus, if not present then read from stdin
#[clap(short, long)]
corpus: Option<String>,
},
}
#[derive(Subcommand, Debug)]
pub enum StatsCmd {
/// Display a graph showing periodic index of coincidence
Periodic {
/// If present, sets the width of the graph
#[clap(short, long, default_value = "60")]
width: usize,
/// If present, consider the characters given rather than the
/// language's alphabet
#[clap(short, long)]
alphabet: Option<String>,
},
/// Display a chart showing letter frequency
Freq {
/// If present, also show frequencies for whitespace characters
#[clap(short, long)]
whitespace: bool,
/// If present, also show frequencies for all other (non-whitespace)
/// characters
#[clap(short, long)]
punct: bool,
/// If present, consider the characters given rather than the
/// language's alphabet
#[clap(short, long)]
alphabet: Option<String>,
},
/// Display the index of coincidence of the text
Ioc {
/// If present, consider the characters given rather than the
/// language's alphabet
#[clap(short, long)]
alphabet: Option<String>,
},
/// Display the text length and its factors
Length {
/// If present, consider the characters given rather than the
/// language's alphabet
#[clap(short, long)]
alphabet: Option<String>,
},
/// Display the chi squared value for the text
ChiSquared,
/// Display the Unigram score for the text
Unigram,
/// Display the Bigram score for the text
Bigram,
/// Display the Trigram score for the text
Trigram,
/// Display the Quadgram score for the text
Quadgram,
}
#[derive(Args, Debug)]
pub struct CryptCmd {
/// The algorithm to use
#[clap(subcommand)]
pub cipher: CipherCmd,
/// If present, overrides the selected lang and uses the value given
#[clap(global = true, short, long)]
pub lang: Option<String>,
/// The text to encrypt/decrypt, if not specified then read from stdin
#[clap(global = true, short, long)]
pub text: Option<String>,
}
#[derive(Subcommand, Debug)]
pub enum CipherCmd {
/// The Affine cipher
Affine {
/// Affine coefficient, a
#[clap(short, long)]
a: i32,
/// Affine constant, b
#[clap(short, long)]
b: i32,
},
/// The Atbash cipher
Atbash,
/// The Caesar cipher
Caesar {
/// Caesar shift
#[clap(short, long)]
shift: i32,
},
/// The classic Vigenère cipher
ClassicVigenere {
/// Keyword
#[clap(short, long)]
keyword: String,
},
/// The Railfence cipher
Railfence {
/// Number of rails
#[clap(short, long)]
rails: i32,
},
/// The Rot13 cipher
Rot13,
/// The Scytale cipher
Scytale {
/// Number of faces
#[clap(short, long)]
faces: i32,
},
/// The Substitution cipher
Substitution {
/// Keyword or alphabet
#[clap(short, long)]
keyword: String,
},
}
#[derive(Subcommand, Debug)]
pub enum CipherSolveCmd {
/// The Affine cipher
Affine,
/// The Atbash cipher
Atbash,
/// The Caesar cipher
Caesar,
/// The classic Vigenère cipher
ClassicVigenere {
/// Maximum key length to try
#[clap(long, default_value = "30")]
max_key_length: usize,
},
/// The Railfence cipher
Railfence,
/// The Rot13 cipher
Rot13,
/// The Scytale cipher
Scytale,
/// The Substitution cipher
Substitution {
/// Limit to the number of iterations that the algorithm should run for
#[clap(long, default_value = "2000")]
max_iterations: usize,
/// Number of times that a solution must be reached to determine that it
/// is the optimal solution
#[clap(long, default_value = "5")]
min_repetitions: usize,
},
} | /// Manage language configuration
Lang { |
bufmoduleprotoparse.go | // Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufmoduleprotoparse
import (
"context"
"io"
"github.com/bufbuild/buf/internal/buf/bufanalysis"
"github.com/bufbuild/buf/internal/buf/bufmodule"
"github.com/bufbuild/buf/internal/pkg/normalpath"
"github.com/jhump/protoreflect/desc/protoparse"
)
// ParserAccessorHandler handles ParserAccessor operations for protoparse.
type ParserAccessorHandler interface {
// Open opens the given path, and tracks the external path and import status.
//
// This function can be used as a ParserAccessor for protoparse.
Open(path string) (io.ReadCloser, error)
// ExternalPath returns the external path for the input path.
//
// Returns the input path if the external path is not known.
ExternalPath(path string) string
// IsImport returns true if the path is an import.
IsImport(path string) bool
// ModuleIdentity returns the ModuleIdentity for the path, or nil if not available.
ModuleIdentity(path string) bufmodule.ModuleIdentity
// Commit returns the commit for the path, or empty if not available.
Commit(path string) string
}
// NewParserAccessorHandler returns a new ParserAccessorHandler.
//
// The given module should be a bufmodule.ModuleFileSet for image builds, as it needs
// access to not just the target files, but all dependency files as well.
//
// For AST building, this can just be a bufmodule.Module.
func NewParserAccessorHandler(ctx context.Context, module bufmodule.Module) ParserAccessorHandler {
return newParserAccessorHandler(ctx, module)
}
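// Wiring sketch (illustrative): the handler's Open method matches protoparse's FileAccessor
// signature, so it can be passed to a parser directly:
//
//	handler := NewParserAccessorHandler(ctx, moduleFileSet)
//	parser := protoparse.Parser{Accessor: handler.Open}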
// GetFileAnnotations gets the FileAnnotations for the ErrorWithPos errors.
func GetFileAnnotations(
ctx context.Context,
parserAccessorHandler ParserAccessorHandler,
errorsWithPos []protoparse.ErrorWithPos,
) ([]bufanalysis.FileAnnotation, error) {
fileAnnotations := make([]bufanalysis.FileAnnotation, 0, len(errorsWithPos))
for _, errorWithPos := range errorsWithPos {
fileAnnotation, err := GetFileAnnotation(
ctx,
parserAccessorHandler,
errorWithPos,
)
if err != nil {
return nil, err
}
fileAnnotations = append(fileAnnotations, fileAnnotation)
}
return fileAnnotations, nil
}
// GetFileAnnotation gets the FileAnnotation for the ErrorWithPos error.
func GetFileAnnotation(
ctx context.Context,
parserAccessorHandler ParserAccessorHandler,
errorWithPos protoparse.ErrorWithPos,
) (bufanalysis.FileAnnotation, error) {
var fileInfo bufmodule.FileInfo
var startLine int
var startColumn int
var endLine int
var endColumn int
typeString := "COMPILE"
message := "Compile error."
// this should never happen
// maybe we should error
if errorWithPos.Unwrap() != nil |
sourcePos := protoparse.SourcePos{}
if errorWithSourcePos, ok := errorWithPos.(protoparse.ErrorWithSourcePos); ok {
if pos := errorWithSourcePos.Pos; pos != nil {
sourcePos = *pos
}
}
if sourcePos.Filename != "" {
path, err := normalpath.NormalizeAndValidate(sourcePos.Filename)
if err != nil {
return nil, err
}
fileInfo, err = bufmodule.NewFileInfo(
path,
parserAccessorHandler.ExternalPath(path),
parserAccessorHandler.IsImport(path),
nil,
"",
)
if err != nil {
return nil, err
}
}
if sourcePos.Line > 0 {
startLine = sourcePos.Line
endLine = sourcePos.Line
}
if sourcePos.Col > 0 {
startColumn = sourcePos.Col
endColumn = sourcePos.Col
}
return bufanalysis.NewFileAnnotation(
fileInfo,
startLine,
startColumn,
endLine,
endColumn,
typeString,
message,
), nil
}
| {
message = errorWithPos.Unwrap().Error()
} |
main.js | 'use strict';
angular.module('cylon')
.controller('MainCtrl', function ($scope,$http) {
var main= this;
main.robotname = null;
main._fetchRobots = function(){
$http({url:"http://127.0.0.1:4321/Robots",method:"GET"}).success(function(result){
main.robotname = result[0].name;
});
}; | main._fetchRobots();
main.ChangeColor = function(color){
var value = {'color':color};
    $http({url:"http://127.0.0.1:4321/robots/"+main.robotname+"/devices/sphero/commands/setColor", method:"POST", data:value}).success(function(){
      console.log("Received");
});
console.log("Sending "+color+"....");
}
}); | |
test_classify_pp.py | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from six import itervalues, iteritems
from ctypes import *
import numpy as np
import os, sys
from vai.dpuv1.rt import xdnn, xdnn_io
from vai.dpuv1.rt.vitis.python.dpu.runner import Runner
import waa_rt
import multiprocessing as mp
import ctypes
def pre_process(q,args):
xclbin_p=str(args['xclbin']+"/xdnn_v3_96x16_2pe_8b_9mb_bank03.xclbin")
kernelName_p="pp_pipeline_accel"
deviceIdx_p=args['deviceid']
fpga_pp = waa_rt.PreProcess(xclbin_p,kernelName_p,deviceIdx_p, 0)
batch_sz = args['batch_sz']
img_paths = xdnn_io.getFilePaths(args['images'])
print("Pre-processing handle created. Populating Queue")
for i in range(0, len(img_paths), batch_sz):
for j, p in enumerate(img_paths[i:i + batch_sz]):
arr, ht = fpga_pp.preprocess_input(p)
q.put(arr)
print("Queue populated")
def process_xdnn(q,args):
|
if __name__ == '__main__':
print("\n\n\n\n\n\n\n\n" + '\33[32m' + "Running Inference with HW Pre-processing" + '\33[0m')
args = xdnn_io.processCommandLine()
#Create a queue for passing the pre-processed data
q = mp.Queue()
#Creating a process to run HW pre-processing kernel
p_preprocess = mp.Process(target=pre_process,args=(q,args))
#Process to run XDNN
p_xdnn = mp.Process(target=process_xdnn,args=(q,args))
p_preprocess.start()
p_xdnn.start()
p_preprocess.join()
p_xdnn.join()
| runner = Runner(args['vitis_rundir'])
inTensors = runner.get_input_tensors()
outTensors = runner.get_output_tensors()
batch_sz = args['batch_sz']
if batch_sz == -1:
# use Runner's suggested batch size
batch_sz = inTensors[0].dims[0]
if args['golden']:
goldenMap = xdnn_io.getGoldenMap(args['golden'])
top5Count = 0
top1Count = 0
fpgaBlobs = []
for io in [inTensors, outTensors]:
blobs = []
for t in io:
shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
blobs.append(np.empty((shape), dtype=np.float32, order='C'))
fpgaBlobs.append(blobs)
img_paths = xdnn_io.getFilePaths(args['images'])
labels = xdnn_io.get_labels(args['labels'])
xdnnCPUOp = xdnn.XDNNCPUOp("%s/weights.h5" % args['vitis_rundir'])
fcOutput = np.empty((batch_sz, args['outsz'],), dtype=np.float32, order='C')
fpgaInput = fpgaBlobs[0][0]
for i in range(0, len(img_paths), batch_sz):
pl = []
# fill tensor input data from image file
for j, p in enumerate(img_paths[i:i + batch_sz]):
img, _ = q.get(), None
pl.append(p)
np.copyto(fpgaInput[j], img)
jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
runner.wait(jid)
xdnnCPUOp.computeFC(fpgaBlobs[1][0], fcOutput)
softmaxOut = xdnnCPUOp.computeSoftmax(fcOutput)
if args['golden']:
for j,p in enumerate(img_paths[i:i + batch_sz]):
top1Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 1)
top5Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 5)
else:
xdnn_io.printClassification(softmaxOut, pl, labels)
if args['golden']:
print ( ("\nAverage accuracy (n=%d) Top-1: %.1f%%, Top-5: %.1f%%\n") % (len(img_paths), float(top1Count)/float(len(img_paths))*100., float(top5Count)/float(len(img_paths))*100.) ) |
WebSocketShard.ts | import EventEmitter from 'events'
import WebSocketManager from './WebSocketManager'
export default class WebSocketShard extends EventEmitter { |
constructor(manager: WebSocketManager, id: number) {
super()
this.manager = manager
this.id = id
}
} | public manager: WebSocketManager
public id: number |
FinanceAction.ts | import connect from '../connect'; | name: string;
type: string;
fields?: string;
nominal: number;
date: Date;
}
async function insertFinance(props: IInsert) {
const {name, type, fields, nominal, date} = props;
const realm = await connect();
try {
realm.write(() => {
realm.create(finance, {
_id: Date.now(),
name,
type,
fields,
nominal,
date,
createDate: new Date(),
updateDate: new Date(),
});
});
return true;
} catch (error) {
    console.error('Error when inserting Finance => ', error);
}
realm.close();
}
// async function updateFinance() {}
// async function deleteFinance() {}
async function getFinance() {
const realm = await connect();
try {
let data = realm.objects(finance).sorted('date', true);
return data;
} catch (error) {
    console.error('Error when getting Finance => ', error);
}
}
async function getFinanceStatistic() {
const realm = await connect();
try {
let data = realm.objects(finance);
return data;
} catch (error) {}
}
export default {insertFinance, getFinance, getFinanceStatistic}; |
const finance = 'Finance';
interface IInsert { |
mod.rs | use super::get_contents;
use std::collections::HashMap;
pub fn main() {
println!("Day-16 part 1: {}", part_one());
println!("Day-16 part 2: {}", part_two());
}
fn part_one() -> u64 {
let contents = get_contents("src/days/day_16/input.txt");
let packets = parse_packets(contents);
packets.into_iter().map(|(_, p)| p.version as u64).sum()
}
fn part_two() -> u64 {
let contents = get_contents("src/days/day_16/input.txt");
let packets = parse_packets(contents);
let mut outermost_packet = packets.get(&1).unwrap().clone();
evaluate_packet(&mut outermost_packet);
outermost_packet.value.unwrap()
}
#[derive(Clone, Debug)]
struct Packet {
id: u64,
version: u8,
type_id: TypeId,
length_type: LengthType,
value: Option<u64>,
subpackets: Vec<Packet>,
}
impl Packet {
fn new(id: u64, version: u8, type_id: TypeId) -> Self {
Packet {
id,
version,
type_id,
length_type: LengthType::None,
value: None,
subpackets: Vec::new(),
}
}
}
#[derive(Clone, PartialEq, PartialOrd)]
enum ParserState {
Version,
TypeId,
LengthType,
Length,
Value(u8),
}
#[derive(Clone, Debug, PartialEq, PartialOrd)]
enum TypeId {
Sum, // 0
Product, // 1
Minimum, // 2
Maximum, // 3
Literal, // 4
GreaterThan, // 5
LessThan, // 6
EqualTo, // 7
}
#[derive(Clone, PartialEq, PartialOrd, Debug)]
enum LengthType {
Bits(u16, u16), // 0 - length 15 bits
Subpackets(u16, u16), // 1 - length 11 bits
None,
}
#[rustfmt::skip]
fn parse_packets(contents: String) -> HashMap<u64, Packet> {
use self::LengthType::*;
use self::TypeId::*;
let bin_string = hex_to_bin(&contents);
let bits = bin_string.chars().collect::<Vec<char>>();
let mut packets = HashMap::new();
let mut stack: Vec<Packet> = Vec::new();
let mut state = ParserState::Version;
let mut buffer = String::new();
let mut last_value_bit = false;
let mut current_id = 0_u64;
let mut current_packet: Packet = Packet::new(current_id, 0, Literal);
for bit in bits {
match state {
ParserState::Value(0) => {},
_ => buffer.push(bit),
}
// if we are currently parsing subpackets,
// we may need to increment the parents bit counter!
for packet in stack.iter_mut() {
match packet.length_type {
Bits(c, t) if c < t => {
packet.length_type = Bits(c + 1, t);
},
_ => |
}
}
match state {
ParserState::Version if buffer.len() >= 3 => {
current_packet.version = bin_to_dec(&buffer) as u8;
buffer.clear();
state = ParserState::TypeId;
}
ParserState::TypeId if buffer.len() >= 3 => {
current_id += 1;
current_packet.id = current_id;
match bin_to_dec(&buffer) {
0 => current_packet.type_id = Sum,
1 => current_packet.type_id = Product,
2 => current_packet.type_id = Minimum,
3 => current_packet.type_id = Maximum,
4 => current_packet.type_id = Literal,
5 => current_packet.type_id = GreaterThan,
6 => current_packet.type_id = LessThan,
7 => current_packet.type_id = EqualTo,
_ => panic!("Invalid type id! Invalid input or parser erro!"),
}
if current_packet.type_id == Literal {
state = ParserState::Value(0);
} else {
state = ParserState::LengthType;
};
buffer.clear();
}
ParserState::LengthType => {
current_packet.length_type = if bin_to_dec(&buffer) == 0 {
Bits(0, 0)
} else {
Subpackets(0, 0)
};
buffer.clear();
state = ParserState::Length;
}
ParserState::Length => {
match current_packet.length_type {
Bits(..) if buffer.len() >= 15 => {
current_packet.length_type = Bits(0, bin_to_dec(&buffer) as u16);
buffer.clear();
state = ParserState::Version;
stack.push(current_packet.clone());
},
Subpackets(..) if buffer.len() >= 11 => {
current_packet.length_type = Subpackets(0, bin_to_dec(&buffer) as u16);
buffer.clear();
state = ParserState::Version;
stack.push(current_packet.clone());
},
_ => {}
}
}
ParserState::Value(k) => {
if k == 0 {
last_value_bit = bit == '0';
state = ParserState::Value(1);
} else if k < 4 {
state = ParserState::Value(k+1);
} else if !last_value_bit {
state = ParserState::Value(0);
} else {
current_packet.value = Some(bin_to_dec(&buffer));
buffer.clear();
packets.insert(current_id, current_packet.clone());
// the current packet is done
// if we are currently parsing subpackets,
// we need to increment the parents counters!
if !stack.is_empty() {
let mut parent_packet = stack.pop().unwrap();
if let Subpackets(c, t) = parent_packet.length_type {
parent_packet.length_type = Subpackets(c + 1, t);
}
parent_packet.subpackets.push(current_packet.clone());
stack.push(parent_packet);
}
state = ParserState::Version;
}
},
_ => {}
}
// if we are currently parsing subpackets,
// we need to check whether the parent packet is done!
// NOTE: if operator packets are nested, this will require
// iteration or recursion through the stack!
let mut may_pop = true;
while !stack.is_empty() && may_pop {
may_pop = match stack[stack.len() - 1].length_type {
Subpackets(c, t) if c == t => {
let popped_parent = stack.pop().unwrap();
packets.insert(popped_parent.id, popped_parent.clone());
if !stack.is_empty() {
let mut prev_parent = stack.pop().unwrap();
if let Subpackets(c2, t2) = prev_parent.length_type {
prev_parent.length_type = Subpackets(c2 + 1, t2);
}
prev_parent.subpackets.push(popped_parent);
stack.push(prev_parent);
true
} else {
false
}
},
Bits(c, t) if c == t => {
let popped_parent = stack.pop().unwrap();
packets.insert(popped_parent.id, popped_parent.clone());
if !stack.is_empty() {
let mut prev_parent = stack.pop().unwrap();
if let Subpackets(c2, t2) = prev_parent.length_type {
prev_parent.length_type = Subpackets(c2 + 1, t2);
}
prev_parent.subpackets.push(popped_parent);
stack.push(prev_parent);
true
} else {
false
}
},
_ => false,
}
}
}
packets
}
fn hex_to_bin(hex_string: &str) -> String {
let mut bin_string = String::new();
for hex_char in hex_string.chars() {
bin_string.push_str(format!("{:04b}", hex_char.to_digit(16).unwrap()).as_str());
}
bin_string
}
fn bin_to_dec(bin_string: &str) -> u64 {
let mut dec = 0_u64;
for (k, bin_char) in bin_string.chars().enumerate() {
dec +=
2_u64.pow((bin_string.len() - (k + 1)) as u32) * bin_char.to_digit(2).unwrap() as u64;
}
dec
}
fn evaluate_packet(packet: &mut Packet) {
// if the packet is a literal, there is nothing to evaluate
if packet.type_id == TypeId::Literal {
return;
}
// evaluate all subpackets
for subpacket in packet.subpackets.iter_mut() {
evaluate_packet(subpacket);
}
// make the operation
packet.value = match packet.type_id {
TypeId::Sum => Some(packet.subpackets.iter().map(|p| p.value.unwrap()).sum()),
TypeId::Product => Some(packet.subpackets.iter().map(|p| p.value.unwrap()).product()),
TypeId::Minimum => Some(
packet
.subpackets
.iter()
.map(|p| p.value.unwrap())
.min()
.unwrap(),
),
TypeId::Maximum => Some(
packet
.subpackets
.iter()
.map(|p| p.value.unwrap())
.max()
.unwrap(),
),
TypeId::GreaterThan => {
Some((packet.subpackets[0].value.unwrap() > packet.subpackets[1].value.unwrap()) as u64)
}
TypeId::LessThan => {
Some((packet.subpackets[0].value.unwrap() < packet.subpackets[1].value.unwrap()) as u64)
}
TypeId::EqualTo => Some(
(packet.subpackets[0].value.unwrap() == packet.subpackets[1].value.unwrap()) as u64,
),
_ => panic!("This type id should never appear here."),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hex_to_bin() {
assert_eq!(
hex_to_bin("EE00D40C823060"),
"11101110000000001101010000001100100000100011000001100000"
);
}
#[test]
fn test_bin_to_dec() {
assert_eq!(bin_to_dec("110"), 6_u64);
assert_eq!(bin_to_dec("10001"), 17_u64);
assert_eq!(bin_to_dec("10011000"), 152_u64);
assert_eq!(bin_to_dec("10110"), 22_u64);
assert_eq!(bin_to_dec("1010100"), 84_u64);
}
#[test]
fn test_example_1() {
let contents = get_contents("src/days/day_16/example_01.txt");
let packets = parse_packets(contents);
assert_eq!(packets.len(), 4);
assert!(packets.get(&1).unwrap().length_type == LengthType::Subpackets(1, 1));
assert!(packets.get(&2).unwrap().length_type == LengthType::Subpackets(1, 1));
assert!(packets.get(&3).unwrap().length_type == LengthType::Bits(11, 11));
assert!(packets.get(&4).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&4).unwrap().value == Some(15));
}
#[test]
fn test_example_2() {
let contents = get_contents("src/days/day_16/example_02.txt");
let packets = parse_packets(contents);
assert_eq!(packets.len(), 7);
assert!(packets.get(&1).unwrap().length_type == LengthType::Subpackets(2, 2));
assert!(packets.get(&2).unwrap().length_type == LengthType::Bits(22, 22));
assert!(packets.get(&3).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&4).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&5).unwrap().length_type == LengthType::Subpackets(2, 2));
assert!(packets.get(&6).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&7).unwrap().type_id == TypeId::Literal);
}
#[test]
fn test_example_3() {
let contents = get_contents("src/days/day_16/example_03.txt");
let packets = parse_packets(contents);
assert_eq!(packets.len(), 7);
assert!(packets.get(&1).unwrap().length_type == LengthType::Bits(84, 84));
assert!(packets.get(&2).unwrap().length_type == LengthType::Bits(22, 22));
assert!(packets.get(&3).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&4).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&5).unwrap().length_type == LengthType::Subpackets(2, 2));
assert!(packets.get(&6).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&7).unwrap().type_id == TypeId::Literal);
}
#[test]
fn test_example_4() {
let contents = get_contents("src/days/day_16/example_04.txt");
let packets = parse_packets(contents);
assert!(packets.get(&1).unwrap().type_id != TypeId::Literal);
assert!(packets.get(&2).unwrap().type_id != TypeId::Literal);
assert!(packets.get(&3).unwrap().type_id != TypeId::Literal);
assert!(packets.get(&4).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&5).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&6).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&7).unwrap().type_id == TypeId::Literal);
assert!(packets.get(&8).unwrap().type_id == TypeId::Literal);
assert_eq!(packets.len(), 8);
}
#[test]
fn test_example_5() {
let contents = String::from("C200B40A82");
let packets = parse_packets(contents);
let mut packet = packets.get(&1).unwrap().clone();
evaluate_packet(&mut packet);
assert_eq!(packet.value, Some(3));
}
#[test]
fn test_example_6() {
let contents = String::from("880086C3E88112");
let packets = parse_packets(contents);
let mut packet = packets.get(&1).unwrap().clone();
evaluate_packet(&mut packet);
assert_eq!(packet.value, Some(7));
}
}
| {} |
dbtRpcClientBase.ts | import { v4 as uuidv4 } from 'uuid';
import fetch from 'node-fetch';
import {
DbtRpcDocsGenerateResults,
DbtRpcGetManifestResults,
isDbtRpcDocsGenerateResults,
isDbtRpcManifestResults,
isDbtRpcRunSqlResults,
} from 'common';
import {
DbtError,
NetworkError,
NoServerRunningError,
RetryableNetworkError,
} from '../errors';
import { DbtClient, QueryRunner } from '../types';
export const DEFAULT_HEADERS: Record<string, string> = {
'Content-Type': 'application/json',
};
type PollArgs<T> = {
func: () => Promise<T>;
condition: (t: T) => Promise<boolean>;
maxAttempts: number;
startInterval: number;
maxInterval: number;
intervalMultiplier: number;
};
const pollOverNetwork = async <T>({
func,
condition,
maxAttempts,
startInterval,
maxInterval,
intervalMultiplier,
}: PollArgs<T>): Promise<T> => {
let attempts = 0;
let interval = startInterval;
const poll = async (
resolve: (res: T) => void,
reject: (err: any) => void,
) => {
if (attempts >= maxAttempts) {
reject(
new NetworkError(
                    `Lightdash timed out trying to reach the dbt server.`,
{},
),
);
return;
}
try {
const result = await func();
const success = await condition(result);
if (success) {
resolve(result);
return;
}
} catch (e) {
if (!(e instanceof RetryableNetworkError)) {
reject(e);
return;
}
}
attempts += 1;
interval = Math.min(maxInterval, interval * intervalMultiplier);
setTimeout(poll, interval, resolve, reject);
};
return new Promise<T>(poll);
};
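// Illustrative usage sketch (not part of the original file): poll a
// hypothetical async readiness check with exponential backoff until it
// succeeds or the attempt budget is exhausted.
//
//   const ready = await pollOverNetwork<boolean>({
//       func: () => checkReady(),       // hypothetical async check returning a boolean
//       condition: async (ok) => ok,    // stop polling once the check returns true
//       maxAttempts: 10,
//       startInterval: 200,
//       maxInterval: 2000,
//       intervalMultiplier: 1.5,
//   });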
export class | implements DbtClient, QueryRunner {
serverUrl: string;
headers: Record<string, any>;
constructor(
serverUrl: string,
headers: Record<string, any> = DEFAULT_HEADERS,
) {
this.serverUrl = serverUrl;
this.headers = headers;
}
async _post(method: string, params: Object): Promise<Record<string, any>> {
const requestId = uuidv4();
const payload = {
method,
params,
jsonrpc: '2.0',
id: requestId,
};
let data: any = {};
const url = `${this.serverUrl}/?method=${method}`;
try {
const response = await fetch(url, {
method: 'POST',
headers: this.headers,
body: JSON.stringify(payload),
});
data = await response.json();
} catch (e) {
// Network errors or not a json response - server not available or not ready
throw new RetryableNetworkError(`Network error: ${e}, try again.`);
}
if (data === undefined) {
// Server responded with an empty message - unexpected behaviour
throw new NetworkError(
'Unexpected error, dbt returned an empty response',
{},
);
} else if (data.jsonrpc === '2.0') {
if (data.error) {
// Dbt method returned an error
const messages = [];
if (data.error.message) {
messages.push(data.error.message);
}
if (data.error.data?.message) {
messages.push(data.error.data.message);
}
const combinedMessage = messages.join('\n');
throw new DbtError(combinedMessage, data.error);
} else if (
data.result &&
typeof data.result === 'object' &&
data.result !== null
) {
return data.result;
} else {
throw new NetworkError(
'Unexpected error, dbt returned a response with no results',
data,
);
}
}
// We have a json response but not a valid rpc server response
throw new NetworkError('Unexpected response from dbt rpc server', data);
}
private async _isServerResponding(): Promise<boolean> {
const result = await this._post('status', {});
switch (result?.state) {
case 'ready':
case 'error':
case null:
return true;
case 'compiling':
return false;
default:
throw new NetworkError(
'Unexpected result from dbt status',
result,
);
}
}
async _isServerReady(): Promise<boolean> {
const result = await this._post('status', {});
switch (result?.state) {
case 'ready':
return true;
case 'compiling':
return false;
case 'error':
case null:
if (result?.error?.message === undefined) {
throw new NetworkError(
'Unexpected error format received from dbt while checking server status',
result,
);
}
throw new DbtError(
`Dbt Error: ${result.error.message}`,
result,
);
default:
throw new NetworkError(
'Unexpected result from dbt status',
result,
);
}
}
private async _waitForServerReady(): Promise<true> {
await pollOverNetwork({
func: () => this._isServerReady(),
condition: async (x) => x,
startInterval: 200,
maxInterval: 1000,
intervalMultiplier: 1.5,
maxAttempts: 25,
});
return true;
}
private async _waitForServerResponding(): Promise<true> {
await pollOverNetwork({
func: () => this._isServerResponding(),
condition: async (x) => x,
startInterval: 200,
maxInterval: 1000,
intervalMultiplier: 1.5,
maxAttempts: 25,
});
return true;
}
private async _submitJob(
method: string,
params: Record<string, any>,
): Promise<string> {
const results = await this._post(method, params);
if (results.request_token) {
return `${results.request_token}`;
}
if (results?.error?.message) {
if (
results.error.message ===
'No server running! Please restart the server.'
) {
throw new NoServerRunningError(
'No server running! Please restart the server. If you are using dbt cloud make sure you have the IDE for your project open.',
);
}
throw new DbtError(`Dbt Error: ${results.error.message}`, results);
}
throw new NetworkError(
'Unexpected result from dbt while trying to submit new job',
results,
);
}
private async _jobStatus(requestToken: string) {
const result = await this._post('poll', {
request_token: requestToken,
});
if (
result?.logs?.some(
(log: any) => log.message === 'Please log into GCP to continue',
)
) {
throw new DbtError(
`Lightdash cannot connect to dbt because of missing GCP credentials. This error happened because your profiles.yml contains a bigquery profile with method: oauth`,
{},
);
}
return result;
}
private async _waitForJobComplete(
requestToken: string,
): Promise<Record<string, any>> {
const isJobComplete = async (
results: Record<string, any>,
): Promise<boolean> => {
switch (results.state) {
case 'running':
return false;
case 'success':
return true;
default:
throw new NetworkError(
'Unexpected response received from dbt',
results,
);
}
};
const jobResults = await pollOverNetwork<Record<string, any>>({
func: () => this._jobStatus(requestToken),
condition: isJobComplete,
maxAttempts: 20,
startInterval: 500,
intervalMultiplier: 1.5,
maxInterval: 30000,
});
return jobResults;
}
public async getDbtCatalog(): Promise<DbtRpcDocsGenerateResults> {
await this._waitForServerReady();
const requestToken = await this._submitJob('docs.generate', {
compile: false,
});
const jobResults = await this._waitForJobComplete(requestToken);
if (isDbtRpcDocsGenerateResults(jobResults)) {
return jobResults;
}
throw new NetworkError(
'Unknown response received from dbt when generating docs',
jobResults,
);
}
public async installDeps(): Promise<void> {
await this._waitForServerResponding();
const requestToken = await this._submitJob('deps', {});
await this._waitForJobComplete(requestToken);
}
public async getDbtManifest(): Promise<DbtRpcGetManifestResults> {
await this._waitForServerReady();
const requestToken = await this._submitJob('get-manifest', {});
const jobResults = await this._waitForJobComplete(requestToken);
if (isDbtRpcManifestResults(jobResults)) {
return jobResults;
}
throw new NetworkError(
'Unknown response received from dbt when compiling',
jobResults,
);
}
public async runQuery(query: string): Promise<Record<string, any>[]> {
const params = {
name: 'request',
timeout: 60,
sql: Buffer.from(query).toString('base64'),
};
await this._waitForServerReady();
const requestToken = await this._submitJob('run_sql', params);
const results = await this._waitForJobComplete(requestToken);
if (isDbtRpcRunSqlResults(results)) {
const { column_names: columns, rows } = results.results[0].table;
return rows.map((row) =>
Object.fromEntries(
row.map((value: any, index: number) => [
columns[index],
value,
]),
),
);
}
throw new NetworkError(
'Unknown response received from dbt while running query',
results,
);
}
async test(): Promise<void> {
await this.installDeps();
await this.runQuery('SELECT 1');
}
}
| DbtRpcClientBase |
test_harness.js | "use strict";
var couchbase = require('./lib/couchbase.js'),
fs = require('fs'),
util = require('util');
var assert = require('assert');
var configReady = false;
var supportsN1ql = false;
var config;
var configFilename = 'config.json';
if (fs.existsSync(configFilename)) {
config = JSON.parse(fs.readFileSync(configFilename));
} else {
config = {
mock : false,
host : 'localhost:8091',
queryhosts : '',
bucket : 'default',
operationTimeout : 20000,
connectionTimeout : 20000
};
}
if (process.env.CNMOCK !== undefined) {
config.mock = process.env.CNMOCK ? true : false;
}
if (process.env.CNHOST !== undefined) {
config.host = process.env.CNHOST;
}
if (process.env.CNQHOSTS !== undefined) {
config.queryhosts = process.env.CNQHOSTS;
}
if (process.env.CNBUCKET !== undefined) {
config.bucket = process.env.CNBUCKET;
}
if (config.mock) {
couchbase = couchbase.Mock;
}
var isMock = config.mock;
delete config.mock;
// Use direct setup for the moment.
supportsN1ql = config.queryhosts !== '';
configReady = true;
function | (callback) {
// This is so we do late-creation of clients...
var self = this;
Object.defineProperty(this, 'client', {
get : function() {
if (!self._client) {
self._client = self.newClient();
}
return self._client;
},
enumerable: true,
configurable: false,
writeable: false
});
this.supportsN1ql = supportsN1ql;
this.lib = couchbase;
this.errors = couchbase.errors;
this.format = couchbase.format;
this.keySerial = 0;
}
Harness.prototype.newClient = function(oconfig, callback) {
if (oconfig instanceof Function) {
callback = oconfig;
oconfig = undefined;
}
if (!oconfig) {
oconfig = {};
}
if (!configReady) {
throw new Error('newClient before config was ready');
}
var this_config = {};
for (var i in config) {
if (config.hasOwnProperty(i)) {
this_config[i] = config[i];
}
}
for (var j in oconfig) {
if (oconfig.hasOwnProperty(j)) {
this_config[j] = oconfig[j];
}
}
return new couchbase.Connection(this_config, callback);
};
Harness.prototype.genKey = function(prefix) {
if (!prefix) {
prefix = "generic";
}
var ret = "TEST-" +
(process.env.CNTESTPREFIX ? process.env.CNTESTPREFIX : process.pid) +
'-' + prefix + this.keySerial;
this.keySerial++;
return ret;
};
Harness.prototype.genMultiKeys = function(count, prefix) {
var ret = {};
for (var i = 0; i < count; i++) {
var key = this.genKey(prefix);
ret[key] = { value: "value_for_" + key };
}
return ret;
};
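// Wraps `target` in a Node-style callback that asserts no error occurred and
// that a result object was returned before forwarding the result to `target`.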
Harness.prototype.okCallback = function(target) {
// Get the stack
var stack = new Error().stack;
return function(err, result) {
if (err) {
assert(!err, "Got unrecognized error: " + util.inspect(err));
}
assert(typeof result === "object", "Meta is missing");
target(result);
};
};
Harness.prototype.setGet = function(key, value, callback) {
var o = this;
o.client.set(key, value, o.okCallback(function(result) {
o.client.get(key, o.okCallback(function(result) {
callback(result.value);
}));
}));
};
// Skips the test if in no-mock mode.
Harness.prototype.nmIt = function(name, func) {
if (!isMock) {
it(name, func);
} else {
it.skip(name, func);
}
};
module.exports = new Harness();
| Harness |
brackets.rs | //! # Matchmaker Brackets (based on Transient RingBuffer implementation)
//!
//! This pallet provides a trait and implementation for a brackets ranked system that
//! abstracts over storage items and presents them as multiple brackets, with each
//! having a FIFO queue. This allows an implementation of a matchmaker over different
//! ranking brackets.
//!
//! Usage Example:
//! ```rust, ignore
//! use bracketqueues::{BracketsTrait, BracketsTransient};
//!
//! // Trait object that we will be interacting with.
//! type Brackets = dyn BracketsTrait<SomeStruct>;
//! // Implementation that we will instantiate.
//! type Transient = BracketsTransient<
//! SomeStruct,
//! <TestModule as Store>::TestBracketIndices,
//! <TestModule as Store>::TestBracketIndexKeyMap,
//! >;
//! {
//! let mut ring: Box<Brackets> = Box::new(Transient::new());
//! ring.push(SomeStruct { foo: 1, bar: 2 });
//! } // `ring.commit()` will be called on `drop` here and syncs to storage
//! ```
//!
//! Note: You might want to introduce a helper function that wraps the complex
//! types and just returns the boxed trait object.
use codec::{Codec, EncodeLike};
use core::marker::PhantomData;
use frame_support::storage::{StorageDoubleMap, StorageMap, StorageValue};
use sp_std::vec::{Vec};
/// Trait object presenting the brackets interface.
pub trait BracketsTrait<ItemKey, Item>
where
ItemKey: Codec + EncodeLike,
Item: Codec + EncodeLike,
{
/// Store all changes made in the underlying storage.
///
/// Data is not guaranteed to be consistent before this call.
///
/// Implementation note: Call in `drop` to increase ergonomics.
fn commit(&self);
/// Push an item onto the end of the queue.
fn push(&mut self, b: Bracket, j: ItemKey, i: Item) -> bool;
/// Pop an item from the start of the queue.
///
/// Returns `None` if the queue is empty.
fn pop(&mut self, b: Bracket) -> Option<Item>;
/// Return whether the queue is empty.
fn is_empty(&self, b: Bracket) -> bool;
/// Return the size of the brackets queue.
fn size(&self, b: Bracket) -> BufferIndex;
/// Return whether the item_key is queued or not.
fn is_queued(&self, j: ItemKey) -> bool;
}
// There is no equivalent trait in std so we create one.
pub trait WrappingOps {
fn wrapping_add(self, rhs: Self) -> Self;
fn wrapping_sub(self, rhs: Self) -> Self;
}
macro_rules! impl_wrapping_ops {
($type:ty) => {
impl WrappingOps for $type {
fn wrapping_add(self, rhs: Self) -> Self {
self.wrapping_add(rhs)
}
fn wrapping_sub(self, rhs: Self) -> Self {
self.wrapping_sub(rhs)
}
}
};
}
impl_wrapping_ops!(u8);
impl_wrapping_ops!(u16);
impl_wrapping_ops!(u32);
impl_wrapping_ops!(u64);
pub type BufferIndex = u16;
pub type BufferIndexVector = Vec<(BufferIndex, BufferIndex)>;
pub type Bracket = u8;
/// Transient backing data that is the backbone of the trait object.
pub struct BracketsTransient<ItemKey, Item, C, B, M, N>
where
ItemKey: Codec + EncodeLike,
Item: Codec + EncodeLike,
C: StorageValue<Bracket, Query = Bracket>,
B: StorageMap<Bracket, (BufferIndex, BufferIndex), Query = (BufferIndex, BufferIndex)>,
M: StorageDoubleMap<Bracket, BufferIndex, ItemKey, Query = ItemKey>,
N: StorageDoubleMap<Bracket, ItemKey, Item, Query = Item>,
{
index_vector: BufferIndexVector,
_phantom: PhantomData<(ItemKey, Item, C, B, M, N)>,
}
impl<ItemKey, Item, C, B, M, N> BracketsTransient<ItemKey, Item, C, B, M, N>
where
ItemKey: Codec + EncodeLike,
Item: Codec + EncodeLike,
C: StorageValue<Bracket, Query = Bracket>,
B: StorageMap<Bracket, (BufferIndex, BufferIndex), Query = (BufferIndex, BufferIndex)>,
M: StorageDoubleMap<Bracket, BufferIndex, ItemKey, Query = ItemKey>,
N: StorageDoubleMap<Bracket, ItemKey, Item, Query = Item>,
{
/// Create a new `BracketsTransient` that backs the brackets implementation.
///
/// Initializes itself from the bounds storage `B`.
pub fn new() -> BracketsTransient<ItemKey, Item, C, B, M, N> {
// get brackets count
let brackets_count = C::get();
// initialize all brackets
let mut index_vector = Vec::new();
for i in 0..brackets_count {
let (start, end) = B::get(i);
index_vector.push((start, end));
}
BracketsTransient {
index_vector,
_phantom: PhantomData,
}
}
}
impl<ItemKey, Item, C, B, M, N> Drop for BracketsTransient<ItemKey, Item, C, B, M, N>
where
ItemKey: Codec + EncodeLike,
Item: Codec + EncodeLike,
C: StorageValue<Bracket, Query = Bracket>,
B: StorageMap<Bracket, (BufferIndex, BufferIndex), Query = (BufferIndex, BufferIndex)>,
M: StorageDoubleMap<Bracket, BufferIndex, ItemKey, Query = ItemKey>,
N: StorageDoubleMap<Bracket, ItemKey, Item, Query = Item>,
{
/// Commit on `drop`.
fn drop(&mut self) {
<Self as BracketsTrait<ItemKey, Item>>::commit(self);
}
}
/// Brackets implementation based on `BracketsTransient`
impl<ItemKey, Item, C, B, M, N> BracketsTrait<ItemKey, Item> for BracketsTransient<ItemKey, Item, C, B, M, N>
where
ItemKey: Codec + EncodeLike,
Item: Codec + EncodeLike,
C: StorageValue<Bracket, Query = Bracket>,
B: StorageMap<Bracket, (BufferIndex, BufferIndex), Query = (BufferIndex, BufferIndex)>,
M: StorageDoubleMap<Bracket, BufferIndex, ItemKey, Query = ItemKey>,
N: StorageDoubleMap<Bracket, ItemKey, Item, Query = Item>,
{
/// Commit the (potentially) changed bounds to storage.
fn commit(&self) {
        // commit indices on all brackets
for i in 0..self.index_vector.len() {
let (v_start, v_end) = self.index_vector[i];
B::insert(i as Bracket, (v_start, v_end));
}
}
/// Push an item onto the end of the queue.
///
/// Will insert the new item, but will not update the bounds in storage.
fn push(&mut self, bracket: Bracket, item_key: ItemKey, item: Item) -> bool {
let (mut v_start, mut v_end) = self.index_vector[bracket as usize];
// check if there is already such a key queued
if N::contains_key(bracket, &item_key) {
return false
}
// insert the item key and the item
N::insert(bracket, &item_key, item);
M::insert(bracket, v_end, item_key);
        // this will intentionally overflow and wrap around when the end index
        // reaches `Index::max_value` because we want ring-buffer behaviour per bracket.
let next_index = v_end.wrapping_add(1 as u16);
if next_index == v_start {
// queue presents as empty but is not
            // --> overwrite the oldest item in the bracket's FIFO queue
v_start = v_start.wrapping_add(1 as u16);
}
v_end = next_index;
self.index_vector[bracket as usize] = (v_start, v_end);
true
}
/// Pop an item from the start of the queue.
///
/// Will remove the item, but will not update the bounds in storage.
fn pop(&mut self, bracket: Bracket) -> Option<Item> {
if self.is_empty(bracket) {
return None;
}
let (mut v_start, v_end) = self.index_vector[bracket as usize];
let item_key = M::take(bracket, v_start);
let item = N::take(bracket, item_key);
v_start = v_start.wrapping_add(1 as u16);
self.index_vector[bracket as usize] = (v_start, v_end);
item.into()
}
/// Return whether to consider the queue empty.
fn is_empty(&self, bracket: Bracket) -> bool {
let (v_start, v_end) = self.index_vector[bracket as usize];
v_start == v_end
}
    /// Return the current size of the brackets queue as a BufferIndex.
fn size(&self, bracket: Bracket) -> BufferIndex {
let (v_start, v_end) = self.index_vector[bracket as usize];
if v_start <= v_end {
return v_end - v_start
} else {
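            // the indices have wrapped around: the occupied range spans [v_start, MAX] plus [0, v_end)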
return (BufferIndex::MAX - v_start) + v_end;
}
}
/// Return whether the item_key is queued or not.
fn is_queued(&self, item_key: ItemKey) -> bool {
// check all brackets if key is queued
for i in 0..self.index_vector.len() {
if N::contains_key(i as Bracket, &item_key) {
return true
}
}
false
}
}
#[cfg(test)]
mod tests {
use super::*;
use BracketsTrait;
use codec::{Decode, Encode};
use frame_support::{decl_module, decl_storage, impl_outer_origin, parameter_types};
use sp_core::H256;
use sp_runtime::{
testing::Header,
traits::{BlakeTwo256, IdentityLookup},
};
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the pallet, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
pub trait Config: frame_system::Config {}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
}
}
type TestIdx = BufferIndex;
type SomeKey = u64;
#[derive(Clone, PartialEq, Encode, Decode, Default, Debug)]
pub struct SomeStruct {
foo: u64,
bar: u64,
}
decl_storage! {
trait Store for Module<T: Config> as BracketsTest {
TestBracketsCount get(fn get_test_brackets): Bracket = 1; // C
TestBracketIndices get(fn get_test_range): map hasher(twox_64_concat) Bracket => (TestIdx, TestIdx); // B
TestBracketIndexKeyMap get(fn get_test_value): double_map hasher(twox_64_concat) Bracket, hasher(twox_64_concat) TestIdx => SomeKey; // M
TestBracketKeyValueMap get(fn get_test_list): double_map hasher(twox_64_concat) Bracket, hasher(twox_64_concat) SomeKey => SomeStruct; // N
}
}
// https://github.com/paritytech/substrate/pull/8090#issuecomment-776069095
pub struct MockPalletInfo;
impl frame_support::traits::PalletInfo for MockPalletInfo {
fn index<P: 'static>() -> Option<usize> {
Some(0)
}
fn name<P: 'static>() -> Option<&'static str> {
Some("test")
}
}
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub BlockWeights: frame_system::limits::BlockWeights =
frame_system::limits::BlockWeights::simple_max(1024);
}
impl frame_system::Config for Test {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Index = u64;
type Call = ();
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = ();
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type PalletInfo = MockPalletInfo;
type AccountData = ();
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type SS58Prefix = ();
type OnSetCode = ();
}
impl Config for Test {}
type TestModule = Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> sp_io::TestExternalities {
let storage = frame_system::GenesisConfig::default()
.build_storage::<Test>()
.unwrap();
storage.into()
}
// ------------------------------------------------------------
// brackets
// Trait object that we will be interacting with.
type Brackets = dyn BracketsTrait<SomeKey, SomeStruct>;
// Implementation that we will instantiate.
type Transient = BracketsTransient<
SomeKey,
SomeStruct,
<TestModule as Store>::TestBracketsCount,
<TestModule as Store>::TestBracketIndices,
<TestModule as Store>::TestBracketIndexKeyMap,
<TestModule as Store>::TestBracketKeyValueMap,
>;
#[test]
fn | () {
new_test_ext().execute_with(|| {
let bracket: Bracket = 0;
let mut ring: Box<Brackets> = Box::new(Transient::new());
let some_struct = SomeStruct { foo: 1, bar: 2 };
ring.push(bracket, some_struct.foo.clone(), some_struct);
ring.commit();
let start_end = TestModule::get_test_range(0);
assert_eq!(start_end, (0, 1));
let some_key = TestModule::get_test_value(0, 0);
let some_struct = TestModule::get_test_list(0, some_key);
assert_eq!(some_struct, SomeStruct { foo: 1, bar: 2 });
})
}
#[test]
fn size_tests() {
new_test_ext().execute_with(|| {
let bracket: Bracket = 0;
let mut ring: Box<Brackets> = Box::new(Transient::new());
assert_eq!(0, ring.size(bracket));
let some_struct = SomeStruct { foo: 1, bar: 2 };
ring.push(bracket,some_struct.foo.clone(), some_struct);
ring.commit();
let start_end = TestModule::get_test_range(0);
assert_eq!(start_end, (0, 1));
let some_key = TestModule::get_test_value(0, 0);
let some_struct = TestModule::get_test_list(0, some_key);
assert_eq!(some_struct, SomeStruct { foo: 1, bar: 2 });
assert_eq!(1, ring.size(bracket));
let some_struct = SomeStruct { foo: 2, bar: 2 };
ring.push(bracket,some_struct.foo.clone(), some_struct);
ring.commit();
assert_eq!(2, ring.size(bracket));
})
}
#[test]
fn drop_does_commit() {
new_test_ext().execute_with(|| {
// test drop here
{
let bracket: Bracket = 0;
let mut ring: Box<Brackets> = Box::new(Transient::new());
let some_struct = SomeStruct { foo: 1, bar: 2 };
ring.push(bracket,some_struct.foo.clone(), some_struct);
}
let start_end = TestModule::get_test_range(0);
assert_eq!(start_end, (0, 1));
let some_key = TestModule::get_test_value(0, 0);
let some_struct = TestModule::get_test_list(0, some_key);
assert_eq!(some_struct, SomeStruct { foo: 1, bar: 2 });
})
}
#[test]
fn simple_pop() {
new_test_ext().execute_with(|| {
let bracket: Bracket = 0;
let mut ring: Box<Brackets> = Box::new(Transient::new());
let some_struct = SomeStruct { foo: 1, bar: 2 };
ring.push(bracket,some_struct.foo.clone(), some_struct);
let item = ring.pop(bracket);
ring.commit();
assert!(item.is_some());
let start_end = TestModule::get_test_range(0);
assert_eq!(start_end, (1, 1));
})
}
#[test]
fn duplicate_check() {
new_test_ext().execute_with(|| {
let bracket: Bracket = 0;
let mut ring: Box<Brackets> = Box::new(Transient::new());
let some_struct = SomeStruct { foo: 1, bar: 2 };
ring.push(bracket,some_struct.foo.clone(), some_struct);
assert_eq!(1, ring.size(bracket));
assert_eq!(true, ring.is_queued(1));
let some_struct = SomeStruct { foo: 1, bar: 2 };
ring.push(bracket,some_struct.foo.clone(), some_struct);
// no change as its a duplicate
assert_eq!(1, ring.size(bracket));
assert_eq!(false, ring.is_queued(2));
let some_struct = SomeStruct { foo: 2, bar: 2 };
ring.push(bracket,some_struct.foo.clone(), some_struct);
assert_eq!(2, ring.size(bracket));
})
}
#[test]
fn overflow_wrap_around() {
new_test_ext().execute_with(|| {
let bracket: Bracket = 0;
let mut ring: Box<Brackets> = Box::new(Transient::new());
let mut key:u64 = 0;
for i in 1..(TestIdx::max_value() as u64) + 2 {
let some_struct = SomeStruct { foo: key, bar: i };
key = key + 1;
assert_eq!(true, ring.push(bracket,some_struct.foo.clone(), some_struct));
}
ring.commit();
let start_end = TestModule::get_test_range(0);
assert_eq!(
start_end,
(1, 0),
"range should be inverted because the index wrapped around"
);
let item = ring.pop(bracket);
ring.commit();
let (start, end) = TestModule::get_test_range(0);
assert_eq!(start..end, 2..0);
let item = item.expect("an item should be returned");
assert_eq!(
item.bar, 2,
"the struct for field `bar = 2`, was placed at index 1"
);
let item = ring.pop(bracket);
ring.commit();
let (start, end) = TestModule::get_test_range(0);
assert_eq!(start..end, 3..0);
let item = item.expect("an item should be returned");
assert_eq!(
item.bar, 3,
"the struct for field `bar = 3`, was placed at index 2"
);
for i in 1..4 {
let some_struct = SomeStruct { foo: key, bar: i };
key = key + 1;
assert_eq!(true, ring.push(bracket,some_struct.foo.clone(), some_struct));
}
ring.commit();
let start_end = TestModule::get_test_range(0);
assert_eq!(start_end, (4, 3));
})
}
} | simple_push |
lib.rs | #![cfg_attr(not(feature = "bench"), warn(missing_docs))]
#![warn(missing_debug_implementations)]
#![deny(unsafe_code)]
//For async doctests, it is easier to write out the doctest explicitly using tokio::main.
#![allow(clippy::needless_doctest_main)]
//! Asynchronous redis client built using futures and async await, with optional connection pooling.
//! ```
//! use darkredis::*;
//!
//! # #[cfg_attr(feature = "runtime_tokio", tokio::main)]
//! # #[cfg_attr(feature = "runtime_async_std", async_std::main)]
//! # async fn main() {
//! // Create a connection pool with 4 connections
//! let pool = ConnectionPool::create("127.0.0.1:6379".into(), None, 4).await.unwrap();
//! let mut connection = pool.get().await; // Grab a connection from the pool
//!
//! connection.set("some-key", "Hello, world!").await.unwrap();
//! assert_eq!(connection.get("some-key").await.unwrap(), Some("Hello, world!".into()));
//! # connection.del("some-key").await.unwrap();
//! # }
//! ```
#[cfg(all(feature = "runtime_tokio", feature = "runtime_async_std"))]
compile_error!("The `runtime_tokio` and `runtime_async_std` features are mutually exclusive!");
#[cfg(not(any(feature = "runtime_tokio", feature = "runtime_async_std")))]
compile_error!("Expected one of the features `runtime_tokio` or `runtime_async_std`");
#[macro_use]
extern crate quick_error;
mod command;
mod connection;
mod connectionpool;
mod error;
///Export the ToSocketAddrs trait to be used for deadpool-darkredis. You probably won't need this unless you're implementing an adapter crate for a different connection pool. | pub use tokio::net::ToSocketAddrs;
#[cfg(feature = "bench")]
pub mod test;
#[cfg(all(not(feature = "bench"), test))]
mod test;
pub use command::{Command, CommandList};
pub use connection::{
builder::MSetBuilder, Connection, HScanBuilder, HScanStream, Message, MessageStream, PMessage,
PMessageStream, ResponseStream, ScanBuilder, ScanStream,
};
pub use connectionpool::ConnectionPool;
pub use error::Error;
///Result type used in the whole crate.
pub type Result<T> = std::result::Result<T, Error>;
///Enum depicting the various possible responses one can get from Redis.
#[derive(Debug, PartialEq)]
pub enum Value {
///A Redis `OK` response.
Ok,
///Nil Response.
Nil,
///Array response.
Array(Vec<Value>),
///Integer response.
Integer(isize),
///String response. This cannot be a `String` type, because Redis strings need not be valid UTF-8, unlike Rust.
String(Vec<u8>),
}
impl Value {
///Returns the inner `isize` of a [`Value::Integer`](enum.Value.html#Integer.v).
///# Panics
///Panics if `self` is not a [`Value::Integer`](enum.Value.html#Integer.v)
#[inline]
pub fn unwrap_integer(self) -> isize {
if let Value::Integer(i) = self {
i
} else {
panic!("expected integer value, got {:?}", self)
}
}
///Returns the inner `Vec<Value>` of a `Value::Array`.
///# Panics
///Panics if `self` is not a [`Value::Array`](enum.Value.html#Array.v)
#[inline]
pub fn unwrap_array(self) -> Vec<Value> {
if let Value::Array(a) = self {
a
} else {
panic!("expected array value, got {:?}", self)
}
}
///Returns the inner `Vec<u8>` of a [`Value::String`](enum.Value.html#String.v).
///# Panics
///Panics if `self` is not a [`Value::String`](enum.Value.html#String.v)
#[inline]
pub fn unwrap_string(self) -> Vec<u8> {
if let Value::String(s) = self {
s
} else {
panic!("expected string value, got {:?}", self)
}
}
///Returns `true` if `self` is nonzero.
///# Panics
    ///Panics if `self` is not a [`Value::Integer`](enum.Value.html#Integer.v)
#[inline]
pub fn unwrap_bool(self) -> bool {
self.unwrap_integer() != 0
}
///Returns `self` as a vector of Redis strings.
///# Panics
///Panics if `self` is not a [`Value::Array`](enum.Value.html#Array.v) or not all the elements are strings.
#[inline]
pub fn unwrap_string_array(self) -> Vec<Vec<u8>> {
self.unwrap_array()
.into_iter()
.map(|v| v.unwrap_string())
.collect()
}
///Like `unwrap_string`, but returns an `Option` instead of panicking.
#[inline]
pub fn optional_string(self) -> Option<Vec<u8>> {
match self {
Value::String(s) => Some(s),
_ => None,
}
}
///Like `unwrap_array`, but returns an `Option` instead of panicking.
#[inline]
pub fn optional_array(self) -> Option<Vec<Value>> {
match self {
Value::Array(a) => Some(a),
_ => None,
}
}
///Like `unwrap_integer`, but returns an `Option` instead of panicking.
#[inline]
pub fn optional_integer(self) -> Option<isize> {
match self {
Value::Integer(i) => Some(i),
_ => None,
}
}
///Like `unwrap_bool`, but returns an `Option` instead of panicking.
#[inline]
pub fn optional_bool(self) -> Option<bool> {
self.optional_integer().map(|i| i != 0)
}
}
///An enum corresponding to every Redis type.
#[derive(Debug, Clone, PartialEq)]
pub enum DataType {
///A simple string.
String,
///A List.
List,
///A set.
Set,
///A sorted set.
ZSet,
///A hash set.
Hash,
///A stream.
Stream,
} | #[cfg(feature = "runtime_async_std")]
pub use async_std::net::ToSocketAddrs;
#[cfg(feature = "runtime_tokio")] |
qiniu.go | package CloudStore
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/astaxie/beego/httplib"
"github.com/qiniu/api.v7/v7/auth/qbox"
"github.com/qiniu/api.v7/v7/storage"
)
type QINIU struct {
AccessKey string
SecretKey string
Bucket string
Domain string
Zone *storage.Zone
mac *qbox.Mac
BucketManager *storage.BucketManager
}
func NewQINIU(accessKey, secretKey, bucket, domain string) (q *QINIU, err error) {
q = &QINIU{
AccessKey: accessKey,
SecretKey: secretKey,
Bucket: bucket,
Domain: domain,
}
q.Domain = strings.TrimRight(q.Domain, "/")
q.mac = qbox.NewMac(accessKey, secretKey)
q.Zone, err = storage.GetZone(accessKey, bucket)
if err != nil {
return
}
q.BucketManager = storage.NewBucketManager(q.mac, &storage.Config{Zone: q.Zone})
return
}
func (q *QINIU) IsExist(object string) (err error) {
_, err = q.GetInfo(object)
return
}
// TODO: no way to set custom headers has been found so far
func (q *QINIU) Upload(tmpFile, saveFile string, headers ...map[string]string) (err error) {
policy := storage.PutPolicy{Scope: q.Bucket}
token := policy.UploadToken(q.mac)
cfg := &storage.Config{
Zone: q.Zone,
}
form := storage.NewFormUploader(cfg)
ret := &storage.PutRet{}
params := make(map[string]string)
for _, header := range headers {
for k, v := range header {
params["x:"+k] = v
}
}
extra := &storage.PutExtra{
Params: params,
}
saveFile = objectRel(saveFile)
	// delete first: if the file already exists it cannot be overwritten
q.Delete(saveFile)
err = form.PutFile(context.Background(), ret, token, saveFile, tmpFile, extra)
return
}
func (q *QINIU) Delete(objects ...string) (err error) {
length := len(objects)
if length == 0 {
return
}
defer func() {
		// when the deleted file does not exist, err has an empty message but is not nil; normalize it here
if err != nil && err.Error() == "" {
err = nil
}
}()
deleteOps := make([]string, 0, length)
for _, object := range objects {
deleteOps = append(deleteOps, storage.URIDelete(q.Bucket, objectRel(object)))
}
cfg := &storage.Config{
Zone: q.Zone,
}
manager := storage.NewBucketManager(q.mac, cfg)
var res []storage.BatchOpRet
res, err = manager.Batch(deleteOps)
if err != nil {
return
}
var errs []string
for _, item := range res {
if item.Code != http.StatusOK {
errs = append(errs, fmt.Errorf("%+v: %v", item.Data, item.Code).Error())
}
}
if len(errs) > 0 {
err = errors.New(strings.Join(errs, "; "))
}
return
}
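// GetSignURL returns a signed private URL for the object when expire > 0,
// otherwise it returns a public URL.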
func (q *QINIU) GetSignURL(object string, expire int64) (link string, err error) {
object = objectRel(object)
if expire > 0 {
deadline := time.Now().Add(time.Second * time.Duration(expire)).Unix()
link = storage.MakePrivateURL(q.mac, q.Domain, object, deadline)
} else {
link = storage.MakePublicURL(q.Domain, object)
}
if !strings.HasPrefix(link, q.Domain) {
if u, errU := url.Parse(link); errU == nil {
link = q.Domain + u.RequestURI()
}
}
return
}
func (q *QINIU) Download(object string, savePath string) (err error) {
var link string
link, err = q.GetSignURL(object, 3600)
if err != nil {
return
}
req := httplib.Get(link).SetTimeout(30*time.Minute, 30*time.Minute)
if strings.HasPrefix(strings.ToLower(link), "https://") {
req.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true})
}
var resp *http.Response
resp, err = req.Response()
if err != nil {
return
}
defer resp.Body.Close()
data, _ := ioutil.ReadAll(resp.Body)
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
return fmt.Errorf("%v: %v", resp.Status, string(data))
}
err = ioutil.WriteFile(savePath, data, os.ModePerm)
return
}
func (q *QINIU) GetInfo(object string) (info File, err error) {
var fileInfo storage.FileInfo
object = objectRel(object)
fileInfo, err = q.BucketManager.Stat(q.Bucket, object)
if err != nil {
return
}
info = File{
Name: object,
Size: fileInfo.Fsize,
ModTime: storage.ParsePutTime(fileInfo.PutTime),
IsDir: fileInfo.Fsize == 0,
}
return
}
func (q *QINIU) Lists(prefix string) (files []File, err error) {
var items []storage.ListItem
prefix = objectRel(prefix)
limit := 1000
cfg := &storage.Config{
Zone: q.Zone,
}
manager := storage.NewBucketManager(q.mac, cfg)
items, _, _, _, err = manager.ListFiles(q.Bucket, prefix, "", "", limit)
if err != nil {
return
}
for _, item := range items {
files = append(files, File{
ModTime: storage.ParsePutTime(item.PutTime),
Name: objectRel(item.Key),
Size: item.Fsize,
IsDir: item.Fsize == 0,
})
} |
return
} | |
build.rs | #![allow(
clippy::enum_glob_use,
clippy::must_use_candidate,
clippy::single_match_else
)]
mod rustc;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::path::Path;
use std::process::{self, Command};
fn main() | {
let rustc = env::var_os("RUSTC").unwrap_or_else(|| OsString::from("rustc"));
let output = match Command::new(&rustc).arg("--version").output() {
Ok(output) => output,
Err(e) => {
let rustc = rustc.to_string_lossy();
eprintln!("Error: failed to run `{} --version`: {}", rustc, e);
process::exit(1);
}
};
let string = match String::from_utf8(output.stdout) {
Ok(string) => string,
Err(e) => {
let rustc = rustc.to_string_lossy();
eprintln!(
"Error: failed to parse output of `{} --version`: {}",
rustc, e,
);
process::exit(1);
}
};
let version = match rustc::parse(&string) {
Some(version) => version,
None => {
eprintln!(
"Error: unexpected output from `rustc --version`: {:?}\n\n\
Please file an issue in https://github.com/dtolnay/rustversion",
string
);
process::exit(1);
}
};
if version.minor < 38 {
// Prior to 1.38, a #[proc_macro] is not allowed to be named `cfg`.
println!("cargo:rustc-cfg=cfg_macro_not_allowed");
}
let version = format!("{:#?}\n", version);
let out_dir = env::var_os("OUT_DIR").expect("OUT_DIR not set");
let out_file = Path::new(&out_dir).join("version.rs");
fs::write(out_file, version).expect("failed to write version.rs");
} |
|
main.py | import json
import pathlib
import urllib.request
def | ():
# https://gist.github.com/kawanet/a880c83f06d6baf742e45ac9ac52af96
url = 'https://gist.githubusercontent.com/kawanet/a880c83f06d6baf742e45ac9ac52af96/raw' \
'/b4fbc9a730394eb977277e73cc37b60955463f21/material-colors.json'
json_file_name = 'material-colors.json'
urllib.request.urlretrieve(url, json_file_name)
with open(json_file_name, 'r') as json_file:
colors = json.load(json_file)
out_dir_name = 'material_ui_colors'
pathlib.Path(out_dir_name).mkdir(exist_ok=True)
for color in colors:
with open(out_dir_name + '/_' + color + '.scss', 'w') as out_file:
shades = colors[color]
out = ['$material_ui_' + color + '_' + shade + ': ' + value + ';\n' for shade, value in shades.items()]
out.append('$material_ui_' + color + ': $material_ui_' + color + '_500;')
out_file.writelines(out)
with open(out_dir_name + '/_main.scss', 'w') as out_main_file:
out = ['@import "' + color + '";\n' for color in colors]
out_main_file.writelines(out)
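# Illustrative note (not part of the original script): with the fetched palette
# data, each generated file such as _red.scss contains lines like
#   $material_ui_red_500: #f44336;
#   $material_ui_red: $material_ui_red_500;
# and _main.scss imports every palette via lines like `@import "red";`.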
if __name__ == '__main__':
main()
| main |
webhook.go | /*
Copyright 2019 Hiroki Matsumoto.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
import (
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
var AddToManagerFuncs []func(manager.Manager) error
// AddToManager adds all Controllers to the Manager | // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
func AddToManager(m manager.Manager) error {
for _, f := range AddToManagerFuncs {
if err := f(m); err != nil {
return err
}
}
return nil
} | // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations;validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete |
usecases.ts | import * as sd from "@staticdeploy/core";
import { IUsecasesByName } from "@staticdeploy/http-adapters";
const usecases: IUsecasesByName = {
checkHealth: sd.CheckHealth,
createApp: sd.CreateApp,
createBundle: sd.CreateBundle,
createEntrypoint: sd.CreateEntrypoint,
createGroup: sd.CreateGroup,
createUser: sd.CreateUser, | deleteUser: sd.DeleteUser,
deployBundle: sd.DeployBundle,
getApp: sd.GetApp,
getApps: sd.GetApps,
getBundle: sd.GetBundle,
getBundleNames: sd.GetBundleNames,
getBundles: sd.GetBundles,
getBundlesByNameAndTag: sd.GetBundlesByNameAndTag,
getBundleTagsByBundleName: sd.GetBundleTagsByBundleName,
getCurrentUser: sd.GetCurrentUser,
getEntrypoint: sd.GetEntrypoint,
getEntrypointsByAppId: sd.GetEntrypointsByAppId,
getGroup: sd.GetGroup,
getGroups: sd.GetGroups,
getOperationLogs: sd.GetOperationLogs,
getUser: sd.GetUser,
getUsers: sd.GetUsers,
respondToEndpointRequest: sd.RespondToEndpointRequest,
updateApp: sd.UpdateApp,
updateEntrypoint: sd.UpdateEntrypoint,
updateGroup: sd.UpdateGroup,
updateUser: sd.UpdateUser,
};
export default usecases; | deleteApp: sd.DeleteApp,
deleteBundlesByNameAndTag: sd.DeleteBundlesByNameAndTag,
deleteEntrypoint: sd.DeleteEntrypoint,
deleteGroup: sd.DeleteGroup, |
serializer.py | from __future__ import absolute_import, division, unicode_literals
from pip9._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
_encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
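# Illustrative usage sketch (not part of the vendored module; assumes the
# accompanying html5lib parser is importable as `html5lib`):
#   doc = html5lib.parse("<p>Hello")
#   serialize(doc, omit_optional_tags=False)
# would produce markup along the lines of
#   "<html><head></head><body><p>Hello</p></body></html>"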
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
          Whether to insert a meta element to define the character set of the
document.
quote_attr_values="legacy"|"spec"|"always"
Whether to quote attribute values that don't require quoting
per legacy browser behaviour, when required by the standard, or always.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def | (self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
| serializeError |
hook.go | package controller
import (
"fmt"
"github.com/gin-gonic/gin"
"os"
"path/filepath"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/drone/drone/engine"
"github.com/drone/drone/model"
"github.com/drone/drone/remote"
"github.com/drone/drone/router/middleware/context"
"github.com/drone/drone/shared/httputil"
"github.com/drone/drone/shared/token"
"github.com/drone/drone/store"
"github.com/drone/drone/yaml"
"github.com/drone/drone/yaml/matrix"
)
func PostHook(c *gin.Context) {
remote_ := remote.FromContext(c)
tmprepo, build, err := remote_.Hook(c.Request)
if err != nil {
log.Errorf("failure to parse hook. %s", err)
c.AbortWithError(400, err)
return
}
if build == nil {
c.Writer.WriteHeader(200)
return
}
if tmprepo == nil {
log.Errorf("failure to ascertain repo from hook.")
c.Writer.WriteHeader(400)
return
}
// a build may be skipped if the text [CI SKIP]
// is found inside the commit message
if strings.Contains(build.Message, "[CI SKIP]") {
log.Infof("ignoring hook. [ci skip] found for %s")
c.Writer.WriteHeader(204)
return
}
repo, err := store.GetRepoOwnerName(c, tmprepo.Owner, tmprepo.Name)
if err != nil {
log.Errorf("failure to find repo %s/%s from hook. %s", tmprepo.Owner, tmprepo.Name, err)
c.AbortWithError(404, err)
return
}
// get the token and verify the hook is authorized
parsed, err := token.ParseRequest(c.Request, func(t *token.Token) (string, error) {
return repo.Hash, nil
})
if err != nil {
log.Errorf("failure to parse token from hook for %s. %s", repo.FullName, err)
c.AbortWithError(400, err)
return
}
if parsed.Text != repo.FullName {
log.Errorf("failure to verify token from hook. Expected %s, got %s", repo.FullName, parsed.Text)
c.AbortWithStatus(403)
return
}
if repo.UserID == 0 {
log.Warnf("ignoring hook. repo %s has no owner.", repo.FullName)
c.Writer.WriteHeader(204)
return
}
var skipped = true
if (build.Event == model.EventPush && repo.AllowPush) ||
(build.Event == model.EventPull && repo.AllowPull) ||
(build.Event == model.EventDeploy && repo.AllowDeploy) ||
(build.Event == model.EventTag && repo.AllowTag) {
skipped = false
}
if skipped {
log.Infof("ignoring hook. repo %s is disabled for %s events.", repo.FullName, build.Event)
c.Writer.WriteHeader(204)
return
}
user, err := store.GetUser(c, repo.UserID)
if err != nil {
log.Errorf("failure to find repo owner %s. %s", repo.FullName, err)
c.AbortWithError(500, err) |
// if there is no email address associated with the pull request,
// we lookup the email address based on the authors github login.
//
// my initial hesitation with this code is that it has the ability
// to expose your email address. At the same time, your email address
	// is already exposed in the public .git log. So while a small number of
	// people will probably be upset by this, I'm not sure
// it is actually that big of a deal.
if len(build.Email) == 0 {
author, err := store.GetUserLogin(c, build.Author)
if err == nil {
build.Email = author.Email
}
}
// if the remote has a refresh token, the current access token
// may be stale. Therefore, we should refresh prior to dispatching
// the job.
if refresher, ok := remote_.(remote.Refresher); ok {
ok, _ := refresher.Refresh(user)
if ok {
store.UpdateUser(c, user)
}
}
// fetch the .drone.yml file from the database
raw, sec, err := remote_.Script(user, repo, build)
if err != nil {
log.Errorf("failure to get .drone.yml for %s. %s", repo.FullName, err)
c.AbortWithError(404, err)
return
}
axes, err := matrix.Parse(string(raw))
if err != nil {
log.Errorf("failure to calculate matrix for %s. %s", repo.FullName, err)
c.AbortWithError(400, err)
return
}
if len(axes) == 0 {
axes = append(axes, matrix.Axis{})
}
netrc, err := remote_.Netrc(user, repo)
if err != nil {
log.Errorf("failure to generate netrc for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
key, _ := store.GetKey(c, repo)
// verify the branches can be built vs skipped
yconfig, _ := yaml.Parse(string(raw))
var match = false
for _, branch := range yconfig.Branches {
if branch == build.Branch {
match = true
break
}
match, _ = filepath.Match(branch, build.Branch)
if match {
break
}
}
if !match && len(yconfig.Branches) != 0 {
log.Infof("ignoring hook. yaml file excludes repo and branch %s %s", repo.FullName, build.Branch)
c.AbortWithStatus(200)
return
}
// update some build fields
build.Status = model.StatusPending
build.RepoID = repo.ID
// and use a transaction
var jobs []*model.Job
for num, axis := range axes {
jobs = append(jobs, &model.Job{
BuildID: build.ID,
Number: num + 1,
Status: model.StatusPending,
Environment: axis,
})
}
err = store.CreateBuild(c, build, jobs...)
if err != nil {
log.Errorf("failure to save commit for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
c.JSON(200, build)
url := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
err = remote_.Status(user, repo, build, url)
if err != nil {
log.Errorf("error setting commit status for %s/%d", repo.FullName, build.Number)
}
	// get the previous build so that we can send
// on status change notifications
last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
engine_ := context.Engine(c)
go engine_.Schedule(c.Copy(), &engine.Task{
User: user,
Repo: repo,
Build: build,
BuildPrev: last,
Jobs: jobs,
Keys: key,
Netrc: netrc,
Config: string(raw),
Secret: string(sec),
System: &model.System{
Link: httputil.GetURL(c.Request),
Plugins: strings.Split(os.Getenv("PLUGIN_FILTER"), " "),
Globals: strings.Split(os.Getenv("PLUGIN_PARAMS"), " "),
},
})
} | return
} |
OpenStereotaxy.py | # -*- coding: utf-8 -*-
"""
===================== OpenStereotaxy module for FreeCAD =======================
This Python module for FreeCAD allows the user to calculate the chamber-centered
coordinates of the target structure(s). Based on this data, the module will
generate surface meshes (exported in .stl format ready for 3D-printing) of the
following custom parts:
1) a drill guide for performing craniotomy
2) a guide tube guide grid
3) a microdrive system
Written by Aidan Murphy, PhD ([email protected])
"""
import numpy as np
from scipy.io import loadmat
# ================= Load data from Slicer files
def LoadChamberCoords(TransformFile, TargetsFile):
    x = loadmat(TransformFile, squeeze_me=True)                       # Load transform matrix
    data = x['AffineTransform_double_3_3']
    TransformMatrix = np.reshape(data[0:9], [3, 3])                   # Reshape array
    TransformMatrix = np.column_stack([TransformMatrix, data[9:12]])  # Translation assumed to sit in data[9:12]
    TransformMatrix = np.vstack([TransformMatrix, [0, 0, 0, 1]])      # Pad to a full 4x4 affine
    Ras2lps = np.array([-1, -1, 1, 1])
    Tform = TransformMatrix * Ras2lps                                 # Convert transform matrix from LPS to RAS
    ChamberCoords = []
    with open(TargetsFile, 'r') as fid:                               # Load target coordinate data
        for line in fid.readlines()[3:]:                              # For each target...
            fields = line.strip().split(",")
            name, description = fields[11], fields[12]                # Target name and description (kept for reference)
            XYZ_RAS = np.array(fields[1:4], dtype=float)              # Get the raw MR-volume coordinates
            XYZ_Chamber = Tform.dot(np.append(XYZ_RAS, 1.0))          # Apply transform
            ChamberCoords.append(-XYZ_Chamber[0:3])                   # Chamber-centered coordinates
    return ChamberCoords
# ================= Move electrode holes
def UpdateHoleLocations(ChamberCoords): |
TransformFile = '/Volumes/RAWDATA/murphya/MRI/StevieRay/Surgery2_Plan/ManualTransform_LH_V2.mat'
TargetsFile = '/Volumes/RAWDATA/murphya/MRI/StevieRay/Surgery2_Plan/SurgicalTargets.fcsv'
ChamberCoords = LoadChamberCoords(TransformFile, TargetsFile)
| |
payment.py | from dataclasses import dataclass
from typing import List
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.program import Program
from salvia.util.ints import uint64
# This class is supposed to correspond to a CREATE_COIN condition
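# Illustrative sketch (values made up): a CREATE_COIN condition has the shape
# (51 puzzle_hash amount (memos...)), so
#   Payment(bytes32(b"\x00" * 32), uint64(1000), [b"memo"]).as_condition()
# builds Program.to([51, <puzzle_hash>, 1000, [b"memo"]]).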
@dataclass(frozen=True)
class | :
puzzle_hash: bytes32
amount: uint64
memos: List[bytes]
def as_condition_args(self) -> List:
return [self.puzzle_hash, self.amount, self.memos]
def as_condition(self) -> Program:
return Program.to([51, *self.as_condition_args()])
def name(self) -> bytes32:
return self.as_condition().get_tree_hash()
@classmethod
def from_condition(cls, condition: Program) -> "Payment":
python_condition: List = condition.as_python()
puzzle_hash, amount = python_condition[1:3]
memos: List[bytes] = []
if len(python_condition) > 3:
memos = python_condition[3]
return cls(bytes32(puzzle_hash), uint64(int.from_bytes(amount, "big")), memos)
| Payment |
log.go | // Copyright 2013-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graft
import (
"bytes"
"crypto/sha1"
"encoding/json"
"io/ioutil"
"os"
)
type envelope struct {
SHA, Data []byte
}
type persistentState struct {
CurrentTerm uint64
VotedFor string
}
func (n *Node) initLog(path string) error {
if log, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0660); err != nil {
return err
} else {
log.Close()
}
n.logPath = path
ps, err := n.readState(path)
if err != nil && err != LogNoStateErr {
return err
}
if ps != nil {
n.setTerm(ps.CurrentTerm)
n.setVote(ps.VotedFor)
}
return nil
}
func (n *Node) closeLog() error {
err := os.Remove(n.logPath)
n.logPath = ""
return err
}
func (n *Node) writeState() error {
n.mu.Lock()
ps := persistentState{
CurrentTerm: n.term,
VotedFor: n.vote,
}
logPath := n.logPath
n.mu.Unlock()
buf, err := json.Marshal(ps)
if err != nil |
// Set a SHA1 to test for corruption on read
env := envelope{
SHA: sha1.New().Sum(buf),
Data: buf,
}
toWrite, err := json.Marshal(env)
if err != nil {
return err
}
return ioutil.WriteFile(logPath, toWrite, 0660)
}
func (n *Node) readState(path string) (*persistentState, error) {
buf, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
if len(buf) <= 0 {
return nil, LogNoStateErr
}
env := &envelope{}
if err := json.Unmarshal(buf, env); err != nil {
return nil, err
}
// Test for corruption
sha := sha1.New().Sum(env.Data)
if !bytes.Equal(sha, env.SHA) {
return nil, LogCorruptErr
}
ps := &persistentState{}
if err := json.Unmarshal(env.Data, ps); err != nil {
return nil, err
}
return ps, nil
}
| {
return err
} |
issue-3121.rs | // run-pass
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![feature(box_syntax)]
#[derive(Copy, Clone)]
enum | { mayo, catsup, vinegar }
#[derive(Copy, Clone)]
enum order { hamburger, fries(side), shake }
#[derive(Copy, Clone)]
enum meal { to_go(order), for_here(order) }
fn foo(m: Box<meal>, cond: bool) {
match *m {
meal::to_go(_) => { }
meal::for_here(_) if cond => {}
meal::for_here(order::hamburger) => {}
meal::for_here(order::fries(_s)) => {}
meal::for_here(order::shake) => {}
}
}
pub fn main() {
foo(box meal::for_here(order::hamburger), true)
}
| side |
bulb.go | package yeelight
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"log"
"net"
"sync"
)
// notes:
// max 4 parallel opened TCP connections
// quota: 60 commands per minute (for one device)
// quota: 144 commands per minute for all devices
// TODO: Returns response objects too, not only error
// TODO: Export interface only, not whole struct
type Bulb struct {
standardCommands
commonCommands
// Namespace to control "background" capabilities (device must support it)
Bg backgroundLightCommands
Ip string
Port int
conn net.Conn
results map[int]chan Response
resultsMtx sync.Mutex
}
func (b *Bulb) Connect() error {
destination := fmt.Sprintf("%s:%d", b.Ip, b.Port)
conn, err := net.Dial("tcp", destination)
if err != nil {
return err
}
go b.responseProcessor()
b.conn = conn
return nil
}
func (b *Bulb) Disconnect() error {
err := b.conn.Close()
if err != nil {
return err
}
return nil
}
// NewBulb creates Bulb instance, default protocol port: 55443
func | (ip string) *Bulb {
bulb := &Bulb{
standardCommands{},
commonCommands{},
backgroundLightCommands{},
ip,
55443, // 55443 is a constant protocol port
nil,
make(map[int]chan Response),
sync.Mutex{},
}
	// I know it looks bad, but "if it works, it works"
bulb.standardCommands.commander = bulb
bulb.commonCommands.commander = bulb
bulb.Bg.commander = bulb
bulb.Bg.prefix = "bg_"
return bulb
}
func (b *Bulb) executeCommand(c partialCommand) error {
respChan := make(chan Response)
// preparing request ID to be able to monitor and wait for response
b.resultsMtx.Lock()
id, err := b.findFirstFreeIntKey()
if err != nil {
b.resultsMtx.Unlock()
return err
}
b.results[id] = respChan
b.resultsMtx.Unlock()
defer func(ch chan Response, id int) {
close(ch)
delete(b.results, id)
}(respChan, id)
realCommand := newCompleteCommand(c, id)
message, err := json.Marshal(realCommand)
if err != nil {
return err
}
log.Printf("[%s] request: %s\n", b.Ip, message)
message = append(message, CR, LF)
_, err = b.conn.Write(message)
if err != nil {
return err
}
// waiting for response on that request
resp := <-respChan
return resp.ok()
}
func openSocket(host string, min, max int) (net.Listener, int, error) {
if min > max {
return nil, 0, errors.New("min value cannot be greather than max value")
}
if min < 0 || max > 65535 {
return nil, 0, errors.New("port number must be in range 0 - 65535")
}
for port := min; port <= max; port++ {
var ip = "" // binding on all interfaces
address := fmt.Sprintf("%s:%d", ip, port)
listener, err := net.Listen("tcp", address)
if err != nil {
continue
}
return listener, port, nil
}
return nil, 0, errors.New("no available free ports in given range")
}
// keysExists returns a bool when given map contains all of given key names
func keysExists(m map[string]interface{}, keys ...string) bool {
var matches int
	for k1 := range m {
for _, k2 := range keys {
if k1 == k2 {
matches += 1
}
}
}
return matches == len(keys)
}
// responseProcessor is run internally by Connect() function.
// It's responsible for monitoring command responses and notifications
func (b *Bulb) responseProcessor() {
var buff = make([]byte, 512)
var resp map[string]interface{}
for {
n, err := b.conn.Read(buff)
if err != nil {
break
}
responses := bytes.Split(buff[:n], []byte{CR, LF})
for _, r := range responses[:len(responses)-1] {
resp = make(map[string]interface{})
err = json.Unmarshal(r, &resp)
if err != nil {
log.Printf("OKResponse err: %s\n", r)
continue
}
switch {
case keysExists(resp, "id", "result"): // Command success
var unmarshaled OKResponse
err = json.Unmarshal(r, &unmarshaled)
if err != nil {
log.Printf("second unmarshal error: %s\n", r)
}
b.results[unmarshaled.id()] <- &unmarshaled
case keysExists(resp, "id", "error"): // Command failed
var unmarshaled ERRResponse
err = json.Unmarshal(r, &unmarshaled)
if err != nil {
log.Printf("second unmarshal error: %s\n", r)
}
b.results[unmarshaled.id()] <- &unmarshaled
case keysExists(resp, "method", "params"): // Notification
// log.Printf("state change%s\n", r)
default:
log.Printf("unhandled response: %s\n", r)
}
}
}
log.Printf("response processor exited\n")
}
// findFirstFreeIntKey finds available (unique) id which will be used as command identifier
func (b *Bulb) findFirstFreeIntKey() (int, error) {
for i := 0; i < 100; i++ {
_, ok := b.results[i]
if !ok {
return i, nil
}
}
return 0, errors.New("not available")
}
| NewBulb |
next.config.js | const webpack = require('webpack') |
module.exports = {
webpack: function (cfg) {
cfg.plugins.push(new webpack.DefinePlugin({
'process.env': {
'NODE_ENV': JSON.stringify(process.env.NODE_ENV)
}
}))
return cfg
}
} | |
0003_partyguest_hasprepartyaccess.py | # -*- coding: utf-8 -*- | from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("PartyListV2", "0002_restrictedguest"),
]
operations = [
migrations.AddField(
model_name="partyguest",
name="hasPrepartyAccess",
field=models.BooleanField(default=False),
),
] | # Generated by Django 1.11.4 on 2018-05-05 02:05 |
crate_name.rs | //! Crate name parsing.
use crate::errors::*;
use crate::Dependency;
use crate::{get_crate_name_from_github, get_crate_name_from_gitlab, get_crate_name_from_path};
/// A crate specifier. This can be a plain name (e.g. `docopt`), a name and a versionreq (e.g.
/// `docopt@^0.8`), a URL, or a path.
#[derive(Debug)]
pub struct CrateName<'a>(&'a str);
impl<'a> CrateName<'a> {
/// Create a new `CrateName`
pub fn new(name: &'a str) -> Self {
CrateName(name)
}
/// Get crate name
pub fn name(&self) -> &str {
self.0
}
/// Does this specify a versionreq?
pub fn has_version(&self) -> bool {
self.0.contains('@')
}
/// Is this a URI?
pub fn is_url_or_path(&self) -> bool {
self.is_github_url() || self.is_gitlab_url() || self.is_path()
}
fn | (&self) -> bool {
self.0.contains("https://github.com")
}
fn is_gitlab_url(&self) -> bool {
self.0.contains("https://gitlab.com")
}
fn is_path(&self) -> bool {
// FIXME: how else can we check if the name is a (possibly invalid) path?
self.0.contains('.') || self.0.contains('/') || self.0.contains('\\')
}
/// If this crate specifier includes a version (e.g. `[email protected]`), extract the name and
/// version.
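    ///
    /// For example (illustrative, not a doctest): `CrateName::new("[email protected]").parse_as_version()`
    /// should yield `Ok(Some(_))` with the dependency named `docopt` and version `0.8.1`,
    /// while a plain `docopt` yields `Ok(None)`.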
pub fn parse_as_version(&self) -> Result<Option<Dependency>> {
if self.has_version() {
let xs: Vec<_> = self.0.splitn(2, '@').collect();
let (name, version) = (xs[0], xs[1]);
semver::VersionReq::parse(version).chain_err(|| "Invalid crate version requirement")?;
Ok(Some(Dependency::new(name).set_version(version)))
} else {
Ok(None)
}
}
/// Will parse this crate name on the assumption that it is a URI.
pub fn parse_crate_name_from_uri(&self) -> Result<Dependency> {
if self.is_github_url() {
if let Ok(ref crate_name) = get_crate_name_from_github(self.0) {
return Ok(Dependency::new(crate_name).set_git(self.0, None));
}
} else if self.is_gitlab_url() {
if let Ok(ref crate_name) = get_crate_name_from_gitlab(self.0) {
return Ok(Dependency::new(crate_name).set_git(self.0, None));
}
} else if self.is_path() {
if let Ok(ref crate_name) = get_crate_name_from_path(self.0) {
let path = dunce::canonicalize(std::path::Path::new(self.0))?;
return Ok(Dependency::new(crate_name).set_path(path));
}
}
bail!("Unable to obtain crate informations from `{}`.\n", self.0)
}
}
| is_github_url |
AppInstallErrorPage.tsx | import errorImg from "@assets/images/app-install-error.svg";
import { Button, Grid, Typography } from "@material-ui/core";
import Container from "@saleor/components/Container";
import React from "react";
import { FormattedMessage } from "react-intl";
import { useStyles } from "./styles";
interface AppInstallErrorPageProps {
onBack: () => void;
}
export const AppInstallErrorPage: React.FC<AppInstallErrorPageProps> = ({
onBack
}) => {
const classes = useStyles({});
return (
<Container className={classes.root}>
<Grid spacing={3} alignItems="center" container>
<Grid xs={12} sm={6} item>
<img src={errorImg} alt="" />
</Grid>
<Grid xs={12} sm={6} item>
<Typography variant="h3" component="h3">
<FormattedMessage
defaultMessage="There’s a problem with app."
description="title"
/>
</Typography>
<Typography variant="body2">
<FormattedMessage
defaultMessage="Saleor couldn’t fetch crucial information regarding installation. Without those System can’t install the app in your Saleor. Please use the button below to get back to system’s dashboard."
description="content"
/>
</Typography>
<Button
className={classes.button}
color="primary" | <FormattedMessage
defaultMessage="Back to homepage"
description="button"
/>
</Button>
</Grid>
</Grid>
</Container>
);
};
export default AppInstallErrorPage; | variant="contained"
onClick={onBack}
> |
test_content_download.py | # coding: utf-8
import io
import os
import shutil
import tempfile
import unittest
from edo_client import WoClient
class ContentApi_DownloadTestCase(unittest.TestCase):
'''
    Basically this is to ensure that all the facilities related to
    HTTP range headers (partial downloads) are working properly.
'''
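    # For reference (illustrative, not issued verbatim by these tests): a ranged
    # download boils down to a request header such as
    #   Range: bytes=0-4999
    # answered with "206 Partial Content", exactly 5000 bytes of body, and a
    # Content-Range header like "bytes 0-4999/10485760".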
@classmethod
def setUpClass(cls):
cls.file_size = 10 * (2 ** 20)
cls.download_url = 'http://192.168.1.115/docker/unittest/10mb.test'
cls.api_url = 'https://httpbin.org/redirect-to?url={}'.format(
cls.download_url
)
cls.empty_file_url = 'http://192.168.1.115/docker/unittest/empty_file.bin'
# We're just testing some basic util functions,
# and don't want a real WoClient instance
cls.client = WoClient(
cls.api_url + '#',
'', '', '', '',
account='', instance=''
)
cls.tmpdir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir)
def test_01_get_download_url(self):
self.assertEqual(
self.client.content.get_download_url(uid=''),
self.download_url,
'Should be able to extract direct download URL from 302 redirect'
)
def test_11_download_to_stream_all(self):
        '''Test: download the complete file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url
)
self.assertEqual(
self.file_size,
stream.tell(),
'Cursor should be at the end of stream after download'
)
stream.seek(0, os.SEEK_SET)
self.assertEqual(
self.file_size,
len(stream.read()),
            'File length should be 10485760 bytes (10 MiB)'
)
def test_12_download_stream_first_byte(self):
        '''Test: download the first byte to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=0,
)
self.assertEqual(1, stream.tell(), 'Download first byte of file')
def test_13_download_stream_head_part(self):
        '''Test: download a leading portion to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=(5 * (2 ** 20) - 1),
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_14_download_stream_tail_part(self):
        '''Test: starting from the middle, download the latter part of the file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=(5 * (2 ** 20)), end=None,
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_15_download_partial(self):
        '''Test: starting from the middle, download a portion to a stream'''
stream = io.BytesIO()
start, end = 1234, 54321
self.client.content.download_to_stream(
stream, url=self.download_url, start=start, end=end,
)
self.assertEqual(stream.tell(), end - start + 1)
def test_21_get_data_full_size(self):
        '''Test: read the complete file content'''
self.assertEqual(
self.file_size,
len(self.client.content.get_data(url=self.download_url)),
            '.get_data should be able to download the whole file by default',
)
def test_22_get_data_first_byte(self):
        '''Test: read the first byte of the file'''
self.assertEqual(
1,
len(self.client.content.get_data(url=self.downl | '.get_data should be able to download the 1st byte of given file',
)
def test_23_get_data_head_part(self):
        '''Test: read a leading portion of the file content'''
size = 5432
self.assertEqual(
size,
len(self.client.content.get_data(url=self.download_url, size=size)), # noqa E501
'.get_data should download the first {} bytes'.format(size),
)
def test_24_get_data_tail_part(self):
        '''Test: starting from the middle, read the latter part of the file content'''
start = 12345
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size
)),
            '.get_data should download last {} bytes'.format(size),
)
def test_25_get_data_partial(self):
        '''Test: starting from the middle, read a portion of the file content'''
start = 23451
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size,
)),
'.get_data should download {} bytes starting from offset {}'.format(size, start), # noqa E501
)
def test_31_download_to_file(self):
        '''Test: download the complete file to local disk'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.download_url)
self.assertEqual(self.file_size, os.stat(fpath).st_size)
def test_41_download_empty_file(self):
        '''Test: download an empty file to local disk'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.empty_file_url)
self.assertEqual(0, os.stat(fpath).st_size)
| oad_url, size=1)),
|
cleanprinteddocs.py | # -*- coding: utf-8 -*-
import os
import arrow
from django.core.management.base import BaseCommand
from printto.models import UploadedFileModel
class Command(BaseCommand):
help = 'Clean all printed docs after 3 minutes'
def handle(self, *args, **options):
| now_time = arrow.now()
now_time = now_time.shift(minutes=-3)
now_time = now_time.datetime
records = UploadedFileModel.objects.filter(datetime__lt=now_time)
for record in records:
try:
os.remove(record.file.path)
except:
pass
if records:
records.delete() |
|
value.rs | use chain_impl_mockchain::value;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{fmt, str::FromStr};
/// Value in the blockchain, always printed as absolute Lovelace
///
/// Value has some property to be human readable on standard display
///
/// ```
/// # use jormungandr_lib::interfaces::Value;
/// # use chain_impl_mockchain::value::Value as StdValue;
///
/// let value: Value = StdValue(64).into();
///
/// println!("value: {}", value);
///
/// # assert_eq!(value.to_string(), "64");
/// ```
///
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Value(value::Value);
/* ---------------- Display ------------------------------------------------ */
impl fmt::Display for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl FromStr for Value {
type Err = std::num::ParseIntError;
fn from_str(s: &str) -> Result<Self, Self::Err> |
}
/* ---------------- AsRef -------------------------------------------------- */
impl AsRef<value::Value> for Value {
fn as_ref(&self) -> &value::Value {
&self.0
}
}
/* ---------------- Conversion --------------------------------------------- */
impl From<value::Value> for Value {
fn from(v: value::Value) -> Self {
Value(v)
}
}
impl From<Value> for value::Value {
fn from(v: Value) -> Self {
v.0
}
}
impl From<u64> for Value {
fn from(v: u64) -> Self {
Value(value::Value(v))
}
}
/* ------------------- Serde ----------------------------------------------- */
impl Serialize for Value {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.0.as_ref().serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Value {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v = u64::deserialize(deserializer)?;
Ok(Value(value::Value(v)))
}
}
#[cfg(test)]
mod test {
use super::*;
use quickcheck::{Arbitrary, Gen, TestResult};
impl Arbitrary for Value {
fn arbitrary<G>(g: &mut G) -> Self
where
G: Gen,
{
Value(value::Value(u64::arbitrary(g)))
}
}
#[test]
fn value_display_as_u64() {
const VALUE: u64 = 928170;
let value = Value(value::Value(VALUE));
assert_eq!(value.to_string(), VALUE.to_string());
}
#[test]
fn value_serde_as_u64() {
const VALUE: u64 = 928170;
let value = Value(value::Value(VALUE));
assert_eq!(
serde_yaml::to_string(&value).unwrap(),
format!("---\n{}", VALUE)
);
}
quickcheck! {
fn value_display_parse(value: Value) -> TestResult {
let s = value.to_string();
let value_dec: Value = s.parse().unwrap();
TestResult::from_bool(value_dec == value)
}
fn value_serde_human_readable_encode_decode(value: Value) -> TestResult {
let s = serde_yaml::to_string(&value).unwrap();
let value_dec: Value = serde_yaml::from_str(&s).unwrap();
TestResult::from_bool(value_dec == value)
}
fn value_serde_binary_encode_decode(value: Value) -> TestResult {
let s = bincode::serialize(&value).unwrap();
let value_dec: Value = bincode::deserialize(&s).unwrap();
TestResult::from_bool(value_dec == value)
}
}
}
| {
s.parse().map(|v| Value(value::Value(v)))
} |
yahoo.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import time
import lxml.etree
import randoms
import re
from searchengine import searchengine
class yahoo(searchengine):
"""Search resources from yahoo searchengine, include titles/urls."""
def __init__(self):
super(yahoo, self).__init__()
def yahoo_dork_search(self, dork, page=0, random_sleep=True):
"""Search dorks from yahoo pages"""
resources = []
indexs = range(page + 1)
for index in indexs:
req = requests.Session()
url = 'https://search.yahoo.com/search'
headers = {'User-Agent': 'Mozilla/5.0'}
pz = 10 # items num in per page
params = {'pz': pz, 'p': dork, 'b': pz * index + 1}
resp = req.get(url, params=params, headers=headers)
            if resp.status_code != 200:  # no available yahoo pages
return {dork: resources}
html = lxml.etree.HTML(resp.text)
ols = html.xpath('//div[@id="main"]/div/div[@id="web"]/'
'ol[contains(@class, "searchCenterMiddle")]')
if not ols:
                return {dork: resources}  # no available yahoo pages
for ol in ols:
as_ = ol.xpath('//h3[@class="title"]/a')
for a in as_:
title = "".join([_ for _ in a.itertext()])
href = a.get('href')
href = self.parse_yahoo_url(href)
data = [title, href]
resources.append(data)
# Avoid yahoo.com banning spider ip, sleep during 1...n (not 1, n)
if random_sleep and len(indexs) > 1 and index != indexs[-1]:
rt = randoms.rand_item_from_iters([_ for _ in range(1, 8)])
print("sleeping {} s to avoid yahoo...".format(rt))
time.sleep(int(rt))
return {dork: resources}
def parse_yahoo_url(self, url):
"""parse link from yahoo href"""
if '/RU=' in url: # parse
# regex = re.compile('/RU=([^\']+)/RK=0')
regex = re.compile('.*/RU=([^\']+)/RK=')
url = regex.findall(url)[0]
url = requests.utils.unquote(url)
return url
def demo_yahoo():
"""A demo test for yahoo class"""
yh = yahoo()
dork = 'site:google.com'
data = yh.yahoo_dork_search(dork, page=1)
for title, href in data[dork]:
print(title)
print(href)
print('\n-----------\n')
| demo_yahoo() | if __name__ == "__main__": |
tabela.component.ts | import { Component, OnInit, ViewChild } from '@angular/core';
import { MatPaginator } from '@angular/material/paginator';
import { MatSort } from '@angular/material/sort';
@Component({
selector: 'dio-tabela',
templateUrl: './tabela.component.html',
styleUrls: ['./tabela.component.scss']
}) | export class TabelaComponent implements OnInit {
@ViewChild(MatPaginator, { static: true }) paginator: MatPaginator;
@ViewChild(MatSort, { static: true }) sort: MatSort;
dataSource: any[] = [
{id: 1, name: 'teste'},
{id: 2, name: 'teste 2'}
];
/** Columns displayed in the table. Columns IDs can be added, removed, or reordered. */
displayedColumns = ['id', 'name'];
ngOnInit() {
console.log(this.paginator);
}
paginaTrocada(pagina: any) {
console.log(pagina);
}
} | |
listobject.rs | use crate::ffi::object::*;
use crate::ffi::pyport::Py_ssize_t;
use std::os::raw::c_int;
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
#[cfg_attr(PyPy, link_name = "PyPyList_Type")]
pub static mut PyList_Type: PyTypeObject;
pub static mut PyListIter_Type: PyTypeObject;
pub static mut PyListRevIter_Type: PyTypeObject;
}
#[inline]
pub unsafe fn PyList_Check(op: *mut PyObject) -> c_int {
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_LIST_SUBCLASS)
}
#[inline]
pub unsafe fn PyList_CheckExact(op: *mut PyObject) -> c_int {
(Py_TYPE(op) == &mut PyList_Type) as c_int
}
extern "C" {
#[cfg_attr(PyPy, link_name = "PyPyList_New")]
pub fn PyList_New(size: Py_ssize_t) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyList_Size")]
pub fn PyList_Size(arg1: *mut PyObject) -> Py_ssize_t;
#[cfg_attr(PyPy, link_name = "PyPyList_GetItem")]
pub fn PyList_GetItem(arg1: *mut PyObject, arg2: Py_ssize_t) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyList_SetItem")]
pub fn PyList_SetItem(arg1: *mut PyObject, arg2: Py_ssize_t, arg3: *mut PyObject) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyList_Insert")]
pub fn PyList_Insert(arg1: *mut PyObject, arg2: Py_ssize_t, arg3: *mut PyObject) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyList_Append")]
pub fn PyList_Append(arg1: *mut PyObject, arg2: *mut PyObject) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyList_GetSlice")]
pub fn PyList_GetSlice(
arg1: *mut PyObject,
arg2: Py_ssize_t,
arg3: Py_ssize_t,
) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyList_SetSlice")]
pub fn PyList_SetSlice(
arg1: *mut PyObject,
arg2: Py_ssize_t,
arg3: Py_ssize_t,
arg4: *mut PyObject,
) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyList_Sort")]
pub fn PyList_Sort(arg1: *mut PyObject) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyList_Reverse")]
pub fn PyList_Reverse(arg1: *mut PyObject) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyList_AsTuple")]
pub fn PyList_AsTuple(arg1: *mut PyObject) -> *mut PyObject; | #[cfg_attr(PyPy, link_name = "PyPyList_GET_ITEM")]
pub fn PyList_GET_ITEM(arg1: *mut PyObject, arg2: Py_ssize_t) -> *mut PyObject;
#[cfg(PyPy)]
#[cfg_attr(PyPy, link_name = "PyPyList_GET_SIZE")]
pub fn PyList_GET_SIZE(arg1: *mut PyObject) -> Py_ssize_t;
#[cfg(PyPy)]
#[cfg_attr(PyPy, link_name = "PyPyList_SET_ITEM")]
pub fn PyList_SET_ITEM(arg1: *mut PyObject, arg2: Py_ssize_t, arg3: *mut PyObject);
} |
// CPython macros exported as functions on PyPy
#[cfg(PyPy)] |
helpers.js | /* jshint esversion: 6,-W097, -W040, node: true, expr: true, undef: true */
module.exports= {
/* HTML PART */
toID: function(c,f){ return (c+( f ? "/"+this.toComponentName(f) : "" )).replace(/[^\wěščřžýáíéúůĚŠČŘŽÝÁÍÉÚŮťŤňŇ]/g, "-").toLowerCase(); },
toFileName: file=> file.substring(file.lastIndexOf("/")+1),
toComponentName: function(file, c){
const fn= this.toFileName(file), _fn= fn.substring(0, fn.indexOf("."));
if(_fn==="inc") return fn.substring(fn.indexOf(".")+1, fn.lastIndexOf("."));
return _fn==="index"&&c ? c : _fn;
},
handleLinks: function(text){
return text.replace(/\{@link ([^\}]*)\}/g, (_, id)=> `[${this.toFileName(id)}](#nav_${this.toID(id)})`);
},
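	// Illustrative example (hypothetical id): handleLinks("see {@link api/user.php}")
	// returns "see [user.php](#nav_api-user-php)", since toID lower-cases the id and
	// replaces non-word characters with "-".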
isDynamicHTML: (params, depends)=> params||depends ? "dynamické" : "statické",
/* API PART */
isRequest: function(file){
if(/inc\./g.test(file)) return "pomocný script";
const file_name= this.toFileName(file);
const action= `action="${file_name.substring(0, file_name.lastIndexOf("."))}"`;
return `veřejně přístupné přes **POST** jako \`${action}\`.`;
},
isDynamicAPI: (params, depends)=> depends ? "dynamické" : "statické",
typedArray: function([ _type, description ]){
const type= /\[\]$/.test(_type) ? `\`Array.<${_type.replace("[]", "")}>\``: `\`${_type}\``;
return `${type} | ${this.joinLines(description, "<br> ")}`;
},
defaultParams: function(template_main, template, type){
if(!type) return "";
const def= {
user: {
hash: [ "`String`", "Identifikátor uživatele" ]
}
};
const def_type= def[type];
const text= type==="function" ? "použít jako argumenty funkce" : ( type==="namespace" ? "dospupné veřejné proměnné/funkce/… v tomto scriptu" : "použít jako **POST** klíče" );
return this.partial(template_main, text) + ( def_type ? this.partial(template, def_type, "loop", "\n")+"\n" : "" );
},
defaultReturns: function(ret, template){
if(!ret||!Object.keys(ret).length) return "";
return this.partial(template, { success: [ "`Number`", "Vrací `0` (neúspěch), `1` (**úspěch**), `2` (neúspěch s hláškou)." ] }, "loop", "\n")+"\n";
},
/* BOTH */
toStructure: function(id_synamic_name, data){
const toCompare= o=> (o.category+"/000"+this.toComponentName(o.file)).toLowerCase();
const map= data.sort((a, b)=> structureSort(toCompare(a), toCompare(b)));
let out= "", deep= [], deep_l= 0, indent= "", description= "", target= "";
for(let i=0, map_i, map_i_file, map_i_link, map_i_arr, map_i_arr_l, map_i_arr_last;(map_i= map[i]); i++){
[ "category", "description" ].filter(hasNotKey(map_i)).forEach(throwError(map_i.file));
map_i_file= this.toComponentName(map_i.file);
map_i_arr= map_i.category.split("/");
if(map_i_file!=="index") map_i_arr.push(map_i_file);
map_i_arr_l= map_i_arr.length;
map_i_arr_last= map_i_arr_l-1;
map_i_link= this.toID(map_i.category+"/"+map_i_file);
if(deep_l>map_i_arr_l){
deep.length= map_i_arr_l;
deep_l= deep.length;
}
for(let j=deep_l-1, map_ij;(map_ij= map_i_arr[j]); j--){
if(deep[j]!==map_ij) deep.pop();
}
deep_l= deep.length;
for(let j=deep_l, map_ij;(map_ij= map_i_arr[j]); j++){
deep.push(map_ij);
indent= " ".repeat(j);
target= `<a name="nav_${this.toID(map_i.category)}"></a>`;
if(j===map_i_arr_last){
description= ` *(${this[id_synamic_name](map_i.params,map_i.depends)})*\n\n${indent} `+this.handleLinks(this.joinLines(map_i.description, " "));
out+= `${indent}- **[${map_ij}](#nav_${map_i_link})**${map_i_file==="index" ? target : ""}${description}\n\n`;
} else {
out+= `${indent}- **${map_ij}** ${target}\n\n`;
}
}
deep_l= deep.length;
}
return out;
},
};
function structureSort(ca, cb){
const [ ia, ib ]= [ ca, cb ].map(c=> c.indexOf("/"));
const main= cb.substring(0, ib).localeCompare(ca.substring(0, ia));
if(main) return main;
return ca.substring(ia).localeCompare(cb.substring(ib));
}
function hasNotKey(o){ return k=> !Reflect.has(o, k); }
function throwError(o){ return k=> { throw new Error(`Chybí klíč '${k} | u '${o.file}'!`); }; } | ' v soubor |
ErrorObservable.js | "use strict";
var __extends = (this && this.__extends) || function (d, b) {
for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p];
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
var Observable_1 = require('../Observable');
/**
* We need this JSDoc comment for affecting ESDoc.
* @extends {Ignored}
* @hide true
*/
var ErrorObservable = (function (_super) {
__extends(ErrorObservable, _super);
function | (error, scheduler) {
_super.call(this);
this.error = error;
this.scheduler = scheduler;
}
/**
* Creates an Observable that emits no items to the Observer and immediately
* emits an error notification.
*
* <span class="informal">Just emits 'error', and nothing else.
* </span>
*
* <img src="./img/throw.png" width="100%">
*
* This static operator is useful for creating a simple Observable that only
* emits the error notification. It can be used for composing with other
* Observables, such as in a {@link mergeMap}.
*
* @example <caption>Emit the number 7, then emit an error.</caption>
* var result = Rx.Observable.throw(new Error('oops!')).startWith(7);
* result.subscribe(x => console.log(x), e => console.error(e));
*
* @example <caption>Map and flatten numbers to the sequence 'a', 'b', 'c', but throw an error for 13</caption>
* var interval = Rx.Observable.interval(1000);
* var result = interval.mergeMap(x =>
* x === 13 ?
* Rx.Observable.throw('Thirteens are bad') :
* Rx.Observable.of('a', 'b', 'c')
* );
* result.subscribe(x => console.log(x), e => console.error(e));
*
* @see {@link create}
* @see {@link empty}
* @see {@link never}
* @see {@link of}
*
* @param {any} error The particular Error to pass to the error notification.
* @param {Scheduler} [scheduler] A {@link IScheduler} to use for scheduling
* the emission of the error notification.
* @return {Observable} An error Observable: emits only the error notification
* using the given error argument.
* @static true
* @name throw
* @owner Observable
*/
ErrorObservable.create = function (error, scheduler) {
return new ErrorObservable(error, scheduler);
};
ErrorObservable.dispatch = function (arg) {
var error = arg.error, subscriber = arg.subscriber;
subscriber.error(error);
};
ErrorObservable.prototype._subscribe = function (subscriber) {
var error = this.error;
var scheduler = this.scheduler;
if (scheduler) {
return scheduler.schedule(ErrorObservable.dispatch, 0, {
error: error, subscriber: subscriber
});
}
else {
subscriber.error(error);
}
};
return ErrorObservable;
}(Observable_1.Observable));
exports.ErrorObservable = ErrorObservable;
//# sourceMappingURL=ErrorObservable.js.map | ErrorObservable |
eds_test.go | /*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edsbalancer
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"testing"
"github.com/golang/protobuf/jsonpb"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpctest"
scpb "google.golang.org/grpc/internal/proto/grpc_service_config"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
"google.golang.org/grpc/xds/internal/balancer/lrs"
xdsclient "google.golang.org/grpc/xds/internal/client"
"google.golang.org/grpc/xds/internal/client/bootstrap"
"google.golang.org/grpc/xds/internal/testutils"
"google.golang.org/grpc/xds/internal/testutils/fakeclient"
)
func init() {
balancer.Register(&edsBalancerBuilder{})
bootstrapConfigNew = func() (*bootstrap.Config, error) {
return &bootstrap.Config{
BalancerName: testBalancerNameFooBar,
Creds: grpc.WithInsecure(),
NodeProto: testutils.EmptyNodeProtoV2,
}, nil
}
}
func subConnFromPicker(p balancer.Picker) func() balancer.SubConn {
return func() balancer.SubConn {
scst, _ := p.Pick(balancer.PickInfo{})
return scst.SubConn
}
}
type s struct {
grpctest.Tester
}
func Test(t *testing.T) {
grpctest.RunSubTests(t, s{})
}
const testBalancerNameFooBar = "foo.bar"
func newNoopTestClientConn() *noopTestClientConn {
return &noopTestClientConn{}
}
// noopTestClientConn is used in EDS balancer config update tests that only
// cover the config update handling, but not SubConn/load-balancing.
type noopTestClientConn struct {
balancer.ClientConn
}
func (t *noopTestClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
return nil, nil
}
func (noopTestClientConn) Target() string { return testServiceName }
type scStateChange struct {
sc balancer.SubConn
state connectivity.State
}
type fakeEDSBalancer struct {
cc balancer.ClientConn
childPolicy *testutils.Channel
subconnStateChange *testutils.Channel
edsUpdate *testutils.Channel
loadStore lrs.Store
}
func (f *fakeEDSBalancer) handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
f.subconnStateChange.Send(&scStateChange{sc: sc, state: state})
}
func (f *fakeEDSBalancer) handleChildPolicy(name string, config json.RawMessage) {
f.childPolicy.Send(&loadBalancingConfig{Name: name, Config: config})
}
func (f *fakeEDSBalancer) handleEDSResponse(edsResp xdsclient.EndpointsUpdate) {
f.edsUpdate.Send(edsResp)
}
func (f *fakeEDSBalancer) updateState(priority priorityType, s balancer.State) {}
func (f *fakeEDSBalancer) close() {}
func (f *fakeEDSBalancer) waitForChildPolicy(wantPolicy *loadBalancingConfig) error {
val, err := f.childPolicy.Receive()
if err != nil {
return fmt.Errorf("error waiting for childPolicy: %v", err)
}
gotPolicy := val.(*loadBalancingConfig)
if !cmp.Equal(gotPolicy, wantPolicy) {
return fmt.Errorf("got childPolicy %v, want %v", gotPolicy, wantPolicy)
}
return nil
}
func (f *fakeEDSBalancer) waitForSubConnStateChange(wantState *scStateChange) error {
val, err := f.subconnStateChange.Receive()
if err != nil {
return fmt.Errorf("error waiting for subconnStateChange: %v", err)
}
gotState := val.(*scStateChange)
if !cmp.Equal(gotState, wantState, cmp.AllowUnexported(scStateChange{})) {
return fmt.Errorf("got subconnStateChange %v, want %v", gotState, wantState)
}
return nil
}
func (f *fakeEDSBalancer) waitForEDSResponse(wantUpdate xdsclient.EndpointsUpdate) error {
val, err := f.edsUpdate.Receive()
if err != nil {
return fmt.Errorf("error waiting for edsUpdate: %v", err)
}
gotUpdate := val.(xdsclient.EndpointsUpdate)
if !reflect.DeepEqual(gotUpdate, wantUpdate) {
return fmt.Errorf("got edsUpdate %+v, want %+v", gotUpdate, wantUpdate)
}
return nil
}
func newFakeEDSBalancer(cc balancer.ClientConn, loadStore lrs.Store) edsBalancerImplInterface {
return &fakeEDSBalancer{
cc: cc,
childPolicy: testutils.NewChannelWithSize(10),
subconnStateChange: testutils.NewChannelWithSize(10),
edsUpdate: testutils.NewChannelWithSize(10),
loadStore: loadStore,
}
}
type fakeSubConn struct{}
func (*fakeSubConn) UpdateAddresses([]resolver.Address) { panic("implement me") }
func (*fakeSubConn) Connect() { panic("implement me") }
// waitForNewXDSClientWithEDSWatch makes sure that a new xdsClient is created
// with the provided name. It also make sure that the newly created client
// registers an eds watcher.
func waitForNewXDSClientWithEDSWatch(t *testing.T, ch *testutils.Channel, wantName string) *fakeclient.Client {
t.Helper()
val, err := ch.Receive()
if err != nil {
t.Fatalf("error when waiting for a new xds client: %v", err)
return nil
}
xdsC := val.(*fakeclient.Client)
if xdsC.Name() != wantName {
t.Fatalf("xdsClient created to balancer: %v, want %v", xdsC.Name(), wantName)
return nil
}
_, err = xdsC.WaitForWatchEDS()
if err != nil {
t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err)
return nil
}
return xdsC
}
// waitForNewEDSLB makes sure that a new edsLB is created by the top-level
// edsBalancer.
func waitForNewEDSLB(t *testing.T, ch *testutils.Channel) *fakeEDSBalancer {
t.Helper()
val, err := ch.Receive()
if err != nil {
t.Fatalf("error when waiting for a new edsLB: %v", err)
return nil
}
return val.(*fakeEDSBalancer)
}
// setup overrides the functions which are used to create the xdsClient and the
// edsLB, creates fake version of them and makes them available on the provided
// channels. The returned cancel function should be called by the test for
// cleanup.
func setup(edsLBCh *testutils.Channel, xdsClientCh *testutils.Channel) func() {
origNewEDSBalancer := newEDSBalancer
newEDSBalancer = func(cc balancer.ClientConn, enqueue func(priorityType, balancer.State), loadStore lrs.Store, logger *grpclog.PrefixLogger) edsBalancerImplInterface {
edsLB := newFakeEDSBalancer(cc, loadStore)
defer func() { edsLBCh.Send(edsLB) }()
return edsLB
}
origXdsClientNew := xdsclientNew
xdsclientNew = func(opts xdsclient.Options) (xdsClientInterface, error) {
xdsC := fakeclient.NewClientWithName(opts.Config.BalancerName)
defer func() { xdsClientCh.Send(xdsC) }()
return xdsC, nil
}
return func() {
newEDSBalancer = origNewEDSBalancer
xdsclientNew = origXdsClientNew
}
}
// TestXDSConfigBalancerNameUpdate verifies different scenarios where the
// balancer name in the lbConfig is updated.
//
// The test does the following:
// * Builds a new xds balancer.
// * Repeatedly pushes new ClientConnState which specifies different
//   balancerName in the lbConfig. We expect xdsClient objects to be created
// whenever the balancerName changes.
func (s) TestXDSConfigBalancerNameUpdate(t *testing.T) {
oldBootstrapConfigNew := bootstrapConfigNew
bootstrapConfigNew = func() (*bootstrap.Config, error) {
// Return an error from bootstrap, so the eds balancer will use
// BalancerName from the config.
//
// TODO: remove this when deleting BalancerName from config.
return nil, fmt.Errorf("no bootstrap available")
}
defer func() { bootstrapConfigNew = oldBootstrapConfigNew }()
edsLBCh := testutils.NewChannel()
xdsClientCh := testutils.NewChannel()
cancel := setup(edsLBCh, xdsClientCh)
defer cancel()
builder := balancer.Get(edsName)
cc := newNoopTestClientConn()
edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSClusterName}}).(*edsBalancer)
if !ok {
t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB)
}
defer edsB.Close()
addrs := []resolver.Address{{Addr: "1.1.1.1:10001"}, {Addr: "2.2.2.2:10002"}, {Addr: "3.3.3.3:10003"}}
for i := 0; i < 2; i++ {
balancerName := fmt.Sprintf("balancer-%d", i)
edsB.UpdateClientConnState(balancer.ClientConnState{
ResolverState: resolver.State{Addresses: addrs},
BalancerConfig: &EDSConfig{
BalancerName: balancerName,
EDSServiceName: testEDSClusterName,
},
})
xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, balancerName)
xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil)
}
}
const (
fakeBalancerA = "fake_balancer_A"
fakeBalancerB = "fake_balancer_B"
)
// Install two fake balancers for service config update tests.
//
// ParseConfig accepts the JSON only if the specified balancer is registered.
func init() |
type fakeBalancerBuilder struct {
name string
}
func (b *fakeBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
return &fakeBalancer{cc: cc}
}
func (b *fakeBalancerBuilder) Name() string {
return b.name
}
type fakeBalancer struct {
cc balancer.ClientConn
}
func (b *fakeBalancer) ResolverError(error) {
panic("implement me")
}
func (b *fakeBalancer) UpdateClientConnState(balancer.ClientConnState) error {
panic("implement me")
}
func (b *fakeBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
panic("implement me")
}
func (b *fakeBalancer) Close() {}
// TestXDSConfigChildPolicyUpdate verifies scenarios where the childPolicy
// section of the lbConfig is updated.
//
// The test does the following:
// * Builds a new xds balancer.
// * Pushes a new ClientConnState with a childPolicy set to fakeBalancerA.
// Verifies that a new xdsClient is created. It then pushes a new edsUpdate
// through the fakexds client. Verifies that a new edsLB is created and it
// receives the expected childPolicy.
// * Pushes a new ClientConnState with a childPolicy set to fakeBalancerB.
// This time around, we expect no new xdsClient or edsLB to be created.
// Instead, we expect the existing edsLB to receive the new child policy.
func (s) TestXDSConfigChildPolicyUpdate(t *testing.T) {
edsLBCh := testutils.NewChannel()
xdsClientCh := testutils.NewChannel()
cancel := setup(edsLBCh, xdsClientCh)
defer cancel()
builder := balancer.Get(edsName)
cc := newNoopTestClientConn()
edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}).(*edsBalancer)
if !ok {
t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB)
}
defer edsB.Close()
edsB.UpdateClientConnState(balancer.ClientConnState{
BalancerConfig: &EDSConfig{
BalancerName: testBalancerNameFooBar,
ChildPolicy: &loadBalancingConfig{
Name: fakeBalancerA,
Config: json.RawMessage("{}"),
},
EDSServiceName: testEDSClusterName,
},
})
xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, testBalancerNameFooBar)
xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil)
edsLB := waitForNewEDSLB(t, edsLBCh)
edsLB.waitForChildPolicy(&loadBalancingConfig{
Name: string(fakeBalancerA),
Config: json.RawMessage(`{}`),
})
edsB.UpdateClientConnState(balancer.ClientConnState{
BalancerConfig: &EDSConfig{
BalancerName: testBalancerNameFooBar,
ChildPolicy: &loadBalancingConfig{
Name: fakeBalancerB,
Config: json.RawMessage("{}"),
},
EDSServiceName: testEDSClusterName,
},
})
edsLB.waitForChildPolicy(&loadBalancingConfig{
Name: string(fakeBalancerB),
Config: json.RawMessage(`{}`),
})
}
// TestXDSSubConnStateChange verifies that the top-level edsBalancer passes on
// the subConnStateChange to the appropriate child balancers.
func (s) TestXDSSubConnStateChange(t *testing.T) {
edsLBCh := testutils.NewChannel()
xdsClientCh := testutils.NewChannel()
cancel := setup(edsLBCh, xdsClientCh)
defer cancel()
builder := balancer.Get(edsName)
cc := newNoopTestClientConn()
edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSClusterName}}).(*edsBalancer)
if !ok {
t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB)
}
defer edsB.Close()
addrs := []resolver.Address{{Addr: "1.1.1.1:10001"}, {Addr: "2.2.2.2:10002"}, {Addr: "3.3.3.3:10003"}}
edsB.UpdateClientConnState(balancer.ClientConnState{
ResolverState: resolver.State{Addresses: addrs},
BalancerConfig: &EDSConfig{
BalancerName: testBalancerNameFooBar,
EDSServiceName: testEDSClusterName,
},
})
xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, testBalancerNameFooBar)
xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil)
edsLB := waitForNewEDSLB(t, edsLBCh)
fsc := &fakeSubConn{}
state := connectivity.Ready
edsB.UpdateSubConnState(fsc, balancer.SubConnState{ConnectivityState: state})
edsLB.waitForSubConnStateChange(&scStateChange{sc: fsc, state: state})
}
// TestErrorFromXDSClientUpdate verifies that errors from xdsclient update are
// handled correctly.
//
// If it's resource-not-found, watch will NOT be canceled, the EDS impl will
// receive an empty EDS update, and new RPCs will fail.
//
// If it's connection error, nothing will happen. This will need to change to
// handle fallback.
func (s) TestErrorFromXDSClientUpdate(t *testing.T) {
edsLBCh := testutils.NewChannel()
xdsClientCh := testutils.NewChannel()
cancel := setup(edsLBCh, xdsClientCh)
defer cancel()
builder := balancer.Get(edsName)
cc := newNoopTestClientConn()
edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSClusterName}}).(*edsBalancer)
if !ok {
t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB)
}
defer edsB.Close()
edsB.UpdateClientConnState(balancer.ClientConnState{
BalancerConfig: &EDSConfig{
BalancerName: testBalancerNameFooBar,
EDSServiceName: testEDSClusterName,
},
})
xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, testBalancerNameFooBar)
xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil)
edsLB := waitForNewEDSLB(t, edsLBCh)
if err := edsLB.waitForEDSResponse(xdsclient.EndpointsUpdate{}); err != nil {
t.Fatalf("EDS impl got unexpected EDS response: %v", err)
}
connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error")
xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, connectionErr)
if err := xdsC.WaitForCancelEDSWatch(); err == nil {
t.Fatal("watch was canceled, want not canceled (timeout error)")
}
if err := edsLB.waitForEDSResponse(xdsclient.EndpointsUpdate{}); err == nil {
t.Fatal("eds impl got EDS resp, want timeout error")
}
resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error")
xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, resourceErr)
// Even if the error is resource-not-found, the watch shouldn't be canceled,
// because it means the EDS resource was removed (the xds client never actually
// sends this error, but we still handle it).
if err := xdsC.WaitForCancelEDSWatch(); err == nil {
t.Fatal("watch was canceled, want not canceled (timeout error)")
}
if err := edsLB.waitForEDSResponse(xdsclient.EndpointsUpdate{}); err != nil {
t.Fatalf("eds impl expecting empty update, got %v", err)
}
}
// TestErrorFromResolver verifies that resolver errors are handled correctly.
//
// If it's resource-not-found, watch will be canceled, the EDS impl will receive
// an empty EDS update, and new RPCs will fail.
//
// If it's connection error, nothing will happen. This will need to change to
// handle fallback.
func (s) TestErrorFromResolver(t *testing.T) {
edsLBCh := testutils.NewChannel()
xdsClientCh := testutils.NewChannel()
cancel := setup(edsLBCh, xdsClientCh)
defer cancel()
builder := balancer.Get(edsName)
cc := newNoopTestClientConn()
edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSClusterName}}).(*edsBalancer)
if !ok {
t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB)
}
defer edsB.Close()
edsB.UpdateClientConnState(balancer.ClientConnState{
BalancerConfig: &EDSConfig{
BalancerName: testBalancerNameFooBar,
EDSServiceName: testEDSClusterName,
},
})
xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, testBalancerNameFooBar)
xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil)
edsLB := waitForNewEDSLB(t, edsLBCh)
if err := edsLB.waitForEDSResponse(xdsclient.EndpointsUpdate{}); err != nil {
t.Fatalf("EDS impl got unexpected EDS response: %v", err)
}
connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error")
edsB.ResolverError(connectionErr)
if err := xdsC.WaitForCancelEDSWatch(); err == nil {
t.Fatal("watch was canceled, want not canceled (timeout error)")
}
if err := edsLB.waitForEDSResponse(xdsclient.EndpointsUpdate{}); err == nil {
t.Fatal("eds impl got EDS resp, want timeout error")
}
resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error")
edsB.ResolverError(resourceErr)
if err := xdsC.WaitForCancelEDSWatch(); err != nil {
t.Fatalf("want watch to be canceled, waitForCancel failed: %v", err)
}
if err := edsLB.waitForEDSResponse(xdsclient.EndpointsUpdate{}); err != nil {
t.Fatalf("EDS impl got unexpected EDS response: %v", err)
}
}
func (s) TestXDSBalancerConfigParsing(t *testing.T) {
const testEDSName = "eds.service"
var testLRSName = "lrs.server"
b := bytes.NewBuffer(nil)
if err := (&jsonpb.Marshaler{}).Marshal(b, &scpb.XdsConfig{
ChildPolicy: []*scpb.LoadBalancingConfig{
{Policy: &scpb.LoadBalancingConfig_Xds{}},
{Policy: &scpb.LoadBalancingConfig_RoundRobin{
RoundRobin: &scpb.RoundRobinConfig{},
}},
},
FallbackPolicy: []*scpb.LoadBalancingConfig{
{Policy: &scpb.LoadBalancingConfig_Xds{}},
{Policy: &scpb.LoadBalancingConfig_PickFirst{
PickFirst: &scpb.PickFirstConfig{},
}},
},
EdsServiceName: testEDSName,
LrsLoadReportingServerName: &wrapperspb.StringValue{Value: testLRSName},
}); err != nil {
t.Fatalf("%v", err)
}
tests := []struct {
name string
js json.RawMessage
want serviceconfig.LoadBalancingConfig
wantErr bool
}{
{
name: "jsonpb-generated",
js: b.Bytes(),
want: &EDSConfig{
ChildPolicy: &loadBalancingConfig{
Name: "round_robin",
Config: json.RawMessage("{}"),
},
FallBackPolicy: &loadBalancingConfig{
Name: "pick_first",
Config: json.RawMessage("{}"),
},
EDSServiceName: testEDSName,
LrsLoadReportingServerName: &testLRSName,
},
wantErr: false,
},
{
// json with random balancers, and the first is not registered.
name: "manually-generated",
js: json.RawMessage(`
{
"balancerName": "fake.foo.bar",
"childPolicy": [
{"fake_balancer_C": {}},
{"fake_balancer_A": {}},
{"fake_balancer_B": {}}
],
"fallbackPolicy": [
{"fake_balancer_C": {}},
{"fake_balancer_B": {}},
{"fake_balancer_A": {}}
],
"edsServiceName": "eds.service",
"lrsLoadReportingServerName": "lrs.server"
}`),
want: &EDSConfig{
BalancerName: "fake.foo.bar",
ChildPolicy: &loadBalancingConfig{
Name: "fake_balancer_A",
Config: json.RawMessage("{}"),
},
FallBackPolicy: &loadBalancingConfig{
Name: "fake_balancer_B",
Config: json.RawMessage("{}"),
},
EDSServiceName: testEDSName,
LrsLoadReportingServerName: &testLRSName,
},
wantErr: false,
},
{
// json with no lrs server name, LrsLoadReportingServerName should
// be nil (not an empty string).
name: "no-lrs-server-name",
js: json.RawMessage(`
{
"balancerName": "fake.foo.bar",
"edsServiceName": "eds.service"
}`),
want: &EDSConfig{
BalancerName: "fake.foo.bar",
EDSServiceName: testEDSName,
LrsLoadReportingServerName: nil,
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := &edsBalancerBuilder{}
got, err := b.ParseConfig(tt.js)
if (err != nil) != tt.wantErr {
t.Errorf("edsBalancerBuilder.ParseConfig() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(got, tt.want) {
t.Errorf("ParseConfig() returned unexpected config, diff (-got +want): %s", cmp.Diff(got, tt.want))
}
})
}
}
func (s) TestLoadbalancingConfigParsing(t *testing.T) {
tests := []struct {
name string
s string
want *EDSConfig
}{
{
name: "empty",
s: "{}",
want: &EDSConfig{},
},
{
name: "success1",
s: `{"childPolicy":[{"pick_first":{}}]}`,
want: &EDSConfig{
ChildPolicy: &loadBalancingConfig{
Name: "pick_first",
Config: json.RawMessage(`{}`),
},
},
},
{
name: "success2",
s: `{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`,
want: &EDSConfig{
ChildPolicy: &loadBalancingConfig{
Name: "round_robin",
Config: json.RawMessage(`{}`),
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var cfg EDSConfig
if err := json.Unmarshal([]byte(tt.s), &cfg); err != nil || !cmp.Equal(&cfg, tt.want) {
t.Errorf("test name: %s, parseFullServiceConfig() = %+v, err: %v, want %+v, <nil>", tt.name, cfg, err, tt.want)
}
})
}
}
func (s) TestEqualStringPointers(t *testing.T) {
var (
ta1 = "test-a"
ta2 = "test-a"
tb = "test-b"
)
tests := []struct {
name string
a *string
b *string
want bool
}{
{"both-nil", nil, nil, true},
{"a-non-nil", &ta1, nil, false},
{"b-non-nil", nil, &tb, false},
{"equal", &ta1, &ta2, true},
{"different", &ta1, &tb, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := equalStringPointers(tt.a, tt.b); got != tt.want {
t.Errorf("equalStringPointers() = %v, want %v", got, tt.want)
}
})
}
}
| {
balancer.Register(&fakeBalancerBuilder{name: fakeBalancerA})
balancer.Register(&fakeBalancerBuilder{name: fakeBalancerB})
} |
expand.rs | use crate::parser::Parser;
use crate::token::Token;
impl<'a> Parser<'a> {
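// Returns the next token after handling conditionals (e.g. \ifnum), printing
// primitives (e.g. \number) and macro expansion; unexpandable tokens are
// passed through unchanged.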
pub fn lex_expanded_token(&mut self) -> Option<Token> {
if self.is_conditional_head() {
// Handle conditionals, like \ifnum
self.expand_conditional();
return self.lex_expanded_token();
} else if self.is_print_head() {
// Handle printing, like \number\count1
let replacement = self.expand_print();
self.add_upcoming_tokens(replacement);
return self.lex_expanded_token();
}
match self.lex_unexpanded_token() {
None => None,
Some(token) => {
// Handle macro expansion
if let Some(makro) = self.state.get_macro(&token) {
let replacement_map = self.parse_replacement_map(&makro);
let replacement = makro.get_replacement(&replacement_map);
self.add_upcoming_tokens(replacement);
self.lex_expanded_token()
} else {
// Passthrough anything else
Some(token)
}
}
}
}
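// Like lex_expanded_token(), but pushes the token back so the next call
// returns it again.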
pub fn peek_expanded_token(&mut self) -> Option<Token> {
match self.lex_expanded_token() {
Some(token) => {
self.add_upcoming_token(token.clone());
Some(token)
}
None => None,
}
}
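// Returns the next token without performing any expansion, taking from the
// pushed-back token queue before asking the lexer for fresh input.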
pub fn lex_unexpanded_token(&mut self) -> Option<Token> {
if self.upcoming_tokens.is_empty() {
self.lexer.lex_token()
} else {
self.upcoming_tokens.pop()
}
}
pub fn peek_unexpanded_token(&mut self) -> Option<Token> {
match self.lex_unexpanded_token() {
Some(token) => {
self.add_upcoming_token(token.clone());
Some(token)
}
None => None,
}
}
// Sometimes, we need to undo the lexing of a token. This function accepts
// a token that we want to lex next. This undoing happens in a few places:
// * When we're peeking at tokens (e.g. when we're handling <optional
// spaces> and we want to check if the next token is a space)
// * When we expand something, so we want the next lexed tokens to be the
// expanded result
// * When we're following the instructions to "insert the token <tok> into
// the input", like we do when seeing vertical mode material in
// horizontal mode.
//
// Note: Use this function sparingly outside of this file! For efficiency's
// sake, we should try to peek tokens instead of manually parsing and
// un-parsing them.
pub fn add_upcoming_token(&mut self, token: Token) {
self.upcoming_tokens.push(token);
}
// Adds multiple tokens with add_upcoming_token(). We add the tokens in
// reverse so that the first token in the list is the next one to be parsed.
// Note: Use this function sparingly! For efficiency's sake, we should try
// to peek only one token ahead when we can.
pub fn add_upcoming_tokens(&mut self, tokens: Vec<Token>) {
for token in tokens.into_iter().rev() {
self.add_upcoming_token(token);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::rc::Rc;
use crate::category::Category;
use crate::makro::{Macro, MacroListElem};
use crate::testing::with_parser;
#[test]
fn it_lexes_tokens() {
with_parser(&["a%"], |parser| {
assert_eq!(
parser.lex_unexpanded_token(),
Some(Token::Char('a', Category::Letter))
);
});
}
#[test]
fn it_peeks_tokens() {
with_parser(&["a%"], |parser| {
assert_eq!(
parser.peek_unexpanded_token(),
Some(Token::Char('a', Category::Letter))
);
assert_eq!(
parser.lex_unexpanded_token(),
Some(Token::Char('a', Category::Letter))
);
});
}
#[test]
fn it_expands_macros() {
with_parser(&["\\a{ab}%"], |parser| {
parser.state.set_macro(
false,
&Token::ControlSequence("a".to_string()),
&Rc::new(Macro::new(
vec![MacroListElem::Parameter(1)],
vec![
MacroListElem::Token(Token::Char(
'x',
Category::Letter,
)),
MacroListElem::Parameter(1),
MacroListElem::Parameter(1),
],
)),
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('x', Category::Letter))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('a', Category::Letter))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('b', Category::Letter))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('a', Category::Letter))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('b', Category::Letter))
);
});
}
#[test]
fn it_expands_conditionals() {
with_parser(&["\\iftrue x\\else y\\fi%"], |parser| {
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('x', Category::Letter))
);
assert_eq!(parser.lex_expanded_token(), None,);
});
}
#[test]
fn | () {
with_parser(&["\\a b%"], |parser| {
parser.state.set_macro(
false,
&Token::ControlSequence("a".to_string()),
&Rc::new(Macro::new(
vec![],
vec![MacroListElem::Token(Token::Char(
'x',
Category::Letter,
))],
)),
);
assert_eq!(
parser.peek_expanded_token(),
Some(Token::Char('x', Category::Letter))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('x', Category::Letter))
);
assert_eq!(
parser.peek_expanded_token(),
Some(Token::Char('b', Category::Letter))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('b', Category::Letter))
);
});
}
#[test]
fn it_prints_numbers() {
with_parser(&["\\count1=-100 %", "\\number\\count1%"], |parser| {
parser.parse_assignment(None);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('-', Category::Other))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('1', Category::Other))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('0', Category::Other))
);
assert_eq!(
parser.lex_expanded_token(),
Some(Token::Char('0', Category::Other))
);
});
}
}
| it_peeks_expanded_tokens |
test_cam16_ucs.py | # -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.cam16_ucs` module.
"""
import unittest
from colour.models.tests.test_cam02_ucs import (
TestJMh_CIECAM02_to_UCS_Luo2006,
TestUCS_Luo2006_to_JMh_CIECAM02,
TestXYZ_to_UCS_Luo2006,
TestUCS_Luo2006_to_XYZ,
)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestJMh_CAM16_to_UCS_Li2017',
'TestUCS_Li2017_to_JMh_CAM16',
'TestXYZ_to_UCS_Li2017',
'TestUCS_Li2017_to_XYZ',
]
class TestJMh_CAM16_to_UCS_Li2017(TestJMh_CIECAM02_to_UCS_Luo2006):
"""
Defines :func:`colour.models.cam16_ucs.JMh_CAM16_to_UCS_Li2017`
definition unit tests methods.
Notes
-----
- :func:`colour.models.cam16_ucs.JMh_CAM16_to_UCS_Li2017` is a wrapper
of :func:`colour.models.cam02_ucs.JMh_CIECAM02_to_UCS_Luo2006` and thus
currently adopts the same unittests.
"""
class TestUCS_Li2017_to_JMh_CAM16(TestUCS_Luo2006_to_JMh_CIECAM02):
"""
Defines :func:`colour.models.cam16_ucs.UCS_Li2017_to_JMh_CAM16`
definition unit tests methods.
Notes
-----
- :func:`colour.models.cam16_ucs.UCS_Li2017_to_JMh_CAM16` is a wrapper
of :func:`colour.models.cam02_ucs.UCS_Luo2006_to_JMh_CIECAM02` and thus
currently adopts the same unittests.
"""
|
class TestXYZ_to_UCS_Li2017(TestXYZ_to_UCS_Luo2006):
"""
Defines :func:`colour.models.cam16_ucs.XYZ_to_UCS_Li2017`
definition unit tests methods.
"""
pass
class TestUCS_Li2017_to_XYZ(TestUCS_Luo2006_to_XYZ):
"""
Defines :func:`colour.models.cam16_ucs.UCS_Li2017_to_XYZ`
definition unit tests methods.
"""
pass
if __name__ == '__main__':
unittest.main() | |
embedly.js | const fetch = require("node-fetch")
const EMBEDLY_API_KEY = process.env.EMBEDLY_API_KEY
const EMBEDLY_API = "https://api.embedly.com/1/oembed?"
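// Serverless function handler: expects a JSON body with a "url" field and
// responds with Embedly's oEmbed metadata for that URL.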
exports.handler = async (event, context) => {
const url = JSON.parse(event.body).url
let data
try {
console.log("[LOG]: requesting", url)
const service = `${EMBEDLY_API}url=${url}&key=${EMBEDLY_API_KEY}&maxwidth=512`
data = await request(service)
} catch (error) {
console.error("[ERROR]: something went wrong,", error)
return {
statusCode: 500,
body: JSON.stringify(error),
}
}
// tag the response with the API that produced it, for this demo
data.api = "embedly"
return {
statusCode: 200,
body: JSON.stringify(data),
}
}
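// Thin fetch wrapper: throws on non-2xx responses and returns parsed JSON
// (or null for a 204 No Content response).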
async function | (url, options = {method: "GET"}) {
const res = await fetch(url, options)
if (!res.ok) {
throw new Error("HTTP request error, ".concat(String(res.status)))
} else if (res.status === 204) {
return null
} else {
return await res.json()
}
}
| request |