file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
aliyun.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aliyun
import (
"fmt"
"strings"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
alierr "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/pkg/errors"
"yunion.io/x/jsonutils"
"yunion.io/x/log"
v "yunion.io/x/pkg/util/version"
"yunion.io/x/pkg/utils"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/util/httputils"
)
const (
ALIYUN_INTERNATIONAL_CLOUDENV = "InternationalCloud"
ALIYUN_FINANCE_CLOUDENV = "FinanceCloud"
CLOUD_PROVIDER_ALIYUN = api.CLOUD_PROVIDER_ALIYUN
CLOUD_PROVIDER_ALIYUN_CN = "阿里云"
CLOUD_PROVIDER_ALIYUN_EN = "Aliyun"
ALIYUN_DEFAULT_REGION = "cn-hangzhou"
ALIYUN_API_VERSION = "2014-05-26"
ALIYUN_API_VERSION_VPC = "2016-04-28"
ALIYUN_API_VERSION_LB = "2014-05-15"
ALIYUN_API_VERSION_KVS = "2015-01-01"
ALIYUN_API_VERSION_TRIAL = "2020-07-06"
ALIYUN_BSS_API_VERSION = "2017-12-14"
ALIYUN_RAM_API_VERSION = "2015-05-01"
ALIYUN_RDS_API_VERSION = "2014-08-15"
ALIYUN_RM_API_VERSION = "2020-03-31"
ALIYUN_STS_API_VERSION = "2015-04-01"
ALIYUN_PVTZ_API_VERSION = "2018-01-01"
ALIYUN_ALIDNS_API_VERSION = "2015-01-09"
ALIYUN_CBN_API_VERSION = "2017-09-12"
ALIYUN_CDN_API_VERSION = "2018-05-10"
ALIYUN_IMS_API_VERSION = "2019-08-15"
ALIYUN_NAS_API_VERSION = "2017-06-26"
ALIYUN_WAF_API_VERSION = "2019-09-10"
ALIYUN_MONGO_DB_API_VERSION = "2015-12-01"
ALIYUN_ES_API_VERSION = "2017-06-13"
ALIYUN_SERVICE_ECS = "ecs"
ALIYUN_SERVICE_VPC = "vpc"
ALIYUN_SERVICE_RDS = "rds"
ALIYUN_SERVICE_SLB = "slb"
ALIYUN_SERVICE_KVS = "kvs"
)
var (
// https://help.aliyun.com/document_detail/31837.html?spm=a2c4g.11186623.2.18.675f2b8cu8CN5K#concept-zt4-cvy-5db
OSS_FINANCE_REGION_MAP = map[string]string{
"cn-hzfinance": "cn-hangzhou",
"cn-shanghai-finance-1-pub": "cn-shanghai-finance-1",
"cn-szfinance": "cn-shenzhen-finance-1",
"cn-hzjbp": "cn-hangzhou",
"cn-shanghai-finance-1": "cn-shanghai-finance-1",
"cn-shenzhen-finance-1": "cn-shenzhen-finance-1",
}
)
type AliyunClientConfig struct {
cpcfg cloudprovider.ProviderConfig
cloudEnv string // 服务区域 InternationalCloud | FinanceCloud
accessKey string
accessSecret string
debug bool
}
func NewAliyunClientConfig(cloudEnv, accessKey, accessSecret string) *AliyunClientConfig {
cfg := &AliyunClientConfig{
cloudEnv: cloudEnv,
accessKey: accessKey,
accessSecret: accessSecret,
}
return cfg
}
func (cfg *AliyunClientConfig) CloudproviderConfig(cpcfg cloudprovider.ProviderConfig) *AliyunClientConfig {
cfg.cpcfg = cpcfg
return cfg
}
func (cfg *AliyunClientConfig) Debug(debug bool) *AliyunClientConfig {
cfg.debug = debug
return cfg
}
func (cfg AliyunClientConfig) Copy() AliyunClientConfig {
return cfg
}
type SAliyunClient struct {
*AliyunClientConfig
ownerId string
ownerName string
nasEndpoints map[string]string
vpcEndpoints map[string]string
iregions []cloudprovider.ICloudRegion
iBuckets []cloudprovider.ICloudBucket
}
func NewAliyunClient(cfg *AliyunClientConfig) (*SAliyunClient, error) {
client := SAliyunClient{
AliyunClientConfig: cfg,
nasEndpoints: map[string]string{},
vpcEndpoints: map[string]string{},
}
err := client.fetchRegions()
if err != nil {
return nil, errors.Wrap(err, "fetchRegions")
}
err = client.fetchBuckets()
if err != nil {
return nil, errors.Wrap(err, "fetchBuckets")
}
if client.debug {
log.Debugf("ClientID: %s ClientName: %s", client.ownerId, client.ownerName)
}
return &client, nil
}
func jsonRequest(client *sdk.Client, domain, apiVersion, apiName string, params map[string]string, debug bool) (jsonutils.JSONObject, error) {
if debug {
log.Debugf("request %s %s %s %s", domain, apiVersion, apiName, params)
}
var resp jsonutils.JSONObject
var err error
for i := 1; i < 4; i++ {
resp, err = _jsonRequest(client, domain, apiVersion, apiName, params)
retry := false
if err != nil {
if e, ok := errors.Cause(err).(*alierr.ServerError); ok {
code := e.ErrorCode()
switch code {
case "InvalidAccessKeyId.NotFound":
return nil, err
case "404 Not Found", "InstanceNotFound":
return nil, errors.Wrap(cloudprovider.ErrNotFound, err.Error())
case "InvalidInstance.NotSupported",
"SignatureNonceUsed", // SignatureNonce 重复。每次请求的 SignatureNonce 在 15 分钟内不能重复。
"BackendServer.configuring", // 负载均衡的前一个配置项正在配置中,请稍后再试。
"Operation.Conflict", // 您当前的操作可能与其他人的操作产生了冲突,请稍后重试。
"OperationDenied.ResourceControl", // 指定的区域处于资源控制中,请稍后再试。
"ServiceIsStopping", // 监听正在停止,请稍后重试。
"ProcessingSameRequest", // 正在处理相同的请求。请稍后再试。
"ResourceInOperating", // 当前资源正在操作中,请求稍后重试。
"InvalidFileSystemStatus.Ordering", // Message: The filesystem is ordering now, please check it later.
"OperationUnsupported.EipNatBWPCheck": // create nat snat
retry = true
default:
if strings.HasPrefix(code, "EntityNotExist.") || strings.HasSuffix(code, ".NotFound") || strings.HasSuffix(code, "NotExist") {
if strings.HasPrefix(apiName, "Delete") {
return jsonutils.NewDict(), nil
}
return nil, errors.Wrap(cloudprovider.ErrNotFound, err.Error())
}
return nil, err
}
} else {
for _, code := range []string{
"EOF",
"i/o timeout",
"TLS handshake timeout",
"Client.Timeout exceeded while awaiting headers",
"connection reset by peer",
"server misbehaving",
"try later",
"Another operation is being performed", // Another operation is being performed on the DB instance or the DB instance is faulty(赋予RDS账号权限)
} {
if strings.Contains(err.Error(), code) {
retry = true
break
}
}
}
}
if retry {
if debug {
log.Debugf("Retry %d...", i)
}
time.Sleep(time.Second * time.Duration(i*10))
continue
}
if debug {
log.Debugf("Response: %s", resp)
}
return resp, err
}
return resp, err
}
func _jsonRequest(client *sdk.Client, domain string, version string, apiName string, params map[string]string) (jsonutils.JSONObject, error) {
req := requests.NewCommonRequest()
req.Domain = domain
req.Version = version
req.ApiName = apiName
if params != nil {
for k, v := range params {
req.QueryParams[k] = v
}
}
req.Scheme = "https"
req.GetHeaders()["User-Agent"] = "vendor/yunion-OneCloud@" + v.Get().GitVersion
method := requests.POST
for prefix, _method := range map[string]string{
"Get": requests.GET,
"Describe": requests.GET,
"List": requests.GET,
"Delete": requests.DELETE,
} {
if strings.HasPrefix(apiName, prefix) {
method = _method
break
}
}
if strings.HasPrefix(domain, "elasticsearch") {
req.Product = "elasticsearch"
req.ServiceCode = "elasticsearch"
pathPattern, ok := params["PathPattern"]
if !ok {
return nil, errors.Errorf("Roa request missing pathPattern")
}
delete(params, "PathPattern")
req.PathPattern = pathPattern
req.Method = method
}
resp, err := processCommonRequest(client, req)
if err != nil {
return nil, errors.Wrapf(err, "processCommonRequest with params %s", params)
}
body, err := jsonutils.Parse(resp.GetHttpContentBytes())
if err != nil {
return nil, errors.Wrapf(err, "jsonutils.Parse")
}
//{"Code":"InvalidInstanceType.ValueNotSupported","HostId":"ecs.aliyuncs.com","Message":"The specified instanceType beyond the permitted range.","RequestId":"0042EE30-0EDF-48A7-A414-56229D4AD532"}
//{"Code":"200","Message":"successful","PageNumber":1,"PageSize":50,"RequestId":"BB4C970C-0E23-48DC-A3B0-EB21FFC70A29","RouterTableList":{"RouterTableListType":[{"CreationTime":"2017-03-19T13:37:40Z","Description":"","ResourceGroupId":"rg-acfmwie3cqoobmi","RouteTableId":"vtb-j6c60lectdi80rk5xz43g","RouteTableName":"","RouteTableType":"System","RouterId":"vrt-j6c00qrol733dg36iq4qj","RouterType":"VRouter","VSwitchIds":{"VSwitchId":["vsw-j6c3gig5ub4fmi2veyrus"]},"VpcId":"vpc-j6c86z3sh8ufhgsxwme0q"}]},"Success":true,"TotalCount":1}
//{"Code":"Success","Data":{"CashCoupon":[]},"Message":"Successful!","RequestId":"87AD7E9A-3F8F-460F-9934-FFFE502325EE","Success":true}
if body.Contains("Code") {
code, _ := body.GetString("Code")
if len(code) > 0 && !utils.IsInStringArray(code, []string{"200", "Success"}) {
return nil, fmt.Errorf(body.String())
}
}
return body, nil
}
func (self *SAliyunClient) getNasEndpoint(regionId string) string {
err := self.fetchNasEndpoints()
if err != nil {
return "nas.aliyuncs.com"
}
ep, ok := self.nasEndpoints[regionId]
if ok && len(ep) > 0 {
return ep
}
return "nas.aliyuncs.com"
}
func (self *SAliyunClient) fetchNasEndpoints() error {
if len(self.nasEndpoints) > 0 {
return nil
}
client, err := self.getDefaultClient()
if err != nil {
return errors.Wrapf(err, "getDefaultClient")
}
resp, err := jsonRequest(client, "nas.aliyuncs.com", ALIYUN_NAS_API_VERSION, "DescribeRegions", nil, self.debug)
if err != nil {
return errors.Wrapf(err, "DescribeRegions")
}
regions := []SRegion{}
err = resp.Unmarshal(®ions, "Regions", "Region")
if err != nil {
return errors.Wrapf(err, "resp.Unmarshal")
}
for _, region := range regions {
self.nasEndpoints[region.RegionId] = region.RegionEndpoint
}
return nil
}
func (self *SAliyunClient) getDefaultClient() (*sdk.Client, error) {
client, err := self.getSdkClient(ALIYUN_DEFAULT_REGION)
return client, err
}
func (self *SAliyunClient) getVpcEndpoint(regionId string) string {
err := self.fetchVpcEndpoints()
if err != nil {
return "vpc.aliyuncs.com"
}
ep, ok := self.vpcEndpoints[regionId]
if ok && len(ep) > 0 {
return ep
}
return "vpc.aliyuncs.com"
}
func (self *SAliyunClient) fetchVpcEndpoints() error {
if len(self.vpcEndpoints) > 0 {
return nil
}
client, err := self.getDefaultClient()
if err != nil {
return errors.Wrapf(err, "getDefaultClient")
}
resp, err := jsonRequest(client, "vpc.aliyuncs.com", ALIYUN_API_VERSION_VPC, "DescribeRegions", nil, self.debug)
if err != nil {
return errors.Wrapf(err, "DescribeRegions")
}
regions := []SRegion{}
err = resp.Unmarshal(®ions, "Regions", "Region")
if err != nil {
return errors.Wrapf(err, "resp.Unmarshal")
}
for _, region := range regions {
self.vpcEndpoints[region.RegionId] = region.RegionEndpoint
}
return nil
}
func (self *SAliyunClient) getSdkClient(regionId string) (*sdk.Client, error) {
transport := httputils.GetAdaptiveTransport(true)
transport.Proxy = self.cpcfg.ProxyFunc
client, err := sdk.NewClientWithOptions(
regionId,
&sdk.Config{
HttpTransport: transport,
},
&credentials.BaseCredential{
AccessKeyId: self.accessKey,
AccessKeySecret: self.accessSecret,
},
)
return client, err
}
func (self *SAliyunClient) imsRequest(apiName string, params map[string]string) (jsonutils.JSONObject, error) {
cli, err := self.getDefaultClient()
if err != nil {
return nil, err
}
return jsonRequest(cli, "ims.aliyuncs.com", ALIYUN_IMS_API_VERSION, apiName, params, self.debug)
}
func (self *SAliyunClient) rmRequest(apiName string, params map[string]string) (jsonutils.JSONObject, error) {
cli, err := self.getDefaultClient()
if err != nil {
return nil, err
}
return jsonRequest(cli, "resourcemanager.aliyuncs.com", ALIYUN_RM_API_VERSION, apiName, params, self.debug)
}
func (self *SAliyunClient) ecsRequest(apiName string, params map[string]string) (jsonutils.JSONObject, error) {
cli, err := self.getDefaultClient()
if err != nil {
return nil, err
}
return jsonRequest(cli, "ecs.aliyuncs.com", ALIYUN_API_VERSION, apiName, params, self.debug)
}
func (self *SAliyunClient) pvtzRequest(apiName string, params map[string]string) (jsonutils.JSONObject, error) {
cli, err := self.getDefaultClient()
if err != nil {
return nil, err
}
return jsonRequest(cli, "pvtz.aliyuncs.com", ALIYUN_PVTZ_API_VERSION, apiName, params, self.debug)
}
func (self *SAliyunClient) alidnsRequest(apiName string, params map[string]string) (jsonutils.JSONObject, error) {
cli, err := self.getDefaultClient()
if err != nil {
return nil, err
}
return jsonRequest(cli, "alidns.aliyuncs.com", ALIYUN_ALIDNS_API_VERSION, apiName, params, self.debug)
}
func (self *SAliyunClient) cbnRequest(apiName string, params map[string]string) (jsonutils.JSONObject, error) {
cli, err := self.getDefaultClient()
if err != nil {
return nil, err
}
return jsonRequest(cli, "cbn.aliyuncs.com", ALIYUN_CBN_API_VERSION, apiName, params, self.debug)
}
func (self *SAliyunClient) cdnRequest(apiName string, params map[string]string) (jsonutils.JSONObject, error) {
cli, err := self.getDefaultClient()
if err != nil {
return nil, err
}
return jsonRequest(cli, "cdn.aliyuncs.com", ALIYUN_CDN_API_VERSION, apiName, params, self.debug)
}
func (self *SAliyunClient) fetchRegions() error {
body, err := self.ecsRequest("DescribeRegions", map[string]string{"AcceptLanguage": "zh-CN"})
if err != nil {
log.Errorf("fetchRegions fail %s", err)
return err
}
regions := make([]SRegion, 0)
err = body.Unmarshal(®ions, "Regions", "Region")
if err != nil {
log.Errorf("unmarshal json error %s", err)
return err
}
self.iregions = make([]cloudprovider.ICloudRegion, len(regions))
for i := 0; i < len(regions); i += 1 {
regions[i].client = self
self.iregions[i] = ®ions[i]
}
return nil
}
// oss endpoint
// https://help.aliyun.com/document_detail/31837.html?spm=a2c4g.11186623.2.6.6E8ZkO
func getOSSExternalDomain(regionId string) string {
return fmt.Sprintf("oss-%s.aliyuncs.com", regionId)
}
func getOSSInternalDomain(regionId string) string {
return fmt.Sprintf("oss-%s-internal.aliyuncs.com", regionId)
}
// https://help.aliyun.com/document_detail/31837.html?spm=a2c4g.11186623.2.6.XqEgD1
func (client *SAliyunClient) getOssClientByEndpoint(endpoint string) (*oss.Client, error) {
// NOTE
//
// oss package as of version 20181116160301-c6838fdc33ed does not
// respect http.ProxyFromEnvironment.
//
// The ClientOption Proxy, AuthProxy lacks the feature NO_PROXY has
// which can be used to whitelist ips, domains from http_proxy,
// https_proxy setting
// oss use no timeout client so as to send/download large files
httpClient := client.cpcfg.AdaptiveTimeoutHttpClient()
cliOpts := []oss.ClientOption{
oss.HTTPClient(httpClient),
}
cli, err := oss.New(endpoint, client.accessKey, client.accessSecret, cliOpts...)
if err != nil {
return nil, errors.Wrap(err, "oss.New")
}
return cli, nil
}
func (client *SAliyunClient) getOssClient(regionId string) (*oss.Client, error) {
ep := getOSSExternalDomain(regionId)
return client.getOssClientByEndpoint(ep)
}
func (self *SAliyunClient) getRegionByRegionId(id string) (cloudprovider.ICloudRegion, error) {
_id, ok := OSS_FINANCE_REGION_MAP[id]
if ok {
id = _id
}
for i := 0; i < len(self.iregions); i += 1 {
if self.iregions[i].GetId() == id {
return self.iregions[i], nil
}
}
return nil, cloudprovider.ErrNotFound
}
func (self *SAliyunClient) invalidateIBuckets() {
self.iBuckets = nil
}
func (self *SAliyunClient) getIBuckets() ([]cloudprovider.ICloudBucket, error) {
if self.iBuckets == nil {
err := self.fetchBuckets()
if err != nil {
return nil, errors.Wrap(err, "fetchBuckets")
}
}
return self.iBuckets, nil
}
func (self *SAliyunClient) fetchBuckets() error {
osscli, err := self.getOssClient(ALIYUN_DEFAULT_REGION)
if err != nil {
return errors.Wrap(err, "self.getOssClient")
}
result, err := osscli.ListBuckets()
if err != nil {
return errors.Wrap(err, "oss.ListBuckets")
}
self.ownerId = result.Owner.ID
self.ownerName = result.Owner.DisplayName
ret := make([]cloudprovider.ICloudBucket, 0)
for _, bInfo := range result.Buckets {
regionId := bInfo.Location[4:]
region, err := self.getRegionByRegionId(regionId)
if err != nil {
log.Errorf("cannot find bucket's region %s", regionId)
continue
}
b := SBucket{
region: region.(*SRegion),
Name: bInfo.Name,
Location: bInfo.Location,
CreationDate: bInfo.CreationDate,
StorageClass: bInfo.StorageClass,
}
ret = append(ret, &b | ons() []SRegion {
regions := make([]SRegion, len(self.iregions))
for i := 0; i < len(regions); i += 1 {
region := self.iregions[i].(*SRegion)
regions[i] = *region
}
return regions
}
func (self *SAliyunClient) GetSubAccounts() ([]cloudprovider.SSubAccount, error) {
err := self.fetchRegions()
if err != nil {
return nil, err
}
subAccount := cloudprovider.SSubAccount{}
subAccount.Name = self.cpcfg.Name
subAccount.Account = self.accessKey
subAccount.HealthStatus = api.CLOUD_PROVIDER_HEALTH_NORMAL
return []cloudprovider.SSubAccount{subAccount}, nil
}
func (self *SAliyunClient) GetAccountId() string {
return self.ownerId
}
func (self *SAliyunClient) GetIRegions() []cloudprovider.ICloudRegion {
return self.iregions
}
func (self *SAliyunClient) GetIRegionById(id string) (cloudprovider.ICloudRegion, error) {
for i := 0; i < len(self.iregions); i += 1 {
if self.iregions[i].GetGlobalId() == id {
return self.iregions[i], nil
}
}
return nil, cloudprovider.ErrNotFound
}
func (self *SAliyunClient) GetRegion(regionId string) *SRegion {
if len(regionId) == 0 {
regionId = ALIYUN_DEFAULT_REGION
}
for i := 0; i < len(self.iregions); i += 1 {
if self.iregions[i].GetId() == regionId {
return self.iregions[i].(*SRegion)
}
}
return nil
}
func (self *SAliyunClient) GetIHostById(id string) (cloudprovider.ICloudHost, error) {
for i := 0; i < len(self.iregions); i += 1 {
ihost, err := self.iregions[i].GetIHostById(id)
if err == nil {
return ihost, nil
} else if errors.Cause(err) != cloudprovider.ErrNotFound {
return nil, err
}
}
return nil, cloudprovider.ErrNotFound
}
func (self *SAliyunClient) GetIVpcById(id string) (cloudprovider.ICloudVpc, error) {
for i := 0; i < len(self.iregions); i += 1 {
ihost, err := self.iregions[i].GetIVpcById(id)
if err == nil {
return ihost, nil
} else if errors.Cause(err) != cloudprovider.ErrNotFound {
return nil, err
}
}
return nil, cloudprovider.ErrNotFound
}
func (self *SAliyunClient) GetIStorageById(id string) (cloudprovider.ICloudStorage, error) {
for i := 0; i < len(self.iregions); i += 1 {
ihost, err := self.iregions[i].GetIStorageById(id)
if err == nil {
return ihost, nil
} else if errors.Cause(err) != cloudprovider.ErrNotFound {
return nil, err
}
}
return nil, cloudprovider.ErrNotFound
}
func (self *SAliyunClient) GetIProjects() ([]cloudprovider.ICloudProject, error) {
pageSize, pageNumber := 50, 1
resourceGroups := []SResourceGroup{}
for {
parts, total, err := self.GetResourceGroups(pageNumber, pageSize)
if err != nil {
return nil, errors.Wrap(err, "GetResourceGroups")
}
resourceGroups = append(resourceGroups, parts...)
if len(resourceGroups) >= total {
break
}
pageNumber += 1
}
ret := []cloudprovider.ICloudProject{}
for i := range resourceGroups {
ret = append(ret, &resourceGroups[i])
}
return ret, nil
}
func (region *SAliyunClient) GetCapabilities() []string {
caps := []string{
cloudprovider.CLOUD_CAPABILITY_PROJECT,
cloudprovider.CLOUD_CAPABILITY_COMPUTE,
cloudprovider.CLOUD_CAPABILITY_NETWORK,
cloudprovider.CLOUD_CAPABILITY_LOADBALANCER,
cloudprovider.CLOUD_CAPABILITY_OBJECTSTORE,
cloudprovider.CLOUD_CAPABILITY_RDS,
cloudprovider.CLOUD_CAPABILITY_CACHE,
cloudprovider.CLOUD_CAPABILITY_EVENT,
cloudprovider.CLOUD_CAPABILITY_CLOUDID,
cloudprovider.CLOUD_CAPABILITY_DNSZONE,
cloudprovider.CLOUD_CAPABILITY_INTERVPCNETWORK,
cloudprovider.CLOUD_CAPABILITY_SAML_AUTH,
cloudprovider.CLOUD_CAPABILITY_NAT,
cloudprovider.CLOUD_CAPABILITY_NAS,
cloudprovider.CLOUD_CAPABILITY_WAF,
cloudprovider.CLOUD_CAPABILITY_MONGO_DB,
cloudprovider.CLOUD_CAPABILITY_ES,
}
return caps
}
func (self *SAliyunClient) GetAccessEnv() string {
switch self.cloudEnv {
case ALIYUN_INTERNATIONAL_CLOUDENV:
return api.CLOUD_ACCESS_ENV_ALIYUN_GLOBAL
case ALIYUN_FINANCE_CLOUDENV:
return api.CLOUD_ACCESS_ENV_ALIYUN_FINANCE
default:
return api.CLOUD_ACCESS_ENV_ALIYUN_GLOBAL
}
}
| )
}
self.iBuckets = ret
return nil
}
func (self *SAliyunClient) GetRegi |
main.go | // +build linux
package main
import (
"flag"
"fmt"
"koding/terminal"
"koding/tools/config"
"koding/tools/logger"
"log"
"os"
)
var (
flagProfile = flag.String("c", "", "Configuration profile from file")
flagRegion = flag.String("r", "", "Configuration region from file")
flagDebug = flag.Bool("d", false, "Debug mode")
flagVersion = flag.Bool("version", false, "Show version and exit")
flagPort = flag.Int("p", 0, "Kite port")
)
func main() {
if *flagVersion {
fmt.Println(terminal.TERMINAL_VERSION)
os.Exit(0)
}
flag.Parse()
if *flagProfile == "" || *flagRegion == "" {
log.Fatalf("Please specify profile via -c and region via -r. Aborting.")
}
var logLevel logger.Level
if *flagDebug {
logLevel = logger.DEBUG | } else {
logLevel = logger.GetLoggingLevelFromConfig("terminal", *flagProfile)
}
term := terminal.New(config.MustConfig(*flagProfile))
term.LogLevel = logLevel
term.Region = *flagRegion
term.Port = *flagPort
// go go!
term.Run()
} | |
products.go | package main
import (
"context"
"fmt"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
type Product struct {
SKU string `json:"sku"`
ID int `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Stock int `json:"stock"`
Cost int `json:"cost"`
SellingPrice int `json:"selling_price"`
Sold int `json:"sold,omitempty"`
TypeID int `json:"type_id,omitempty"`
TypeName string `json:"type_name,omitempty"`
}
type ProductType struct {
ID int `json:"id"`
Name string `json:"name"`
}
func getProducts(ctx context.Context, db *sqlx.DB) ([]Product, error) {
return queryProducts(ctx, db, nil)
}
func getTopProducts(ctx context.Context, db *sqlx.DB) ([]Product, error) {
const limit = 3 // top 3 best-selling products
queryString := `SELECT
id, sku, name, stock, SUM(order_lines.amount) AS sold
FROM products JOIN order_lines ON id=product_id GROUP BY products.id ORDER BY sold DESC
`
queryString += fmt.Sprintf("LIMIT %d\n", limit)
rows, err := db.QueryContext(ctx, queryString)
if err != nil {
return nil, errors.Wrap(err, "querying top products")
}
defer rows.Close()
var products []Product
for rows.Next() {
var p Product
if err := rows.Scan(&p.ID, &p.SKU, &p.Name, &p.Stock, &p.Sold); err != nil {
return nil, err
}
products = append(products, p)
}
return products, rows.Err()
}
func getProduct(ctx context.Context, db *sqlx.DB, id int) (*Product, error) {
products, err := queryProducts(ctx, db, &id)
if err != nil || len(products) == 0 {
return nil, err
}
return &products[0], nil
}
func queryProducts(ctx context.Context, db *sqlx.DB, id *int) ([]Product, error) {
var args []interface{}
queryString := `SELECT
products.id, products.sku, products.name, products.description,
products.stock, products.cost, products.selling_price,
products.type_id, product_types.name | queryString += "WHERE products.id=?\n"
args = append(args, *id)
}
rows, err := db.QueryContext(ctx, db.Rebind(queryString), args...)
if err != nil {
return nil, errors.Wrap(err, "querying products")
}
defer rows.Close()
var products []Product
for rows.Next() {
var p Product
if err := rows.Scan(
&p.ID, &p.SKU, &p.Name, &p.Description,
&p.Stock, &p.Cost, &p.SellingPrice,
&p.TypeID, &p.TypeName,
); err != nil {
return nil, err
}
products = append(products, p)
}
return products, rows.Err()
}
func getProductTypes(ctx context.Context, db *sqlx.DB) ([]ProductType, error) {
return queryProductTypes(ctx, db, nil)
}
func getProductType(ctx context.Context, db *sqlx.DB, id int) (*ProductType, error) {
productTypes, err := queryProductTypes(ctx, db, &id)
if err != nil || len(productTypes) == 0 {
return nil, err
}
return &productTypes[0], nil
}
func queryProductTypes(ctx context.Context, db *sqlx.DB, id *int) ([]ProductType, error) {
var args []interface{}
queryString := "SELECT id, name FROM product_types"
if id != nil {
queryString += " WHERE id=?"
args = append(args, *id)
}
rows, err := db.QueryContext(ctx, db.Rebind(queryString), args...)
if err != nil {
return nil, errors.Wrap(err, "querying product types")
}
defer rows.Close()
var productTypes []ProductType
for rows.Next() {
var pt ProductType
if err := rows.Scan(&pt.ID, &pt.Name); err != nil {
return nil, err
}
productTypes = append(productTypes, pt)
}
return productTypes, rows.Err()
} | FROM products JOIN product_types ON type_id=product_types.id
`
if id != nil { |
publish.ts | import tmp from 'tmp';
import fs from 'fs-extra';
import path from 'path';
import chalk from 'chalk';
import yaml from 'js-yaml';
import request from 'request';
import { flags } from '@oclif/command';
import BaseCommand from '../base';
import { Tuture } from '../types';
import { apiEndpoint, apiTokenPath, staticServer } from '../config';
export default class Publish extends BaseCommand {
static description = 'Publish tutorial to tuture.co';
static flags = {
help: flags.help({ char: 'h' }),
};
collectTutureAssets(tutureYml: string): [string, string[]] {
const assets: string[] = [];
// Replace all paths to local assets with ones on tuture static server.
// For instance, ./tuture-assets/foo.png => https://static.tuture.co/foo.png.
const updatedTuture = tutureYml.replace(
/!\[.*\]\((.*)\)/g,
(match, imagePath) => {
assets.push(imagePath);
return match.replace(imagePath, staticServer + imagePath);
},
);
return [updatedTuture, assets];
}
saveTutureToTmp(tuture: string) {
const tmpDir = tmp.dirSync();
const tmpPath = path.join(tmpDir.name, 'tuture.json');
fs.writeFileSync(tmpPath, tuture);
return tmpPath;
}
async run() {
this.parse(Publish);
if (!fs.existsSync(apiTokenPath)) {
this.error(
`You have not logged in yet. Please login with ${chalk.bold(
'tuture login',
)}.`,
);
this.exit(1);
}
const tutureYml = fs.readFileSync('tuture.yml').toString();
const [updatedTutureYml, assets] = this.collectTutureAssets(tutureYml);
const tuture: Tuture = yaml.safeLoad(updatedTutureYml);
const formData: any = {
id: tuture.id,
name: tuture.name,
tuture: fs.createReadStream(this.saveTutureToTmp(JSON.stringify(tuture))),
diff: fs.createReadStream(path.join('.tuture', 'diff.json')),
assets: assets.map((asset) => fs.createReadStream(asset)),
};
if (tuture.topics) {
formData.topics = tuture.topics.join(',');
}
if (tuture.description) {
formData.description = tuture.description;
}
const token = fs.readFileSync(apiTokenPath).toString();
request.post(
`${apiEndpoint}/publish`,
{ formData, headers: { Authorization: `JWT ${token}` } },
(err, res, body) => {
if (err) {
this.log(
`Verification failed. Please relogin with ${chalk.bold(
'tuture login',
)}.`,
);
this.exit(1);
}
if (res.statusCode === 201) {
this.success('Your tutorial has been successfully published!');
} else {
this.log('Publish failed. Please retry.');
this.log(body);
this.exit(1);
}
},
);
} | } |
|
data.rs | use amethyst::ecs::prelude::*;
use std::{
borrow::Cow,
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
};
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum Direction {
Up,
Down,
Left,
Right,
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum PlayerAction {
Move(Direction),
Wait,
Grab,
Quit,
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Attack {
pub attacker: Entity,
pub target: Entity,
}
#[derive(Default, Debug, Clone, PartialEq, Hash)]
pub struct ItemProperties {
pub name: Cow<'static, str>,
pub damage: Option<i32>,
}
#[derive(Debug, Clone, PartialEq, Hash)]
pub enum ItemPart {
Name(Cow<'static, str>),
Damage(i32),
}
impl ItemPart {
pub fn collect_properties(&self, prop: &mut ItemProperties) {
use ItemPart::*;
match *self {
Name(ref name) => {
prop.name.to_mut().push_str(name);
}
Damage(dmg) => |
}
}
}
pub fn calculate_hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
| {
prop.damage = Some(prop.damage.map_or(dmg, |x| x + dmg));
} |
run_mots_depth_inference.py | """ Script for running depth inference assuming MOTS dataset structure """
import logging
import os
import sys
from pathlib import Path, PurePath
import click
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from IPython.core import ultratb
from PIL import Image
import diw
from diw.model import Model, get_vars_to_save_and_restore
sys.excepthook = ultratb.FormattedTB(mode="Verbose", color_scheme="Linux", call_pdb=1)
_logger = logging.getLogger(__name__)
def load_image(img_file):
|
def resize_img(img, img_shape):
""" resizes an image """
return img.resize(img_shape, Image.LANCZOS).convert("RGB")
def plot_image(image, image_type="RGB"):
""" plots image with matplotlib """
plt.figure()
color_map = None
if image_type != "RGB":
color_map = plt.cm.get_cmap("plasma").reversed()
plt.imshow(image, cmap=color_map)
plt.show() # display it
return plt
@click.command()
@click.option(
"--checkpoint_dir",
"checkpoint_dir",
default="./data/checkpoints/test",
type=click.Path(exists=True),
help="Path to the model checkpoint",
)
@click.option(
"--data_dir",
"data_dir",
default="./data/test/mots_data",
type=click.Path(exists=True),
help="Path to MOTS data",
)
@click.option(
"--save_img",
"save_img",
flag_value=True,
help="Flag to whether save the image of the depth (besides numpy array)",
)
@click.version_option(diw.__version__)
def main(data_dir, checkpoint_dir, save_img):
if save_img:
plt.figure()
height, width = 128, 416
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" # to fix CUDA bug
inference_model = Model(
is_training=False, batch_size=1, img_height=height, img_width=width
)
checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
vars_to_restore = get_vars_to_save_and_restore(checkpoint)
saver = tf.train.Saver(vars_to_restore)
with tf.Session() as sess:
saver.restore(sess, checkpoint)
sequence_paths = [p for p in Path(data_dir).glob("*") if p.is_dir()]
for seq_path in sequence_paths:
model_name = PurePath(checkpoint_dir).parts[-1]
(seq_path / model_name).mkdir(parents=True, exist_ok=True)
if save_img:
(seq_path / (model_name + "_depth_images")).mkdir(
parents=True, exist_ok=True
)
img_paths = sorted(
[p for p in (seq_path / "img1").glob("*") if p.is_file()],
key=lambda path: str(path),
)
for img_path in img_paths:
img_name = img_path.parts[-1].split(".")[0]
print("Processing sequence: {}, image: {}".format(seq_path, img_name))
image = load_image(str(img_path))
image = resize_img(image, (width, height))
image = np.array(image)
image = image[None, ...]
depth = inference_model.inference_depth(image, sess)
depth = depth[0, :, :, 0]
np.save(str(seq_path / model_name / img_name), depth)
if save_img:
plt.imshow(depth, plt.cm.get_cmap("plasma").reversed())
plt.savefig(
str(seq_path / (model_name + "_depth_images"))
+ "/"
+ (img_name + ".png")
)
plt.clf()
if __name__ == "__main__":
main()
| """Load image from disk. Output value range: [0,255]."""
return Image.open(img_file).convert("RGB") |
bindata.go | // Package internal Code generated by go-bindata. (@generated) DO NOT EDIT.
// sources:
// template/collection.tmpl
// template/edge.tmpl
// template/enum.tmpl
// template/node.tmpl
// template/pagination.tmpl
// template/pagination_test.tmpl
// template/transaction.tmpl
package internal
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) |
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
// Name return file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
// Size return file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
// Mode return file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
// ModTime return file modify time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
// IsDir return file whether a directory
func (fi bindataFileInfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
}
// Sys return file is sys mode
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _templateCollectionTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x54\x5f\x6f\xdb\x36\x10\x7f\x16\x3f\xc5\x4d\xf0\x83\x65\x38\x74\xda\xb7\x64\xf0\x43\x16\xa4\x45\x81\x2e\x43\xd7\x02\x7b\x1c\x68\xf2\x24\x11\xa3\x49\x85\xa4\xd2\x18\x02\xbf\xfb\x40\x8a\xb2\x6c\x37\x1b\x02\x34\x0f\xb1\x78\xf7\xbb\xbb\xdf\xfd\x1d\x86\xcd\x8a\xdc\x9b\xee\x60\x65\xd3\x7a\x78\x7f\xfd\xee\xe6\xaa\xb3\xe8\x50\x7b\xf8\xc0\x38\xee\x8c\xf9\x07\x3e\x69\x4e\xe1\x4e\x29\x48\x20\x07\x51\x6f\x9f\x51\x50\xf2\xad\x95\x0e\x9c\xe9\x2d\x47\xe0\x46\x20\x48\x07\x4a\x72\xd4\x0e\x05\xf4\x5a\xa0\x05\xdf\x22\xdc\x75\x8c\xb7\x08\xef\xe9\xf5\xa4\x85\xda\xf4\x5a\x10\xa9\x93\xfe\xf3\xa7\xfb\x87\xc7\xaf\x0f\x50\x4b\x85\x90\x65\xd6\x18\x0f\x42\x5a\xe4\xde\xd8\x03\x98\x1a\xfc\x49\x30\x6f\x11\x29\x59\x6d\x42\x20\x64\x18\x40\x60\x2d\x35\x42\xc9\x8d\x52\xc8\xbd\x34\xba\x84\x10\xa2\xc6\xe3\xbe\x53\xcc\x23\x94\x2d\x32\x81\xb6\x84\x45\xd4\x10\xb9\xef\x8c\xf5\xb0\x24\x45\xc9\x8d\xf6\xf8\xe2\x4b\x42\x8a\xb2\x91\xbe\xed\x77\x94\x9b\xfd\xe6\xe6\x46\xa0\x93\x8d\x76\x9b\xe6\x49\x35\xa8\x37\x8d\x65\x5d\xfb\xa4\x4a\x52\xa5\x98\x96\xe9\x06\x61\xa1\x63\xda\xb7\x5b\x58\xd0\x47\x23\xd0\x41\x66\xb4\x40\xd1\xa0\x8b\x0a\x21\xb9\xcf\x64\xb2\x49\x54\x25\x93\x68\x4b\x1f\x12\x30\x04\x52\x0c\x03\xc8\x1a\x16\x4c\x6b\xe3\x59\x4c\x22\x81\x22\x9a\xde\x1d\x65\x8e\x3e\x68\xff\xf1\xcb\xe7\x64\xf1\xa3\x09\xfd\x4d\x6a\x31\xea\x5e\x51\xfe\xce\xba\x4e\xea\x06\x42\x18\x06\xa8\x99\x54\x50\xee\x22\x9e\x69\x01\xfb\xac\x9b\xe1\x0e\x98\x45\xd8\xf7\xbe\x67\x4a\x1d\x00\x5f\xb8\xea\x9d\x7c\xc6\x72\xb4\xc7\xd3\x40\x39\xdf\x2d\x38\xf4\xd3\x63\xa4\xfe\xc8\xf6\x08\x4b\x25\x5d\x96\xd3\x6f\x87\xee\x15\x69\x14\x54\xd5\x31\xab\xa3\xf3\x9c\xc4\xc4\x2e\x56\xe4\xd5\x84\x7e\x86\xc7\xe4\xfd\xc7\xf0\xf3\xe7\xfc\x95\xda\x6b\x91\xa3\x7c\x46\x3b\xf7\xf1\xcf\x49\x32\x82\x17\x4f\x3d\xda\xc3\xac\xfe\x12\x9f\x29\x5a\x08\x64\xb3\x81\xfb\x71\x54\x3f\x48\x54\xc2\x81\x47\xa5\x5c\x9a\xfb\x64\x76\xb5\xeb\xa5\x4a\xdb\x63\x00\x59\x83\x56\x1d\x40\x19\x26\x80\x1b\xad\x91\x7b\x14\xa0\xd3\xb4\xed\x0e\x71\x1b\x8d\x8a\x71\xf3\x1c\x53\x52\xf7\x9a\xc3\xf2\x8c\x65\x08\xb0\x9a\x49\x85\x50\x9d\xc7\x5f\x72\xff\x72\xb4\xbf\x1f\x7f\xd7\xe0\x98\x97\xae\x96\xe8\x80\x52\xea\xbc\x4d\x15\x3a\x73\x03\x03\x29\x64\x0d\x35\x8f\x79\xe6\xfd\xa0\x1f\x71\xf4\x9a\xfd\x44\xdf\xd5\xaf\x11\xf3\xcb\x16\xb4\x54\xd1\xa6\xb8\x24\xb7\x85\x0b\x09\xe5\x27\xfc\x96\x27\xae\xff\xe8\xd0\xa6\xde\x9f\xba\x5f\x43\xcd\x69\x82\x9e\xb0\xa6\x94\x56\xa4\x08\xa4\xb0\xe8\x7b\xab\x2f\x23\x90\x40\xde\x56\xa9\x33\x26\xb1\x50\xab\x89\xce\x25\x97\x35\xd4\x11\x74\xac\x44\xae\x31\x8a\x4b\x6a\xff\x5b\xd0\x61\xb8\x82\xef\xd2\xb7\xd3\xf8\xa6\x99\xac\x8d\x85\xbf\x27\xff\xb7\xdb\x7c\x4a\x2e\x02\xcd\xcd\xcc\x48\xfa\x15\xf3\x3d\x74\x27\xd1\xab\xd4\x82\xc2\x7d\x97\x9e\xb7\x19\x98\x26\x33\x89\x53\xfc\xe9\xb8\xb1\x3d\xae\x61\xf1\xcc\x54\x3f\x1e\x33\x9a\x37\xad\x28\x38\x73\x08\xf3\x4d\x93\x13\x2c\xa2\xa4\x16\xf8\x72\x34\x7b\x37\x9e\x0b\x59\x43\xe3\x61\x21\xe1\x1a\x42\x58\xc3\x71\x9f\xca\x98\xff\x68\x3a\x3e\x46\xf1\xed\x18\xe6\x0d\x93\xf2\x97\xf4\xed\x30\x40\xc7\x1c\x67\x6a\xe4\x0c\x21\x2c\x63\x6f\x97\x63\x5d\x57\xb3\x7a\x79\xce\xed\x3a\xae\x7c\x5a\xcd\x5c\x94\xf8\x97\x8c\xe8\x65\xdb\x73\x49\xab\x8c\x0a\xd5\xb1\x58\xf3\x21\x8c\xff\x02\x39\x13\xfe\xf7\xf0\xbd\x76\x5b\xfe\x0d\x00\x00\xff\xff\x0f\x9c\x85\xe3\x90\x07\x00\x00")
func templateCollectionTmplBytes() ([]byte, error) {
return bindataRead(
_templateCollectionTmpl,
"template/collection.tmpl",
)
}
func templateCollectionTmpl() (*asset, error) {
bytes, err := templateCollectionTmplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "template/collection.tmpl", size: 1936, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _templateEdgeTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x52\xc1\x6a\xdb\x40\x10\x3d\x5b\x5f\x31\x08\x1f\x6c\xd3\x6e\xd2\xdc\x5a\xe8\x21\x18\x1b\x0c\xa9\x4d\x9b\xf4\x54\x7a\x50\x77\x9f\xec\x25\xeb\x5d\x75\x76\x14\x62\xc4\xfe\x7b\x59\xc9\x8a\x5d\xc8\x49\x33\xf3\xe6\xbd\x79\x4f\x6c\xd7\xdd\x2c\x8a\x65\x68\x4e\x6c\xf7\x07\xa1\xbb\xdb\x4f\x9f\x3f\x36\x8c\x08\x2f\xb4\xae\x34\xfe\x84\xf0\x4c\x1b\xaf\x15\xdd\x3b\x47\xfd\x52\xa4\x8c\xf3\x0b\x8c\x2a\x9e\x0e\x36\x52\x0c\x2d\x6b\x90\x0e\x06\x64\x23\x39\xab\xe1\x23\x0c\xb5\xde\x80\x49\x0e\xa0\xfb\xa6\xd2\x07\xd0\x9d\xba\x1d\x51\xaa\x43\xeb\x4d\x61\x7d\x8f\x3f\x6c\x96\xab\xed\xe3\x8a\x6a\xeb\x40\xe7\x19\x87\x20\x64\x2c\x43\x4b\xe0\x13\x85\x9a\xe4\xea\x98\x30\xa0\x8a\xc5\x4d\x4a\x45\xd1\x75\x64\x50\x5b\x0f\x2a\x61\xf6\x28\x29\xa5\x3c\x13\x1c\x1b\x57\x09\xa8\x3c\xa0\x32\xe0\x92\xa6\x19\x29\xec\xb1\x09\x2c\x54\xea\xe0\x05\xaf\x52\xf6\x02\x5c\xf9\x3d\x68\xea\xe9\xcb\x57\x9a\xaa\x6d\x30\x88\x79\x79\xd2\x75\x34\xe5\x7e\xe8\xd5\x0f\x68\xd8\x17\xf0\x08\x9c\x39\x38\xc3\x2b\xb3\x3f\x93\x26\x75\xeb\x35\xcd\x06\x6e\x4a\xb4\xc8\x95\x57\xdb\xea\x08\x4a\x69\x4e\xb9\x85\x7a\x14\x6e\xb5\xac\x2d\x9c\xa1\x94\x66\x5a\x5e\xe9\x6c\x49\x2d\x87\xef\xbc\xd7\xb0\x35\xf9\x20\x99\xf1\xd3\xdb\xbf\x6d\x96\xf8\xf5\xbb\xeb\x08\x3e\xf3\x16\x83\xd8\xd3\xa9\xc1\x78\xe0\x03\x81\x39\xf0\x9c\xba\x62\x32\x99\x30\x62\xeb\xa4\x9f\x65\xa3\xa3\xa9\xc1\xae\x7a\xcf\xca\x8e\x57\xcc\xb3\x79\x26\xdb\x9a\x36\x71\x1b\xe4\x21\x54\x06\x66\x06\x1e\x55\xff\x93\xbd\x52\xfd\xde\x82\x4f\xef\xe6\x9b\xab\x21\xcb\x75\x8e\x9d\x77\x79\x19\x2e\xe6\xee\xde\xb9\xb7\x58\xf9\x77\xf4\x0e\xd2\x90\x41\x5a\xf6\x34\xde\x1c\x84\x2a\x6f\xae\xc4\xa6\x50\xbb\x46\x6c\xf0\x95\xa3\x94\xbe\x55\xf1\x79\x1b\x64\x9d\x1f\x59\xef\xfa\x72\x05\xcc\x6f\x57\x8a\x5e\xff\xd2\x5e\xaa\xab\xf2\x5f\x00\x00\x00\xff\xff\x07\x45\xeb\x33\x23\x03\x00\x00")
func templateEdgeTmplBytes() ([]byte, error) {
return bindataRead(
_templateEdgeTmpl,
"template/edge.tmpl",
)
}
func templateEdgeTmpl() (*asset, error) {
bytes, err := templateEdgeTmplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "template/edge.tmpl", size: 803, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _templateEnumTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x53\xd1\x4e\xe3\x3a\x10\x7d\x4e\xbe\x62\x88\x8a\x94\x54\xd4\xe1\xf2\x76\xef\x55\x1f\x58\x54\x58\x24\x76\xb5\x08\x76\xf7\x11\x99\x78\xdc\x5a\x75\xec\xee\xd8\x29\x42\x51\xfe\x7d\x65\x27\x2d\x29\xb0\x2b\xde\xdc\x9e\x33\x67\xce\xcc\x9c\xb4\x6d\x39\x4d\x2f\xec\xe6\x99\xd4\x72\xe5\xe1\xec\xf4\x9f\x7f\x67\x1b\x42\x87\xc6\xc3\x25\xaf\xf0\xd1\xda\x35\x5c\x9b\x8a\xc1\xb9\xd6\x10\x49\x0e\x02\x4e\x5b\x14\x2c\xbd\x5f\x29\x07\xce\x36\x54\x21\x54\x56\x20\x28\x07\x5a\x55\x68\x1c\x0a\x68\x8c\x40\x02\xbf\x42\x38\xdf\xf0\x6a\x85\x70\xc6\x4e\x77\x28\x48\xdb\x18\x91\x2a\x13\xf1\x9b\xeb\x8b\xc5\xd7\xbb\x05\x48\xa5\x11\x86\xff\xc8\x5a\x0f\x42\x11\x56\xde\xd2\x33\x58\x09\x7e\xd4\xcc\x13\x22\x4b\xa7\x65\xd7\xa5\x69\xdb\x82\x40\xa9\x0c\x42\x56\xa3\xe7\x25\x17\x42\x79\x65\x0d\xd7\xe5\x92\xf8\x66\xf5\x4b\xcf\xd0\x34\x75\x06\x03\x99\xb8\x59\x22\x4c\x24\xfc\x37\x87\x09\x5b\x98\xa6\xbe\x54\xa8\x85\x0b\x78\xd2\xb6\x30\x09\xec\x00\x7a\x52\xf5\x37\x5e\xad\x79\xa4\xb3\xfb\xe7\x0d\xb2\x3b\x4f\xca\x2c\x61\xc2\x76\xc0\xac\xaf\x9a\x81\x92\x60\xac\x0f\xc4\xcf\xdc\x5d\xd9\xc0\x8e\x8a\x51\x92\xb0\x42\xb5\x45\x0a\xb2\xfb\xf7\x44\xb2\x4f\x8d\xd2\x02\x29\x1a\xe8\xa5\x92\xb2\x84\x2f\x9c\xdc\x8a\xeb\xab\xdb\x1b\x50\xf5\x46\x63\x8d\xc6\x3b\x18\x86\x61\x03\x8a\x04\xca\x78\x24\xc9\x2b\x64\x69\x92\xc8\xc6\x54\x90\x1f\x34\xeb\x3a\xd8\xcf\xd3\x75\xc5\x48\x37\x7f\x02\x65\xd9\x4f\x52\x1e\xa9\x80\x36\x4d\x92\x64\xf7\xbb\x9f\x30\x7f\x3a\x01\xe7\xa9\xb2\x66\xcb\x6e\x1b\xeb\xf1\xb5\xf2\xb0\x89\xbc\x28\x8a\x34\x49\xba\xb4\x77\xfe\xdd\xd4\x7f\xf5\xbe\xc7\x3f\xe6\x7e\x7a\x60\x7f\x2c\x9e\x6f\xb9\x7e\x51\x68\xbb\x02\x90\xc8\x52\x3f\x8a\xf3\x74\x02\x76\x1d\x96\xbd\xe5\x9a\xe5\x2e\x5a\x2d\xe2\x94\x12\x8e\xec\xba\xa7\x25\x84\xbe\x21\x03\xb2\xf6\x6c\x11\xaa\x65\x9e\xc5\x66\xc7\xf7\x50\x37\xce\xc3\x23\x02\x87\xbe\x38\x3b\x09\x52\x51\x22\x1c\x29\x99\xbe\xb6\x3a\x1f\xaf\x3a\x74\xdc\xb5\x43\x8a\x57\x0f\xa8\x64\x3f\xb8\x56\x82\x7b\x1b\x4a\xf2\xd7\x1a\xc5\xff\x91\x7c\x34\x07\xa3\xf4\x9f\x2d\x1e\xbb\xf0\x99\x85\xac\xf1\xe0\x49\x89\x71\xe7\x2c\x9e\xed\xc5\xe7\x50\x6f\x94\x8e\x57\x8a\x49\x45\xed\x86\x68\x6e\x39\x41\x1e\x68\x65\x39\x16\xe9\xa7\xdf\x9f\xef\x6d\xf2\xc2\xc5\x92\x87\x77\x12\x79\xb8\x85\x2c\x2b\x3e\x2e\x3e\x8a\x46\x94\x87\x87\x77\x53\x33\x87\xfc\x20\x15\xb9\x51\xf1\x2c\xc5\x30\x9b\x11\x61\xb4\xb6\x7d\xfb\xfa\x1d\x00\x00\xff\xff\x0a\xcf\x5e\x8f\xec\x04\x00\x00")
func templateEnumTmplBytes() ([]byte, error) {
return bindataRead(
_templateEnumTmpl,
"template/enum.tmpl",
)
}
func templateEnumTmpl() (*asset, error) {
bytes, err := templateEnumTmplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "template/enum.tmpl", size: 1260, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _templateNodeTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x5a\x5f\x6f\x1b\xb7\xb2\x7f\x96\x3e\xc5\x5c\x5d\xa7\xd8\x35\x14\x6e\x9c\xfb\x54\xb5\xbe\x80\x4f\x9c\x00\x02\x7a\x92\xb4\x0e\x4e\x1f\x0c\xa3\xa5\x97\x5c\x89\x27\x2b\x72\x4d\x52\x92\x0d\x55\xdf\xfd\x60\x86\xe4\x6a\x57\x7f\x52\x27\xe8\x69\x5e\xa2\xe5\x0c\x67\x38\xbf\xf9\xcb\x5d\x6f\x36\xc5\xf9\xf0\x8d\x69\x9e\xac\x9a\xcd\x3d\xbc\x7e\x75\xf1\xfd\xcb\xc6\x4a\x27\xb5\x87\x77\xbc\x94\xf7\xc6\x7c\x86\xa9\x2e\x19\x5c\xd5\x35\x10\x93\x03\xa4\xdb\x95\x14\x6c\xf8\x69\xae\x1c\x38\xb3\xb4\xa5\x84\xd2\x08\x09\xca\x41\xad\x4a\xa9\x9d\x14\xb0\xd4\x42\x5a\xf0\x73\x09\x57\x0d\x2f\xe7\x12\x5e\xb3\x57\x89\x0a\x95\x59\x6a\x31\x54\x9a\xe8\x3f\x4d\xdf\xbc\x7d\x7f\xf3\x16\x2a\x55\x4b\x88\x6b\xd6\x18\x0f\x42\x59\x59\x7a\x63\x9f\xc0\x54\xe0\x3b\xca\xbc\x95\x92\x0d\xcf\x8b\xed\x76\x38\xdc\x6c\x40\xc8\x4a\x69\x09\x23\x6d\x84\x1c\xc1\x76\x8b\x6b\x67\xcd\xe7\x19\x4c\x2e\xe1\x9e\x3b\x09\x67\xec\x8d\xd1\x95\x9a\xb1\x8f\xbc\xfc\xcc\x67\x32\xf2\x78\xb9\x68\x6a\xee\x25\x8c\xe6\x92\x0b\x69\x47\x70\x06\x51\xa4\xaa\x40\xe3\xbe\x1b\x6f\x2c\x9f\x49\xf6\x9e\x2f\x24\x8c\xdc\x43\x4d\xf2\x07\x9b\x0d\x54\x5c\xd5\x41\x25\x58\xf9\xb0\x54\x56\x3a\xb8\xf9\xf9\x27\x70\x61\x47\x3a\x87\xd4\x22\xca\x2c\xce\xe1\xad\x76\x4b\x2b\x81\xd7\x35\x28\x01\xfe\xa9\x91\x0e\xe6\x7c\x25\xc9\x64\x87\x2a\x70\x0d\xc8\x30\xb4\x41\x89\x4f\xf8\x3c\xb9\x84\x33\x36\xbd\xa6\xdf\x81\xa2\x2a\x98\x79\xc8\x6a\xa9\xe1\x8c\xbd\x37\x42\xba\x1c\x5e\xa5\x93\xa5\x6d\x97\x90\x29\x2d\xe4\x63\x62\x81\x57\x39\x9b\x5e\xb3\x24\x06\x59\x2d\xd7\x33\x09\x67\x1a\x55\x64\x0e\xbd\xd3\x32\x5f\xe4\xc4\x34\xd8\x81\x11\xc4\x86\xfd\x67\x3a\x89\xda\xc9\x1b\xec\xc1\x22\x8c\x74\xa0\x8d\x07\xb7\x6c\x1a\x63\x3d\x2c\x96\xb5\x57\x0d\x3a\x39\x1a\x3f\x6a\x55\x44\x98\x3a\x3f\x3b\xd8\xa9\x05\x6d\xcf\x90\xfc\xb2\x77\xe6\x74\x58\x12\x33\x42\xdb\x0f\x3d\x5d\xe0\xb2\xee\x2c\x8c\x82\x9c\x9d\xca\x97\xb0\x56\x7e\x0e\x67\x4d\x64\x41\xc1\xd1\xd8\x8f\x9f\x67\x1f\xb9\x9f\x77\x14\x34\x27\xe4\xe4\xdd\x73\x8e\x66\xca\xcf\x97\xf7\xac\x34\x8b\xa2\x8a\x89\x54\x48\xed\x0b\xa1\x78\x2d\x4b\x5f\x60\x24\x3d\x8f\xad\x70\xe5\x5c\x2e\xf8\x09\x6e\xa5\xcb\xe5\x3d\xf7\xc6\xe2\xb6\x97\xa5\xd1\xde\xaa\x7b\xfc\x3d\x3b\x50\xf0\xfd\xf7\x42\x3a\x35\xd3\xae\x98\x3d\xd4\x33\xa9\x8b\x99\xe5\xcd\xfc\x80\x6d\xce\xdd\x5c\x95\xc6\x36\xc5\xcc\xbc\x24\x8f\x49\x6b\x8d\x25\x2e\x53\x73\x3d\x63\xc6\xce\x8a\xc7\xc2\x3d\xe9\xb2\x70\x72\xc1\x9b\xb9\xb1\x72\x84\xf6\x17\x05\xa0\x3b\x2c\xac\x2d\x6f\x1c\x05\xf5\x3d\x77\xaa\xa4\x55\x58\x48\x3f\x37\x82\x0d\x29\xc4\x03\x9f\xd2\x5e\x5a\x34\x05\x36\xc3\x01\x2e\x65\x68\x81\x7c\xf4\xe8\x45\xfc\x3f\x87\xec\x1c\xd7\xc7\x40\x87\xc8\x87\xdb\x56\x4b\xaa\x14\x64\x45\x47\x2a\x38\x6f\x97\xa5\x47\x89\xd3\x6b\x18\x00\x40\x27\x25\xb6\x5b\xf8\xfd\xdf\xce\xe8\xc9\x48\x89\xb1\x59\x28\xac\x00\xfe\x69\xf4\x7b\x51\x00\x85\xac\x12\x6c\x38\x20\x4e\x40\x39\x4a\xcf\x00\xd2\x0e\xd4\xd0\xdd\x03\x00\x69\x1b\x92\xd8\x70\xf0\x4e\xc9\x5a\x38\xb8\xbd\x3b\xa7\x5f\x69\x63\x45\xcb\xbd\xad\x69\x63\x20\xb1\xe1\xe0\xad\x98\x49\x07\xb8\x15\x7f\xb5\x3a\x25\x2e\xf7\x95\xa6\xad\x44\x62\x11\x90\xa0\xcf\x54\xc0\x89\x18\xe1\x08\xab\x3b\x3c\x82\x61\xd1\xae\x93\x56\x15\x45\x38\x56\xb2\x8a\x4a\xdf\xde\x2e\xcd\x17\xa7\x76\x21\x09\x32\xee\xd0\x3f\x41\x75\xce\x86\x83\x7f\xf1\x7a\x29\xf7\x84\xac\x70\x6d\x1f\x96\xc0\xa2\x2a\x25\x05\x10\x43\x32\x31\x20\x74\x2f\xfd\x5a\x4a\x0d\x7e\x6d\xc8\x52\x17\x4d\x25\xd4\xf6\x2c\xfd\x53\x07\x16\x05\xa1\xd8\x33\x74\x7f\xd3\x81\xa5\x69\x13\x12\x18\xc6\x18\xb9\xed\x44\x8c\x9d\xf0\x9d\x12\x0e\xb2\xf5\x5c\x5a\x19\xda\x1a\x09\x6c\x8c\xd2\x1e\xbc\xc9\xc9\x62\xea\x17\xb5\x31\x0d\x98\x95\xb4\xd4\x32\x42\xbf\xe0\x5a\x00\x17\x02\xd4\xa2\xa9\xe5\x02\x3b\x35\x66\x41\xcc\x88\x98\x4e\xac\x6d\x21\xa7\x8a\x25\x9e\xd7\xca\x52\x2a\x14\x8e\x34\xcd\x7e\x49\x8f\x48\xaf\x96\xba\x84\xac\xc7\xb5\xdd\xc2\x79\x28\xa5\x04\xd4\x76\x9b\x43\x48\x59\xff\x08\x87\x69\x4b\x76\xee\x72\x37\xe6\x2f\xfa\x66\x40\xa4\x4b\xf8\x0e\x89\xf8\x3c\x98\x5e\x4f\x60\x4f\x15\x9b\x5e\x8f\x91\x84\x88\x4e\x60\xd4\xd3\x3b\x22\x4a\x48\xb6\x09\x2c\xf8\x67\x99\xa5\x94\x1b\xa3\x1c\xea\x8a\x9a\xc5\x6c\xdc\x6e\x73\xe2\xa7\xf8\xd9\xb1\xe3\x63\x97\x3b\x84\x57\x64\x0e\x4d\x29\xb5\x85\x8e\x28\x14\xb4\xe2\x16\xee\x97\x15\xdc\xde\xdd\x3f\x79\x19\xda\x5e\xdb\x95\xd4\x18\xce\xaa\x08\x68\x6f\xd7\x40\x55\xb8\x2b\x80\x71\x09\x18\x20\xec\x9f\xdc\xba\x39\xaf\xf7\x61\x66\x9b\x0d\x34\xdc\x95\xbc\x86\xb3\xaa\x05\xfb\x07\xda\xf9\x3f\x97\xa0\x55\x4d\x30\x0e\x06\x03\x2b\xfd\xd2\x6a\x5c\x21\xb9\xb4\x18\xb4\x51\x15\x08\x07\xb8\xa5\xd8\x84\xed\xf6\x0e\x41\xa7\xb5\xb8\x3d\x80\x1b\xd0\xad\x52\x1f\x0f\xe8\x0e\x28\x1b\x76\xc4\x1e\xf4\x83\x90\xce\x93\x98\x2c\xd9\xfd\xb2\x0a\x18\x0f\xb6\x09\x8e\xd4\x5c\x0f\x1f\x12\xa6\x09\xf0\x23\x00\xca\x08\x60\x97\x25\x58\x44\x2b\x7d\x83\x70\xa9\x67\x0f\x9d\x38\x8c\x25\xfb\xc7\x0e\x36\x45\x86\x3e\x2d\x28\xc1\x83\xa8\xaa\xed\xfe\xef\x97\x0b\x69\x55\x99\x8e\x70\xf4\x0c\x6c\x7a\xed\x92\x57\x8f\x39\xd2\x62\x56\x8f\x7e\x5e\x4a\xfb\x34\x82\x2c\xf9\x35\xa8\xc7\x11\x2b\xc3\xf2\x48\xff\x6e\x24\xf6\xfc\xac\x73\xfc\xdd\xcc\x12\x7c\x39\xbd\x6e\x99\x3b\x31\x12\x0f\x7b\x13\x2a\xd7\x76\xeb\x30\x25\xf3\xd6\x1e\x59\x3b\xd9\x5a\xf0\xb7\x9f\xf3\xa6\xe4\x1a\xcf\x33\x86\xef\x4e\xa1\xd7\x39\x6a\x0a\x14\x4a\x97\xaf\x88\xf7\xd3\x21\x97\xb6\x50\x21\xd2\xaa\x1e\x0e\x0e\xc7\xf2\x2b\x21\xa8\x88\x52\x61\xe2\x8d\x02\x6f\xe8\xb9\xac\x15\xd6\x57\xaa\xa5\xa1\x20\x96\x70\xfe\x86\x16\x4f\xd7\xbe\x31\x4e\xb7\xbd\x76\xb0\x3f\xc4\xa0\x35\x3a\x84\xcc\xe4\x12\x4a\xaa\xca\x36\x60\xa4\x44\x3e\x3c\x62\xfa\x81\xdd\xdb\x61\xbb\xc6\xd2\x41\x68\x38\xc2\xe2\x24\xad\xc5\xb5\xa9\x5e\xf1\x5a\x89\xe9\x75\xa8\xb5\xfe\x1d\xde\xb9\xde\xe2\x09\x36\xe1\xa6\xb4\x9b\xa5\x3e\x34\x5e\x19\x8d\x4d\xc6\xac\x1d\x1a\x54\xa9\xd9\x92\xa2\x29\xf5\x16\x0b\xf2\x51\x96\x4b\x62\x5b\x3a\xa4\x20\x20\xf8\xc8\x6b\x30\xb4\xdd\x75\x86\xb0\x28\x10\x79\xb2\x73\xdd\x2e\xb8\x30\x24\xfe\xaa\xfc\x1c\xb9\x42\x97\x96\xde\xed\xd0\xa7\x25\x2b\x9d\xa9\x31\x34\x93\x0e\xc8\x14\x93\x8c\xb8\x3c\xbf\xaf\x25\x3a\xe8\x01\x23\x35\x67\x28\x6f\x5a\xc1\x9a\x87\xab\x46\x63\xcd\x4a\x09\x29\xc6\x1d\xe6\xb5\xaa\x6b\xb8\x97\x20\xa4\x55\x2b\x29\xa0\xb2\x66\x41\xe4\xa5\xc6\x04\x70\xbc\x7e\xa9\x04\xca\x49\x86\xf3\x80\x86\x03\x21\x5d\x69\xd5\xbd\x14\xa0\xf4\x04\xe6\xde\x37\x6e\x52\xd0\x74\x6d\x98\x32\x85\x30\xa5\x2b\x16\x6a\x66\xb9\x97\xc5\xff\x76\xa5\x39\x16\x02\xa6\x6b\x69\x56\x05\x3c\x0e\xe2\x65\x3f\x58\x42\x55\x4d\xd1\x92\x77\x01\xdd\xb4\x7e\x27\x51\x06\x7a\xe0\x52\xac\x18\xa6\x13\xb4\x97\x50\x61\xa8\x6c\x5b\xd0\xdf\xa9\x47\x29\x0e\x91\xa7\x27\xba\x62\x47\x2f\x78\x03\x1c\x2a\x64\x4e\x33\x58\x6b\x4d\x4f\x44\xe6\x63\x0b\x38\x71\xc8\xbe\xf9\xdf\x60\x7c\x37\xfa\x7d\xca\x5f\x0a\x74\x0a\xb5\x8e\xf1\x9d\xe1\xaf\xb5\xff\x5b\x34\xa2\xec\x83\x5c\xd7\x72\x1d\xed\x73\x99\x69\x3c\x8e\xf8\x3b\x7b\xf3\x9e\x13\xc2\x01\x90\x67\x72\x19\x4a\x5e\x24\x6c\x70\xaa\x32\x16\x7e\x1b\x63\xbe\x20\x35\x74\x3c\xe2\x25\xc7\x35\x3e\xa3\x9d\x39\xe5\x37\x5e\xba\xf1\xa9\xe3\xce\x5d\x41\xd8\xa7\x44\x53\x9f\x59\x8d\x8e\x60\xfc\x27\x9d\x2f\xba\xa0\x64\x94\x51\x3b\xcd\xa1\x6a\x95\x4c\x60\x66\xd9\x58\xbf\x0e\xdb\x4e\xb7\x7e\x55\x0b\xcf\xa8\x0a\x55\xd9\xa8\xe4\x1a\x93\x36\xe6\x3b\xb9\xd3\x42\xf6\x62\x95\xd3\x94\x60\x96\x1e\x14\x86\xe8\x53\x23\x47\x7d\xd9\xa9\xba\x6f\x7b\xa5\x10\x41\xe9\xdc\x10\x2d\x04\x82\x03\x1e\xe6\xe3\xfb\x27\x92\xa7\x04\xc3\x92\x91\x4a\x1b\xe1\x72\x58\x3f\x94\xa7\xc2\x81\xc2\x8e\xd5\x0e\x15\x53\x03\x78\x59\x1a\x2b\xa8\x56\x9a\x83\xaa\xd2\x2f\x29\x58\xac\x86\x45\x31\x18\x1c\xd4\xfc\x23\x8b\x63\x90\xda\xb3\x5e\x06\x35\xd2\xb3\x4f\xe8\x80\x1c\x77\x1c\x6f\x49\xf6\x79\x51\x30\x0e\x71\xc7\x18\xeb\x06\x72\xf6\x5b\x90\xb1\x3f\xb2\x0b\x59\xc5\x6a\x9c\x85\x70\x51\x15\x4c\x5d\xea\x29\x99\xb4\x29\x8a\xc2\x78\xb1\x7b\x6b\xc0\xae\x9a\x46\x06\x0e\xb2\x67\xf6\x50\xa3\xf3\x51\x49\xbb\x5b\x89\x3c\x8f\xae\xcc\xf2\xe1\x80\x22\xac\xd3\x1d\xf7\x73\x2f\xdf\x0b\xbe\xaf\x6f\x99\x25\x49\x88\x40\x47\x75\x28\xe5\x68\xe6\x9f\x86\x34\x34\x97\x94\x4b\x47\xd2\x6c\x07\x65\x84\xd1\xad\x95\x2f\xe7\x71\xe3\xe6\x4f\xde\x66\x95\xdc\x49\xd8\x7f\x75\x15\xdc\x3f\x41\xa8\xbb\x13\x44\xef\x7a\xc4\x68\x86\x4b\xe3\xda\xaf\x78\xcb\xcc\x0e\xe4\x4c\xaf\x09\x77\xd6\x9d\x7a\xe7\xdc\x7d\x6a\xdf\x88\x96\xa6\xc6\x29\x4f\x19\x3d\x6a\x87\xc7\x37\x61\x2d\x5c\x2b\x02\x7c\x7b\x37\xb3\x8e\xc0\xce\x2c\xf7\x41\xd7\x4f\xed\x4c\x7a\x6c\xb0\x3b\x32\xd7\xd1\xce\xb4\x1e\x2b\x7f\x57\xac\x90\x15\x5f\xd6\x7e\x32\xfc\xca\xea\x12\x12\x98\x3c\xf0\xe2\x61\x02\x2f\xd6\xa3\x36\x06\xf6\xe7\xa6\x3c\x36\xcf\xe3\x79\xe6\x4e\x25\x9a\xdb\x7f\x1d\x70\x22\xd7\x42\x0f\x39\x16\x20\xb5\xd4\x99\x12\xa1\x9f\x53\x18\x5c\x4c\xe2\xb5\xd9\x9e\x1a\x1b\xdd\xed\xab\xbb\xa0\x87\x31\x96\x0f\x8f\xc2\x7c\x88\x72\x67\x40\x8e\xa7\xd9\x90\x96\x6d\x04\x9c\x94\xbf\x9a\x1c\x72\x6d\xdb\x59\x3a\xb4\x5b\x4b\xed\x2e\xde\xad\xa3\x59\xc9\x8a\x7c\x38\x20\x0b\xbb\x2c\xb4\xd0\x63\x09\x8d\xa5\x65\x59\xf0\xe6\x36\xa4\xd6\xdd\x1e\x9a\x98\xed\xe2\xb5\x12\x8f\x3d\xde\x1e\xcb\xdd\xed\x9d\xd2\xbe\x27\xbe\x6d\xc9\x47\x0a\x4a\xe8\xcb\x8a\x72\xb8\x6d\xcb\xe8\x47\x44\xad\x5f\x8e\xfa\x7d\xb7\x53\x80\x8e\xe1\x1d\xac\xbe\x55\x78\x4d\x8d\x51\x8d\xe1\xa2\xf4\x52\x46\xec\x83\xd1\xb7\xf4\x1f\x72\xf1\x50\x2d\x7b\xcb\xad\x02\x32\xfa\x56\x89\x0e\xe3\x6e\x6d\x0c\x2a\x0f\xee\x40\x5b\xda\xa2\xe6\x76\x06\x45\x80\xd3\x0b\x18\xd7\x2d\xb1\x6d\x3c\x77\xea\xa1\x3b\x61\x55\x9c\x61\x8e\x61\xd5\x21\x3e\x76\xa9\xed\xc1\x37\xed\x25\x94\x80\x11\x8f\x1d\x68\x42\xc6\x53\x53\x0f\xb3\x43\xab\xed\x84\x67\x9e\xab\x2d\x58\x97\xb4\x91\xed\xb7\xea\x6e\x4f\xe5\x0e\xbb\x13\xda\x02\x12\xc9\x9f\x1d\x3c\x68\x4e\x0b\x1a\xee\xfa\x55\xad\xe3\xec\xa0\xa9\x17\x10\x27\x9b\xe1\x0e\x81\x83\x2e\x1b\x76\xe7\x87\xe1\x75\xb4\xe3\x06\xea\x33\xfa\xee\x00\x2b\xd9\xe4\x12\xe2\x07\x01\x1a\x3a\x3e\x72\x3f\x8f\x45\x8d\x02\x03\x35\x26\xfa\x7b\xb9\x46\x32\xb2\x4d\xb5\x90\x8f\x99\xa2\xd7\x3f\x28\x2d\xb1\x5c\x89\x70\xd5\x0c\x41\xb5\x3b\x7a\x7f\x5e\x43\xd8\x42\x25\x39\xd9\x7b\x4f\x94\xd9\xfd\xe6\x7b\x50\x74\x8f\x17\xd8\x67\xd4\x2a\x25\x16\xbc\xf9\x62\x6d\x39\x3f\xdc\xf4\xa5\xc0\x41\x79\xfb\x69\x1b\x97\xe2\x4b\x91\x0e\x34\x7f\xed\x88\xb0\x97\xe7\xdf\x32\x26\x4c\xc9\x46\x6c\x2a\x7f\xd3\xb0\x70\x55\xd7\xdf\x34\x2b\xc4\x62\x40\xf7\xd7\xd6\x09\x84\x40\xbf\x5a\x84\x31\xa0\xe3\x26\xf4\x05\xbd\x9a\x9a\x5e\xb7\x35\xe3\x3c\x70\x85\x72\xd1\x2b\x15\x7f\xc1\x0c\xe2\xbe\x7a\x08\x39\x91\x31\xe1\xd3\xec\xb1\x2b\x1b\xdd\x8e\x63\xc5\xdf\x5d\x8c\x07\x46\x97\x12\xc0\x3d\xe9\x92\x7d\xd0\x25\x1a\xe6\xe4\x02\x00\xce\xdb\xef\x78\xec\x57\xa9\x66\x73\x2f\xc5\x70\x30\x88\xf7\x1b\x6f\x16\xaa\x64\xf4\xc6\x37\x56\x49\x4a\x54\x0f\xe7\x41\x41\x48\xd4\xd4\x0f\x0f\x53\x55\xd8\x15\xc4\xef\x99\xec\xba\xbd\x27\x3e\xe7\x52\x1a\xe4\xb7\x01\xec\xd9\x4f\x86\x8b\x10\x4a\xc2\xae\xbe\x3c\xe5\x8c\x46\xdd\x21\x27\xb6\x08\xa5\x7d\xa6\x44\x91\x5d\xfc\xf8\xe3\xff\xbd\x86\x97\x70\x91\x47\x21\x48\xff\x11\x5e\xc1\x1f\x7f\xd0\xcf\xff\xbf\xa4\xe4\x4e\xf6\xed\xcb\xfd\x82\x83\x83\x53\xc9\xbf\x4a\xc0\x8b\x55\x74\x2e\x5d\xe7\x0e\x3d\xdb\x1d\xc0\x62\xd3\xc7\x36\xd5\x19\xae\x0e\xb0\x4e\x10\x3c\x07\x67\xaa\x81\x47\x70\x55\x15\xec\xc6\x2d\xcf\xc2\x2b\x1e\x12\x9c\xff\x90\x28\x47\x20\x8d\x77\xff\x56\x66\x1e\xcf\x19\x46\x19\x86\xc1\xc5\xae\x4d\x96\xae\x89\xe0\x19\x46\xd7\x25\xec\x82\xeb\xbd\x5c\xa7\xf8\xca\x2e\x72\xd8\x76\x5c\x48\x27\x71\x72\xc1\xae\x4a\xfa\x63\x8a\xe0\xe6\x8b\x23\x9f\x3d\x8e\x8f\xb1\xe1\x8a\x1a\x44\xfc\x22\x6b\xc9\x9d\xcc\x2e\xf2\xff\x96\xb1\xfb\x81\x59\x9f\x08\xcc\xee\xa0\x90\x74\xdf\x78\x63\x65\x8a\xad\x63\x31\xd0\x5e\x58\x5b\xf7\x47\xdf\xd7\x7f\x81\xef\xad\x59\x87\x57\x53\xee\xa1\x66\xbf\x98\x35\xbd\x97\x1a\xd0\xcb\xd4\x31\x70\x3b\x23\x22\xd2\xae\x83\xb8\x4c\xd8\x55\xfb\x3b\xf6\x80\xf8\x01\x80\x3e\xa8\xc6\x2a\xfe\xce\x9a\x45\x86\xdb\xa8\xfb\x64\xe1\x6f\x16\xe8\xe3\x40\x7c\x61\x41\x5c\x1f\xac\x90\xf6\x1f\x4f\xc4\x78\xe5\xca\x6c\xa4\xc4\x28\x92\x62\x37\xea\x05\x04\xaa\x0e\xeb\x04\x6d\xe7\x90\x63\x40\x3b\xbe\x32\x38\x70\x0b\x7b\x53\x1b\x27\x49\xcf\x8a\xdb\xe4\xff\x84\xd4\xa1\x27\xf0\xa4\x37\x25\xd7\x37\xb5\x2a\x65\x86\x12\xc6\xf0\x5d\xeb\xbb\xee\xa7\x84\xde\x47\x85\xf6\x4f\x92\xc2\x27\x84\x22\x7c\xe7\x2f\xb8\x10\x2a\xbc\x33\x1f\xb5\x7f\x7a\x82\x15\x3c\xfe\x71\x4f\xff\xad\x5b\x51\xc0\x8e\x3f\xfe\xa5\x00\x60\x13\x4b\x9f\x29\xda\x40\x8c\xa7\xed\x35\xa7\xdd\x71\xfe\x13\x00\x00\xff\xff\x40\x46\x56\x88\xe5\x25\x00\x00")
func templateNodeTmplBytes() ([]byte, error) {
return bindataRead(
_templateNodeTmpl,
"template/node.tmpl",
)
}
func templateNodeTmpl() (*asset, error) {
bytes, err := templateNodeTmplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "template/node.tmpl", size: 9701, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _templatePaginationTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x3a\x7f\x6f\xdc\xb8\x72\x7f\x4b\x9f\x62\x4e\x70\xfc\x24\x57\x91\x93\x43\x5b\xe0\xf6\x75\x1f\x60\xd8\xc9\xd5\x40\x5e\x92\x3b\xbb\xef\xfe\x08\x82\xb3\x2c\x51\xbb\x6c\xb4\xe4\x86\xe4\xda\xc9\xdb\xec\x77\x2f\x86\x43\x4a\x94\x56\x6b\x3b\x41\x8b\x1a\x48\x6c\x91\x33\xc3\xf9\xc5\x99\xe1\x90\xdb\xed\xe9\x49\x7c\x2e\xd7\x5f\x15\x5f\x2c\x0d\xfc\xfc\xe2\xe5\x2f\xcf\xd7\x8a\x69\x26\x0c\xbc\x2e\x2b\x76\x2b\xe5\x27\xb8\x14\x55\x01\x67\x6d\x0b\x16\x48\x03\xce\xab\x3b\x56\x17\xf1\xf5\x92\x6b\xd0\x72\xa3\x2a\x06\x95\xac\x19\x70\x0d\x2d\xaf\x98\xd0\xac\x86\x8d\xa8\x99\x02\xb3\x64\x70\xb6\x2e\xab\x25\x83\x9f\x8b\x17\x7e\x16\x1a\xb9\x11\x75\xcc\x85\x9d\x7f\x73\x79\xfe\xea\xed\xd5\x2b\x68\x78\xcb\xc0\x8d\x29\x29\x0d\xd4\x5c\xb1\xca\x48\xf5\x15\x64\x03\x26\x58\xcc\x28\xc6\x8a\xf8\xe4\x74\xb7\x8b\xe3\xed\x16\x6a\xd6\x70\xc1\x20\x59\x97\x0b\x2e\x4a\xc3\xa5\x48\x60\xb7\xc3\x19\xc3\x56\xeb\xb6\x34\x0c\x92\x25\x2b\x6b\xa6\x12\x38\x02\x42\x7a\x0e\xbc\x01\xc1\xe0\xa8\xb8\x32\x52\x95\x0b\x56\xbc\x2d\x57\x0c\x12\xfd\xb9\xb5\xc8\xd1\x76\x0b\x4d\xc9\xdb\x90\x2a\x28\xf6\x79\xc3\x15\xd3\x70\xf5\xdb\x1b\xd0\x84\xe7\x96\x7a\x0e\x4c\xd4\x03\xda\xd2\x40\xba\x2c\xf5\x75\xc7\x42\x25\xdb\x96\x55\x96\xbd\xec\xf1\x25\x1a\xce\xda\x1a\x02\x9c\xf1\x3a\x7c\xb5\x96\xca\x40\x8a\x74\x9e\x83\x2a\xc5\x82\xc1\x91\x80\xd9\x1c\x8e\x8a\xb7\xb2\x66\xda\xae\x11\x25\xdb\x2d\x1c\x15\xe7\x52\x34\x7c\x51\xbc\x2f\xab\x4f\xe5\x82\xc1\x6e\x77\x8a\xc3\x22\x18\x48\x88\x8e\xa3\x9e\x85\xf4\x93\x05\x37\xcb\xcd\x6d\x51\xc9\xd5\x69\xe3\xdc\xe2\x94\x09\x73\x5a\xf3\x12\xb9\x3b\x45\xa5\x0d\xc1\x7e\xf9\xa5\x66\x9a\x2f\x84\x3e\x5d\x7c\x6e\x17\x4c\x9c\x2e\x54\xb9\x5e\xee\x81\xdd\xb1\x4f\xa6\x5c\x22\xcc\xba\x54\x9a\xa9\xd3\xbb\x9f\xf1\x83\x29\x25\xd5\x18\x74\xc5\x97\x25\x6f\x99\xa8\xe4\xe9\x4a\x2f\xd6\x65\xf5\xe9\xf4\xee\xdf\x12\xe4\xf4\xf4\x14\xde\xa9\x9a\xa9\x0b\xeb\x2f\xa8\x45\xf2\x08\x6d\x5d\xa9\xf6\xa3\x1a\x9d\xeb\x7e\xc9\xab\x25\x18\x09\x12\x31\xa0\x84\x96\x6b\x83\xfe\xc5\x0d\x5b\xe9\x22\x36\x5f\xd7\x6c\x4c\x4d\x1b\xc5\xc5\x22\x8e\x2b\x29\xb4\x55\xc8\xde\x82\x67\xba\x02\xbd\x66\x15\x6f\x38\xd3\x50\x0a\x28\x75\xc5\x44\xcd\xc5\x82\xd6\x29\xe2\x68\x1f\x61\xb4\x0a\xcc\x21\x39\xbb\x3a\x4f\x26\xc8\x5f\xb0\x21\x7d\xa8\xd9\x23\xf4\x2d\xc6\x68\x81\x39\x24\x17\xaf\x70\x01\x52\xd9\x3f\xca\x96\xd7\xe8\x98\xa8\x24\xd2\x46\xa7\x2a\xb8\x2b\xdb\x0d\x2b\xe2\x66\x23\x2a\x48\xe5\x88\x52\xd6\xe1\xa6\x19\x58\x5b\xc1\x36\x8e\x78\x03\x12\x7e\x9a\x4f\x68\xe6\xf8\x78\x6a\xc6\xb2\xb8\x8d\xa3\x48\x31\xb3\x51\x02\x9a\x95\x29\x5e\x21\xb1\x26\x4d\x9e\x69\x8c\x25\xb8\x85\x4a\x64\x85\xd7\x23\xdc\x24\x07\x99\xc5\xd1\x2e\xf6\xc8\x82\xb7\xf1\xce\x8a\x75\x65\x8d\x05\x7c\xb5\x6e\xd9\x8a\x09\xa3\x2d\x61\x1a\x65\x0a\xb8\x30\x4c\xa1\x13\x1f\x16\x8e\x60\xd3\xcc\xd9\x1d\x79\x74\xab\xd0\x40\x2a\x33\xb7\xd6\xdf\x4b\xa5\x97\x65\xfb\xeb\x6f\x6f\xc2\xf5\x9c\xab\x17\x6e\xf6\x69\x8b\xf6\xa4\xd2\x7b\xe0\xb2\xf8\x43\x71\xc3\x54\x66\x15\xeb\xbe\x1c\x5f\xf7\x39\xf2\x51\x49\x71\x57\xfc\xb6\x91\x86\xa5\xb2\xf0\x1c\x67\x9e\xb1\xff\x12\xab\x07\x59\xeb\xe6\xa7\x99\x3b\x19\x73\x17\xd2\x4b\xef\xca\xb6\x47\xda\xee\x02\x17\xd0\x46\xe5\x20\x3f\x61\x0c\xba\x2b\xdb\x22\x25\x7d\x65\xd6\x37\x7e\x92\x9f\x0e\x59\x7b\xec\x7c\xcf\xae\x61\xb5\xd1\x06\x6e\x19\x94\x4e\xe7\x49\x8e\x14\xc9\xe4\x27\x12\xc6\xbe\x84\x2b\x65\x9d\x99\x64\xd1\xfb\x27\x2a\xe4\x90\xce\x15\xbb\x63\x4a\xa3\x13\x8f\x76\x8a\xf7\xe6\xf9\x63\x3e\xbb\xe7\xeb\xa1\x4f\xee\xa3\x3e\xc4\x8c\x55\xc2\xeb\x8d\xa8\x52\x8a\xfc\x4e\x77\x04\x87\xe3\x4f\xe7\x0a\xbf\x89\xca\x60\x8f\x9c\xf5\xa3\x9e\x8f\x6a\xa3\xb4\x54\xfa\x5a\xbe\x57\xac\xe6\x55\x69\x98\x4e\x7b\x3b\x0c\x57\xc9\xa1\x6c\x0c\x53\x39\xdc\xb2\x46\x2a\x06\x27\xe7\x16\x39\xa7\x4c\x95\x03\xaf\x5f\x0f\x18\xff\xf0\x11\x97\x48\x35\x9c\xe8\xcf\x6d\x71\xc5\x5a\x9b\xcb\xad\x47\xdf\x95\x0a\xd6\xdd\x8a\x87\x20\x07\x89\xad\x72\x8b\x1d\xc9\xb5\x46\xff\xaa\x79\x65\x20\xb1\x1c\x25\x90\xda\x20\x9e\xfc\x7a\x9d\x40\xf2\xe6\x3a\xc9\x20\x21\x1e\xbb\x99\x37\x38\xf3\xeb\xb5\xcb\xbb\xa8\x46\x4c\x7f\x44\x13\x76\x3b\x0c\x4e\x82\xb7\x56\x87\x7b\x93\xe8\x4c\x1b\x36\x00\x19\x0a\x00\x96\xfb\x0f\x1f\x49\xf0\x1c\x8a\xa2\x18\x6c\x0f\x2b\x55\xa7\x60\x8b\xcf\x9b\xc0\xdd\xf7\xec\x79\xe6\xcc\x19\x45\x51\xbf\xc8\x1c\x90\xcc\xb9\x5c\xad\xa5\xe6\x86\x6d\xb7\xc0\x45\xcd\xbe\x90\x42\x5e\x90\x5c\x51\xb4\x03\xd6\x6a\xf6\x9d\xd8\x2f\x3b\xec\x78\x80\xa5\x61\x0e\xe5\x7a\xcd\x44\x9d\xf6\x63\x39\x1c\x34\x2b\xfe\xe8\xe2\x8f\x25\x53\xac\x47\x48\x69\x3c\xd2\xc5\xb9\x6c\x37\x2b\xa1\xd3\xa1\xbf\x64\xb9\x03\x98\x50\x7a\x3e\xb2\xc4\xe5\x85\x03\xce\x32\xe2\xd7\xfe\x1a\xc8\x3c\x61\x19\x6f\x97\xff\x33\xa3\xfc\x90\x2d\xfe\x7f\x4c\x90\x3e\xac\xf5\x03\x0a\x8e\xed\xbf\xa0\x3c\xf4\x21\xa5\xe7\xc9\x25\x9e\xf7\xe5\x82\x5d\x8a\x46\x62\x45\x55\x42\x25\x85\x70\xfa\xc4\xba\xca\x55\x57\x1d\x8c\x36\x6a\x53\x19\x64\xfb\x3f\x4b\xfd\x96\x7d\x31\x38\x03\xf8\x73\x2b\x65\x8b\xbf\x6f\xfe\x5b\x4b\x31\x4b\x96\xfd\x74\x72\x63\xa1\xdf\x2b\x76\xc7\xe5\x46\x5b\x8c\x7d\xe8\x70\x1a\x31\xae\x4c\xa9\x0c\xc5\x2b\x4b\xdf\xc5\x2e\x8f\xa1\xfb\x69\x84\x7e\x25\xea\x00\x76\x0f\x9a\xf9\xe9\xe4\xc6\x49\xed\xe6\x51\x66\x01\xac\x5e\xb0\x50\x5c\x37\xd9\x0b\x7b\x79\x61\xdd\xba\xb8\xbc\xb8\xc6\xf9\xdd\x0e\x6e\x5c\x41\x3b\x4b\x38\xae\x4f\x01\x87\xfe\xa7\x9f\x1e\xe0\x2e\x97\x2b\x2c\x55\xd7\xe6\x6b\xb7\xfc\x0f\x97\x21\x95\x63\xee\xc1\xf2\xe3\x33\x16\x19\x18\x72\x3f\x7c\xbc\xfd\x6a\xd8\xf6\x2f\xc9\x5f\x76\x71\x74\x4f\x20\xa9\x9d\xcd\xe2\xa8\x66\x0d\x53\x30\x1e\xbd\xaf\x10\xf1\xb6\xd4\xec\xdf\xff\xb5\x78\xcb\xee\x5f\x09\x3c\x27\xaa\xd4\x8d\xfc\x5e\xde\x5f\x99\xda\x0e\xda\x1d\x7a\xdf\x13\xaa\x8a\xf3\x56\x62\x6e\x8e\xa3\x3f\x61\x0e\x4e\xfe\x90\xc6\x7d\x95\x15\xf4\x77\x5a\xfd\xaf\xd4\x3d\x95\x37\xf4\xb8\xde\x39\x54\xed\x74\xb5\xce\x93\x2b\x9d\x67\xd7\x7d\x5d\xdb\x17\x36\x94\xa5\x79\x83\xa4\x91\x5e\x20\xec\x05\x23\x61\xe3\x28\xea\xb5\x18\x0c\x46\xd3\x9a\xc4\x19\xa2\xaf\x11\xe1\x77\x7b\xf8\x4d\xb5\xdd\xf5\xf8\x5f\x56\x10\x8d\xb4\xca\xfe\x6a\x57\x0d\xb2\xdb\x04\xdb\x55\x29\x90\xe7\xda\xe2\xb8\xa2\x61\x06\xcf\xee\x93\x1c\x91\xa7\x0a\x71\x3a\x2a\x31\xa5\x2e\x85\xad\xdf\xdf\xf7\x27\xdc\x39\x24\x97\x6f\xff\x71\xf6\xe6\xf2\xe2\xcf\xf7\x67\xbf\x5e\xbe\x3d\xbb\xbe\x7c\xf7\x36\x71\x05\xc9\x9d\x2b\xdc\x5e\x73\xa5\xcd\x9b\x52\x9b\xb4\xc1\xbf\x72\x68\x4b\x6d\xe0\x84\x0b\x93\x41\x8a\x0c\x9f\xf8\x63\x22\x31\x69\x3d\x55\xdf\x73\x53\x2d\xf1\xaf\xaa\xd4\x0c\x2c\xa6\x97\xec\xf8\x98\x48\xd0\xe7\x2c\x8e\x22\xa4\x32\x87\xe3\x21\x1d\x1b\x42\xff\xce\xb4\x2e\x17\x6c\x06\xc9\xfb\x52\x6b\x3c\x02\xdc\x4a\xb3\x84\x1b\x4b\xf0\x06\x4a\x51\xc3\x0d\x12\xbb\xc1\x73\xa4\x3b\xbb\xb3\x61\xb8\x73\x56\xd6\x9b\x35\x1e\xa2\x59\x5d\x24\x79\x1f\x44\x5d\x49\x53\xaa\x05\x5a\x9b\x2a\x14\x4b\x3b\x81\x04\xe9\x52\x03\x82\x84\xc0\x48\x81\x80\x7d\x91\x72\x7c\x0c\x27\xc1\xe8\x7f\xc0\x0b\x94\xe6\x01\x71\x02\x79\x6e\x7a\xc4\x1b\x90\x62\xc8\xb3\xb3\xf2\x2d\x83\x96\x69\x3c\x3b\x97\x02\xfe\xc9\x94\x24\xde\x29\x33\x31\xa5\xd0\x07\x8a\x2b\x66\xd0\x0c\xf9\xa4\x89\xb3\x61\xae\xe8\x9d\x83\x29\xd5\x95\x9e\x0b\x66\xce\xa9\xb9\xc1\x28\x25\xa5\x95\xf9\x82\xec\x18\xf6\xc5\x14\xe7\xf4\x3b\x87\x75\x69\x96\x58\x51\xf9\xaa\xf2\xc4\x6f\xe5\x21\x32\x1a\xbd\xb1\xc1\xc6\xcf\xff\xca\x8c\x9d\x71\x94\x90\x3a\x6d\xcf\xa6\xc2\xe4\x3e\xf2\x76\x74\x5b\x64\x54\x8e\x69\xbc\x5b\x33\x65\x85\x1a\xd2\xa1\x1a\x7d\x36\x87\xa6\x2a\xec\x32\x71\x7c\x5f\xb6\x9f\x66\x71\xd4\x48\x05\x7f\xe6\x20\xca\x95\x0d\x9a\x64\x6b\x2b\x05\x2e\xe7\x66\x9b\x7e\x6a\x24\x8f\x25\xa6\x53\x59\xb9\xba\xda\xa5\x77\x2e\x85\xce\x91\xcb\xac\x2b\x52\x1b\xea\x58\xa1\x2c\xf8\x9b\x4c\x4d\x7c\xcd\xa1\xb1\x5f\xa8\x4d\x2e\x36\x0c\x90\xb7\x20\x93\x8f\xa5\x76\x9f\xc7\x16\xbb\xb3\xd0\xb2\xd4\x3f\x62\x21\x9b\x8d\xe9\xac\xf2\x90\x2d\x26\x8c\x60\xd4\x86\x85\xfc\x4c\xba\x08\x2d\x58\x14\x45\xe6\x36\x44\x1f\x6e\x86\xad\xb0\xce\x44\xb4\xc1\x30\x2d\xeb\x04\x12\x21\x6b\x96\xd8\x96\x9b\x2d\x42\x12\x48\x8c\x34\x65\x7b\x2e\x37\xc2\x6f\x3c\xdc\x24\x84\xbd\xdb\xbd\x76\x0a\x4d\xc2\xc1\xbd\x5e\xd9\x76\xdb\x35\xe0\x30\x42\x06\x3d\xb8\xe7\xd4\x88\x3c\xa2\xe3\x9d\x35\x6e\xc7\xd3\x73\xd7\xd1\xf3\x0c\x5b\x3c\x24\x50\x38\xc0\x9d\x0b\x18\xbc\x81\x23\xbb\x39\x29\x82\x22\x58\x53\x9c\x75\x03\xba\x78\x25\x0c\x66\x3c\xc7\x7d\xd7\x7f\x3c\x6a\x0a\x2c\x31\x6c\xdd\x5f\xaa\xf2\xb6\x65\xae\xcc\xf4\x9d\xc7\x74\xad\xb8\x30\x0d\x24\x8e\x3a\xab\x5d\xdf\xf1\x99\x2e\x9e\xe9\xee\x14\x5e\x75\xf8\x89\x63\xd0\xba\xde\x11\xb9\x60\xd6\xaf\xeb\xb7\x7c\x34\x16\xd9\x57\xb1\xc3\xd1\xa3\xa6\x13\xd1\x61\x0e\x1b\xa9\x70\xe4\xb7\x51\xb0\xaa\xd7\xa8\xad\xb3\x66\x73\xb0\x22\x38\xc8\xe4\x55\xbd\x60\x89\x05\x39\x3d\x85\x0e\x6a\xb7\xc3\x80\x6c\x96\x8c\x8a\x33\xc5\x5c\x93\x9b\xd4\x29\xe9\xc0\x67\x09\xec\x76\xae\x6a\x0b\x71\xfb\xd2\x0d\x6d\x4a\xd1\xd7\x41\xfb\x9a\xd0\x7a\xd5\x4d\x1c\xb9\x62\x2f\x28\x1f\x1d\x40\x15\x54\x8c\xb6\xe6\x96\x42\xec\x71\x7f\xde\x05\xe3\x81\x0c\x16\xb6\x97\x21\x0c\xd9\x52\x98\x92\x0b\x4c\x51\xd6\xbf\x31\x21\x4d\xcb\xe2\x69\xf4\xb2\xbc\xb2\x08\x1f\x3e\x9e\x84\xa2\xfa\x12\xd7\x6e\x96\x9b\x38\xea\x6a\xf5\xee\x8f\x5e\xa4\x6e\x0b\xdd\xc4\xd1\x75\xb7\x87\xb0\x58\xf2\x55\x2b\xc1\x05\xfb\xab\x13\x1f\x71\x55\x2f\x7f\xaa\x5b\x5e\x31\xc7\xf7\x0b\x78\x09\xdf\xa0\x95\xf7\x58\x80\x0e\x66\x5e\x66\x98\x92\x17\x78\xe4\xef\xf6\xd5\xda\xec\xa9\xd1\x65\x22\xf6\x6e\xbd\xa7\x4a\x04\xdf\xed\x80\x09\x74\x66\x0d\x41\xe3\xbd\xda\x68\x23\x57\xfc\x9f\xf6\x2b\x50\x9c\xc3\xb0\x27\xae\x93\x9e\xf5\x9d\xaf\x04\xe3\x7e\x7b\xef\x31\x62\x4f\x90\x03\x5e\xdf\x0d\xe1\x92\x3f\xb8\x59\x26\x1e\x7d\xc8\x27\x81\xee\x76\x68\xe4\x86\x2f\x36\x6a\xc8\xaf\x45\xe1\x62\xe1\xaa\xd7\x11\x52\x4a\x14\x4f\x7a\xde\x90\xe1\x40\x1e\xd7\x4c\xb2\x53\x41\x24\x46\x88\x9a\x35\xe5\xa6\xdd\x63\xf5\x82\x86\x93\x09\xe1\xa2\xc8\x11\x82\x3d\x7c\x57\x00\x48\x24\x74\x62\xa1\xba\xf0\x6e\x55\x4a\xda\x9c\x52\xac\xe5\xa7\x2f\x8a\x65\xd1\x1d\xc5\x83\xf6\xde\x5e\xf5\x1a\x96\x1a\x94\xeb\x50\xcc\xc2\x85\xf1\x00\xae\x1b\x9b\xe2\xd9\xe5\x74\x22\x60\xf9\x2a\xbc\x84\xc7\x72\x2f\x7f\x3a\x9f\xfe\xbc\x61\xea\x6b\xe8\x03\x18\xb2\x7e\xc3\xc1\x41\xdc\x92\x6b\xf3\x9a\xb7\x66\xca\x0d\x48\xb1\x34\x3b\x76\x5b\x87\x73\xd0\x1f\x1a\x3b\x3f\xf4\x86\x0e\x27\xa5\xd9\xc0\x8b\x89\x59\x54\xf6\xf0\x3b\x27\xe5\x67\x63\x67\xf9\x3e\x9b\xb9\xe5\xe6\x93\x76\x91\xca\x1e\x4a\xd2\x24\x5c\xd7\xa3\xf4\x45\xa8\xe0\x6d\x92\x0d\x4c\xe0\xa9\x3a\xd8\x29\x3b\x74\xdb\xd6\x33\x16\x04\xbc\x89\x2d\x81\x85\xdc\x77\x2a\xc6\x5b\x5b\xb0\xfb\xf7\xc3\x20\x96\x08\x76\x9f\x04\x31\xc8\xdb\xb0\xb3\x48\x87\x82\xfb\x73\x6d\x30\xf8\xf6\x4a\xf6\xeb\x79\xc6\xfd\x7a\xc8\x79\x17\x2d\x8f\x43\x88\xed\xae\xab\x36\x5d\x14\xa4\x42\xc2\x92\x1e\x6d\x9e\xb5\x21\xab\x1d\xde\x30\x82\xb7\x79\xbf\x6b\xe8\x3c\x3a\xf0\xfc\x1e\x65\xb8\x21\x0e\x6d\x79\xdf\x22\x42\xe0\xdc\x17\x6a\x74\xd2\x5e\x8f\x9d\xa7\x5c\xaf\xdb\xaf\xe4\xad\x29\x29\xfc\x49\xb6\x70\x51\x6c\xed\x3d\x63\xff\x14\xeb\xa7\x88\xea\xe0\xa4\x6a\x47\x3a\xc6\x90\xbc\xea\x2b\x8d\xdf\x59\xc5\xf8\x9d\x8b\xc9\x07\x98\x36\x92\xd2\x7c\x4a\xb8\xbb\xdd\xa0\x32\xc8\x7c\x11\xd0\x6f\x9e\x35\xe9\x8c\x82\x4b\xb1\x87\x9e\x3d\xa6\x20\x82\xd7\x53\x1a\x3a\xd0\x9c\xcf\x86\x50\xf6\x94\x44\x1e\xd3\xb7\x23\x3b\xbf\x99\xba\x0c\x40\x6b\x3b\xae\x0f\x5d\x04\xe4\x01\x0c\x49\xe6\x1a\xbc\x07\xe3\xaa\x03\x88\x23\x3a\xcc\x10\x77\x73\x32\xc8\xb8\x77\xb9\x6f\xb2\xc7\xb4\x64\xd7\x9a\xd6\x91\xbb\xf0\xb1\x47\x94\x09\xd5\xf4\xdd\x5f\xdc\xd3\x63\xb1\xad\xab\x79\x0a\xc8\x77\xd0\x2c\xee\x1b\xc7\x45\x77\xa9\x64\x19\x1f\xca\x46\xac\xf5\xb0\xfd\x9d\xcf\x84\x02\xb3\xcc\x39\x77\x30\x83\x1e\x7e\x50\xad\x13\xca\x3c\xbc\xe0\x23\xc6\xc9\xa6\xf5\x4e\x1d\x5e\xea\x77\xb0\x2f\xac\xda\x18\x77\xc9\x4e\xcb\x96\xa2\x06\x42\xd1\x50\x82\x62\x6d\xf9\xd5\x36\xff\x6a\xe7\x5b\x83\x86\xf0\xa8\x52\x25\x93\x0e\xb7\x52\xbf\xff\xfd\xaa\x69\x1c\x4d\x1e\x42\xad\x47\x86\x57\x52\xca\xb5\x8b\xf2\x38\x1a\xdf\x57\x75\xad\xa4\x9c\x42\x65\x51\x14\x7d\x18\xce\x63\x1f\x6c\x5c\xc5\x3c\x8a\x35\x2e\xa0\x3e\xd8\xa7\x3a\xd8\x53\xeb\x63\xec\xce\x05\xf5\xdc\x13\x9c\xca\x0f\x59\xb7\xe2\xc3\xa4\x62\x7f\x67\xd5\xe5\x0d\x98\xbb\xd8\x1d\x06\xd6\x2e\xce\x3c\x85\xbd\x38\xf2\x27\x94\xe3\x40\x19\x5b\x7b\x68\x98\x8d\x4e\x0d\xdb\x1d\xa5\x8b\x9f\x26\x3b\x06\x39\x1d\x4d\xe8\xda\x01\xbe\x7d\x8b\xa3\x68\xdc\x99\x3b\xa1\x81\xf9\x1c\x5e\x10\x40\xd0\xaa\xb3\xf3\xf6\xdb\x4e\xbb\xbc\x76\x60\xa5\xfe\xa8\x11\x2e\x17\x1d\x80\xf6\x07\x18\x07\x6b\xb3\x61\x85\xc8\x03\xab\xd8\xad\x61\x69\xba\xde\x4f\x34\x61\x95\xa9\x34\x4a\x1d\x33\xd4\x5c\x11\x1c\x8f\xe6\x60\x97\xe8\xa6\xfc\xc1\xaa\x08\xef\x40\xe6\x7b\xcd\x4b\x8b\x04\x7f\x83\x17\x93\x88\x83\xeb\x90\x39\x8c\xd4\x17\xe2\x06\xdd\x1f\xa4\x92\xfb\xda\xc9\x1a\x30\xa5\x6d\xe4\x30\xbf\x7d\x1b\x72\xf1\xed\x9b\xcf\x2e\xfd\x40\xb0\x52\x06\xc7\xc7\xf1\x41\x5d\xef\x59\x06\xb5\x76\x48\xd9\xad\x14\x2c\xcd\x86\x4a\x9f\xd0\xf9\x64\xe5\x72\x58\xe1\x28\x64\x17\x5f\x06\x1b\xc4\x27\xd6\x7e\x0f\x0d\x32\x5c\x76\x08\x8f\xc2\x6b\x8f\x15\x6a\x83\x6e\xb9\x5b\xbe\xe2\xf6\x48\x4c\xad\xc7\x50\x9d\x28\x02\x4d\xcf\xdd\x06\xf8\x97\x97\xb1\xbf\x34\xe4\xcd\xc0\x88\x03\x58\x9c\xb0\xa0\x96\x26\x0d\xff\xcd\x6d\x8d\x80\xcf\x4e\x9d\x6f\x10\x22\xb5\x70\x59\x67\xea\xae\x3b\x76\xa0\xc9\xd6\xef\xd9\x1c\xb0\x22\x22\xab\xfd\xd5\xe1\xfd\x34\x3c\x31\x8e\x57\xac\x82\x5e\x66\xfa\x58\x33\x35\x47\xe9\xfd\x5b\x85\x38\xc2\xc5\xf4\xbe\x4f\x9c\xb5\x6d\xdf\xc2\x0d\x1c\x01\x7d\x90\x89\xd4\x62\x65\x7d\x88\x18\xb8\x78\x18\x26\x87\xc0\xa4\xbc\x6d\xfc\xf4\xbd\x38\x05\x7a\x78\xf7\xc5\x11\xc9\x03\x73\xab\x44\xfd\x61\xd6\xaf\xff\xfc\xe5\x47\xe2\x0a\xfd\x04\x87\xce\x0c\x1d\x43\xec\x25\x47\x58\x4a\x12\xe3\x23\x77\xb0\xb1\x39\x90\xe6\x39\xbc\x74\xab\x9d\xa1\x97\x10\x25\xd8\xa3\x35\xdc\x39\x96\x27\xf1\x9c\x7f\x74\x55\x7f\x7f\x63\xfd\x23\x84\x3a\x32\x2e\x7b\x14\xd4\x62\x9a\xc3\xaa\xfc\xc4\xd2\x61\xce\xc8\x03\xde\x33\xaa\x4a\x79\x5f\x8a\x92\xd2\x3c\x1f\x38\x4e\xfc\xa4\x3c\xf3\x06\xb0\xb4\x3f\xf0\x8f\xe0\x32\x94\xcf\x45\xc8\xd5\x5b\x59\xb3\x99\x45\xb1\x37\x17\xe7\xee\xc2\x8a\x76\x6e\x57\x77\xe3\x7c\x96\x8f\x58\xee\xec\x1a\x5e\x19\xcf\xe1\x38\x58\xf3\xc5\xc7\x82\xc6\xc7\x28\xfd\xbd\xf1\x10\x01\x25\xed\x3f\xd1\xf0\x1d\x01\xde\xc0\x5e\xbc\xf2\x3e\xbc\x1f\xc8\x7a\x95\x11\xc7\x7b\x81\x7c\x17\x8f\x9a\xcd\xd3\x2d\x29\x3b\x95\x74\xfd\x67\xde\x0c\xdb\xb2\xe8\x71\xe8\x94\xa9\xeb\xea\x0e\xfa\xd3\x63\x40\x02\x39\x42\xf8\x7e\xad\x80\x81\xa3\xa6\xb8\xb2\x47\xf0\xd7\xae\x79\x8e\x28\xae\xad\x81\x48\xbb\x1d\x75\xa1\x74\x58\x0e\xc2\xed\x57\xfb\xe9\xee\x38\xb0\x3e\x74\x6f\x15\x1c\x8a\x33\x7a\xb0\x8e\x33\x3d\xdd\x80\xcc\x88\x18\x9e\xe4\xfa\x17\xae\x05\x51\x3c\x97\x42\x9b\x52\x50\xb1\x87\x18\xde\x21\x66\xe4\xeb\x8f\x9e\xe6\x82\x6c\x4f\x83\x6e\x2c\xba\xbc\x98\xf5\x01\xab\x7b\x4a\x41\xd7\xf9\xc1\x0c\xb1\x31\x54\x8a\x83\x75\x6f\x40\xba\xfb\xb6\xb0\xa7\x9e\xc5\xf6\x65\xe8\xf8\x7d\xe3\xc1\xe7\x8d\x11\x55\xd4\x54\x19\x0e\x14\x35\xf9\xc4\xd1\x5a\x5c\x1b\xe5\x5f\xbb\x46\xfe\x3a\xb5\xa1\x13\x01\x49\xfe\x04\x77\xe8\xef\x2d\x1f\xd5\xff\xcc\x3d\x58\x31\xaa\xbb\x61\x99\xb8\xd6\x28\xde\x85\xcc\x27\x9e\x8d\xee\xae\x21\x28\x69\xb4\x71\x91\xfe\xc7\x5e\x45\x3c\xa4\xb2\x07\x5e\x48\x3c\xfa\x42\xb3\x09\x5f\x68\x7a\xfe\x7e\xf0\xad\x42\xc7\xe3\xc9\x04\x93\x4f\x7a\xb7\x30\x78\xa6\x19\x3c\x5d\x08\xdf\x2e\x4c\xbd\x02\xd8\x5b\xef\xd0\x43\x4d\xdf\xb5\x73\x0e\x84\xe6\xfd\x3e\xe7\x79\xaa\x2b\x38\xff\x39\x69\xb0\x2c\xda\x6e\x9f\x1a\x7d\x06\xde\xe3\x0e\xc1\xb3\x03\x42\xef\xbf\x44\xde\x53\x43\x62\xcd\x9d\x4d\x5e\xad\x86\xf7\x59\xbe\x97\x3b\x50\x61\xf8\x52\xdd\xb7\xf4\x5d\x91\xe5\xef\xa5\xba\x9b\xaf\xc1\x85\xce\x90\x4c\xdf\xe5\x0c\x5f\x8f\xc6\x5d\x6c\x0b\x1a\x9c\xa3\x88\x16\x8f\x38\x3b\xc8\xd4\x13\xd8\x19\x72\x72\x71\xe0\x21\xa9\xbf\x17\xea\xda\x12\xc9\x4d\x1c\x91\x24\xfb\x3e\xed\x81\xad\x58\xdd\x8b\xa7\x89\xee\x85\xbf\x1d\x73\xc3\x8f\xf0\x8d\xc1\x6e\x8a\x48\x98\x57\x28\xa5\x74\x6c\xcf\xf6\x1f\x04\xe6\x8e\xef\xd9\x81\x6c\xf4\x48\x2e\xb2\x63\x97\x17\xe3\x8c\xf4\xfd\xf9\x68\x98\x8d\x46\x69\xc8\x86\xc8\x3c\xc6\x7f\xa4\xbc\x6b\x89\x75\x08\x56\x0d\x77\x4c\x99\x61\xde\xe5\xc2\xf5\x65\x5c\x39\x35\xdd\x97\xe9\x98\x20\x52\xd3\x97\x4d\x83\x7b\xc5\x03\xd7\x4d\x4f\xec\x21\x8f\x0b\x3c\xaa\xef\xa0\xef\x79\xc4\x7d\x91\xf7\x70\x8b\x35\xef\xee\x6b\x82\x2b\x67\xff\xd7\xff\x04\x00\x00\xff\xff\x77\x62\x5f\x50\x0f\x35\x00\x00")
func templatePaginationTmplBytes() ([]byte, error) {
return bindataRead(
_templatePaginationTmpl,
"template/pagination.tmpl",
)
}
func templatePaginationTmpl() (*asset, error) {
bytes, err := templatePaginationTmplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "template/pagination.tmpl", size: 13583, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _templatePagination_testTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x54\x7f\x6f\xdb\x36\x10\xfd\x5b\xfa\x14\x07\x22\x58\xe5\xc0\x61\x12\xb7\x01\xd6\x00\x06\xd6\xc6\x5e\xe1\xa1\x35\xd6\x35\x2d\xb0\x05\x45\x41\x51\x27\x89\x88\x4c\xaa\x47\x2a\xad\x21\xe8\xbb\x0f\x47\x39\x5b\xd6\x78\x1e\x86\xfe\x61\x43\xba\x77\x3f\xde\xbd\x47\xaa\xef\x4f\x8f\xd3\x2b\xd7\x6e\xc9\x54\x75\x80\xd9\xd9\xf9\xf3\x93\x96\xd0\xa3\x0d\xf0\xb3\xd2\x98\x3b\x77\x0b\x2b\xab\x25\xbc\x68\x1a\x88\x49\x1e\x18\xa7\x3b\x2c\x64\x7a\x5d\x1b\x0f\xde\x75\xa4\x11\xb4\x2b\x10\x8c\x87\xc6\x68\xb4\x1e\x0b\xe8\x6c\x81\x04\xa1\x46\x78\xd1\x2a\x5d\x23\xcc\xe4\xd9\x3d\x0a\xa5\xeb\x6c\x91\x1a\x1b\xf1\xd7\xab\xab\xe5\xfa\xdd\x12\x4a\xd3\x20\xec\x62\xe4\x5c\x80\xc2\x10\xea\xe0\x68\x0b\xae\x84\xf0\x60\x58\x20\x44\x99\x1e\x9f\x0e\x43\x9a\xf6\x3d\x14\x58\x1a\x8b\x20\x5a\x55\x19\xab\x82\x71\xf6\x53\x40\x1f\x04\xec\xf0\xa3\xf6\xb6\x82\xcb\x39\xe4\xca\x23\x1c\xc9\x2b\x67\x4b\x53\xc9\x5f\x95\xbe\x55\x15\x72\x52\xdf\x9f\xc0\x17\x13\x6a\xc0\xaf\x01\x6d\x01\x47\x20\x76\xa8\x80\xac\x25\x63\xc3\xd8\x43\x8c\x7d\x27\x5c\x93\xf4\x3d\x04\xdc\xb4\x8d\x0a\x08\xa2\x46\x55\x20\x09\x90\xf7\xed\xb8\x0d\x8f\x37\x9b\xd6\x51\x80\x2c\x4d\x44\xbe\x0d\xe8\x45\x9a\x08\x1f\x48\x3b\x7b\xc7\x8f\xdc\xcf\xd8\x4a\xa4\x69\x22\x98\xe9\x63\x72\x9c\x55\x99\x50\x77\xb9\xd4\x6e\x73\xea\x03\x61\xd0\x35\x9d\xc6\xca\x72\x7b\xaa\xbc\x47\x0a\x22\x9d\xa4\x69\xd9\x59\x0d\xd7\xe8\xc3\x55\x47\xde\xd1\xd2\x6a\x57\x18\x5b\x65\x01\x8e\x77\x73\xe4\xf5\x04\xfa\x34\x09\xf2\xb7\xce\x66\x22\x26\xe0\x02\xf9\x5f\x4c\x81\xcb\x1f\xe7\x26\xda\x59\x1f\x17\x48\x12\x53\xc0\x1c\xfa\x1e\x4c\x09\x47\x72\xb5\xb8\xde\xb6\x28\xd7\xdd\x06\xc9\x68\x18\x86\x67\xb3\xbe\x07\x6c\x7c\x64\xfd\x6c\xc6\xfb\xec\x54\x48\x92\xe4\x4e\x35\x1d\xc2\x1c\x44\xe9\x5c\xae\x48\x70\xec\x73\xe7\x02\xc7\x3a\x63\xc3\x8f\xd9\x13\xf1\x64\x92\x26\x09\xff\xee\x14\x41\xde\x95\x10\x15\x93\x2f\xbb\xb2\x44\x62\x26\x6c\xe2\xbd\x9f\xc3\x20\xc7\x35\xfb\xd5\xe2\x12\x4c\x31\x85\x0f\x3c\xe2\x12\xc6\x49\x3c\x54\xcb\x37\x8a\x7c\xad\x9a\x57\x6f\x5f\x67\x3f\xe4\x5d\xc9\xbd\x7d\x3c\x09\x5d\x29\xdf\x05\x62\x71\x38\x36\x6a\x28\x97\x9f\x3b\xd5\x64\x61\x0a\x91\xd8\x14\xfc\xcd\xd9\x47\x86\x2d\x97\x34\x68\x33\x3f\x81\x13\x38\x3f\x54\x60\x63\x81\x86\x7d\x3c\x99\x12\x12\x71\x33\x2d\xdf\xdb\xcd\xdf\xdc\xfc\xcd\xf9\xe5\x58\xb9\x6b\xbc\x76\x4b\x22\x47\xdc\x1a\x89\xf6\x51\xe4\x85\xb5\x5c\x2d\xf6\x61\x51\x01\x86\xa3\x22\x93\x34\x19\x26\xdf\x78\xbe\x76\x11\xfa\x2f\xd3\x0f\xfb\x7d\x71\xf1\xc0\xef\x8b\x8b\x7f\xf8\xfd\xff\x1c\x84\xd1\xc2\x03\xa6\x45\x1d\xb8\x78\x77\x77\xe4\x7b\x1b\x35\xcf\x1e\x3a\x79\x50\xc0\xc3\x9e\x3c\xb6\xe4\x7b\xdd\x58\x9b\x88\xec\x35\x61\xbc\x72\x2f\x55\xb1\xb2\x6d\x17\xfe\xdd\x05\xc3\x70\x3c\xb0\x37\x1f\x8d\x0d\x48\xa5\xd2\xd8\x0f\x0c\x25\x67\x5f\x73\x55\xe4\x88\xe5\x94\xdf\x84\x8e\xfb\xfc\x94\xab\xe2\x7c\xf6\x54\x8c\xb1\xdf\x9f\x7e\xd8\xea\xd9\xf3\xed\xdb\x57\xbf\xd4\x7f\x2c\x96\xdb\x37\x5f\xe6\xf3\x08\xf1\xd6\xa5\x23\xf8\x34\x85\x38\x82\x27\x90\xb2\x15\xc2\x6e\x62\x3f\x5e\x59\x02\xfd\x58\x33\x86\xf6\x1f\xe3\x58\xcc\x12\xfc\xa5\xcf\x37\xb2\x0d\x51\x05\xfe\x40\xde\x9f\x94\x3f\x03\x00\x00\xff\xff\x71\x15\x8f\x1b\x82\x06\x00\x00")
func templatePagination_testTmplBytes() ([]byte, error) {
return bindataRead(
_templatePagination_testTmpl,
"template/pagination_test.tmpl",
)
}
func templatePagination_testTmpl() (*asset, error) {
bytes, err := templatePagination_testTmplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "template/pagination_test.tmpl", size: 1666, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _templateTransactionTmpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x53\xc1\x6e\xdb\x3a\x10\x3c\x8b\x5f\x31\x4f\xc8\xc1\x0a\x1c\x3a\x2f\xb7\x06\xf0\x21\x30\x12\x20\x40\xe0\xa2\x88\x7f\x80\x21\x57\x31\x51\x99\x54\x96\xeb\xc4\x81\xe0\x7f\x2f\x44\x49\xad\xeb\x02\xbd\xf4\x22\x81\x3b\x3b\x33\xdc\x21\xd9\x75\x8b\x4b\xb5\x8a\xed\x27\xfb\xd7\xad\xe0\xe6\xfa\xff\x2f\x57\x2d\x53\xa2\x20\x78\x30\x96\x5e\x62\xfc\x8e\xc7\x60\x35\xee\x9a\x06\xb9\x29\xa1\xc7\xf9\x9d\x9c\x56\x9b\xad\x4f\x48\x71\xcf\x96\x60\xa3\x23\xf8\x84\xc6\x5b\x0a\x89\x1c\xf6\xc1\x11\x43\xb6\x84\xbb\xd6\xd8\x2d\xe1\x46\x5f\x4f\x28\xea\xb8\x0f\x4e\xf9\x90\xf1\xa7\xc7\xd5\xfd\xfa\xf9\x1e\xb5\x6f\x08\x63\x8d\x63\x14\x38\xcf\x64\x25\xf2\x27\x62\x0d\x39\x31\x13\x26\xd2\xea\x72\x71\x3c\x2a\xd5\x75\x70\x54\xfb\x40\x28\x85\x4d\x48\xc6\x8a\x8f\xa1\xc4\xf1\xd8\x43\x42\xbb\xb6\x31\x42\x28\xb7\x64\x1c\x71\x89\x0b\x0c\xac\x2b\xf8\x1a\x81\x70\xa1\x9f\x25\xb2\x79\x25\xbd\x36\x3b\x42\x99\xde\x9a\x4c\x06\x80\xae\x43\x6d\x7c\xf3\x9b\x32\x98\xde\xf6\x9e\x29\xe1\xf9\xdb\x13\xd2\xc0\x1d\xed\xae\x40\xc1\x65\x7d\xbf\x6b\x23\x0b\x66\xaa\x28\x6d\x0c\x42\x07\x29\x55\x51\x3a\x23\xe6\xc5\x24\x5a\xa4\xb7\x66\xe1\xd8\xbf\x13\xf7\x65\x62\x8e\x9c\x4a\x55\x29\xb5\x58\xe0\x6b\x4b\x61\x73\x40\x6c\x29\x24\x18\x9c\x5a\x9b\xe0\xc0\x24\x7b\x3e\x47\x4c\xd3\x33\x47\x27\x98\x26\x86\x57\x7c\x78\xd9\xe6\x2c\x2d\x93\x11\x72\xa7\xfd\x5a\xd5\xfb\x60\x31\xb3\xb8\x5c\x35\x9e\x82\x54\xa3\xed\xcc\xca\x61\xd2\xd1\xab\xe1\x5f\x61\x76\x56\x99\x63\xd8\xbc\xde\x1c\xe6\xc8\xbb\xaf\xd0\xa9\x42\x86\x15\x6e\x97\xb0\x7a\xd0\xaa\x54\xe1\xeb\x5c\xfc\x6f\x89\xe0\x9b\xbe\xad\x18\x46\xe8\x97\xf3\xe1\x43\xcc\xaa\x38\xaa\xa2\x37\x5f\x62\x4d\x1f\x9b\xc3\xe8\xd4\x6b\xcc\x91\x75\x7e\x82\x67\x90\x1e\x26\x98\x55\x95\x9a\x94\x47\x24\xab\xab\xe3\x49\xaa\x0f\x1c\x77\x23\x3d\x07\x7c\x9a\x49\x42\xcd\x71\x07\x9b\xd5\xf2\xc1\x92\xeb\xaf\xe3\x34\xfb\x10\xd9\x1f\x3a\xff\x94\xd8\x68\x76\xbb\xc4\x99\xe2\x90\xdb\x08\x2f\xff\x1e\x5d\xe4\xa4\xd7\xf4\x31\x2b\x43\x9c\x18\x46\xa4\x7f\x73\x0e\x12\xa7\xbd\x95\x55\x8e\x78\x4a\x28\xf7\xe9\x5f\x87\x5e\xa9\xe1\x2d\x8d\x17\xf8\x47\x00\x00\x00\xff\xff\xfd\x7b\xf0\xa3\x1a\x04\x00\x00")
func templateTransactionTmplBytes() ([]byte, error) {
return bindataRead(
_templateTransactionTmpl,
"template/transaction.tmpl",
)
}
func templateTransactionTmpl() (*asset, error) {
bytes, err := templateTransactionTmplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "template/transaction.tmpl", size: 1050, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"template/collection.tmpl": templateCollectionTmpl,
"template/edge.tmpl": templateEdgeTmpl,
"template/enum.tmpl": templateEnumTmpl,
"template/node.tmpl": templateNodeTmpl,
"template/pagination.tmpl": templatePaginationTmpl,
"template/pagination_test.tmpl": templatePagination_testTmpl,
"template/transaction.tmpl": templateTransactionTmpl,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"template": &bintree{nil, map[string]*bintree{
"collection.tmpl": &bintree{templateCollectionTmpl, map[string]*bintree{}},
"edge.tmpl": &bintree{templateEdgeTmpl, map[string]*bintree{}},
"enum.tmpl": &bintree{templateEnumTmpl, map[string]*bintree{}},
"node.tmpl": &bintree{templateNodeTmpl, map[string]*bintree{}},
"pagination.tmpl": &bintree{templatePaginationTmpl, map[string]*bintree{}},
"pagination_test.tmpl": &bintree{templatePagination_testTmpl, map[string]*bintree{}},
"transaction.tmpl": &bintree{templateTransactionTmpl, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
| {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
} |
QTreeView.py | # encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
from QAbstractItemView import QAbstractItemView
class QTreeView(QAbstractItemView):
# no doc
def allColumnsShowFocus(self, *args, **kwargs): # real signature unknown
pass
def autoExpandDelay(self, *args, **kwargs): # real signature unknown
pass
def collapse(self, *args, **kwargs): # real signature unknown
pass
def collapseAll(self, *args, **kwargs): # real signature unknown
pass
def collapsed(self, *args, **kwargs): # real signature unknown
""" Signal """
pass
def columnAt(self, *args, **kwargs): # real signature unknown
pass
def columnCountChanged(self, *args, **kwargs): # real signature unknown
pass
def columnMoved(self, *args, **kwargs): # real signature unknown
pass
def columnResized(self, *args, **kwargs): # real signature unknown
pass
def columnViewportPosition(self, *args, **kwargs): # real signature unknown
pass
def columnWidth(self, *args, **kwargs): # real signature unknown
pass
def currentChanged(self, *args, **kwargs): # real signature unknown
pass
def dataChanged(self, *args, **kwargs): # real signature unknown
pass
def doItemsLayout(self, *args, **kwargs): # real signature unknown
pass
def dragMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def drawBranches(self, *args, **kwargs): # real signature unknown
pass
def drawRow(self, *args, **kwargs): # real signature unknown
pass
def drawTree(self, *args, **kwargs): # real signature unknown
pass
def expand(self, *args, **kwargs): # real signature unknown
pass
def expandAll(self, *args, **kwargs): # real signature unknown
pass
def expanded(self, *args, **kwargs): # real signature unknown
""" Signal """
pass
def expandsOnDoubleClick(self, *args, **kwargs): # real signature unknown
pass
def expandToDepth(self, *args, **kwargs): # real signature unknown
pass
def header(self, *args, **kwargs): # real signature unknown
pass
def hideColumn(self, *args, **kwargs): # real signature unknown
pass
def horizontalOffset(self, *args, **kwargs): # real signature unknown
pass
def horizontalScrollbarAction(self, *args, **kwargs): # real signature unknown
pass
def indentation(self, *args, **kwargs): # real signature unknown
pass
def indexAbove(self, *args, **kwargs): # real signature unknown
pass
def indexAt(self, *args, **kwargs): # real signature unknown
pass
def indexBelow(self, *args, **kwargs): # real signature unknown
pass
def indexRowSizeHint(self, *args, **kwargs): # real signature unknown
pass
def isAnimated(self, *args, **kwargs): # real signature unknown
pass
def isColumnHidden(self, *args, **kwargs): # real signature unknown
pass
def isExpanded(self, *args, **kwargs): # real signature unknown
pass
def isFirstColumnSpanned(self, *args, **kwargs): # real signature unknown
pass
def isHeaderHidden(self, *args, **kwargs): # real signature unknown
pass
def isIndexHidden(self, *args, **kwargs): # real signature unknown
pass
def isRowHidden(self, *args, **kwargs): # real signature unknown
pass
def isSortingEnabled(self, *args, **kwargs): # real signature unknown
pass
def itemsExpandable(self, *args, **kwargs): # real signature unknown
pass
def keyboardSearch(self, *args, **kwargs): # real signature unknown
pass
def keyPressEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseDoubleClickEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def mousePressEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseReleaseEvent(self, *args, **kwargs): # real signature unknown
pass
def moveCursor(self, *args, **kwargs): # real signature unknown
pass
def | (self, *args, **kwargs): # real signature unknown
pass
def reexpand(self, *args, **kwargs): # real signature unknown
pass
def reset(self, *args, **kwargs): # real signature unknown
pass
def resizeColumnToContents(self, *args, **kwargs): # real signature unknown
pass
def rootIsDecorated(self, *args, **kwargs): # real signature unknown
pass
def rowHeight(self, *args, **kwargs): # real signature unknown
pass
def rowsAboutToBeRemoved(self, *args, **kwargs): # real signature unknown
pass
def rowsInserted(self, *args, **kwargs): # real signature unknown
pass
def rowsRemoved(self, *args, **kwargs): # real signature unknown
pass
def scrollContentsBy(self, *args, **kwargs): # real signature unknown
pass
def scrollTo(self, *args, **kwargs): # real signature unknown
pass
def selectAll(self, *args, **kwargs): # real signature unknown
pass
def selectedIndexes(self, *args, **kwargs): # real signature unknown
pass
def selectionChanged(self, *args, **kwargs): # real signature unknown
pass
def setAllColumnsShowFocus(self, *args, **kwargs): # real signature unknown
pass
def setAnimated(self, *args, **kwargs): # real signature unknown
pass
def setAutoExpandDelay(self, *args, **kwargs): # real signature unknown
pass
def setColumnHidden(self, *args, **kwargs): # real signature unknown
pass
def setColumnWidth(self, *args, **kwargs): # real signature unknown
pass
def setExpanded(self, *args, **kwargs): # real signature unknown
pass
def setExpandsOnDoubleClick(self, *args, **kwargs): # real signature unknown
pass
def setFirstColumnSpanned(self, *args, **kwargs): # real signature unknown
pass
def setHeader(self, *args, **kwargs): # real signature unknown
pass
def setHeaderHidden(self, *args, **kwargs): # real signature unknown
pass
def setIndentation(self, *args, **kwargs): # real signature unknown
pass
def setItemsExpandable(self, *args, **kwargs): # real signature unknown
pass
def setModel(self, *args, **kwargs): # real signature unknown
pass
def setRootIndex(self, *args, **kwargs): # real signature unknown
pass
def setRootIsDecorated(self, *args, **kwargs): # real signature unknown
pass
def setRowHidden(self, *args, **kwargs): # real signature unknown
pass
def setSelection(self, *args, **kwargs): # real signature unknown
pass
def setSelectionModel(self, *args, **kwargs): # real signature unknown
pass
def setSortingEnabled(self, *args, **kwargs): # real signature unknown
pass
def setUniformRowHeights(self, *args, **kwargs): # real signature unknown
pass
def setWordWrap(self, *args, **kwargs): # real signature unknown
pass
def showColumn(self, *args, **kwargs): # real signature unknown
pass
def sizeHintForColumn(self, *args, **kwargs): # real signature unknown
pass
def sortByColumn(self, *args, **kwargs): # real signature unknown
pass
def timerEvent(self, *args, **kwargs): # real signature unknown
pass
def uniformRowHeights(self, *args, **kwargs): # real signature unknown
pass
def updateGeometries(self, *args, **kwargs): # real signature unknown
pass
def verticalOffset(self, *args, **kwargs): # real signature unknown
pass
def viewportEvent(self, *args, **kwargs): # real signature unknown
pass
def visualRect(self, *args, **kwargs): # real signature unknown
pass
def visualRegionForSelection(self, *args, **kwargs): # real signature unknown
pass
def wordWrap(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
staticMetaObject = None # (!) real value is '<PySide.QtCore.QMetaObject object at 0x000000000406DB88>'
| paintEvent |
statement-store.js | import debounce from 'lodash.debounce';
import { action, computed, observable } from 'mobx';
import { isDesktop, toMoment } from '@deriv/shared';
import Shortcode from 'Modules/Reports/Helpers/shortcode';
import { WS } from 'Services/ws-methods';
import { formatStatementTransaction } from './Helpers/format-response';
import getDateBoundaries from '../Profit/Helpers/format-request';
import BaseStore from '../../base-store';
const batch_size = 100; // request response limit
const delay_on_scroll_time = 150; // fetch debounce delay on scroll
export default class StatementStore extends BaseStore {
@observable data = [];
@observable is_loading = false;
@observable has_loaded_all = false;
@observable date_from = null;
@observable date_to = toMoment()
.startOf('day')
.add(1, 'd')
.subtract(1, 's')
.unix();
@observable error = '';
@observable filtered_date_range;
// `client_loginid` is only used to detect if this is in sync with the client-store, don't rely on
// this for calculations. Use the client.currency instead.
@observable client_loginid = '';
@computed
get is_empty() {
return !this.is_loading && this.data.length === 0;
}
@computed
get has_selected_date() {
return !!(this.date_from || this.date_to);
}
@computed
get data_source() {
// TODO: remove this getter once Multiplier is supported in mobile
return isDesktop() ? this.data : this.data.filter(row => !Shortcode.isMultiplier({ shortcode: row.shortcode }));
}
@action.bound
clearTable() {
this.data = [];
this.has_loaded_all = false;
this.is_loading = false;
}
@action.bound
clearDateFilter() {
this.date_from = null;
this.date_to = toMoment()
.startOf('day')
.add(1, 'd')
.subtract(1, 's')
.unix();
this.partial_fetch_time = 0;
}
shouldFetchNextBatch(should_load_partially) {
if (!should_load_partially && (this.has_loaded_all || this.is_loading)) return false;
const today = toMoment()
.startOf('day')
.add(1, 'd')
.subtract(1, 's')
.unix();
if (this.date_to < today) return !should_load_partially && this.partial_fetch_time;
return true;
}
@action.bound
async fetchNextBatch(should_load_partially = false) {
if (!this.shouldFetchNextBatch(should_load_partially)) return;
this.is_loading = true;
const response = await WS.statement(
batch_size,
!should_load_partially ? this.data.length : undefined,
getDateBoundaries(this.date_from, this.date_to, this.partial_fetch_time, should_load_partially)
);
this.statementHandler(response, should_load_partially);
}
@action.bound
statementHandler(response, should_load_partially) {
if ('error' in response) {
this.error = response.error.message;
return;
}
const formatted_transactions = response.statement.transactions.map(transaction =>
formatStatementTransaction(
transaction,
this.root_store.client.currency,
this.root_store.modules.trade.active_symbols
)
);
if (should_load_partially) {
this.data = [...formatted_transactions, ...this.data];
} else {
this.data = [...this.data, ...formatted_transactions];
}
this.has_loaded_all = !should_load_partially && formatted_transactions.length < batch_size;
this.is_loading = false;
if (formatted_transactions.length > 0) {
this.partial_fetch_time = toMoment().unix();
}
}
@action.bound
handleDateChange(date_values, { date_range } = {}) {
this.filtered_date_range = date_range;
this.date_from = date_values?.from ?? (date_values.is_batch ? null : this.date_from);
this.date_to = date_values?.to ?? this.date_to;
this.clearTable();
this.fetchNextBatch();
}
fetchOnScroll = debounce(left => {
if (left < 2000) {
this.fetchNextBatch();
}
}, delay_on_scroll_time);
@action.bound
handleScroll(event) {
const { scrollTop, scrollHeight, clientHeight } = event.target;
const left_to_scroll = scrollHeight - (scrollTop + clientHeight);
this.fetchOnScroll(left_to_scroll);
}
@action.bound
accountSwitcherListener() {
return new Promise(resolve => {
this.clearTable();
this.clearDateFilter();
return resolve(this.fetchNextBatch());
});
}
@action.bound
networkStatusChangeListener(is_online) {
this.is_loading = !is_online;
}
@action.bound
async onMount() {
this.assertHasValidCache(
this.client_loginid,
this.clearDateFilter,
this.clearTable,
WS.forgetAll.bind(null, 'proposal')
);
this.client_loginid = this.root_store.client.loginid;
this.onSwitchAccount(this.accountSwitcherListener);
this.onNetworkStatusChange(this.networkStatusChangeListener);
await WS.wait('authorize');
this.fetchNextBatch(true); | @action.bound
onUnmount() {
this.clearTable();
this.clearDateFilter();
this.disposeSwitchAccount();
WS.forgetAll('proposal');
}
} | }
|
binder.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"context"
"fmt"
"sort"
"strings"
"time"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib"
csiplugins "k8s.io/csi-translation-lib/plugins"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
)
// ConflictReason is used for the special strings which explain why
// volume binding is impossible for a node.
type ConflictReason string
// ConflictReasons contains all reasons that explain why volume binding is impossible for a node.
type ConflictReasons []ConflictReason
func (reasons ConflictReasons) Len() int { return len(reasons) }
func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
func (reasons ConflictReasons) Swap(i, j int) { reasons[i], reasons[j] = reasons[j], reasons[i] }
const (
// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
// ErrReasonNotEnoughSpace is used when a pod cannot start on a node because not enough storage space is available.
ErrReasonNotEnoughSpace = "node(s) did not have enough free storage"
// ErrReasonPVNotExist is used when a pod has one or more PVC(s) bound to non-existent persistent volume(s)"
ErrReasonPVNotExist = "node(s) unavailable due to one or more pvc(s) bound to non-existent pv(s)"
)
// BindingInfo holds a binding between PV and PVC.
type BindingInfo struct {
// PVC that needs to be bound
pvc *v1.PersistentVolumeClaim
// Proposed PV to bind to this PVC
pv *v1.PersistentVolume
}
// StorageClassName returns the name of the storage class.
func (b *BindingInfo) StorageClassName() string {
return b.pv.Spec.StorageClassName
}
// StorageResource represents storage resource.
type StorageResource struct {
Requested int64
Capacity int64
}
// StorageResource returns storage resource.
func (b *BindingInfo) StorageResource() *StorageResource {
// both fields are mandatory
requestedQty := b.pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
capacityQty := b.pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
return &StorageResource{
Requested: requestedQty.Value(),
Capacity: capacityQty.Value(),
}
}
// PodVolumes holds pod's volumes information used in volume scheduling.
type PodVolumes struct {
// StaticBindings are binding decisions for PVCs which can be bound to
// pre-provisioned static PVs.
StaticBindings []*BindingInfo
// DynamicProvisions are PVCs that require dynamic provisioning
DynamicProvisions []*v1.PersistentVolumeClaim
}
// InTreeToCSITranslator contains methods required to check migratable status
// and perform translations from InTree PV's to CSI
type InTreeToCSITranslator interface {
IsPVMigratable(pv *v1.PersistentVolume) bool
GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error)
TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
}
// SchedulerVolumeBinder is used by the scheduler VolumeBinding plugin to
// handle PVC/PV binding and dynamic provisioning. The binding decisions are
// integrated into the pod scheduling workflow so that the PV NodeAffinity is
// also considered along with the pod's other scheduling requirements.
//
// This integrates into the existing scheduler workflow as follows:
// 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
// a. Invokes all pre-filter plugins for the pod. GetPodVolumes() is invoked
// here, pod volume information will be saved in current scheduling cycle state for later use.
// b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here.
// c. Invokes all score plugins. Future/TBD
// d. Selects the best node for the Pod.
// e. Invokes all reserve plugins. AssumePodVolumes() is invoked here.
// i. If PVC binding is required, cache in-memory only:
// * For manual binding: update PV objects for prebinding to the corresponding PVCs.
// * For dynamic provisioning: update PVC object with a selected node from c)
// * For the pod, which PVCs and PVs need API updates.
// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache,
// This is handled in the scheduler and not here.
// f. Asynchronously bind volumes and pod in a separate goroutine
// i. BindPodVolumes() is called first in PreBind phase. It makes all the necessary API updates and waits for
// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent
// back through the scheduler.
// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding.
// 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue
// while the actual binding operation occurs in the background.
type SchedulerVolumeBinder interface {
// GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
// and unbound with immediate binding (including prebound)
GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error)
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
// node and returns pod's volumes information.
//
// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
// Otherwise, it tries to find an available PV to bind to the PVC.
//
// It returns an error when something went wrong or a list of reasons why the node is
// (currently) not usable for the pod.
//
// If the CSIStorageCapacity feature is enabled, then it also checks for sufficient storage
// for volumes that still need to be created.
//
// This function is called by the scheduler VolumeBinding plugin and can be called in parallel
FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error)
// AssumePodVolumes will:
// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
// that the PV is prebound to the PVC.
// 2. Take the PVCs that need provisioning and update the PVC cache with related
// annotations set.
//
// It returns true if all volumes are fully bound
//
// This function is called serially.
AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error)
// RevertAssumedPodVolumes will revert assumed PV and PVC cache.
RevertAssumedPodVolumes(podVolumes *PodVolumes)
// BindPodVolumes will:
// 1. Initiate the volume binding by making the API call to prebind the PV
// to its matching PVC.
// 2. Trigger the volume provisioning by making the API call to set related
// annotations on the PVC
// 3. Wait for PVCs to be completely bound by the PV controller
//
// This function can be called in parallel.
BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes) error
}
type volumeBinder struct {
kubeClient clientset.Interface
classLister storagelisters.StorageClassLister
podLister corelisters.PodLister
nodeLister corelisters.NodeLister
csiNodeLister storagelisters.CSINodeLister
pvcCache PVCAssumeCache
pvCache PVAssumeCache
// Amount of time to wait for the bind operation to succeed
bindTimeout time.Duration
translator InTreeToCSITranslator
csiDriverLister storagelisters.CSIDriverLister
csiStorageCapacityLister storagelisters.CSIStorageCapacityLister
}
// CapacityCheck contains additional parameters for NewVolumeBinder that
// are only needed when checking volume sizes against available storage
// capacity is desired.
type CapacityCheck struct {
CSIDriverInformer storageinformers.CSIDriverInformer
CSIStorageCapacityInformer storageinformers.CSIStorageCapacityInformer
}
// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
//
// capacityCheck determines how storage capacity is checked (CSIStorageCapacity feature).
func NewVolumeBinder(
kubeClient clientset.Interface,
podInformer coreinformers.PodInformer,
nodeInformer coreinformers.NodeInformer,
csiNodeInformer storageinformers.CSINodeInformer,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
pvInformer coreinformers.PersistentVolumeInformer,
storageClassInformer storageinformers.StorageClassInformer,
capacityCheck CapacityCheck,
bindTimeout time.Duration) SchedulerVolumeBinder {
b := &volumeBinder{
kubeClient: kubeClient,
podLister: podInformer.Lister(),
classLister: storageClassInformer.Lister(),
nodeLister: nodeInformer.Lister(),
csiNodeLister: csiNodeInformer.Lister(),
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
pvCache: NewPVAssumeCache(pvInformer.Informer()),
bindTimeout: bindTimeout,
translator: csitrans.New(),
}
b.csiDriverLister = capacityCheck.CSIDriverInformer.Lister()
b.csiStorageCapacityLister = capacityCheck.CSIStorageCapacityInformer.Lister()
return b
}
// FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs
// for the given pod and node. If the node does not fit, confilict reasons are
// returned.
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
podVolumes = &PodVolumes{}
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
klog.V(5).InfoS("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node))
// Initialize to true for pods that don't have volumes. These
// booleans get translated into reason strings when the function
// returns without an error.
unboundVolumesSatisfied := true
boundVolumesSatisfied := true
sufficientStorage := true
boundPVsFound := true
defer func() { | }
if !boundVolumesSatisfied {
reasons = append(reasons, ErrReasonNodeConflict)
}
if !unboundVolumesSatisfied {
reasons = append(reasons, ErrReasonBindConflict)
}
if !sufficientStorage {
reasons = append(reasons, ErrReasonNotEnoughSpace)
}
if !boundPVsFound {
reasons = append(reasons, ErrReasonPVNotExist)
}
}()
defer func() {
if err != nil {
metrics.VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc()
}
}()
var (
staticBindings []*BindingInfo
dynamicProvisions []*v1.PersistentVolumeClaim
)
defer func() {
// Although we do not distinguish nil from empty in this function, for
// easier testing, we normalize empty to nil.
if len(staticBindings) == 0 {
staticBindings = nil
}
if len(dynamicProvisions) == 0 {
dynamicProvisions = nil
}
podVolumes.StaticBindings = staticBindings
podVolumes.DynamicProvisions = dynamicProvisions
}()
// Check PV node affinity on bound volumes
if len(boundClaims) > 0 {
boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, pod)
if err != nil {
return
}
}
// Find matching volumes and node for unbound claims
if len(claimsToBind) > 0 {
var (
claimsToFindMatching []*v1.PersistentVolumeClaim
claimsToProvision []*v1.PersistentVolumeClaim
)
// Filter out claims to provision
for _, claim := range claimsToBind {
if selectedNode, ok := claim.Annotations[volume.AnnSelectedNode]; ok {
if selectedNode != node.Name {
// Fast path, skip unmatched node.
unboundVolumesSatisfied = false
return
}
claimsToProvision = append(claimsToProvision, claim)
} else {
claimsToFindMatching = append(claimsToFindMatching, claim)
}
}
// Find matching volumes
if len(claimsToFindMatching) > 0 {
var unboundClaims []*v1.PersistentVolumeClaim
unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
if err != nil {
return
}
claimsToProvision = append(claimsToProvision, unboundClaims...)
}
// Check for claims to provision. This is the first time where we potentially
// find out that storage is not sufficient for the node.
if len(claimsToProvision) > 0 {
unboundVolumesSatisfied, sufficientStorage, dynamicProvisions, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil {
return
}
}
}
return
}
// AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
// volume information for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
// 2. Update the pvcCache with the new PVCs with annotations set
// 3. Update PodVolumes again with cached API updates for PVs and PVCs.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) {
klog.V(4).InfoS("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
defer func() {
if err != nil {
metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc()
}
}()
if allBound := b.arePodVolumesBound(assumedPod); allBound {
klog.V(4).InfoS("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
return true, nil
}
// Assume PV
newBindings := []*BindingInfo{}
for _, binding := range podVolumes.StaticBindings {
newPV, dirty, err := volume.GetBindVolumeToClaim(binding.pv, binding.pvc)
klog.V(5).InfoS("AssumePodVolumes: GetBindVolumeToClaim",
"pod", klog.KObj(assumedPod),
"PV", klog.KObj(binding.pv),
"PVC", klog.KObj(binding.pvc),
"newPV", klog.KObj(newPV),
"dirty", dirty,
)
if err != nil {
klog.ErrorS(err, "AssumePodVolumes: fail to GetBindVolumeToClaim")
b.revertAssumedPVs(newBindings)
return false, err
}
// TODO: can we assume every time?
if dirty {
err = b.pvCache.Assume(newPV)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, err
}
}
newBindings = append(newBindings, &BindingInfo{pv: newPV, pvc: binding.pvc})
}
// Assume PVCs
newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
for _, claim := range podVolumes.DynamicProvisions {
// The claims from method args can be pointing to watcher cache. We must not
// modify these, therefore create a copy.
claimClone := claim.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, volume.AnnSelectedNode, nodeName)
err = b.pvcCache.Assume(claimClone)
if err != nil {
b.revertAssumedPVs(newBindings)
b.revertAssumedPVCs(newProvisionedPVCs)
return
}
newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
}
podVolumes.StaticBindings = newBindings
podVolumes.DynamicProvisions = newProvisionedPVCs
return
}
// RevertAssumedPodVolumes will revert assumed PV and PVC cache.
func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) {
b.revertAssumedPVs(podVolumes.StaticBindings)
b.revertAssumedPVCs(podVolumes.DynamicProvisions)
}
// BindPodVolumes gets the cached bindings and PVCs to provision in pod's volumes information,
// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
// by the PV controller.
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod, podVolumes *PodVolumes) (err error) {
klog.V(4).InfoS("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName))
defer func() {
if err != nil {
metrics.VolumeSchedulingStageFailed.WithLabelValues("bind").Inc()
}
}()
bindings := podVolumes.StaticBindings
claimsToProvision := podVolumes.DynamicProvisions
// Start API operations
err = b.bindAPIUpdate(assumedPod, bindings, claimsToProvision)
if err != nil {
return err
}
err = wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
b, err := b.checkBindings(assumedPod, bindings, claimsToProvision)
return b, err
})
if err != nil {
return fmt.Errorf("binding volumes: %w", err)
}
return nil
}
func getPodName(pod *v1.Pod) string {
return pod.Namespace + "/" + pod.Name
}
func getPVCName(pvc *v1.PersistentVolumeClaim) string {
return pvc.Namespace + "/" + pvc.Name
}
// bindAPIUpdate makes the API update for those PVs/PVCs.
func (b *volumeBinder) bindAPIUpdate(pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
podName := getPodName(pod)
if bindings == nil {
return fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
lastProcessedBinding := 0
lastProcessedProvisioning := 0
defer func() {
// only revert assumed cached updates for volumes we haven't successfully bound
if lastProcessedBinding < len(bindings) {
b.revertAssumedPVs(bindings[lastProcessedBinding:])
}
// only revert assumed cached updates for claims we haven't updated,
if lastProcessedProvisioning < len(claimsToProvision) {
b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:])
}
}()
var (
binding *BindingInfo
i int
claim *v1.PersistentVolumeClaim
)
// Do the actual prebinding. Let the PV controller take care of the rest
// There is no API rollback if the actual binding fails
for _, binding = range bindings {
klog.V(5).InfoS("bindAPIUpdate: binding PV to PVC", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
// TODO: does it hurt if we make an api call and nothing needs to be updated?
klog.V(2).InfoS("Claim bound to volume", "PVC", klog.KObj(binding.pvc), "PV", klog.KObj(binding.pv))
newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{})
if err != nil {
klog.V(4).InfoS("Updating PersistentVolume: binding to claim failed", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err)
return err
}
klog.V(4).InfoS("Updating PersistentVolume: bound to claim", "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
// Save updated object from apiserver for later checking.
binding.pv = newPV
lastProcessedBinding++
}
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
for i, claim = range claimsToProvision {
klog.V(5).InfoS("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim))
newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
if err != nil {
return err
}
// Save updated object from apiserver for later checking.
claimsToProvision[i] = newClaim
lastProcessedProvisioning++
}
return nil
}
var (
versioner = storage.APIObjectVersioner{}
)
// checkBindings runs through all the PVCs in the Pod and checks:
// * if the PVC is fully bound
// * if there are any conditions that require binding to fail and be retried
//
// It returns true when all of the Pod's PVCs are fully bound, and error if
// binding (and scheduling) needs to be retried
// Note that it checks on API objects not PV/PVC cache, this is because
// PV/PVC cache can be assumed again in main scheduler loop, we must check
// latest state in API server which are shared with PV controller and
// provisioners
func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
podName := getPodName(pod)
if bindings == nil {
return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
node, err := b.nodeLister.Get(pod.Spec.NodeName)
if err != nil {
return false, fmt.Errorf("failed to get node %q: %w", pod.Spec.NodeName, err)
}
csiNode, err := b.csiNodeLister.Get(node.Name)
if err != nil {
// TODO: return the error once CSINode is created by default
klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
}
// Check for any conditions that might require scheduling retry
// When pod is deleted, binding operation should be cancelled. There is no
// need to check PV/PVC bindings any more.
_, err = b.podLister.Pods(pod.Namespace).Get(pod.Name)
if err != nil {
if apierrors.IsNotFound(err) {
return false, fmt.Errorf("pod does not exist any more: %w", err)
}
klog.ErrorS(err, "Failed to get pod from the lister", "pod", klog.KObj(pod))
}
for _, binding := range bindings {
pv, err := b.pvCache.GetAPIPV(binding.pv.Name)
if err != nil {
return false, fmt.Errorf("failed to check binding: %w", err)
}
pvc, err := b.pvcCache.GetAPIPVC(getPVCName(binding.pvc))
if err != nil {
return false, fmt.Errorf("failed to check binding: %w", err)
}
// Because we updated PV in apiserver, skip if API object is older
// and wait for new API object propagated from apiserver.
if versioner.CompareResourceVersion(binding.pv, pv) > 0 {
return false, nil
}
pv, err = b.tryTranslatePVToCSI(pv, csiNode)
if err != nil {
return false, fmt.Errorf("failed to translate pv to csi: %w", err)
}
// Check PV's node affinity (the node might not have the proper label)
if err := volume.CheckNodeAffinity(pv, node.Labels); err != nil {
return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err)
}
// Check if pv.ClaimRef got dropped by unbindVolume()
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" {
return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name)
}
// Check if pvc is fully bound
if !b.isPVCFullyBound(pvc) {
return false, nil
}
}
for _, claim := range claimsToProvision {
pvc, err := b.pvcCache.GetAPIPVC(getPVCName(claim))
if err != nil {
return false, fmt.Errorf("failed to check provisioning pvc: %w", err)
}
// Because we updated PVC in apiserver, skip if API object is older
// and wait for new API object propagated from apiserver.
if versioner.CompareResourceVersion(claim, pvc) > 0 {
return false, nil
}
// Check if selectedNode annotation is still set
if pvc.Annotations == nil {
return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
}
selectedNode := pvc.Annotations[volume.AnnSelectedNode]
if selectedNode != pod.Spec.NodeName {
// If provisioner fails to provision a volume, selectedNode
// annotation will be removed to signal back to the scheduler to
// retry.
return false, fmt.Errorf("provisioning failed for PVC %q", pvc.Name)
}
// If the PVC is bound to a PV, check its node affinity
if pvc.Spec.VolumeName != "" {
pv, err := b.pvCache.GetAPIPV(pvc.Spec.VolumeName)
if err != nil {
if _, ok := err.(*errNotFound); ok {
// We tolerate NotFound error here, because PV is possibly
// not found because of API delay, we can check next time.
// And if PV does not exist because it's deleted, PVC will
// be unbound eventually.
return false, nil
}
return false, fmt.Errorf("failed to get pv %q from cache: %w", pvc.Spec.VolumeName, err)
}
pv, err = b.tryTranslatePVToCSI(pv, csiNode)
if err != nil {
return false, err
}
if err := volume.CheckNodeAffinity(pv, node.Labels); err != nil {
return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err)
}
}
// Check if pvc is fully bound
if !b.isPVCFullyBound(pvc) {
return false, nil
}
}
// All pvs and pvcs that we operated on are bound
klog.V(4).InfoS("All PVCs for pod are bound", "pod", klog.KObj(pod))
return true, nil
}
func (b *volumeBinder) isVolumeBound(pod *v1.Pod, vol *v1.Volume) (bound bool, pvc *v1.PersistentVolumeClaim, err error) {
pvcName := ""
isEphemeral := false
switch {
case vol.PersistentVolumeClaim != nil:
pvcName = vol.PersistentVolumeClaim.ClaimName
case vol.Ephemeral != nil:
// Generic ephemeral inline volumes also use a PVC,
// just with a computed name, and...
pvcName = ephemeral.VolumeClaimName(pod, vol)
isEphemeral = true
default:
return true, nil, nil
}
bound, pvc, err = b.isPVCBound(pod.Namespace, pvcName)
// ... the PVC must be owned by the pod.
if isEphemeral && err == nil && pvc != nil {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return false, nil, err
}
}
return
}
func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: namespace,
},
}
pvcKey := getPVCName(claim)
pvc, err := b.pvcCache.GetPVC(pvcKey)
if err != nil || pvc == nil {
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err)
}
fullyBound := b.isPVCFullyBound(pvc)
if fullyBound {
klog.V(5).InfoS("PVC is fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
} else {
if pvc.Spec.VolumeName != "" {
klog.V(5).InfoS("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
} else {
klog.V(5).InfoS("PVC is not bound", "PVC", klog.KObj(pvc))
}
}
return fullyBound, pvc, nil
}
func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool {
return pvc.Spec.VolumeName != "" && metav1.HasAnnotation(pvc.ObjectMeta, volume.AnnBindCompleted)
}
// arePodVolumesBound returns true if all volumes are fully bound
func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
for _, vol := range pod.Spec.Volumes {
if isBound, _, _ := b.isVolumeBound(pod, &vol); !isBound {
// Pod has at least one PVC that needs binding
return false
}
}
return true
}
// GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
// and unbound with immediate binding (including prebound)
func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaimsDelayBinding []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
boundClaims = []*v1.PersistentVolumeClaim{}
unboundClaimsImmediate = []*v1.PersistentVolumeClaim{}
unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{}
for _, vol := range pod.Spec.Volumes {
volumeBound, pvc, err := b.isVolumeBound(pod, &vol)
if err != nil {
return nil, nil, nil, err
}
if pvc == nil {
continue
}
if volumeBound {
boundClaims = append(boundClaims, pvc)
} else {
delayBindingMode, err := volume.IsDelayBindingMode(pvc, b.classLister)
if err != nil {
return nil, nil, nil, err
}
// Prebound PVCs are treated as unbound immediate binding
if delayBindingMode && pvc.Spec.VolumeName == "" {
// Scheduler path
unboundClaimsDelayBinding = append(unboundClaimsDelayBinding, pvc)
} else {
// !delayBindingMode || pvc.Spec.VolumeName != ""
// Immediate binding should have already been bound
unboundClaimsImmediate = append(unboundClaimsImmediate, pvc)
}
}
}
return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil
}
func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) {
csiNode, err := b.csiNodeLister.Get(node.Name)
if err != nil {
// TODO: return the error once CSINode is created by default
klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
}
for _, pvc := range claims {
pvName := pvc.Spec.VolumeName
pv, err := b.pvCache.GetPV(pvName)
if err != nil {
if _, ok := err.(*errNotFound); ok {
err = nil
}
return true, false, err
}
pv, err = b.tryTranslatePVToCSI(pv, csiNode)
if err != nil {
return false, true, err
}
err = volume.CheckNodeAffinity(pv, node.Labels)
if err != nil {
klog.V(4).InfoS("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err)
return false, true, nil
}
klog.V(5).InfoS("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod))
}
klog.V(4).InfoS("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true, true, nil
}
// findMatchingVolumes tries to find matching volumes for given claims,
// and return unbound claims for further provision.
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
// Sort all the claims by increasing size request to get the smallest fits
sort.Sort(byPVCSize(claimsToBind))
chosenPVs := map[string]*v1.PersistentVolume{}
foundMatches = true
for _, pvc := range claimsToBind {
// Get storage class name from each PVC
storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
allPVs := b.pvCache.ListPVs(storageClassName)
// Find a matching PV
pv, err := volume.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true)
if err != nil {
return false, nil, nil, err
}
if pv == nil {
klog.V(4).InfoS("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node))
unboundClaims = append(unboundClaims, pvc)
foundMatches = false
continue
}
// matching PV needs to be excluded so we don't select it again
chosenPVs[pv.Name] = pv
bindings = append(bindings, &BindingInfo{pv: pv, pvc: pvc})
klog.V(5).InfoS("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod))
}
if foundMatches {
klog.V(4).InfoS("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node))
}
return
}
// checkVolumeProvisions checks given unbound claims (the claims have gone through func
// findMatchingVolumes, and do not have matching volumes for binding), and return true
// if all of the claims are eligible for dynamic provision.
func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) {
dynamicProvisions = []*v1.PersistentVolumeClaim{}
// We return early with provisionedClaims == nil if a check
// fails or we encounter an error.
for _, claim := range claimsToProvision {
pvcName := getPVCName(claim)
className := volume.GetPersistentVolumeClaimClass(claim)
if className == "" {
return false, false, nil, fmt.Errorf("no class for claim %q", pvcName)
}
class, err := b.classLister.Get(className)
if err != nil {
return false, false, nil, fmt.Errorf("failed to find storage class %q", className)
}
provisioner := class.Provisioner
if provisioner == "" || provisioner == volume.NotSupportedProvisioner {
klog.V(4).InfoS("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim))
return false, true, nil, nil
}
// Check if the node can satisfy the topology requirement in the class
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
klog.V(4).InfoS("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim))
return false, true, nil, nil
}
// Check storage capacity.
sufficient, err := b.hasEnoughCapacity(provisioner, claim, class, node)
if err != nil {
return false, false, nil, err
}
if !sufficient {
// hasEnoughCapacity logs an explanation.
return true, false, nil, nil
}
dynamicProvisions = append(dynamicProvisions, claim)
}
klog.V(4).InfoS("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))
return true, true, dynamicProvisions, nil
}
func (b *volumeBinder) revertAssumedPVs(bindings []*BindingInfo) {
for _, BindingInfo := range bindings {
b.pvCache.Restore(BindingInfo.pv.Name)
}
}
func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
for _, claim := range claims {
b.pvcCache.Restore(getPVCName(claim))
}
}
// hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size
// that is available from the node.
func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, error) {
quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage]
if !ok {
// No capacity to check for.
return true, nil
}
// Only enabled for CSI drivers which opt into it.
driver, err := b.csiDriverLister.Get(provisioner)
if err != nil {
if apierrors.IsNotFound(err) {
// Either the provisioner is not a CSI driver or the driver does not
// opt into storage capacity scheduling. Either way, skip
// capacity checking.
return true, nil
}
return false, err
}
if driver.Spec.StorageCapacity == nil || !*driver.Spec.StorageCapacity {
return true, nil
}
// Look for a matching CSIStorageCapacity object(s).
// TODO (for beta): benchmark this and potentially introduce some kind of lookup structure (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-654356718).
capacities, err := b.csiStorageCapacityLister.List(labels.Everything())
if err != nil {
return false, err
}
sizeInBytes := quantity.Value()
for _, capacity := range capacities {
if capacity.StorageClassName == storageClass.Name &&
capacitySufficient(capacity, sizeInBytes) &&
b.nodeHasAccess(node, capacity) {
// Enough capacity found.
return true, nil
}
}
// TODO (?): this doesn't give any information about which pools where considered and why
// they had to be rejected. Log that above? But that might be a lot of log output...
klog.V(4).InfoS("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
"node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
return false, nil
}
func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {
limit := capacity.Capacity
if capacity.MaximumVolumeSize != nil {
// Prefer MaximumVolumeSize if available, it is more precise.
limit = capacity.MaximumVolumeSize
}
return limit != nil && limit.Value() >= sizeInBytes
}
func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1.CSIStorageCapacity) bool {
if capacity.NodeTopology == nil {
// Unavailable
return false
}
// Only matching by label is supported.
selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology)
if err != nil {
klog.ErrorS(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology)
return false
}
return selector.Matches(labels.Set(node.Labels))
}
type byPVCSize []*v1.PersistentVolumeClaim
func (a byPVCSize) Len() int {
return len(a)
}
func (a byPVCSize) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a byPVCSize) Less(i, j int) bool {
iSize := a[i].Spec.Resources.Requests[v1.ResourceStorage]
jSize := a[j].Spec.Resources.Requests[v1.ResourceStorage]
// return true if iSize is less than jSize
return iSize.Cmp(jSize) == -1
}
// isCSIMigrationOnForPlugin checks if CSI migration is enabled for a given plugin.
func isCSIMigrationOnForPlugin(pluginName string) bool {
switch pluginName {
case csiplugins.AWSEBSInTreePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS)
case csiplugins.GCEPDInTreePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE)
case csiplugins.AzureDiskInTreePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk)
case csiplugins.CinderInTreePluginName:
return true
case csiplugins.PortworxVolumePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx)
case csiplugins.RBDVolumePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationRBD)
}
return false
}
// isPluginMigratedToCSIOnNode checks if an in-tree plugin has been migrated to a CSI driver on the node.
func isPluginMigratedToCSIOnNode(pluginName string, csiNode *storagev1.CSINode) bool {
if csiNode == nil {
return false
}
csiNodeAnn := csiNode.GetAnnotations()
if csiNodeAnn == nil {
return false
}
var mpaSet sets.String
mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey]
if len(mpa) == 0 {
mpaSet = sets.NewString()
} else {
tok := strings.Split(mpa, ",")
mpaSet = sets.NewString(tok...)
}
return mpaSet.Has(pluginName)
}
// tryTranslatePVToCSI will translate the in-tree PV to CSI if it meets the criteria. If not, it returns the unmodified in-tree PV.
func (b *volumeBinder) tryTranslatePVToCSI(pv *v1.PersistentVolume, csiNode *storagev1.CSINode) (*v1.PersistentVolume, error) {
if !b.translator.IsPVMigratable(pv) {
return pv, nil
}
pluginName, err := b.translator.GetInTreePluginNameFromSpec(pv, nil)
if err != nil {
return nil, fmt.Errorf("could not get plugin name from pv: %v", err)
}
if !isCSIMigrationOnForPlugin(pluginName) {
return pv, nil
}
if !isPluginMigratedToCSIOnNode(pluginName, csiNode) {
return pv, nil
}
transPV, err := b.translator.TranslateInTreePVToCSI(pv)
if err != nil {
return nil, fmt.Errorf("could not translate pv: %v", err)
}
return transPV, nil
} | if err != nil {
return |
blinky_basic.rs | //! Blink an led without using the BSP split() method.
#![no_std]
#![no_main]
use panic_halt as _;
use pygamer as hal;
use hal::clock::GenericClockController;
use hal::delay::Delay;
use hal::entry;
use hal::pac::{CorePeripherals, Peripherals};
use hal::prelude::*;
use hal::watchdog::{Watchdog, WatchdogTimeout};
#[entry]
fn main() -> ! {
let mut peripherals = Peripherals::take().unwrap();
let core = CorePeripherals::take().unwrap();
let mut clocks = GenericClockController::with_internal_32kosc(
peripherals.GCLK,
&mut peripherals.MCLK, | &mut peripherals.NVMCTRL,
);
let mut delay = Delay::new(core.SYST, &mut clocks);
delay.delay_ms(400u16);
let mut pins = hal::Pins::new(peripherals.PORT);
let mut red_led = pins.d13.into_open_drain_output(&mut pins.port);
let mut wdt = Watchdog::new(peripherals.WDT);
wdt.start(WatchdogTimeout::Cycles256 as u8);
loop {
delay.delay_ms(200u8);
wdt.feed();
red_led.set_high().unwrap();
delay.delay_ms(200u8);
wdt.feed();
red_led.set_low().unwrap();
}
} | &mut peripherals.OSC32KCTRL,
&mut peripherals.OSCCTRL, |
app.ts | import { main } from "https://deno.land/x/[email protected]/mod.ts";
main(async ({ vim }) => {
vim.register({
async hellow(): Promise<void> {
const undoFlag = await vim.g.get("hellow_undo_enable")
const undoLevels = await vim.eval("&undolevels");
if (!undoFlag) {
vim.execute('set undolevels=-1');
}
const line = await vim.call("line", "$");
const targetLine = ((max: number) => {
return Math.floor(Math.random() * max) + 1;
})(line);
vim.execute(`${targetLine}delete`);
vim.execute(`set undolevels=${undoLevels}`);
},
});
| noremap <silent> <Left> :<C-u>Hellow<CR>
noremap <silent> <Right> :<C-u>Hellow<CR>
`)
}); | await vim.execute(`
command! Hellow call denops#request('${vim.name}', 'hellow', [])
noremap <silent> <Up> :<C-u>Hellow<CR>
noremap <silent> <Down> :<C-u>Hellow<CR> |
02-render-state.rs | //! This program shows how to tweak the render state in order to render two simple triangles with
//! different parameters.
//!
//! From this tutorial on, vertex types and semantics are taken from a common.rs file.
//!
//! Press <space> to switch which triangle is rendered atop of which.
//! Press <b> to activate additive blending or disable it.
//! Press <escape> to quit or close the window.
//!
//! https://docs.rs/luminance
mod common;
use crate::common::{Semantics, Vertex, VertexPosition, VertexColor};
use luminance::blending::{Equation, Factor};
use luminance::context::GraphicsContext as _;
use luminance::pipeline::PipelineState;
use luminance::render_state::RenderState;
use luminance::shader::program::Program;
use luminance::tess::{Mode, TessBuilder};
use luminance_glfw::{Action, GlfwSurface, Key, Surface, WindowEvent, WindowDim, WindowOpt};
const VS: &'static str = include_str!("simple-vs.glsl");
const FS: &'static str = include_str!("simple-fs.glsl");
pub const TRI_RED_BLUE_VERTICES: [Vertex; 6] = [
// first triangle – a red one
Vertex { pos: VertexPosition::new([0.5, -0.5]), rgb: VertexColor::new([1., 0., 0.]) },
Vertex { pos: VertexPosition::new([0.0, 0.5]), rgb: VertexColor::new([1., 0., 0.]) },
Vertex { pos: VertexPosition::new([-0.5, -0.5]), rgb: VertexColor::new([1., 0., 0.]) },
// second triangle, a blue one
Vertex { pos: VertexPosition::new([-0.5, 0.5]), rgb: VertexColor::new([0., 0., 1.]) },
Vertex { pos: VertexPosition::new([0.0, -0.5]), rgb: VertexColor::new([0., 0., 1.]) },
Vertex { pos: VertexPosition::new([0.5, 0.5]), rgb: VertexColor::new([0., 0., 1.]) },
];
// Convenience type to demonstrate how the depth test influences the rendering of two triangles.
#[derive(Copy, Clone, Debug)]
enum DepthMethod {
Under, // draw the red triangle under the blue one
Atop, // draw the red triangle atop the blue one
}
impl DepthMethod {
fn toggle(self) -> Self {
match self {
DepthMethod::Under => DepthMethod::Atop,
DepthMethod::Atop => DepthMethod::Under,
}
}
}
type Blending = Option<(Equation, Factor, Factor)>;
// toggle between no blending and additive blending
fn toggle_blending(blending: Blending) -> Blending {
match blending {
None => Some((Equation::Additive, Factor::One, Factor::One)),
_ => None,
}
}
fn ma | {
let mut surface = GlfwSurface::new(
WindowDim::Windowed(960, 540),
"Hello, world!",
WindowOpt::default(),
)
.expect("GLFW surface creation");
let program = Program::<Semantics, (), ()>::from_strings(None, VS, None, FS)
.expect("program creation")
.ignore_warnings();
// create a red and blue triangles
let red_triangle = TessBuilder::new(&mut surface)
.add_vertices(&TRI_RED_BLUE_VERTICES[0..3])
.set_mode(Mode::Triangle)
.build()
.unwrap();
let blue_triangle = TessBuilder::new(&mut surface)
.add_vertices(&TRI_RED_BLUE_VERTICES[3..6])
.set_mode(Mode::Triangle)
.build()
.unwrap();
let mut back_buffer = surface.back_buffer().unwrap();
let mut blending = None;
let mut depth_method = DepthMethod::Under;
println!("now rendering red triangle {:?} the blue one", depth_method);
let mut resize = false;
'app: loop {
for event in surface.poll_events() {
match event {
WindowEvent::Close | WindowEvent::Key(Key::Escape, _, Action::Release, _) => break 'app,
WindowEvent::Key(Key::Space, _, Action::Release, _) => {
depth_method = depth_method.toggle();
println!("now rendering red triangle {:?} the blue one", depth_method);
}
WindowEvent::Key(Key::B, _, Action::Release, _) => {
blending = toggle_blending(blending);
println!("now blending with {:?}", blending);
}
WindowEvent::FramebufferSize(..) => {
resize = true;
}
_ => (),
}
}
if resize {
back_buffer = surface.back_buffer().unwrap();
resize = false;
}
surface
.pipeline_builder()
.pipeline(&back_buffer, &PipelineState::default(), |_, mut shd_gate| {
shd_gate.shade(&program, |_, mut rdr_gate| {
let render_state = RenderState::default()
// let’s disable the depth test so that every fragment (i.e. pixels) will rendered to every
// time we have to draw a part of a triangle
.set_depth_test(None)
// set the blending we decided earlier
.set_blending(blending);
rdr_gate.render(render_state, |mut tess_gate| match depth_method {
DepthMethod::Under => {
tess_gate.render(&red_triangle);
tess_gate.render(&blue_triangle);
}
DepthMethod::Atop => {
tess_gate.render(&blue_triangle);
tess_gate.render(&red_triangle);
}
});
});
});
surface.swap_buffers();
}
}
| in() |
freq.py | from pandas import read_csv
import _pickle as pickle
from traceback import format_exc
from .common import exists, preprocess_pandas_csv
from .common import try_remove
DEFAULT_FREQ = 1
def load_freq(freq_fpath, min_freq=1, preprocess=True, sep='\t', strip_pos=True, use_pickle=True):
f = FreqDictionary(freq_fpath, min_freq=min_freq, preprocess=preprocess, sep=sep, strip_pos=strip_pos, use_pickle=use_pickle)
return f.data
class FreqDictionary(object):
def __init__(self, freq_fpath, min_freq=1, preprocess=True, sep='\t', strip_pos=True, use_pickle=True):
""" Reads a word frequency list in CSV format "word<TAB>freq" """
if not exists(freq_fpath):
self._freq = {}
return
pkl_fpath = freq_fpath + ".pkl"
if use_pickle and exists(pkl_fpath):
voc = pickle.load(open(pkl_fpath, "rb"))
else:
# load words to datafame
if preprocess:
freq_cln_fpath = freq_fpath + "-cln"
preprocess_pandas_csv(freq_fpath, freq_cln_fpath)
word_df = read_csv(freq_cln_fpath, sep, encoding='utf-8', error_bad_lines=False)
try_remove(freq_cln_fpath)
else:
word_df = read_csv(freq_fpath, sep, encoding='utf-8', error_bad_lines=False)
# load from dataframe to dictionary
word_df = word_df.drop(word_df[word_df["freq"] < min_freq].index)
if strip_pos:
voc = {}
for i, row in word_df.iterrows():
try:
word = str(row["word"]).split("#")[0]
freq = int(row["freq"]) | if word not in voc or voc[word] < freq: voc[word] = freq
except:
print("Bad row:", row)
print(format_exc())
else:
voc = { row["word"]: row["freq"] for i, row in word_df.iterrows() }
print("dictionary is loaded:", len(voc))
if use_pickle:
pickle.dump(voc, open(pkl_fpath, "wb"))
print("Pickled voc:", pkl_fpath)
print("Loaded %d words from: %s" % (len(voc), pkl_fpath if pkl_fpath else freq_fpath))
self._freq = voc
@property
def data(self):
return self._freq
def freq(self, word):
""" Returns frequency of the word or 1 """
if word in self._freq: return self._freq[word]
else: return DEFAULT_FREQ | |
geventeventstream.py | # -*- coding: utf-8 -*-
"""
Example controller for SSE (server-side events) with gevent.
Builds on the simple SSE controller.
"""
import sys
import time
import gevent.queue
from tg import expose, request, response
from tg import url
from tg.decorators import with_trailing_slash
from eventstream import EventstreamController
class GeventEventstreamController(EventstreamController):
# set containing a gevent queue for each of the clients (browsers) listening for events
client_queues = set()
@expose()
@with_trailing_slash
def index(self):
|
@expose()
def visitstream(self):
"""sends a SSE whenever somebody visits index"""
# set charset appropriately
response.headers['Content-type'] = 'text/event-stream'
# disable charset (see EventstreamController)
response.charset = ""
# create a new queue for this new listening client
q = gevent.queue.Queue()
GeventEventstreamController.client_queues.add(q)
def stream():
while True:
yield "data: %s %s\n\n" % (q.get(), time.time())
return stream()
| """whenever a new client opens this page, sends an event to all listening clients"""
# put a gevent event in each client's queue
for q in GeventEventstreamController.client_queues:
q.put("visit received from %(REMOTE_ADDR)s with user agent %(HTTP_USER_AGENT)s" % request.environ)
# return the page for listening
return self.load_js(url('visitstream')) |
headersCheck.go | package main
import (
"github.com/sirupsen/logrus"
"net/http"
)
func | () {
req, _ := http.NewRequest(`GET`, `https://godoc.org/github.com/sirupsen/logrus#pkg-examples`, nil)
req.Header.Set(`Authorization`, "Bearer some-token")
req.Header.Set(`Content-Type`, "application/json")
logrus.Info("request URL ", req.URL)
}
| main |
producer.go | package kafka
import (
"time"
"github.com/Shopify/sarama"
"github.com/micro/go-log"
)
var Producer sarama.SyncProducer
func Init(addrs []string) error {
log.Log("Init producer")
config := sarama.NewConfig()
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Partitioner = sarama.NewRandomPartitioner
config.Producer.Return.Successes = true
config.Producer.Return.Errors = true
config.Version = sarama.V2_0_0_0
config.Admin.Timeout = 10 * time.Second
var err error
Producer, err = sarama.NewSyncProducer(addrs, config)
for try := 0; try < 10; try++ {
if err == sarama.ErrOutOfBrokers {
time.Sleep(2 * time.Second)
Producer, err = sarama.NewSyncProducer(addrs, config)
} else {
break
}
}
if err != nil {
log.Logf("Create producer failed, err:%v", err)
}
return err
}
func ProduceMsg(topic string, msg []byte) error | {
kafkaMsg := &sarama.ProducerMessage{Topic: topic}
kafkaMsg.Value = sarama.ByteEncoder(msg)
//producer.Input() <- msg
log.Logf("send message:%s\n", kafkaMsg)
partition, offset, err := Producer.SendMessage(kafkaMsg)
if err != nil {
log.Logf("Producer send message failed, err:%v\n", err)
} else {
log.Logf("message is stored in topic(%s)/partition(%d)/offset(%d)\n", topic, partition, offset)
}
return err
} |
|
collapse_example.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.3
# Email : [email protected]
###################################################################
from dayu_widgets3.collapse import MCollapse
from dayu_widgets3.label import MLabel
from dayu_widgets3.qt import *
class CollapseExample(QWidget):
| def __init__(self, parent=None):
super(CollapseExample, self).__init__(parent)
self._init_ui()
def _init_ui(self):
label_1 = MLabel(u'史蒂夫·乔布斯(Steve Jobs),1955年2月24日生于美国加利福尼亚州旧金山,美国发明家、企业家、美国苹果公司联合创办人。')
label_2 = MLabel(
u'斯蒂夫·盖瑞·沃兹尼亚克(Stephen Gary Wozniak),美国电脑工程师,曾与史蒂夫·乔布斯合伙创立苹果电脑(今之苹果公司)。斯蒂夫·盖瑞·沃兹尼亚克曾就读于美国科罗拉多大学,后转学入美国著名高等学府加州大学伯克利分校(UC Berkeley)并获得电机工程及计算机(EECS)本科学位(1987年)。')
label_3 = MLabel(
u'乔纳森·伊夫是一位工业设计师,现任Apple公司设计师兼资深副总裁,英国爵士。他曾参与设计了iPod,iMac,iPhone,iPad等众多苹果产品。除了乔布斯,他是对苹果那些著名的产品最有影响力的人。')
label_1.setWordWrap(True)
label_2.setWordWrap(True)
label_3.setWordWrap(True)
section_list = [
{
'title': u'史蒂夫乔布斯',
'expand': True,
'widget': label_1
}, {
'title': u'斯蒂夫·盖瑞·沃兹尼亚克',
'expand': True,
'widget': label_2
}
]
section_group = MCollapse()
section_group.add_section_list(section_list)
main_lay = QVBoxLayout()
main_lay.addWidget(section_group)
main_lay.addStretch()
self.setLayout(main_lay)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = CollapseExample()
from dayu_widgets3 import dayu_theme
dayu_theme.apply(test)
test.show()
sys.exit(app.exec_())
|
|
verify.go | // Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"fmt"
"os"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/datadir"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
"go.etcd.io/etcd/server/v3/mvcc/backend"
wal2 "go.etcd.io/etcd/server/v3/wal"
"go.etcd.io/etcd/server/v3/wal/walpb"
"go.uber.org/zap"
)
const ENV_VERIFY = "ETCD_VERIFY"
const ENV_VERIFY_ALL_VALUE = "all"
type Config struct {
// DataDir is a root directory where the data being verified are stored.
DataDir string
// ExactIndex requires consistent_index in backend exactly match the last committed WAL entry.
// Usually backend's consistent_index needs to be <= WAL.commit, but for backups the match
// is expected to be exact.
ExactIndex bool
Logger *zap.Logger
}
// Verify performs consistency checks of given etcd data-directory.
// The errors are reported as the returned error, but for some situations
// the function can also panic.
// The function is expected to work on not-in-use data model, i.e.
// no file-locks should be taken. Verify does not modified the data.
func Verify(cfg Config) error {
lg := cfg.Logger
if lg == nil {
lg = zap.NewNop()
}
var err error
lg.Info("verification of persisted state", zap.String("data-dir", cfg.DataDir))
defer func() {
if err != nil {
lg.Error("verification of persisted state failed",
zap.String("data-dir", cfg.DataDir),
zap.Error(err))
} else if r := recover(); r != nil {
lg.Error("verification of persisted state failed",
zap.String("data-dir", cfg.DataDir))
panic(r)
} else {
lg.Info("verification of persisted state successful", zap.String("data-dir", cfg.DataDir))
}
}()
beConfig := backend.DefaultBackendConfig()
beConfig.Path = datadir.ToBackendFileName(cfg.DataDir)
beConfig.Logger = cfg.Logger
be := backend.New(beConfig)
defer be.Close()
snapshot, hardstate, err := validateWal(cfg)
if err != nil {
return err
}
// TODO: Perform validation of consistency of membership between
// backend/members & WAL confstate (and maybe storev2 if still exists).
return validateConsistentIndex(cfg, hardstate, snapshot, be)
}
// VerifyIfEnabled performs verification according to ETCD_VERIFY env settings.
// See Verify for more information.
func | (cfg Config) error {
if os.Getenv(ENV_VERIFY) == ENV_VERIFY_ALL_VALUE {
return Verify(cfg)
}
return nil
}
// MustVerifyIfEnabled performs verification according to ETCD_VERIFY env settings
// and exits in case of found problems.
// See Verify for more information.
func MustVerifyIfEnabled(cfg Config) {
if err := VerifyIfEnabled(cfg); err != nil {
cfg.Logger.Fatal("Verification failed",
zap.String("data-dir", cfg.DataDir),
zap.Error(err))
}
}
func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error {
tx := be.BatchTx()
index, term := cindex.ReadConsistentIndex(tx)
if cfg.ExactIndex && index != hardstate.Commit {
return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit)
}
if cfg.ExactIndex && term != hardstate.Term {
return fmt.Errorf("backend.Term (%v) expected == WAL.HardState.term, (%v)", term, hardstate.Term)
}
if index > hardstate.Commit {
return fmt.Errorf("backend.ConsistentIndex (%v) must be <= WAL.HardState.commit (%v)", index, hardstate.Commit)
}
if term > hardstate.Term {
return fmt.Errorf("backend.Term (%v) must be <= WAL.HardState.term, (%v)", term, hardstate.Term)
}
if index < snapshot.Index {
return fmt.Errorf("backend.ConsistentIndex (%v) must be >= last snapshot index (%v)", index, snapshot.Index)
}
cfg.Logger.Info("verification: consistentIndex OK", zap.Uint64("backend-consistent-index", index), zap.Uint64("hardstate-commit", hardstate.Commit))
return nil
}
func validateWal(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) {
walDir := datadir.ToWalDir(cfg.DataDir)
walSnaps, err := wal2.ValidSnapshotEntries(cfg.Logger, walDir)
if err != nil {
return nil, nil, err
}
snapshot := walSnaps[len(walSnaps)-1]
hardstate, err := wal2.Verify(cfg.Logger, walDir, snapshot)
if err != nil {
return nil, nil, err
}
return &snapshot, hardstate, nil
}
| VerifyIfEnabled |
benchmarking.rs | // ! Benchmarking setup for pallet-template
use super::*;
use frame_benchmarking::{benchmarks, impl_benchmark_test_suite};
use frame_system::RawOrigin;
use sp_std::{boxed::Box, vec, vec::Vec};
#[allow(unused)]
use crate::Module as ProcessValidation;
// TODO implement benchmarking
benchmarks! {
create_process {
}: _(RawOrigin::Root)
verify {
}
disable_process {
}: _(RawOrigin::Root)
verify { | }
impl_benchmark_test_suite!(ProcessValidation, crate::mock::new_test_ext(), crate::mock::Test,); | } |
context.rs | use core::fmt::{Display, Formatter};
use super::{Context, Value, Wrapped};
use crate::permutation::Permutation;
/// A Context is a container of values.
/// Ideally it should not have more than 20 elements
/// but this is not a hard limit.
///
#[derive(Default)]
pub struct ContextImpl(pub(super) Vec<Box<dyn Value>>);
impl std::fmt::Display for ContextImpl {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "(")?;
let mut it = self.0.iter();
if let Some(value) = it.next() {
write!(fmt, "{:?}", value)?;
}
for value in it {
write!(fmt, ",{:?}", value)?;
}
write!(fmt, ")")
}
}
#[derive(Debug)]
enum | {}
impl Display for Bottom {
fn fmt(&self, _:&mut Formatter) -> std::fmt::Result { Ok(()) }
}
impl Context for ContextImpl {
fn empty_value(&self) -> Box<dyn Value> {
Box::new(<Wrapped<Bottom> as Default>::default())
}
fn create_empty(&self) -> Box<dyn Context> {
Box::new(ContextImpl::default())
}
fn len(&self) -> u8 {
self.0.len() as u8
}
/// Perform a permutation over the values.
///
/// p: the permutation to perform.
///
fn permutate(&mut self, p: Permutation) {
p.permutate(&mut self.0)
}
fn take_after(&mut self, at: u8, values_accepter: &mut dyn FnMut(&mut dyn Value)) -> u8 {
let at = if self.0.len() < at as usize {
0
} else {
at as usize
};
let mut values = self.0.split_off(at);
for value in values.iter_mut() {
values_accepter(&mut **value);
}
values.len() as u8
}
fn extend(&mut self, values: &mut dyn Iterator<Item = &mut dyn Value>) {
self.0.extend(values.map(|x| x.take()));
}
}
impl Drop for ContextImpl {
fn drop(&mut self) {}
}
#[cfg(test)]
mod test {
use crate::value::context::ContextImpl;
use crate::value::traits::{Context, ContextExt};
use crate::value::{unwrap, wrap};
#[test]
fn test_take_values() {
let mut c = ContextImpl(vec![]);
c.push(wrap(10i32));
assert_eq!(1, c.take_after(0, &mut |v| ()));
}
#[test]
fn test_pop_push() {
let mut c = ContextImpl(vec![]);
c.push(wrap(10i32));
assert_eq!(1, c.len());
assert_eq!(10i32, unwrap::<i32>(c.pop().unwrap()).unwrap());
assert!(c.is_empty());
}
}
| Bottom |
dockerhub.test.js | const login = require("../../../collections/login");
const providers = require("../../../collections/providers");
const constants = require("../../../config/constants");
const common = require("../../../utility/common");
const logs = require("../../../utility/logs");
const query = require("../../../config/query");
const elementHandler = require("../../../utility/elementHandler");
const checkSuccess = require("../../../schemaValidate/check.success");
const typeSuccess = require("../../../schemaValidate/type.success");
const accessTypeSuccess = require("../../../schemaValidate/accesstype.success");
const createProSuccess = require("../../../schemaValidate/create.success");
const workerSuccess = require("../../../schemaValidate/worker.success");
const loggingSuccess = require("../../../schemaValidate/logging.success");
const addContext = require("mochawesome/addContext");
beforeEach(function (done) {
const token = common.searchOfEnv("login", "login", "token");
login.check()
.end((err, res) => {
if (res.statusCode !== 200)
logs.getApiResponse(res);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
elementHandler.toBeEqual(res.body.email_verified, constants.email_verified);
elementHandler.schemaValidate(res.body, checkSuccess);
elementHandler.toBeEqual(res.body.token, token);
done();
});
});
const accessType = "DockerHub Token";
describe("Create Docker Hub provider test case", () => {
it("GET /provider/types", function (done) {
providers.types("?id=25")
.end((err, res) => {
if (res.statusCode !== 200)
logs.getApiResponse(res);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
elementHandler.schemaValidate(res.body, typeSuccess);
for (const obj of res.body) {
if (obj.name === constants.DOCKER) {
elementHandler.toBeTrue(obj.enabled);
elementHandler.toBeEqual(obj.name, constants.DOCKER);
//set provider type id and name in the env variable
common.readOrWriteJsonFile("provider", "provider_type", "id", obj.id);
common.readOrWriteJsonFile("provider", "provider_type", "name", obj.name);
console.log("Docker Hub provider type id and name: ", [obj.id, obj.name]);
addContext(this, "Docker Hub provider type id and name: " + [obj.id, obj.name]);
}
}
done();
});
});
it("GET /provider/access_types", function (done) {
const provider_type_id = "?provider_type_id=" + common.searchOfEnv("provider", "provider_type", "id");
providers.accessType(provider_type_id)
.end((err, res) => {
if (res.statusCode !== 200)
logs.getApiResponse(res);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
elementHandler.schemaValidate(res.body, accessTypeSuccess);
for (const obj of res.body) {
if (obj.name === accessType) {
elementHandler.toBeEqual(obj.name, accessType);
//set access type id and name in env variable
common.readOrWriteJsonFile("provider", "access_type", "id", obj.id);
common.readOrWriteJsonFile("provider", "access_type", "name", obj.name);
console.log("Docker Hub access type id and name: ", [obj.id, obj.name]);
addContext(this, "Docker Hub access type id and name: " + [obj.id, obj.name]);
}
}
done();
});
});
it("POST /provider", function (done) {
const param = {
"name": "docker-" + common.getRandomString(2),
"password": process.env.docker_hub_token,
"url": process.env.docker_hub_url,
"user_name": process.env.docker_hub_user,
}
providers.create()
.send(param)
.end((err, res) => {
if (res.statusCode !== 200)
logs.postApiResponse(res);
if (common.isEmpty(res.body)) {
console.log("response body: ", res.body);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
} else {
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
elementHandler.schemaValidate(res.body, createProSuccess);
// create provider id and name set in env file
common.readOrWriteJsonFile("provider", "provider_details", "id", res.body.id);
common.readOrWriteJsonFile("provider", "provider_details", "name", res.body.name);
console.log("Docker Hub create provider id and name: ", [res.body.id, res.body.name]);
addContext(this, "Docker Hub create provider id and name: " + [res.body.id, res.body.name]);
}
done();
});
});
describe('Sync provider test case', () => {
it('POST / provider/sync', function (done) {
providers.sync()
.end((err, res) => {
if (res.statusCode !== 200)
logs.postApiResponse(res);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
elementHandler.toBeEqual(res.body.message, 'published');
console.log('Sync provider: ', [res.body.message]);
addContext(this, 'Sync provider: ' + res.body.message);
done();
});
});
});
describe('Sync logs provider test case', () => {
it('GET / worker_requests', function () {
const pro_id = common.searchOfEnv('provider', 'provider_details', 'id');
return providers.worker(query.worker + pro_id)
.then((res) => {
if (res.statusCode !== 200) | logs.getApiResponse(res);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
elementHandler.schemaValidate(res.body, workerSuccess);
for (var i = 0; i < res.body.length; i++) {
if (i === 0) {
list = res.body[i];
elementHandler.toBeEqual(list.provider_id, pro_id);
common.readOrWriteJsonFile('provider', 'worker_requests', 'logs_id', list.id)
return;
}
}
});
});
it('GET / worker_request', function (done) {
const log_id = common.searchOfEnv('provider', 'worker_requests', 'logs_id');
providers.worker('/' + log_id)
.end((err, res) => {
if (res.statusCode !== 200)
logs.getApiResponse(res);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
if (res.body.id === log_id) {
elementHandler.toBeEqual(res.body.id, log_id);
console.log('Worker requests id: ', [res.body.id]);
addContext(this, 'Worker requests id: ' + res.body.id);
}
done();
});
});
it('GET / logging', function (done) {
const log_id = common.searchOfEnv('provider', 'worker_requests', 'logs_id');
providers.logging(log_id)
.end((err, res) => {
if (res.statusCode !== 200)
logs.getApiResponse(res);
elementHandler.responseExpect(res.body, res.statusCode, 200, res.res.statusMessage, constants.STATUS_TEXT_OK, res.ok);
elementHandler.schemaValidate(res.body, loggingSuccess);
common.tryDelay(5000);
for (const obj of res.body) {
if (obj.id === log_id) {
elementHandler.toBeEqual(obj.id, log_id);
console.log('Logging id and status: ', [obj.id, obj.status]);
addContext(this, 'Logging id and status: ' + [obj.id, obj.status]);
}
}
done();
});
});
});
}); | |
go1_13_log.go | // Code generated by 'goexports log'. DO NOT EDIT.
// +build go1.13,!go1.14
package stdlib
import (
"go/constant"
"go/token"
"log"
"reflect"
)
func init() | {
Symbols["log"] = map[string]reflect.Value{
// function, constant and variable definitions
"Fatal": reflect.ValueOf(log.Fatal),
"Fatalf": reflect.ValueOf(log.Fatalf),
"Fatalln": reflect.ValueOf(log.Fatalln),
"Flags": reflect.ValueOf(log.Flags),
"LUTC": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"Ldate": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"Llongfile": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"Lmicroseconds": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"Lshortfile": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"LstdFlags": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"Ltime": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"New": reflect.ValueOf(log.New),
"Output": reflect.ValueOf(log.Output),
"Panic": reflect.ValueOf(log.Panic),
"Panicf": reflect.ValueOf(log.Panicf),
"Panicln": reflect.ValueOf(log.Panicln),
"Prefix": reflect.ValueOf(log.Prefix),
"Print": reflect.ValueOf(log.Print),
"Printf": reflect.ValueOf(log.Printf),
"Println": reflect.ValueOf(log.Println),
"SetFlags": reflect.ValueOf(log.SetFlags),
"SetOutput": reflect.ValueOf(log.SetOutput),
"SetPrefix": reflect.ValueOf(log.SetPrefix),
"Writer": reflect.ValueOf(log.Writer),
// type definitions
"Logger": reflect.ValueOf((*log.Logger)(nil)),
}
} |
|
issue_690.rs | //! Tests auto-converted from "sass-spec/spec/libsass-closed-issues/issue_690.hrx"
#[allow(unused)]
fn runner() -> crate::TestRunner {
super::runner()
}
#[test]
fn test() | {
assert_eq!(
runner().ok("test {\
\n left: expression(callSomeFunc());\
\n content: expression(\"Smile :-)\");\
\n}\n"),
"test {\
\n left: expression(callSomeFunc());\
\n content: expression(\"Smile :-)\");\
\n}\n"
);
} |
|
goal.rs | use meters::goal::*;
use prototty::*;
use std::fmt::Write;
pub struct GoalView {
scratch: String,
}
impl GoalView {
pub fn new() -> Self {
Self {
scratch: String::new(),
}
}
} | &mut self,
&goal: &Option<(GoalType, bool)>,
offset: Coord,
depth: i32,
grid: &mut G,
) {
if let Some(&(goal, complete)) = goal.as_ref() {
self.scratch.clear();
match goal {
GoalType::Escape => write!(self.scratch, "Escape!").unwrap(),
GoalType::KillBoss => write!(self.scratch, "Kill the boss!").unwrap(),
GoalType::KillEggs => {
write!(self.scratch, "Kill the eggs before they hatch!").unwrap()
}
GoalType::ActivateBeacon => {
write!(self.scratch, "Activate the emergency beacon!").unwrap()
}
}
if complete {
write!(self.scratch, " (COMPLETE)").unwrap();
}
StringView.view(&self.scratch, offset, depth, grid);
}
}
} |
impl View<Option<(GoalType, bool)>> for GoalView {
fn view<G: ViewGrid>( |
client.go | package main
import (
"crypto/rand"
"encoding/base64"
"fmt"
"koding/broker/storage"
"koding/tools/sockjs"
"strconv"
"strings"
"time"
"github.com/streadway/amqp"
set "gopkg.in/fatih/set.v0"
)
type Client struct {
// Holds SockJS session
Session *sockjs.Session
// ControlChannel for communicating with authworker
ControlChannel *amqp.Channel
// Holds the socket id for Client Session
SocketId string
// Main broker singleton
Broker *Broker
// LastPayload is used for trying to send the same payload again
// if any error occures while publishing
LastPayload string
// Subscriptions holds subscriptions of the current client
Subscriptions storage.Subscriptionable
}
// NewClient retuns a new client which represents the connected client
// it holds required information about the client/session
func NewClient(session *sockjs.Session, broker *Broker) (*Client, error) {
socketId := randomString()
session.Tag = socketId
controlChannel, err := broker.PublishConn.Channel()
if err != nil {
return nil, fmt.Errorf("Couldnt create publish channel %v", err)
}
subscriptions, err := createSubscriptionStorage(broker, socketId)
if err != nil {
return nil, err
}
globalMapMutex.Lock()
sessionsMap[socketId] = session
globalMapMutex.Unlock()
return &Client{
Session: session,
SocketId: socketId,
ControlChannel: controlChannel,
Broker: broker,
Subscriptions: subscriptions,
}, nil
}
// createSubscriptionStorage arranges a storage place for subscriptions
// it can be Redis backend or inmemory Set storage
func createSubscriptionStorage(broker *Broker, socketId string) (storage.Subscriptionable, error) {
// first try to create a redis storage
if subscriptions, err := storage.NewRedisStorage(broker.RedisSingleton, conf, socketId); err == nil {
// if we success, just return the storage
return subscriptions, nil
} else {
log.Critical("Couldnt access to redis/create a key for client %v: Error: %v", socketId, err)
}
// if we try to create subscription storage backend with redis and fail
// create an inmemory storage system
if subscriptions, err := storage.NewStorage(conf, storage.SET, socketId); err == nil {
return subscriptions, nil
}
// this will never fail to here, because SET returns nil as error
return nil, fmt.Errorf("Couldnt create subscription storage for Client: %v", socketId)
}
// Close should be called whenever a client disconnects.
// Close removes client's subscriptions from routeMap immediately
// It waits for 5 minutes before clearing the client's subscriptions because if this
// is a temp glitch on network client should be able to resubscribe to all of them again
func (c *Client) Close() {
log.Debug("Client Close Request for socketID: %v", c.SocketId)
c.Subscriptions.Each(func(routingKeyPrefix interface{}) bool {
c.RemoveFromRoute(routingKeyPrefix.(string))
return true
})
c.Subscriptions.ClearWithTimeout(time.Minute * 5)
for {
err := c.ControlChannel.Publish(c.Broker.Config.AuthAllExchange, "broker.clientDisconnected", false, false, amqp.Publishing{Body: []byte(c.SocketId)})
if err == nil {
break
}
if amqpError, isAmqpError := err.(*amqp.Error); !isAmqpError || amqpError.Code != amqp.ChannelError {
log.Critical("Error while publising -not rabbitmq error- %v", err)
}
c.resetControlChannel()
}
log.Debug("Closing control channel for socketID: %v", c.SocketId)
c.ControlChannel.Close()
globalMapMutex.Lock()
defer globalMapMutex.Unlock()
delete(sessionsMap, c.SocketId)
}
// handleSessionMessage handles the received message from the client. It
// passes a response back to the client or publish the received message to a
// rabbitmq exchange for further process.
func (c *Client) handleSessionMessage(data interface{}) {
message := data.(map[string]interface{})
log.Debug("Received message: %v", message)
action := message["action"]
switch action {
case "subscribe":
routingKeyPrefixes := strings.Split(message["routingKeyPrefix"].(string), " ")
if err := c.Subscribe(routingKeyPrefixes...); err != nil {
log.Error(err.Error())
}
sendToClient(c.Session, "broker.subscribed", message["routingKeyPrefix"])
case "resubscribe":
clientId := message["socketId"].(string)
log.Debug("Resubscribe event for clientId: %v SocketId: %v", clientId, c.SocketId)
found, err := c.Resubscribe(clientId)
if err != nil {
log.Error(err.Error())
}
log.Debug("Resubscribe found for socketID: %v, %v", clientId, found)
sendToClient(c.Session, "broker.resubscribed", found)
case "unsubscribe":
routingKeyPrefix := message["routingKeyPrefix"].(string)
log.Debug("Unsubscribe event for socketID: %v, and prefixes", c.SocketId, routingKeyPrefix)
routingKeyPrefixes := strings.Split(routingKeyPrefix, " ")
c.Unsubscribe(routingKeyPrefixes...)
case "publish":
exchange := message["exchange"].(string)
routingKey := message["routingKey"].(string)
payload := message["payload"].(string)
log.Debug("Publish Event: Exchange: %v, RoutingKey %v, Payload %v",
exchange,
routingKey,
payload,
)
if err := c.Publish(exchange, routingKey, payload); err != nil {
log.Error(err.Error())
}
case "ping":
sendToClient(c.Session, "broker.pong", nil)
if c.Subscriptions.Backend() == storage.REDIS {
// TOOD - may be we need to revisit this part later about duration and request count
go c.Subscriptions.ClearWithTimeout(time.Minute * 59)
}
default:
log.Warning("Invalid action. message: %v socketId: %v", message, c.SocketId)
}
}
// Publish publish the given payload for to the given exchange and routingkey.
// if publishing fails for given payload waits for quarter of a second
func (c *Client) Publish(exchange, routingKey, payload string) error {
if !strings.HasPrefix(routingKey, "client.") {
return fmt.Errorf("Invalid routing key: %v socketId: %v", routingKey, c.SocketId)
}
for {
c.LastPayload = ""
err := c.ControlChannel.Publish(exchange, routingKey, false, false, amqp.Publishing{CorrelationId: c.SocketId, Body: []byte(payload)})
if err == nil {
c.LastPayload = payload
break
}
if amqpError, isAmqpError := err.(*amqp.Error); !isAmqpError || amqpError.Code != amqp.ChannelError {
log.Warning("payload: %v routing key: %v exchange: %v err: %v",
payload, routingKey, exchange, err)
}
time.Sleep(time.Second / 4) // penalty for crashing the AMQP channel
c.resetControlChannel()
}
return nil
}
// gaugeStart starts the gauge for a given session. It returns a new
// function which ends the gauge for the given session. Usually one invokes
// gaugeStart and calls the returned function in a defer statement.
func (c *Client) gaugeStart() (gaugeEnd func()) {
log.Debug("Client connected: %v", c.Session.Tag)
changeClientsGauge(1)
changeNewClientsGauge(1)
if c.Session.IsWebsocket {
changeWebsocketClientsGauge(1)
}
return func() {
log.Debug("Client disconnected: %v", c.Session.Tag)
changeClientsGauge(-1)
if c.Session.IsWebsocket {
changeWebsocketClientsGauge(-1)
}
}
}
// resetControlChannel closes the current client's control channel and creates
// a new channel. It also listens to any server side error and publish back
// the error to the client.
func (c *Client) resetControlChannel() {
defer log.RecoverAndLog()
if c.ControlChannel != nil {
c.ControlChannel.Close()
}
var err error
c.ControlChannel, err = c.Broker.PublishConn.Channel()
if err != nil {
log.Critical("Couldnt create publishing channel %v", err)
}
go func() {
defer log.RecoverAndLog()
for amqpErr := range c.ControlChannel.NotifyClose(make(chan *amqp.Error)) {
if !(strings.Contains(amqpErr.Error(), "NOT_FOUND") && (strings.Contains(amqpErr.Error(), "koding-social-") || strings.Contains(amqpErr.Error(), "auth-"))) {
log.Warning("AMQP channel: %v Last publish payload: %v", amqpErr.Error(), c.LastPayload)
}
sendToClient(c.Session, "broker.error", map[string]interface{}{
"code": amqpErr.Code,
"reason": amqpErr.Reason,
"server": amqpErr.Server,
"recover": amqpErr.Recover,
})
}
}()
}
// RemoveFromRoute removes the sessions for the given routingKeyPrefixes.
func (c *Client) RemoveFromRoute(routingKeyPrefixes ...string) {
globalMapMutex.Lock()
defer globalMapMutex.Unlock()
for _, routingKeyPrefix := range routingKeyPrefixes {
if _, ok := routeMap[routingKeyPrefix]; !ok {
continue
}
routeMap[routingKeyPrefix].Remove(c.SocketId)
if routeMap[routingKeyPrefix].Size() == 0 {
delete(routeMap, routingKeyPrefix)
}
}
}
// AddToRoute ads routes to the routeMap for client
func (c *Client) AddToRoute() {
c.Subscriptions.Each(func(routingKeyPrefix interface{}) bool {
c.AddToRouteMapNOTS(routingKeyPrefix.(string))
return true
})
}
// AddToRouteMapNOTS adds given routingKeys to the global routemap
// it is non-thread-safe function, developers should use it with their
// own thread safe wrapping
func (c *Client) AddToRouteMapNOTS(routingKeyPrefixes ...string) {
for _, routingKeyPrefix := range routingKeyPrefixes {
if _, ok := routeMap[routingKeyPrefix]; !ok {
routeMap[routingKeyPrefix] = set.New()
}
routeMap[routingKeyPrefix].Add(c.SocketId)
}
}
// Subscribe add the given routingKeyPrefix to the list of subscriptions
// associated with this client.
func (c *Client) Subscribe(routingKeyPrefixes ...string) error {
if err := c.Subscriptions.Subscribe(routingKeyPrefixes...); err != nil {
return err
}
globalMapMutex.Lock()
c.AddToRouteMapNOTS(routingKeyPrefixes...)
globalMapMutex.Unlock()
// Log some information about the Client
go func() {
length, err := c.Subscriptions.Len()
if err != nil {
log.Warning("Error while trying to get Subscriptions.Len() for: %v Error: %v", c.Session.Tag, err)
}
if length > 0 && length%2000 == 0 {
log.Warning("Client with more than %v subscriptions %v", strconv.Itoa(length), c.Session.Tag)
}
}()
return nil
}
// Resubscribe tries to resubscribe with another sessionId
// it is useful when client disconnected and a while after
// tries to subscribe again, so there will not be that many
// communication between broker and the client
func (c *Client) Resubscribe(sessionId string) (bool, error) {
found, err := c.Subscriptions.Resubscribe(sessionId)
if err != nil {
return false, err
}
if !found {
return false, nil
}
c.AddToRoute()
return true, nil
}
// Unsubscribe deletes the given routingKey prefix from the subscription list
// and removes it from the global route map
func (c *Client) Unsubscribe(routingKeyPrefixes ...string) {
c.RemoveFromRoute(routingKeyPrefixes...)
if err := c.Subscriptions.Unsubscribe(routingKeyPrefixes...); err != nil {
fmt.Errorf("%v", err)
}
}
// randomString() returns a new 16 char length random string
func | () string {
r := make([]byte, 128/8)
rand.Read(r)
return base64.StdEncoding.EncodeToString(r)
}
| randomString |
take.ts | import { MonoTypeOperatorFunction } from "../types.ts";
import { EMPTY } from "../observable/empty.ts";
import { operate } from "../util/lift.ts";
import { OperatorSubscriber } from "./OperatorSubscriber.ts";
/**
* Emits only the first `count` values emitted by the source Observable.
*
* <span class="informal">Takes the first `count` values from the source, then
* completes.</span>
*
* 
*
* `take` returns an Observable that emits only the first `count` values emitted
* by the source Observable. If the source emits fewer than `count` values then
* all of its values are emitted. After that, it completes, regardless if the
* source completes.
*
* ## Example
*
* Take the first 5 seconds of an infinite 1-second interval Observable
*
* ```ts
* import { interval, take } from 'rxjs';
*
* const intervalCount = interval(1000);
* const takeFive = intervalCount.pipe(take(5));
* takeFive.subscribe(x => console.log(x));
*
* // Logs:
* // 0
* // 1
* // 2
* // 3
* // 4
* ```
*
* @see {@link takeLast}
* @see {@link takeUntil}
* @see {@link takeWhile}
* @see {@link skip}
*
* @param count The maximum number of `next` values to emit.
* @return A function that returns an Observable that emits only the first
* `count` values emitted by the source Observable, or all of the values from
* the source if the source emits fewer than `count` values.
*/
export function | <T>(count: number): MonoTypeOperatorFunction<T> {
return count <= 0
? // If we are taking no values, that's empty.
() => EMPTY
: operate((source, subscriber) => {
let seen = 0;
source.subscribe(
new OperatorSubscriber(subscriber, (value) => {
// Increment the number of values we have seen,
// then check it against the allowed count to see
// if we are still letting values through.
if (++seen <= count) {
subscriber.next(value);
// If we have met or passed our allowed count,
// we need to complete. We have to do <= here,
// because re-entrant code will increment `seen` twice.
if (count <= seen) {
subscriber.complete();
}
}
}),
);
});
}
| take |
apps.py | from django.apps import AppConfig
class ContatoConfig(AppConfig):
| default_auto_field = 'django.db.models.BigAutoField'
name = 'contato' |
|
registry.go | package distribution
import (
"fmt"
"net"
"net/http"
"net/url"
"time"
"github.com/docker/distribution"
distreference "github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/client"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/registry"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
type dumbCredentialStore struct {
auth *types.AuthConfig
}
func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) {
return dcs.auth.Username, dcs.auth.Password
}
func (dcs dumbCredentialStore) RefreshToken(*url.URL, string) string {
return dcs.auth.IdentityToken
}
func (dcs dumbCredentialStore) SetRefreshToken(*url.URL, string, string) {
}
// NewV2Repository returns a repository (v2 only). It creates a HTTP transport
// providing timeout settings and authentication support, and also verifies the
// remote API version.
func | (ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) {
repoName := repoInfo.FullName()
// If endpoint does not support CanonicalName, use the RemoteName instead
if endpoint.TrimHostname {
repoName = repoInfo.RemoteName()
}
// TODO(dmcgowan): Call close idle connections when complete, use keep alive
base := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: endpoint.TLSConfig,
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
DisableKeepAlives: true,
}
modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders)
authTransport := transport.NewTransport(base, modifiers...)
challengeManager, foundVersion, err := registry.PingV2Registry(endpoint, authTransport)
if err != nil {
transportOK := false
if responseErr, ok := err.(registry.PingResponseError); ok {
transportOK = true
err = responseErr.Err
}
return nil, foundVersion, fallbackError{
err: err,
confirmedV2: foundVersion,
transportOK: transportOK,
}
}
if authConfig.RegistryToken != "" {
passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
} else {
creds := dumbCredentialStore{auth: authConfig}
tokenHandlerOptions := auth.TokenHandlerOptions{
Transport: authTransport,
Credentials: creds,
Scopes: []auth.Scope{
auth.RepositoryScope{
Repository: repoName,
Actions: actions,
},
},
ClientID: registry.AuthClientID,
}
tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
basicHandler := auth.NewBasicHandler(creds)
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
}
tr := transport.NewTransport(base, modifiers...)
repoNameRef, err := distreference.ParseNamed(repoName)
if err != nil {
return nil, foundVersion, fallbackError{
err: err,
confirmedV2: foundVersion,
transportOK: true,
}
}
repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr)
if err != nil {
err = fallbackError{
err: err,
confirmedV2: foundVersion,
transportOK: true,
}
}
return
}
type existingTokenHandler struct {
token string
}
func (th *existingTokenHandler) Scheme() string {
return "bearer"
}
func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
return nil
}
| NewV2Repository |
scrollIntoView.d.ts | /// <reference types="webdriverio/webdriverio-core" />
export default function scrollIntoView(this: WebdriverIO.Element, scrollIntoViewOptions?: boolean): Promise<void>; | //# sourceMappingURL=scrollIntoView.d.ts.map |
|
NavegacaoDrawer.js | import React from "react";
import { createDrawerNavigator } from "react-navigation-drawer";
import { HeaderButtons, Item } from "react-navigation-header-buttons";
import HeaderButton from "./HeaderButton";
import NavegacaoBottomTab from "./NavegacaoBottomTab";
import AvaliacaoScreen from "./Avaliacao";
import ConfiguracaoScreen from "./Configuracao";
import InfoScreen from "./Info";
import { createStackNavigator } from "react-navigation-stack";
function | (headerTitle) {
return navigationData => {
return {
headerTitle: headerTitle,
headerLeft: () => (
<HeaderButtons HeaderButtonComponent={HeaderButton}>
<Item
title="Menu"
iconName="arrow-back"
iconSize={25}
onPress={() => {
navigationData.navigation.navigate({ routeName: "Home" });
}}
/>
</HeaderButtons>
)
};
};
}
const drawer = {
Home: {
screen: NavegacaoBottomTab
},
Avaliacao: {
screen: createStackNavigator({
Avaliacao: {
screen: AvaliacaoScreen,
navigationOptions: menuBack("Avaliação")
}
}),
navigationOptions: {
drawerLabel: "Avaliação"
}
},
Configuracao: {
screen: createStackNavigator({
Configuracao: {
screen: ConfiguracaoScreen,
navigationOptions: menuBack("Configuração")
}
}),
navigationOptions: {
drawerLabel: "Configuração"
}
},
Info: {
screen: createStackNavigator({
Info: {
screen: InfoScreen,
navigationOptions: menuBack("Informações")
}
}),
navigationOptions: {
drawerLabel: "Informações"
}
}
};
export default createDrawerNavigator(drawer);
| menuBack |
test_init.py | """Unit tests for platform/plant.py."""
from datetime import datetime, timedelta
import pytest
from homeassistant.components import recorder
import homeassistant.components.plant as plant
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONDUCTIVITY,
STATE_OK,
STATE_PROBLEM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from tests.common import init_recorder_component
GOOD_DATA = {
"moisture": 50,
"battery": 90,
"temperature": 23.4,
"conductivity": 777,
"brightness": 987,
}
BRIGHTNESS_ENTITY = "sensor.mqtt_plant_brightness"
MOISTURE_ENTITY = "sensor.mqtt_plant_moisture"
GOOD_CONFIG = {
"sensors": {
"moisture": MOISTURE_ENTITY,
"battery": "sensor.mqtt_plant_battery",
"temperature": "sensor.mqtt_plant_temperature",
"conductivity": "sensor.mqtt_plant_conductivity",
"brightness": BRIGHTNESS_ENTITY,
},
"min_moisture": 20,
"max_moisture": 60,
"min_battery": 17,
"min_conductivity": 500,
"min_temperature": 15,
"min_brightness": 500,
}
async def test_valid_data(hass):
"""Test processing valid data."""
sensor = plant.Plant("my plant", GOOD_CONFIG)
sensor.entity_id = "sensor.mqtt_plant_battery"
sensor.hass = hass
for reading, value in GOOD_DATA.items():
sensor.state_changed(
GOOD_CONFIG["sensors"][reading],
None,
State(GOOD_CONFIG["sensors"][reading], value),
)
assert sensor.state == "ok"
attrib = sensor.state_attributes
for reading, value in GOOD_DATA.items():
# battery level has a different name in
# the JSON format than in hass
assert attrib[reading] == value
async def test_low_battery(hass):
"""Test processing with low battery data and limit set."""
sensor = plant.Plant("other plant", GOOD_CONFIG)
sensor.entity_id = "sensor.mqtt_plant_battery"
sensor.hass = hass
assert sensor.state_attributes["problem"] == "none"
sensor.state_changed(
"sensor.mqtt_plant_battery",
State("sensor.mqtt_plant_battery", 45),
State("sensor.mqtt_plant_battery", 10),
)
assert sensor.state == "problem"
assert sensor.state_attributes["problem"] == "battery low"
async def test_initial_states(hass):
"""Test plant initialises attributes if sensor already exists."""
hass.states.async_set(MOISTURE_ENTITY, 5, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY})
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert 5 == state.attributes[plant.READING_MOISTURE]
async def test_update_states(hass):
"""Test updating the state of a sensor.
Make sure that plant processes this correctly.
"""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(MOISTURE_ENTITY, 5, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_PROBLEM == state.state
assert 5 == state.attributes[plant.READING_MOISTURE]
async def test_unavailable_state(hass):
"""Test updating the state with unavailable.
Make sure that plant processes this correctly.
"""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(
MOISTURE_ENTITY, STATE_UNAVAILABLE, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert state.state == STATE_PROBLEM
assert state.attributes[plant.READING_MOISTURE] == STATE_UNAVAILABLE
async def test_state_problem_if_unavailable(hass):
"""Test updating the state with unavailable after setting it to valid value.
Make sure that plant processes this correctly.
"""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(MOISTURE_ENTITY, 42, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert state.state == STATE_OK
assert state.attributes[plant.READING_MOISTURE] == 42
hass.states.async_set(
MOISTURE_ENTITY, STATE_UNAVAILABLE, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert state.state == STATE_PROBLEM
assert state.attributes[plant.READING_MOISTURE] == STATE_UNAVAILABLE
@pytest.mark.skipif(
plant.ENABLE_LOAD_HISTORY is False,
reason="tests for loading from DB are unstable, thus"
"this feature is turned of until tests become"
"stable",
)
async def test_load_from_db(hass):
"""Test bootstrapping the brightness history from the database.
This test can should only be executed if the loading of the history
is enabled via plant.ENABLE_LOAD_HISTORY.
"""
init_recorder_component(hass)
plant_name = "wise_plant"
for value in [20, 30, 10]:
hass.states.async_set(
BRIGHTNESS_ENTITY, value, {ATTR_UNIT_OF_MEASUREMENT: "Lux"}
) | assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_UNKNOWN == state.state
max_brightness = state.attributes.get(plant.ATTR_MAX_BRIGHTNESS_HISTORY)
assert 30 == max_brightness
async def test_brightness_history(hass):
"""Test the min_brightness check."""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(BRIGHTNESS_ENTITY, 100, {ATTR_UNIT_OF_MEASUREMENT: "lux"})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_PROBLEM == state.state
hass.states.async_set(BRIGHTNESS_ENTITY, 600, {ATTR_UNIT_OF_MEASUREMENT: "lux"})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_OK == state.state
hass.states.async_set(BRIGHTNESS_ENTITY, 100, {ATTR_UNIT_OF_MEASUREMENT: "lux"})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_OK == state.state
def test_daily_history_no_data(hass):
"""Test with empty history."""
dh = plant.DailyHistory(3)
assert dh.max is None
def test_daily_history_one_day(hass):
"""Test storing data for the same day."""
dh = plant.DailyHistory(3)
values = [-2, 10, 0, 5, 20]
for i in range(len(values)):
dh.add_measurement(values[i])
max_value = max(values[0 : i + 1])
assert 1 == len(dh._days)
assert dh.max == max_value
def test_daily_history_multiple_days(hass):
"""Test storing data for different days."""
dh = plant.DailyHistory(3)
today = datetime.now()
today_minus_1 = today - timedelta(days=1)
today_minus_2 = today_minus_1 - timedelta(days=1)
today_minus_3 = today_minus_2 - timedelta(days=1)
days = [today_minus_3, today_minus_2, today_minus_1, today]
values = [10, 1, 7, 3]
max_values = [10, 10, 10, 7]
for i in range(len(days)):
dh.add_measurement(values[i], days[i])
assert max_values[i] == dh.max | await hass.async_block_till_done()
# wait for the recorder to really store the data
hass.data[recorder.DATA_INSTANCE].block_till_done()
|
defineProperty.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory() :
typeof define === 'function' && define.amd ? define('navodyDigitalFrontend', factory) :
(factory());
}(this, (function () { 'use strict';
(function(undefined) {
// Detection from https://github.com/Financial-Times/polyfill-service/blob/master/packages/polyfill-library/polyfills/Object/defineProperty/detect.js
var detect = (
// In IE8, defineProperty could only act on DOM elements, so full support
// for the feature requires the ability to set a property on an arbitrary object
'defineProperty' in Object && (function() {
try {
var a = {};
Object.defineProperty(a, 'test', {value:42});
return true;
} catch(e) {
return false
}
}())
);
if (detect) return
// Polyfill from https://cdn.polyfill.io/v2/polyfill.js?features=Object.defineProperty&flags=always
(function (nativeDefineProperty) {
var supportsAccessors = Object.prototype.hasOwnProperty('__defineGetter__');
var ERR_ACCESSORS_NOT_SUPPORTED = 'Getters & setters cannot be defined on this javascript engine';
var ERR_VALUE_ACCESSORS = 'A property cannot both have accessors and be writable or have a value';
Object.defineProperty = function defineProperty(object, property, descriptor) {
// Where native support exists, assume it
if (nativeDefineProperty && (object === window || object === document || object === Element.prototype || object instanceof Element)) {
return nativeDefineProperty(object, property, descriptor);
}
if (object === null || !(object instanceof Object || typeof object === 'object')) {
throw new TypeError('Object.defineProperty called on non-object');
}
if (!(descriptor instanceof Object)) {
throw new TypeError('Property description must be an object');
}
var propertyString = String(property);
var hasValueOrWritable = 'value' in descriptor || 'writable' in descriptor;
var getterType = 'get' in descriptor && typeof descriptor.get;
var setterType = 'set' in descriptor && typeof descriptor.set;
// handle descriptor.get
if (getterType) { | if (getterType !== 'function') {
throw new TypeError('Getter must be a function');
}
if (!supportsAccessors) {
throw new TypeError(ERR_ACCESSORS_NOT_SUPPORTED);
}
if (hasValueOrWritable) {
throw new TypeError(ERR_VALUE_ACCESSORS);
}
Object.__defineGetter__.call(object, propertyString, descriptor.get);
} else {
object[propertyString] = descriptor.value;
}
// handle descriptor.set
if (setterType) {
if (setterType !== 'function') {
throw new TypeError('Setter must be a function');
}
if (!supportsAccessors) {
throw new TypeError(ERR_ACCESSORS_NOT_SUPPORTED);
}
if (hasValueOrWritable) {
throw new TypeError(ERR_VALUE_ACCESSORS);
}
Object.__defineSetter__.call(object, propertyString, descriptor.set);
}
// OK to define value unconditionally - if a getter has been specified as well, an error would be thrown above
if ('value' in descriptor) {
object[propertyString] = descriptor.value;
}
return object;
};
}(Object.defineProperty));
})
.call('object' === typeof window && window || 'object' === typeof self && self || 'object' === typeof global && global || {});
}))); | |
listeners.js | // ==========================================================================
// Uiza Event Listeners
// ==========================================================================
import controls from './controls';
import events from './events';
import ui from './ui';
import { repaint } from './utils/animation';
import browser from './utils/browser';
import { getElement, getElements, matches, toggleClass, toggleHidden } from './utils/elements';
import { off, on, once, toggleListener, triggerEvent } from './utils/events';
import is from './utils/is';
import { setAspectRatio } from './utils/style';
class Listeners {
constructor(player) {
this.player = player;
this.lastKey = null;
this.focusTimer = null;
this.lastKeyDown = null;
this.handleKey = this.handleKey.bind(this);
this.toggleMenu = this.toggleMenu.bind(this);
this.setTabFocus = this.setTabFocus.bind(this);
this.firstTouch = this.firstTouch.bind(this);
}
// Handle key presses
handleKey(event) {
const { player } = this;
const { elements } = player;
const code = event.keyCode ? event.keyCode : event.which;
const pressed = event.type === 'keydown';
const repeat = pressed && code === this.lastKey;
// Bail if a modifier key is set
if (event.altKey || event.ctrlKey || event.metaKey || event.shiftKey) {
return;
}
// If the event is bubbled from the media element
// Firefox doesn't get the keycode for whatever reason
if (!is.number(code)) {
return;
}
// Seek by the number keys
const seekByKey = () => {
// Divide the max duration into 10th's and times by the number value
player.currentTime = (player.duration / 10) * (code - 48);
};
// Handle the key on keydown
// Reset on keyup
if (pressed) {
// Check focused element
// and if the focused element is not editable (e.g. text input)
// and any that accept key input http://webaim.org/techniques/keyboard/
const focused = document.activeElement;
if (is.element(focused)) {
const { editable } = player.config.selectors;
const { seek } = elements.inputs;
if (focused !== seek && matches(focused, editable)) {
return;
}
if (event.which === 32 && matches(focused, 'button, [role^="menuitem"]')) {
return;
}
}
// Which keycodes should we prevent default
const preventDefault = [32, 37, 38, 39, 40, 48, 49, 50, 51, 52, 53, 54, 56, 57, 67, 70, 73, 75, 76, 77, 79];
// If the code is found prevent default (e.g. prevent scrolling for arrows)
if (preventDefault.includes(code)) {
event.preventDefault();
event.stopPropagation();
}
switch (code) {
case 48:
case 49:
case 50:
case 51:
case 52:
case 53:
case 54:
case 55:
case 56:
case 57:
// 0-9
if (!repeat) {
seekByKey();
}
break;
case 32:
case 75:
// Space and K key
if (!repeat) {
player.togglePlay();
}
break;
case 38:
// Arrow up
player.increaseVolume(0.1);
break;
case 40:
// Arrow down
player.decreaseVolume(0.1);
break;
case 77:
// M key
if (!repeat) {
player.muted = !player.muted;
}
break;
case 39:
// Arrow forward
player.forward();
break;
case 37:
// Arrow back
player.rewind();
break;
case 70:
// F key
player.fullscreen.toggle();
break;
case 76:
// L key
player.loop = !player.loop;
break;
/* case 73:
this.setLoop('start');
break;
case 76:
this.setLoop();
break;
case 79:
this.setLoop('end');
break; */
default:
break;
}
// Escape is handle natively when in full screen
// So we only need to worry about non native
if (code === 27 && !player.fullscreen.usingNative && player.fullscreen.active) {
player.fullscreen.toggle();
}
// Store last code for next cycle
this.lastKey = code;
} else {
this.lastKey = null;
}
}
// Toggle menu
toggleMenu(event) {
controls.toggleMenu.call(this.player, event);
}
// Device is touch enabled
firstTouch() {
const { player } = this;
const { elements } = player;
player.touch = true;
// Add touch class
toggleClass(elements.container, player.config.classNames.isTouch, true);
}
setTabFocus(event) {
const { player } = this;
const { elements } = player;
clearTimeout(this.focusTimer);
// Ignore any key other than tab
if (event.type === 'keydown' && event.which !== 9) {
return;
}
// Store reference to event timeStamp
if (event.type === 'keydown') {
this.lastKeyDown = event.timeStamp;
}
// Remove current classes
const removeCurrent = () => {
const className = player.config.classNames.tabFocus;
const current = getElements.call(player, `.${className}`);
toggleClass(current, className, false);
};
// Determine if a key was pressed to trigger this event
const wasKeyDown = event.timeStamp - this.lastKeyDown <= 20;
// Ignore focus events if a key was pressed prior
if (event.type === 'focus' && !wasKeyDown) {
return;
}
// Remove all current
removeCurrent();
// Delay the adding of classname until the focus has changed
// This event fires before the focusin event
this.focusTimer = setTimeout(() => {
const focused = document.activeElement;
// Ignore if current focus element isn't inside the player
if (!elements.container.contains(focused)) {
return;
}
toggleClass(document.activeElement, player.config.classNames.tabFocus, true);
}, 10);
}
// Global window & document listeners
global(toggle = true) {
const { player } = this;
// Keyboard shortcuts
if (player.config.keyboard.global) {
toggleListener.call(player, window, 'keydown keyup', this.handleKey, toggle, false);
}
// Click anywhere closes menu
toggleListener.call(player, document.body, 'click', this.toggleMenu, toggle);
// Detect touch by events
once.call(player, document.body, 'touchstart', this.firstTouch);
// Tab focus detection
toggleListener.call(player, document.body, 'keydown focus blur', this.setTabFocus, toggle, false, true);
}
// Container listeners
container() {
const { player } = this;
const { config, elements, timers } = player;
// Keyboard shortcuts
if (!config.keyboard.global && config.keyboard.focused) {
on.call(player, elements.container, 'keydown keyup', this.handleKey, false);
}
// Toggle controls on mouse events and entering fullscreen
on.call(
player,
elements.container, | event => {
const { controls: controlsElement } = elements;
// Remove button states for fullscreen
if (controlsElement && event.type === events.ENTER_FULLSCREEN) {
controlsElement.pressed = false;
controlsElement.hover = false;
}
// Show, then hide after a timeout unless another control event occurs
const show = ['touchstart', 'touchmove', 'mousemove'].includes(event.type);
let delay = 0;
if (show) {
ui.toggleControls.call(player, true);
// Use longer timeout for touch devices
delay = player.touch ? 3000 : 2000;
}
// Clear timer
clearTimeout(timers.controls);
// Set new timer to prevent flicker when seeking
timers.controls = setTimeout(() => ui.toggleControls.call(player, false), delay);
},
);
// Resize on fullscreen change
const setPlayerSize = measure => {
// If we don't need to measure the viewport
if (!measure) {
return setAspectRatio.call(player);
}
const rect = elements.container.getBoundingClientRect();
const { width, height } = rect;
return setAspectRatio.call(player, `${width}:${height}`);
};
const resized = () => {
clearTimeout(timers.resized);
timers.resized = setTimeout(setPlayerSize, 50);
};
on.call(player, elements.container, `${events.ENTER_FULLSCREEN} ${events.EXIT_FULLSCREEN}`, event => {
const { target, usingNative } = player.fullscreen;
// Ignore events not from target
if (target !== elements.container) {
return;
}
// If it's not an embed and no ratio specified
if (!player.isEmbed && is.empty(player.config.ratio)) {
return;
}
const isEnter = event.type === events.ENTER_FULLSCREEN;
// Set the player size when entering fullscreen to viewport size
const { padding, ratio } = setPlayerSize(isEnter);
// If not using native fullscreen, we need to check for resizes of viewport
if (!usingNative) {
if (isEnter) {
on.call(player, window, events.RESIZE, resized);
} else {
off.call(player, window, events.RESIZE, resized);
}
}
});
}
// Listen for media events
media() {
const { player } = this;
const { elements } = player;
// Time change on media
on.call(player, player.media, `${events.TIME_UPDATE} ${events.SEEKING} ${events.SEEKED}`, event =>
controls.timeUpdate.call(player, event),
);
// Display duration
on.call(player, player.media, `${events.DURATION_CHANGE} ${events.LOADED_DATA} ${events.LOADED_META_DATA}`, event =>
controls.durationUpdate.call(player, event),
);
// Check for audio tracks on load
// We can't use `loadedmetadata` as it doesn't seem to have audio tracks at that point
on.call(player, player.media, `${events.CAN_PLAY} ${events.LOADED_DATA}`, () => {
toggleHidden(elements.volume, !player.hasAudio);
toggleHidden(elements.buttons.mute, !player.hasAudio);
});
// Handle the media finishing
on.call(player, player.media, events.ENDED, () => {
// Show poster on end
if (player.isHTML5 && player.isVideo && player.config.resetOnEnd) {
// Restart
player.restart();
}
});
// Check for buffer progress
on.call(player, player.media, `${events.PROGRESS} ${events.PLAYING} ${events.SEEKING} ${events.SEEKED}`, event =>
controls.updateProgress.call(player, event),
);
// Handle volume changes
on.call(player, player.media, events.VOLUME_CHANGE, event => controls.updateVolume.call(player, event));
// Handle play/pause
on.call(
player,
player.media,
`${events.PLAYING} ${events.PLAY} ${events.PAUSE} ${events.ENDED} ${events.EMPTIED} ${events.TIME_UPDATE}`,
event => ui.checkPlaying.call(player, event),
);
// Loading state
on.call(player, player.media, `${events.WAITING} ${events.CAN_PLAY} ${events.SEEKED} ${events.PLAYING}`, event =>
ui.checkLoading.call(player, event),
);
// Click video
if (player.supported.ui && player.config.clickToPlay && !player.isAudio) {
// Re-fetch the wrapper
const wrapper = getElement.call(player, `.${player.config.classNames.video}`);
// Bail if there's no wrapper (this should never happen)
if (!is.element(wrapper)) {
return;
}
// On click play, pause or restart
on.call(player, elements.container, 'click', event => {
const targets = [elements.container, wrapper];
// Ignore if click if not container or in video wrapper
if (!targets.includes(event.target) && !wrapper.contains(event.target)) {
return;
}
// Touch devices will just show controls (if hidden)
if (player.touch && player.config.hideControls) {
return;
}
if (player.ended) {
this.proxy(event, player.restart, 'restart');
this.proxy(event, player.play, events.PLAY);
} else {
this.proxy(event, player.togglePlay, events.PLAY);
}
});
}
// Disable right click
if (player.supported.ui && player.config.disableContextMenu) {
on.call(
player,
elements.wrapper,
'contextmenu',
event => {
event.preventDefault();
controls.setContextMenu.call(player, {
left: event.pageX,
top: event.pageY,
});
},
false,
);
}
// Volume change
on.call(player, player.media, events.VOLUME_CHANGE, () => {
// Save to storage
player.storage.set({
volume: player.volume,
muted: player.muted,
});
});
// Speed change
on.call(player, player.media, events.RATE_CHANGE, () => {
// Update UI
controls.updateSetting.call(player, 'speed');
// Save to storage
player.storage.set({ speed: player.speed });
});
// Quality change
on.call(player, player.media, events.QUALITY_CHANGE, event => {
// Update UI
controls.updateSetting.call(player, 'quality', null, event.detail.quality);
});
// Proxy events to container
// Bubble up key events for Edge
const proxyEvents = player.config.events.concat(['keyup', 'keydown']).join(' ');
on.call(player, player.media, proxyEvents, event => {
let { detail = {} } = event;
// Get error details from media
if (event.type === events.ERROR) {
detail = player.media.error;
}
triggerEvent.call(player, elements.container, event.type, true, detail);
});
}
// Run default and custom handlers
proxy(event, defaultHandler, customHandlerKey) {
const { player } = this;
const customHandler = player.config.listeners[customHandlerKey];
const hasCustomHandler = is.function(customHandler);
let returned = true;
// Execute custom handler
if (hasCustomHandler) {
returned = customHandler.call(player, event);
}
// Only call default handler if not prevented in custom handler
if (returned && is.function(defaultHandler)) {
defaultHandler.call(player, event);
}
}
// Trigger custom and default handlers
bind(element, type, defaultHandler, customHandlerKey, passive = true) {
const { player } = this;
const customHandler = player.config.listeners[customHandlerKey];
const hasCustomHandler = is.function(customHandler);
on.call(player, element, type, event => this.proxy(event, defaultHandler, customHandlerKey), passive && !hasCustomHandler);
}
// Listen for control events
controls() {
const { player } = this;
const { elements } = player;
// IE doesn't support input event, so we fallback to change
const inputEvent = browser.isIE ? 'change' : 'input';
// Play/pause toggle
if (elements.buttons.play) {
Array.from(elements.buttons.play).forEach(button => {
this.bind(button, 'click', player.togglePlay, 'play');
});
}
// Pause
this.bind(elements.buttons.restart, 'click', player.restart, 'restart');
// Rewind
this.bind(elements.buttons.rewind, 'click', player.rewind, 'rewind');
// Rewind
this.bind(elements.buttons.fastForward, 'click', player.forward, 'fastForward');
// Mute toggle
this.bind(
elements.buttons.mute,
'click',
() => {
player.muted = !player.muted;
},
'mute',
);
// Captions toggle
this.bind(elements.buttons.captions, 'click', () => player.toggleCaptions());
// Fullscreen toggle
this.bind(
elements.buttons.fullscreen,
'click',
() => {
player.fullscreen.toggle();
},
'fullscreen',
);
// Picture-in-Picture
this.bind(
elements.buttons.pip,
'click',
() => {
player.pip = 'toggle';
},
'pip',
);
// Settings menu - click toggle
this.bind(elements.buttons.settings, 'click', event => {
// Prevent the document click listener closing the menu
event.stopPropagation();
controls.toggleMenu.call(player, event);
});
// Settings menu - keyboard toggle
// We have to bind to keyup otherwise Firefox triggers a click when a keydown event handler shifts focus
// https://bugzilla.mozilla.org/show_bug.cgi?id=1220143
this.bind(
elements.buttons.settings,
'keyup',
event => {
const code = event.which;
// We only care about space and return
if (![13, 32].includes(code)) {
return;
}
// Because return triggers a click anyway, all we need to do is set focus
if (code === 13) {
controls.focusFirstMenuItem.call(player, null, true);
return;
}
// Prevent scroll
event.preventDefault();
// Prevent playing video (Firefox)
event.stopPropagation();
// Toggle menu
controls.toggleMenu.call(player, event);
},
null,
false, // Can't be passive as we're preventing default
);
// Escape closes menu
this.bind(elements.settings.menu, 'keydown', event => {
if (event.which === 27) {
controls.toggleMenu.call(player, event);
}
});
// Set range input alternative "value", which matches the tooltip time (#954)
this.bind(elements.inputs.seek, 'mousedown mousemove', event => {
const rect = elements.progress.getBoundingClientRect();
const percent = (100 / rect.width) * (event.pageX - rect.left);
event.currentTarget.setAttribute('seek-value', percent);
});
// Pause while seeking
this.bind(elements.inputs.seek, 'mousedown mouseup keydown keyup touchstart touchend', event => {
const seek = event.currentTarget;
const code = event.keyCode ? event.keyCode : event.which;
const attribute = 'play-on-seeked';
if (is.keyboardEvent(event) && code !== 39 && code !== 37) {
return;
}
// Record seek time so we can prevent hiding controls for a few seconds after seek
player.lastSeekTime = Date.now();
// Was playing before?
const play = seek.hasAttribute(attribute);
// Done seeking
const done = ['mouseup', 'touchend', 'keyup'].includes(event.type);
// If we're done seeking and it was playing, resume playback
if (play && done) {
seek.removeAttribute(attribute);
player.play();
} else if (!done && player.playing) {
seek.setAttribute(attribute, '');
player.pause();
}
});
// Fix range inputs on iOS
// Super weird iOS bug where after you interact with an <input type="range">,
// it takes over further interactions on the page. This is a hack
if (browser.isIos) {
const inputs = getElements.call(player, 'input[type="range"]');
Array.from(inputs).forEach(input => this.bind(input, inputEvent, event => repaint(event.target)));
}
// Seek
this.bind(
elements.inputs.seek,
inputEvent,
event => {
const seek = event.currentTarget;
// If it exists, use seek-value instead of "value" for consistency with tooltip time (#954)
let seekTo = seek.getAttribute('seek-value');
if (is.empty(seekTo)) {
seekTo = seek.value;
}
seek.removeAttribute('seek-value');
player.currentTime = (seekTo / seek.max) * player.duration;
},
'seek',
);
// Seek tooltip
this.bind(elements.progress, 'mouseenter mouseleave mousemove', event => controls.updateSeekTooltip.call(player, event));
// Preview thumbnails plugin
// TODO: Really need to work on some sort of plug-in wide event bus or pub-sub for this
this.bind(elements.progress, 'mousemove touchmove', event => {
const { previewThumbnails } = player;
if (previewThumbnails && previewThumbnails.loaded) {
previewThumbnails.startMove(event);
}
});
// Hide thumbnail preview - on mouse click, mouse leave, and video play/seek. All four are required, e.g., for buffering
this.bind(elements.progress, 'mouseleave click', () => {
const { previewThumbnails } = player;
if (previewThumbnails && previewThumbnails.loaded) {
previewThumbnails.endMove(false, true);
}
});
// Show scrubbing preview
this.bind(elements.progress, 'mousedown touchstart', event => {
const { previewThumbnails } = player;
if (previewThumbnails && previewThumbnails.loaded) {
previewThumbnails.startScrubbing(event);
}
});
this.bind(elements.progress, 'mouseup touchend', event => {
const { previewThumbnails } = player;
if (previewThumbnails && previewThumbnails.loaded) {
previewThumbnails.endScrubbing(event);
}
});
// Polyfill for lower fill in <input type="range"> for webkit
if (browser.isWebkit) {
Array.from(getElements.call(player, 'input[type="range"]')).forEach(element => {
this.bind(element, 'input', event => controls.updateRangeFill.call(player, event.target));
});
}
// Current time invert
// Only if one time element is used for both currentTime and duration
if (player.config.toggleInvert && !is.element(elements.display.duration)) {
this.bind(elements.display.currentTime, 'click', () => {
// Do nothing if we're at the start
if (player.currentTime === 0) {
return;
}
player.config.invertTime = !player.config.invertTime;
controls.timeUpdate.call(player);
});
}
// Volume
this.bind(
elements.inputs.volume,
inputEvent,
event => {
player.volume = event.target.value;
},
'volume',
);
// Update controls.hover state (used for ui.toggleControls to avoid hiding when interacting)
this.bind(elements.controls, 'mouseenter mouseleave', event => {
elements.controls.hover = !player.touch && event.type === 'mouseenter';
});
// Update controls.pressed state (used for ui.toggleControls to avoid hiding when interacting)
this.bind(elements.controls, 'mousedown mouseup touchstart touchend touchcancel', event => {
elements.controls.pressed = ['mousedown', 'touchstart'].includes(event.type);
});
// Show controls when they receive focus (e.g., when using keyboard tab key)
this.bind(elements.controls, 'focusin', () => {
const { config, timers } = player;
// Skip transition to prevent focus from scrolling the parent element
toggleClass(elements.controls, config.classNames.noTransition, true);
// Toggle
ui.toggleControls.call(player, true);
// Restore transition
setTimeout(() => {
toggleClass(elements.controls, config.classNames.noTransition, false);
}, 0);
// Delay a little more for mouse users
const delay = this.touch ? 3000 : 4000;
// Clear timer
clearTimeout(timers.controls);
// Hide again after delay
timers.controls = setTimeout(() => ui.toggleControls.call(player, false), delay);
});
// Mouse wheel for volume
this.bind(
elements.inputs.volume,
'wheel',
event => {
// Detect "natural" scroll - suppored on OS X Safari only
// Other browsers on OS X will be inverted until support improves
const inverted = event.webkitDirectionInvertedFromDevice;
// Get delta from event. Invert if `inverted` is true
const [x, y] = [event.deltaX, -event.deltaY].map(value => (inverted ? -value : value));
// Using the biggest delta, normalize to 1 or -1 (or 0 if no delta)
const direction = Math.sign(Math.abs(x) > Math.abs(y) ? x : y);
// Change the volume by 2%
player.increaseVolume(direction / 50);
// Don't break page scrolling at max and min
const { volume } = player.media;
if ((direction === 1 && volume < 1) || (direction === -1 && volume > 0)) {
event.preventDefault();
}
},
'volume',
false,
);
}
}
export default Listeners; | `mousemove mouseleave touchstart touchmove ${events.ENTER_FULLSCREEN} ${events.EXIT_FULLSCREEN}`, |
main.py | import re
import sys
| while n > y:
c, m, n, x, y = c + m, n - 1, m, n - y, x
return c + x
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
print(walk(*map(int, re.findall(r'\d+', test)))) |
def walk(m, n, x, y):
c = 0 |
auth.service.ts | import { Injectable } from '@nestjs/common';
import { UserService } from 'src/user/user.service';
import * as bcrypt from 'bcrypt';
import { User } from 'src/user/entities/user.entity';
import { UserPayload } from './models/UserPayload';
import { JwtService } from '@nestjs/jwt';
import { UserToken } from './models/UserToken';
@Injectable()
export class AuthService {
constructor(
private readonly userService: UserService,
private readonly jwtService: JwtService,
) {}
login(user: User): UserToken {
// Transforma o user em JWT
const payload: UserPayload = {
sub: user.id, | email: user.email,
name: user.name,
};
const jwtToken = this.jwtService.sign(payload);
return {
access_token: jwtToken,
};
}
async validateUser(email: string, password: string) {
const user = await this.userService.findByEmail(email);
if (user) {
// checar se a senha informada corresponde a hash que está no banco
const isPasswordValid = await bcrypt.compare(password, user.password);
if (isPasswordValid) {
return {
...user,
password: undefined,
};
}
}
// Se chegar aqui, significa que não encontrou um user e/ou a senha não corresponde
throw new Error('Email address or password provided is incorrect.');
}
} | |
dxfc.ts | // Associated data. If we're using React, this | { id: 2, text: 'Second todo', done: false }
] | // would be the component’s props or state
;[
{ id: 1, text: 'First todo', done: false }, |
db-test.js | let path = require('path')
let test = require('tape')
let {db} = require('../../src')
let getDBClient = require('../../src/db/_get-db-client')
let server
let dynamo
let TableName = 'mockapp-production-accounts'
let TableName2 = 'mockapp-production-pets'
let cwd = process.cwd()
/* Regular test suite */
test('db.start', t=> {
t.plan(3)
t.ok(db, 'got db')
// move the current process into the mock dir
process.chdir(path.join(__dirname, '..', 'mock', 'normal'))
server = db.start(function() {
t.ok(true, '@tables created in local database')
})
getDBClient(function _gotDBClient(err, client) {
if (err) console.log(err) // Yes, but actually no
dynamo = client
t.ok(dynamo, 'Got Dynamo client')
})
})
test('can list tables', t=> {
t.plan(1)
dynamo.listTables({}, function done(err, result) {
if (err) t.fail(err)
else {
t.ok(Array.isArray(result.TableNames), 'got tables')
console.log(result)
}
})
})
test('default tables present', t => {
t.plan(3)
let defaultTables = [
'arc-sessions',
'mockapp-production-arc-sessions',
'mockapp-staging-arc-sessions',
]
dynamo.listTables({}, function done(err, result) {
if (err) t.fail(err)
else {
for (let table of defaultTables) {
t.ok(result.TableNames.includes(table), `found ${table}`)
}
}
})
})
test('can insert a row', t=> {
t.plan(1)
dynamo.putItem({
TableName,
Item: {
accountID: {S: 'mock-account-id'},
email: {S: '[email protected]'}
}
},
function _put(err, result) {
if (err) t.fail(err)
else {
t.ok(result, 'got result')
console.log(result)
}
})
})
test('can read index in arc 6', t=> {
t.plan(1)
dynamo.describeTable({
TableName
},
function _desc(err, result) {
if (err) t.fail(err)
else {
t.ok(result.Table.GlobalSecondaryIndexes[0].IndexName === 'email-index', 'email-index')
}
})
})
test('can read index in arc 6', t=> {
t.plan(3)
dynamo.describeTable({
TableName: TableName2
},
function _desc(err, result) {
if (err) t.fail(err)
else {
t.ok(result.Table.GlobalSecondaryIndexes.length === 2, 'two')
t.ok(result.Table.GlobalSecondaryIndexes[0].IndexName === 'petID-index', 'petID-index')
t.ok(result.Table.GlobalSecondaryIndexes[1].IndexName === 'accountID-petID-index', 'accountID-petID-index')
}
})
})
test('can read the row', t=> {
t.plan(1)
dynamo.getItem({
TableName,
Key: {
accountID: {S:'fake-account-id'}
}
},
function _desc(err, result) {
if (err) t.fail(err)
else {
t.ok(result, 'got result')
console.log(result)
}
})
})
test('can query the index', t=> {
t.plan(1)
dynamo.query({
TableName,
IndexName: 'email-index',
KeyConditions: {
email: {
AttributeValueList: [{S: '[email protected]'}],
ComparisonOperator: 'EQ'
}
}
},
function _desc(err, result) {
if (err) t.fail(err)
else {
t.ok(result, 'got result')
console.log(result)
}
})
})
test('db.close', t=> {
t.plan(1)
server.close()
t.ok(true, 'db closed')
})
/* DEPRECATED mode */
test('db.start', t=> {
t.plan(3)
t.ok(db, 'got db')
// move the current process into the mock dir
process.chdir(path.join(__dirname, '..', 'mock', 'normal'))
process.env.DEPRECATED = true
server = db.start(function() {
t.ok(true, '@tables created in local database')
console.log(process.env.DEPRECATED)
})
getDBClient(function _gotDBClient(err, client) {
if (err) console.log(err) // Yes, but actually no
dynamo = client
t.ok(dynamo, 'Got Dynamo client')
})
})
test('can list tables', t=> {
t.plan(1)
dynamo.listTables({}, function done(err, result) {
if (err) t.fail(err)
else {
t.ok(Array.isArray(result.TableNames), 'got tables')
console.log(result)
}
}) | let defaultTables = [
'arc-sessions',
'mockapp-production-arc-sessions',
'mockapp-staging-arc-sessions',
]
dynamo.listTables({}, function done(err, result) {
if (err) t.fail(err)
else {
for (let table of defaultTables) {
t.ok(result.TableNames.includes(table), `found ${table}`)
}
}
})
})
test('can insert a row', t=> {
t.plan(1)
dynamo.putItem({
TableName,
Item: {
accountID: {S: 'mock-account-id'},
email: {S: '[email protected]'}
}
},
function _put(err, result) {
if (err) t.fail(err)
else {
t.ok(result, 'got result')
console.log(result)
}
})
})
test('can read index in arc 5', t=> {
t.plan(1)
dynamo.describeTable({
TableName
},
function _desc(err, result) {
if (err) t.fail(err)
else {
console.log(result.Table.GlobalSecondaryIndexes)
t.ok(result.Table.GlobalSecondaryIndexes[0].IndexName === 'mockapp-production-accounts-email-index', 'email-index')
}
})
})
test('can read index in arc 5', t=> {
t.plan(3)
dynamo.describeTable({
TableName: TableName2
},
function _desc(err, result) {
if (err) t.fail(err)
else {
t.ok(result.Table.GlobalSecondaryIndexes.length === 2, 'two')
t.ok(result.Table.GlobalSecondaryIndexes[0].IndexName === 'mockapp-production-pets-petID-index', 'petID-index')
t.ok(result.Table.GlobalSecondaryIndexes[1].IndexName === 'mockapp-production-pets-accountID-petID-index', 'accountID-petID-index')
}
})
})
test('can read the row', t=> {
t.plan(1)
dynamo.getItem({
TableName,
Key: {
accountID: {S:'fake-account-id'}
}
},
function _desc(err, result) {
if (err) t.fail(err)
else {
t.ok(result, 'got result')
console.log(result)
}
})
})
test('can query the index', t=> {
t.plan(1)
dynamo.query({
TableName,
IndexName: 'mockapp-production-accounts-email-index',
KeyConditions: {
email: {
AttributeValueList: [{S: '[email protected]'}],
ComparisonOperator: 'EQ'
}
}
},
function _desc(err, result) {
if (err) t.fail(err)
else {
t.ok(result, 'got result')
console.log(result)
}
})
})
test('db.close', t=> {
t.plan(2)
delete process.env.DEPRECATED
server.close()
t.ok(true, 'db closed')
process.chdir(cwd)
t.equal(process.cwd(), cwd, 'Switched back to original working dir')
}) | })
test('default tables present', t => {
t.plan(3) |
StatementTransform.ts | } | import { Statement } from "@xapi/xapi";
export interface StatementTransform {
(s: Statement): Statement; |
|
deployment_docker_image_override_test.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package acc
import (
"fmt"
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)
func | (t *testing.T) {
resName := "ec_deployment.docker_image"
randomName := prefix + "docker_image_" + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
cfgF := func(cfg string) string {
t.Helper()
requiresAPIConn(t)
b, err := os.ReadFile(cfg)
if err != nil {
t.Fatal(err)
}
return fmt.Sprintf(string(b),
randomName, "gcp-us-west2", setDefaultTemplate("gcp-us-west2", defaultTemplate),
)
}
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactory,
CheckDestroy: testAccDeploymentDestroy,
Steps: []resource.TestStep{
{
Config: cfgF("testdata/deployment_docker_image_override.tf"),
Check: resource.ComposeAggregateTestCheckFunc(
resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.0.docker_image", "docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT"),
resource.TestCheckResourceAttr(resName, "kibana.0.config.0.docker_image", "docker.elastic.co/cloud-ci/kibana:7.15.0-SNAPSHOT"),
resource.TestCheckResourceAttr(resName, "apm.0.config.0.docker_image", "docker.elastic.co/cloud-ci/apm:7.15.0-SNAPSHOT"),
resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.0.docker_image", "docker.elastic.co/cloud-ci/enterprise-search:7.15.0-SNAPSHOT"),
),
},
},
})
}
| TestAccDeployment_docker_image_override |
table.go | // SPDX-License-Identifier: Apache-2.0
// Copyright © 2021 Wrangle Ltd
package factory
import (
"bytes"
"encoding/csv"
"fmt"
"io"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/wrgl/wrgl/pkg/ingest"
"github.com/wrgl/wrgl/pkg/objects"
"github.com/wrgl/wrgl/pkg/slice"
"github.com/wrgl/wrgl/pkg/sorter"
"github.com/wrgl/wrgl/pkg/testutils"
)
func parseRows(rows []string, pk []uint32) ([][]string, []uint32) {
records := [][]string{}
if rows == nil {
for i := 0; i < 4; i++ {
row := []string{}
for j := 0; j < 3; j++ {
row = append(row, testutils.BrokenRandomLowerAlphaString(3))
}
records = append(records, row)
}
} else {
for _, row := range rows {
records = append(records, strings.Split(row, ","))
}
}
if pk == nil {
pk = []uint32{0}
}
return records, pk
}
func BuildTable(t *testing.T, db objects.Store, rows []string, pk []uint32) []byte { |
func SdumpTable(t *testing.T, db objects.Store, sum []byte, indent int) string {
t.Helper()
tbl, err := objects.GetTable(db, sum)
require.NoError(t, err)
lines := []string{
fmt.Sprintf("table %x", sum),
fmt.Sprintf(" %s", strings.Join(tbl.Columns, ", ")),
}
var bb []byte
var blk [][]string
for _, sum := range tbl.Blocks {
lines = append(lines, fmt.Sprintf(" block %x", sum))
blk, bb, err = objects.GetBlock(db, bb, sum)
require.NoError(t, err)
for _, row := range blk {
lines = append(lines, fmt.Sprintf(" %s", strings.Join(row, ", ")))
}
}
if indent > 0 {
for i, line := range lines {
lines[i] = strings.Repeat(" ", indent) + line
}
}
return strings.Join(lines, "\n")
}
|
t.Helper()
records, pk := parseRows(rows, pk)
buf := bytes.NewBuffer(nil)
w := csv.NewWriter(buf)
require.NoError(t, w.WriteAll(records))
s, err := sorter.NewSorter(0, nil)
require.NoError(t, err)
sum, err := ingest.IngestTable(db, s, io.NopCloser(bytes.NewReader(buf.Bytes())), slice.IndicesToValues(records[0], pk))
require.NoError(t, err)
return sum
}
|
0101_a3bcd0914482_add_data_compressed_to_serialized_dag.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add data_compressed to serialized_dag
Revision ID: a3bcd0914482
Revises: e655c0453f75
Create Date: 2022-02-03 22:40:59.841119
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a3bcd0914482'
down_revision = 'e655c0453f75'
branch_labels = None
depends_on = None
airflow_version = '2.3.0'
def upgrade():
with op.batch_alter_table('serialized_dag') as batch_op:
batch_op.alter_column('data', existing_type=sa.JSON, nullable=True)
batch_op.add_column(sa.Column('data_compressed', sa.LargeBinary, nullable=True))
def downgrade():
| with op.batch_alter_table('serialized_dag') as batch_op:
batch_op.alter_column('data', existing_type=sa.JSON, nullable=False)
batch_op.drop_column('data_compressed') |
|
entry.go | package logrus
import (
"bytes"
"context"
"fmt"
"os"
"reflect"
"runtime"
"strings"
"sync"
"time"
)
var (
// qualified package name, cached at first use
logrusPackage string
// Positions in the call stack when tracing to report the calling method
minimumCallerDepth int
// Used for caller information initialisation
callerInitOnce sync.Once
)
const (
maximumCallerDepth int = 25
knownLogrusFrames int = 4
)
func | () {
// start at the bottom of the stack before the package-name cache is primed
minimumCallerDepth = 1
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
// reused and passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level
// Calling method, with package name
Caller *runtime.Frame
// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer
// Contains the context set by the user. Useful for hook processing etc.
Context context.Context
// err may contain a field formatting error
err string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, plus one optional. Give a little extra room.
Data: make(Fields, 6),
}
}
func (entry *Entry) Dup() *Entry {
data := make(Fields, len(entry.Data))
for k, v := range entry.Data {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
}
// Returns the bytes representation of this entry from the formatter.
func (entry *Entry) Bytes() ([]byte, error) {
return entry.Logger.Formatter.Format(entry)
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
serialized, err := entry.Bytes()
if err != nil {
return "", err
}
str := string(serialized)
return str, nil
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
// Add a context to the Entry.
func (entry *Entry) WithContext(ctx context.Context) *Entry {
dataCopy := make(Fields, len(entry.Data))
for k, v := range entry.Data {
dataCopy[k] = v
}
return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
}
func (entry *Entry) WithPrefix(prefix string) *Entry {
if oldPrefix, ok := entry.Context.Value(FieldKeyPrefix).(string); ok {
prefix = oldPrefix + prefix
}
return entry.WithContext(context.WithValue(entry.Context, FieldKeyPrefix, prefix))
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
fieldErr := entry.err
for k, v := range fields {
isErrField := false
if t := reflect.TypeOf(v); t != nil {
switch {
case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
isErrField = true
}
}
if isErrField {
tmp := fmt.Sprintf("can not add field %q", k)
if fieldErr != "" {
fieldErr = entry.err + ", " + tmp
} else {
fieldErr = tmp
}
} else {
data[k] = v
}
}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
}
// Overrides the time of the Entry.
func (entry *Entry) WithTime(t time.Time) *Entry {
dataCopy := make(Fields, len(entry.Data))
for k, v := range entry.Data {
dataCopy[k] = v
}
return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
}
// getPackageName reduces a fully qualified function name to the package name
// There really ought to be to be a better way...
func getPackageName(f string) string {
for {
lastPeriod := strings.LastIndex(f, ".")
lastSlash := strings.LastIndex(f, "/")
if lastPeriod > lastSlash {
f = f[:lastPeriod]
} else {
break
}
}
return f
}
// getCaller retrieves the name of the first non-logrus calling function
func getCaller() *runtime.Frame {
// cache this package's fully-qualified name
callerInitOnce.Do(func() {
pcs := make([]uintptr, maximumCallerDepth)
_ = runtime.Callers(0, pcs)
// dynamic get the package name and the minimum caller depth
for i := 0; i < maximumCallerDepth; i++ {
funcName := runtime.FuncForPC(pcs[i]).Name()
if strings.Contains(funcName, "getCaller") {
logrusPackage = getPackageName(funcName)
break
}
}
minimumCallerDepth = knownLogrusFrames
})
// Restrict the lookback frames to avoid runaway lookups
pcs := make([]uintptr, maximumCallerDepth)
depth := runtime.Callers(minimumCallerDepth, pcs)
frames := runtime.CallersFrames(pcs[:depth])
for f, again := frames.Next(); again; f, again = frames.Next() {
pkg := getPackageName(f.Function)
// If the caller isn't part of this package, we're done
if pkg != logrusPackage {
return &f //nolint:scopelint
}
}
// if we got here, we failed to find the caller's context
return nil
}
func (entry Entry) HasCaller() (has bool) {
return entry.Logger != nil &&
entry.Logger.ReportCaller &&
entry.Caller != nil
}
func (entry *Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
newEntry := entry.Dup()
if newEntry.Time.IsZero() {
newEntry.Time = time.Now()
}
newEntry.Level = level
newEntry.Message = msg
newEntry.Logger.mu.Lock()
reportCaller := newEntry.Logger.ReportCaller
bufPool := newEntry.getBufferPool()
newEntry.Logger.mu.Unlock()
if reportCaller {
newEntry.Caller = getCaller()
}
newEntry.fireHooks()
buffer = bufPool.Get()
defer func() {
newEntry.Buffer = nil
buffer.Reset()
bufPool.Put(buffer)
}()
buffer.Reset()
newEntry.Buffer = buffer
newEntry.write()
newEntry.Buffer = nil
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(newEntry)
}
}
func (entry *Entry) getBufferPool() (pool BufferPool) {
if entry.Logger.BufferPool != nil {
return entry.Logger.BufferPool
}
return bufferPool
}
func (entry *Entry) fireHooks() {
var tmpHooks LevelHooks
entry.Logger.mu.Lock()
tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
for k, v := range entry.Logger.Hooks {
tmpHooks[k] = v
}
entry.Logger.mu.Unlock()
err := tmpHooks.Fire(entry.Level, entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
}
}
func (entry *Entry) write() {
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
return
}
if _, err := entry.Logger.Out.Write(serialized); err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
}
// Log will log a message at the level given as parameter.
// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit.
// For this behaviour Entry.Panic or Entry.Fatal should be used instead.
func (entry *Entry) Log(level Level, args ...interface{}) {
if entry.Logger.IsLevelEnabled(level) {
entry.log(level, fmt.Sprint(args...))
}
}
func (entry *Entry) Trace(args ...interface{}) {
entry.Log(TraceLevel, args...)
}
func (entry *Entry) Debug(args ...interface{}) {
entry.Log(DebugLevel, args...)
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
entry.Log(InfoLevel, args...)
}
func (entry *Entry) Warn(args ...interface{}) {
entry.Log(WarnLevel, args...)
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
entry.Log(ErrorLevel, args...)
}
func (entry *Entry) Fatal(args ...interface{}) {
entry.Log(CriticalLevel, args...)
}
func (entry *Entry) Critical(args ...interface{}) {
entry.Log(CriticalLevel, args...)
}
func (entry *Entry) Panic(args ...interface{}) {
entry.Log(PanicLevel, args...)
}
// Entry Printf family functions
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
if entry.Logger.IsLevelEnabled(level) {
entry.Log(level, fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Tracef(format string, args ...interface{}) {
entry.Logf(TraceLevel, format, args...)
}
func (entry *Entry) Debugf(format string, args ...interface{}) {
entry.Logf(DebugLevel, format, args...)
}
func (entry *Entry) Infof(format string, args ...interface{}) {
entry.Logf(InfoLevel, format, args...)
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
entry.Logf(WarnLevel, format, args...)
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
entry.Logf(ErrorLevel, format, args...)
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
entry.Logf(CriticalLevel, format, args...)
}
func (entry *Entry) Criticalf(format string, args ...interface{}) {
entry.Logf(CriticalLevel, format, args...)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
entry.Logf(PanicLevel, format, args...)
}
// Entry Println family functions
func (entry *Entry) Logln(level Level, args ...interface{}) {
if entry.Logger.IsLevelEnabled(level) {
entry.Log(level, entry.sprintlnn(args...))
}
}
func (entry *Entry) Traceln(args ...interface{}) {
entry.Logln(TraceLevel, args...)
}
func (entry *Entry) Debugln(args ...interface{}) {
entry.Logln(DebugLevel, args...)
}
func (entry *Entry) Infoln(args ...interface{}) {
entry.Logln(InfoLevel, args...)
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
entry.Logln(WarnLevel, args...)
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
entry.Logln(ErrorLevel, args...)
}
func (entry *Entry) Fatalln(args ...interface{}) {
entry.Logln(CriticalLevel, args...)
}
func (entry *Entry) Criticalln(args ...interface{}) {
entry.Logln(CriticalLevel, args...)
}
func (entry *Entry) Panicln(args ...interface{}) {
entry.Logln(PanicLevel, args...)
}
// Sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}
| init |
preload.js | window.addEventListener("DOMContentLoaded", () => {
const replaceText = (selector, text) => {
const element = document.getElementById(selector);
if (element) element.innerText = text;
}
for (const dependency of ["chrome", "node", "electron"]) { | }); | replaceText(`${dependency}-version`, process.versions[dependency])
} |
aots.rs | // Workaround rustfmt bug:
// https://github.com/rust-lang/rustfmt/issues/3794
#[path = "common.rs"]
mod common;
use std::convert::TryFrom;
use std::path::Path;
use itertools::Itertools;
use tinyvec::tiny_vec;
use allsorts::binary::read::ReadScope;
use allsorts::error::ShapingError;
use allsorts::gpos::{self, Placement};
use allsorts::gsub::{self, FeatureInfo, Features, GlyphOrigin, RawGlyph};
use allsorts::layout::{new_layout_cache, GDEFTable, LayoutTable, GPOS, GSUB};
use allsorts::tables::cmap::{Cmap, CmapSubtable, EncodingId, PlatformId};
use allsorts::tables::{HheaTable, HmtxTable, MaxpTable, OffsetTable, OpenTypeData, OpenTypeFont};
use allsorts::tag;
use crate::common::read_fixture;
fn cmap_test(font: &str, platform: u16, encoding: u16, inputs: &[u32], expected: &[u16]) {
let font_buffer = read_fixture(Path::new("tests/aots").join(font));
let font_file = ReadScope::new(&font_buffer)
.read::<OpenTypeFont>()
.expect("error reading font file");
let ttf = match font_file.data {
OpenTypeData::Single(offset_table) => offset_table,
OpenTypeData::Collection(_) => panic!("expected a TTF font"),
};
let cmap = ttf
.read_table(&font_file.scope, tag::CMAP)
.unwrap()
.unwrap()
.read::<Cmap>()
.unwrap();
let encoding_record = cmap
.find_subtable(PlatformId(platform), EncodingId(encoding))
.unwrap();
let cmap_subtable = cmap
.scope
.offset(usize::try_from(encoding_record.offset).unwrap())
.read::<CmapSubtable<'_>>()
.unwrap();
let actual = inputs
.iter()
.map(|char_code| cmap_subtable.map_glyph(*char_code).unwrap().unwrap_or(0))
.collect_vec();
assert_eq!(actual, expected);
}
// uvs stands for Unicode Variation Selector and relates to CMAP Format 14
fn cmap_uvs_test(font: &str, inputs: &[u32], expected: &[u32]) {
let font_buffer = read_fixture(Path::new("tests/aots").join(font));
let font_file = ReadScope::new(&font_buffer)
.read::<OpenTypeFont>()
.expect("error reading font file");
let ttf = match font_file.data {
OpenTypeData::Single(offset_table) => offset_table,
OpenTypeData::Collection(_) => panic!("expected a TTF font"),
};
let cmap = ttf
.read_table(&font_file.scope, tag::CMAP)
.unwrap()
.unwrap()
.read::<Cmap>()
.unwrap();
let encoding_record = cmap
.find_subtable(PlatformId::UNICODE, EncodingId::WINDOWS_UNICODE_UCS4)
.unwrap();
let _cmap_subtable = cmap
.scope
.offset(usize::try_from(encoding_record.offset).unwrap())
.read::<CmapSubtable<'_>>()
.unwrap();
let actual: Vec<u32> = Vec::with_capacity(inputs.len() / 2);
for chunk in inputs.chunks(2) {
let _char_code = chunk[0];
let _variation_selector = chunk[1];
// TODO: Implement when we support CMAP Format 14
}
assert_eq!(actual, expected);
}
fn gsub_test(
font: &str,
script: &str,
language: &str,
features: &str,
glyph_ids: &[u16],
expected: &[u16],
) {
let script_tag = tag::from_string(script).unwrap();
let opt_lang_tag = tag::from_string(language).ok();
let feature_tag = tag::from_string(features).unwrap();
let features = Features::Custom(vec![FeatureInfo {
feature_tag,
alternate: None,
}]);
// Load font
let font_buffer = read_fixture(Path::new("tests/aots").join(font));
let font_file = ReadScope::new(&font_buffer)
.read::<OpenTypeFont>()
.expect("error reading font file");
let ttf = match font_file.data {
OpenTypeData::Single(offset_table) => offset_table,
OpenTypeData::Collection(_) => panic!("expected a TTF font"),
};
let mut glyphs = glyph_ids
.iter()
.map(|glyph_id| make_direct_glyph(*glyph_id))
.collect();
// Do gsub
shape_ttf(
&font_file.scope,
ttf,
script_tag,
opt_lang_tag,
&features,
&mut glyphs,
)
.unwrap();
let glyph_indices = glyphs.into_iter().map(|g| g.glyph_index).collect_vec();
assert_eq!(glyph_indices, expected);
}
fn gpos_test(
font: &str,
script: &str,
language: &str,
features: &str,
glyph_ids: &[u16],
xdeltas: &[i32],
ydeltas: &[i32],
_refpos: Option<&[i16]>,
_components: Option<&[i8]>,
) {
let script = tag::from_string(script).unwrap();
let opt_lang_tag = tag::from_string(language).ok();
let features = Features::Custom(vec![FeatureInfo {
feature_tag: tag::from_string(features).unwrap(),
alternate: None,
}]);
// Load font
let font_buffer = read_fixture(Path::new("tests/aots").join(font));
let font_file = ReadScope::new(&font_buffer)
.read::<OpenTypeFont>()
.expect("error reading font file");
let ttf = match font_file.data {
OpenTypeData::Single(offset_table) => offset_table,
OpenTypeData::Collection(_) => panic!("expected a TTF font"),
};
let maxp_data = ttf
.read_table(&font_file.scope, tag::MAXP)
.unwrap()
.unwrap();
let maxp = maxp_data.read::<MaxpTable>().unwrap();
let hhea_data = ttf
.read_table(&font_file.scope, tag::HHEA)
.unwrap()
.unwrap();
let hhea = hhea_data.read::<HheaTable>().unwrap();
let hmtx_data = ttf
.read_table(&font_file.scope, tag::HMTX)
.unwrap()
.unwrap();
let hmtx = hmtx_data
.read_dep::<HmtxTable>((
usize::from(maxp.num_glyphs),
usize::from(hhea.num_h_metrics),
))
.unwrap();
let gpos_record = ttf
.read_table(&font_file.scope, tag::GPOS)
.unwrap()
.unwrap();
let gpos_table = gpos_record.read::<LayoutTable<GPOS>>().unwrap();
let opt_gdef_table = match ttf.find_table_record(tag::GDEF) {
Some(gdef_record) => Some(
gdef_record
.read_table(&font_file.scope)
.unwrap()
.read::<GDEFTable>()
.unwrap(),
),
None => None,
};
let mut glyphs = glyph_ids
.iter()
.map(|glyph_id| make_direct_glyph(*glyph_id))
.collect();
// Apply GSUB if table is present
if ttf.find_table_record(tag::GSUB).is_some() {
shape_ttf(
&font_file.scope,
ttf,
script,
opt_lang_tag,
&features,
&mut glyphs,
)
.unwrap();
}
// Apply GPOS
// Rc<RefCell<LayoutCacheData<T>>> LayoutCache<GSUB>;
let cache = new_layout_cache(gpos_table);
let script = cache
.layout_table
.find_script_or_default(script)
.unwrap()
.unwrap();
let langsys = script
.find_langsys_or_default(opt_lang_tag)
.unwrap()
.unwrap();
let mut infos = gpos::Info::init_from_glyphs(opt_gdef_table.as_ref(), glyphs);
if let Features::Custom(features) = features {
gpos::apply_features(
&cache,
&cache.layout_table,
opt_gdef_table.as_ref(),
&langsys,
features.iter().copied(),
&mut infos,
)
.unwrap();
} else {
unreachable!()
}
let pos = glyph_positions(&infos, &hmtx, hhea.num_h_metrics);
let actual_x_deltas: Vec<i32> = pos
.iter()
.enumerate()
.map(|(i, (x, _))| *x - 1500 * i as i32)
.collect();
assert_eq!(actual_x_deltas, xdeltas);
let actual_y_deltas: Vec<i32> = pos.iter().map(|(_, y)| *y).collect();
assert_eq!(actual_y_deltas, ydeltas);
}
fn glyph_positions(infos: &[gpos::Info], hmtx: &HmtxTable, num_h_metrics: u16) -> Vec<(i32, i32)> {
let mut pos = Vec::new();
let mut x = 0;
let y = 0;
for i in 0..infos.len() {
let glyph_info = &infos[i];
let horizontal_advance = if i == 0 {
0
} else {
let info = &infos[i - 1];
i32::from(
hmtx.horizontal_advance(info.glyph.glyph_index, num_h_metrics)
.unwrap(),
)
};
let width = if glyph_info.kerning != 0 {
horizontal_advance + i32::from(glyph_info.kerning)
} else {
horizontal_advance
};
// Adjust for distance placement
match glyph_info.placement {
Placement::Distance(dx, dy) => {
pos.push((x + horizontal_advance + dx, y + dy));
}
Placement::None => {
pos.push((x + horizontal_advance, y));
}
}
x += width;
}
pos
}
// Variant of `bin/shape::shape_ttf`
fn shape_ttf<'a>(
scope: &ReadScope<'a>,
ttf: OffsetTable<'a>,
script_tag: u32,
opt_lang_tag: Option<u32>,
features: &Features,
glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> |
fn make_direct_glyph(glyph_index: u16) -> RawGlyph<()> {
RawGlyph {
unicodes: tiny_vec![],
glyph_index: glyph_index,
liga_component_pos: 0,
glyph_origin: GlyphOrigin::Direct,
small_caps: false,
multi_subst_dup: false,
is_vert_alt: false,
fake_bold: false,
fake_italic: false,
extra_data: (),
variation: None,
}
}
mod aots {
use super::*;
include!("aots/testcases.rs");
}
| {
let gsub_record = ttf.find_table_record(tag::GSUB).unwrap();
let gsub_table = gsub_record
.read_table(&scope)?
.read::<LayoutTable<GSUB>>()?;
let num_glyphs = ttf
.read_table(&scope, tag::MAXP)
.unwrap()
.unwrap()
.read::<MaxpTable>()?
.num_glyphs;
let opt_gdef_table = match ttf.find_table_record(tag::GDEF) {
Some(gdef_record) => Some(gdef_record.read_table(&scope)?.read::<GDEFTable>()?),
None => None,
};
let cache = new_layout_cache(gsub_table);
gsub::apply(
0,
&cache,
opt_gdef_table.as_ref(),
script_tag,
opt_lang_tag,
features,
num_glyphs,
glyphs,
)?;
Ok(())
} |
formula_result.py | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sheet
from enum import IntFlag
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
|
else:
from ...lo.sheet.formula_result import FormulaResult as FormulaResult
class FormulaResultEnum(IntFlag):
"""
Enum of Const Class FormulaResult
used to select different result types of cell formulas.
"""
VALUE = FormulaResult.VALUE
"""
selects numeric results.
"""
STRING = FormulaResult.STRING
"""
selects non-numeric results.
"""
ERROR = FormulaResult.ERROR
"""
selects errors.
"""
__all__ = ['FormulaResult', 'FormulaResultEnum']
| from com.sun.star.sheet import FormulaResult as FormulaResult
if hasattr(FormulaResult, '_constants') and isinstance(FormulaResult._constants, dict):
FormulaResult._constants['__ooo_ns__'] = 'com.sun.star.sheet'
FormulaResult._constants['__ooo_full_ns__'] = 'com.sun.star.sheet.FormulaResult'
FormulaResult._constants['__ooo_type_name__'] = 'const'
def build_enum():
global FormulaResultEnum
ls = [f for f in dir(FormulaResult) if not callable(getattr(FormulaResult, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(FormulaResult, name)
FormulaResultEnum = IntFlag('FormulaResultEnum', _dict)
build_enum() |
_color.py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="cone.colorbar.title.font", **kwargs
): | role=kwargs.pop("role", "style"),
**kwargs
) | super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"), |
opurtbot-remote.py | import discord
from discord.ext import tasks, commands
import asyncio
import socketio
import threading
import subprocess
import time
from queue import Queue, Empty
from threading import Thread
from requests import get
import os
import re
import boto3
import utils
client = boto3.client('ec2')
chat_reg = re.compile("<[^ ]+>")
active_players = set()
class SpinupThread (threading.Thread):
def __init__(self, ):
threading.Thread.__init__(self)
def run(self):
client = Spinup()
client.run(os.environ['DISCORD_TOKEN'])
class ServerThread (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
run_minecraft([])
class Spinup(discord.Client):
def __init__(self):
super().__init__()
self.voting = False
self.voted = set()
self.running = False
self.upsince = 0
self.voteStarted = 0
self.voteChannel = None
self.locked = False
self.dimensional_rift = None
self.ip = None
self.vc = None
self.sock = None
self.sock_connected = False
async def on_ready(self):
print('Logged on as {0}!'.format(self.user))
self.dimensional_rift = discord.utils.get(self.get_all_channels(), name = "dimensional-rift")
self.server_status = discord.utils.get(self.get_all_channels(), name = "server-status")
async def | (self, message):
print(message.author.id, message.channel.name, message.channel.id)
if message.channel.name == 'dimensional-rift':
# this is a message sent from minecraft
if (message.author == client.user) and message.content.startswith("```"):
return
await self.sock.emit('discord-chat', {
"task" : 'message-minecraft',
"message" : message.content,
"user" : message.author.nick
})
if message.content.startswith('#purge'):
summary = {}
num = int(message.content.split(" ")[1])
if num > 10:
num = 10
num += 1
if 'admin' in [r.name for r in message.author.roles]:
history = await message.channel.history(limit = 100).flatten()
for m in history[:num]:
if m.author.display_name not in summary:
summary[m.author.display_name] = 1
else:
summary[m.author.display_name] += 1
summary_msg = ">>> "
for n in summary:
summary_msg += n + ": " + str(summary[n]) + "\n"
await message.channel.delete_messages(history[:num])
await message.channel.send(summary_msg)
# TODO: Put these in a dictionary or smth
if message.content == "!clipthat":
print(message.author.voice.channel)
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./wardell_clipthat.mp3")
)
while self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == "!yessir":
print(message.author.voice.channel)
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./wardell_yessir.mp3")
)
while self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == "!yooo":
print(message.author.voice.channel)
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./csgo_niceknife.mp3")
)
while self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == '!bwaaa':
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./victory.mp3")
)
while self.vc and self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == '!bwaa':
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./imposter_victory.mp3")
)
while self.vc and self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == '!delib':
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./naruto_deliberation.mp3")
)
while self.vc and self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
elif message.content == '!!delib':
if self.vc:
await self.vc.disconnect()
self.vc = None
if message.content.startswith("!spinup"):
if self.voting:
await message.channel.send("you mf clown there's already an active vote")
elif self.running:
await message.channel.send("the server is already up u fool")
elif self.locked:
await message.channel.send("the server is locked! nathan's probably playing valorant...")
else:
if (message.author.id == 279456734773510145) and not message.content.endswith("nosudo"):
await self.spinup(message)
else:
await message.channel.send("starting vote, need 5 people to confirm. you have 3 MINUTES to vote [type `!yes` to vote, `!no` to cancel your existing vote]")
self.voteChannel = message.channel
self.voteStarted = time.time()
self.voting = True
self.voted = set()
elif message.content.startswith("!whois"):
if len(active_players):
res = "players currently on: \n```"
for p in active_players:
res += " - " + p + "\n"
await message.channel.send(res + "```")
else:
await message.channel.send("no one is on, hop in!")
elif message.content.startswith("!lock"):
if (message.author.id == 279456734773510145):
await message.channel.send("the server is locked and cannot be spun up")
self.locked = True
if self.voting:
await message.channel.send("the active vote has been cancelled")
self.voting = False
self.voted = set()
elif message.content.startswith("!unlock"):
if (message.author.id == 279456734773510145):
await message.channel.send("the server is unlocked can can be spun up")
self.locked = False
elif message.content.startswith("!help"):
await message.channel.send("""
`!spinup` - starts a vote to spin up the minecraft server, type `!yes` to vote, `!no` to cancel
`!spindown` - spins down the minecraft server, there is NO voting process
`!ip` - returns the IP address of the server
`!isup` - checks if the server is currently up/starting up
`!uptime` - returns the uptime of the server in seconds
""")
elif message.content.startswith("!yes"):
if message.author not in self.voted and self.voting:
self.voted.add(message.author)
await message.channel.send("%s out of 5 votes recorded" % len(self.voted))
if len(self.voted) == 5:
# spin up the mineraft server
await self.spinup(message)
elif message.content.startswith("!no"):
if message.author in self.voted and self.voting:
self.voted.remove(message.author)
await message.channel.send("%s out of 5 votes recorded" % len(self.voted))
elif message.content.startswith("!spindown"):
await message.channel.send("spinning down the minecraft server")
# tell the minecraft server to gracefully shut down
await self.sock.emit("quit")
# dc from the websocket connection
await self.sock.disconnect()
# then spin down the server
utils.alter_instance(os.environ['EC2_INSTANCE_ID'], state = 'OFF')
self.running = False
elif message.content.startswith("!isup"):
if self.running:
await message.channel.send("the server IS up")
else:
await message.channel.send("the server is NOT up")
elif message.content.startswith("!uptime"):
if self.running:
await message.channel.send("the server has been up for %s seconds" % ((time.time() - self.upsince)))
else:
await message.channel.send("the server is not currently up")
elif message.content.startswith("!ip"):
self.ip = client.describe_instances()['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicIp']
await message.channel.send("`%s:25565`" % (self.ip))
async def spinup(self, message):
self.ip = client.describe_instances()['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicIp']
await message.channel.send("vote succeeded, spinning up minecraft @ %s:25565" % self.ip)
self.voting = False
self.voted = set()
if (not self.running):
# spin up the server
utils.alter_instance(os.environ['EC2_INSTANCE_ID'], state = 'ON')
self.running = True
self.upsince = time.time()
client = Spinup()
c = 0
async def check_messages(ctx):
await ctx.wait_until_ready()
sock = socketio.AsyncClient(logger = True, reconnection_attempts=1)
@sock.event
def connect():
ctx.sock_connected = True
print("I'm connected!")
@sock.event
async def connect_error():
print("The connection failed!")
@sock.event
def disconnect():
ctx.sock_connected = False
print("I'm disconnected!")
@sock.on("joinleave")
async def joinleave(data):
if data['task'] == 'message-discord-joinleave':
user = data['user']
message = data['message']
await ctx.dimensional_rift.send(message)
@sock.on('minecraft-chat')
async def chat(data):
if data['task'] == 'message-discord':
#channel = discord.utils.get(ctx.get_all_channels(), name = "dimensional-rift")
#print(channel)
if not data['message'].endswith("Disconnected"):
await ctx.dimensional_rift.send("```diff\n+ <%s> %s```" % (data['user'], data['message']))
last_message = None
prev_topic = ""
c = 0
while True:
c += 1
# establish connection to the aws instance
# we're going to run this every 2 seconds
if ctx.running and (time.time() - ctx.upsince) > 100 and not ctx.sock_connected and c % 20 == 0:
try:
instances = client.describe_instances()
ip_addr = instances['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicIp']
await sock.connect(url = 'https://{}:5000'.format(ip_addr))
except:
print("attempted to connect and failed.")
ctx.sock = sock
if ctx.dimensional_rift and ctx.server_status:
if not last_message:
last_message = ctx.server_status.last_message_id
# set the topic of the chat
statuses = []
statuses.append("ON @ %s" % ctx.ip if ctx.running else "OFF")
statuses.append("LOCKED" if ctx.locked else "UNLOCKED")
if ctx.voting:
statuses.append("VOTING")
topic = "SERVER: "
for status in statuses:
topic += status + ", "
topic = topic[:-2]
if len(active_players) and ctx.running:
topic += " | "
for player in active_players:
topic += player + ", "
topic = topic[:-2]
elif len(active_players) == 0 and ctx.running:
topic += " | no one is on, hop on!"
if topic != prev_topic:
print("EDITING TOPIC: %s, %s" % (prev_topic, topic))
# delete the last message
if last_message:
try:
if type(last_message) == int:
msg = await ctx.server_status.fetch_message(last_message)
await msg.delete()
else:
await last_message.delete()
except Exception as e:
print(e)
last_message = await ctx.server_status.send(topic)
prev_topic = topic
if (time.time() - ctx.voteStarted) > 180 and ctx.voting:
ctx.voting = False
ctx.voted = set()
await ctx.voteChannel.send("sorry! the vote has ended, type `!spinup` to start another vote")
elif int(time.time() - ctx.voteStarted) == 120 and ctx.voting:
ctx.voteStarted -= 1 # this is so fucking janky. we only want this message sent once, so we rely on the 0.1 second resolution of the check_messages function. we subtract one from voteStarted to simulate a second of time passing, ensuring this message is only sent once.
await ctx.voteChannel.send("the vote will end in 1 MINUTE")
elif int(time.time() - ctx.voteStarted) == 60 and ctx.voting:
ctx.voteStarted -= 1
await ctx.voteChannel.send("the vote will end in 2 MINUTES")
"""
while not outq.empty():
item = outq.get()
if item['task'] == 'message-discord':
#channel = discord.utils.get(ctx.get_all_channels(), name = "dimensional-rift")
#print(channel)
if not item['message'].endswith("Disconnected"):
await ctx.dimensional_rift.send("```diff\n+ <%s> %s```" % (item['user'], item['message']))
elif item['task'] == 'message-discord-joinleave':
user = item['user']
message = item['message']
await ctx.dimensional_rift.send(message)
"""
await asyncio.sleep(0.1)
async def main():
pass
if __name__ == '__main__':
client.loop.create_task(check_messages(client))
client.run(os.environ['DISCORD_TOKEN'])
#loop = asyncio.get_event_loop()
#loop.run_until_complete(client.start(os.environ['DISCORD_TOKEN']))
#loop.close()
#print("closed")
#asyncio.run(main())
| on_message |
backbone.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def | (self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = False # args.masks
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, False)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| __init__ |
train_config.py | """
# -*- coding: utf-8 -*-
-----------------------------------------------------------------------------------
# Author: Nguyen Mau Dung
# DoC: 2020.08.17
# email: [email protected]
-----------------------------------------------------------------------------------
# Description: The configurations of the project will be defined here
"""
import os
import argparse
import torch
from easydict import EasyDict as edict
import kitti_config as cnf
def parse_train_configs():
parser = argparse.ArgumentParser(description='The Implementation using PyTorch')
parser.add_argument('--seed', type=int, default=2020,
help='re-produce the results with seed random')
parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',
help='The name using for saving logs, models,...')
parser.add_argument('--root-dir', type=str, default='../', metavar='PATH',
help='The ROOT working directory')
####################################################################
############## Model configs ########################
####################################################################
parser.add_argument('--arch', type=str, default='fpn_resnet_18', metavar='ARCH',
help='The name of the model architecture')
parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',
help='the path of the pretrained checkpoint')
####################################################################
############## Dataloader and Running configs #######
####################################################################
parser.add_argument('--hflip_prob', type=float, default=0.5,
help='The probability of horizontal flip')
parser.add_argument('--no-val', action='store_true',
help='If true, dont evaluate the model on the val set')
parser.add_argument('--num_samples', type=int, default=None,
help='Take a subset of the dataset to run and debug')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of threads for loading data')
parser.add_argument('--batch_size', type=int, default=16,
help='mini-batch size (default: 16), this is the total'
'batch size of all GPUs on the current node when using'
'Data Parallel or Distributed Data Parallel')
parser.add_argument('--print_freq', type=int, default=50, metavar='N',
help='print frequency (default: 50)')
parser.add_argument('--tensorboard_freq', type=int, default=50, metavar='N',
help='frequency of saving tensorboard (default: 50)')
parser.add_argument('--checkpoint_freq', type=int, default=2, metavar='N',
help='frequency of saving checkpoints (default: 5)')
####################################################################
############## Training strategy ####################
####################################################################
parser.add_argument('--start_epoch', type=int, default=1, metavar='N',
help='the starting epoch')
parser.add_argument('--num_epochs', type=int, default=300, metavar='N',
help='number of total epochs to run')
parser.add_argument('--lr_type', type=str, default='cosin',
help='the type of learning rate scheduler (cosin or multi_step or one_cycle)')
parser.add_argument('--lr', type=float, default=0.003, metavar='LR',
help='initial learning rate')
parser.add_argument('--minimum_lr', type=float, default=1e-7, metavar='MIN_LR',
help='minimum learning rate during training')
parser.add_argument('--momentum', type=float, default=0.949, metavar='M',
help='momentum')
parser.add_argument('-wd', '--weight_decay', type=float, default=0., metavar='WD',
help='weight decay (default: 0.)')
parser.add_argument('--optimizer_type', type=str, default='adam', metavar='OPTIMIZER',
help='the type of optimizer, it can be sgd or adam')
parser.add_argument('--steps', nargs='*', default=[150, 180],
help='number of burn in step')
####################################################################
############## Loss weight ##########################
####################################################################
####################################################################
############## Distributed Data Parallel ############
####################################################################
parser.add_argument('--world-size', default=-1, type=int, metavar='N',
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int, metavar='N',
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--gpu_idx', default=0, type=int,
help='GPU index to use.')
parser.add_argument('--no_cuda', action='store_true',
help='If true, cuda is not used.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
####################################################################
############## Evaluation configurations ###################
####################################################################
parser.add_argument('--evaluate', action='store_true',
help='only evaluate the model, not training')
parser.add_argument('--resume_path', type=str, default=None, metavar='PATH',
help='the path of the resumed checkpoint')
parser.add_argument('--K', type=int, default=50,
help='the number of top K')
configs = edict(vars(parser.parse_args()))
####################################################################
############## Hardware configurations #############################
####################################################################
configs.device = torch.device('cpu' if configs.no_cuda else 'cuda')
configs.ngpus_per_node = torch.cuda.device_count()
configs.pin_memory = True
configs.input_size = (cnf.BEV_WIDTH, cnf.BEV_HEIGHT)
configs.down_ratio = 2
configs.hm_size = (cnf.BEV_WIDTH/configs.down_ratio, cnf.BEV_HEIGHT/configs.down_ratio)
configs.max_objects = 50
configs.imagenet_pretrained = True
configs.head_conv = 256
configs.num_classes = 1
configs.num_center_offset = 2
configs.num_z = 1
configs.num_dim = 3
configs.num_direction = 2 # sin, cos 8 for bin cos sin
configs.voxel_size = [0.16, 0.16, 4]
configs.point_cloud_range =[0, -34.56, -2.73, 69.12, 34.56, 1.27]
configs.max_number_of_points_per_voxel = 100
configs.heads = {
'hm_cen': configs.num_classes,
'cen_offset': configs.num_center_offset,
'direction': configs.num_direction,
'z_coor': configs.num_z,
'dim': configs.num_dim
}
configs.num_input_features = 4
####################################################################
############## Dataset, logs, Checkpoints dir ######################
####################################################################
configs.dataset_dir = '/media/wx/File/data/kittidata'
configs.checkpoints_dir = os.path.join(configs.root_dir, 'checkpoints', configs.saved_fn)
configs.logs_dir = os.path.join(configs.root_dir, 'logs', configs.saved_fn)
if not os.path.isdir(configs.checkpoints_dir):
os.makedirs(configs.checkpoints_dir)
if not os.path.isdir(configs.logs_dir): | os.makedirs(configs.logs_dir)
return configs | |
SignUp.js | import React, { useState } from 'react';
import LockOutlinedIcon from '@material-ui/icons/LockOutlined';
import {makeStyles, Link, Avatar, Button, CssBaseline, TextField, Grid, Typography, Container} from '@material-ui/core';
import axios from 'axios'
import {useHistory} from 'react-router-dom';
const header = {withCredentials: true}
const UseStyles = makeStyles((theme) => ({
paper: {
marginTop: theme.spacing(8),
display: 'flex',
flexDirection: 'column',
alignItems: 'center',
},
avatar: {
margin: theme.spacing(1),
backgroundColor: theme.palette.secondary.main,
},
form: {
width: '100%', // Fix IE 11 issue.
marginTop: theme.spacing(3),
},
submit: {
margin: theme.spacing(3, 0, 2),
},
}));
function SignUp() {
const classes = UseStyles();
const [email, setEamil] = useState();
const [password, setPassword] = useState();
const [re_password, setRePassword] = useState();
const history = useHistory();
const sendParams = {
header,
Email: email,
Password: password
}
const SignUpHandler = () => {
if(email === '' || email === undefined){
alert("이메일을 입력해 주세요");
}
else if(password === '' || password === undefined){
alert("비밀번호를 입력해 주세요");
}
else if(re_password === '' || re_password === undefined){
alert("비밀번호를 한번더 입력해 주세요");
}
else if(password !== re_password){
alert("비밀번호가 일치하지 않습니다.");
}
else{
axios
.post('http://localhost:3002/users/signup', sendParams)
.then(res => {
if(res.data.message){
alert(res.data.message);
if(res.data.duplicate === '1'){ // 이메일 중복
history.push('/signup');
}
else{ // 회원가입 성공
history.push('/');
}
}
else{
alert("회원 가입 실패");
}
})
.catch(err => {
console.log(err);
})
}
}
// useEffect(() => {
// async function fetchData(){
// const request = await axios.get('http://localhost:3002/api')
// console.log(request);
// return request;
// }
// fetchData();
// })
return (
<Container component="main" maxWidth="xs">
<CssBaseline />
<div className={classes.paper}>
<Avatar className={classes.avatar}>
<LockOutlinedIcon />
</Avatar>
<Typography component="h1" variant="h5">
Sign up
</Typography>
<form className={classes.form} noValidate>
<Grid container spacing={3}>
<Grid item xs={12}>
<TextField
variant="outlined"
required
fullWidth
id="email"
label="Email Address"
name="email"
autoComplete="email"
onChange={res => setEamil(res.target.value)}
/>
</Grid>
<Grid item xs={12}> | variant="outlined"
required
fullWidth
name="password"
label="Password"
type="password"
id="password"
autoComplete="current-password"
onChange={res => setPassword(res.target.value)}
/>
</Grid>
<Grid item xs={12}>
<TextField
variant="outlined"
required
fullWidth
name="password"
label="Password"
type="password"
id="password"
autoComplete="current-password"
onChange={res => setRePassword(res.target.value)}
/>
</Grid>
</Grid>
<Button
type="submit"
fullWidth
variant="contained"
color="primary"
className={classes.submit}
onClick={SignUpHandler}
href='#'
>
Sign Up
</Button>
<Grid container>
<Grid item>
<Link href="/" variant="body2">
{"Go to SignIn"}
</Link>
</Grid>
</Grid>
</form>
</div>
</Container>
);
}
export default SignUp | <TextField |
Day18.py | from itertools import permutations
def snailfishAdd(num1: list, num2: list) -> list:
result = ["["] + num1 + num2 + ["]"]
changeMade = True
while changeMade:
changeAlreadyMade = False
nestedLevel = 0
for i in range(len(result)):
char = result[i]
if char == "[":
|
elif char == "]":
nestedLevel -= 1
if char.isnumeric() and nestedLevel >= 5:
for b in range(i-1, -1, -1):
if result[b].isnumeric():
result[b] = str(int(result[b]) + int(result[i]))
break
for f in range(i+2, len(result)):
if result[f].isnumeric():
result[f] = str(int(result[f]) + int(result[i+1]))
break
for j in range(i+2, i-2, -1):
result.pop(j)
result.insert(i-1, "0")
changeAlreadyMade = True
break
if not changeAlreadyMade:
for i in range(len(result)):
char = result[i]
if char.isnumeric() and int(char) > 9:
val1 = int(char) // 2
val2 = int(char) // 2 + int(char) % 2
result.pop(i)
result.insert(i, "]")
result.insert(i, str(val2))
result.insert(i, str(val1))
result.insert(i, "[")
changeAlreadyMade = True
break
changeMade = False
if changeAlreadyMade:
changeMade = True
return result
def getMag(num : str) -> int:
left, right = "",""
nestedLevel = 0
for i in range(2, len(num)-1, 2):
char = num[i]
if char == "[":
nestedLevel += 1
elif char == "]":
nestedLevel -= 1
if nestedLevel == 0:
left = num[2:i+1]
right = num[i+2:len(num)-2]
break
left = int(left[0]) if len(left) == 1 else getMag(left)
right = int(right[-1]) if len(right) == 1 else getMag(right)
return 3*left + 2*right
def part1():
values = [list(i) for i in open("input.txt").read().split("\n")]
for i in range(len(values)):
values[i] = list(filter(lambda a: a != ',', values[i]))
result = values[0]
for i in range(1, len(values)):
result = snailfishAdd(result, values[i])
return getMag(" ".join(result))
def part2():
values = [list(i) for i in open("input.txt").read().split("\n")]
for i in range(len(values)):
values[i] = list(filter(lambda a: a != ',', values[i]))
maxMag = -float("inf")
count = 0
for i in permutations(values, 2):
count += 1
result = snailfishAdd(i[0], i[1])
maxMag = max(maxMag, getMag(" ".join(result)))
return maxMag
print(f"answer to part1: {part1()}")
print(f"answer to part2: {part2()}") | nestedLevel += 1 |
chain_info.go | package chain_info
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"math/rand"
"net/http"
"net/url"
"path"
"time"
"github.com/google/go-github/github"
"github.com/spf13/viper"
"github.com/strangelove-ventures/lens/client"
"golang.org/x/sync/errgroup"
)
type ChainInfo struct {
Schema string `json:"$schema"`
ChainName string `json:"chain_name"`
Status string `json:"status"`
NetworkType string `json:"network_type"`
PrettyName string `json:"pretty_name"`
ChainID string `json:"chain_id"`
Bech32Prefix string `json:"bech32_prefix"`
DaemonName string `json:"daemon_name"`
NodeHome string `json:"node_home"`
Genesis struct {
GenesisURL string `json:"genesis_url"`
} `json:"genesis"`
Slip44 int `json:"slip44"`
Codebase struct {
GitRepo string `json:"git_repo"`
RecommendedVersion string `json:"recommended_version"`
CompatibleVersions []string `json:"compatible_versions"`
} `json:"codebase"`
Peers struct {
Seeds []struct {
ID string `json:"id"`
Address string `json:"address"`
Provider string `json:"provider,omitempty"`
} `json:"seeds"`
PersistentPeers []struct {
ID string `json:"id"`
Address string `json:"address"`
} `json:"persistent_peers"`
} `json:"peers"`
Apis struct {
RPC []struct {
Address string `json:"address"`
Provider string `json:"provider"`
} `json:"rpc"`
Rest []struct {
Address string `json:"address"`
Provider string `json:"provider"`
} `json:"rest"`
} `json:"apis"`
}
func (c ChainInfo) GetAllRPCEndpoints() (out []string, err error) {
for _, endpoint := range c.Apis.RPC {
u, err := url.Parse(endpoint.Address)
if err != nil {
return nil, err
}
var port string
if u.Scheme == "https" {
port = "443"
} else {
port = u.Port()
}
out = append(out, fmt.Sprintf("%s://%s:%s", u.Scheme, u.Hostname(), port))
}
return
}
func IsHealthyRPC(endpoint string) error {
cl, err := client.NewRPCClient(endpoint, 5*time.Second)
if err != nil {
return err
}
stat, err := cl.Status(context.Background())
if err != nil {
return err
}
if stat.SyncInfo.CatchingUp {
return errors.New("still catching up")
}
return nil
}
func (c ChainInfo) GetRPCEndpoints() (out []string, err error) {
allRPCEndpoints, err := c.GetAllRPCEndpoints()
if err != nil {
return nil, err
}
var eg errgroup.Group
var endpoints []string
for _, endpoint := range allRPCEndpoints {
endpoint := endpoint
eg.Go(func() error {
err := IsHealthyRPC(endpoint)
if err != nil {
log.Printf("ignoring endpoint %s due to error %s", endpoint, err)
return nil
}
log.Printf("verified healthy endpoint %s", endpoint)
endpoints = append(endpoints, endpoint)
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
return endpoints, nil
}
func (c ChainInfo) GetRandomRPCEndpoint() (string, error) {
rpcs, err := c.GetRPCEndpoints()
if err != nil {
return "", err
}
if len(rpcs) == 0 {
return "", fmt.Errorf("no working RPCs found")
}
randomGenerator := rand.New(rand.NewSource(time.Now().UnixNano()))
return rpcs[randomGenerator.Intn(len(rpcs))], nil
}
func (c ChainInfo) GetAssetList() (AssetList, error) {
cl := github.NewClient(http.DefaultClient)
chainFileName := path.Join(c.ChainName, "chain.json")
ch, _, res, err := cl.Repositories.GetContents(
context.Background(),
"cosmos",
"chain-registry",
chainFileName,
&github.RepositoryContentGetOptions{})
if err != nil || res.StatusCode != 200 {
return AssetList{}, err
}
content, err := ch.GetContent()
if err != nil {
return AssetList{}, err
}
var assetList AssetList
if err := json.Unmarshal([]byte(content), &assetList); err != nil {
return AssetList{}, err
}
return assetList, nil
}
func (c ChainInfo) GetChainConfig() (*client.ChainClientConfig, error) {
debug := viper.GetBool("debug")
home := viper.GetString("home")
assetList, err := c.GetAssetList()
if err != nil {
return nil, err
} | var gasPrices string
if len(assetList.Assets) > 0 {
gasPrices = fmt.Sprintf("%.2f%s", 0.01, assetList.Assets[0].Base)
}
rpc, err := c.GetRandomRPCEndpoint()
if err != nil {
return nil, err
}
return &client.ChainClientConfig{
Key: "default",
ChainID: c.ChainID,
RPCAddr: rpc,
AccountPrefix: c.Bech32Prefix,
KeyringBackend: "test",
GasAdjustment: 1.2,
GasPrices: gasPrices,
KeyDirectory: home,
Debug: debug,
Timeout: "20s",
OutputFormat: "json",
SignModeStr: "direct",
}, nil
} | |
shipping.py | def | ():
print("calc shipping")
| calc_shipping |
gateway.go | package daemon
import (
"context"
"fmt"
"net"
"os"
"os/exec"
"strings"
"github.com/alauda/felix/ipsets"
kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/ovs"
"github.com/kubeovn/kube-ovn/pkg/util"
"github.com/vishvananda/netlink"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
)
const (
SubnetSet = "subnets"
SubnetNatSet = "subnets-nat"
LocalPodSet = "local-pod-ip-nat"
OtherNodeSet = "other-node"
IPSetPrefix = "ovn"
)
func (c *Controller) runGateway() {
if err := c.setIPSet(); err != nil {
klog.Errorf("failed to set gw ipsets")
}
if err := c.setIptables(); err != nil {
klog.Errorf("failed to set gw iptables")
}
if err := c.setGatewayBandwidth(); err != nil {
klog.Errorf("failed to set gw bandwidth, %v", err)
}
if err := c.setICGateway(); err != nil {
klog.Errorf("failed to set ic gateway, %v", err)
}
if err := c.setExGateway(); err != nil {
klog.Errorf("failed to set ex gateway, %v", err)
}
c.appendMssRule()
}
func (c *Controller) setIPSet() error {
protocols := make([]string, 2)
if c.protocol == kubeovnv1.ProtocolDual {
protocols[0] = kubeovnv1.ProtocolIPv4
protocols[1] = kubeovnv1.ProtocolIPv6
} else {
protocols[0] = c.protocol
}
for _, protocol := range protocols {
if c.ipset[protocol] == nil {
continue
}
subnets, err := c.getSubnetsCIDR(protocol)
if err != nil {
klog.Errorf("get subnets failed, %+v", err)
return err
}
localPodIPs, err := c.getLocalPodIPsNeedNAT(protocol)
if err != nil {
klog.Errorf("get local pod ips failed, %+v", err)
return err
}
subnetsNeedNat, err := c.getSubnetsNeedNAT(protocol)
if err != nil {
klog.Errorf("get need nat subnets failed, %+v", err)
return err
}
otherNode, err := c.getOtherNodes(protocol)
if err != nil {
klog.Errorf("failed to get node, %+v", err)
return err
}
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: SubnetSet,
Type: ipsets.IPSetTypeHashNet,
}, subnets)
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: LocalPodSet,
Type: ipsets.IPSetTypeHashIP,
}, localPodIPs)
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: SubnetNatSet,
Type: ipsets.IPSetTypeHashNet,
}, subnetsNeedNat)
c.ipset[protocol].AddOrReplaceIPSet(ipsets.IPSetMetadata{
MaxSize: 1048576,
SetID: OtherNodeSet,
Type: ipsets.IPSetTypeHashNet,
}, otherNode)
c.ipset[protocol].ApplyUpdates()
}
return nil
}
func (c *Controller) addIPSetMembers(setID, subnet, ip string) error {
podSubnet, err := c.subnetsLister.Get(subnet)
if err != nil {
klog.Errorf("get subnet %s failed, %+v", subnet, err)
return err
}
if !podSubnet.Spec.NatOutgoing ||
podSubnet.Spec.Vpc != util.DefaultVpc ||
podSubnet.Spec.GatewayType != kubeovnv1.GWDistributedType {
return nil
}
podIPs := strings.Split(ip, ",")
if protocol := util.CheckProtocol(ip); protocol == kubeovnv1.ProtocolDual {
c.ipset[kubeovnv1.ProtocolIPv4].AddMembers(setID, []string{podIPs[0]})
c.ipset[kubeovnv1.ProtocolIPv6].AddMembers(setID, []string{podIPs[1]})
c.ipset[kubeovnv1.ProtocolIPv4].ApplyUpdates()
c.ipset[kubeovnv1.ProtocolIPv6].ApplyUpdates()
} else {
c.ipset[protocol].AddMembers(setID, []string{podIPs[0]})
c.ipset[protocol].ApplyUpdates()
}
return nil
}
func (c *Controller) removeIPSetMembers(setID, subnet, ip string) error {
if subnet == "" || ip == "" {
return nil
}
podSubnet, err := c.subnetsLister.Get(subnet)
if err != nil {
klog.Errorf("get subnet %s failed, %+v", subnet, err)
return err
}
if !podSubnet.Spec.NatOutgoing ||
podSubnet.Spec.Vpc != util.DefaultVpc ||
podSubnet.Spec.GatewayType != kubeovnv1.GWDistributedType {
return nil
}
podIPs := strings.Split(ip, ",")
if protocol := util.CheckProtocol(ip); protocol == kubeovnv1.ProtocolDual {
c.ipset[kubeovnv1.ProtocolIPv4].RemoveMembers(setID, []string{podIPs[0]})
c.ipset[kubeovnv1.ProtocolIPv6].RemoveMembers(setID, []string{podIPs[1]})
c.ipset[kubeovnv1.ProtocolIPv4].ApplyUpdates()
c.ipset[kubeovnv1.ProtocolIPv6].ApplyUpdates()
} else {
c.ipset[protocol].RemoveMembers(setID, []string{podIPs[0]})
c.ipset[protocol].ApplyUpdates()
}
return nil
}
func (c *Controller) setIptables() error {
klog.V(3).Infoln("start to set up iptables")
node, err := c.nodesLister.Get(c.config.NodeName)
if err != nil {
klog.Errorf("failed to get node %s, %v", c.config.NodeName, err)
return err
}
hostIP := util.GetNodeInternalIP(*node)
var (
v4Rules = []util.IPTableRule{
// Prevent performing Masquerade on external traffic which arrives from a Node that owns the Pod/Subnet IP
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn40subnets src -m set ! --match-set ovn40other-node src -m set --match-set ovn40local-pod-ip-nat dst -j RETURN`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn40subnets src -m set ! --match-set ovn40other-node src -m set --match-set ovn40subnets-nat dst -j RETURN`, " ")},
// NAT if pod/subnet to external address
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn40local-pod-ip-nat src -m set ! --match-set ovn40subnets dst -j MASQUERADE`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn40subnets-nat src -m set ! --match-set ovn40subnets dst -j MASQUERADE`, " ")},
// masq traffic from hostport/nodeport
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(fmt.Sprintf(`-o ovn0 ! -s %s -j MASQUERADE`, hostIP), " ")},
// Input Accept
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn40subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn40subnets dst -j ACCEPT`, " ")},
// Forward Accept
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn40subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn40subnets dst -j ACCEPT`, " ")},
}
v6Rules = []util.IPTableRule{
// Prevent performing Masquerade on external traffic which arrives from a Node that owns the Pod/Subnet IP
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60local-pod-ip-nat dst -j RETURN`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60subnets-nat dst -j RETURN`, " ")},
// NAT if pod/subnet to external address
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn60local-pod-ip-nat src -m set ! --match-set ovn60subnets dst -j MASQUERADE`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set --match-set ovn60subnets-nat src -m set ! --match-set ovn60subnets dst -j MASQUERADE`, " ")},
// masq traffic from hostport/nodeport
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(fmt.Sprintf(`-o ovn0 ! -s %s -j MASQUERADE`, hostIP), " ")},
// Input Accept
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn60subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "FORWARD", Rule: strings.Split(`-m set --match-set ovn60subnets dst -j ACCEPT`, " ")},
// Forward Accept
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn60subnets src -j ACCEPT`, " ")},
{Table: "filter", Chain: "INPUT", Rule: strings.Split(`-m set --match-set ovn60subnets dst -j ACCEPT`, " ")},
}
)
protocols := make([]string, 2)
if c.protocol == kubeovnv1.ProtocolDual {
protocols[0] = kubeovnv1.ProtocolIPv4
protocols[1] = kubeovnv1.ProtocolIPv6
} else {
protocols[0] = c.protocol
}
for _, protocol := range protocols {
if c.iptable[protocol] == nil {
continue
}
var iptableRules []util.IPTableRule
if protocol == kubeovnv1.ProtocolIPv4 {
iptableRules = v4Rules
} else {
iptableRules = v6Rules
}
iptableRules[0], iptableRules[1], iptableRules[3], iptableRules[4] =
iptableRules[4], iptableRules[3], iptableRules[1], iptableRules[0]
for _, iptRule := range iptableRules {
if strings.Contains(strings.Join(iptRule.Rule, " "), "ovn0") && protocol != util.CheckProtocol(hostIP) {
klog.V(3).Infof("ignore check iptable rule, protocol %v, hostIP %v", protocol, hostIP)
continue
}
exists, err := c.iptable[protocol].Exists(iptRule.Table, iptRule.Chain, iptRule.Rule...)
if err != nil {
klog.Errorf("check iptable rule exist failed, %+v", err)
return err
}
if !exists {
klog.Infof("iptables rules %s not exist, recreate iptables rules", strings.Join(iptRule.Rule, " "))
if err := c.iptable[protocol].Insert(iptRule.Table, iptRule.Chain, 1, iptRule.Rule...); err != nil {
klog.Errorf("insert iptable rule %s failed, %+v", strings.Join(iptRule.Rule, " "), err)
return err
}
}
}
}
return nil
}
func (c *Controller) setGatewayBandwidth() error {
node, err := c.config.KubeClient.CoreV1().Nodes().Get(context.Background(), c.config.NodeName, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get node, %v", err)
return err
}
ingress, egress := node.Annotations[util.IngressRateAnnotation], node.Annotations[util.EgressRateAnnotation]
ifaceId := fmt.Sprintf("node-%s", c.config.NodeName)
return ovs.SetInterfaceBandwidth(ifaceId, egress, ingress)
}
func (c *Controller) setICGateway() error {
node, err := c.config.KubeClient.CoreV1().Nodes().Get(context.Background(), c.config.NodeName, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get node, %v", err)
return err
}
enable := node.Labels[util.ICGatewayLabel]
if enable == "true" {
icEnabled, err := ovs.Exec(ovs.IfExists, "get", "open", ".", "external_ids:ovn-is-interconn")
if err != nil {
return fmt.Errorf("failed to get if ic enabled, %v", err)
}
if strings.Trim(icEnabled, "\"") != "true" {
if _, err := ovs.Exec("set", "open", ".", "external_ids:ovn-is-interconn=true"); err != nil {
return fmt.Errorf("failed to enable ic gateway, %v", err)
}
output, err := exec.Command("/usr/share/ovn/scripts/ovn-ctl", "restart_controller").CombinedOutput()
if err != nil {
return fmt.Errorf("failed to restart ovn-controller, %v, %q", err, output)
}
}
} else {
if _, err := ovs.Exec("set", "open", ".", "external_ids:ovn-is-interconn=false"); err != nil {
return fmt.Errorf("failed to disable ic gateway, %v", err)
}
}
return nil
}
func (c *Controller) setExGateway() error {
node, err := c.config.KubeClient.CoreV1().Nodes().Get(context.Background(), c.config.NodeName, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get node, %v", err)
return err
}
enable := node.Labels[util.ExGatewayLabel]
if enable == "true" {
cm, err := c.config.KubeClient.CoreV1().ConfigMaps("kube-system").Get(context.Background(), util.ExternalGatewayConfig, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get ovn-external-gw-config, %v", err)
return err
}
link, err := netlink.LinkByName(cm.Data["external-gw-nic"])
if err != nil {
klog.Errorf("failed to get nic %s, %v", cm.Data["external-gw-nic"], err)
return err
}
if err := netlink.LinkSetUp(link); err != nil {
klog.Errorf("failed to set gateway nic %s up, %v", cm.Data["external-gw-nic"], err)
return err
}
if _, err := ovs.Exec(
ovs.MayExist, "add-br", "br-external", "--",
ovs.MayExist, "add-port", "br-external", cm.Data["external-gw-nic"],
); err != nil {
return fmt.Errorf("failed to enable external gateway, %v", err) | return fmt.Errorf("failed to get external-ids, %v", err)
}
bridgeMappings := "external:br-external"
if output != "" && !util.IsStringIn(bridgeMappings, strings.Split(output, ",")) {
bridgeMappings = fmt.Sprintf("%s,%s", output, bridgeMappings)
}
output, err = ovs.Exec("set", "open", ".", fmt.Sprintf("external-ids:ovn-bridge-mappings=%s", bridgeMappings))
if err != nil {
return fmt.Errorf("failed to set bridg-mappings, %v: %q", err, output)
}
} else {
if _, err := ovs.Exec(
ovs.IfExists, "del-br", "br-external"); err != nil {
return fmt.Errorf("failed to disable external gateway, %v", err)
}
}
return nil
}
func (c *Controller) getLocalPodIPsNeedNAT(protocol string) ([]string, error) {
var localPodIPs []string
hostname := os.Getenv("KUBE_NODE_NAME")
allPods, err := c.podsLister.List(labels.Everything())
if err != nil {
klog.Errorf("list pods failed, %+v", err)
return nil, err
}
for _, pod := range allPods {
if pod.Spec.HostNetwork ||
pod.Status.PodIP == "" ||
pod.Annotations[util.LogicalSwitchAnnotation] == "" {
continue
}
subnet, err := c.subnetsLister.Get(pod.Annotations[util.LogicalSwitchAnnotation])
if err != nil {
klog.Errorf("get subnet %s failed, %+v", pod.Annotations[util.LogicalSwitchAnnotation], err)
continue
}
nsGWType := subnet.Spec.GatewayType
nsGWNat := subnet.Spec.NatOutgoing
if nsGWNat &&
subnet.Spec.Vpc == util.DefaultVpc &&
nsGWType == kubeovnv1.GWDistributedType &&
pod.Spec.NodeName == hostname {
if len(pod.Status.PodIPs) == 2 && protocol == kubeovnv1.ProtocolIPv6 {
localPodIPs = append(localPodIPs, pod.Status.PodIPs[1].IP)
} else if util.CheckProtocol(pod.Status.PodIP) == protocol {
localPodIPs = append(localPodIPs, pod.Status.PodIP)
}
}
}
klog.V(3).Infof("local pod ips %v", localPodIPs)
return localPodIPs, nil
}
func (c *Controller) getSubnetsNeedNAT(protocol string) ([]string, error) {
var subnetsNeedNat []string
subnets, err := c.subnetsLister.List(labels.Everything())
if err != nil {
klog.Errorf("list subnets failed, %v", err)
return nil, err
}
for _, subnet := range subnets {
if subnet.Spec.Vpc == util.DefaultVpc &&
subnet.Spec.GatewayType == kubeovnv1.GWCentralizedType &&
util.GatewayContains(subnet.Spec.GatewayNode, c.config.NodeName) &&
(subnet.Spec.Protocol == kubeovnv1.ProtocolDual || subnet.Spec.Protocol == protocol) &&
subnet.Spec.NatOutgoing {
cidrBlock := getCidrByProtocol(subnet.Spec.CIDRBlock, protocol)
subnetsNeedNat = append(subnetsNeedNat, cidrBlock)
}
}
return subnetsNeedNat, nil
}
func (c *Controller) getSubnetsCIDR(protocol string) ([]string, error) {
subnets, err := c.subnetsLister.List(labels.Everything())
if err != nil {
klog.Error("failed to list subnets")
return nil, err
}
ret := make([]string, 0, len(subnets)+3)
if c.config.NodeLocalDNSIP != "" && net.ParseIP(c.config.NodeLocalDNSIP) != nil && util.CheckProtocol(c.config.NodeLocalDNSIP) == protocol {
ret = append(ret, c.config.NodeLocalDNSIP)
}
for _, sip := range strings.Split(c.config.ServiceClusterIPRange, ",") {
if util.CheckProtocol(sip) == protocol {
ret = append(ret, sip)
}
}
for _, subnet := range subnets {
if subnet.Spec.Vpc == util.DefaultVpc {
cidrBlock := getCidrByProtocol(subnet.Spec.CIDRBlock, protocol)
ret = append(ret, cidrBlock)
}
}
return ret, nil
}
func (c *Controller) getOtherNodes(protocol string) ([]string, error) {
nodes, err := c.nodesLister.List(labels.Everything())
if err != nil {
klog.Error("failed to list nodes")
return nil, err
}
ret := make([]string, 0, len(nodes)-1)
for _, node := range nodes {
if node.Name == c.config.NodeName {
continue
}
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeInternalIP {
if util.CheckProtocol(addr.Address) == protocol {
ret = append(ret, addr.Address)
}
}
}
}
return ret, nil
}
//Generally, the MTU of the interface is set to 1400. But in special cases, a special pod (docker indocker) will introduce the docker0 interface to the pod. The MTU of docker0 is 1500.
//The network application in pod will calculate the TCP MSS according to the MTU of docker0, and then initiate communication with others. After the other party sends a response, the kernel protocol stack of Linux host will send ICMP unreachable message to the other party, indicating that IP fragmentation is needed, which is not supported by the other party, resulting in communication failure.
func (c *Controller) appendMssRule() {
if c.config.Iface != "" && c.config.MSS > 0 {
rule := fmt.Sprintf("-p tcp --tcp-flags SYN,RST SYN -o %s -j TCPMSS --set-mss %d", c.config.Iface, c.config.MSS)
MssMangleRule := util.IPTableRule{
Table: "mangle",
Chain: "POSTROUTING",
Rule: strings.Split(rule, " "),
}
switch c.protocol {
case kubeovnv1.ProtocolIPv4:
c.updateMssRuleByProtocol(c.protocol, MssMangleRule)
case kubeovnv1.ProtocolIPv6:
c.updateMssRuleByProtocol(c.protocol, MssMangleRule)
case kubeovnv1.ProtocolDual:
c.updateMssRuleByProtocol(kubeovnv1.ProtocolIPv4, MssMangleRule)
c.updateMssRuleByProtocol(kubeovnv1.ProtocolIPv6, MssMangleRule)
}
}
}
func (c *Controller) updateMssRuleByProtocol(protocol string, MssMangleRule util.IPTableRule) {
exists, err := c.iptable[protocol].Exists(MssMangleRule.Table, MssMangleRule.Chain, MssMangleRule.Rule...)
if err != nil {
klog.Errorf("check iptable rule %v failed, %+v", MssMangleRule.Rule, err)
return
}
if !exists {
klog.Infof("iptables rules %s not exist, append iptables rules", strings.Join(MssMangleRule.Rule, " "))
if err := c.iptable[protocol].Append(MssMangleRule.Table, MssMangleRule.Chain, MssMangleRule.Rule...); err != nil {
klog.Errorf("append iptable rule %v failed, %+v", MssMangleRule.Rule, err)
return
}
}
}
func getCidrByProtocol(cidr, protocol string) string {
var cidrStr string
if util.CheckProtocol(cidr) == kubeovnv1.ProtocolDual {
cidrBlocks := strings.Split(cidr, ",")
if protocol == kubeovnv1.ProtocolIPv4 {
cidrStr = cidrBlocks[0]
} else if protocol == kubeovnv1.ProtocolIPv6 {
cidrStr = cidrBlocks[1]
}
} else {
cidrStr = cidr
}
return cidrStr
} | }
output, err := ovs.Exec(ovs.IfExists, "get", "open", ".", "external-ids:ovn-bridge-mappings")
if err != nil { |
routes.py | #!/usr/bin/env python
# coding=utf-8
"""
__created__ = '06/01/2017'
__author__ = 'deling.ma'
"""
from aio_rest.routes import RouteCollector, Route
from example.views import publish, IndexView
routes = RouteCollector(prefix='/app', routes=[
Route('/', IndexView),
Route('/publish', publish, method='GET'), | ]) |
|
cows_bulls.py | '''
File name: pythonpractice.py
Author: Hannah Lewis
Date created: 08/03/2020
Date last modified: 08/03/2020
Python Version: 3.7
'''
import random
def main():
| __name__ == '__main__':
print("Ready to Cows and Bulls?")
main() # Runs exercise
| '''
Create a program that will play the “cows and bulls” game with the user.
'''
print("You will try to guess a random 4-digit number.")
print("A 'cow' is a correct digit in the correct place.")
print("A 'bull' is a correct digit in the wrong place.")
print("The game ends when you get 4 cows!\n")
print("You can type 'exit' at any time to end the game.\n")
num = str(random.randint(10000, 99999))[1:5] # Get random number, remove first digit so that first digit can be 0
guess = input("Give me your best guess: ") # Get first guess
count = 0
cow = 0
bull = 0
guessing = True
while guessing:
assert len(guess) == 4, "Input must be 4-digits long."
if guess == 'exit': # Player can exit at any time
print("The number was " + str(num) + ".")
print("Better luck next time.")
guessing = False
break
count += 1
for i in range(0,4): # Compare digits
if num[i] == guess[i]:
cow+=1
elif num[i] in guess:
bull+=1
print("You got {} cows, and {} bulls.".format(cow,bull)) # How many cows and bulls
if cow == 4: # If all digits are correct
if count == 1:
print("You got it on the first try!")
guessing = False
if count > 1:
print("You got it! It took you", count, "tries.")
print("The number was " + str(num) + ".")
guessing = False
else: # Guess again
cow = bull = 0
guess = input("Guess again: ")
#TODO: ask if they want to play another game
return
if |
dfllmul.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::DFLLMUL {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits };
let mut w = W { bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct MULR {
bits: u16,
}
impl MULR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct FSTEPR {
bits: u8,
}
impl FSTEPR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 |
}
#[doc = r" Value of the field"]
pub struct CSTEPR {
bits: u8,
}
impl CSTEPR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _MULW<'a> {
w: &'a mut W,
}
impl<'a> _MULW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FSTEPW<'a> {
w: &'a mut W,
}
impl<'a> _FSTEPW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 255;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CSTEPW<'a> {
w: &'a mut W,
}
impl<'a> _CSTEPW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 63;
const OFFSET: u8 = 26;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - DFLL Multiply Factor"]
#[inline]
pub fn mul(&self) -> MULR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
MULR { bits }
}
#[doc = "Bits 16:23 - Fine Maximum Step"]
#[inline]
pub fn fstep(&self) -> FSTEPR {
let bits = {
const MASK: u8 = 255;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) as u8
};
FSTEPR { bits }
}
#[doc = "Bits 26:31 - Coarse Maximum Step"]
#[inline]
pub fn cstep(&self) -> CSTEPR {
let bits = {
const MASK: u8 = 63;
const OFFSET: u8 = 26;
((self.bits >> OFFSET) & MASK as u32) as u8
};
CSTEPR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - DFLL Multiply Factor"]
#[inline]
pub fn mul(&mut self) -> _MULW {
_MULW { w: self }
}
#[doc = "Bits 16:23 - Fine Maximum Step"]
#[inline]
pub fn fstep(&mut self) -> _FSTEPW {
_FSTEPW { w: self }
}
#[doc = "Bits 26:31 - Coarse Maximum Step"]
#[inline]
pub fn cstep(&mut self) -> _CSTEPW {
_CSTEPW { w: self }
}
}
| {
self.bits
} |
line-style-outlined.js | (function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory(require("omi"));
else if(typeof define === 'function' && define.amd)
define(["omi"], factory);
else if(typeof exports === 'object')
exports["line-style-outlined"] = factory(require("omi"));
else
root["line-style-outlined"] = factory(root["Omi"]);
})(this, function(__WEBPACK_EXTERNAL_MODULE_omi__) {
return /******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = "./esm/line-style-outlined.js");
/******/ })
/************************************************************************/
/******/ ({
/***/ "./esm/line-style-outlined.js":
/*!************************************!*\
!*** ./esm/line-style-outlined.js ***!
\************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
eval("\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar omi_1 = __webpack_require__(/*! omi */ \"omi\");\nvar createSvgIcon_1 = __webpack_require__(/*! ./utils/createSvgIcon */ \"./esm/utils/createSvgIcon.js\");\nexports.default = createSvgIcon_1.default(omi_1.h(\"path\", {\n d: \"M3 16h5v-2H3v2zm6.5 0h5v-2h-5v2zm6.5 0h5v-2h-5v2zM3 20h2v-2H3v2zm4 0h2v-2H7v2zm4 0h2v-2h-2v2zm4 0h2v-2h-2v2zm4 0h2v-2h-2v2zM3 12h8v-2H3v2zm10 0h8v-2h-8v2zM3 4v4h18V4H3z\"\n}), 'LineStyleOutlined');\n\n\n//# sourceURL=webpack://%5Bname%5D/./esm/line-style-outlined.js?");
/***/ }),
/***/ "./esm/utils/createSvgIcon.js":
/*!************************************!*\
!*** ./esm/utils/createSvgIcon.js ***!
\************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) { |
"use strict";
eval("\nvar __extends = (this && this.__extends) || (function () {\n var extendStatics = function (d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };\n return extendStatics(d, b);\n };\n return function (d, b) {\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\n };\n})();\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar omi_1 = __webpack_require__(/*! omi */ \"omi\");\nvar hyphenateRE = /\\B([A-Z])/g;\nvar hyphenate = function (str) {\n return str.replace(hyphenateRE, '-$1').toLowerCase();\n};\nfunction createSvgIcon(path, displayName) {\n var _a;\n omi_1.define(hyphenate('OIcon' + displayName), (_a = /** @class */ (function (_super) {\n __extends(class_1, _super);\n function class_1() {\n return _super !== null && _super.apply(this, arguments) || this;\n }\n class_1.prototype.render = function () {\n return omi_1.h('svg', {\n viewBox: '0 0 24 24',\n }, path);\n };\n return class_1;\n }(omi_1.WeElement)),\n _a.css = \":host {\\n fill: currentColor;\\n width: 1em;\\n height: 1em;\\n display: inline-block;\\n vertical-align: -0.125em;\\n transition: fill 200ms cubic-bezier(0.4, 0, 0.2, 1) 0ms;\\n flex-shrink: 0;\\n user-select: none;\\n}\",\n _a));\n}\nexports.default = createSvgIcon;\n\n\n//# sourceURL=webpack://%5Bname%5D/./esm/utils/createSvgIcon.js?");
/***/ }),
/***/ "omi":
/*!******************************************************************************!*\
!*** external {"commonjs":"omi","commonjs2":"omi","amd":"omi","root":"Omi"} ***!
\******************************************************************************/
/*! no static exports found */
/***/ (function(module, exports) {
eval("module.exports = __WEBPACK_EXTERNAL_MODULE_omi__;\n\n//# sourceURL=webpack://%5Bname%5D/external_%7B%22commonjs%22:%22omi%22,%22commonjs2%22:%22omi%22,%22amd%22:%22omi%22,%22root%22:%22Omi%22%7D?");
/***/ })
/******/ })["default"];
}); | |
kemija_SMyth.py | from pj import *
class KF(enum.Enum):
OTV, ZATV = '()'
class ATOM(Token):
def Mr(self, **atomi):
return pogledaj(atomi,self)
class BROJ(Token):
def vrijednost(self,**_):
return int(self.sadržaj)
class N(Token):
literal='n'
def vrijednost(self, **atomi):
return atomi['n']
def kf_lex(formula):
lex=Tokenizer(formula)
for i, znak in enumerate(iter(lex.čitaj, '')):
print(znak)
if not i and znak=='n' or znak!=')' and lex.slijedi('n'):
raise lex.greška("nema ')' prije n!")
elif znak.isdigit() and znak!='0':
lex.zvijezda(str.isdigit)
yield lex.token(KF.BROJ)
elif znak.isupper():
idući=lex.čitaj()
print('"', idući)
if not idući.islower(): lex.vrati()
yield lex.literal(KF.ATOM)
else: yield lex.literal(KF)
### Beskontekstna gramatika
# formula -> formula skupina | skupina
# skupina -> ATOM BROJ? | OTV formula ZATV (N | BROJ)?
### Apstraktna sintaksna stabla
# Formula: skupine:[(Formula, broj|'n')]
jedan=Token(KF.BROJ,'1')
class KFParser(Parser):
def formula(self):
skupine=[self.skupina()]
while not self>={E.KRAJ,KF.ZATV}:
skupine.append(self.skupina())
return Formula(skupine)
def skupina(self):
if self >> KF.ATOM:
atom=self.zadnji
if self >> KF.BROJ:
broj=self.zadnji
else:
broj=jedan
return (atom,broj)
else:
self.pročitaj(KF.OTV)
f=self.formula()
self.pročitaj(KF.ZATV)
if self >> {KF.N, KF.BROJ}:
broj=self.zadnji
else:
broj=jedan
return (f,broj)
start = formula
class Formula(AST('skupine')):
def Mr(self, **atomi):
suma=0
for skupina, broj in self.skupine:
suma += skupina.Mr(**atomi)*broj.vrijednost(**atomi)
return suma
if __name__=='__main__':
formula=' | CabH3(CabH2)nCabH3'
formula = 'AbcdeF'
tokeni=list(kf_lex(formula))
p=KFParser.parsiraj(tokeni)
print(tokeni,p,p.Mr(Cab=12.01,H=1.008,n=2),sep='\n\n')
|
|
trainer.py | import time
import json
import argparse
import os
import sys
import logging
import shutil
from datetime import datetime
import glob
import random
from scipy.stats import mannwhitneyu
from scipy.stats import spearmanr
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
import tensorflow as tf
import tensorflow_addons as tfa
#from optimization import create_optimizer
from model_attention import ModelAttention
from dataset import build_dataset
from loss import compute_loss
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.config.threading.set_intra_op_parallelism_threads(60)
tf.config.threading.set_inter_op_parallelism_threads(60)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging_formatter = logging.Formatter(
'%(asctime)s - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging_formatter)
logger.addHandler(ch)
class LearningRate(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, base_lr, end_learning_rate, warmup_steps, decay_steps):
super(LearningRate, self).__init__()
self.base_lr = base_lr
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
if decay_steps == 0:
self.poly_decay_fn = lambda x: self.base_lr
else:
self.poly_decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
base_lr,
decay_steps,
end_learning_rate=end_learning_rate,
power=1.0)
def __call__(self, step):
lr = tf.cond(
step < self.warmup_steps, lambda: self.base_lr * tf.cast(
step + 1, tf.float32) / tf.cast(self.warmup_steps, tf.float32),
lambda: self.poly_decay_fn(step - self.warmup_steps))
#if step % 100 == 0:
# tf.print('learning_rate', step, lr)
return lr
class TestMetric(object):
def __init__(self):
self._targets = tf.zeros((0, ), tf.int32)
self._preds = tf.zeros((0, ), tf.float32)
def reset_states(self):
self._targets = tf.zeros((0, ), tf.int32)
self._preds = tf.zeros((0, ), tf.float32)
def update_state(self, targets, preds):
self._targets = tf.concat(
[self._targets, tf.cast(targets, tf.int32)], axis=-1)
self._preds = tf.concat(
[self._preds, tf.cast(preds, tf.float32)], axis=-1)
def result_auROC(self):
try:
auROC = roc_auc_score(self._targets.numpy(), self._preds.numpy())
return auROC
except:
return 0.0
def result_auPR(self):
try:
precision, recall, _ = precision_recall_curve(
self._targets.numpy(), self._preds.numpy())
auPR = auc(recall, precision)
return auPR
except:
return 0.0
def result_pvalue(self):
all_pred = self._preds.numpy()
all_label = self._targets.numpy()
mtest = mannwhitneyu(all_pred[all_label == 1],
all_pred[all_label == 0],
alternative='two-sided')
pvalue = mtest.pvalue
return pvalue
def result_total(self):
res = self._targets.numpy()
return res.shape[0]
def | (self):
res = self._targets.numpy()
return res.shape[0] - np.sum(res)
def result_pos(self):
res = self._targets.numpy()
return np.sum(res)
def result_corr(self):
try:
all_pred = self._preds.numpy()
all_label = self._targets.numpy()
corr, pvalue = spearmanr(all_pred, all_label)
return corr, pvalue
except:
return 0.0
def result_max(self):
try:
all_pred = self._preds.numpy()
return np.max(all_pred)
except:
return 0.0
def train_single_gpu(config, args):
#setup logger
str_t = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
train_dir = f'./res/{str_t}'
config['train']['train_dir'] = train_dir
os.makedirs(train_dir)
os.makedirs(train_dir + '/result')
os.makedirs(train_dir + '/model')
fh = logging.FileHandler(f'{train_dir}/train.log')
fh.setFormatter(logging_formatter)
logger.addHandler(fh)
logger.info(json.dumps(config, indent=4))
#train and validate files
batch_size = config['train']['batch_size']
input_config = config['input']
input_base_dir = input_config['base_dir']
all_files = glob.glob(input_base_dir + '/' + input_config['train'][:-1] +
args.random + '*tfrec')
#all_files = glob.glob('../dataset/tf/f_v1_w64_2021_v2' + '/' +
# input_config['train'][:-1] + args.random + '*tfrec')
random.seed(2020)
random.shuffle(all_files)
train_files, validate_files = [], []
for i in range(10):
if i == args.cv:
validate_files.append(all_files[i])
else:
train_files.append(all_files[i])
print(train_files)
print(validate_files)
asd = glob.glob(input_base_dir + '/' + 'ASD' + '.tfrec')
ndd = glob.glob(input_base_dir + '/' + 'NDD' + '.tfrec')
control = glob.glob(input_base_dir + '/' + 'Control' + '.tfrec')
brca2 = glob.glob(input_base_dir + '/' + 'BRCA2' + '.tfrec')
pparg = glob.glob(input_base_dir + '/' + 'PPARG' + '.tfrec')
#train_files += pparg
train_dataset = build_dataset(train_files, batch_size)
validate_dataset = build_dataset(validate_files, batch_size)
#model
model_type = config['train']['model_type']
if model_type == 'attention':
model = ModelAttention(config['model'])
else:
raise ValueError(f'model type {model_type} does not exist.')
#learning rate
init_learning_rate = config['train']['learning_rate']
end_learning_rate = config['train']['end_learning_rate']
'''
warmup_epochs = config['train']['warmup_epochs']
decay_epochs = config['train']['decay_epochs']
training_samples = 0
for inputs in train_dataset:
training_samples += inputs[0].shape[0]
logger.info(f'training_samples= {training_samples}')
batches_each_epoch = int(training_samples / batch_size)
warmup_steps = batches_each_epoch * warmup_epochs
decay_steps = batches_each_epoch * decay_epochs
'''
warmup_steps, decay_steps = config['train']['warmup_steps'], config[
'train']['decay_steps']
learning_rate = LearningRate(init_learning_rate,
end_learning_rate=end_learning_rate,
warmup_steps=warmup_steps,
decay_steps=decay_steps)
#training algorithm
opt = config['train'].get('opt', 'adam')
if opt == 'adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
#optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
elif opt == 'adamw':
weight_decay_rate = config['train']['weight_decay_rate']
optimizer = tfa.optimizers.AdamW(
weight_decay=weight_decay_rate,
learning_rate=learning_rate,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
)
'''
optimizer = create_optimizer(init_learning_rate,
decay_steps + warmup_steps,
warmup_steps,
end_lr=end_learning_rate,
optimizer_type='adamw')
'''
else:
raise NotImplementedError(f"opt {opt} not NotImplementedError")
#metrics
metric_train_loss = tf.keras.metrics.Mean(name='train_loss')
metric_test_loss = tf.keras.metrics.Mean(name='test_loss')
metric_test = TestMetric()
#summary
train_log_dir = f'{train_dir}/summary/train'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
def _update_histogram_summary():
with train_summary_writer.as_default():
for var in model.trainable_variables:
if 'kernel:' in var.name or 'gamma:' in var.name or 'beta:' in var.name:
tf.summary.histogram(var.name,
var,
step=optimizer.iterations)
def _update_gradient_norm_summary(var, grad):
with train_summary_writer.as_default():
for v, g in zip(var, grad):
if 'kernel:' in v.name or 'gamma:' in v.name or 'beta:' in v.name:
tf.summary.scalar(f'gradient_norm/{v.name}',
tf.norm(g, ord='euclidean'),
step=optimizer.iterations)
@tf.function(input_signature=[validate_dataset.element_spec])
def test_step(sample):
var, ref_aa, alt_aa, feature, label, padding_mask = sample
logit = model((ref_aa, alt_aa, feature), False, padding_mask)
loss = compute_loss(label, logit)
pred = model.predict_from_logit(logit)
return var, label, pred, loss
def _save_res(var_id, target, pred, name, epoch):
with open(f'{train_dir}/result/epoch_{epoch}_{name}.score', 'w') as f:
f.write('var\ttarget\tScore\n')
for a, c, d in zip(var_id, target, pred):
f.write('{}\t{:d}\t{:f}\n'.format(a.numpy().decode('utf-8'),
int(c), d))
return True
def test(test_dataset,
data_name,
epoch,
auc=False,
pvalue=False,
corr=False):
metric_test_loss.reset_states()
metric_test.reset_states()
all_pred, all_label, all_var = [], [], []
for step, sample in enumerate(test_dataset):
var, label, pred, loss = test_step(sample)
metric_test.update_state(label, pred)
metric_test_loss.update_state(loss)
all_pred.extend(list(pred))
all_label.extend(list(label))
all_var.extend(list(var))
all_var = np.array(all_var)
all_label = np.array(all_label)
all_pred = np.array(all_pred)
_save_res(all_var, all_label, all_pred, data_name, epoch)
if auc:
logger.info(
f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} auPR= {metric_test.result_auPR()} auROC= {metric_test.result_auROC()} max= {metric_test.result_max()}'
)
if pvalue:
logger.info(
f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} pvalue= {metric_test.result_pvalue()}'
)
if corr:
corr, pvalue = metric_test.result_corr()
logger.info(
f'{data_name} pos= {metric_test.result_total()} corr= {corr} pvalue= {pvalue} max= {metric_test.result_max()}'
)
return metric_test_loss.result()
@tf.function(input_signature=[train_dataset.element_spec])
def train_step(sample):
var, ref_aa, alt_aa, feature, label, padding_mask = sample
with tf.GradientTape() as tape:
logit = model((ref_aa, alt_aa, feature), True, padding_mask)
loss = compute_loss(label, logit)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
metric_train_loss.update_state(loss)
#if optimizer.iterations % 512 == 0:
# _update_gradient_norm_summary(model.trainable_variables, gradients)
return loss
EPOCHS = 512
watch_loss = 10000.0
watch_epoch = -1
patience_epochs = 5
for epoch in range(EPOCHS):
start = time.time()
for step, samples in enumerate(train_dataset):
loss = train_step(samples)
#tf.print(
# f'lr= {learning_rate(global_step)} wd={weight_decay(global_step)}'
#)
#model summary
if optimizer.iterations == 1:
model.summary(print_fn=logger.info)
#logging kernel weights
#if (optimizer.iterations + 1) % 512 == 0:
# _update_histogram_summary()
logger.info(f'Epoch {epoch} Loss {metric_train_loss.result():.4f}')
metric_train_loss.reset_states()
model.save_weights(f'{train_dir}/model/epoch-{epoch}.h5')
#validate and test
validate_loss = test(validate_dataset,
'validate',
epoch,
pvalue=False,
auc=True,
corr=False)
if validate_loss < watch_loss:
watch_loss = validate_loss
watch_epoch = epoch
#denovo
if epoch - watch_epoch == patience_epochs:
logger.info(f'best_epoch {watch_epoch} min_loss= {watch_loss}')
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--cv', type=int, default=0)
parser.add_argument('--random', type=str, default='0')
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
train_single_gpu(config, args)
if __name__ == '__main__':
main()
| result_neg |
boil_queries.go | // Code generated by SQLBoiler 4.1.2 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
| "github.com/volatiletech/sqlboiler/v4/queries/qm"
)
var dialect = drivers.Dialect{
LQ: 0x60,
RQ: 0x60,
UseIndexPlaceholders: false,
UseLastInsertID: true,
UseSchema: false,
UseDefaultKeyword: false,
UseAutoColumns: false,
UseTopClause: false,
UseOutputClause: false,
UseCaseWhenExistsClause: false,
}
// NewQuery initializes a new Query using the passed in QueryMods
func NewQuery(mods ...qm.QueryMod) *queries.Query {
q := &queries.Query{}
queries.SetDialect(q, &dialect)
qm.Apply(q, mods...)
return q
} | import (
"github.com/volatiletech/sqlboiler/v4/drivers"
"github.com/volatiletech/sqlboiler/v4/queries" |
formatter.go | package cluster
import (
"strings"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/norman/types/values"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/sirupsen/logrus"
)
type Formatter struct {
KontainerDriverLister v3.KontainerDriverLister
}
func (f *Formatter) Formatter(request *types.APIContext, resource *types.RawResource) {
if convert.ToBool(resource.Values["internal"]) {
delete(resource.Links, "remove")
}
shellLink := request.URLBuilder.Link("shell", resource) | resource.AddAction(request, "importYaml")
resource.AddAction(request, "exportYaml")
if _, ok := resource.Values["rancherKubernetesEngineConfig"]; ok {
resource.AddAction(request, "rotateCertificates")
if _, ok := values.GetValue(resource.Values, "rancherKubernetesEngineConfig", "services", "etcd", "backupConfig"); ok {
resource.AddAction(request, "backupEtcd")
resource.AddAction(request, "restoreFromEtcdBackup")
}
}
if err := request.AccessControl.CanDo(v3.ClusterGroupVersionKind.Group, v3.ClusterResource.Name, "update", request, resource.Values, request.Schema); err == nil {
if convert.ToBool(resource.Values["enableClusterMonitoring"]) {
resource.AddAction(request, "disableMonitoring")
resource.AddAction(request, "editMonitoring")
} else {
resource.AddAction(request, "enableMonitoring")
}
}
if convert.ToBool(resource.Values["enableClusterMonitoring"]) {
resource.AddAction(request, "viewMonitoring")
}
if gkeConfig, ok := resource.Values["googleKubernetesEngineConfig"]; ok {
configMap, ok := gkeConfig.(map[string]interface{})
if !ok {
logrus.Errorf("could not convert gke config to map")
return
}
setTrueIfNil(configMap, "enableStackdriverLogging")
setTrueIfNil(configMap, "enableStackdriverMonitoring")
setTrueIfNil(configMap, "enableHorizontalPodAutoscaling")
setTrueIfNil(configMap, "enableHttpLoadBalancing")
setTrueIfNil(configMap, "enableNetworkPolicyConfig")
}
if eksConfig, ok := resource.Values["amazonElasticContainerServiceConfig"]; ok {
configMap, ok := eksConfig.(map[string]interface{})
if !ok {
logrus.Errorf("could not convert eks config to map")
return
}
setTrueIfNil(configMap, "associateWorkerNodePublicIp")
setIntIfNil(configMap, "nodeVolumeSize", 20)
}
}
func setTrueIfNil(configMap map[string]interface{}, fieldName string) {
if configMap[fieldName] == nil {
configMap[fieldName] = true
}
}
func setIntIfNil(configMap map[string]interface{}, fieldName string, replaceVal int) {
if configMap[fieldName] == nil {
configMap[fieldName] = replaceVal
}
} | shellLink = strings.Replace(shellLink, "http", "ws", 1)
shellLink = strings.Replace(shellLink, "/shell", "?shell=true", 1)
resource.Links["shell"] = shellLink
resource.AddAction(request, "generateKubeconfig") |
finder.py | """Docstring
"""
import cv2
#import numpy as np
def find_in_face(haarcascade, rec=False):
"""Press 'k' for quit
"""
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
smile_cascade = cv2.CascadeClassifier(haarcascade)
cap = cv2.VideoCapture(0)
if rec:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 480))
| _, original = cap.read()
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (fx, fy, fw, fh) in faces:
cv2.rectangle(original, pt1=(fx, fy), pt2=(
fx+fw, fy+fh), color=(0, 0, 255), thickness=2)
roi_gray = gray[fy:fy+fh, fx:fx+fw]
roi_color = original[fy:fy+fh, fx:fx+fw]
smiles = smile_cascade.detectMultiScale(roi_gray)
for (sx, sy, sw, sh) in smiles:
cv2.rectangle(roi_color, pt1=(sx, sy), pt2=(
sx+sw, sy+sh), color=(255, 0, 0), thickness=2)
if rec:
out.write(original)
cv2.imshow('Image', original)
if cv2.waitKey(1) & 0xFF == ord('k'):
break
cap.release()
if rec:
out.release()
cv2.destroyAllWindows()
find_in_face('haarcascade_eye.xml', rec=False) | while True: |
ipython.py | import textwrap
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core import magic_arguments
import attr
from .formatting import format_var_dims
from .model import Model
from .utils import variables_dict
setup_template = """
import xsimlab as xs
ds_in = xs.create_setup(
model={model},
clocks={{}},
input_vars={{
{in_vars}
}},
output_vars={{}}
)
"""
def format_var_comment(var, verbose=0):
|
def format_input_vars(
model, skip_default=False, default=False, verbose=0, nested=False
):
lines = []
for pn, vnames in model.input_vars_dict.items():
plines = []
for vn in vnames:
var = variables_dict(type(model[pn]))[vn]
if skip_default and var.default is not attr.NOTHING:
continue
if default and var.default is not attr.NOTHING:
default_val = f"{var.default!r}"
else:
default_val = ""
comment = format_var_comment(var, verbose=verbose)
if nested:
plines.append(comment + f"'{vn}': {default_val},")
else:
lines.append(comment + f"'{pn}__{vn}': {default_val},")
if nested and plines:
pfmt = textwrap.indent("\n".join(plines), " " * 4)
lines.append(f"'{pn}': {{\n{pfmt}\n}},")
return textwrap.indent("\n".join(lines), " " * 8)[8:]
@magics_class
class SimulationMagics(Magics):
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument("model", help="xsimlab.Model object")
@magic_arguments.argument(
"-s",
"--skip-default",
action="store_true",
default=False,
help="Don't add input variables that have default values",
)
@magic_arguments.argument(
"-d",
"--default",
action="store_true",
default=False,
help="Add input variables default values, if any (ignored if --skip-default)",
)
@magic_arguments.argument(
"-v",
"--verbose",
action="count",
default=0,
help="Increase verbosity (i.e., add more input variables info as comments)",
)
@magic_arguments.argument(
"-n",
"--nested",
action="store_true",
default=False,
help="Group input variables by process",
)
def create_setup(self, line=""):
"""Pre-fill the current cell with a new simulation setup."""
args = magic_arguments.parse_argstring(self.create_setup, line)
model_obj = self.shell.user_ns.get(args.model)
if model_obj is None:
raise KeyError(f"Model '{args.model}' not defined or not imported")
elif not isinstance(model_obj, Model):
raise TypeError(f"'{args.model}' is not a xsimlab.Model object")
rendered = setup_template.format(
model=args.model,
in_vars=format_input_vars(
model_obj,
skip_default=args.skip_default,
default=args.default,
verbose=args.verbose,
nested=args.nested,
),
)
content = f"# %create_setup {line}" + rendered
self.shell.set_next_input(content, replace=True)
def load_ipython_extension(ipython):
ipython.register_magics(SimulationMagics)
| comment = ""
if verbose:
var_desc = var.metadata["description"]
if var_desc:
comment += textwrap.fill(
var_desc, width=86, initial_indent="# ", subsequent_indent="# "
)
else:
comment += "# ---"
comment += "\n"
if verbose > 1:
var_dims = format_var_dims(var)
if var_dims:
comment += f"# dimensions: {var_dims}\n"
if var.metadata["static"]:
comment += f"# static: main clock dimension not supported\n"
if verbose > 2:
var_attrs = var.metadata.get("attrs", False)
if var_attrs:
for k, v in var_attrs.items():
comment += f"# {k}: {v}\n"
return comment |
ddb_request_builder.go | package ddb
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph"
)
// DdbRequestBuilder builds and executes requests for operations under \workbooks\{driveItem-id}\workbook\functions\microsoft.graph.ddb
type DdbRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// DdbRequestBuilderPostOptions options for Post
type DdbRequestBuilderPostOptions struct {
//
Body *DdbRequestBody;
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// DdbResponse union type wrapper for classes workbookFunctionResult
type DdbResponse struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{};
// Union type representation for type workbookFunctionResult
workbookFunctionResult *i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.WorkbookFunctionResult;
}
// NewDdbResponse instantiates a new ddbResponse and sets the default values.
func NewDdbResponse()(*DdbResponse) {
m := &DdbResponse{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// GetAdditionalData gets the AdditionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *DdbResponse) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetWorkbookFunctionResult gets the workbookFunctionResult property value. Union type representation for type workbookFunctionResult
func (m *DdbResponse) GetWorkbookFunctionResult()(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.WorkbookFunctionResult) {
if m == nil {
return nil
} else {
return m.workbookFunctionResult
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *DdbResponse) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error))
res["workbookFunctionResult"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.NewWorkbookFunctionResult() })
if err != nil {
return err
}
if val != nil {
m.SetWorkbookFunctionResult(val.(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.WorkbookFunctionResult))
}
return nil
}
return res
}
func (m *DdbResponse) IsNil()(bool) {
return m == nil
}
// Serialize serializes information the current object
func (m *DdbResponse) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
{
err := writer.WriteObjectValue("workbookFunctionResult", m.GetWorkbookFunctionResult())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the AdditionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *DdbResponse) SetAdditionalData(value map[string]interface{})() {
m.additionalData = value
}
// SetWorkbookFunctionResult sets the workbookFunctionResult property value. Union type representation for type workbookFunctionResult
func (m *DdbResponse) SetWorkbookFunctionResult(value *i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.WorkbookFunctionResult)() {
m.workbookFunctionResult = value
}
// NewDdbRequestBuilderInternal instantiates a new DdbRequestBuilder and sets the default values.
func NewDdbRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*DdbRequestBuilder) {
m := &DdbRequestBuilder{
}
m.urlTemplate = "{+baseurl}/workbooks/{driveItem_id}/workbook/functions/microsoft.graph.ddb";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = pathParameters;
m.requestAdapter = requestAdapter;
return m
}
// NewDdbRequestBuilder instantiates a new DdbRequestBuilder and sets the default values.
func | (rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*DdbRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewDdbRequestBuilderInternal(urlParams, requestAdapter)
}
// CreatePostRequestInformation invoke action ddb
func (m *DdbRequestBuilder) CreatePostRequestInformation(options *DdbRequestBuilderPostOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.POST
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", options.Body)
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Post invoke action ddb
func (m *DdbRequestBuilder) Post(options *DdbRequestBuilderPostOptions)(*DdbResponse, error) {
requestInfo, err := m.CreatePostRequestInformation(options);
if err != nil {
return nil, err
}
res, err := m.requestAdapter.SendAsync(*requestInfo, func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewDdbResponse() }, nil)
if err != nil {
return nil, err
}
return res.(*DdbResponse), nil
}
| NewDdbRequestBuilder |
client.rs | use {Rocket, Request};
use local::LocalRequest;
use http::Method;
use http::uri::URI;
use error::LaunchError;
pub struct Client {
rocket: Rocket,
}
impl Client {
#[inline]
pub fn new(rocket: Rocket) -> Result<Client, LaunchError> {
if let Some(err) = rocket.prelaunch_check() {
return Err(err);
}
Ok(Client {
rocket: rocket,
})
} |
#[inline(always)]
pub fn rocket(&self) -> &Rocket {
&self.rocket
}
#[inline(always)]
pub fn req<'c, 'u: 'c, U>(&'c self, method: Method, uri: U) -> LocalRequest<'c>
where U: Into<URI<'u>>
{
let request = Request::new(&self.rocket, method, uri);
LocalRequest::new(&self.rocket, request)
}
#[inline(always)]
pub fn get<'c, 'u: 'c, U: Into<URI<'u>>>(&'c self, uri: U) -> LocalRequest<'c> {
self.req(Method::Get, uri)
}
#[inline(always)]
pub fn put<'c, 'u: 'c, U: Into<URI<'u>>>(&'c self, uri: U) -> LocalRequest<'c> {
self.req(Method::Put, uri)
}
#[inline(always)]
pub fn post<'c, 'u: 'c, U: Into<URI<'u>>>(&'c self, uri: U) -> LocalRequest<'c> {
self.req(Method::Post, uri)
}
#[inline(always)]
pub fn delete<'c, 'u: 'c, U>(&'c self, uri: U) -> LocalRequest<'c>
where U: Into<URI<'u>>
{
self.req(Method::Delete, uri)
}
#[inline(always)]
pub fn options<'c, 'u: 'c, U>(&'c self, uri: U) -> LocalRequest<'c>
where U: Into<URI<'u>>
{
self.req(Method::Options, uri)
}
#[inline(always)]
pub fn head<'c, 'u: 'c, U>(&'c self, uri: U) -> LocalRequest<'c>
where U: Into<URI<'u>>
{
self.req(Method::Head, uri)
}
#[inline(always)]
pub fn patch<'c, 'u: 'c, U>(&'c self, uri: U) -> LocalRequest<'c>
where U: Into<URI<'u>>
{
self.req(Method::Patch, uri)
}
} | |
main.go | /*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"net/http"
"github.com/knative/serving/test"
)
func handler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "What a spaceport!")
}
func | () {
flag.Parse()
test.ListenAndServeGracefully(":8080", handler)
}
| main |
scalar.rs | use std::{cmp, f64::consts::PI, fmt, hash::Hash, ops};
use decorum::R64;
/// A rational, finite scalar value
///
/// This is a wrapper around `f64`. On construction, it checks that the `f64`
/// value is not NaN. This allows `Scalar` to provide implementations of [`Eq`],
/// [`Ord`], and [`Hash`], enabling `Scalar` (and types built on top of it), to
/// be used as keys in hash maps, hash sets, and similar types.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Scalar(f64);
impl Scalar {
/// The `Scalar` instance that represents zero
pub const ZERO: Self = Self(0.);
/// The `Scalar` instance that represents one
pub const ONE: Self = Self(1.);
/// The `Scalar` instance that represents two
pub const TWO: Self = Self(2.);
/// The largest `Scalar` value
pub const MAX: Self = Self(f64::MAX);
/// The `Scalar` instance that represents pi
pub const PI: Self = Self(PI);
/// Construct a `Scalar` from an `f64`
///
/// # Panics
///
/// Panics, if `scalar` is NaN.
pub fn from_f64(scalar: f64) -> Self {
if scalar.is_nan() {
panic!("Invalid scalar value: {scalar}");
} else {
Self(scalar)
}
}
/// Construct a `Scalar` from a `u64`
pub fn from_u64(scalar: u64) -> Self {
Self::from_f64(scalar as f64)
}
/// Convert the scalar into an `f32`
pub fn into_f32(self) -> f32 {
self.0 as f32
}
/// Convert the scalar into an `f64`
pub fn into_f64(self) -> f64 {
self.0
}
/// Convert the scalar into a `u64`
pub fn into_u64(self) -> u64 {
self.0 as u64
}
/// Compute the absolute value of the scalar
pub fn abs(self) -> Self {
self.0.abs().into()
}
/// Compute the maximum of this and another scalar
pub fn max(self, other: Self) -> Self {
self.0.max(other.0).into()
}
/// Compute the smallest integer larger than or equal to this scalar
pub fn ceil(self) -> Self {
self.0.ceil().into()
}
/// Round the scalar
pub fn round(self) -> Self {
self.0.round().into()
}
/// Compute the cosine
pub fn cos(self) -> Self {
self.0.cos().into()
}
/// Compute sine and cosine
pub fn sin_cos(self) -> (Self, Self) {
let (sin, cos) = self.0.sin_cos();
(sin.into(), cos.into())
}
/// Compute the arccosine
pub fn acos(self) -> Self {
self.0.acos().into()
}
/// Compute the four-quadrant arctangent
pub fn atan2(self, other: Self) -> Self {
self.0.atan2(other.0).into()
}
}
impl Eq for Scalar {}
impl PartialOrd for Scalar {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl Ord for Scalar {
fn cmp(&self, other: &Self) -> cmp::Ordering {
// Should never panic, as `from_f64` checks that the wrapped value is
// finite.
self.partial_cmp(other).unwrap()
}
}
impl PartialEq for Scalar {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl Hash for Scalar {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// To the best of my knowledge, this matches the `PartialEq`
// implementation.
R64::from_inner(self.0).hash(state);
}
}
impl From<f32> for Scalar {
fn from(scalar: f32) -> Self {
Self::from_f64(scalar as f64)
}
}
impl From<f64> for Scalar {
fn from(scalar: f64) -> Self {
Self::from_f64(scalar)
}
}
impl From<Scalar> for f64 {
fn from(scalar: Scalar) -> Self {
scalar.into_f64()
}
}
impl ops::Neg for Scalar {
type Output = Self;
fn neg(self) -> Self::Output {
self.0.neg().into()
}
}
impl ops::Add<Self> for Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
self.0.add(rhs.0).into()
}
}
impl ops::AddAssign<Self> for Scalar {
fn add_assign(&mut self, rhs: Self) {
self.0.add_assign(rhs.0);
*self = Self::from_f64(self.0);
}
}
impl ops::Sub<Self> for Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
self.0.sub(rhs.0).into()
}
}
impl ops::Mul<Self> for Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self::Output {
self.0.mul(rhs.0).into()
}
}
impl ops::Mul<f64> for Scalar {
type Output = Self;
fn mul(self, rhs: f64) -> Self::Output {
self.0.mul(rhs).into()
}
}
impl ops::Div<Self> for Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self::Output {
self.0.div(rhs.0).into()
}
}
impl ops::Div<f64> for Scalar {
type Output = Self;
fn div(self, rhs: f64) -> Self::Output {
self.0.div(rhs).into()
}
}
impl ops::Rem<Self> for Scalar {
type Output = Self;
fn rem(self, rhs: Self) -> Self::Output {
self.0.rem(rhs.0).into()
}
}
impl num_traits::Zero for Scalar {
fn zero() -> Self {
Self::ZERO
}
fn is_zero(&self) -> bool {
self.0.is_zero()
}
}
impl num_traits::One for Scalar {
fn one() -> Self {
Self::ONE
}
}
impl num_traits::Num for Scalar {
type FromStrRadixErr = <f64 as num_traits::Num>::FromStrRadixErr;
fn from_str_radix(
str: &str,
radix: u32,
) -> Result<Self, Self::FromStrRadixErr> {
f64::from_str_radix(str, radix).map(Self::from_f64)
}
}
impl num_traits::NumCast for Scalar {
fn from<T: num_traits::ToPrimitive>(n: T) -> Option<Self> {
Some(Self::from_f64(<f64 as num_traits::NumCast>::from(n)?))
}
}
impl num_traits::Signed for Scalar {
fn abs(&self) -> Self {
self.0.abs().into()
}
fn abs_sub(&self, other: &Self) -> Self {
<f64 as num_traits::Signed>::abs_sub(&self.0, &other.0).into()
}
fn signum(&self) -> Self {
<f64 as num_traits::Signed>::signum(&self.0).into()
}
fn is_positive(&self) -> bool {
<f64 as num_traits::Signed>::is_positive(&self.0)
}
fn is_negative(&self) -> bool {
<f64 as num_traits::Signed>::is_negative(&self.0)
}
}
impl num_traits::ToPrimitive for Scalar {
fn to_i64(&self) -> Option<i64> {
self.0.to_i64()
}
fn to_u64(&self) -> Option<u64> {
self.0.to_u64()
}
}
impl num_traits::Float for Scalar {
fn nan() -> Self {
panic!("`Scalar` can not represent NaN")
}
fn infinity() -> Self {
Self::from_f64(f64::infinity())
}
fn neg_infinity() -> Self {
Self::from_f64(f64::neg_infinity())
}
fn neg_zero() -> Self {
Self::from_f64(f64::neg_zero())
}
fn min_value() -> Self {
Self::from_f64(f64::min_value())
}
fn min_positive_value() -> Self {
Self::from_f64(f64::min_positive_value())
}
fn max_value() -> Self {
Self::from_f64(f64::max_value())
}
fn is_nan(self) -> bool {
self.0.is_nan()
}
fn is_infinite(self) -> bool {
self.0.is_infinite()
}
fn is_finite(self) -> bool {
self.0.is_finite()
}
fn is_normal(self) -> bool {
self.0.is_normal()
}
fn classify(self) -> std::num::FpCategory {
self.0.classify()
}
fn | (self) -> Self {
Self::from_f64(self.0.floor())
}
fn ceil(self) -> Self {
Self::from_f64(self.0.ceil())
}
fn round(self) -> Self {
Self::from_f64(self.0.round())
}
fn trunc(self) -> Self {
Self::from_f64(self.0.trunc())
}
fn fract(self) -> Self {
Self::from_f64(self.0.fract())
}
fn abs(self) -> Self {
Self::from_f64(self.0.abs())
}
fn signum(self) -> Self {
Self::from_f64(self.0.signum())
}
fn is_sign_positive(self) -> bool {
self.0.is_sign_positive()
}
fn is_sign_negative(self) -> bool {
self.0.is_sign_negative()
}
fn mul_add(self, a: Self, b: Self) -> Self {
Self::from_f64(self.0.mul_add(a.0, b.0))
}
fn recip(self) -> Self {
Self::from_f64(self.0.recip())
}
fn powi(self, n: i32) -> Self {
Self::from_f64(self.0.powi(n))
}
fn powf(self, n: Self) -> Self {
Self::from_f64(self.0.powf(n.0))
}
fn sqrt(self) -> Self {
Self::from_f64(self.0.sqrt())
}
fn exp(self) -> Self {
Self::from_f64(self.0.exp())
}
fn exp2(self) -> Self {
Self::from_f64(self.0.exp2())
}
fn ln(self) -> Self {
Self::from_f64(self.0.ln())
}
fn log(self, base: Self) -> Self {
Self::from_f64(self.0.log(base.0))
}
fn log2(self) -> Self {
Self::from_f64(self.0.log2())
}
fn log10(self) -> Self {
Self::from_f64(self.0.log10())
}
fn max(self, other: Self) -> Self {
Self::from_f64(self.0.max(other.0))
}
fn min(self, other: Self) -> Self {
Self::from_f64(self.0.min(other.0))
}
fn abs_sub(self, other: Self) -> Self {
(self - other).abs()
}
fn cbrt(self) -> Self {
Self::from_f64(self.0.cbrt())
}
fn hypot(self, other: Self) -> Self {
Self::from_f64(self.0.hypot(other.0))
}
fn sin(self) -> Self {
Self::from_f64(self.0.sin())
}
fn cos(self) -> Self {
Self::from_f64(self.0.cos())
}
fn tan(self) -> Self {
Self::from_f64(self.0.tan())
}
fn asin(self) -> Self {
Self::from_f64(self.0.asin())
}
fn acos(self) -> Self {
Self::from_f64(self.0.acos())
}
fn atan(self) -> Self {
Self::from_f64(self.0.atan())
}
fn atan2(self, other: Self) -> Self {
Self::from_f64(self.0.atan2(other.0))
}
fn sin_cos(self) -> (Self, Self) {
let (sin, cos) = self.0.sin_cos();
(Self::from_f64(sin), Self::from_f64(cos))
}
fn exp_m1(self) -> Self {
Self::from_f64(self.0.exp_m1())
}
fn ln_1p(self) -> Self {
Self::from_f64(self.0.ln_1p())
}
fn sinh(self) -> Self {
Self::from_f64(self.0.sinh())
}
fn cosh(self) -> Self {
Self::from_f64(self.0.cosh())
}
fn tanh(self) -> Self {
Self::from_f64(self.0.tanh())
}
fn asinh(self) -> Self {
Self::from_f64(self.0.asinh())
}
fn acosh(self) -> Self {
Self::from_f64(self.0.acosh())
}
fn atanh(self) -> Self {
Self::from_f64(self.0.atanh())
}
fn integer_decode(self) -> (u64, i16, i8) {
self.0.integer_decode()
}
}
impl fmt::Debug for Scalar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Display for Scalar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl approx::AbsDiffEq for Scalar {
type Epsilon = <f64 as approx::AbsDiffEq>::Epsilon;
fn default_epsilon() -> Self::Epsilon {
f64::default_epsilon()
}
fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
self.0.abs_diff_eq(&other.0, epsilon)
}
}
| floor |
encode.ts | // Copyright 2017-2020 @polkadot/util-crypto authors & contributors
// SPDX-License-Identifier: Apache-2.0
import { u8aToBuffer, u8aToU8a } from '@polkadot/util';
import { bs58 } from './bs58';
/**
* @name base58Encode
* @summary Creates a base58 value.
* @description
* From the provided input, create the base58 and return the result as a string.
*/
export function | (value: Uint8Array | string | Buffer | number[], ipfsCompat?: boolean): string {
const out = bs58.encode(
u8aToBuffer(u8aToU8a(value))
);
return ipfsCompat
? `z${out}`
: out;
}
| base58Encode |
lib.rs | //! This provides two convenience macros to make tests from simple expressions.
//!
//! ```rust
//! # #[macro_use] extern crate assertify;
//! # fn main() {}
//! testify!(add_one_two, 1 + 2 == 3);
//! ```
//!
//! ```rust
//! #[test]
//! fn add_one_two() {
//! assertify!(1 + 2 == 3);
//! }
//! ```
use proc_macro_hack::proc_macro_hack;
/// Assert an expression is true or give a useful error when it isn’t.
///
/// If the expression contains a comparison, e.g. `==`, then the failure message
/// will display the value of both sides of the comparison. Note that the
/// _right_ side will be listed as the “expected” value — think “right” as in
/// “correct.”
///
/// # Examples
///
/// ## Error for a failed comparison
///
/// ```should_panic
/// # #[macro_use] extern crate assertify;
/// # fn main() {
/// assertify!(1 + 2 == 0);
/// # }
/// ```
///
/// Produces:
///
/// ```text
/// ---- tests::fail_simple_eq stdout ----
/// thread 'tests::simple_eq' panicked at 'failed: 1 + 2 == 0
/// actual: 3
/// expected: == 0
/// ', src/lib.rs:96:9
/// ```
///
/// ## Error for other failures
///
/// ```should_panic
/// # #[macro_use] extern crate assertify;
/// # fn main() {
/// assertify!(false);
/// # }
/// ```
///
/// Produces:
///
/// ```text
/// ---- tests::fail_simple_literal stdout ----
/// thread 'tests::fail_simple_literal' panicked at 'failed: false', src/lib.rs:131:9
/// ```
#[proc_macro_hack]
pub use assertify_proc_macros::assertify;
/// Create a test function from an expression.
///
/// `testify!` is essentially a wrapper around [`assertify!`]. It takes two
/// arguments:
///
/// 1. `name`: A name for the test (as a bareword — don’t use quotes).
/// 2. `expression`: The expression to be tested with [`assertify!`].
///
/// # Examples
///
/// The following two examples are equivalent:
///
/// ```rust
/// # #[macro_use] extern crate assertify;
/// # fn main() {}
/// testify!(add_one_two, 1 + 2 == 3);
/// ```
///
/// ```rust
/// #[test]
/// fn add_one_two() {
/// assertify!(1 + 2 == 3);
/// }
/// ```
///
/// [`assertify!`]: macro.assertify.html
pub use assertify_proc_macros::testify;
#[cfg(test)]
mod tests {
pub use super::*;
#[test]
fn trybuild_tests() {
let t = trybuild::TestCases::new();
t.compile_fail("tests/trybuild-failures/*.rs");
}
#[test]
fn assertify_simple_expr() {
assertify! | e_eq, 1 + 2 == 3);
fn add(a: i32, b: i32) -> i32 {
a + b
}
testify!(add_pos, add(1, 2) == 3);
testify!(add_neg, add(-1, 2) == 1);
testify!(add_all_expressions, add(add(1, 1), 5 - 3) == 2 + 5 - 3);
fn concat(a: &str, b: &str) -> String {
let mut s = String::with_capacity(a.len() + b.len());
s.push_str(a);
s.push_str(b);
s
}
testify!(concat_literal, concat("a", "b") == "ab");
fn concat_bytes(a: &[u8], b: &[u8]) -> Vec<u8> {
let mut v = Vec::with_capacity(a.len() + b.len());
v.extend_from_slice(a);
v.extend_from_slice(b);
v
}
testify!(concat_bytes_literals, concat_bytes(b"a", b"b") == b"ab");
fn result(good: bool) -> Result<(), &'static str> {
if good {
Ok(())
} else {
Err("bad")
}
}
testify!(literal_true, true);
testify!(boolean_logic, true && true);
testify!(result_ok, result(true) == Ok(()));
testify!(result_unwrap, result(true).unwrap() == ());
testify!(result_err, result(false) == Err("bad"));
testify!(result_not_ok, result(false) != Ok(()));
testify!(result_not_err, result(false) != Err("nope"));
// FIXME check error messages from should_panic
#[test]
#[should_panic]
fn fail_simple_eq() {
assertify!(1 + 2 == 0);
}
#[test]
#[should_panic]
fn fail_simple_literal() {
assertify!(false);
}
#[test]
#[should_panic]
fn fail_simple_ne() {
assertify!(1 + 2 != 3);
}
#[test]
#[should_panic]
fn fail_simple_gt() {
assertify!(1 + 2 > 4);
}
#[test]
#[should_panic]
fn fail_result_ok() {
assertify!(result(false).unwrap() == ());
}
}
| (1 - 2 == -1);
}
testify!(simpl |
union_ext.rs | use crate::{
JsAnyArrowFunctionParameters, JsAnyBinding, JsAnyClass, JsAnyFunction, JsAnyFunctionBody,
JsClassMemberList, JsExtendsClause, SyntaxResult, SyntaxToken, TsAnyPropertyAnnotation,
TsAnyVariableAnnotation, TsImplementsClause, TsReturnTypeAnnotation, TsTypeAnnotation,
TsTypeParameters,
};
impl JsAnyClass {
pub fn abstract_token(&self) -> Option<SyntaxToken> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.abstract_token(),
JsAnyClass::JsClassExpression(_) => None,
JsAnyClass::JsClassExportDefaultDeclaration(clause) => clause.abstract_token(),
}
}
pub fn class_token(&self) -> SyntaxResult<SyntaxToken> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.class_token(),
JsAnyClass::JsClassExpression(expression) => expression.class_token(),
JsAnyClass::JsClassExportDefaultDeclaration(declaration) => declaration.class_token(),
}
}
pub fn id(&self) -> SyntaxResult<Option<JsAnyBinding>> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.id().map(Some),
JsAnyClass::JsClassExpression(expression) => Ok(expression.id()),
JsAnyClass::JsClassExportDefaultDeclaration(declaration) => Ok(declaration.id()),
}
}
pub fn type_parameters(&self) -> Option<TsTypeParameters> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.type_parameters(),
JsAnyClass::JsClassExpression(expression) => expression.type_parameters(),
JsAnyClass::JsClassExportDefaultDeclaration(clause) => clause.type_parameters(),
}
}
pub fn extends_clause(&self) -> Option<JsExtendsClause> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.extends_clause(),
JsAnyClass::JsClassExpression(expression) => expression.extends_clause(),
JsAnyClass::JsClassExportDefaultDeclaration(declaration) => {
declaration.extends_clause()
}
}
}
pub fn implements_clause(&self) -> Option<TsImplementsClause> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.implements_clause(),
JsAnyClass::JsClassExpression(expression) => expression.implements_clause(),
JsAnyClass::JsClassExportDefaultDeclaration(declaration) => {
declaration.implements_clause()
}
}
}
pub fn l_curly_token(&self) -> SyntaxResult<SyntaxToken> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.l_curly_token(),
JsAnyClass::JsClassExpression(expression) => expression.l_curly_token(),
JsAnyClass::JsClassExportDefaultDeclaration(declaration) => declaration.l_curly_token(),
}
}
pub fn members(&self) -> JsClassMemberList {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.members(),
JsAnyClass::JsClassExpression(expression) => expression.members(),
JsAnyClass::JsClassExportDefaultDeclaration(declaration) => declaration.members(),
}
}
pub fn r_curly_token(&self) -> SyntaxResult<SyntaxToken> {
match self {
JsAnyClass::JsClassDeclaration(declaration) => declaration.r_curly_token(),
JsAnyClass::JsClassExpression(expression) => expression.r_curly_token(),
JsAnyClass::JsClassExportDefaultDeclaration(declaration) => declaration.r_curly_token(),
}
}
}
impl JsAnyFunction {
pub fn async_token(&self) -> Option<SyntaxToken> {
match self {
JsAnyFunction::JsArrowFunctionExpression(expr) => expr.async_token(),
JsAnyFunction::JsFunctionExpression(expr) => expr.async_token(),
JsAnyFunction::JsFunctionDeclaration(declaration) => declaration.async_token(),
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => {
declaration.async_token()
}
}
}
pub fn is_async(&self) -> bool {
self.async_token().is_some()
}
pub fn function_token(&self) -> SyntaxResult<Option<SyntaxToken>> {
match self {
JsAnyFunction::JsArrowFunctionExpression(_) => Ok(None),
JsAnyFunction::JsFunctionExpression(expr) => expr.function_token().map(Some),
JsAnyFunction::JsFunctionDeclaration(declaration) => {
declaration.function_token().map(Some)
}
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => {
declaration.function_token().map(Some)
}
}
}
pub fn star_token(&self) -> Option<SyntaxToken> {
match self {
JsAnyFunction::JsArrowFunctionExpression(_) => None,
JsAnyFunction::JsFunctionExpression(expr) => expr.star_token(),
JsAnyFunction::JsFunctionDeclaration(declaration) => declaration.star_token(),
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => {
declaration.star_token()
}
}
}
pub fn is_generator(&self) -> bool {
self.star_token().is_some()
}
pub fn id(&self) -> SyntaxResult<Option<JsAnyBinding>> {
match self {
JsAnyFunction::JsArrowFunctionExpression(_) => Ok(None),
JsAnyFunction::JsFunctionExpression(expr) => Ok(expr.id()),
JsAnyFunction::JsFunctionDeclaration(declaration) => declaration.id().map(Some),
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => Ok(declaration.id()),
}
}
pub fn type_parameters(&self) -> Option<TsTypeParameters> {
match self {
JsAnyFunction::JsArrowFunctionExpression(expr) => expr.type_parameters(),
JsAnyFunction::JsFunctionExpression(expr) => expr.type_parameters(),
JsAnyFunction::JsFunctionDeclaration(declaration) => declaration.type_parameters(),
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => {
declaration.type_parameters()
}
}
}
pub fn parameters(&self) -> SyntaxResult<JsAnyArrowFunctionParameters> {
match self {
JsAnyFunction::JsArrowFunctionExpression(expr) => expr.parameters(),
JsAnyFunction::JsFunctionExpression(expr) => expr
.parameters()
.map(JsAnyArrowFunctionParameters::JsParameters),
JsAnyFunction::JsFunctionDeclaration(declaration) => declaration
.parameters()
.map(JsAnyArrowFunctionParameters::JsParameters),
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => declaration
.parameters()
.map(JsAnyArrowFunctionParameters::JsParameters),
}
}
pub fn return_type_annotation(&self) -> Option<TsReturnTypeAnnotation> {
match self {
JsAnyFunction::JsArrowFunctionExpression(expr) => expr.return_type_annotation(),
JsAnyFunction::JsFunctionExpression(expr) => expr.return_type_annotation(),
JsAnyFunction::JsFunctionDeclaration(declaration) => {
declaration.return_type_annotation()
}
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => {
declaration.return_type_annotation()
}
}
}
pub fn body(&self) -> SyntaxResult<JsAnyFunctionBody> {
match self {
JsAnyFunction::JsArrowFunctionExpression(expr) => expr.body(),
JsAnyFunction::JsFunctionExpression(expr) => {
expr.body().map(JsAnyFunctionBody::JsFunctionBody)
}
JsAnyFunction::JsFunctionDeclaration(declaration) => {
declaration.body().map(JsAnyFunctionBody::JsFunctionBody)
}
JsAnyFunction::JsFunctionExportDefaultDeclaration(declaration) => {
declaration.body().map(JsAnyFunctionBody::JsFunctionBody)
}
}
}
}
impl TsAnyVariableAnnotation {
pub fn type_annotation(&self) -> SyntaxResult<Option<TsTypeAnnotation>> {
match self {
TsAnyVariableAnnotation::TsDefiniteVariableAnnotation(definite) => {
definite.type_annotation().map(Some)
}
TsAnyVariableAnnotation::TsTypeAnnotation(type_annotation) => {
Ok(Some(type_annotation.clone()))
}
}
}
}
impl TsAnyPropertyAnnotation {
pub fn type_annotation(&self) -> SyntaxResult<Option<TsTypeAnnotation>> {
match self {
TsAnyPropertyAnnotation::TsDefinitePropertyAnnotation(definite) => {
definite.type_annotation().map(Some)
}
TsAnyPropertyAnnotation::TsOptionalPropertyAnnotation(optional) => {
Ok(optional.type_annotation())
} | }
} | TsAnyPropertyAnnotation::TsTypeAnnotation(type_annotation) => {
Ok(Some(type_annotation.clone()))
}
} |
decide.go | package decide
import (
"fmt"
"math/rand"
"strings"
"time"
"github.com/trinchan/slackbot/robots"
)
type bot struct {
Random *rand.Rand
}
func init() {
d := &bot{}
d.Random = rand.New(rand.NewSource(time.Now().UnixNano())) | }
func (d bot) Run(p *robots.Payload) (slashCommandImmediateReturn string) {
go d.DeferredAction(p)
text := strings.TrimSpace(p.Text)
if text == "" {
return "I need something to decide on!"
}
return ""
}
func (d bot) DeferredAction(p *robots.Payload) {
response := robots.IncomingWebhook{
Domain: p.TeamDomain,
Channel: p.ChannelID,
Username: "Fate Bot",
IconEmoji: ":ghost:",
UnfurlLinks: true,
Parse: robots.ParseStyleFull,
}
text := strings.TrimSpace(p.Text)
if text != "" {
split := strings.Split(text, ", ")
response.Text = fmt.Sprintf("@%s: Deciding between: (%s) -> %s", p.UserName, strings.Join(split, ", "), d.decide(split))
response.Send()
}
}
func (d bot) Description() (description string) {
return "Decides your fate!\n\tUsage: /decide Life Death ...\n\tExpected Response: Deciding on (Life, Death, ...)\n\tDecided on Life!"
}
func (d bot) decide(fates []string) (result string) {
n := len(fates)
if n > 0 {
return fates[d.Random.Intn(n)]
}
return fmt.Sprintf("Error")
} | robots.RegisterRobot("decide", d) |
functions.go | package main
import "fmt"
//lets create a function that adds two number pretty simple
func add_two(a, b int) int |
//like c or c++ value passed in a function is passed by value so to create a function which swaps value we have to pass it by reference
func my_swap(a, b *int) {
temp := *a
*a = *b
*b = temp
}
// there is somethin in go known as varidic function
// it is known so because it allows unknown ammount of variables to be passed in function
func add_n(n ...int) int {
ans := 0
for _, j := range n {
ans += j
}
return ans
}
func func_passed(passed_func func(a ...int) int, a ...int) {
fmt.Println(passed_func(a...))
}
func func_return() func(a, b int) int {
fn := func(a, b int) int {
return (a * 2) / b
}
return fn
}
func main() {
a := 1
b := 2
x := add_two(a, b)
fmt.Println(a, b, x)
my_swap(&a, &b)
fmt.Println(a, b)
fmt.Println(add_n(1, 2, 3, 4, 5, 6))
my_list := []int{2, 3, 4, 5, 6}
fmt.Println(add_n(my_list...)) // I really like this you ask why ?
// because u write function that works for solo variable and also for arrays
// this discovery made me more attracted toward go🥰😍😘
func() {
fmt.Println("hello")
}()
f1 := func() {
fmt.Println("arigatou")
}
f1()
func_passed(add_n, 1, 2, 3, 4)
fr := func_return()
fmt.Println(fr(5, 10))
}
| {
y := a + b
return y
} |
appengineserviceiampolicy.go | /*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "kubeform.dev/provider-google-api/apis/iap/v1alpha1"
scheme "kubeform.dev/provider-google-api/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// AppEngineServiceIamPoliciesGetter has a method to return a AppEngineServiceIamPolicyInterface.
// A group's client should implement this interface.
type AppEngineServiceIamPoliciesGetter interface {
AppEngineServiceIamPolicies(namespace string) AppEngineServiceIamPolicyInterface
}
// AppEngineServiceIamPolicyInterface has methods to work with AppEngineServiceIamPolicy resources.
type AppEngineServiceIamPolicyInterface interface {
Create(ctx context.Context, appEngineServiceIamPolicy *v1alpha1.AppEngineServiceIamPolicy, opts v1.CreateOptions) (*v1alpha1.AppEngineServiceIamPolicy, error)
Update(ctx context.Context, appEngineServiceIamPolicy *v1alpha1.AppEngineServiceIamPolicy, opts v1.UpdateOptions) (*v1alpha1.AppEngineServiceIamPolicy, error)
UpdateStatus(ctx context.Context, appEngineServiceIamPolicy *v1alpha1.AppEngineServiceIamPolicy, opts v1.UpdateOptions) (*v1alpha1.AppEngineServiceIamPolicy, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AppEngineServiceIamPolicy, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AppEngineServiceIamPolicyList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AppEngineServiceIamPolicy, err error)
AppEngineServiceIamPolicyExpansion
}
// appEngineServiceIamPolicies implements AppEngineServiceIamPolicyInterface
type appEngineServiceIamPolicies struct {
client rest.Interface
ns string
}
// newAppEngineServiceIamPolicies returns a AppEngineServiceIamPolicies
func newAppEngineServiceIamPolicies(c *IapV1alpha1Client, namespace string) *appEngineServiceIamPolicies {
return &appEngineServiceIamPolicies{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the appEngineServiceIamPolicy, and returns the corresponding appEngineServiceIamPolicy object, and an error if there is any.
func (c *appEngineServiceIamPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AppEngineServiceIamPolicy, err error) {
result = &v1alpha1.AppEngineServiceIamPolicy{}
err = c.client.Get().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of AppEngineServiceIamPolicies that match those selectors.
func (c *appEngineServiceIamPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AppEngineServiceIamPolicyList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.AppEngineServiceIamPolicyList{}
err = c.client.Get().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested appEngineServiceIamPolicies.
func (c *appEngineServiceIamPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { | timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a appEngineServiceIamPolicy and creates it. Returns the server's representation of the appEngineServiceIamPolicy, and an error, if there is any.
func (c *appEngineServiceIamPolicies) Create(ctx context.Context, appEngineServiceIamPolicy *v1alpha1.AppEngineServiceIamPolicy, opts v1.CreateOptions) (result *v1alpha1.AppEngineServiceIamPolicy, err error) {
result = &v1alpha1.AppEngineServiceIamPolicy{}
err = c.client.Post().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Body(appEngineServiceIamPolicy).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a appEngineServiceIamPolicy and updates it. Returns the server's representation of the appEngineServiceIamPolicy, and an error, if there is any.
func (c *appEngineServiceIamPolicies) Update(ctx context.Context, appEngineServiceIamPolicy *v1alpha1.AppEngineServiceIamPolicy, opts v1.UpdateOptions) (result *v1alpha1.AppEngineServiceIamPolicy, err error) {
result = &v1alpha1.AppEngineServiceIamPolicy{}
err = c.client.Put().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
Name(appEngineServiceIamPolicy.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(appEngineServiceIamPolicy).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *appEngineServiceIamPolicies) UpdateStatus(ctx context.Context, appEngineServiceIamPolicy *v1alpha1.AppEngineServiceIamPolicy, opts v1.UpdateOptions) (result *v1alpha1.AppEngineServiceIamPolicy, err error) {
result = &v1alpha1.AppEngineServiceIamPolicy{}
err = c.client.Put().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
Name(appEngineServiceIamPolicy.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(appEngineServiceIamPolicy).
Do(ctx).
Into(result)
return
}
// Delete takes name of the appEngineServiceIamPolicy and deletes it. Returns an error if one occurs.
func (c *appEngineServiceIamPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *appEngineServiceIamPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("appengineserviceiampolicies").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched appEngineServiceIamPolicy.
func (c *appEngineServiceIamPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AppEngineServiceIamPolicy, err error) {
result = &v1alpha1.AppEngineServiceIamPolicy{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("appengineserviceiampolicies").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
} | var timeout time.Duration
if opts.TimeoutSeconds != nil { |
cadastro.js | export const signos = ['Leão', 'Virgem', 'Gêmeos', 'Capricórnio', 'Escorpião', 'Câncer', 'Áries', 'Sagitário', 'Touro', 'Libra', 'Peixes', 'Aquário'];
export const generos = {
feminino: "Feminino",
masculino: "Masculino",
transgenero: "Transgênero"
};
export const cadastro = {
adicionar: function (dados) {
this.pessoas.push(dados);
},
remover: function () {
this.pessoas.pop();
},
pessoas: [
{
nome: "Laila Cardoso",
cidade: "Salvador",
idade: 19,
bebidaFavorita: "Budweiser",
genero: generos.feminino,
signo: signos.virgem
},
{
nome: "Mariana Rufino",
cidade: "São Paulo",
idade: 26,
bebidaFavorita: "Stella",
genero: generos.feminino,
signo: signos.gemeos
},
{
nome: "Izabela Gonzaga",
cidade: "Palmas-TO",
idade: 24,
bebidaFavorita: "Guaraná",
genero: generos.feminino,
signo: signos.capricornio
},
{
nome: "Giu Zambot",
cidade: "Marília",
idade: 41,
bebidaFavorita: "Bohemia",
genero: generos.feminino,
signo: signos.sagitario
},
{
nome: "Cyro Renato",
cidade: "Rio de janeiro",
idade: 24,
bebidaFavorita: "Guaraná antártica",
genero: generos.masculino,
signo: signos.virgem
},
{
nome: "Laurão Ribeiro",
cidade: "Belo Horizonte",
idade: 26,
bebidaFavorita: "Original",
genero: generos.feminino,
signo: signos.escorpiao
},
{
nome: "Mayhhara Morais",
cidade: "Carpina/PE",
idade: 20,
bebidaFavorita: "Vinho",
genero: generos.feminino,
signo: signos.gemeos
},
{
nome: "Clara Vasques",
cidade: "Rio de Janeiro",
idade: 25,
bebidaFavorita: "Brahma",
genero: generos.feminino,
signo: signos.capricornio
},
{
nome: "Paloma Calado",
cidade: "Rio de Janeiro",
idade: 26,
bebidaFavorita: "Guaraná Antartica",
genero: generos.feminino,
signo: signos.aquario
},
{
nome: "Rafaela Bittencourt",
cidade: "Maringá",
idade: 23,
bebidaFavorita: "Cerveja",
genero: generos.feminino,
signo: signos.virgem
},
{
nome: "Laura Ribeiro",
cidade: "São Paulo",
idade: 32,
bebidaFavorita: "colorado",
genero: generos.feminino,
signo: signos.touro
},
{
nome: "Gabriela Pandini",
cidade: "Blumenau",
idade: 20,
bebidaFavorita: "Patagonia Weisse",
genero: generos.feminino,
signo: signos.peixes
},
{
nome: "Gabriela Moro",
cidade: "Foz do Iguacu",
idade: 24, | bebidaFavorita: "Refri",
genero: generos.feminino,
signo: signos.virgem
},
{
nome: "Eudes",
cidade: "Mariana",
idade: 24,
bebidaFavorita: "Skol Beats",
genero: generos.masculino,
signo: signos.touro
},
{
nome: "Jislane Santana",
cidade: "Campina Grande - PB",
idade: 26,
bebidaFavorita: "Cerveja",
genero: generos.feminino,
signo: signos.gemeos
},
{
nome: "Emilly Roberta",
cidade: "São Paulo ",
idade: 21,
bebidaFavorita: "leite com *****",
genero: generos.feminino,
signo: signos.leao
},
{
nome: "Úrsula Ariel",
cidade: "Macapá",
idade: 33,
bebidaFavorita: "Água",
genero: generos.transgenero,
signo: signos.aquario
},
{
nome: "Luana Kuster",
cidade: "Colatina",
idade: 23,
bebidaFavorita: "Cerveja",
genero: generos.feminino,
signo: signos.capricornio
},
{
nome: "Paloma Souza",
cidade: "Salvador",
idade: 28,
bebidaFavorita: "doBem de coco com chocolate",
genero: generos.feminino,
signo: signos.leao
}
]
};
export const contaSignos = (signo) => {
let contador = 0;
for (let i = 0; i < cadastro.length; i++){ // length retorna o número de unidades de código presentes na string.
if (signo === cadastro[i].signo) contador++;
}
return contador;
};
export const contaGeneros = (genero) => {
let contador = 0;
for (let i = 0; i < cadastro.length; i++){
if (genero === cadastro[i].genero) contador++;
}
return contador;
};
export const filtraSignos = (signo) => {
let pessoasSigno = [];
for (let i = 0; i < cadastro.length; i++){
if (signo === cadastro[i].signo){
pessoasSigno.push(cadastro[i]); //pega a ficha das pessoas do signo x e coloca numa nova array
}
}
return pessoasSigno;
};
export const filtraGeneros = (genero) => {
let pessoasGenero = [];
for (let i = 0; i < cadastro.length; i++){
if (genero === cadastro[i].genero) {
pessoasGenero.push(cadastro[i]);
}
}
return pessoasGenero;
}; | |
fieldState.test.ts | import { DataFrame, TIME_SERIES_VALUE_FIELD_NAME, FieldType } from '../types';
import { getFieldDisplayName } from './fieldState';
import { toDataFrame } from '../dataframe';
interface TitleScenario {
frames: DataFrame[];
frameIndex?: number; // assume 0
fieldIndex?: number; // assume 0
}
function checkScenario(scenario: TitleScenario): string {
const frame = scenario.frames[scenario.frameIndex ?? 0];
const field = frame.fields[scenario.fieldIndex ?? 0];
return getFieldDisplayName(field, frame, scenario.frames);
}
describe('Check field state calculations (displayName and id)', () => {
it('should use field name if no frame name', () => {
const title = checkScenario({
frames: [
toDataFrame({
fields: [{ name: 'Field 1' }],
}),
],
});
expect(title).toEqual('Field 1');
});
it('should use only field name if only one series', () => {
const title = checkScenario({
frames: [
toDataFrame({
name: 'Series A',
fields: [{ name: 'Field 1' }],
}),
],
});
expect(title).toEqual('Field 1');
});
it('should use frame name and field name if more than one frame', () => {
const title = checkScenario({
frames: [
toDataFrame({
name: 'Series A',
fields: [{ name: 'Field 1' }],
}),
toDataFrame({
name: 'Series B',
fields: [{ name: 'Field 1' }],
}),
],
});
expect(title).toEqual('Series A Field 1');
});
it('should only use label value if only one label', () => {
const title = checkScenario({
frames: [
toDataFrame({
fields: [{ name: 'Value', labels: { server: 'Server A' } }],
}),
],
});
expect(title).toEqual('Server A');
});
it('should use label value only if all series have same name', () => {
const title = checkScenario({
frames: [
toDataFrame({
name: 'cpu',
fields: [{ name: 'Value', labels: { server: 'Server A' } }],
}),
toDataFrame({ | }),
],
});
expect(title).toEqual('Server A');
});
it('should use label name and value if more than one label', () => {
const title = checkScenario({
frames: [
toDataFrame({
fields: [{ name: 'Value', labels: { server: 'Server A', mode: 'B' } }],
}),
],
});
expect(title).toEqual('{mode="B", server="Server A"}');
});
it('should use field name even when it is TIME_SERIES_VALUE_FIELD_NAME if there are no labels', () => {
const title = checkScenario({
frames: [
toDataFrame({
fields: [{ name: TIME_SERIES_VALUE_FIELD_NAME, labels: {} }],
}),
],
});
expect(title).toEqual('Value');
});
it('should use series name when field name is TIME_SERIES_VALUE_FIELD_NAME and there are no labels ', () => {
const title = checkScenario({
frames: [
toDataFrame({
name: 'Series A',
fields: [{ name: TIME_SERIES_VALUE_FIELD_NAME, labels: {} }],
}),
],
});
expect(title).toEqual('Series A');
});
it('should reder loki frames', () => {
const title = checkScenario({
frames: [
toDataFrame({
refId: 'A',
fields: [
{ name: 'time', type: FieldType.time },
{
name: 'line',
labels: { host: 'ec2-13-53-116-156.eu-north-1.compute.amazonaws.com', region: 'eu-north1' },
},
],
}),
],
fieldIndex: 1,
});
expect(title).toEqual('line {host="ec2-13-53-116-156.eu-north-1.compute.amazonaws.com", region="eu-north1"}');
});
}); | name: 'cpu',
fields: [{ name: 'Value', labels: { server: 'Server A' } }], |
is_simulation.py | '''
Importance Sampling Simulation
------------------------------
This is the main module of the PyFPT code, as it runs the simulations, post
processes and exports the data ready for plotting.
'''
from timeit import default_timer as timer
import multiprocessing as mp
from multiprocessing import Process, Queue
import numpy as np
from .multi_processing_error import multi_processing_error
from .histogram_data_truncation import histogram_data_truncation
from .save_data_to_file import save_data_to_file
from .data_points_pdf import data_points_pdf
from .importance_sampling_cython import\
importance_sampling_simulations
def is_simulation(drift, diffusion, x_in, x_end,
num_runs, bias, time_step, bins=50, min_bin_size=400,
num_sub_samples=20, estimator='lognormal',
save_data=False, t_in=0., t_f=100, x_r=None):
"""Executes the simulation runs, then returns the histogram bin centres,
heights and errors.
Parameters
----------
drift : function
The drift term of the simulated Langevin equation. Must take both x and
t as arguments in the format ``(x, t)``.
diffusion : function
The diffusion term of the simulated Langevin equation. Must take both
x and t as arguments in the format ``(x, t)``.
x_in : float
The initial position value.
x_end : float
The end position value, i.e. the threshold which defines the FPT
problem.
num_runs : int
The number of simulation runs.
bias : scalar or function
The bias used in the simulated Langevin equation to achieve importance
sampling
If a scalar (float or int), this the bias amplitude, i.e. a coefficent
which mutiplies the the diffusion to define the bias.
If a function, this simply defines the bias used. Must take arguments
for both position and time in the format ``(x, t)``.
bins : int or sequence, optional
If bins is an integer, it defines the number equal width bins for the
first-passage times. If bins is a list or numpy array, it defines the
bin edges, including the left edge of the first bin and the right edge
of the last bin. The widths can vary. Defaults to 50 evenly spaced
bins.
time_step : float or int, optional
The time step. This should be at least smaller than the standard
deviation of the FPTs.
min_bin_size : int, optional
The minimum number of runs per bin to included in the data analysis.
If a bin has less than this number, it is truncated. Defaults to 400.
estimator : string, optional
The estimator used to reconstruct the target distribution probability
density from the importance sample. If ``'lognormal'``, it assumes the
weights in each bin follow a lognomral distribution. If ``'naive'``, no
assumption is made but more runs are required for convergance.
num_sub_samples : int, optional
The number of subsamples used in jackknife estimation of the errors
used for the ``'naive'`` estimator. Defaults to 20 when ``estimator``
is ``'naive'``.
Save_data : bool, optional
If ``True``, the first-passage times and the associated weights for
each run is saved to a file.
t_in : float, optional
The initial time value of simulation Defaults to 0.
t_f : float, optional
The maxiumum FPT allowed per run. If this is exceded, the
simulation run ends and returns ``t_f``, which can then be
truncated. Defaults to 100.
x_r : float, optional
The value of the reflective boundary. Must be compatible with the x_in
and x_end chosen. Defaults to unreachable value, effectively no
boundary.
Returns
-------
bin_centres : list
The centres of the histogram bins.
heights : list
The heights of the normalised histogram bars.
errors : list
The errors in estimating the heights.
"""
# Checking drift and diffusion are of the correct format
if callable(drift) is True:
if isinstance(drift(x_in, t_in), float) is True:
pass
else:
ValueError('Provided drift is not the format (x, t)')
else:
ValueError('Provided drift is not a function')
if callable(diffusion) is True:
if isinstance(diffusion(x_in, t_in), float) is True:
pass
else:
ValueError('Provided diffusion is not the format (x, t)')
else:
ValueError('Provided diffusion is not a function')
# Make sure provided values are floats for Cython
if isinstance(x_in, int) is True:
x_in = 1.0*x_in
if isinstance(x_end, int) is True:
x_end = 1.0*x_end
# Checking bias is of correct form
if isinstance(bias, float) is True or isinstance(bias, float) is True:
# If the bias argument is a scalar, use diffusion based bias
|
elif callable(bias):
# If a function is provided, check it is of the correct form
if isinstance(bias(x_in, t_in), float) is True:
bias_type = 'custom'
else:
ValueError('bias function must be of the form bias(x, t)')
else:
ValueError('Provided bias is not a number or function')
if isinstance(time_step, float) is not True\
and isinstance(time_step, int) is not True:
raise ValueError('time_step is not a number')
# Check the user has provided a estimator
if estimator != 'lognormal' and estimator != 'naive':
print('Invalid estimator argument, defaulting to naive method')
estimator = 'naive'
# If no x_r argument is provided, default to infinite boundary
if x_r is None:
# Set the reflective surface at an arbitrarily large value in the
# opposite direction to propagation
x_r = 10000*(x_in-x_end)
elif isinstance(x_r, float) is False:
if isinstance(x_r, int) is True:
if isinstance(x_r, bool) is True:
raise ValueError('x_r is not a number')
else:
pass
else:
raise ValueError('x_r is not a number')
elif (x_r-x_in)*(x_in-x_end) < 0:
raise ValueError('End and relfective surfaces not compatible with' +
' initial value.')
# The number of sims per core, so the total is correct
num_runs_per_core = int(num_runs/mp.cpu_count())
# Time how long the simulation runs take
start = timer()
# Using multiprocessing
def multi_processing_func(x_in, x_r, x_end, t_in, t_f,
time_step, bias, num_runs, queue_efolds,
queue_ws, queue_refs):
results =\
importance_sampling_simulations(x_in, x_r, x_end, t_in,
t_f, time_step, bias,
num_runs, drift, diffusion,
bias_type=bias_type,
count_refs=False)
fpt_values = np.array(results[0][:])
ws = np.array(results[1][:])
queue_efolds.put(fpt_values)
queue_ws.put(ws)
queue_efolds = Queue()
queue_ws = Queue()
queue_refs = Queue()
cores = int(mp.cpu_count()/1)
print('Number of cores used: '+str(cores))
processes = [Process(target=multi_processing_func,
args=(x_in, x_r, x_end, t_in, t_f,
time_step, bias, num_runs_per_core,
queue_efolds, queue_ws, queue_refs))
for i in range(cores)]
for p in processes:
p.start()
# More efficient to work with numpy arrays
fpt_array = np.array([queue_efolds.get() for p in processes])
ws_array = np.array([queue_ws.get() for p in processes])
end = timer()
print(f'The simulations took: {end - start} seconds')
# Combine into columns into 1
fpt_values = fpt_array.flatten()
w_values = ws_array.flatten()
# Sort in order of increasing Ns
sort_idx = np.argsort(fpt_values)
fpt_values = fpt_values[sort_idx]
w_values = w_values[sort_idx]
# Checking if multipprocessing error occured, by looking at correlation
_ = multi_processing_error(fpt_values, w_values)
# Truncating any data which did not reach x_end
fpt_values, w_values =\
histogram_data_truncation(fpt_values, t_f, weights=w_values,
num_sub_samples=num_sub_samples)
# Saving the data
if save_data is True:
if bias_type == 'diffusion':
save_data_to_file(fpt_values, w_values, x_in, num_runs, bias)
else:
# Label the file differently if custom bias is used.
save_data_to_file(fpt_values, w_values, x_in, num_runs,
bias(x_in, 0), extra_label='_custom_bias')
# Now analysisng the data to creating the histogram/PDF data
bin_centres, heights, errors, num_runs_used, bin_edges_untruncated =\
data_points_pdf(fpt_values, w_values, estimator, bins=bins,
min_bin_size=min_bin_size,
num_sub_samples=num_sub_samples)
# Return data as lists
return bin_centres.tolist(), heights.tolist(), errors.tolist()
| bias_type = 'diffusion'
if bias == 0:
estimator = 'naive'
print('As direct simulation, defaulting to naive estimator') |
z_str.rs | //! The following is derived from Rust's
//! library/std/src/ffi/c_str.rs at revision
//! dca3f1b786efd27be3b325ed1e01e247aa589c3b.
//!
//! ZStrings are like std's CStrings except that they use `u8` instead of
//! `c_char`, so that they're not platform-dependent.
#![allow(unsafe_code)]
#![deny(unsafe_op_in_unsafe_fn)]
use super::strlen;
use crate::io;
use alloc::borrow::{Cow, ToOwned};
use alloc::boxed::Box;
use alloc::rc::Rc;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::ascii;
use core::borrow::Borrow;
use core::cmp::Ordering;
use core::fmt::{self, Write};
use core::mem;
#[cfg(vec_into_raw_parts)]
use core::num::NonZeroU8;
use core::ops;
use core::ptr;
use core::slice;
#[cfg(slice_internals)]
use core::slice::memchr::memchr;
use core::str::{self, Utf8Error};
#[cfg(not(slice_internals))]
fn memchr(x: u8, text: &[u8]) -> Option<usize> {
text.iter().position(|elt| *elt == x)
}
/// A type representing an owned, C-compatible, nul-terminated string with no nul bytes in the
/// middle.
///
/// This type serves the purpose of being able to safely generate a
/// C-compatible string from a Rust byte slice or vector. An instance of this
/// type is a static guarantee that the underlying bytes contain no interior 0
/// bytes ("nul characters") and that the final byte is 0 ("nul terminator").
///
/// `ZString` is to <code>&[ZStr]</code> as [`String`] is to <code>&[str]</code>: the former
/// in each pair are owned strings; the latter are borrowed
/// references.
///
/// # Creating a `ZString`
///
/// A `ZString` is created from either a byte slice or a byte vector,
/// or anything that implements <code>[Into]<[Vec]<[u8]>></code> (for
/// example, you can build a `ZString` straight out of a [`String`] or
/// a <code>&[str]</code>, since both implement that trait).
///
/// The [`ZString::new`] method will actually check that the provided <code>&[[u8]]</code>
/// does not have 0 bytes in the middle, and return an error if it
/// finds one.
///
/// # Extracting a raw pointer to the whole C string
///
/// `ZString` implements an [`as_ptr`][`ZStr::as_ptr`] method through the [`Deref`]
/// trait. This method will give you a `*const u8` which you can
/// feed directly to extern functions that expect a nul-terminated
/// string, like C's `strdup()`. Notice that [`as_ptr`][`ZStr::as_ptr`] returns a
/// read-only pointer; if the C code writes to it, that causes
/// undefined behavior.
///
/// # Extracting a slice of the whole C string
///
/// Alternatively, you can obtain a <code>&[[u8]]</code> slice from a
/// `ZString` with the [`ZString::as_bytes`] method. Slices produced in this
/// way do *not* contain the trailing nul terminator. This is useful
/// when you will be calling an extern function that takes a `*const
/// u8` argument which is not necessarily nul-terminated, plus another
/// argument with the length of the string — like C's `strndup()`.
/// You can of course get the slice's length with its
/// [`len`][slice::len] method.
///
/// If you need a <code>&[[u8]]</code> slice *with* the nul terminator, you
/// can use [`ZString::as_bytes_with_nul`] instead.
///
/// Once you have the kind of slice you need (with or without a nul
/// terminator), you can call the slice's own
/// [`as_ptr`][slice::as_ptr] method to get a read-only raw pointer to pass to
/// extern functions. See the documentation for that function for a
/// discussion on ensuring the lifetime of the raw pointer.
///
/// [str]: prim@str "str"
/// [`Deref`]: ops::Deref
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// # fn main() {
/// use std::ffi::ZString;
///
/// extern "C" {
/// fn my_printer(s: *const u8);
/// }
///
/// // We are certain that our string doesn't have 0 bytes in the middle,
/// // so we can .expect()
/// let c_to_print = ZString::new("Hello, world!").expect("ZString::new failed");
/// unsafe {
/// my_printer(c_to_print.as_ptr());
/// }
/// # }
/// ```
///
/// # Safety
///
/// `ZString` is intended for working with traditional C-style strings
/// (a sequence of non-nul bytes terminated by a single nul byte); the
/// primary use case for these kinds of strings is interoperating with C-like
/// code. Often you will need to transfer ownership to/from that external
/// code. It is strongly recommended that you thoroughly read through the
/// documentation of `ZString` before use, as improper ownership management
/// of `ZString` instances can lead to invalid memory accesses, memory leaks,
/// and other memory errors.
#[derive(PartialEq, PartialOrd, Eq, Ord, Hash, Clone)]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub struct ZString {
// Invariant 1: the slice ends with a zero byte and has a length of at least one.
// Invariant 2: the slice contains only one zero byte.
// Improper usage of unsafe function can break Invariant 2, but not Invariant 1.
inner: Box<[u8]>,
}
/// Representation of a borrowed C string.
///
/// This type represents a borrowed reference to a nul-terminated
/// array of bytes. It can be constructed safely from a <code>&[[u8]]</code>
/// slice, or unsafely from a raw `*const u8`. It can then be
/// converted to a Rust <code>&[str]</code> by performing UTF-8 validation, or
/// into an owned [`ZString`].
///
/// `&ZStr` is to [`ZString`] as <code>&[str]</code> is to [`String`]: the former
/// in each pair are borrowed references; the latter are owned
/// strings.
///
/// Note that this structure is **not** `repr(C)` and is not recommended to be
/// placed in the signatures of FFI functions. Instead, safe wrappers of FFI
/// functions may leverage the unsafe [`ZStr::from_ptr`] constructor to provide
/// a safe interface to other consumers.
///
/// # Examples
///
/// Inspecting a foreign C string:
///
/// ```ignore (extern-declaration)
/// use std::ffi::ZStr;
///
/// extern "C" { fn my_string() -> *const u8; }
///
/// unsafe {
/// let slice = ZStr::from_ptr(my_string());
/// println!("string buffer size without nul terminator: {}", slice.to_bytes().len());
/// }
/// ```
///
/// Passing a Rust-originating C string:
///
/// ```ignore (extern-declaration)
/// use std::ffi::{ZString, ZStr};
///
/// fn work(data: &ZStr) {
/// extern "C" { fn work_with(data: *const u8); }
///
/// unsafe { work_with(data.as_ptr()) }
/// }
///
/// let s = ZString::new("data data data data").expect("ZString::new failed");
/// work(&s);
/// ```
///
/// Converting a foreign C string into a Rust [`String`]:
///
/// ```ignore (extern-declaration)
/// use std::ffi::ZStr;
///
/// extern "C" { fn my_string() -> *const u8; }
///
/// fn my_string_safe() -> String {
/// unsafe {
/// ZStr::from_ptr(my_string()).to_string_lossy().into_owned()
/// }
/// }
///
/// println!("string: {}", my_string_safe());
/// ```
///
/// [str]: prim@str "str"
#[derive(Hash)]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
// FIXME:
// `fn from` in `impl From<&ZStr> for Box<ZStr>` current implementation relies
// on `ZStr` being layout-compatible with `[u8]`.
// When attribute privacy is implemented, `ZStr` should be annotated as `#[repr(transparent)]`.
// Anyway, `ZStr` representation and layout are considered implementation detail, are
// not documented and must not be relied upon.
pub struct ZStr {
// FIXME: this should not be represented with a DST slice but rather with
// just a raw `u8` along with some form of marker to make
// this an unsized type. Essentially `sizeof(&ZStr)` should be the
// same as `sizeof(&u8)` but `ZStr` should be an unsized type.
inner: [u8],
}
/// An error indicating that an interior nul byte was found.
///
/// While Rust strings may contain nul bytes in the middle, C strings
/// can't, as that byte would effectively truncate the string.
///
/// This error is created by the [`new`][`ZString::new`] method on
/// [`ZString`]. See its documentation for more.
///
/// # Examples
///
/// ```
/// use std::ffi::{ZString, NulError};
///
/// let _: NulError = ZString::new(b"f\0oo".to_vec()).unwrap_err();
/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub struct NulError(usize, Vec<u8>);
/// An error indicating that a nul byte was not in the expected position.
///
/// The slice used to create a [`ZStr`] must have one and only one nul byte,
/// positioned at the end.
///
/// This error is created by the [`ZStr::from_bytes_with_nul`] method.
/// See its documentation for more.
///
/// # Examples
///
/// ```
/// use std::ffi::{ZStr, FromBytesWithNulError};
///
/// let _: FromBytesWithNulError = ZStr::from_bytes_with_nul(b"f\0oo").unwrap_err();
/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(staged_api, stable(feature = "cstr_from_bytes", since = "1.10.0"))]
pub struct FromBytesWithNulError {
kind: FromBytesWithNulErrorKind,
}
/// An error indicating that a nul byte was not in the expected position.
///
/// The vector used to create a [`ZString`] must have one and only one nul byte,
/// positioned at the end.
///
/// This error is created by the [`ZString::from_vec_with_nul`] method.
/// See its documentation for more.
///
/// # Examples
///
/// ```
/// use std::ffi::{ZString, FromVecWithNulError};
///
/// let _: FromVecWithNulError = ZString::from_vec_with_nul(b"f\0oo".to_vec()).unwrap_err();
/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")
)]
pub struct FromVecWithNulError {
error_kind: FromBytesWithNulErrorKind,
bytes: Vec<u8>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
enum FromBytesWithNulErrorKind {
InteriorNul(usize),
NotNulTerminated,
}
impl FromBytesWithNulError {
fn interior_nul(pos: usize) -> FromBytesWithNulError {
FromBytesWithNulError {
kind: FromBytesWithNulErrorKind::InteriorNul(pos),
}
}
fn not_nul_terminated() -> FromBytesWithNulError {
FromBytesWithNulError {
kind: FromBytesWithNulErrorKind::NotNulTerminated,
}
}
}
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")
)]
impl FromVecWithNulError {
/// Returns a slice of [`u8`]s bytes that were attempted to convert to a [`ZString`].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ffi::ZString;
///
/// // Some invalid bytes in a vector
/// let bytes = b"f\0oo".to_vec();
///
/// let value = ZString::from_vec_with_nul(bytes.clone());
///
/// assert_eq!(&bytes[..], value.unwrap_err().as_bytes());
/// ```
#[must_use]
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")
)]
pub fn as_bytes(&self) -> &[u8] {
&self.bytes[..]
}
/// Returns the bytes that were attempted to convert to a [`ZString`].
///
/// This method is carefully constructed to avoid allocation. It will
/// consume the error, moving out the bytes, so that a copy of the bytes
/// does not need to be made.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ffi::ZString;
///
/// // Some invalid bytes in a vector
/// let bytes = b"f\0oo".to_vec();
///
/// let value = ZString::from_vec_with_nul(bytes.clone());
///
/// assert_eq!(bytes, value.unwrap_err().into_bytes());
/// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")
)]
pub fn into_bytes(self) -> Vec<u8> {
self.bytes
}
}
/// An error indicating invalid UTF-8 when converting a [`ZString`] into a [`String`].
///
/// `ZString` is just a wrapper over a buffer of bytes with a nul terminator;
/// [`ZString::into_string`] performs UTF-8 validation on those bytes and may
/// return this error.
///
/// This `struct` is created by [`ZString::into_string()`]. See
/// its documentation for more.
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
pub struct IntoStringError {
inner: ZString,
error: Utf8Error,
}
impl ZString {
/// Creates a new C-compatible string from a container of bytes.
///
/// This function will consume the provided data and use the
/// underlying bytes to construct a new string, ensuring that
/// there is a trailing 0 byte. This trailing 0 byte will be
/// appended by this function; the provided data should *not*
/// contain any 0 bytes in it.
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// use std::ffi::ZString;
///
/// extern "C" { fn puts(s: *const u8); }
///
/// let to_print = ZString::new("Hello!").expect("ZString::new failed");
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// ```
///
/// # Errors
///
/// This function will return an error if the supplied bytes contain an
/// internal 0 byte. The [`NulError`] returned will contain the bytes as well as
/// the position of the nul byte.
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub fn new<T: Into<Vec<u8>>>(t: T) -> Result<ZString, NulError> {
trait SpecIntoVec {
fn into_vec(self) -> Vec<u8>;
}
#[cfg(not(specialization))]
impl<T: Into<Vec<u8>>> SpecIntoVec for T {
fn into_vec(self) -> Vec<u8> {
self.into()
}
}
#[cfg(specialization)]
impl<T: Into<Vec<u8>>> SpecIntoVec for T {
default fn into_vec(self) -> Vec<u8> {
self.into()
}
}
// Specialization for avoiding reallocation.
#[cfg(specialization)]
impl SpecIntoVec for &'_ [u8] {
fn into_vec(self) -> Vec<u8> {
let mut v = Vec::with_capacity(self.len() + 1);
v.extend(self);
v
}
}
#[cfg(specialization)]
impl SpecIntoVec for &'_ str {
fn into_vec(self) -> Vec<u8> {
let mut v = Vec::with_capacity(self.len() + 1);
v.extend(self.as_bytes());
v
}
}
Self::_new(SpecIntoVec::into_vec(t))
}
fn _new(bytes: Vec<u8>) -> Result<ZString, NulError> {
match memchr(b'\0', &bytes) {
Some(i) => Err(NulError(i, bytes)),
None => Ok(unsafe { ZString::from_vec_unchecked(bytes) }),
}
}
/// Creates a C-compatible string by consuming a byte vector,
/// without checking for interior 0 bytes.
///
/// Trailing 0 byte will be appended by this function.
///
/// This method is equivalent to [`ZString::new`] except that no runtime
/// assertion is made that `v` contains no 0 bytes, and it requires an
/// actual byte vector, not anything that can be converted to one with Into.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let raw = b"foo".to_vec();
/// unsafe {
/// let c_string = ZString::from_vec_unchecked(raw);
/// }
/// ```
#[must_use]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub unsafe fn from_vec_unchecked(mut v: Vec<u8>) -> ZString {
v.reserve_exact(1);
v.push(b'\0');
ZString {
inner: v.into_boxed_slice(),
}
}
/// Retakes ownership of a `ZString` that was transferred to C via
/// [`ZString::into_raw`].
///
/// Additionally, the length of the string will be recalculated from the pointer.
///
/// # Safety
///
/// This should only ever be called with a pointer that was earlier
/// obtained by calling [`ZString::into_raw`]. Other usage (e.g., trying to take
/// ownership of a string that was allocated by foreign code) is likely to lead
/// to undefined behavior or allocator corruption.
///
/// It should be noted that the length isn't just "recomputed," but that
/// the recomputed length must match the original length from the
/// [`ZString::into_raw`] call. This means the [`ZString::into_raw`]/`from_raw`
/// methods should not be used when passing the string to C functions that can
/// modify the string's length.
///
/// > **Note:** If you need to borrow a string that was allocated by
/// > foreign code, use [`ZStr`]. If you need to take ownership of
/// > a string that was allocated by foreign code, you will need to
/// > make your own provisions for freeing it appropriately, likely
/// > with the foreign code's API to do that.
///
/// # Examples
///
/// Creates a `ZString`, pass ownership to an `extern` function (via raw pointer), then retake
/// ownership with `from_raw`:
///
/// ```ignore (extern-declaration)
/// use std::ffi::ZString;
///
/// extern "C" {
/// fn some_extern_function(s: *mut u8);
/// }
///
/// let c_string = ZString::new("Hello!").expect("ZString::new failed");
/// let raw = c_string.into_raw();
/// unsafe {
/// some_extern_function(raw);
/// let c_string = ZString::from_raw(raw);
/// }
/// ```
#[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `ZString`"]
#[cfg_attr(staged_api, stable(feature = "cstr_memory", since = "1.4.0"))]
pub unsafe fn from_raw(ptr: *mut u8) -> ZString {
// SAFETY: This is called with a pointer that was obtained from a call
// to `ZString::into_raw` and the length has not been modified. As such,
// we know there is a NUL byte (and only one) at the end and that the
// information about the size of the allocation is correct on Rust's
// side.
unsafe {
let len = strlen(ptr) + 1; // Including the NUL byte
let slice = slice::from_raw_parts_mut(ptr, len as usize);
ZString {
inner: Box::from_raw(slice as *mut [u8]),
}
}
}
/// Consumes the `ZString` and transfers ownership of the string to a C caller.
///
/// The pointer which this function returns must be returned to Rust and reconstituted using
/// [`ZString::from_raw`] to be properly deallocated. Specifically, one
/// should *not* use the standard C `free()` function to deallocate
/// this string.
///
/// Failure to call [`ZString::from_raw`] will lead to a memory leak.
///
/// The C side must **not** modify the length of the string (by writing a
/// `null` somewhere inside the string or removing the final one) before
/// it makes it back into Rust using [`ZString::from_raw`]. See the safety section
/// in [`ZString::from_raw`].
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let c_string = ZString::new("foo").expect("ZString::new failed");
///
/// let ptr = c_string.into_raw();
///
/// unsafe {
/// assert_eq!(b'f', *ptr as u8);
/// assert_eq!(b'o', *ptr.offset(1) as u8);
/// assert_eq!(b'o', *ptr.offset(2) as u8);
/// assert_eq!(b'\0', *ptr.offset(3) as u8);
///
/// // retake pointer to free memory
/// let _ = ZString::from_raw(ptr);
/// }
/// ```
#[inline]
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "cstr_memory", since = "1.4.0"))]
pub fn into_raw(self) -> *mut u8 {
Box::into_raw(self.into_inner()) as *mut u8
}
/// Converts the `ZString` into a [`String`] if it contains valid UTF-8 data.
///
/// On failure, ownership of the original `ZString` is returned.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let valid_utf8 = vec![b'f', b'o', b'o'];
/// let cstring = ZString::new(valid_utf8).expect("ZString::new failed");
/// assert_eq!(cstring.into_string().expect("into_string() call failed"), "foo");
///
/// let invalid_utf8 = vec![b'f', 0xff, b'o', b'o'];
/// let cstring = ZString::new(invalid_utf8).expect("ZString::new failed");
/// let err = cstring.into_string().err().expect("into_string().err() failed");
/// assert_eq!(err.utf8_error().valid_up_to(), 1);
/// ```
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
pub fn into_string(self) -> Result<String, IntoStringError> {
String::from_utf8(self.into_bytes()).map_err(|e| IntoStringError {
error: e.utf8_error(),
inner: unsafe { ZString::from_vec_unchecked(e.into_bytes()) },
})
}
/// Consumes the `ZString` and returns the underlying byte buffer.
///
/// The returned buffer does **not** contain the trailing nul
/// terminator, and it is guaranteed to not have any interior nul
/// bytes.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let c_string = ZString::new("foo").expect("ZString::new failed");
/// let bytes = c_string.into_bytes();
/// assert_eq!(bytes, vec![b'f', b'o', b'o']);
/// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
pub fn into_bytes(self) -> Vec<u8> {
let mut vec = self.into_inner().into_vec();
let _nul = vec.pop();
debug_assert_eq!(_nul, Some(0u8));
vec
}
/// Equivalent to [`ZString::into_bytes()`] except that the
/// returned vector includes the trailing nul terminator.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let c_string = ZString::new("foo").expect("ZString::new failed");
/// let bytes = c_string.into_bytes_with_nul();
/// assert_eq!(bytes, vec![b'f', b'o', b'o', b'\0']);
/// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
pub fn into_bytes_with_nul(self) -> Vec<u8> {
self.into_inner().into_vec()
}
/// Returns the contents of this `ZString` as a slice of bytes.
///
/// The returned slice does **not** contain the trailing nul
/// terminator, and it is guaranteed to not have any interior nul
/// bytes. If you need the nul terminator, use
/// [`ZString::as_bytes_with_nul`] instead.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let c_string = ZString::new("foo").expect("ZString::new failed");
/// let bytes = c_string.as_bytes();
/// assert_eq!(bytes, &[b'f', b'o', b'o']);
/// ```
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub fn as_bytes(&self) -> &[u8] {
// SAFETY: ZString has a length at least 1
unsafe { self.inner.get_unchecked(..self.inner.len() - 1) }
}
/// Equivalent to [`ZString::as_bytes()`] except that the
/// returned slice includes the trailing nul terminator.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let c_string = ZString::new("foo").expect("ZString::new failed");
/// let bytes = c_string.as_bytes_with_nul();
/// assert_eq!(bytes, &[b'f', b'o', b'o', b'\0']);
/// ```
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub fn as_bytes_with_nul(&self) -> &[u8] {
&self.inner
}
/// Extracts a [`ZStr`] slice containing the entire string.
///
/// # Examples
///
/// ```
/// use rustix::ffi::{ZString, ZStr};
///
/// let z_string = ZString::new(b"foo".to_vec()).expect("ZString::new failed");
/// let zstr = z_string.as_z_str();
/// assert_eq!(zstr,
/// ZStr::from_bytes_with_nul(b"foo\0").expect("ZStr::from_bytes_with_nul failed"));
/// ```
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "as_c_str", since = "1.20.0"))]
pub fn as_z_str(&self) -> &ZStr {
&*self
}
/// Extracts a [`CStr`] slice containing the entire string.
///
/// # Examples
///
/// ```
/// use std::ffi::CStr;
/// use rustix::ffi::{ZString, ZStr};
///
/// let z_string = ZString::new(b"foo".to_vec()).expect("ZString::new failed");
/// let cstr = z_string.as_c_str();
/// assert_eq!(cstr,
/// CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed"));
/// ```
#[cfg(not(feature = "rustc-dep-of-std"))]
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "as_c_str", since = "1.20.0"))]
pub fn as_c_str(&self) -> &ZStr {
self.as_z_str()
}
/// Converts this `ZString` into a boxed [`ZStr`].
///
/// # Examples
///
/// ```
/// use rustix::ffi::{ZString, ZStr};
///
/// let z_string = ZString::new(b"foo".to_vec()).expect("ZString::new failed");
/// let boxed = c_string.into_boxed_z_str();
/// assert_eq!(&*boxed,
/// ZStr::from_bytes_with_nul(b"foo\0").expect("ZStr::from_bytes_with_nul failed"));
/// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "into_boxed_c_str", since = "1.20.0"))]
pub fn into_boxed_z_str(self) -> Box<ZStr> {
unsafe { Box::from_raw(Box::into_raw(self.into_inner()) as *mut ZStr) }
}
/// Converts this `ZString` into a boxed [`CStr`].
///
/// # Examples
///
/// ```
/// use std::ffi::ZStr;
/// use rustix::ffi::{ZString, ZStr};
///
/// let z_string = ZString::new(b"foo".to_vec()).expect("ZString::new failed");
/// let boxed = z_string.into_boxed_c_str();
/// assert_eq!(&*boxed,
/// CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed"));
/// ```
#[cfg(feature = "std")]
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "into_boxed_c_str", since = "1.20.0"))]
pub fn into_boxed_c_str(self) -> Box<CStr> {
self.into_boxed_z_str()
}
/// Bypass "move out of struct which implements [`Drop`] trait" restriction.
#[inline]
fn into_inner(self) -> Box<[u8]> {
// Rationale: `mem::forget(self)` invalidates the previous call to `ptr::read(&self.inner)`
// so we use `ManuallyDrop` to ensure `self` is not dropped.
// Then we can return the box directly without invalidating it.
// See https://github.com/rust-lang/rust/issues/62553.
let this = mem::ManuallyDrop::new(self);
unsafe { ptr::read(&this.inner) }
}
/// Converts a <code>[Vec]<[u8]></code> to a [`ZString`] without checking the
/// invariants on the given [`Vec`].
///
/// # Safety
///
/// The given [`Vec`] **must** have one nul byte as its last element.
/// This means it cannot be empty nor have any other nul byte anywhere else.
///
/// # Example
///
/// ```
/// use std::ffi::ZString;
/// assert_eq!(
/// unsafe { ZString::from_vec_with_nul_unchecked(b"abc\0".to_vec()) },
/// unsafe { ZString::from_vec_unchecked(b"abc".to_vec()) }
/// );
/// ```
#[must_use]
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")
)]
pub unsafe fn from_vec_with_nul_unchecked(v: Vec<u8>) -> Self {
Self {
inner: v.into_boxed_slice(),
}
}
/// Attempts to converts a <code>[Vec]<[u8]></code> to a [`ZString`].
///
/// Runtime checks are present to ensure there is only one nul byte in the
/// [`Vec`], its last element.
///
/// # Errors
///
/// If a nul byte is present and not the last element or no nul bytes
/// is present, an error will be returned.
///
/// # Examples
///
/// A successful conversion will produce the same result as [`ZString::new`]
/// when called without the ending nul byte.
///
/// ```
/// use std::ffi::ZString;
/// assert_eq!(
/// ZString::from_vec_with_nul(b"abc\0".to_vec())
/// .expect("ZString::from_vec_with_nul failed"),
/// ZString::new(b"abc".to_vec()).expect("ZString::new failed")
/// );
/// ```
///
/// An incorrectly formatted [`Vec`] will produce an error.
///
/// ```
/// use std::ffi::{ZString, FromVecWithNulError};
/// // Interior nul byte
/// let _: FromVecWithNulError = ZString::from_vec_with_nul(b"a\0bc".to_vec()).unwrap_err();
/// // No nul byte
/// let _: FromVecWithNulError = ZString::from_vec_with_nul(b"abc".to_vec()).unwrap_err();
/// ```
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")
)]
pub fn from_vec_with_nul(v: Vec<u8>) -> Result<Self, FromVecWithNulError> {
let nul_pos = memchr(b'\0', &v);
match nul_pos {
Some(nul_pos) if nul_pos + 1 == v.len() => {
// SAFETY: We know there is only one nul byte, at the end
// of the vec.
Ok(unsafe { Self::from_vec_with_nul_unchecked(v) })
}
Some(nul_pos) => Err(FromVecWithNulError {
error_kind: FromBytesWithNulErrorKind::InteriorNul(nul_pos),
bytes: v,
}),
None => Err(FromVecWithNulError {
error_kind: FromBytesWithNulErrorKind::NotNulTerminated,
bytes: v,
}),
}
}
}
// Turns this `ZString` into an empty string to prevent
// memory-unsafe code from working by accident. Inline
// to prevent LLVM from optimizing it away in debug builds.
#[cfg_attr(staged_api, stable(feature = "cstring_drop", since = "1.13.0"))]
impl Drop for ZString {
#[inline]
fn drop(&mut self) {
unsafe {
*self.inner.get_unchecked_mut(0) = 0;
}
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl ops::Deref for ZString {
type Target = ZStr;
#[inline]
fn deref(&self) -> &ZStr {
unsafe { ZStr::from_bytes_with_nul_unchecked(self.as_bytes_with_nul()) }
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl fmt::Debug for ZString {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
impl From<ZString> for Vec<u8> {
/// Converts a [`ZString`] into a <code>[Vec]<[u8]></code>.
///
/// The conversion consumes the [`ZString`], and removes the terminating NUL byte.
#[inline]
fn from(s: ZString) -> Vec<u8> {
s.into_bytes()
}
}
#[cfg_attr(staged_api, stable(feature = "cstr_debug", since = "1.3.0"))]
impl fmt::Debug for ZStr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "\"")?;
for byte in self
.to_bytes()
.iter()
.flat_map(|&b| ascii::escape_default(b))
{
f.write_char(byte as char)?;
}
write!(f, "\"")
}
}
#[cfg_attr(staged_api, stable(feature = "cstr_default", since = "1.10.0"))]
impl Default for &ZStr {
fn default() -> Self {
const SLICE: &[u8] = &[0];
unsafe { ZStr::from_ptr(SLICE.as_ptr()) }
}
}
#[cfg_attr(staged_api, stable(feature = "cstr_default", since = "1.10.0"))]
impl Default for ZString {
/// Creates an empty `ZString`.
fn default() -> ZString {
let a: &ZStr = Default::default();
a.to_owned()
}
}
#[cfg_attr(staged_api, stable(feature = "cstr_borrow", since = "1.3.0"))]
impl Borrow<ZStr> for ZString {
#[inline]
fn borrow(&self) -> &ZStr {
self
}
}
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_cow_cstr", since = "1.28.0")
)]
impl<'a> From<Cow<'a, ZStr>> for ZString {
#[inline]
fn from(s: Cow<'a, ZStr>) -> Self {
s.into_owned()
}
}
#[cfg_attr(staged_api, stable(feature = "box_from_c_str", since = "1.17.0"))]
impl From<&ZStr> for Box<ZStr> {
fn from(s: &ZStr) -> Box<ZStr> {
let boxed: Box<[u8]> = Box::from(s.to_bytes_with_nul());
unsafe { Box::from_raw(Box::into_raw(boxed) as *mut ZStr) }
}
}
#[cfg_attr(staged_api, stable(feature = "box_from_cow", since = "1.45.0"))]
impl From<Cow<'_, ZStr>> for Box<ZStr> {
#[inline]
fn from(cow: Cow<'_, ZStr>) -> Box<ZStr> {
match cow {
Cow::Borrowed(s) => Box::from(s),
Cow::Owned(s) => Box::from(s),
}
}
}
#[cfg_attr(staged_api, stable(feature = "c_string_from_box", since = "1.18.0"))]
impl From<Box<ZStr>> for ZString {
/// Converts a <code>[Box]<[ZStr]></code> into a [`ZString`] without copying or allocating.
#[inline]
fn from(s: Box<ZStr>) -> ZString {
s.into_z_string()
}
}
#[cfg(vec_into_raw_parts)]
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_of_nonzerou8", since = "1.43.0")
)]
impl From<Vec<NonZeroU8>> for ZString {
/// Converts a <code>[Vec]<[NonZeroU8]></code> into a [`ZString`] without
/// copying nor checking for inner null bytes.
#[inline]
fn from(v: Vec<NonZeroU8>) -> ZString {
unsafe {
// Transmute `Vec<NonZeroU8>` to `Vec<u8>`.
let v: Vec<u8> = {
// SAFETY:
// - transmuting between `NonZeroU8` and `u8` is sound;
// - `alloc::Layout<NonZeroU8> == alloc::Layout<u8>`.
let (ptr, len, cap): (*mut NonZeroU8, _, _) = Vec::into_raw_parts(v);
Vec::from_raw_parts(ptr.cast::<u8>(), len, cap)
};
// SAFETY: `v` cannot contain null bytes, given the type-level
// invariant of `NonZeroU8`.
ZString::from_vec_unchecked(v)
}
}
}
#[cfg_attr(staged_api, stable(feature = "more_box_slice_clone", since = "1.29.0"))]
impl Clone for Box<ZStr> {
#[inline]
fn clone(&self) -> Self {
(**self).into()
}
}
#[cfg_attr(staged_api, stable(feature = "box_from_c_string", since = "1.20.0"))]
impl From<ZString> for Box<ZStr> {
/// Converts a [`ZString`] into a <code>[Box]<[ZStr]></code> without copying or allocating.
#[inline]
fn from(s: ZString) -> Box<ZStr> {
s.into_boxed_z_str()
}
}
#[cfg_attr(staged_api, stable(feature = "cow_from_cstr", since = "1.28.0"))]
impl<'a> From<ZString> for Cow<'a, ZStr> {
/// Converts a [`ZString`] into an owned [`Cow`] without copying or allocating.
#[inline]
fn from(s: ZString) -> Cow<'a, ZStr> {
Cow::Owned(s)
}
}
#[cfg_attr(staged_api, stable(feature = "cow_from_cstr", since = "1.28.0"))]
impl<'a> From<&'a ZStr> for Cow<'a, ZStr> {
/// Converts a [`ZStr`] into a borrowed [`Cow`] without copying or allocating.
#[inline]
fn from(s: &'a ZStr) -> Cow<'a, ZStr> {
Cow::Borrowed(s)
}
}
#[cfg_attr(staged_api, stable(feature = "cow_from_cstr", since = "1.28.0"))]
impl<'a> From<&'a ZString> for Cow<'a, ZStr> {
/// Converts a `&`[`ZString`] into a borrowed [`Cow`] without copying or allocating.
#[inline]
fn from(s: &'a ZString) -> Cow<'a, ZStr> {
|
#[cfg_attr(staged_api, stable(feature = "shared_from_slice2", since = "1.24.0"))]
impl From<ZString> for Arc<ZStr> {
/// Converts a [`ZString`] into an <code>[Arc]<[ZStr]></code> without copying or allocating.
#[inline]
fn from(s: ZString) -> Arc<ZStr> {
let arc: Arc<[u8]> = Arc::from(s.into_inner());
unsafe { Arc::from_raw(Arc::into_raw(arc) as *const ZStr) }
}
}
#[cfg_attr(staged_api, stable(feature = "shared_from_slice2", since = "1.24.0"))]
impl From<&ZStr> for Arc<ZStr> {
#[inline]
fn from(s: &ZStr) -> Arc<ZStr> {
let arc: Arc<[u8]> = Arc::from(s.to_bytes_with_nul());
unsafe { Arc::from_raw(Arc::into_raw(arc) as *const ZStr) }
}
}
#[cfg_attr(staged_api, stable(feature = "shared_from_slice2", since = "1.24.0"))]
impl From<ZString> for Rc<ZStr> {
/// Converts a [`ZString`] into an <code>[Rc]<[ZStr]></code> without copying or allocating.
#[inline]
fn from(s: ZString) -> Rc<ZStr> {
let rc: Rc<[u8]> = Rc::from(s.into_inner());
unsafe { Rc::from_raw(Rc::into_raw(rc) as *const ZStr) }
}
}
#[cfg_attr(staged_api, stable(feature = "shared_from_slice2", since = "1.24.0"))]
impl From<&ZStr> for Rc<ZStr> {
#[inline]
fn from(s: &ZStr) -> Rc<ZStr> {
let rc: Rc<[u8]> = Rc::from(s.to_bytes_with_nul());
unsafe { Rc::from_raw(Rc::into_raw(rc) as *const ZStr) }
}
}
#[cfg_attr(staged_api, stable(feature = "default_box_extra", since = "1.17.0"))]
impl Default for Box<ZStr> {
fn default() -> Box<ZStr> {
let boxed: Box<[u8]> = Box::from([0]);
unsafe { Box::from_raw(Box::into_raw(boxed) as *mut ZStr) }
}
}
impl NulError {
/// Returns the position of the nul byte in the slice that caused
/// [`ZString::new`] to fail.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let nul_error = ZString::new("foo\0bar").unwrap_err();
/// assert_eq!(nul_error.nul_position(), 3);
///
/// let nul_error = ZString::new("foo bar\0").unwrap_err();
/// assert_eq!(nul_error.nul_position(), 7);
/// ```
#[must_use]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub fn nul_position(&self) -> usize {
self.0
}
/// Consumes this error, returning the underlying vector of bytes which
/// generated the error in the first place.
///
/// # Examples
///
/// ```
/// use std::ffi::ZString;
///
/// let nul_error = ZString::new("foo\0bar").unwrap_err();
/// assert_eq!(nul_error.into_vec(), b"foo\0bar");
/// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub fn into_vec(self) -> Vec<u8> {
self.1
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl NulError {
/// ```rust
/// if let Err(e) = "xc".parse::<u32>() {
/// Print `e` itself, no need for description().
/// eprintln!("Error: {}", e);
/// }
/// ```
#[allow(deprecated)]
pub fn description(&self) -> &str {
"nul byte found in data"
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl fmt::Display for NulError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "nul byte found in provided data at position: {}", self.0)
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl From<NulError> for io::Error {
/// Converts a [`NulError`] into a [`io::Error`].
fn from(_: NulError) -> io::Error {
io::Error::INVAL
}
}
#[cfg_attr(
staged_api,
stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")
)]
impl FromBytesWithNulError {
/// ```rust
/// if let Err(e) = "xc".parse::<u32>() {
/// Print `e` itself, no need for description().
/// eprintln!("Error: {}", e);
/// }
/// ```
#[allow(deprecated)]
pub fn description(&self) -> &str {
match self.kind {
FromBytesWithNulErrorKind::InteriorNul(..) => {
"data provided contains an interior nul byte"
}
FromBytesWithNulErrorKind::NotNulTerminated => "data provided is not nul terminated",
}
}
}
#[cfg_attr(
staged_api,
stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")
)]
impl fmt::Display for FromBytesWithNulError {
#[allow(deprecated, deprecated_in_future)]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.description())?;
if let FromBytesWithNulErrorKind::InteriorNul(pos) = self.kind {
write!(f, " at byte pos {}", pos)?;
}
Ok(())
}
}
#[cfg_attr(
staged_api,
stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")
)]
impl fmt::Display for FromVecWithNulError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.error_kind {
FromBytesWithNulErrorKind::InteriorNul(pos) => {
write!(
f,
"data provided contains an interior nul byte at pos {}",
pos
)
}
FromBytesWithNulErrorKind::NotNulTerminated => {
write!(f, "data provided is not nul terminated")
}
}
}
}
impl IntoStringError {
/// Consumes this error, returning original [`ZString`] which generated the
/// error.
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
pub fn into_zstring(self) -> ZString {
self.inner
}
/// Consumes this error, returning original [`CString`] which generated the
/// error.
#[cfg(feature = "std")]
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
pub fn into_cstring(self) -> CString {
self.into_zstring()
}
/// Access the underlying UTF-8 error that was the cause of this error.
#[must_use]
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
pub fn utf8_error(&self) -> Utf8Error {
self.error
}
}
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
impl IntoStringError {
#[allow(deprecated)]
pub fn description(&self) -> &str {
"C string contained non-utf8 bytes"
}
/* TODO
pub fn source(&self) -> Option<&(dyn Error + 'static)> {
Some(&self.error)
}
*/
}
#[cfg_attr(staged_api, stable(feature = "cstring_into", since = "1.7.0"))]
impl fmt::Display for IntoStringError {
#[allow(deprecated, deprecated_in_future)]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.description().fmt(f)
}
}
impl ZStr {
/// Wraps a raw C string with a safe C string wrapper.
///
/// This function will wrap the provided `ptr` with a `ZStr` wrapper, which
/// allows inspection and interoperation of non-owned C strings. The total
/// size of the raw C string must be smaller than `isize::MAX` **bytes**
/// in memory due to calling the `slice::from_raw_parts` function.
/// This method is unsafe for a number of reasons:
///
/// * There is no guarantee to the validity of `ptr`.
/// * The returned lifetime is not guaranteed to be the actual lifetime of
/// `ptr`.
/// * There is no guarantee that the memory pointed to by `ptr` contains a
/// valid nul terminator byte at the end of the string.
/// * It is not guaranteed that the memory pointed by `ptr` won't change
/// before the `ZStr` has been destroyed.
///
/// > **Note**: This operation is intended to be a 0-cost cast but it is
/// > currently implemented with an up-front calculation of the length of
/// > the string. This is not guaranteed to always be the case.
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// # fn main() {
/// use std::ffi::ZStr;
///
/// extern "C" {
/// fn my_string() -> *const u8;
/// }
///
/// unsafe {
/// let slice = ZStr::from_ptr(my_string());
/// println!("string returned: {}", slice.to_str().unwrap());
/// }
/// # }
/// ```
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub unsafe fn from_ptr<'a>(ptr: *const u8) -> &'a ZStr {
// SAFETY: The caller has provided a pointer that points to a valid C
// string with a NUL terminator of size less than `isize::MAX`, whose
// content remain valid and doesn't change for the lifetime of the
// returned `ZStr`.
//
// Thus computing the length is fine (a NUL byte exists), the call to
// from_raw_parts is safe because we know the length is at most `isize::MAX`, meaning
// the call to `from_bytes_with_nul_unchecked` is correct.
//
// The cast from u8 to u8 is ok because a u8 is always one byte.
unsafe {
let len = strlen(ptr);
ZStr::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr, len as usize + 1))
}
}
/// Creates a C string wrapper from a byte slice.
///
/// This function will cast the provided `bytes` to a `ZStr`
/// wrapper after ensuring that the byte slice is nul-terminated
/// and does not contain any interior nul bytes.
///
/// # Examples
///
/// ```
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"hello\0");
/// assert!(cstr.is_ok());
/// ```
///
/// Creating a `ZStr` without a trailing nul terminator is an error:
///
/// ```
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"hello");
/// assert!(cstr.is_err());
/// ```
///
/// Creating a `ZStr` with an interior nul byte is an error:
///
/// ```
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"he\0llo\0");
/// assert!(cstr.is_err());
/// ```
#[cfg_attr(staged_api, stable(feature = "cstr_from_bytes", since = "1.10.0"))]
pub fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, FromBytesWithNulError> {
let nul_pos = memchr(b'\0', bytes);
match nul_pos {
Some(nul_pos) if nul_pos + 1 == bytes.len() => {
// SAFETY: We know there is only one nul byte, at the end
// of the byte slice.
Ok(unsafe { Self::from_bytes_with_nul_unchecked(bytes) })
}
Some(nul_pos) => Err(FromBytesWithNulError::interior_nul(nul_pos)),
None => Err(FromBytesWithNulError::not_nul_terminated()),
}
}
/// Unsafely creates a C string wrapper from a byte slice.
///
/// This function will cast the provided `bytes` to a `ZStr` wrapper without
/// performing any sanity checks. The provided slice **must** be nul-terminated
/// and not contain any interior nul bytes.
///
/// # Examples
///
/// ```
/// use std::ffi::{ZStr, ZString};
///
/// unsafe {
/// let cstring = ZString::new("hello").expect("ZString::new failed");
/// let cstr = ZStr::from_bytes_with_nul_unchecked(cstring.to_bytes_with_nul());
/// assert_eq!(cstr, &*cstring);
/// }
/// ```
#[cfg(const_raw_ptr_deref)]
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "cstr_from_bytes", since = "1.10.0"))]
#[cfg_attr(
staged_api,
rustc_const_unstable(feature = "const_cstr_unchecked", issue = "90343")
)]
pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &ZStr {
// SAFETY: Casting to ZStr is safe because its internal representation
// is a [u8] too (safe only inside std).
// Dereferencing the obtained pointer is safe because it comes from a
// reference. Making a reference is then safe because its lifetime
// is bound by the lifetime of the given `bytes`.
unsafe { &*(bytes as *const [u8] as *const ZStr) }
}
/// Unsafely creates a C string wrapper from a byte slice.
///
/// This function will cast the provided `bytes` to a `ZStr` wrapper without
/// performing any sanity checks. The provided slice **must** be nul-terminated
/// and not contain any interior nul bytes.
///
/// # Examples
///
/// ```
/// use std::ffi::{ZStr, ZString};
///
/// unsafe {
/// let cstring = ZString::new("hello").expect("ZString::new failed");
/// let cstr = ZStr::from_bytes_with_nul_unchecked(cstring.to_bytes_with_nul());
/// assert_eq!(cstr, &*cstring);
/// }
/// ```
#[cfg(not(const_raw_ptr_deref))]
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "cstr_from_bytes", since = "1.10.0"))]
#[cfg_attr(
staged_api,
rustc_const_unstable(feature = "const_cstr_unchecked", issue = "90343")
)]
pub unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &ZStr {
// SAFETY: Casting to ZStr is safe because its internal representation
// is a [u8] too (safe only inside std).
// Dereferencing the obtained pointer is safe because it comes from a
// reference. Making a reference is then safe because its lifetime
// is bound by the lifetime of the given `bytes`.
unsafe { &*(bytes as *const [u8] as *const ZStr) }
}
/// Returns the inner pointer to this C string.
///
/// The returned pointer will be valid for as long as `self` is, and points
/// to a contiguous region of memory terminated with a 0 byte to represent
/// the end of the string.
///
/// **WARNING**
///
/// The returned pointer is read-only; writing to it (including passing it
/// to C code that writes to it) causes undefined behavior.
///
/// It is your responsibility to make sure that the underlying memory is not
/// freed too early. For example, the following code will cause undefined
/// behavior when `ptr` is used inside the `unsafe` block:
///
/// ```no_run
/// # #![allow(unused_must_use)] #![allow(temporary_cstring_as_ptr)]
/// use std::ffi::ZString;
///
/// let ptr = ZString::new("Hello").expect("ZString::new failed").as_ptr();
/// unsafe {
/// // `ptr` is dangling
/// *ptr;
/// }
/// ```
///
/// This happens because the pointer returned by `as_ptr` does not carry any
/// lifetime information and the [`ZString`] is deallocated immediately after
/// the `ZString::new("Hello").expect("ZString::new failed").as_ptr()`
/// expression is evaluated.
/// To fix the problem, bind the `ZString` to a local variable:
///
/// ```no_run
/// # #![allow(unused_must_use)]
/// use std::ffi::ZString;
///
/// let hello = ZString::new("Hello").expect("ZString::new failed");
/// let ptr = hello.as_ptr();
/// unsafe {
/// // `ptr` is valid because `hello` is in scope
/// *ptr;
/// }
/// ```
///
/// This way, the lifetime of the [`ZString`] in `hello` encompasses
/// the lifetime of `ptr` and the `unsafe` block.
#[inline]
#[must_use]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
#[cfg_attr(
staged_api,
rustc_const_stable(feature = "const_str_as_ptr", since = "1.32.0")
)]
pub const fn as_ptr(&self) -> *const u8 {
self.inner.as_ptr()
}
/// Converts this C string to a byte slice.
///
/// The returned slice will **not** contain the trailing nul terminator that this C
/// string has.
///
/// > **Note**: This method is currently implemented as a constant-time
/// > cast, but it is planned to alter its definition in the future to
/// > perform the length calculation whenever this method is called.
///
/// # Examples
///
/// ```
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"foo\0").expect("ZStr::from_bytes_with_nul failed");
/// assert_eq!(cstr.to_bytes(), b"foo");
/// ```
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
// SAFETY: to_bytes_with_nul returns slice with length at least 1
unsafe { bytes.get_unchecked(..bytes.len() - 1) }
}
/// Converts this C string to a byte slice containing the trailing 0 byte.
///
/// This function is the equivalent of [`ZStr::to_bytes`] except that it
/// will retain the trailing nul terminator instead of chopping it off.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
///
/// # Examples
///
/// ```
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"foo\0").expect("ZStr::from_bytes_with_nul failed");
/// assert_eq!(cstr.to_bytes_with_nul(), b"foo\0");
/// ```
#[inline]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
pub fn to_bytes_with_nul(&self) -> &[u8] {
unsafe { &*(&self.inner as *const [u8]) }
}
/// Yields a <code>&[str]</code> slice if the `ZStr` contains valid UTF-8.
///
/// If the contents of the `ZStr` are valid UTF-8 data, this
/// function will return the corresponding <code>&[str]</code> slice. Otherwise,
/// it will return an error with details of where UTF-8 validation failed.
///
/// [str]: prim@str "str"
///
/// # Examples
///
/// ```
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"foo\0").expect("ZStr::from_bytes_with_nul failed");
/// assert_eq!(cstr.to_str(), Ok("foo"));
/// ```
#[cfg_attr(staged_api, stable(feature = "cstr_to_str", since = "1.4.0"))]
pub fn to_str(&self) -> Result<&str, str::Utf8Error> {
// N.B., when `ZStr` is changed to perform the length check in `.to_bytes()`
// instead of in `from_ptr()`, it may be worth considering if this should
// be rewritten to do the UTF-8 check inline with the length calculation
// instead of doing it afterwards.
str::from_utf8(self.to_bytes())
}
/// Converts a `ZStr` into a <code>[Cow]<[str]></code>.
///
/// If the contents of the `ZStr` are valid UTF-8 data, this
/// function will return a <code>[Cow]::[Borrowed]\(&[str])</code>
/// with the corresponding <code>&[str]</code> slice. Otherwise, it will
/// replace any invalid UTF-8 sequences with
/// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD] and return a
/// <code>[Cow]::[Owned]\(&[str])</code> with the result.
///
/// [str]: prim@str "str"
/// [Borrowed]: Cow::Borrowed
/// [Owned]: Cow::Owned
/// [U+FFFD]: crate::char::REPLACEMENT_CHARACTER "std::char::REPLACEMENT_CHARACTER"
///
/// # Examples
///
/// Calling `to_string_lossy` on a `ZStr` containing valid UTF-8:
///
/// ```
/// use std::borrow::Cow;
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"Hello World\0")
/// .expect("ZStr::from_bytes_with_nul failed");
/// assert_eq!(cstr.to_string_lossy(), Cow::Borrowed("Hello World"));
/// ```
///
/// Calling `to_string_lossy` on a `ZStr` containing invalid UTF-8:
///
/// ```
/// use std::borrow::Cow;
/// use std::ffi::ZStr;
///
/// let cstr = ZStr::from_bytes_with_nul(b"Hello \xF0\x90\x80World\0")
/// .expect("ZStr::from_bytes_with_nul failed");
/// assert_eq!(
/// cstr.to_string_lossy(),
/// Cow::Owned(String::from("Hello �World")) as Cow<'_, str>
/// );
/// ```
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[cfg_attr(staged_api, stable(feature = "cstr_to_str", since = "1.4.0"))]
pub fn to_string_lossy(&self) -> Cow<'_, str> {
String::from_utf8_lossy(self.to_bytes())
}
/// Converts a <code>[Box]<[ZStr]></code> into a [`ZString`] without copying or allocating.
///
/// # Examples
///
/// ```
/// use rustix::ffi::ZString;
///
/// let z_string = ZString::new(b"foo".to_vec()).expect("ZString::new failed");
/// let boxed = z_string.into_boxed_z_str();
/// assert_eq!(boxed.into_z_string(), ZString::new("foo").expect("ZString::new failed"));
/// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "into_boxed_c_str", since = "1.20.0"))]
pub fn into_z_string(self: Box<ZStr>) -> ZString {
let raw = Box::into_raw(self) as *mut [u8];
ZString {
inner: unsafe { Box::from_raw(raw) },
}
}
/// Converts a <code>[Box]<[ZStr]></code> into a [`CString`] without copying or allocating.
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
/// use rustix::ffi::ZString;
///
/// let z_string = ZString::new(b"foo".to_vec()).expect("ZString::new failed");
/// let boxed = z_string.into_boxed_z_str();
/// assert_eq!(boxed.into_c_string(), CString::new("foo").expect("ZString::new failed"));
/// ```
#[cfg(feature = "std")]
#[must_use = "`self` will be dropped if the result is not used"]
#[cfg_attr(staged_api, stable(feature = "into_boxed_c_str", since = "1.20.0"))]
pub fn into_c_string(self: Box<ZStr>) -> CString {
self.into_z_string()
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl PartialEq for ZStr {
fn eq(&self, other: &ZStr) -> bool {
self.to_bytes().eq(other.to_bytes())
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl Eq for ZStr {}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl PartialOrd for ZStr {
fn partial_cmp(&self, other: &ZStr) -> Option<Ordering> {
self.to_bytes().partial_cmp(&other.to_bytes())
}
}
#[cfg_attr(staged_api, stable(feature = "rust1", since = "1.0.0"))]
impl Ord for ZStr {
fn cmp(&self, other: &ZStr) -> Ordering {
self.to_bytes().cmp(&other.to_bytes())
}
}
#[cfg_attr(staged_api, stable(feature = "cstr_borrow", since = "1.3.0"))]
impl ToOwned for ZStr {
type Owned = ZString;
fn to_owned(&self) -> ZString {
ZString {
inner: self.to_bytes_with_nul().into(),
}
}
#[cfg(toowned_clone_into)]
fn clone_into(&self, target: &mut ZString) {
let mut b = Vec::from(mem::take(&mut target.inner));
self.to_bytes_with_nul().clone_into(&mut b);
target.inner = b.into_boxed_slice();
}
}
#[cfg_attr(staged_api, stable(feature = "cstring_asref", since = "1.7.0"))]
impl From<&ZStr> for ZString {
fn from(s: &ZStr) -> ZString {
s.to_owned()
}
}
#[cfg_attr(staged_api, stable(feature = "cstring_asref", since = "1.7.0"))]
impl ops::Index<ops::RangeFull> for ZString {
type Output = ZStr;
#[inline]
fn index(&self, _index: ops::RangeFull) -> &ZStr {
self
}
}
#[cfg_attr(staged_api, stable(feature = "cstr_range_from", since = "1.47.0"))]
impl ops::Index<ops::RangeFrom<usize>> for ZStr {
type Output = ZStr;
fn index(&self, index: ops::RangeFrom<usize>) -> &ZStr {
let bytes = self.to_bytes_with_nul();
// we need to manually check the starting index to account for the null
// byte, since otherwise we could get an empty string that doesn't end
// in a null.
if index.start < bytes.len() {
unsafe { ZStr::from_bytes_with_nul_unchecked(&bytes[index.start..]) }
} else {
panic!(
"index out of bounds: the len is {} but the index is {}",
bytes.len(),
index.start
);
}
}
}
#[cfg_attr(staged_api, stable(feature = "cstring_asref", since = "1.7.0"))]
impl AsRef<ZStr> for ZStr {
#[inline]
fn as_ref(&self) -> &ZStr {
self
}
}
#[cfg_attr(staged_api, stable(feature = "cstring_asref", since = "1.7.0"))]
impl AsRef<ZStr> for ZString {
#[inline]
fn as_ref(&self) -> &ZStr {
self
}
}
| Cow::Borrowed(s.as_z_str())
}
} |
component.d.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
/// <amd-module name="@angular/compiler-cli/src/ngtsc/annotations/src/component" />
import { AnimationTriggerNames, ConstantPool, InterpolationConfig, ParsedTemplate, ParseSourceFile, R3ClassMetadata, R3ComponentMetadata, TmplAstNode } from '@angular/compiler';
import ts from 'typescript';
import { CycleAnalyzer, CycleHandlingStrategy } from '../../cycles';
import { ModuleResolver, Reference, ReferenceEmitter } from '../../imports';
import { DependencyTracker } from '../../incremental/api';
import { SemanticDepGraphUpdater, SemanticReference, SemanticSymbol } from '../../incremental/semantic_graph';
import { IndexingContext } from '../../indexer';
import { ClassPropertyMapping, ComponentResources, DirectiveTypeCheckMeta, InjectableClassRegistry, MetadataReader, MetadataRegistry, ResourceRegistry } from '../../metadata';
import { PartialEvaluator } from '../../partial_evaluator';
import { PerfRecorder } from '../../perf';
import { ClassDeclaration, Decorator, ReflectionHost } from '../../reflection';
import { ComponentScopeReader, LocalModuleScopeRegistry, TypeCheckScopeRegistry } from '../../scope';
import { AnalysisOutput, CompileResult, DecoratorHandler, DetectResult, HandlerFlags, HandlerPrecedence, ResolveResult } from '../../transform';
import { TemplateSourceMapping, TypeCheckContext } from '../../typecheck/api';
import { ExtendedTemplateChecker } from '../../typecheck/extended/api';
import { SubsetOfKeys } from '../../util/src/typescript';
import { Xi18nContext } from '../../xi18n';
import { ResourceLoader } from './api';
import { DirectiveSymbol } from './directive';
/**
* These fields of `R3ComponentMetadata` are updated in the `resolve` phase.
*
* The `keyof R3ComponentMetadata &` condition ensures that only fields of `R3ComponentMetadata` can
* be included here.
*/
export declare type ComponentMetadataResolvedFields = SubsetOfKeys<R3ComponentMetadata, 'directives' | 'pipes' | 'declarationListEmitMode'>;
export interface ComponentAnalysisData {
/**
* `meta` includes those fields of `R3ComponentMetadata` which are calculated at `analyze` time
* (not during resolve).
*/
meta: Omit<R3ComponentMetadata, ComponentMetadataResolvedFields>;
baseClass: Reference<ClassDeclaration> | 'dynamic' | null;
typeCheckMeta: DirectiveTypeCheckMeta;
template: ParsedTemplateWithSource;
classMetadata: R3ClassMetadata | null;
inputs: ClassPropertyMapping;
outputs: ClassPropertyMapping;
/**
* Providers extracted from the `providers` field of the component annotation which will require
* an Angular factory definition at runtime.
*/
providersRequiringFactory: Set<Reference<ClassDeclaration>> | null;
/**
* Providers extracted from the `viewProviders` field of the component annotation which will
* require an Angular factory definition at runtime.
*/
viewProvidersRequiringFactory: Set<Reference<ClassDeclaration>> | null;
resources: ComponentResources;
/**
* `styleUrls` extracted from the decorator, if present.
*/
styleUrls: StyleUrlMeta[] | null;
/**
* Inline stylesheets extracted from the decorator, if present.
*/
inlineStyles: string[] | null;
isPoisoned: boolean;
animationTriggerNames: AnimationTriggerNames | null;
}
export declare type ComponentResolutionData = Pick<R3ComponentMetadata, ComponentMetadataResolvedFields>;
/**
* The literal style url extracted from the decorator, along with metadata for diagnostics.
*/
export interface StyleUrlMeta {
url: string;
nodeForError: ts.Node;
source: ResourceTypeForDiagnostics.StylesheetFromTemplate | ResourceTypeForDiagnostics.StylesheetFromDecorator;
}
/**
* Information about the origin of a resource in the application code. This is used for creating
* diagnostics, so we can point to the root cause of an error in the application code.
*
* A template resource comes from the `templateUrl` property on the component decorator.
*
* Stylesheets resources can come from either the `styleUrls` property on the component decorator,
* or from inline `style` tags and style links on the external template.
*/
export declare const enum ResourceTypeForDiagnostics {
Template = 0,
StylesheetFromTemplate = 1,
StylesheetFromDecorator = 2
}
/**
* Represents an Angular component.
*/
export declare class | extends DirectiveSymbol {
usedDirectives: SemanticReference[];
usedPipes: SemanticReference[];
isRemotelyScoped: boolean;
isEmitAffected(previousSymbol: SemanticSymbol, publicApiAffected: Set<SemanticSymbol>): boolean;
isTypeCheckBlockAffected(previousSymbol: SemanticSymbol, typeCheckApiAffected: Set<SemanticSymbol>): boolean;
}
/**
* `DecoratorHandler` which handles the `@Component` annotation.
*/
export declare class ComponentDecoratorHandler implements DecoratorHandler<Decorator, ComponentAnalysisData, ComponentSymbol, ComponentResolutionData> {
private reflector;
private evaluator;
private metaRegistry;
private metaReader;
private scopeReader;
private scopeRegistry;
private typeCheckScopeRegistry;
private resourceRegistry;
private isCore;
private resourceLoader;
private rootDirs;
private defaultPreserveWhitespaces;
private i18nUseExternalIds;
private enableI18nLegacyMessageIdFormat;
private usePoisonedData;
private i18nNormalizeLineEndingsInICUs;
private moduleResolver;
private cycleAnalyzer;
private cycleHandlingStrategy;
private refEmitter;
private depTracker;
private injectableRegistry;
private semanticDepGraphUpdater;
private annotateForClosureCompiler;
private perf;
constructor(reflector: ReflectionHost, evaluator: PartialEvaluator, metaRegistry: MetadataRegistry, metaReader: MetadataReader, scopeReader: ComponentScopeReader, scopeRegistry: LocalModuleScopeRegistry, typeCheckScopeRegistry: TypeCheckScopeRegistry, resourceRegistry: ResourceRegistry, isCore: boolean, resourceLoader: ResourceLoader, rootDirs: ReadonlyArray<string>, defaultPreserveWhitespaces: boolean, i18nUseExternalIds: boolean, enableI18nLegacyMessageIdFormat: boolean, usePoisonedData: boolean, i18nNormalizeLineEndingsInICUs: boolean | undefined, moduleResolver: ModuleResolver, cycleAnalyzer: CycleAnalyzer, cycleHandlingStrategy: CycleHandlingStrategy, refEmitter: ReferenceEmitter, depTracker: DependencyTracker | null, injectableRegistry: InjectableClassRegistry, semanticDepGraphUpdater: SemanticDepGraphUpdater | null, annotateForClosureCompiler: boolean, perf: PerfRecorder);
private literalCache;
private elementSchemaRegistry;
/**
* During the asynchronous preanalyze phase, it's necessary to parse the template to extract
* any potential <link> tags which might need to be loaded. This cache ensures that work is not
* thrown away, and the parsed template is reused during the analyze phase.
*/
private preanalyzeTemplateCache;
private preanalyzeStylesCache;
readonly precedence = HandlerPrecedence.PRIMARY;
readonly name: string;
detect(node: ClassDeclaration, decorators: Decorator[] | null): DetectResult<Decorator> | undefined;
preanalyze(node: ClassDeclaration, decorator: Readonly<Decorator>): Promise<void> | undefined;
analyze(node: ClassDeclaration, decorator: Readonly<Decorator>, flags?: HandlerFlags): AnalysisOutput<ComponentAnalysisData>;
symbol(node: ClassDeclaration, analysis: Readonly<ComponentAnalysisData>): ComponentSymbol;
register(node: ClassDeclaration, analysis: ComponentAnalysisData): void;
index(context: IndexingContext, node: ClassDeclaration, analysis: Readonly<ComponentAnalysisData>): null | undefined;
typeCheck(ctx: TypeCheckContext, node: ClassDeclaration, meta: Readonly<ComponentAnalysisData>): void;
extendedTemplateCheck(component: ts.ClassDeclaration, extendedTemplateChecker: ExtendedTemplateChecker): ts.Diagnostic[];
resolve(node: ClassDeclaration, analysis: Readonly<ComponentAnalysisData>, symbol: ComponentSymbol): ResolveResult<ComponentResolutionData>;
xi18n(ctx: Xi18nContext, node: ClassDeclaration, analysis: Readonly<ComponentAnalysisData>): void;
updateResources(node: ClassDeclaration, analysis: ComponentAnalysisData): void;
compileFull(node: ClassDeclaration, analysis: Readonly<ComponentAnalysisData>, resolution: Readonly<ComponentResolutionData>, pool: ConstantPool): CompileResult[];
compilePartial(node: ClassDeclaration, analysis: Readonly<ComponentAnalysisData>, resolution: Readonly<ComponentResolutionData>): CompileResult[];
/**
* Transforms the given decorator to inline external resources. i.e. if the decorator
* resolves to `@Component`, the `templateUrl` and `styleUrls` metadata fields will be
* transformed to their semantically-equivalent inline variants.
*
* This method is used for serializing decorators into the class metadata. The emitted
* class metadata should not refer to external resources as this would be inconsistent
* with the component definitions/declarations which already inline external resources.
*
* Additionally, the references to external resources would require libraries to ship
* external resources exclusively for the class metadata.
*/
private _transformDecoratorToInlineResources;
private _resolveLiteral;
private _resolveEnumValue;
private _extractComponentStyleUrls;
private _extractStyleUrlsFromExpression;
private _extractStyleResources;
private _preloadAndParseTemplate;
private extractTemplate;
private _parseTemplate;
private parseTemplateDeclaration;
private _resolveImportedFile;
/**
* Check whether adding an import from `origin` to the source-file corresponding to `expr` would
* create a cyclic import.
*
* @returns a `Cycle` object if a cycle would be created, otherwise `null`.
*/
private _checkForCyclicImport;
private _recordSyntheticImport;
private makeResourceNotFoundError;
private _extractTemplateStyleUrls;
}
/**
* Information about the template which was extracted during parsing.
*
* This contains the actual parsed template as well as any metadata collected during its parsing,
* some of which might be useful for re-parsing the template with different options.
*/
export interface ParsedComponentTemplate extends ParsedTemplate {
/**
* The template AST, parsed in a manner which preserves source map information for diagnostics.
*
* Not useful for emit.
*/
diagNodes: TmplAstNode[];
/**
* The `ParseSourceFile` for the template.
*/
file: ParseSourceFile;
}
export interface ParsedTemplateWithSource extends ParsedComponentTemplate {
/** The string contents of the template. */
content: string;
sourceMapping: TemplateSourceMapping;
declaration: TemplateDeclaration;
}
/**
* Common fields extracted from the declaration of a template.
*/
interface CommonTemplateDeclaration {
preserveWhitespaces: boolean;
interpolationConfig: InterpolationConfig;
templateUrl: string;
resolvedTemplateUrl: string;
}
/**
* Information extracted from the declaration of an inline template.
*/
interface InlineTemplateDeclaration extends CommonTemplateDeclaration {
isInline: true;
expression: ts.Expression;
}
/**
* Information extracted from the declaration of an external template.
*/
interface ExternalTemplateDeclaration extends CommonTemplateDeclaration {
isInline: false;
templateUrlExpression: ts.Expression;
}
/**
* The declaration of a template extracted from a component decorator.
*
* This data is extracted and stored separately to facilitate re-interpreting the template
* declaration whenever the compiler is notified of a change to a template file. With this
* information, `ComponentDecoratorHandler` is able to re-read the template and update the component
* record without needing to parse the original decorator again.
*/
declare type TemplateDeclaration = InlineTemplateDeclaration | ExternalTemplateDeclaration;
export {};
| ComponentSymbol |
OperatorCodes.py | # Copyright 2022, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Operator code tables
These are mostly used to look up the Python C/API from operations or a wrapper used.
"""
unary_operator_codes = {
"UAdd": ("PyNumber_Positive", 1),
"USub": ("PyNumber_Negative", 1),
"Invert": ("PyNumber_Invert", 1),
"Repr": ("PyObject_Repr", 1),
"Not": ("UNARY_NOT", 0),
} |
rich_comparison_codes = {
"Lt": "LT",
"LtE": "LE",
"Eq": "EQ",
"NotEq": "NE",
"Gt": "GT",
"GtE": "GE",
}
containing_comparison_codes = ("In", "NotIn") | |
service-worker.js | "use strict";function | (e){return e.keys().then(function(e){return e.map(function(e){return e.url})}).then(function(e){return new Set(e)})}var precacheConfig=[["index.html","f33ba166fab24448bab2497e951db01f"],["static/js/main.3c863732.js","fd9ca3286ec1ea80f61725da5ec7e299"]],cacheName="sw-precache-v3-sw-precache-webpack-plugin-"+(self.registration?self.registration.scope:""),ignoreUrlParametersMatching=[/^utm_/],addDirectoryIndex=function(e,t){var n=new URL(e);return"/"===n.pathname.slice(-1)&&(n.pathname+=t),n.toString()},cleanResponse=function(e){return e.redirected?("body"in e?Promise.resolve(e.body):e.blob()).then(function(t){return new Response(t,{headers:e.headers,status:e.status,statusText:e.statusText})}):Promise.resolve(e)},createCacheKey=function(e,t,n,r){var a=new URL(e);return r&&a.pathname.match(r)||(a.search+=(a.search?"&":"")+encodeURIComponent(t)+"="+encodeURIComponent(n)),a.toString()},isPathWhitelisted=function(e,t){if(0===e.length)return!0;var n=new URL(t).pathname;return e.some(function(e){return n.match(e)})},stripIgnoredUrlParameters=function(e,t){var n=new URL(e);return n.hash="",n.search=n.search.slice(1).split("&").map(function(e){return e.split("=")}).filter(function(e){return t.every(function(t){return!t.test(e[0])})}).map(function(e){return e.join("=")}).join("&"),n.toString()},hashParamName="_sw-precache",urlsToCacheKeys=new Map(precacheConfig.map(function(e){var t=e[0],n=e[1],r=new URL(t,self.location),a=createCacheKey(r,hashParamName,n,/\.\w{8}\./);return[r.toString(),a]}));self.addEventListener("install",function(e){e.waitUntil(caches.open(cacheName).then(function(e){return setOfCachedUrls(e).then(function(t){return Promise.all(Array.from(urlsToCacheKeys.values()).map(function(n){if(!t.has(n)){var r=new Request(n,{credentials:"same-origin"});return fetch(r).then(function(t){if(!t.ok)throw new Error("Request for "+n+" returned a response with status "+t.status);return cleanResponse(t).then(function(t){return e.put(n,t)})})}}))})}).then(function(){return self.skipWaiting()}))}),self.addEventListener("activate",function(e){var t=new Set(urlsToCacheKeys.values());e.waitUntil(caches.open(cacheName).then(function(e){return e.keys().then(function(n){return Promise.all(n.map(function(n){if(!t.has(n.url))return e.delete(n)}))})}).then(function(){return self.clients.claim()}))}),self.addEventListener("fetch",function(e){if("GET"===e.request.method){var t,n=stripIgnoredUrlParameters(e.request.url,ignoreUrlParametersMatching);t=urlsToCacheKeys.has(n);t||(n=addDirectoryIndex(n,"index.html"),t=urlsToCacheKeys.has(n));!t&&"navigate"===e.request.mode&&isPathWhitelisted([],e.request.url)&&(n=new URL("/apps/react/gameoflife/index.html",self.location).toString(),t=urlsToCacheKeys.has(n)),t&&e.respondWith(caches.open(cacheName).then(function(e){return e.match(urlsToCacheKeys.get(n)).then(function(e){if(e)return e;throw Error("The cached response that was expected is missing.")})}).catch(function(t){return console.warn('Couldn\'t serve response for "%s" from cache: %O',e.request.url,t),fetch(e.request)}))}}); | setOfCachedUrls |
gmd5.go | // Copyright 2017 gf Author(https://github.com/gogf/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://github.com/gogf/gf.
// Package gmd5 provides useful API for MD5 encryption algorithms.
package gmd5
import (
"crypto/md5"
"fmt"
"io"
"os"
"github.com/gogf/gf/util/gconv"
)
// Encrypt encrypts any type of variable using MD5 algorithms.
// It uses gconv package to convert <v> to its bytes type.
func Encrypt(data interface{}) (encrypt string, err error) {
return EncryptBytes(gconv.Bytes(data))
}
// MustEncrypt encrypts any type of variable using MD5 algorithms.
// It uses gconv package to convert <v> to its bytes type.
// It panics if any error occurs.
func MustEncrypt(data interface{}) string {
result, err := Encrypt(data)
if err != nil {
panic(err)
}
return result
}
// EncryptBytes encrypts <data> using MD5 algorithms.
func EncryptBytes(data []byte) (encrypt string, err error) {
h := md5.New()
if _, err = h.Write([]byte(data)); err != nil {
return "", err
}
return fmt.Sprintf("%x", h.Sum(nil)), nil
}
// MustEncryptBytes encrypts <data> using MD5 algorithms.
// It panics if any error occurs.
func MustEncryptBytes(data []byte) string {
result, err := EncryptBytes(data)
if err != nil {
panic(err)
}
return result
}
// EncryptBytes encrypts string <data> using MD5 algorithms.
func EncryptString(data string) (encrypt string, err error) {
return EncryptBytes([]byte(data))
} | result, err := EncryptString(data)
if err != nil {
panic(err)
}
return result
}
// EncryptFile encrypts file content of <path> using MD5 algorithms.
func EncryptFile(path string) (encrypt string, err error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
h := md5.New()
_, err = io.Copy(h, f)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", h.Sum(nil)), nil
}
// MustEncryptFile encrypts file content of <path> using MD5 algorithms.
// It panics if any error occurs.
func MustEncryptFile(path string) string {
result, err := EncryptFile(path)
if err != nil {
panic(err)
}
return result
} |
// MustEncryptString encrypts string <data> using MD5 algorithms.
// It panics if any error occurs.
func MustEncryptString(data string) string { |
location_placeholder_field.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/ads/googleads/v3/enums/location_placeholder_field.proto
package enums
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
_ "google.golang.org/genproto/googleapis/api/annotations"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Possible values for Location placeholder fields.
type LocationPlaceholderFieldEnum_LocationPlaceholderField int32
const (
// Not specified.
LocationPlaceholderFieldEnum_UNSPECIFIED LocationPlaceholderFieldEnum_LocationPlaceholderField = 0
// Used for return value only. Represents value unknown in this version.
LocationPlaceholderFieldEnum_UNKNOWN LocationPlaceholderFieldEnum_LocationPlaceholderField = 1
// Data Type: STRING. The name of the business.
LocationPlaceholderFieldEnum_BUSINESS_NAME LocationPlaceholderFieldEnum_LocationPlaceholderField = 2
// Data Type: STRING. Line 1 of the business address.
LocationPlaceholderFieldEnum_ADDRESS_LINE_1 LocationPlaceholderFieldEnum_LocationPlaceholderField = 3
// Data Type: STRING. Line 2 of the business address.
LocationPlaceholderFieldEnum_ADDRESS_LINE_2 LocationPlaceholderFieldEnum_LocationPlaceholderField = 4
// Data Type: STRING. City of the business address.
LocationPlaceholderFieldEnum_CITY LocationPlaceholderFieldEnum_LocationPlaceholderField = 5
// Data Type: STRING. Province of the business address.
LocationPlaceholderFieldEnum_PROVINCE LocationPlaceholderFieldEnum_LocationPlaceholderField = 6
// Data Type: STRING. Postal code of the business address.
LocationPlaceholderFieldEnum_POSTAL_CODE LocationPlaceholderFieldEnum_LocationPlaceholderField = 7
// Data Type: STRING. Country code of the business address.
LocationPlaceholderFieldEnum_COUNTRY_CODE LocationPlaceholderFieldEnum_LocationPlaceholderField = 8
// Data Type: STRING. Phone number of the business.
LocationPlaceholderFieldEnum_PHONE_NUMBER LocationPlaceholderFieldEnum_LocationPlaceholderField = 9
)
var LocationPlaceholderFieldEnum_LocationPlaceholderField_name = map[int32]string{
0: "UNSPECIFIED",
1: "UNKNOWN",
2: "BUSINESS_NAME",
3: "ADDRESS_LINE_1",
4: "ADDRESS_LINE_2",
5: "CITY",
6: "PROVINCE",
7: "POSTAL_CODE",
8: "COUNTRY_CODE",
9: "PHONE_NUMBER",
}
var LocationPlaceholderFieldEnum_LocationPlaceholderField_value = map[string]int32{
"UNSPECIFIED": 0,
"UNKNOWN": 1,
"BUSINESS_NAME": 2,
"ADDRESS_LINE_1": 3,
"ADDRESS_LINE_2": 4,
"CITY": 5,
"PROVINCE": 6,
"POSTAL_CODE": 7,
"COUNTRY_CODE": 8,
"PHONE_NUMBER": 9,
}
func (x LocationPlaceholderFieldEnum_LocationPlaceholderField) String() string {
return proto.EnumName(LocationPlaceholderFieldEnum_LocationPlaceholderField_name, int32(x))
}
func (LocationPlaceholderFieldEnum_LocationPlaceholderField) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_06285cb96465af84, []int{0, 0}
}
// Values for Location placeholder fields.
type LocationPlaceholderFieldEnum struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LocationPlaceholderFieldEnum) Reset() { *m = LocationPlaceholderFieldEnum{} }
func (m *LocationPlaceholderFieldEnum) String() string { return proto.CompactTextString(m) }
func (*LocationPlaceholderFieldEnum) ProtoMessage() {}
func (*LocationPlaceholderFieldEnum) Descriptor() ([]byte, []int) {
return fileDescriptor_06285cb96465af84, []int{0}
}
func (m *LocationPlaceholderFieldEnum) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LocationPlaceholderFieldEnum.Unmarshal(m, b)
}
func (m *LocationPlaceholderFieldEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LocationPlaceholderFieldEnum.Marshal(b, m, deterministic)
}
func (m *LocationPlaceholderFieldEnum) XXX_Merge(src proto.Message) {
xxx_messageInfo_LocationPlaceholderFieldEnum.Merge(m, src)
}
func (m *LocationPlaceholderFieldEnum) XXX_Size() int {
return xxx_messageInfo_LocationPlaceholderFieldEnum.Size(m)
}
func (m *LocationPlaceholderFieldEnum) XXX_DiscardUnknown() {
xxx_messageInfo_LocationPlaceholderFieldEnum.DiscardUnknown(m)
}
var xxx_messageInfo_LocationPlaceholderFieldEnum proto.InternalMessageInfo
func init() {
proto.RegisterEnum("google.ads.googleads.v3.enums.LocationPlaceholderFieldEnum_LocationPlaceholderField", LocationPlaceholderFieldEnum_LocationPlaceholderField_name, LocationPlaceholderFieldEnum_LocationPlaceholderField_value)
proto.RegisterType((*LocationPlaceholderFieldEnum)(nil), "google.ads.googleads.v3.enums.LocationPlaceholderFieldEnum")
}
func init() |
var fileDescriptor_06285cb96465af84 = []byte{
// 392 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0xc1, 0xae, 0x93, 0x40,
0x14, 0x15, 0xde, 0xf3, 0xbd, 0x3a, 0xef, 0xa9, 0xe3, 0xac, 0x8c, 0x69, 0x17, 0xed, 0x07, 0x0c,
0x51, 0x76, 0x63, 0x62, 0x32, 0xc0, 0xb4, 0x12, 0xdb, 0x81, 0x40, 0xc1, 0xd4, 0x90, 0x10, 0x2c,
0x88, 0x24, 0x94, 0x21, 0x9d, 0xb6, 0x1f, 0xe4, 0xd2, 0xaf, 0x70, 0xed, 0x5f, 0xb8, 0x75, 0xe9,
0x17, 0x98, 0x81, 0xb6, 0x2e, 0x4c, 0xdf, 0x86, 0x9c, 0x9c, 0x7b, 0xcf, 0x39, 0xcc, 0xb9, 0xe0,
0x5d, 0x29, 0x44, 0x59, 0x17, 0x46, 0x96, 0x4b, 0xa3, 0x87, 0x0a, 0x1d, 0x4c, 0xa3, 0x68, 0xf6,
0x1b, 0x69, 0xd4, 0x62, 0x9d, 0xed, 0x2a, 0xd1, 0xa4, 0x6d, 0x9d, 0xad, 0x8b, 0xaf, 0xa2, 0xce,
0x8b, 0x6d, 0xfa, 0xa5, 0x2a, 0xea, 0x1c, 0xb7, 0x5b, 0xb1, 0x13, 0x68, 0xd4, 0x8b, 0x70, 0x96,
0x4b, 0x7c, 0xd6, 0xe3, 0x83, 0x89, 0x3b, 0xfd, 0xab, 0xe1, 0xc9, 0xbe, 0xad, 0x8c, 0xac, 0x69,
0xc4, 0xae, 0x73, 0x93, 0xbd, 0x78, 0xf2, 0x4b, 0x03, 0xc3, 0xf9, 0x31, 0xc1, 0xff, 0x17, 0x30,
0x55, 0xfe, 0xac, 0xd9, 0x6f, 0x26, 0x3f, 0x34, 0xf0, 0xf2, 0xd2, 0x02, 0x7a, 0x0e, 0xee, 0x22,
0x1e, 0xfa, 0xcc, 0x76, 0xa7, 0x2e, 0x73, 0xe0, 0x23, 0x74, 0x07, 0x6e, 0x23, 0xfe, 0x81, 0x7b,
0x1f, 0x39, 0xd4, 0xd0, 0x0b, 0xf0, 0xd4, 0x8a, 0x42, 0x97, 0xb3, 0x30, 0x4c, 0x39, 0x5d, 0x30,
0xa8, 0x23, 0x04, 0x9e, 0x51, 0xc7, 0x09, 0x14, 0x33, 0x77, 0x39, 0x4b, 0x5f, 0xc3, 0xab, 0xff,
0xb8, 0x37, 0xf0, 0x1a, 0x0d, 0xc0, 0xb5, 0xed, 0x2e, 0x57, 0xf0, 0x31, 0xba, 0x07, 0x03, 0x3f,
0xf0, 0x62, 0x97, 0xdb, 0x0c, 0xde, 0xa8, 0x40, 0xdf, 0x0b, 0x97, 0x74, 0x9e, 0xda, 0x9e, 0xc3,
0xe0, 0x2d, 0x82, 0xe0, 0xde, 0xf6, 0x22, 0xbe, 0x0c, 0x56, 0x3d, 0x33, 0x50, 0x8c, 0xff, 0xde,
0xe3, 0x2c, 0xe5, 0xd1, 0xc2, 0x62, 0x01, 0x7c, 0x62, 0xfd, 0xd1, 0xc0, 0x78, 0x2d, 0x36, 0xf8,
0xc1, 0x9e, 0xac, 0xd1, 0xa5, 0x57, 0xfa, 0xaa, 0x28, 0x5f, 0xfb, 0x64, 0x1d, 0xf5, 0xa5, 0xa8,
0xb3, 0xa6, 0xc4, 0x62, 0x5b, 0x1a, 0x65, 0xd1, 0x74, 0x35, 0x9e, 0xee, 0xd6, 0x56, 0xf2, 0xc2,
0x19, 0xdf, 0x76, 0xdf, 0x6f, 0xfa, 0xd5, 0x8c, 0xd2, 0xef, 0xfa, 0x68, 0xd6, 0x5b, 0xd1, 0x5c,
0xe2, 0x1e, 0x2a, 0x14, 0x9b, 0x58, 0x55, 0x2e, 0x7f, 0x9e, 0xe6, 0x09, 0xcd, 0x65, 0x72, 0x9e,
0x27, 0xb1, 0x99, 0x74, 0xf3, 0xdf, 0xfa, 0xb8, 0x27, 0x09, 0xa1, 0xb9, 0x24, 0xe4, 0xbc, 0x41,
0x48, 0x6c, 0x12, 0xd2, 0xed, 0x7c, 0xbe, 0xe9, 0x7e, 0xcc, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff,
0x6c, 0x41, 0x6a, 0xaa, 0x5e, 0x02, 0x00, 0x00,
}
| {
proto.RegisterFile("google/ads/googleads/v3/enums/location_placeholder_field.proto", fileDescriptor_06285cb96465af84)
} |
theme-controller.ts | import type { CarbonTheme } from 'carbon/theme/utils.js';
import { LS_THEME_KEY } from 'carbon/theme/utils.js';
import type { ReactiveControllerHost, TemplateResult } from 'lit';
import { html, ReactiveController } from 'lit';
export class | implements ReactiveController {
host: ReactiveControllerHost & { theme: CarbonTheme };
updateRequested = false;
loadedThemes: Partial<Record<CarbonTheme, string>> = {};
styleElement?: TemplateResult;
constructor(host: ReactiveControllerHost & { theme: CarbonTheme }) {
this.host = host;
host.addController(this);
this.loadTheme(this.host.theme).then(this.requestHostUpdate);
}
hostConnected() {
window.addEventListener('np:theme-selection', this.handleThemeSelection);
}
hostDisconnected() {
window.removeEventListener('np:theme-selection', this.handleThemeSelection);
}
hostUpdate() {
const css = this.loadedThemes[this.host.theme];
if (css) {
this.styleElement = html`<style>
${css}
</style>`;
}
}
private handleThemeSelection = async (
e: CustomEvent<Record<'theme', CarbonTheme>>
) => {
const { theme } = e.detail;
window.localStorage.setItem(LS_THEME_KEY, theme);
this.host.theme = await this.loadTheme(theme);
};
private async loadTheme(theme: CarbonTheme) {
if (!Object.keys(this.loadedThemes).includes(theme)) {
const detail = { id: `load theme: ${theme}` };
window.dispatchEvent(new CustomEvent('np:progressstart', { detail }));
// await new Promise(r => setTimeout(r, 1500));
this.loadedThemes[theme] = (
await import(`../../../../src/carbon/theme/${theme}.css`)
).default;
window.dispatchEvent(new CustomEvent('np:progressend', { detail }));
}
return theme;
}
private requestHostUpdate = () => {
this.updateRequested = true;
this.host.requestUpdate();
this.host.updateComplete.then(() => {
this.updateRequested = false;
});
};
}
declare global {
interface WindowEventMap {
'np:theme-selection': CustomEvent;
}
}
| ThemeController |
client.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at | // Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package transport
import (
"fmt"
"net"
"sync"
"time"
"github.com/wangjia184/beats/libbeat/testing"
)
type Client struct {
dialer Dialer
network string
host string
config *Config
conn net.Conn
mutex sync.Mutex
}
type Config struct {
Proxy *ProxyConfig
TLS *TLSConfig
Timeout time.Duration
Stats IOStatser
}
func MakeDialer(c *Config) (Dialer, error) {
var err error
dialer := NetDialer(c.Timeout)
dialer, err = ProxyDialer(c.Proxy, dialer)
if err != nil {
return nil, err
}
if c.Stats != nil {
dialer = StatsDialer(dialer, c.Stats)
}
if c.TLS != nil {
return TLSDialer(dialer, c.TLS, c.Timeout)
}
return dialer, nil
}
func NewClient(c *Config, network, host string, defaultPort int) (*Client, error) {
// do some sanity checks regarding network and Config matching +
// address being parseable
switch network {
case "tcp", "tcp4", "tcp6":
case "udp", "udp4", "udp6":
if c.TLS == nil && c.Proxy == nil {
break
}
fallthrough
default:
return nil, fmt.Errorf("unsupported network type %v", network)
}
dialer, err := MakeDialer(c)
if err != nil {
return nil, err
}
return NewClientWithDialer(dialer, c, network, host, defaultPort)
}
func NewClientWithDialer(d Dialer, c *Config, network, host string, defaultPort int) (*Client, error) {
// check address being parseable
host = fullAddress(host, defaultPort)
_, _, err := net.SplitHostPort(host)
if err != nil {
return nil, err
}
client := &Client{
dialer: d,
network: network,
host: host,
config: c,
}
return client, nil
}
func (c *Client) Connect() error {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.conn != nil {
_ = c.conn.Close()
c.conn = nil
}
conn, err := c.dialer.Dial(c.network, c.host)
if err != nil {
return err
}
c.conn = conn
return nil
}
func (c *Client) IsConnected() bool {
c.mutex.Lock()
b := c.conn != nil
c.mutex.Unlock()
return b
}
func (c *Client) Close() error {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.conn != nil {
debugf("closing")
err := c.conn.Close()
c.conn = nil
return err
}
return nil
}
func (c *Client) getConn() net.Conn {
c.mutex.Lock()
conn := c.conn
c.mutex.Unlock()
return conn
}
func (c *Client) Read(b []byte) (int, error) {
conn := c.getConn()
if conn == nil {
return 0, ErrNotConnected
}
n, err := conn.Read(b)
return n, c.handleError(err)
}
func (c *Client) Write(b []byte) (int, error) {
conn := c.getConn()
if conn == nil {
return 0, ErrNotConnected
}
n, err := c.conn.Write(b)
return n, c.handleError(err)
}
func (c *Client) LocalAddr() net.Addr {
conn := c.getConn()
if conn != nil {
return c.conn.LocalAddr()
}
return nil
}
func (c *Client) RemoteAddr() net.Addr {
conn := c.getConn()
if conn != nil {
return c.conn.LocalAddr()
}
return nil
}
func (c *Client) Host() string {
return c.host
}
func (c *Client) SetDeadline(t time.Time) error {
conn := c.getConn()
if conn == nil {
return ErrNotConnected
}
err := conn.SetDeadline(t)
return c.handleError(err)
}
func (c *Client) SetReadDeadline(t time.Time) error {
conn := c.getConn()
if conn == nil {
return ErrNotConnected
}
err := conn.SetReadDeadline(t)
return c.handleError(err)
}
func (c *Client) SetWriteDeadline(t time.Time) error {
conn := c.getConn()
if conn == nil {
return ErrNotConnected
}
err := conn.SetWriteDeadline(t)
return c.handleError(err)
}
func (c *Client) handleError(err error) error {
if err != nil {
debugf("handle error: %v", err)
if nerr, ok := err.(net.Error); !(ok && (nerr.Temporary() || nerr.Timeout())) {
_ = c.Close()
}
}
return err
}
func (c *Client) Test(d testing.Driver) {
d.Run("logstash: "+c.host, func(d testing.Driver) {
d.Run("connection", func(d testing.Driver) {
netDialer := TestNetDialer(d, c.config.Timeout)
_, err := netDialer.Dial("tcp", c.host)
d.Fatal("dial up", err)
})
if c.config.TLS == nil {
d.Warn("TLS", "secure connection disabled")
} else {
d.Run("TLS", func(d testing.Driver) {
netDialer := NetDialer(c.config.Timeout)
tlsDialer, err := TestTLSDialer(d, netDialer, c.config.TLS, c.config.Timeout)
_, err = tlsDialer.Dial("tcp", c.host)
d.Fatal("dial up", err)
})
}
err := c.Connect()
d.Fatal("talk to server", err)
})
}
func (c *Client) String() string {
return c.network + "://" + c.host
} | //
// http://www.apache.org/licenses/LICENSE-2.0
// |
eu.js | CKEDITOR.plugins.setLang("about","eu",{copy:"Copyright © $1. Eskubide guztiak erreserbaturik.",dlgTitle:"CKEditor(r)i buruz",help:"$1 aztertu laguntza jasotzeko.",moreInfo:"Lizentziari buruzko informazioa gure webgunean:",title:"CKEditor(r)i buruz",userGuide:"CKEditor User's Guide"}); |
||
user-genre.js | const db = require("../database/db-config.js");
module.exports = {
findByUserId,
findById,
add,
update,
remove
};
function findByUserId(userId) {
return db("userGenre")
.where( "userGenre.userId", userId ); | return db("userGenre").where({ genreId });
}
function add(genre) {
return db("userGenre")
.insert(genre)
.returning("*")
}
function update(updatedGenre, userId, genreId) {
return db("userGenre")
.update(updatedGenre)
.where({ userId } && { id:genreId })
}
function remove(userId, genreId) {
return db("userGenre")
.where({ userId } && { id: genreId})
.del();
} | }
function findById(genreId) { |
filtering_diffusion.py | #!/usr/bin/env python
# coding: utf-8
import sys
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import norm
from scipy.io import loadmat, savemat
from nbs_class import Ppi, Patient
from subprocess import call
# import h5py
import os
import glob
import time
import datetime
# NOTE mutationProfileDiffusion -> propagation
# mutationProfile -> M, PPIAdjacencyMatrix -> adj, dataFolder -> result_folder
# PPI_influence_min -> ppi_influence_min, PPI_influence_max-> ppi_influence_max
# PPI_influence()-> calcul_ppi_influence(), PPI_influence -> ppi_influence
# influenceDistance->influence_distance
# influenceMat -> ppi_influence, PPIneighboorsMax -> ngh_max,
# bestInfluencers -> best_influencers
# filteredGenes -> deg0, keepSingletons -> keep_singletons
# mutationsMin -> min_mutation, mutationsMax -> mutationsMax
# newnet -> ppi_ngh, netFinal -> ppi_final, mutFinal -> mut_final
# filteredPatients -> filtered_patients
# @profile
def propagation(M, adj, alpha=0.7, tol=10e-6): # TODO equation, M, alpha
"""Network propagation iterative process
Iterative algorithm for apply propagation using random walk on a network:
Initialize::
X1 = M
Repeat::
X2 = alpha * X1.A + (1-alpha) * M
X1 = X2
Until::
norm(X2-X1) < tol
Where::
A : degree-normalized adjacency matrix
Parameters
----------
M : sparse matrix
Data matrix to be diffused.
adj : sparse matrix
Adjacency matrice.
alpha : float, default: 0.7
Diffusion/propagation factor with 0 <= alpha <= 1.
For alpha = 0 : no diffusion.
For alpha = 1 :
tol : float, default: 10e-6
Convergence threshold.
Returns
-------
X2 : sparse matrix
Smoothed matrix.
"""
print(' ==== propagation ==== ')
n = adj.shape[0]
# diagonal = 1 -> degree
# TODO to set diagonal = 0 before applying eye
adj = adj+sp.eye(n, dtype=np.float32)
d = sp.dia_matrix((np.array(adj.sum(axis=0))**-1, [0]),
shape=(n, n),
dtype=np.float32)
A = adj.dot(d)
X1 = M.astype(np.float32)
X2 = alpha * X1.dot(A) + (1-alpha) * M
i = 0
while norm(X2-X1) > tol:
X1 = X2
X2 = alpha * X1.dot(A) + (1-alpha) * M
i += 1
print('Propagation iteration = {} ----- {}'.format(
i, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return X2
# @profile
def compare_ij_ji(ppi, out_min=True, out_max=True):
"""Helper function for calcul_ppi_influence
In most cases the influence (propagation) is not symmetric. We have to
compare weight (a_ij) and (a_ji) for all pairs in order to obtain symmetric
matrix/matrices. 2 choices available: minimum or maximum weight.
a = min [(a_ij),(a_ji)]
a = max [(a_ij),(a_ji)]
Minimum weight is chosen to avoid Hubs phenomenon.
Parameters
----------
ppi : sparse matrix
Matrice to apply comparison.
out_min, out_max : boolean, default: True
Minimum and/or maximum weight is chosen.
Returns
-------
ppi_min, ppi_max : sparse matrix
Symmertric matrix with minimum and/or maximum weight.
"""
# TODO matrice type of ppi
n = ppi.shape[0]
ppi = ppi.tolil() # need "lil_matrix" for reshape
# transpose to compare ppi(ij) and ppi(ji)
ppi_transp = sp.lil_matrix.transpose(ppi)
# reshape to 1D matrix
ppi_1d = ppi.reshape((1, n**2))
ppi_1d_transp = ppi_transp.reshape((1, n**2))
# reshapeto original size matrix after comparison (min/max)
if out_min and out_max:
ppi_min = (sp.coo_matrix.tolil(
sp.coo_matrix.min(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0))
).reshape((n, n)).astype(np.float32)
ppi_max = (sp.coo_matrix.tolil(
sp.coo_matrix.max(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0))
).reshape((n, n)).astype(np.float32)
print('ppi_min', type(ppi_min), ppi_min.dtype, ppi_min.shape)
print('ppi_max', type(ppi_max), ppi_max.dtype, ppi_max.shape)
return ppi_min, ppi_max
elif out_min:
ppi_min = (sp.coo_matrix.tolil(
sp.coo_matrix.min(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0,
dtype=np.float32))).reshape((n, n))
return ppi_min
elif out_max:
ppi_max = (sp.coo_matrix.tolil(
sp.coo_matrix.max(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0,
dtype=np.float32))).reshape((n, n))
return ppi_max
else:
print('You have to choice Min or Max') # TODO change error message
# @profile
def calcul_final_influence(M, adj, result_folder, influence_weight='min',
simplification=True, compute=False, overwrite=False,
alpha=0.7, tol=10e-6):
"""Compute network influence score
Network propagation iterative process is applied on PPI. (1) The network
influence distance matrix and (2) influence matrices based on minimum /
maximum weight are saved as MATLAB-style files (.mat).
- (1) : 'influence_distance_alpha={}_tol={}.mat'
in 'influence_distance' directory
- (2) : 'ppi_influence_alpha={}_tol={}.mat'
in 'ppi_influence' directory
Where {} are parameter values. The directories will be automatically
created if not exist.
If compute=False, the latest data of directory will be taken into
account:
- latest data with same parameters (alpha and tol)
- if not exist, latest data of directory but with differents parameters
Parameters
----------
M : sparse matrix
Data matrix to be diffused.
adj : sparse matrix
Adjacency matrice.
result_folder : str
Path to create a new directory for save new files. If you want to creat
in current directory, enter '/directory_name'. Absolute path is also
supported.
influence_weight :
simplification : boolean, default: True
compute : boolean, default: False
If True, new network influence score will be computed.
If False, the latest network influence score will be taken into
account.
overwrite : boolean, default: False
If True, new network influence score will be computed even if the file
which same parameters already exists in the directory.
alpha : float, default: 0.7
Diffusion (propagation) factor with 0 <= alpha <= 1.
For alpha = 0 : no diffusion.
For alpha = 1 :
tol : float, default: 10e-6
Convergence threshold.
Returns
-------
final_influence : sparse matrix
Smoothed PPI influence matrices based on minimum / maximum weight.
"""
influence_distance_directory = result_folder + 'influence_distance/'
influence_distance_file = (
influence_distance_directory +
'influence_distance_alpha={}_tol={}.mat'.format(alpha, tol))
#######
final_influence_directory = result_folder + 'final_influence/'
final_influence_file = (
final_influence_directory +
'final_influence_simp={}_alpha={}_tol={}.mat'.format(
simplification, alpha, tol))
#######
existance_same_param = os.path.exists(final_influence_file)
# TODO overwrite condition
# check if same parameters file exists in directory
if existance_same_param:
final_influence_data = loadmat(final_influence_file)
if influence_weight == 'min':
final_influence = final_influence_data['final_influence_min']
else:
final_influence = final_influence_data['final_influence_max']
print('final influence matrix', type(final_influence), final_influence.shape)
print('***** Same parameters file of FINAL INFLUENCE already exists ***** {}'
.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
else:
if compute:
start = time.time()
# check if influence distance file exists
existance_same_influence = os.path.exists(influence_distance_file)
if existance_same_influence:
influence_data = loadmat(influence_distance_file)
influence = influence_data['influence_distance']
print('***** Same parameters file of INFLUENCE DISTANCE already exists ***** {}'
.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
else:
influence = propagation(M, adj, alpha, tol)
print('influence', type(influence), influence.dtype)
# save influence distance before simplification with parameters' values in filename
os.makedirs(influence_distance_directory, exist_ok=True) # NOTE For Python ≥ 3.2
print(' ==== Start to save INFLUENCE DISTANCE ==== {}'
.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
start_save = time.time()
savemat(influence_distance_file,
{'influence_distance': influence,
'alpha': alpha},
do_compression=True)
end_save = time.time()
print("---------- save time = {} ---------- {}"
.format(datetime.timedelta(seconds=end_save - start_save),
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
# simplification: multiply by PPI adjacency matrix
if simplification:
influence = influence.multiply(sp.lil_matrix(adj))
# -> influence as csr_matrix
else:
print("---------- No simplification ----------")
pass
# compare influence[i,j] and influence[j,i] => min/max => final influence
start_ij = time.time()
final_influence_min, final_influence_max = compare_ij_ji(
influence, out_min=True, out_max=True)
end_ij = time.time()
print("---------- compare ij/ji = {} ---------- {}"
.format(datetime.timedelta(seconds=end_ij - start_ij),
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
# save final influence with parameters' values in filename
os.makedirs(final_influence_directory, exist_ok=True)
print(' ==== Start to save FINAL INFLUENCE ==== {}'
.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
start_save = time.time()
savemat(final_influence_file,
{'final_influence_min': final_influence_min,
'final_influence_max': final_influence_max,
'alpha': alpha}, do_compression=True)
end_save = time.time()
print("---------- save time = {} ---------- {}"
.format(datetime.timedelta(seconds=end_save - start_save),
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
if influence_weight == 'min':
final_influence = final_influence_min
else:
final_influence = final_influence_max
end = time.time()
print("---------- Influence = {} ---------- {}"
.format(datetime.timedelta(seconds=end-start),
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
# take most recent file
else:
for x in final_influence_file, influence_distance_directory:
print(x)
newest_file = max(glob.iglob(x + '*.mat'),
key=os.path.getctime)
final_influence_data = loadmat(newest_file)
if x == final_influence_directory:
if influence_weight == 'min':
final_influence = final_influence_data['final_influence_min']
else:
final_influence = final_influence_data['final_influence_max']
return final_influence
# @profile
def best_neighboors(ppi_filt, final_influence, ngh_max):
"""Helper function for filter_ppi_patients
Keeps only the connections with the best influencers.
Parameters
----------
ppi_filt : sparse matrix
Filtration from ppi_total : only genes in PPI are considered.
final_influence :
Smoothed PPI influence matrices based on minimum or maximum weight.
ngh_max : int
Number of best influencers in PPI.
Returns
-------
ppi_ngh : sparse matrix
PPI with only best influencers.
"""
ngh_max = ngh_max + 1 # central protein included
final_influence = final_influence.todense()
print(type(final_influence))
ppi_filt = ppi_filt.todense()
ppi_ngh = np.zeros(ppi_filt.shape, dtype=np.float32)
print('ppi_ngh', ppi_ngh.shape)
for i in range(ppi_filt.shape[0]):
best_influencers = np.argpartition(-final_influence[i, :], ngh_max)[:ngh_max]
#NOTE different result if same value exists several times
# best_influencers2 = np.argpartition(final_influence[i, :], -ngh_max)[-ngh_max:]
ppi_ngh[i, best_influencers] = ppi_filt[i, best_influencers]
ppi_ngh = np.max(np.dstack((ppi_ngh, ppi_ngh.T)), axis=2)
print('ppi_ngh ', ppi_ngh.dtype)
# too stringent if np.min
return sp.csc_matrix(ppi_ngh)
# @profile
def filter_ppi_patients(ppi_total, mut_total, ppi_filt, final_influence, ngh_max,
keep_singletons=False,
min_mutation=10, max_mutation=2000):
"""Keeping only the connections with the best influencers and Filtering some
patients based on mutation number
'the 11 most influential neighbors of each gene in the network as
determined by network influence distance were used'
'Only mutation data generated using the Illumina GAIIx platform were
retained for subsequent analy- sis, and patients with fewer than 10
mutations were discarded.'
Parameters
----------
ppi_total : sparse matrix
Built from all sparse sub-matrices (AA, ... , CC).
mut_total : sparse matrix
Patients' mutation profiles of all genes (rows: patients,
columns: genes of AA, BB and CC).
ppi_filt : sparse matrix
Filtration from ppi_total : only genes in PPI are considered.
final_influence :
Smoothed PPI influence matrices based on minimum or maximum weight.
ngh_max : int
Number of best influencers in PPI.
keep_singletons : boolean, default: False
If True, proteins not annotated in PPI (genes founded only in patients'
mutation profiles) will be also considered.
If False, only annotated proteins in PPI will be considered.
min_mutation, max_mutation : int
Numbers of lowest mutations and highest mutations per patient.
Returns
-------
ppi_final, mut_final : sparse matrix
PPI and mutation profiles after filtering.
"""
# n = final_influence.shape[0]
# final_influence = index_to_sym_matrix(n, final_influence)
ppi_ngh = best_neighboors(ppi_filt, final_influence, ngh_max)
print('ppi_ngh ', ppi_ngh.dtype)
deg0 = Ppi(ppi_total).deg == 0 # True if protein degree = 0
if keep_singletons:
ppi_final = sp.bmat([
[ppi_ngh, sp.csc_matrix((ppi_ngh.shape[0], sum(deg0)))],
[sp.csc_matrix((sum(deg0), ppi_ngh.shape[0])),
sp.csc_matrix((sum(deg0), sum(deg0)))]
]) # -> COO matrix
# mut_final=sp.bmat([[mut_total[:,deg0==False],mut_total[:,deg0==True]]])
mut_final = mut_total
else:
ppi_final = ppi_ngh
mut_final = mut_total[:, Ppi(ppi_total).deg > 0]
# filtered_patients = np.array([k < min_mutation or k > max_mutation for k in Patient(mut_final).mut_per_patient])
# mut_final = mut_final[filtered_patients == False, :]
# to avoid worse comparison '== False'
mut_final = mut_final[np.array([min_mutation < k < max_mutation for k in
Patient(mut_final).mut_per_patient])]
print("Removing %i patients with less than %i or more than %i mutations" %
(mut_total.shape[0]-mut_final.shape[0], min_mutation, max_mutation))
print("New adjacency matrix:", ppi_final.shape)
print("New mutation profile matrix:", mut_final.shape)
return ppi_final, mut_final
# @profile
def quantile_norm_mean(anarray):
"""Helper function for propagation_profile
Forces the observations/variables to have identical intensity distribution.
Parameters
----------
Returns
-------
"""
A = np.squeeze(np.asarray(anarray.T))
AA = np.zeros_like(A)
I = np.argsort(A, axis=0)
AA[I, np.arange(A.shape[1])] = np.mean(A[I, np.arange(A.shape[1])],
axis=1)[:, np.newaxis]
return AA.T
# @profile
def quantile_norm_median(anarray):
A |
# @profile
def propagation_profile(mut_raw, adj, alpha, tol, qn):
# TODO error messages
start = time.time()
if alpha > 0:
# TODO verification of same parameter file
mut_propag = propagation(mut_raw, adj, alpha, tol).todense()
mut_propag[np.isnan(mut_propag)] = 0
if qn == 'mean':
mut_type = 'mean_qn'
mut_propag = quantile_norm_mean(mut_propag)
elif qn == 'median':
mut_type = 'median_qn'
mut_propag = quantile_norm_median(mut_propag)
else:
mut_type = 'diff'
end = time.time()
print("---------- Propagation on {} mutation profile = {} ---------- {}"
.format(mut_type,
datetime.timedelta(seconds=end-start),
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return mut_type, mut_propag
else:
mut_type = 'raw'
mut_raw = mut_raw.todense()
end = time.time()
print("---------- Propagation on {} mutation profile = {} ---------- {}"
.format(mut_type,
datetime.timedelta(seconds=end-start),
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return mut_type, mut_raw
| = np.squeeze(np.asarray(anarray.T))
AA = np.zeros_like(A)
I = np.argsort(A, axis=0)
AA[I, np.arange(A.shape[1])] = np.median(A[I, np.arange(A.shape[1])],
axis=1)[:, np.newaxis]
return AA.T
|
__init__.in.py | # ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
import sys
import time
import getpass
from platform import system
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import load_library
# This will turn off V1 API related warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import ctypes
__all__ = [
'enable', 'disable', 'is_enabled', 'list_backends',
'set_backend', 'get_backend',
'start_logging_placement', 'stop_logging_placement',
'is_logging_placement', '__version__', 'cxx11_abi_flag'
'is_grappler_enabled', 'update_config',
'set_disabled_ops', 'get_disabled_ops',
]
ext = 'dylib' if system() == 'Darwin' else 'so'
TF_VERSION = tf.version.VERSION
TF_GIT_VERSION = tf.version.GIT_VERSION
TF_VERSION_NEEDED = "${TensorFlow_VERSION}"
TF_GIT_VERSION_BUILT_WITH = "${TensorFlow_GIT_VERSION}"
# converting version representations to strings if not already
try:
TF_VERSION = str(TF_VERSION, 'ascii')
except TypeError: # will happen for python 2 or if already string
pass
try:
TF_VERSION_NEEDED = str(TF_VERSION_NEEDED, 'ascii')
except TypeError:
pass
try:
if TF_GIT_VERSION.startswith("b'"): # TF version can be a bytes __repr__()
TF_GIT_VERSION = eval(TF_GIT_VERSION)
TF_GIT_VERSION = str(TF_GIT_VERSION, 'ascii')
except TypeError:
pass
try:
if TF_GIT_VERSION_BUILT_WITH.startswith("b'"):
TF_GIT_VERSION_BUILT_WITH = eval(TF_GIT_VERSION_BUILT_WITH)
TF_GIT_VERSION_BUILT_WITH = str(TF_GIT_VERSION_BUILT_WITH, 'ascii')
except TypeError:
pass
# print("TensorFlow version installed: {0} ({1})".format(TF_VERSION,
# TF_GIT_VERSION))
# print("nGraph bridge built with: {0} ({1})".format(TF_VERSION_NEEDED,
# TF_GIT_VERSION_BUILT_WITH))
# We need to revisit this later. We can automate that using cmake configure
# command.
TF_INSTALLED_VER = TF_VERSION.split('.')
TF_NEEDED_VER = TF_VERSION_NEEDED.split('.')
ngraph_classic_loaded = True
ngraph_bridge_lib = None
if (TF_INSTALLED_VER[0] == TF_NEEDED_VER[0]) and \
(TF_INSTALLED_VER[1] == TF_NEEDED_VER[1]) and \
((TF_INSTALLED_VER[2].split('-'))[0] == (TF_NEEDED_VER[2].split('-'))[0]):
libpath = os.path.dirname(__file__)
full_lib_path = os.path.join(libpath, 'libngraph_bridge.' + ext)
_ = load_library.load_op_library(full_lib_path)
ngraph_bridge_lib = ctypes.cdll.LoadLibrary(full_lib_path)
else:
raise ValueError(
"Error: Installed TensorFlow version {0}\nnGraph bridge built with: {1}"
.format(TF_VERSION, TF_VERSION_NEEDED))
def requested():
return ops.get_default_graph()._attr_scope({
"_ngraph_requested":
attr_value_pb2.AttrValue(b=True)
})
if ngraph_classic_loaded:
ngraph_bridge_lib.is_enabled.restype = ctypes.c_bool
ngraph_bridge_lib.list_backends.argtypes = [ctypes.POINTER(ctypes.c_char_p)]
ngraph_bridge_lib.list_backends.restype = ctypes.c_bool
ngraph_bridge_lib.set_backend.argtypes = [ctypes.c_char_p]
ngraph_bridge_lib.set_backend.restype = ctypes.c_bool
ngraph_bridge_lib.get_backend.argtypes = [ctypes.POINTER(ctypes.c_char_p)]
ngraph_bridge_lib.get_backend.restype = ctypes.c_bool
ngraph_bridge_lib.is_logging_placement.restype = ctypes.c_bool
ngraph_bridge_lib.tf_version.restype = ctypes.c_char_p
ngraph_bridge_lib.ngraph_version.restype = ctypes.c_char_p
ngraph_bridge_lib.cxx11_abi_flag.restype = ctypes.c_int
ngraph_bridge_lib.is_grappler_enabled.restype = ctypes.c_bool
ngraph_bridge_lib.set_disabled_ops.argtypes = [ctypes.c_char_p]
ngraph_bridge_lib.get_disabled_ops.restype = ctypes.c_char_p
def enable():
ngraph_bridge_lib.enable()
def disable():
ngraph_bridge_lib.disable()
def is_enabled():
return ngraph_bridge_lib.is_enabled()
def list_backends():
len_backends = ngraph_bridge_lib.backends_len()
result = (ctypes.c_char_p * len_backends)()
if not ngraph_bridge_lib.list_backends(result):
raise Exception("Expected " + str(len_backends) +
" backends, but got some other number of backends")
list_result = list(result)
# convert bytes to string required for py3 (encode/decode bytes)
backend_list = []
for backend in list_result:
backend_list.append(backend.decode("utf-8"))
return backend_list
def set_backend(backend):
if not ngraph_bridge_lib.set_backend(backend.encode("utf-8")):
raise Exception("Backend " + backend + " unavailable.")
def get_backend():
result = ctypes.c_char_p()
if not ngraph_bridge_lib.get_backend(ctypes.byref(result)):
raise Exception("Cannot get currently set backend")
return result.value.decode("utf-8")
def start_logging_placement():
ngraph_bridge_lib.start_logging_placement()
def stop_logging_placement():
ngraph_bridge_lib.stop_logging_placement()
def is_logging_placement():
return ngraph_bridge_lib.is_logging_placement()
def cxx11_abi_flag():
return ngraph_bridge_lib.cxx11_abi_flag()
def is_grappler_enabled():
return ngraph_bridge_lib.is_grappler_enabled()
def update_config(config, backend_name = "CPU", device_id = ""):
#updating session config if grappler is enabled
if(ngraph_bridge_lib.is_grappler_enabled()):
opt_name = 'ngraph-optimizer'
# If the config already has ngraph-optimizer, then do not update it
if config.HasField('graph_options'):
if config.graph_options.HasField('rewrite_options'):
custom_opts = config.graph_options.rewrite_options.custom_optimizers
for i in range(len(custom_opts)):
if custom_opts[i].name == opt_name:
return config
rewriter_options = rewriter_config_pb2.RewriterConfig()
rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE)
rewriter_options.min_graph_nodes=-1
ngraph_optimizer = rewriter_options.custom_optimizers.add()
ngraph_optimizer.name = opt_name
ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
# For reference, if we want to provide configuration support(backend parameters)
# in a python script using the ngraph-optimizer
# rewriter_options = rewriter_config_pb2.RewriterConfig()
# rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE)
# rewriter_options.min_graph_nodes=-1
# ngraph_optimizer = rewriter_options.custom_optimizers.add()
# ngraph_optimizer.name = "ngraph-optimizer"
# ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
# ngraph_optimizer.parameter_map["max_batch_size"].s = b'64'
# ngraph_optimizer.parameter_map["ice_cores"].s = b'12'
# config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
return config
def set_disabled_ops(unsupported_ops):
ngraph_bridge_lib.set_disabled_ops(unsupported_ops.encode("utf-8")) |
__version__ = \
"nGraph bridge version: " + str(ngraph_bridge_lib.version()) + "\n" + \
"nGraph version used for this build: " + str(ngraph_bridge_lib.ngraph_version()) + "\n" + \
"TensorFlow version used for this build: " + TF_GIT_VERSION_BUILT_WITH + "\n" \
"CXX11_ABI flag used for this build: " + str(ngraph_bridge_lib.cxx11_abi_flag()) + "\n" \
"nGraph bridge built with Grappler: " + str(ngraph_bridge_lib.is_grappler_enabled()) + "\n" \ |
def get_disabled_ops():
return ngraph_bridge_lib.get_disabled_ops() |
model_topology_label_values_list.go | /*
Sumo Logic API
# Getting Started Welcome to the Sumo Logic API reference. You can use these APIs to interact with the Sumo Logic platform. For information on the collector and search APIs see our [API home page](https://help.sumologic.com/APIs). ## API Endpoints Sumo Logic has several deployments in different geographic locations. You'll need to use the Sumo Logic API endpoint corresponding to your geographic location. See the table below for the different API endpoints by deployment. For details determining your account's deployment see [API endpoints](https://help.sumologic.com/?cid=3011). <table> <tr> <td> <strong>Deployment</strong> </td> <td> <strong>Endpoint</strong> </td> </tr> <tr> <td> AU </td> <td> https://api.au.sumologic.com/api/ </td> </tr> <tr> <td> CA </td> <td> https://api.ca.sumologic.com/api/ </td> </tr> <tr> <td> DE </td> <td> https://api.de.sumologic.com/api/ </td> </tr> <tr> <td> EU </td> <td> https://api.eu.sumologic.com/api/ </td> </tr> <tr> <td> FED </td> <td> https://api.fed.sumologic.com/api/ </td> </tr> <tr> <td> IN </td> <td> https://api.in.sumologic.com/api/ </td> </tr> <tr> <td> JP </td> <td> https://api.jp.sumologic.com/api/ </td> </tr> <tr> <td> US1 </td> <td> https://api.sumologic.com/api/ </td> </tr> <tr> <td> US2 </td> <td> https://api.us2.sumologic.com/api/ </td> </tr> </table> ## Authentication Sumo Logic supports the following options for API authentication: - Access ID and Access Key - Base64 encoded Access ID and Access Key See [Access Keys](https://help.sumologic.com/Manage/Security/Access-Keys) to generate an Access Key. Make sure to copy the key you create, because it is displayed only once. When you have an Access ID and Access Key you can execute requests such as the following: ```bash curl -u \"<accessId>:<accessKey>\" -X GET https://api.<deployment>.sumologic.com/api/v1/users ``` Where `deployment` is either `au`, `ca`, `de`, `eu`, `fed`, `in`, `jp`, `us1`, or `us2`. See [API endpoints](#section/API-Endpoints) for details. If you prefer to use basic access authentication, you can do a Base64 encoding of your `<accessId>:<accessKey>` to authenticate your HTTPS request. The following is an example request, replace the placeholder `<encoded>` with your encoded Access ID and Access Key string: ```bash curl -H \"Authorization: Basic <encoded>\" -X GET https://api.<deployment>.sumologic.com/api/v1/users ``` Refer to [API Authentication](https://help.sumologic.com/?cid=3012) for a Base64 example. ## Status Codes Generic status codes that apply to all our APIs. See the [HTTP status code registry](https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) for reference. <table> <tr> <td> <strong>HTTP Status Code</strong> </td> <td> <strong>Error Code</strong> </td> <td> <strong>Description</strong> </td> </tr> <tr> <td> 301 </td> <td> moved </td> <td> The requested resource SHOULD be accessed through returned URI in Location Header. See [troubleshooting](https://help.sumologic.com/APIs/Troubleshooting-APIs/API-301-Error-Moved) for details.</td> </tr> <tr> <td> 401 </td> <td> unauthorized </td> <td> Credential could not be verified.</td> </tr> <tr> <td> 403 </td> <td> forbidden </td> <td> This operation is not allowed for your account type or the user doesn't have the role capability to perform this action. See [troubleshooting](https://help.sumologic.com/APIs/Troubleshooting-APIs/API-403-Error-This-operation-is-not-allowed-for-your-account-type) for details.</td> </tr> <tr> <td> 404 </td> <td> notfound </td> <td> Requested resource could not be found. </td> </tr> <tr> <td> 405 </td> <td> method.unsupported </td> <td> Unsupported method for URL. </td> </tr> <tr> <td> 415 </td> <td> contenttype.invalid </td> <td> Invalid content type. </td> </tr> <tr> <td> 429 </td> <td> rate.limit.exceeded </td> <td> The API request rate is higher than 4 request per second or inflight API requests are higher than 10 request per second. </td> </tr> <tr> <td> 500 </td> <td> internal.error </td> <td> Internal server error. </td> </tr> <tr> <td> 503 </td> <td> service.unavailable </td> <td> Service is currently unavailable. </td> </tr> </table> ## Filtering Some API endpoints support filtering results on a specified set of fields. Each endpoint that supports filtering will list the fields that can be filtered. Multiple fields can be combined by using an ampersand `&` character. For example, to get 20 users whose `firstName` is `John` and `lastName` is `Doe`: ```bash api.sumologic.com/v1/users?limit=20&firstName=John&lastName=Doe ``` ## Sorting Some API endpoints support sorting fields by using the `sortBy` query parameter. The default sort order is ascending. Prefix the field with a minus sign `-` to sort in descending order. For example, to get 20 users sorted by their `email` in descending order: ```bash api.sumologic.com/v1/users?limit=20&sort=-email ``` ## Asynchronous Request Asynchronous requests do not wait for results, instead they immediately respond back with a job identifier while the job runs in the background. You can use the job identifier to track the status of the asynchronous job request. Here is a typical flow for an asynchronous request. 1. Start an asynchronous job. On success, a job identifier is returned. The job identifier uniquely identifies your asynchronous job. 2. Once started, use the job identifier from step 1 to track the status of your asynchronous job. An asynchronous request will typically provide an endpoint to poll for the status of asynchronous job. A successful response from the status endpoint will have the following structure: ```json { \"status\": \"Status of asynchronous request\", \"statusMessage\": \"Optional message with additional information in case request succeeds\", \"error\": \"Error object in case request fails\" } ``` The `status` field can have one of the following values: 1. `Success`: The job succeeded. The `statusMessage` field might have additional information. 2. `InProgress`: The job is still running. 3. `Failed`: The job failed. The `error` field in the response will have more information about the failure. 3. Some asynchronous APIs may provide a third endpoint (like [export result](#operation/getAsyncExportResult)) to fetch the result of an asynchronous job. ### Example Let's say we want to export a folder with the identifier `0000000006A2E86F`. We will use the [async export](#operation/beginAsyncExport) API to export all the content under the folder with `id=0000000006A2E86F`. 1. Start an export job for the folder ```bash curl -X POST -u \"<accessId>:<accessKey>\" https://api.<deployment>.sumologic.com/api/v2/content/0000000006A2E86F/export ``` See [authentication section](#section/Authentication) for more details about `accessId`, `accessKey`, and `deployment`. On success, you will get back a job identifier. In the response below, `C03E086C137F38B4` is the job identifier. ```bash { \"id\": \"C03E086C137F38B4\" } ``` 2. Now poll for the status of the asynchronous job with the [status](#operation/getAsyncExportStatus) endpoint. ```bash curl -X GET -u \"<accessId>:<accessKey>\" https://api.<deployment>.sumologic.com/api/v2/content/0000000006A2E86F/export/C03E086C137F38B4/status ``` You may get a response like ```json { \"status\": \"InProgress\", \"statusMessage\": null, \"error\": null } ``` It implies the job is still in progress. Keep polling till the status is either `Success` or `Failed`. 3. When the asynchronous job completes (`status != \"InProgress\"`), you can fetch the results with the [export result](#operation/getAsyncExportResult) endpoint. ```bash curl -X GET -u \"<accessId>:<accessKey>\" https://api.<deployment>.sumologic.com/api/v2/content/0000000006A2E86F/export/C03E086C137F38B4/result ``` The asynchronous job may fail (`status == \"Failed\"`). You can look at the `error` field for more details. ```json { \"status\": \"Failed\", \"errors\": { \"code\": \"content1:too_many_items\", \"message\": \"Too many objects: object count(1100) was greater than limit 1000\" } } ``` ## Rate Limiting * A rate limit of four API requests per second (240 requests per minute) applies to all API calls from a user. * A rate limit of 10 concurrent requests to any API endpoint applies to an access key. If a rate is exceeded, a rate limit exceeded 429 status code is returned. ## Generating Clients You can use [OpenAPI Generator](https://openapi-generator.tech) to generate clients from the YAML file to access the API. ### Using [NPM](https://www.npmjs.com/get-npm) 1. Install [NPM package wrapper](https://github.com/openapitools/openapi-generator-cli) globally, exposing the CLI on the command line: ```bash npm install @openapitools/openapi-generator-cli -g ``` You can see detailed instructions [here](https://openapi-generator.tech/docs/installation#npm). 2. Download the [YAML file](/docs/sumologic-api.yaml) and save it locally. Let's say the file is saved as `sumologic-api.yaml`. 3. Use the following command to generate `python` client inside the `sumo/client/python` directory: ```bash openapi-generator generate -i sumologic-api.yaml -g python -o sumo/client/python ``` ### Using [Homebrew](https://brew.sh/) 1. Install OpenAPI Generator ```bash brew install openapi-generator ``` 2. Download the [YAML file](/docs/sumologic-api.yaml) and save it locally. Let's say the file is saved as `sumologic-api.yaml`. 3. Use the following command to generate `python` client side code inside the `sumo/client/python` directory: ```bash openapi-generator generate -i sumologic-api.yaml -g python -o sumo/client/python ```
API version: 1.0.0
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package openapi
import (
"encoding/json"
)
// TopologyLabelValuesList List of values corresponding to a key of a label.
type TopologyLabelValuesList struct {
Items []string
}
// NewTopologyLabelValuesList instantiates a new TopologyLabelValuesList object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewTopologyLabelValuesList() *TopologyLabelValuesList {
this := TopologyLabelValuesList{}
return &this
}
// NewTopologyLabelValuesListWithDefaults instantiates a new TopologyLabelValuesList object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewTopologyLabelValuesListWithDefaults() *TopologyLabelValuesList {
this := TopologyLabelValuesList{}
return &this
}
func (o TopologyLabelValuesList) MarshalJSON() ([]byte, error) {
toSerialize := make([]interface{}, len(o.Items))
for i, item := range o.Items {
toSerialize[i] = item
}
return json.Marshal(toSerialize)
}
func (o *TopologyLabelValuesList) UnmarshalJSON(bytes []byte) (err error) {
return json.Unmarshal(bytes, &o.Items)
}
type NullableTopologyLabelValuesList struct {
value *TopologyLabelValuesList
isSet bool
}
func (v NullableTopologyLabelValuesList) Get() *TopologyLabelValuesList {
return v.value
}
func (v *NullableTopologyLabelValuesList) Set(val *TopologyLabelValuesList) {
v.value = val
v.isSet = true
}
func (v NullableTopologyLabelValuesList) IsSet() bool {
return v.isSet
}
func (v *NullableTopologyLabelValuesList) Unset() {
v.value = nil
v.isSet = false
} | func NewNullableTopologyLabelValuesList(val *TopologyLabelValuesList) *NullableTopologyLabelValuesList {
return &NullableTopologyLabelValuesList{value: val, isSet: true}
}
func (v NullableTopologyLabelValuesList) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableTopologyLabelValuesList) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | |
inventory.py | #!/usr/bin/env python
'''
Example custom dynamic inventory script for Ansible, in Python.
FOR pyhhon 3.8.10 it's working
used:
https://www.jeffgeerling.com/blog/creating-custom-dynamic-inventories-ansible
'''
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
def __init__(self):
self.inventory = {}
self.read_cli_args()
# Called with `--list`.
if self.args.list:
self.inventory = self.example_inventory()
# Called with `--host [hostname]`.
elif self.args.host:
# Not implemented, since we return _meta info `--list`.
self.inventory = self.empty_inventory()
# If no groups or vars are present, return an empty inventory.
else:
self.inventory = self.empty_inventory()
print (json.dumps(self.inventory))
# Example inventory for testing.
def example_inventory(self):
return {
"group": {
"hosts": [
"51.250.4.212",
"62.84.113.197"
],
"vars": {
"ansible_ssh_user": "ubuntu",
"ansible_ssh_private_key_file": "~/.ssh/id_rsa",
"example_variable": "value"
}
},
"_meta": {
"hostvars": {
"51.250.4.212": {
"reddit": "db"
},
"62.84.113.197": {
"reddit": "app"
}
}
}
}
# Empty inventory for testing.
def empty_inventory(self):
return {'_meta': {'hostvars': {}}}
# Read the command line args passed to the script.
def read_cli_args(self):
|
# Get the inventory.
ExampleInventory()
| parser = argparse.ArgumentParser()
parser.add_argument('--list', action = 'store_true')
parser.add_argument('--host', action = 'store')
self.args = parser.parse_args() |
ArchivesDestory.js | import React, { PureComponent, Fragment } from 'react';
import { connect } from 'dva';
import router from 'umi/router';
import {
Row,
Col,
Card,
Form,
Input,
Button,
Select,
Table, message, Modal, DatePicker,
} from 'antd';
import PageHeaderWrapper from '@/components/PageHeaderWrapper';
import moment from 'moment';
import styles from '../table.less';
const FormItem = Form.Item;
const { Option } = Select;
const CreateForm = Form.create()(props => {
const { modalVisible, form, handleAdd, handleModalVisible,modalInfo } = props;
const okHandle = () => {
form.validateFields((err, fieldsValue) => {
if (err) return;
form.resetFields();
handleAdd(fieldsValue,modalInfo);
});
};
return (
<Modal
destroyOnClose
title="编辑归档"
visible={modalVisible}
onOk={okHandle}
onCancel={() => handleModalVisible()}
>
<Form.Item labelCol={{ span: 5 }} wrapperCol={{ span: 15}} label="退档时间">
{form.getFieldDecorator('archivesdate', {
initialValue: moment(new Date(), "YYYY-MM-DD"), | placeholder="退档时间"
/>
)}
</Form.Item>
</Modal>
);
});
/* eslint react/no-multi-comp:0 */
@connect(({ archives, loading }) => ({
archives,
loading: loading.models.archives,
}))
@Form.create()
class ArchivesDestory extends PureComponent {
state = {
modalVisible: false,
modalInfo :{},
};
columns = [
{
title: '委托编号',
dataIndex: 'reportno',
},
{
title: '委托日期',
dataIndex: 'reportdate',
render: val => <span>{moment(val).format('YYYY-MM-DD')}</span>
},
{
title: '委托人',
dataIndex: 'applicant',
},
{
title: '船名标识',
dataIndex: 'shipname',
},
{
title: '检查品名',
dataIndex: 'cargoname',
},
{
title: '归档位置',
dataIndex: 'archiveplace',
},
{
title: '归档/退档日期',
dataIndex: 'archivesdate',
render: val => this.isValidDate(val),
},
{
title: '操作',
render: (text, record) => (
<Fragment>
<a onClick={() => this.modifyItem(text, record)}>退档</a>
<a onClick={() => this.previewItem(text, record)}>委托详情</a>
</Fragment>
),
},
];
componentDidMount() {
const user = JSON.parse(localStorage.getItem("userinfo"));
const { dispatch } = this.props;
const params = {
certCode:user.certCode
};
dispatch({
type: 'archives/getAllReports',
payload: params,
});
}
handleFormReset = () => {
const user = JSON.parse(localStorage.getItem("userinfo"));
const params = {
certCode:user.certCode
};
const { form } = this.props;
form.resetFields();
const { dispatch } = this.props;
dispatch({
type: 'archives/getAllReports',
payload: params,
});
}
handleSearch = e=> {
e.preventDefault();
const { dispatch, form } = this.props;
form.validateFields((err, fieldsValue) => {
if (err) return;
const user = JSON.parse(localStorage.getItem("userinfo"));
const values = {
...fieldsValue,
kind :fieldsValue.kind,
value: fieldsValue.value,
certCode:user.certCode,
};
dispatch({
type: 'archives/getAllReports',
payload: values,
});
});
}
isValidDate =date=> {
if(date !==undefined && date !==null ){
return <span>{moment(date).format('YYYY-MM-DD')}</span>;
}
return [];
}
previewItem = text => {
sessionStorage.setItem('reportno',text.reportno);
localStorage.setItem('reportDetailNo',text.reportno);
window.open("/Entrustment/DetailForEntrustment");
};
modifyItem = text => {
if(text.archiveplace !==undefined && text.archiveplace !==null && text.archiveplace !==""){
this.setState({
modalInfo:text,
});
this.handleModalVisible(true);
}else{
message.success("未归档,无需退档");
}
};
handleModalVisible = (flag) => {
this.setState({
modalVisible: !!flag,
});
};
handleAdd = (fields,modalInfo) => {
const { dispatch } = this.props;
let prams = modalInfo;
prams.archiveplace = "";
prams.archivesdate = fields.archivesdate;
const values = {
...prams,
};
dispatch({
type: 'archives/updateArchivesFetch',
payload:values,
callback: (response) => {
if(response)
message.success("保存成功");
}
});
this.setState({
modalVisible: false,
});
}
renderSimpleForm() {
const {
form: { getFieldDecorator },
} = this.props;
return (
<Form onSubmit={this.handleSearch} layout="inline">
<Row gutter={{ md: 8, lg: 24, xl: 48 }}>
<Col md={4} sm={20}>
<Form.Item
labelCol={{ span: 5 }}
wrapperCol={{ span: 6 }}
colon={false}
>
{getFieldDecorator('kind', {
initialValue:"shipname",
rules: [{ message: '搜索类型' }],
})(
<Select placeholder="搜索类型">
<Option value="reportno">委托编号</Option>
<Option value="applicant">委托人</Option>
<Option value="shipname">船名标识</Option>
<Option value="cargoname">检查品名</Option>
<Option value="archiveplace">归档位置</Option>
</Select>
)}
</Form.Item>
</Col>
<Col md={4} sm={20}>
<FormItem>
{getFieldDecorator('value',{rules: [{ message: '搜索数据' }],})(<Input placeholder="请输入" />)}
</FormItem>
</Col>
<Col md={8} sm={20}>
<span className={styles.submitButtons}>
<Button type="primary" htmlType="submit">
查询
</Button>
<Button style={{ marginLeft: 8 }} onClick={this.handleFormReset}>
重置
</Button>
</span>
</Col>
</Row>
</Form>
);
}
render() {
const {
archives: {report},
loading,
dispatch,
} = this.props;
const { modalVisible,modalInfo } = this.state;
const parentMethods = {
handleAdd: this.handleAdd,
handleModalVisible: this.handleModalVisible,
};
return (
<PageHeaderWrapper>
<Card bordered={false} size="small">
<div className={styles.tableList}>
<CreateForm {...parentMethods} modalVisible={modalVisible} modalInfo={modalInfo} dispatch={dispatch} />
<div className={styles.tableListForm}>{this.renderSimpleForm()}</div>
<Table
size="middle"
loading={loading}
dataSource={report.list}
columns={this.columns}
rowKey="reportno"
pagination={{showQuickJumper:true,showSizeChanger:true}}
/>
</div>
</Card>
</PageHeaderWrapper>
);
}
}
export default ArchivesDestory; | })(
<DatePicker
style={{ width: '100%' }}
format="YYYY-MM-DD" |
Opengauss_Function_System_View_Case0044.py | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 系统视图
Case Name : 测试系统视图PG_STAT_DATABASE字段与数据类型
Description :
1.查看系统视图PG_STAT_DATABASE的结构
2.该视图字段与对应字段数据类型是否正确
Expect :
1.查看系统视图PG_STAT_DATABASE的结构成功
2.该视图字段与字段数据类型对应正确
History :
"""
import unittest
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
LOG = Logger()
class SystemView(unittest.TestCase):
def setUp(self):
LOG.info('----------------this is setup-----------------------')
LOG.info( | '------Opengauss_Function_System_View_Case0044开始执行----------')
self.com = Common()
self.comsh = CommonSH('dbuser')
self.expect_result_dict = {
'Column': ['datid', 'datname', 'numbackends', 'xact_commit',
'xact_rollback', 'blks_read', 'blks_hit',
'tup_returned', 'tup_fetched', 'tup_inserted',
'tup_updated', 'tup_deleted', 'conflicts',
'temp_files', 'temp_bytes', 'deadlocks',
'blk_read_time', 'blk_write_time', 'stats_reset'],
'Type': ['oid', 'name', 'integer', 'bigint', 'bigint', 'bigint',
'bigint', 'bigint', 'bigint', 'bigint',
'bigint', 'bigint', 'bigint', 'bigint', 'bigint',
'bigint', 'double precision', 'double precision',
'timestamp with time zone']}
def test_index_file_damaged(self):
LOG.info(
'--------------------查看表结构--------------------------')
msg = self.comsh.execut_db_sql('\d PG_STAT_DATABASE')
LOG.info(msg)
result_dict = self.com.format_sql_result(msg)
LOG.info(result_dict)
del result_dict['Modifiers']
self.assertDictEqual(self.expect_result_dict, result_dict)
def tearDown(self):
LOG.info('----------------this is tearDown-----------------------')
# 无须清理环境
LOG.info(
'---Opengauss_Function_System_View_Case0044执行完成------------')
| |
mod.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
// #[PerformanceCriticalPath]
mod backward;
mod forward;
use engine_traits::{CfName, CF_DEFAULT, CF_LOCK, CF_WRITE};
use kvproto::kvrpcpb::{ExtraOp, IsolationLevel};
use txn_types::{
Key, Lock, LockType, OldValue, TimeStamp, TsSet, Value, Write, WriteRef, WriteType,
};
use self::backward::BackwardKvScanner;
use self::forward::{
DeltaEntryPolicy, ForwardKvScanner, ForwardScanner, LatestEntryPolicy, LatestKvPolicy,
};
use crate::storage::kv::{
CfStatistics, Cursor, CursorBuilder, Iterator, ScanMode, Snapshot, Statistics,
};
use crate::storage::mvcc::{default_not_found_error, NewerTsCheckState, Result};
use crate::storage::txn::{Result as TxnResult, Scanner as StoreScanner};
pub use self::forward::{test_util, DeltaScanner, EntryScanner};
pub struct ScannerBuilder<S: Snapshot>(ScannerConfig<S>);
impl<S: Snapshot> ScannerBuilder<S> {
/// Initialize a new `ScannerBuilder`
pub fn new(snapshot: S, ts: TimeStamp) -> Self {
Self(ScannerConfig::new(snapshot, ts))
}
/// Set whether or not read operations should fill the cache.
///
/// Defaults to `true`.
#[inline]
#[must_use]
pub fn fill_cache(mut self, fill_cache: bool) -> Self {
self.0.fill_cache = fill_cache;
self
}
/// Set whether values of the user key should be omitted. When `omit_value` is `true`, the
/// length of returned value will be 0.
///
/// Previously this option is called `key_only`.
///
/// Defaults to `false`.
#[inline]
#[must_use]
pub fn omit_value(mut self, omit_value: bool) -> Self {
self.0.omit_value = omit_value;
self
}
/// Set the isolation level.
///
/// Defaults to `IsolationLevel::Si`.
#[inline]
#[must_use]
pub fn isolation_level(mut self, isolation_level: IsolationLevel) -> Self {
self.0.isolation_level = isolation_level;
self
}
/// Set the desc.
///
/// Default is 'false'.
#[inline]
#[must_use]
pub fn desc(mut self, desc: bool) -> Self {
self.0.desc = desc;
self
}
/// Limit the range to `[lower_bound, upper_bound)` in which the `ForwardKvScanner` should scan.
/// `None` means unbounded.
///
/// Default is `(None, None)`.
#[inline]
#[must_use]
pub fn range(mut self, lower_bound: Option<Key>, upper_bound: Option<Key>) -> Self {
self.0.lower_bound = lower_bound;
self.0.upper_bound = upper_bound;
self
}
/// Set locks that the scanner can bypass. Locks with start_ts in the specified set will be
/// ignored during scanning.
///
/// Default is empty.
#[inline]
#[must_use]
pub fn bypass_locks(mut self, locks: TsSet) -> Self {
self.0.bypass_locks = locks;
self
}
/// Set locks that the scanner can read through. Locks with start_ts in the specified set will be
/// accessed during scanning.
///
/// Default is empty.
#[inline]
#[must_use]
pub fn access_locks(mut self, locks: TsSet) -> Self {
self.0.access_locks = locks;
self
}
/// Set the hint for the minimum commit ts we want to scan.
///
/// Default is empty.
///
/// NOTE: user should be careful to use it with `ExtraOp::ReadOldValue`.
#[inline]
#[must_use]
pub fn hint_min_ts(mut self, min_ts: Option<TimeStamp>) -> Self {
self.0.hint_min_ts = min_ts;
self
}
/// Set the hint for the maximum commit ts we want to scan.
///
/// Default is empty.
///
/// NOTE: user should be careful to use it with `ExtraOp::ReadOldValue`.
#[inline]
#[must_use]
pub fn hint_max_ts(mut self, max_ts: Option<TimeStamp>) -> Self {
self.0.hint_max_ts = max_ts;
self
}
/// Check whether there is data with newer ts. The result of `met_newer_ts_data` is Unknown
/// if this option is not set.
///
/// Default is false.
#[inline]
#[must_use]
pub fn check_has_newer_ts_data(mut self, enabled: bool) -> Self {
self.0.check_has_newer_ts_data = enabled;
self
}
/// Build `Scanner` from the current configuration.
pub fn build(mut self) -> Result<Scanner<S>> {
let lock_cursor = self.build_lock_cursor()?;
let write_cursor = self.0.create_cf_cursor(CF_WRITE)?;
if self.0.desc {
Ok(Scanner::Backward(BackwardKvScanner::new(
self.0,
lock_cursor,
write_cursor,
)))
} else {
Ok(Scanner::Forward(ForwardScanner::new(
self.0,
lock_cursor,
write_cursor,
None,
LatestKvPolicy,
)))
}
}
pub fn build_entry_scanner(
mut self,
after_ts: TimeStamp,
output_delete: bool,
) -> Result<EntryScanner<S>> {
let lock_cursor = self.build_lock_cursor()?;
let write_cursor = self.0.create_cf_cursor(CF_WRITE)?;
// Note: Create a default cf cursor will take key range, so we need to
// ensure the default cursor is created after lock and write.
let default_cursor = self.0.create_cf_cursor(CF_DEFAULT)?;
Ok(ForwardScanner::new(
self.0,
lock_cursor,
write_cursor,
Some(default_cursor),
LatestEntryPolicy::new(after_ts, output_delete),
))
}
pub fn build_delta_scanner(
mut self,
from_ts: TimeStamp,
extra_op: ExtraOp,
) -> Result<DeltaScanner<S>> {
let lock_cursor = self.build_lock_cursor()?;
let write_cursor = self.0.create_cf_cursor(CF_WRITE)?;
// Note: Create a default cf cursor will take key range, so we need to
// ensure the default cursor is created after lock and write.
let default_cursor = self
.0
.create_cf_cursor_with_scan_mode(CF_DEFAULT, ScanMode::Mixed)?;
Ok(ForwardScanner::new(
self.0,
lock_cursor,
write_cursor,
Some(default_cursor),
DeltaEntryPolicy::new(from_ts, extra_op),
))
}
fn build_lock_cursor(&mut self) -> Result<Option<Cursor<S::Iter>>> {
Ok(match self.0.isolation_level {
IsolationLevel::Si => Some(self.0.create_cf_cursor(CF_LOCK)?),
IsolationLevel::Rc => None,
})
}
}
pub enum Scanner<S: Snapshot> {
Forward(ForwardKvScanner<S>),
Backward(BackwardKvScanner<S>),
}
impl<S: Snapshot> StoreScanner for Scanner<S> {
fn next(&mut self) -> TxnResult<Option<(Key, Value)>> {
match self {
Scanner::Forward(scanner) => Ok(scanner.read_next()?),
Scanner::Backward(scanner) => Ok(scanner.read_next()?),
}
}
/// Take out and reset the statistics collected so far.
fn take_statistics(&mut self) -> Statistics {
match self {
Scanner::Forward(scanner) => scanner.take_statistics(),
Scanner::Backward(scanner) => scanner.take_statistics(),
}
}
/// Returns whether data with newer ts is found. The result is meaningful only when
/// `check_has_newer_ts_data` is set to true.
fn met_newer_ts_data(&self) -> NewerTsCheckState {
match self {
Scanner::Forward(scanner) => scanner.met_newer_ts_data(),
Scanner::Backward(scanner) => scanner.met_newer_ts_data(),
}
}
}
pub struct ScannerConfig<S: Snapshot> {
snapshot: S,
fill_cache: bool,
omit_value: bool,
isolation_level: IsolationLevel,
/// `lower_bound` and `upper_bound` is used to create `default_cursor`. `upper_bound`
/// is used in initial seek(or `lower_bound` in initial backward seek) as well. They will be consumed after `default_cursor` is being
/// created.
lower_bound: Option<Key>,
upper_bound: Option<Key>,
// hint for we will only scan data with commit ts >= hint_min_ts
hint_min_ts: Option<TimeStamp>,
// hint for we will only scan data with commit ts <= hint_max_ts
hint_max_ts: Option<TimeStamp>,
ts: TimeStamp,
desc: bool,
bypass_locks: TsSet,
access_locks: TsSet,
check_has_newer_ts_data: bool,
}
impl<S: Snapshot> ScannerConfig<S> {
fn new(snapshot: S, ts: TimeStamp) -> Self {
Self {
snapshot,
fill_cache: true,
omit_value: false,
isolation_level: IsolationLevel::Si,
lower_bound: None,
upper_bound: None,
hint_min_ts: None,
hint_max_ts: None,
ts,
desc: false,
bypass_locks: Default::default(),
access_locks: Default::default(),
check_has_newer_ts_data: false,
}
}
#[inline]
fn scan_mode(&self) -> ScanMode {
if self.desc {
ScanMode::Mixed
} else {
ScanMode::Forward
}
}
/// Create the cursor.
#[inline]
fn create_cf_cursor(&mut self, cf: CfName) -> Result<Cursor<S::Iter>> {
self.create_cf_cursor_with_scan_mode(cf, self.scan_mode())
}
/// Create the cursor with specified scan_mode, instead of inferring scan_mode from the config.
#[inline]
fn create_cf_cursor_with_scan_mode(
&mut self,
cf: CfName,
scan_mode: ScanMode,
) -> Result<Cursor<S::Iter>> {
let (lower, upper) = if cf == CF_DEFAULT {
(self.lower_bound.take(), self.upper_bound.take())
} else {
(self.lower_bound.clone(), self.upper_bound.clone())
};
// FIXME: Try to find out how to filter default CF SSTs by start ts
let (hint_min_ts, hint_max_ts) = if cf == CF_WRITE {
(self.hint_min_ts, self.hint_max_ts)
} else {
(None, None)
};
let cursor = CursorBuilder::new(&self.snapshot, cf)
.range(lower, upper)
.fill_cache(self.fill_cache)
.scan_mode(scan_mode)
.hint_min_ts(hint_min_ts)
.hint_max_ts(hint_max_ts)
.build()?;
Ok(cursor)
}
}
/// Reads user key's value in default CF according to the given write CF value
/// (`write`).
///
/// Internally, there will be a `near_seek` operation.
///
/// Notice that the value may be already carried in the `write` (short value). In this
/// case, you should not call this function.
///
/// # Panics
///
/// Panics if there is a short value carried in the given `write`.
///
/// Panics if key in default CF does not exist. This means there is a data corruption.
pub fn near_load_data_by_write<I>(
default_cursor: &mut Cursor<I>, // TODO: make it `ForwardCursor`.
user_key: &Key,
write_start_ts: TimeStamp,
statistics: &mut Statistics,
) -> Result<Value>
where
I: Iterator,
{
let seek_key = user_key.clone().append_ts(write_start_ts);
default_cursor.near_seek(&seek_key, &mut statistics.data)?;
if !default_cursor.valid()?
|| default_cursor.key(&mut statistics.data) != seek_key.as_encoded().as_slice()
{
return Err(default_not_found_error(
user_key.to_raw()?,
"near_load_data_by_write",
));
}
statistics.data.processed_keys += 1;
Ok(default_cursor.value(&mut statistics.data).to_vec())
}
/// Similar to `near_load_data_by_write`, but accepts a `BackwardCursor` and use
/// `near_seek_for_prev` internally.
fn near_reverse_load_data_by_write<I>(
default_cursor: &mut Cursor<I>, // TODO: make it `BackwardCursor`.
user_key: &Key,
write_start_ts: TimeStamp,
statistics: &mut Statistics,
) -> Result<Value>
where
I: Iterator,
{
let seek_key = user_key.clone().append_ts(write_start_ts);
default_cursor.near_seek_for_prev(&seek_key, &mut statistics.data)?;
if !default_cursor.valid()?
|| default_cursor.key(&mut statistics.data) != seek_key.as_encoded().as_slice()
{
return Err(default_not_found_error(
user_key.to_raw()?,
"near_reverse_load_data_by_write",
));
}
statistics.data.processed_keys += 1;
Ok(default_cursor.value(&mut statistics.data).to_vec())
}
pub fn has_data_in_range<S: Snapshot>(
snapshot: S,
cf: CfName,
left: &Key,
right: &Key,
statistic: &mut CfStatistics,
) -> Result<bool> {
let mut cursor = CursorBuilder::new(&snapshot, cf)
.range(None, Some(right.clone()))
.scan_mode(ScanMode::Forward)
.fill_cache(true)
.max_skippable_internal_keys(100)
.build()?;
match cursor.seek(left, statistic) {
Ok(valid) => {
if valid && cursor.key(statistic) < right.as_encoded().as_slice() {
return Ok(true);
}
}
Err(e)
if e.to_string()
.contains("Result incomplete: Too many internal keys skipped") =>
{
return Ok(true);
}
err @ Err(_) => {
err?;
}
}
Ok(false)
}
/// Seek for the next valid (write type == Put or Delete) write record.
/// The write cursor must indicate a data key of the user key of which ts <= after_ts.
/// Return None if cannot find any valid write record.
///
/// GC fence will be checked against the specified `gc_fence_limit`. If `gc_fence_limit` is greater
/// than the `commit_ts` of the current write record pointed by the cursor, The caller must
/// guarantee that there are no other versions in range `(current_commit_ts, gc_fence_limit]`. Note
/// that if a record is determined as invalid by checking GC fence, the `write_cursor`'s position
/// will be left remain on it.
pub fn seek_for_valid_write<I>(
write_cursor: &mut Cursor<I>,
user_key: &Key,
after_ts: TimeStamp,
gc_fence_limit: TimeStamp,
statistics: &mut Statistics,
) -> Result<Option<Write>>
where
I: Iterator,
{
let mut ret = None;
while write_cursor.valid()?
&& Key::is_user_key_eq(
write_cursor.key(&mut statistics.write),
user_key.as_encoded(),
)
{
let write_ref = WriteRef::parse(write_cursor.value(&mut statistics.write))?;
if !write_ref.check_gc_fence_as_latest_version(gc_fence_limit) {
break;
}
match write_ref.write_type {
WriteType::Put | WriteType::Delete => {
assert_ge!(
after_ts,
Key::decode_ts_from(write_cursor.key(&mut statistics.write))?
);
ret = Some(write_ref.to_owned());
break;
}
WriteType::Lock | WriteType::Rollback => {
// Move to the next write record.
write_cursor.next(&mut statistics.write);
}
}
}
Ok(ret)
}
/// Seek for the last written value.
/// The write cursor must indicate a data key of the user key of which ts <= after_ts.
/// Return None if cannot find any valid write record or found a delete record.
///
/// GC fence will be checked against the specified `gc_fence_limit`. If `gc_fence_limit` is greater
/// than the `commit_ts` of the current write record pointed by the cursor, The caller must
/// guarantee that there are no other versions in range `(current_commit_ts, gc_fence_limit]`. Note
/// that if a record is determined as invalid by checking GC fence, the `write_cursor`'s position
/// will be left remain on it.
///
/// `write_cursor` maybe created with an `TsFilter`, which can filter out some key-value pairs with
/// less `commit_ts` than `ts_filter`. So if the got value has a less timestamp than `ts_filter`, it
/// should be replaced by None because the real wanted value can have been filtered.
pub fn seek_for_valid_value<I>(
write_cursor: &mut Cursor<I>,
default_cursor: &mut Cursor<I>,
user_key: &Key,
after_ts: TimeStamp,
gc_fence_limit: TimeStamp,
ts_filter: Option<TimeStamp>,
statistics: &mut Statistics,
) -> Result<OldValue>
where
I: Iterator,
{
let seek_after = || {
let seek_after = user_key.clone().append_ts(after_ts);
OldValue::SeekWrite(seek_after)
};
if let Some(write) =
seek_for_valid_write(write_cursor, user_key, after_ts, gc_fence_limit, statistics)?
{
if write.write_type == WriteType::Put {
if let Some(ts_filter) = ts_filter {
let k = write_cursor.key(&mut statistics.write);
if Key::decode_ts_from(k).unwrap() < ts_filter {
return Ok(seek_after());
}
}
let value = if let Some(v) = write.short_value {
v
} else {
near_load_data_by_write(default_cursor, user_key, write.start_ts, statistics)?
};
return Ok(OldValue::Value { value });
}
Ok(OldValue::None)
} else if ts_filter.is_some() {
Ok(seek_after())
} else {
Ok(OldValue::None)
}
}
pub(crate) fn load_data_by_lock<S: Snapshot, I: Iterator>(
current_user_key: &Key,
cfg: &ScannerConfig<S>,
default_cursor: &mut Cursor<I>,
lock: Lock,
statistics: &mut Statistics,
) -> Result<Option<Value>> {
match lock.lock_type {
LockType::Put => {
if cfg.omit_value {
return Ok(Some(vec![]));
}
match lock.short_value {
Some(value) => {
// Value is carried in `lock`.
Ok(Some(value.to_vec()))
}
None => {
let value = if cfg.desc {
near_reverse_load_data_by_write(
default_cursor,
current_user_key,
lock.ts,
statistics,
)
} else {
near_load_data_by_write(
default_cursor,
current_user_key,
lock.ts,
statistics,
)
}?;
Ok(Some(value))
}
}
}
LockType::Delete => Ok(None),
LockType::Lock | LockType::Pessimistic => {
// Only when fails to call `Lock::check_ts_conflict()`, the function is called, so it's
// unreachable here.
unreachable!()
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::kv::{
Engine, PerfStatisticsInstant, RocksEngine, TestEngineBuilder, SEEK_BOUND,
};
use crate::storage::mvcc::tests::*;
use crate::storage::mvcc::{Error as MvccError, ErrorInner as MvccErrorInner};
use crate::storage::txn::tests::*;
use crate::storage::txn::{
Error as TxnError, ErrorInner as TxnErrorInner, TxnEntry, TxnEntryScanner,
};
use engine_traits::MiscExt;
use txn_types::OldValue;
// Collect data from the scanner and assert it equals to `expected`, which is a collection of
// (raw_key, value).
// `None` value in `expected` means the key is locked.
fn check_scan_result<S: Snapshot>(
mut scanner: Scanner<S>,
expected: &[(Vec<u8>, Option<Vec<u8>>)],
) {
let mut scan_result = Vec::new();
loop {
match scanner.next() {
Ok(None) => break,
Ok(Some((key, value))) => scan_result.push((key.to_raw().unwrap(), Some(value))),
Err(TxnError(box TxnErrorInner::Mvcc(MvccError(
box MvccErrorInner::KeyIsLocked(mut info),
)))) => scan_result.push((info.take_key(), None)),
e => panic!("got error while scanning: {:?}", e),
}
}
assert_eq!(scan_result, expected);
}
fn test_scan_with_lock_and_write_impl(desc: bool) {
const SCAN_TS: TimeStamp = TimeStamp::new(10);
const PREV_TS: TimeStamp = TimeStamp::new(4);
const POST_TS: TimeStamp = TimeStamp::new(5);
let new_engine = || TestEngineBuilder::new().build().unwrap();
let add_write_at_ts = |commit_ts, engine, key, value| {
must_prewrite_put(engine, key, value, key, commit_ts);
must_commit(engine, key, commit_ts, commit_ts);
};
let add_lock_at_ts = |lock_ts, engine, key| {
must_prewrite_put(engine, key, b"lock", key, lock_ts);
must_locked(engine, key, lock_ts);
};
let test_scanner_result =
move |engine: &RocksEngine, expected_result: Vec<(Vec<u8>, Option<Vec<u8>>)>| {
let snapshot = engine.snapshot(Default::default()).unwrap();
let scanner = ScannerBuilder::new(snapshot, SCAN_TS)
.desc(desc)
.build()
.unwrap();
check_scan_result(scanner, &expected_result);
};
let desc_map = move |result: Vec<(Vec<u8>, Option<Vec<u8>>)>| {
if desc {
result.into_iter().rev().collect()
} else {
result
}
};
// Lock after write
let engine = new_engine();
add_write_at_ts(POST_TS, &engine, b"a", b"a_value");
add_lock_at_ts(PREV_TS, &engine, b"b");
let expected_result = desc_map(vec![
(b"a".to_vec(), Some(b"a_value".to_vec())),
(b"b".to_vec(), None),
]);
test_scanner_result(&engine, expected_result);
// Lock before write for same key
let engine = new_engine();
add_write_at_ts(PREV_TS, &engine, b"a", b"a_value");
add_lock_at_ts(POST_TS, &engine, b"a");
let expected_result = vec![(b"a".to_vec(), None)];
test_scanner_result(&engine, expected_result);
// Lock before write in different keys
let engine = new_engine();
add_lock_at_ts(POST_TS, &engine, b"a");
add_write_at_ts(PREV_TS, &engine, b"b", b"b_value");
let expected_result = desc_map(vec![
(b"a".to_vec(), None),
(b"b".to_vec(), Some(b"b_value".to_vec())),
]);
test_scanner_result(&engine, expected_result);
// Only a lock here
let engine = new_engine();
add_lock_at_ts(PREV_TS, &engine, b"a");
let expected_result = desc_map(vec![(b"a".to_vec(), None)]);
test_scanner_result(&engine, expected_result);
// Write Only
let engine = new_engine();
add_write_at_ts(PREV_TS, &engine, b"a", b"a_value");
let expected_result = desc_map(vec![(b"a".to_vec(), Some(b"a_value".to_vec()))]);
test_scanner_result(&engine, expected_result);
}
fn test_scan_with_lock_impl(desc: bool) {
let engine = TestEngineBuilder::new().build().unwrap();
for i in 0..5 {
must_prewrite_put(&engine, &[i], &[b'v', i], &[i], 1);
must_commit(&engine, &[i], 1, 2);
must_prewrite_put(&engine, &[i], &[b'v', i], &[i], 10);
must_commit(&engine, &[i], 10, 100);
}
must_acquire_pessimistic_lock(&engine, &[1], &[1], 20, 110);
must_acquire_pessimistic_lock(&engine, &[2], &[2], 50, 110);
must_acquire_pessimistic_lock(&engine, &[3], &[3], 105, 110);
must_prewrite_put(&engine, &[4], b"a", &[4], 105);
let snapshot = engine.snapshot(Default::default()).unwrap();
let mut expected_result = vec![
(vec![0], Some(vec![b'v', 0])),
(vec![1], Some(vec![b'v', 1])),
(vec![2], Some(vec![b'v', 2])),
(vec![3], Some(vec![b'v', 3])),
(vec![4], Some(vec![b'v', 4])),
];
if desc {
expected_result.reverse();
}
let scanner = ScannerBuilder::new(snapshot.clone(), 30.into())
.desc(desc)
.build()
.unwrap();
check_scan_result(scanner, &expected_result);
let scanner = ScannerBuilder::new(snapshot.clone(), 70.into())
.desc(desc)
.build()
.unwrap();
check_scan_result(scanner, &expected_result);
let scanner = ScannerBuilder::new(snapshot.clone(), 103.into())
.desc(desc)
.build()
.unwrap();
check_scan_result(scanner, &expected_result);
// The value of key 4 is locked at 105 so that it can't be read at 106
if desc {
expected_result[0].1 = None;
} else {
expected_result[4].1 = None;
}
let scanner = ScannerBuilder::new(snapshot, 106.into())
.desc(desc)
.build()
.unwrap();
check_scan_result(scanner, &expected_result);
}
#[test]
fn test_scan_with_lock_and_write() {
test_scan_with_lock_and_write_impl(true);
test_scan_with_lock_and_write_impl(false);
}
#[test]
fn test_scan_with_lock() {
test_scan_with_lock_impl(false);
test_scan_with_lock_impl(true);
}
fn test_scan_bypass_locks_impl(desc: bool) {
let engine = TestEngineBuilder::new().build().unwrap();
for i in 0..5 {
must_prewrite_put(&engine, &[i], &[b'v', i], &[i], 10);
must_commit(&engine, &[i], 10, 20);
}
// Locks are: 30, 40, 50, 60, 70
for i in 0..5 {
must_prewrite_put(&engine, &[i], &[b'v', i], &[i], 30 + u64::from(i) * 10);
}
let bypass_locks = TsSet::from_u64s(vec![30, 41, 50]);
// Scan at ts 65 will meet locks at 40 and 60.
let mut expected_result = vec![
(vec![0], Some(vec![b'v', 0])),
(vec![1], None),
(vec![2], Some(vec![b'v', 2])),
(vec![3], None),
(vec![4], Some(vec![b'v', 4])),
];
if desc {
expected_result = expected_result.into_iter().rev().collect();
}
let snapshot = engine.snapshot(Default::default()).unwrap();
let scanner = ScannerBuilder::new(snapshot, 65.into())
.desc(desc)
.bypass_locks(bypass_locks)
.build()
.unwrap();
check_scan_result(scanner, &expected_result);
}
#[test]
fn test_scan_bypass_locks() {
test_scan_bypass_locks_impl(false);
test_scan_bypass_locks_impl(true);
}
fn test_scan_access_locks_impl(desc: bool, delete_bound: bool) {
let engine = TestEngineBuilder::new().build().unwrap();
for i in 0..=8 {
must_prewrite_put(&engine, &[i], &[b'v', i], &[i], 10);
must_commit(&engine, &[i], 10, 20);
}
if delete_bound {
must_prewrite_delete(&engine, &[0], &[0], 30); // access delete
} else {
must_prewrite_put(&engine, &[0], &[b'v', 0, 0], &[0], 30); // access put
}
must_prewrite_put(&engine, &[1], &[b'v', 1, 1], &[1], 40); // access put
must_prewrite_delete(&engine, &[2], &[2], 50); // access delete
must_prewrite_lock(&engine, &[3], &[3], 60); // access lock(actually ignored)
must_prewrite_put(&engine, &[4], &[b'v', 4, 4], &[4], 70); // locked
must_prewrite_put(&engine, &[5], &[b'v', 5, 5], &[5], 80); // bypass
must_prewrite_put(&engine, &[6], &[b'v', 6, 6], &[6], 100); // locked with larger ts
if delete_bound {
must_prewrite_delete(&engine, &[8], &[8], 90); // access delete
} else {
must_prewrite_put(&engine, &[8], &[b'v', 8, 8], &[8], 90); // access put
}
let bypass_locks = TsSet::from_u64s(vec![80]);
let access_locks = TsSet::from_u64s(vec![30, 40, 50, 60, 90]);
let mut expected_result = vec![
(vec![0], Some(vec![b'v', 0, 0])), /* access put if not delete_bound */
(vec![1], Some(vec![b'v', 1, 1])), /* access put */
/* vec![2] access delete */
(vec![3], Some(vec![b'v', 3])), /* ignore LockType::Lock */
(vec![4], None), /* locked */
(vec![5], Some(vec![b'v', 5])), /* bypass */
(vec![6], Some(vec![b'v', 6])), /* ignore lock with larger ts */
(vec![7], Some(vec![b'v', 7])), /* no lock */
(vec![8], Some(vec![b'v', 8, 8])), /* access put if not delete_bound*/
];
if desc {
expected_result.reverse();
}
let snapshot = engine.snapshot(Default::default()).unwrap();
let scanner = ScannerBuilder::new(snapshot, 95.into())
.desc(desc)
.bypass_locks(bypass_locks)
.access_locks(access_locks)
.build()
.unwrap();
check_scan_result(
scanner,
if delete_bound {
&expected_result[1..expected_result.len() - 1]
} else {
&expected_result
},
);
}
#[test]
fn test_scan_access_locks() {
for (desc, delete_bound) in [(false, false), (false, true), (true, false), (true, true)] {
test_scan_access_locks_impl(desc, delete_bound);
}
}
fn must_met_newer_ts_data<E: Engine>(
engine: &E,
scanner_ts: impl Into<TimeStamp>,
key: &[u8],
value: Option<&[u8]>,
desc: bool,
expected_met_newer_ts_data: bool,
) {
let mut scanner = ScannerBuilder::new(
engine.snapshot(Default::default()).unwrap(),
scanner_ts.into(),
)
.desc(desc)
.range(Some(Key::from_raw(key)), None)
.check_has_newer_ts_data(true)
.build()
.unwrap();
let result = scanner.next().unwrap();
if let Some(value) = value {
let (k, v) = result.unwrap();
assert_eq!(k, Key::from_raw(key));
assert_eq!(v, value);
} else {
assert!(result.is_none());
}
let expected = if expected_met_newer_ts_data {
NewerTsCheckState::Met
} else {
NewerTsCheckState::NotMetYet
};
assert_eq!(expected, scanner.met_newer_ts_data());
}
fn test_met_newer_ts_data_impl(deep_write_seek: bool, desc: bool) {
let engine = TestEngineBuilder::new().build().unwrap();
let (key, val1) = (b"foo", b"bar1");
if deep_write_seek {
for i in 0..SEEK_BOUND {
must_prewrite_put(&engine, key, val1, key, i);
must_commit(&engine, key, i, i);
}
}
must_prewrite_put(&engine, key, val1, key, 100);
must_commit(&engine, key, 100, 200);
let (key, val2) = (b"foo", b"bar2");
must_prewrite_put(&engine, key, val2, key, 300);
must_commit(&engine, key, 300, 400);
must_met_newer_ts_data(
&engine,
100,
key,
if deep_write_seek { Some(val1) } else { None },
desc,
true,
);
must_met_newer_ts_data(&engine, 200, key, Some(val1), desc, true);
must_met_newer_ts_data(&engine, 300, key, Some(val1), desc, true);
must_met_newer_ts_data(&engine, 400, key, Some(val2), desc, false);
must_met_newer_ts_data(&engine, 500, key, Some(val2), desc, false);
must_prewrite_lock(&engine, key, key, 600);
must_met_newer_ts_data(&engine, 500, key, Some(val2), desc, true);
must_met_newer_ts_data(&engine, 600, key, Some(val2), desc, true);
}
#[test]
fn test_met_newer_ts_data() {
test_met_newer_ts_data_impl(false, false);
test_met_newer_ts_data_impl(false, true);
test_met_newer_ts_data_impl(true, false);
test_met_newer_ts_data_impl(true, true);
}
#[test]
fn test_old_value_with_hint_min_ts() {
let engine = TestEngineBuilder::new().build_without_cache().unwrap();
let create_scanner = |from_ts: u64| {
let snap = engine.snapshot(Default::default()).unwrap();
ScannerBuilder::new(snap, TimeStamp::max())
.fill_cache(false)
.hint_min_ts(Some(from_ts.into()))
.build_delta_scanner(from_ts.into(), ExtraOp::ReadOldValue)
.unwrap()
};
let mut value = Vec::with_capacity(1024);
(0..128).for_each(|_| value.extend_from_slice(b"long-val"));
// Create the initial data with CF_WRITE L0: |zkey_110, zkey1_160|
must_prewrite_put(&engine, b"zkey", &value, b"zkey", 100);
must_commit(&engine, b"zkey", 100, 110);
must_prewrite_put(&engine, b"zkey1", &value, b"zkey1", 150);
must_commit(&engine, b"zkey1", 150, 160);
engine.kv_engine().flush_cf(CF_WRITE, true).unwrap();
engine.kv_engine().flush_cf(CF_DEFAULT, true).unwrap();
must_prewrite_delete(&engine, b"zkey", b"zkey", 200);
let tests = vec![
// `zkey_110` is filtered, so no old value and block reads is 0.
(200, OldValue::seek_write(b"zkey", 200), 0),
// Old value can be found as expected, read 2 blocks from CF_WRITE and CF_DEFAULT.
(100, OldValue::value(value.clone()), 2),
// `zkey_110` isn't filtered, so needs to read 1 block from CF_WRITE.
// But we can't ensure whether it's the old value or not.
(150, OldValue::seek_write(b"zkey", 200), 1),
];
for (from_ts, expected_old_value, block_reads) in tests {
let mut scanner = create_scanner(from_ts);
let perf_instant = PerfStatisticsInstant::new();
match scanner.next_entry().unwrap().unwrap() {
TxnEntry::Prewrite { old_value, .. } => assert_eq!(old_value, expected_old_value),
TxnEntry::Commit { .. } => unreachable!(),
}
let delta = perf_instant.delta().0;
assert_eq!(delta.block_read_count, block_reads);
}
// CF_WRITE L0: |zkey_110, zkey1_160|, |zkey_210|
must_commit(&engine, b"zkey", 200, 210);
engine.kv_engine().flush_cf(CF_WRITE, false).unwrap();
engine.kv_engine().flush_cf(CF_DEFAULT, false).unwrap();
let tests = vec![
// `zkey_110` is filtered, so no old value and block reads is 0.
(200, OldValue::seek_write(b"zkey", 209), 0),
// Old value can be found as expected, read 2 blocks from CF_WRITE and CF_DEFAULT.
(100, OldValue::value(value), 2),
// `zkey_110` isn't filtered, so needs to read 1 block from CF_WRITE.
// But we can't ensure whether it's the old value or not.
(150, OldValue::seek_write(b"zkey", 209), 1),
];
for (from_ts, expected_old_value, block_reads) in tests {
let mut scanner = create_scanner(from_ts);
let perf_instant = PerfStatisticsInstant::new();
match scanner.next_entry().unwrap().unwrap() {
TxnEntry::Prewrite { .. } => unreachable!(),
TxnEntry::Commit { old_value, .. } => assert_eq!(old_value, expected_old_value),
}
let delta = perf_instant.delta().0;
assert_eq!(delta.block_read_count, block_reads);
}
}
#[test]
fn test_rc_scan_skip_lock() |
fn test_rc_scan_skip_lock_impl(desc: bool) {
let engine = TestEngineBuilder::new().build().unwrap();
let (key1, val1, val12) = (b"foo1", b"bar1", b"bar12");
let (key2, val2) = (b"foo2", b"bar2");
let mut expected = vec![(key1, val1), (key2, val2)];
if desc {
expected.reverse();
}
must_prewrite_put(&engine, key1, val1, key1, 10);
must_commit(&engine, key1, 10, 20);
must_prewrite_put(&engine, key2, val2, key2, 30);
must_commit(&engine, key2, 30, 40);
must_prewrite_put(&engine, key1, val12, key1, 50);
let snapshot = engine.snapshot(Default::default()).unwrap();
let mut scanner = ScannerBuilder::new(snapshot, 60.into())
.fill_cache(false)
.range(Some(Key::from_raw(key1)), None)
.desc(desc)
.isolation_level(IsolationLevel::Rc)
.build()
.unwrap();
for e in expected {
let (k, v) = scanner.next().unwrap().unwrap();
assert_eq!(k, Key::from_raw(e.0));
assert_eq!(v, e.1);
}
assert!(scanner.next().unwrap().is_none());
assert_eq!(scanner.take_statistics().lock.total_op_count(), 0);
}
}
| {
test_rc_scan_skip_lock_impl(false);
test_rc_scan_skip_lock_impl(true);
} |
binaryComplement1sFunc.py | from .__init__ import *
from ..__init__ import Generator
def binaryComplement1sFunc(maxDigits=10):
|
binaryComplement1s = Generator("Binary Complement 1s", 4, "1010=", "0101",
binaryComplement1sFunc)
| question = ''
answer = ''
for i in range(random.randint(1, maxDigits)):
temp = str(random.randint(0, 1))
question += temp
answer += "0" if temp == "1" else "1"
problem = question + "="
solution = answer
return problem, solution |
ms.js | (function(d){d['ms']=Object.assign(d['ms']||{},{a:"Gagal memuat naik fail",b:"Image toolbar",c:"Table toolbar",d:"Align left",e:"Align right",f:"Align center",g:"Justify",h:"Text alignment",i:"Text alignment toolbar",j:"Bold",k:"Italic",l:"Block quote",m:"Insert image or file",n:"Choose heading",o:"Heading",p:"image widget",q:"Full size image",r:"Side image",s:"Left aligned image",t:"Centered image",u:"Right aligned image",v:"Insert image",w:"Increase indent",x:"Decrease indent",y:"Numbered List",z:"Bulleted List",aa:"Insert table",ab:"Header column",ac:"Insert column left",ad:"Insert column right",ae:"Delete column",af:"Column",ag:"Header row",ah:"Insert row below",ai:"Insert row above",aj:"Delete row",ak:"Row",al:"Merge cell up",am:"Merge cell right",an:"Merge cell down",ao:"Merge cell left",ap:"Split cell vertically",aq:"Split cell horizontally",ar:"Merge cells",as:"Enter image caption",at:"Upload failed",au:"media widget",av:"Insert media",aw:"The URL must not be empty.",ax:"This media URL is not supported.",ay:"Link",az:"Widget toolbar",ba:"Upload in progress",bb:"Could not obtain resized image URL.",bc:"Selecting resized image failed",bd:"Could not insert image at the current position.",be:"Inserting image failed",bf:"Font Family",bg:"Default",bh:"Font Size",bi:"Tiny",bj:"Small",bk:"Big",bl:"Huge",bm:"Font Color",bn:"Font Background Color",bo:"Change image text alternative",bp:"Undo",bq:"Redo",br:"Rich Text Editor",bs:"Rich Text Editor, %0",bt:"Dropdown toolbar",bu:"Editor toolbar",bv:"Show more items",bw:"%0 of %1",bx:"Previous",by:"Next",bz:"Remove color",ca:"Document colors",cb:"Black",cc:"Dim grey",cd:"Grey",ce:"Light grey",cf:"White",cg:"Red",ch:"Orange",ci:"Yellow",cj:"Light green",ck:"Green",cl:"Aquamarine",cm:"Turquoise",cn:"Light blue",co:"Blue",cp:"Purple",cq:"Save",cr:"Cancel",cs:"Text alternative",ct:"Open in a new tab",cu:"Downloadable",cv:"Unlink",cw:"Edit link",cx:"Open link in new tab",cy:"This link has no URL",cz:"Link URL",da:"Paste the media URL in the input.",db:"Tip: Paste the URL into the content to embed faster.",dc:"Media URL",dd:"Paragraph",de:"Heading 1",df:"Heading 2",dg:"Heading 3",dh:"Heading 4",di:"Heading 5",dj:"Heading 6"})})(window.CKEDITOR_TRANSLATIONS||(window.CKEDITOR_TRANSLATIONS={})); |
||
app.module.ts | import { Module } from '@nestjs/common';
import { MoviesModule } from './movies/movies.module';
import { AppService } from './app.service';
import { AppController } from './app.controller';
@Module({
imports: [MoviesModule],
controllers: [AppController],
providers: [AppService],
})
export class | {}
| AppModule |
types.go | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rpc
import (
"fmt"
"math"
"reflect"
"strings"
"sync"
"github.com/lianxiangcloud/linkchain/libs/hexutil"
"gopkg.in/fatih/set.v0"
)
// API describes the set of methods offered over the RPC interface
type API struct {
Namespace string // namespace under which the rpc methods of Service are exposed
Version string // api version for DApp's
Service interface{} // receiver instance which holds the methods
Public bool // indication if the methods must be considered safe for public use
}
// callback is a method callback which was registered in the server
type callback struct {
rcvr reflect.Value // receiver of method
method reflect.Method // callback
argTypes []reflect.Type // input argument types
hasCtx bool // method's first argument is a context (not included in argTypes)
errPos int // err return idx, of -1 when method cannot return error
isSubscribe bool // indication if the callback is a subscription
}
// service represents a registered object
type service struct {
name string // name for service
typ reflect.Type // receiver type
callbacks callbacks // registered handlers
subscriptions subscriptions // available subscriptions/notifications
}
// serverRequest is an incoming request
type serverRequest struct {
id interface{}
svcname string
callb *callback
args []reflect.Value
isUnsubscribe bool
err Error
}
type serviceRegistry map[string]*service // collection of services
type callbacks map[string]*callback // collection of RPC callbacks
type subscriptions map[string]*callback // collection of subscription callbacks
// Server represents a RPC server
type Server struct {
services serviceRegistry
run int32
codecsMu sync.Mutex
codecs *set.Set
}
// rpcRequest represents a raw incoming RPC request
type rpcRequest struct {
service string
method string
id interface{}
isPubSub bool
params interface{}
err Error // invalid batch element
}
// Error wraps RPC errors, which contain an error code in addition to the message.
type Error interface {
Error() string // returns the message
ErrorCode() int // returns the code
}
// ServerCodec implements reading, parsing and writing RPC messages for the server side of
// a RPC session. Implementations must be go-routine safe since the codec can be called in
// multiple go-routines concurrently.
type ServerCodec interface {
// Read next request
ReadRequestHeaders() ([]rpcRequest, bool, Error)
// Parse request argument to the given types
ParseRequestArguments(argTypes []reflect.Type, params interface{}) ([]reflect.Value, Error)
// Assemble success response, expects response id and payload
CreateResponse(id interface{}, reply interface{}) interface{}
// Assemble error response, expects response id and error
CreateErrorResponse(id interface{}, err Error) interface{}
// Assemble error response with extra information about the error through info
CreateErrorResponseWithInfo(id interface{}, err Error, info interface{}) interface{}
// Create notification response
CreateNotification(id, namespace string, event interface{}) interface{}
// Write msg to client.
Write(msg interface{}) error
// Close underlying data stream
Close()
// Closed when underlying connection is closed
Closed() <-chan interface{}
}
type BlockNumber int64
const (
PendingBlockNumber = BlockNumber(-2)
LatestBlockNumber = BlockNumber(-1)
EarliestBlockNumber = BlockNumber(0)
)
// UnmarshalJSON parses the given JSON fragment into a BlockNumber. It supports:
// - "latest", "earliest" or "pending" as string arguments
// - the block number
// Returned errors:
// - an invalid block number error when the given argument isn't a known strings
// - an out of range error when the given block number is either too little or too large
func (bn *BlockNumber) UnmarshalJSON(data []byte) error {
input := strings.TrimSpace(string(data))
if len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' {
input = input[1 : len(input)-1]
}
switch input {
case "earliest": | *bn = LatestBlockNumber
return nil
case "pending":
*bn = PendingBlockNumber
return nil
case "delay":
*bn = PendingBlockNumber
return nil
}
blckNum, err := hexutil.DecodeUint64(input)
if err != nil {
return err
}
if blckNum > math.MaxInt64 {
return fmt.Errorf("Blocknumber too high")
}
*bn = BlockNumber(blckNum)
return nil
}
func (bn BlockNumber) String() string {
switch bn {
case EarliestBlockNumber:
return "earliest"
case LatestBlockNumber:
return "latest"
case PendingBlockNumber:
return "pending"
}
str := fmt.Sprintf("0x%x", int64(bn))
return str
}
func (bn BlockNumber) Int64() int64 {
return (int64)(bn)
} | *bn = EarliestBlockNumber
return nil
case "latest": |
models.py | from django.db import models
class CapitalizeField(models.CharField):
def __init__(self, *args, **kwargs):
super(CapitalizeField, self).__init__(*args, **kwargs)
def | (self, model_instance, add):
value = getattr(model_instance, self.attname, None)
if value:
value = value.capitalize()
setattr(model_instance, self.attname, value)
return value
else:
return super(CapitalizeField, self).pre_save(model_instance, add)
class CustomManager(models.Manager):
"""
Custom manager so as not to return deleted objects
"""
def get_queryset(self):
return super(CustomManager, self).get_queryset().filter(deleted=False)
class AbstractBase(models.Model):
"""
This contains all common object attributes
Every model will inherit this class to avoid repetition
Its abstract hence can't be instatiated
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(
default=False,
help_text="This is to make sure deletes are not actual deletes"
)
# everything will be used to query deleted objects e.g Model.everything.all()
everything = models.Manager()
objects = CustomManager()
def delete(self, *args, **kwargs):
self.deleted = True
self.save()
class Meta:
ordering = ['-updated_at', '-created_at']
abstract = True
| pre_save |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.