file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---
app.component.ts | import { Component } from '@angular/core';
| @Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.scss']
})
export class AppComponent {
title = 'my-app-v7';
} | |
customerGateway.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package ec2
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Provides a customer gateway inside a VPC. These objects can be connected to VPN gateways via VPN connections, and allow you to establish tunnels between your network and the VPC.
//
// ## Example Usage
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v4/go/aws/ec2"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := ec2.NewCustomerGateway(ctx, "main", &ec2.CustomerGatewayArgs{
// BgpAsn: pulumi.String("65000"),
// IpAddress: pulumi.String("172.83.124.10"),
// Tags: pulumi.StringMap{
// "Name": pulumi.String("main-customer-gateway"),
// },
// Type: pulumi.String("ipsec.1"),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// Customer Gateways can be imported using the `id`, e.g.,
//
// ```sh
// $ pulumi import aws:ec2/customerGateway:CustomerGateway main cgw-b4dc3961
// ```
type CustomerGateway struct {
pulumi.CustomResourceState
// The ARN of the customer gateway.
Arn pulumi.StringOutput `pulumi:"arn"`
// The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).
BgpAsn pulumi.StringOutput `pulumi:"bgpAsn"`
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn pulumi.StringPtrOutput `pulumi:"certificateArn"`
// A name for the customer gateway device.
DeviceName pulumi.StringPtrOutput `pulumi:"deviceName"`
// The IP address of the gateway's Internet-routable external interface.
IpAddress pulumi.StringOutput `pulumi:"ipAddress"`
// Tags to apply to the gateway. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags pulumi.StringMapOutput `pulumi:"tags"`
// A map of tags assigned to the resource, including those inherited from the provider `defaultTags` configuration block.
TagsAll pulumi.StringMapOutput `pulumi:"tagsAll"`
// The type of customer gateway. The only type AWS
// supports at this time is "ipsec.1".
Type pulumi.StringOutput `pulumi:"type"`
}
// NewCustomerGateway registers a new resource with the given unique name, arguments, and options.
func NewCustomerGateway(ctx *pulumi.Context,
name string, args *CustomerGatewayArgs, opts ...pulumi.ResourceOption) (*CustomerGateway, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.BgpAsn == nil {
return nil, errors.New("invalid value for required argument 'BgpAsn'")
}
if args.IpAddress == nil {
return nil, errors.New("invalid value for required argument 'IpAddress'")
}
if args.Type == nil {
return nil, errors.New("invalid value for required argument 'Type'")
}
var resource CustomerGateway
err := ctx.RegisterResource("aws:ec2/customerGateway:CustomerGateway", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetCustomerGateway gets an existing CustomerGateway resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetCustomerGateway(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *CustomerGatewayState, opts ...pulumi.ResourceOption) (*CustomerGateway, error) {
var resource CustomerGateway
err := ctx.ReadResource("aws:ec2/customerGateway:CustomerGateway", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
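// A minimal lookup sketch (the resource name "imported" is hypothetical; the
// ID reuses the one from the Import example above):
//
//	gw, err := ec2.GetCustomerGateway(ctx, "imported", pulumi.ID("cgw-b4dc3961"), nil)
//	if err != nil {
//		return err
//	}
//	_ = gw.IpAddress // outputs can then be referenced as usual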
// Input properties used for looking up and filtering CustomerGateway resources.
type customerGatewayState struct {
// The ARN of the customer gateway.
Arn *string `pulumi:"arn"`
// The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).
BgpAsn *string `pulumi:"bgpAsn"`
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn *string `pulumi:"certificateArn"`
// A name for the customer gateway device.
DeviceName *string `pulumi:"deviceName"`
// The IP address of the gateway's Internet-routable external interface.
IpAddress *string `pulumi:"ipAddress"`
// Tags to apply to the gateway. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags map[string]string `pulumi:"tags"`
// A map of tags assigned to the resource, including those inherited from the provider `defaultTags` configuration block.
TagsAll map[string]string `pulumi:"tagsAll"`
// The type of customer gateway. The only type AWS
// supports at this time is "ipsec.1".
Type *string `pulumi:"type"`
}
type CustomerGatewayState struct {
// The ARN of the customer gateway.
Arn pulumi.StringPtrInput | BgpAsn pulumi.StringPtrInput
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn pulumi.StringPtrInput
// A name for the customer gateway device.
DeviceName pulumi.StringPtrInput
// The IP address of the gateway's Internet-routable external interface.
IpAddress pulumi.StringPtrInput
// Tags to apply to the gateway. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags pulumi.StringMapInput
// A map of tags assigned to the resource, including those inherited from the provider `defaultTags` configuration block.
TagsAll pulumi.StringMapInput
// The type of customer gateway. The only type AWS
// supports at this time is "ipsec.1".
Type pulumi.StringPtrInput
}
func (CustomerGatewayState) ElementType() reflect.Type {
return reflect.TypeOf((*customerGatewayState)(nil)).Elem()
}
type customerGatewayArgs struct {
// The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).
BgpAsn string `pulumi:"bgpAsn"`
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn *string `pulumi:"certificateArn"`
// A name for the customer gateway device.
DeviceName *string `pulumi:"deviceName"`
// The IP address of the gateway's Internet-routable external interface.
IpAddress string `pulumi:"ipAddress"`
// Tags to apply to the gateway. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags map[string]string `pulumi:"tags"`
// The type of customer gateway. The only type AWS
// supports at this time is "ipsec.1".
Type string `pulumi:"type"`
}
// The set of arguments for constructing a CustomerGateway resource.
type CustomerGatewayArgs struct {
// The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).
BgpAsn pulumi.StringInput
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn pulumi.StringPtrInput
// A name for the customer gateway device.
DeviceName pulumi.StringPtrInput
// The IP address of the gateway's Internet-routable external interface.
IpAddress pulumi.StringInput
// Tags to apply to the gateway. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Tags pulumi.StringMapInput
// The type of customer gateway. The only type AWS
// supports at this time is "ipsec.1".
Type pulumi.StringInput
}
func (CustomerGatewayArgs) ElementType() reflect.Type {
return reflect.TypeOf((*customerGatewayArgs)(nil)).Elem()
}
type CustomerGatewayInput interface {
pulumi.Input
ToCustomerGatewayOutput() CustomerGatewayOutput
ToCustomerGatewayOutputWithContext(ctx context.Context) CustomerGatewayOutput
}
func (*CustomerGateway) ElementType() reflect.Type {
return reflect.TypeOf((**CustomerGateway)(nil)).Elem()
}
func (i *CustomerGateway) ToCustomerGatewayOutput() CustomerGatewayOutput {
return i.ToCustomerGatewayOutputWithContext(context.Background())
}
func (i *CustomerGateway) ToCustomerGatewayOutputWithContext(ctx context.Context) CustomerGatewayOutput {
return pulumi.ToOutputWithContext(ctx, i).(CustomerGatewayOutput)
}
// CustomerGatewayArrayInput is an input type that accepts CustomerGatewayArray and CustomerGatewayArrayOutput values.
// You can construct a concrete instance of `CustomerGatewayArrayInput` via:
//
// CustomerGatewayArray{ CustomerGatewayArgs{...} }
type CustomerGatewayArrayInput interface {
pulumi.Input
ToCustomerGatewayArrayOutput() CustomerGatewayArrayOutput
ToCustomerGatewayArrayOutputWithContext(context.Context) CustomerGatewayArrayOutput
}
type CustomerGatewayArray []CustomerGatewayInput
func (CustomerGatewayArray) ElementType() reflect.Type {
return reflect.TypeOf((*[]*CustomerGateway)(nil)).Elem()
}
func (i CustomerGatewayArray) ToCustomerGatewayArrayOutput() CustomerGatewayArrayOutput {
return i.ToCustomerGatewayArrayOutputWithContext(context.Background())
}
func (i CustomerGatewayArray) ToCustomerGatewayArrayOutputWithContext(ctx context.Context) CustomerGatewayArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(CustomerGatewayArrayOutput)
}
// CustomerGatewayMapInput is an input type that accepts CustomerGatewayMap and CustomerGatewayMapOutput values.
// You can construct a concrete instance of `CustomerGatewayMapInput` via:
//
// CustomerGatewayMap{ "key": CustomerGatewayArgs{...} }
type CustomerGatewayMapInput interface {
pulumi.Input
ToCustomerGatewayMapOutput() CustomerGatewayMapOutput
ToCustomerGatewayMapOutputWithContext(context.Context) CustomerGatewayMapOutput
}
type CustomerGatewayMap map[string]CustomerGatewayInput
func (CustomerGatewayMap) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]*CustomerGateway)(nil)).Elem()
}
func (i CustomerGatewayMap) ToCustomerGatewayMapOutput() CustomerGatewayMapOutput {
return i.ToCustomerGatewayMapOutputWithContext(context.Background())
}
func (i CustomerGatewayMap) ToCustomerGatewayMapOutputWithContext(ctx context.Context) CustomerGatewayMapOutput {
return pulumi.ToOutputWithContext(ctx, i).(CustomerGatewayMapOutput)
}
type CustomerGatewayOutput struct{ *pulumi.OutputState }
func (CustomerGatewayOutput) ElementType() reflect.Type {
return reflect.TypeOf((**CustomerGateway)(nil)).Elem()
}
func (o CustomerGatewayOutput) ToCustomerGatewayOutput() CustomerGatewayOutput {
return o
}
func (o CustomerGatewayOutput) ToCustomerGatewayOutputWithContext(ctx context.Context) CustomerGatewayOutput {
return o
}
type CustomerGatewayArrayOutput struct{ *pulumi.OutputState }
func (CustomerGatewayArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]*CustomerGateway)(nil)).Elem()
}
func (o CustomerGatewayArrayOutput) ToCustomerGatewayArrayOutput() CustomerGatewayArrayOutput {
return o
}
func (o CustomerGatewayArrayOutput) ToCustomerGatewayArrayOutputWithContext(ctx context.Context) CustomerGatewayArrayOutput {
return o
}
func (o CustomerGatewayArrayOutput) Index(i pulumi.IntInput) CustomerGatewayOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) *CustomerGateway {
return vs[0].([]*CustomerGateway)[vs[1].(int)]
}).(CustomerGatewayOutput)
}
type CustomerGatewayMapOutput struct{ *pulumi.OutputState }
func (CustomerGatewayMapOutput) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]*CustomerGateway)(nil)).Elem()
}
func (o CustomerGatewayMapOutput) ToCustomerGatewayMapOutput() CustomerGatewayMapOutput {
return o
}
func (o CustomerGatewayMapOutput) ToCustomerGatewayMapOutputWithContext(ctx context.Context) CustomerGatewayMapOutput {
return o
}
func (o CustomerGatewayMapOutput) MapIndex(k pulumi.StringInput) CustomerGatewayOutput {
return pulumi.All(o, k).ApplyT(func(vs []interface{}) *CustomerGateway {
return vs[0].(map[string]*CustomerGateway)[vs[1].(string)]
}).(CustomerGatewayOutput)
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*CustomerGatewayInput)(nil)).Elem(), &CustomerGateway{})
pulumi.RegisterInputType(reflect.TypeOf((*CustomerGatewayArrayInput)(nil)).Elem(), CustomerGatewayArray{})
pulumi.RegisterInputType(reflect.TypeOf((*CustomerGatewayMapInput)(nil)).Elem(), CustomerGatewayMap{})
pulumi.RegisterOutputType(CustomerGatewayOutput{})
pulumi.RegisterOutputType(CustomerGatewayArrayOutput{})
pulumi.RegisterOutputType(CustomerGatewayMapOutput{})
} | // The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). |
notebooks.go | package notebooks
import (
"encoding/json"
"github.com/navikt/mutatingflow/pkg/apis/notebook/v1alpha1"
"github.com/navikt/mutatingflow/pkg/commons"
log "github.com/sirupsen/logrus"
"k8s.io/api/admission/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
// notebookNameAnnotation is the annotation we use to check if a pod is of the notebook type
notebookNameAnnotation = "notebook-name"
)
func mutatePodSpec(spec corev1.PodSpec) corev1.PodSpec |
func patchPodTemplate(spec corev1.PodSpec) commons.PatchOperation {
return commons.PatchOperation{
Op: "add",
Path: "/spec/template/spec",
Value: mutatePodSpec(spec),
}
}
func createPatch(notebook v1alpha1.Notebook) ([]byte, error) {
var patch []commons.PatchOperation
patch = append(patch, patchPodTemplate(notebook.Spec.Template.Spec))
patch = append(patch, commons.PatchStatusAnnotation(notebook.Annotations))
return json.Marshal(patch)
}
func MutateNotebook(request v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
var notebook v1alpha1.Notebook
err := json.Unmarshal(request.Object.Raw, &notebook)
if err != nil {
log.Errorf("Notebook: Couldn't unmarshal raw object: %v", err)
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
log.Infof("Notebook: Namespace=%v Name=%v (%v) patchOperation=%v", request.Namespace, request.Name, notebook.Name, request.Operation)
if !commons.MutationRequired(notebook.ObjectMeta, notebookNameAnnotation) {
log.Infof("Notebook: Skipping mutation for %s/%s due to policy check", notebook.Namespace, notebook.Name)
return &v1beta1.AdmissionResponse{
Allowed: true,
}
}
patchBytes, err := createPatch(notebook)
if err != nil {
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
log.Info("Notebook: Mutated")
return &v1beta1.AdmissionResponse{
Allowed: true,
Patch: patchBytes,
PatchType: func() *v1beta1.PatchType {
pt := v1beta1.PatchTypeJSONPatch
return &pt
}(),
}
}
| {
spec.ImagePullSecrets = []corev1.LocalObjectReference{
{Name: "gpr-credentials"},
}
return spec
} |
size_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
| package math
import test "github.com/nelsam/gxui/testing"
import "testing"
func TestSizeEdgeAlignedFitTopEdge(t *testing.T) {
outer := CreateRect(0, 0, 100, 100)
s := Size{10, 10}
p := Point{50, 50}
test.AssertEquals(t, CreateRect(45, 50, 55, 60), s.EdgeAlignedFit(outer, p))
}
func TestSizeEdgeAlignedFitBottomEdge(t *testing.T) {
outer := CreateRect(0, 0, 100, 100)
s := Size{10, 10}
p := Point{50, 95}
test.AssertEquals(t, CreateRect(45, 85, 55, 95), s.EdgeAlignedFit(outer, p))
}
func TestSizeEdgeAlignedFitLeftEdge(t *testing.T) {
outer := CreateRect(0, 0, 100, 100)
s := Size{10, 80}
p := Point{5, 50}
test.AssertEquals(t, CreateRect(5, 10, 15, 90), s.EdgeAlignedFit(outer, p))
}
func TestSizeEdgeAlignedFitRightEdge(t *testing.T) {
outer := CreateRect(0, 0, 100, 100)
s := Size{10, 80}
p := Point{95, 50}
test.AssertEquals(t, CreateRect(85, 10, 95, 90), s.EdgeAlignedFit(outer, p))
} | |
api.py | __author__ = 'Fang.Xu'
newsrefresh='/api/v1.0/news/refresh'
newsloadmore='/api/v1.0/news/loadmore/<string:nid>'
updatesrefresh='/api/v1.0/updates/refresh'
updatesloadmore='/api/v1.0/updates/loadmore/<string:nid>'
newsdetail='/api/v1.0/newsdetail/<string:date>/<string:nid>'
strategyrefresh='/api/v1.0/strategy/refresh/<string:strategy_type>'
strategyloadmore='/api/v1.0/strategy/loadmore/<string:strategy_type>/<string:nid>'
videorefresh='/api/v1.0/video/refresh/<string:video_type>' | videoykvid='/api/v1.0/video/youkuvid/<string:date>/<string:vid>' | videoloadmore='/api/v1.0/video/loadmore/<string:video_type>/<string:vid>'
videoset='/api/v1.0/video/videoset/<string:date>/<string:vid>' |
contenido_gc.py | #!/usr/bin/python
# CALCULATION OF GC CONTENT (GENES)
import os
import logging
import argparse
import fastalib
from fileutils import safe_filename
def | ():
args_parser = argparse.ArgumentParser(
description='Find the GC content in FASTA files of a directory',
)
args_parser.add_argument(
'path',
help='Directory in which to look for FASTA files; use . for the current working directory',
)
args_parser.add_argument(
'--tron',
dest='tron',
action='store_const',
const=True,
default=False,
help='Show trace of activity (Disabled by default)',
)
args = args_parser.parse_args()
logging.basicConfig(
level=logging.INFO if args.tron else logging.ERROR,
format='%(asctime)s %(levelname)s %(message)s',
)
return args
if __name__ == '__main__':
args = get_option_args()
genomes = [fn for fn in os.listdir(args.path) if fn.endswith(".faa")]
for filename in genomes:
if args.tron:
logging.info('Processing {}'.format(filename))
full_name = os.path.join(args.path, filename)
data = fastalib.read_fasta_file(full_name)
if args.tron:
logging.info('Generating output files')
num_outputs = 0
for key in data:
lines = data[key]
filename = safe_filename('result_id_{}.fasta'.format(key))
with open(filename, 'w') as f1:
for l in lines:
f1.write('{}\n'.format(l))
num_outputs += 1
g, a, t, c = fastalib.count_nucleotydes_gatc(lines)
filename = safe_filename('result_GC_{}.fasta'.format(key))
with open(filename, 'w') as f2:
f2.write('Guanine: {:d}\n'.format(g))
f2.write('Adenine: {:d}\n'.format(a))
f2.write('Thymine: {:d}\n'.format(t))
f2.write('Cytosine: {:d}\n'.format(c))
p = round(float(c+g)/(a+c+g+t), 9)
f2.write('CG proportion: {:9f}\n'.format(p))
num_outputs += 1
if args.tron:
logging.info('Finished: files processed {}, generated {}'.format(
len(genomes),
num_outputs,
))
| get_option_args |
opening2d.py | import numpy as np
import itk
import matplotlib.pyplot as plt
# Input file name
input_filename = './jenga_g_150.png'
# Set dimension
Dimension = 2
| InputPixelType = itk.UC
InputImageType = itk.Image[InputPixelType, Dimension]
# Loading
reader = itk.ImageFileReader[InputImageType].New()
reader.SetFileName(input_filename)
# Apply a filter: Thresholding
thresholdFilter = itk.BinaryThresholdImageFilter[InputImageType,InputImageType].New()
thresholdFilter.SetInput(reader.GetOutput())
thresholdFilter.SetUpperThreshold(200)
thresholdFilter.SetOutsideValue(1)
thresholdFilter.SetInsideValue(0)
StructuringElementType = itk.FlatStructuringElement[Dimension]
structuringElement = StructuringElementType.Ball(3)
# Apply Opening (erosion and dilation)
erodeFilter = itk.BinaryErodeImageFilter[InputImageType,InputImageType,StructuringElementType].New()
erodeFilter.SetInput(thresholdFilter.GetOutput())
erodeFilter.SetKernel(structuringElement)
erodeFilter.SetForegroundValue(1)
dilateFilter = itk.BinaryDilateImageFilter[InputImageType,InputImageType,StructuringElementType].New()
dilateFilter.SetInput(erodeFilter.GetOutput())
dilateFilter.SetKernel(structuringElement)
dilateFilter.SetForegroundValue(1)
dilateFilter.Update()
# Plot the input and output images.
plt.figure(figsize=(12, 4), dpi=50)
plt.subplot(1,3,1),plt.title("original"),plt.imshow(itk_image, cmap="gray")
plt.subplot(1,3,2),plt.title("threshold"),plt.imshow(thresholdFilter.GetOutput())
plt.subplot(1,3,3),plt.title("output"),plt.imshow(dilateFilter.GetOutput())
plt.savefig("./img/jenga_opening2d.png") | # Read input image
itk_image = itk.imread(input_filename)
# Setting for input image (Grayscale) |
Circle.ts | import { Vector, IVector } from "./Vector";
export interface ICircle {
radius: number;
pos: IVector;
}
export class | {
public radius: number;
public pos: IVector;
constructor(radius: number, pos: IVector = new Vector(0, 0)) {
this.radius = radius;
this.pos = pos;
}
public isCollidingWith(circle: ICircle): boolean {
let a = this.radius + circle.radius;
let x = this.pos.x - circle.pos.x;
let y = this.pos.y - circle.pos.y;
return a > Math.sqrt((x * x) + (y * y));
}
} | Circle |
librenms.go | package main
import (
"fmt"
"github.com/lfkeitel/yobot/librenms"
)
const (
address = ""
authToken = ""
hostname = ""
)
func main() | {
c, err := librenms.NewClient(address)
if err != nil {
fmt.Println(err)
return
}
c.SkipTLSVerify()
if err := c.Login(authToken); err != nil {
fmt.Println(err)
return
}
fmt.Println("Connected")
dev, err := c.GetDevice(hostname)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("%#v\n", dev)
} |
|
autonomia_aviao.py | def autonomia(carga):
if(carga <= 50000):
return 18000, 19800
elif(carga <= 200000):
return 9000, 9900
else:
return 3000, 3300
carga = int(input())
auto = autonomia(carga)
ax = float(input())
ay = float(input())
bx = float(input())
by = float(input()) | dist = (((bx - ax) ** 2) + ((by - ay) ** 2)) ** 0.5
print(round(dist,2))
if(auto[0] >= dist):
print("SIM")
elif(auto[1] >= dist):
print("TALVEZ")
else:
print("NAO") | |
Actions.tsx | import React from 'react'
import {Palette} from '../../types/color'
import Button from '../common/Button'
import UserMenu from './user-menu/UserMenu'
import Toolbar from '../common/Toolbar'
import ToolbarItem from '../common/ToolbarItem'
import DocumentAdd from '../icons/DocumentAdd'
import Trash from '../icons/Trash'
import ExternalLink from '../icons/ExternalLink'
interface Props {
selectedPalette: Palette | null
showSplash: () => void
exportPalette: () => void
deletePalette: () => void
}
const Actions = ({
selectedPalette,
showSplash,
exportPalette,
deletePalette
}: Props) => {
return (
<Toolbar>
<ToolbarItem>
<Button
onClick={showSplash}
toolbar
icon={<DocumentAdd />}
text="New Palette"
/>
</ToolbarItem>
{selectedPalette && (
<>
<ToolbarItem>
<Button
onClick={exportPalette}
toolbar
icon={<ExternalLink />}
text="Export"
/>
</ToolbarItem>
<ToolbarItem>
<Button
onClick={deletePalette}
toolbar
icon={<Trash />} | </ToolbarItem>
</>
)}
<UserMenu />
</Toolbar>
)
}
export default Actions | text="Delete Palette"
/> |
map-collect-i-mod-10-to-i-with-mutex-vec.rs | extern crate rayon_1_0_0; extern crate lolbench_support; use lolbench_support::{criterion_from_env, init_logging}; fn | () { init_logging(); let mut crit = criterion_from_env(); rayon_1_0_0::map_collect::i_mod_10_to_i::with_mutex_vec(&mut crit); } | main |
pv_protection.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("PV Protection", func() {
var (
client clientset.Interface
nameSpace string
err error
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
pvConfig e2epv.PersistentVolumeConfig
pvcConfig e2epv.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
)
f := framework.NewDefaultFramework("pv-protection")
ginkgo.BeforeEach(func() {
client = f.ClientSet
nameSpace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
// Enforce binding only within test space via selector labels
volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace}
selector = metav1.SetAsLabelSelector(volLabel)
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "hostpath-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp/data",
},
},
}
emptyStorageClass := ""
pvcConfig = e2epv.PersistentVolumeClaimConfig{ |
ginkgo.By("Creating a PV")
// make the pv definitions
pv = e2epv.MakePersistentVolume(pvConfig)
// create the PV
pv, err = client.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating PV")
ginkgo.By("Waiting for PV to enter phase Available")
framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))
ginkgo.By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While getting PV status")
framework.ExpectEqual(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil), true, "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers)
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := e2epv.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() {
ginkgo.By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
})
ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() {
ginkgo.By("Creating a PVC")
pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, nameSpace)
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating PVC")
ginkgo.By("Waiting for PVC to become Bound")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV")
ginkgo.By("Checking that the PV status is Terminating")
pv, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "While checking PV status")
framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil)
ginkgo.By("Deleting the PVC that is bound to the PV")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC")
ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
})
}) | Selector: selector,
StorageClassName: &emptyStorageClass,
} |
forms.py | import flask_wtf
import wtforms
class LoginForm(flask_wtf.Form):
"""Accepts a nickname and a room."""
name = wtforms.fields.StringField('Name', validators=[wtforms.validators.Required()])
#room = StringField('Room', validators=[Required()]) | submit = wtforms.fields.SubmitField('Start') |
|
OptionsPaneOptions.tsx | import React, { useMemo, useState } from 'react';
import { FieldConfigSource, GrafanaTheme, PanelData, PanelPlugin, SelectableValue } from '@grafana/data';
import { DashboardModel, PanelModel } from '../../state';
import { CustomScrollbar, RadioButtonGroup, useStyles } from '@grafana/ui';
import { getPanelFrameCategory } from './getPanelFrameOptions';
import { getVizualizationOptions } from './getVizualizationOptions';
import { css } from 'emotion';
import { FilterInput } from 'app/core/components/FilterInput/FilterInput';
import { OptionsPaneCategory } from './OptionsPaneCategory';
import { getFieldOverrideCategories } from './getFieldOverrideElements';
import { OptionsPaneCategoryDescriptor } from './OptionsPaneCategoryDescriptor';
import { OptionSearchEngine } from './state/OptionSearchEngine';
import { AngularPanelOptions } from './AngularPanelOptions';
import { getRecentOptions } from './state/getRecentOptions';
interface Props {
plugin: PanelPlugin;
panel: PanelModel;
dashboard: DashboardModel;
data?: PanelData;
onFieldConfigsChange: (config: FieldConfigSource) => void;
onPanelOptionsChanged: (options: any) => void;
onPanelConfigChange: (configKey: string, value: any) => void;
}
| const [searchQuery, setSearchQuery] = useState('');
const [listMode, setListMode] = useState(OptionFilter.All);
const styles = useStyles(getStyles);
const [panelFrameOptions, vizOptions, justOverrides] = useMemo(
() => [getPanelFrameCategory(props), getVizualizationOptions(props), getFieldOverrideCategories(props)],
[props]
);
const mainBoxElements: React.ReactNode[] = [];
const isSearching = searchQuery.length > 0;
const optionRadioFilters = useMemo(getOptionRadioFilters, []);
const allOptions = [panelFrameOptions, ...vizOptions];
if (isSearching) {
mainBoxElements.push(renderSearchHits(allOptions, justOverrides, searchQuery));
// If searching for angular panel then we need to add notice that results are limited
if (props.plugin.angularPanelCtrl) {
mainBoxElements.push(
<div className={styles.searchNotice} key="Search notice">
This is an old visualization type that does not support searching all options.
</div>
);
}
} else {
switch (listMode) {
case OptionFilter.All:
// Panel frame options first
mainBoxElements.push(panelFrameOptions.render());
// If angular add those options next
if (props.plugin.angularPanelCtrl) {
mainBoxElements.push(
<AngularPanelOptions plugin={plugin} dashboard={dashboard} panel={panel} key="AngularOptions" />
);
}
// Then add all panel & field defaults
for (const item of vizOptions) {
mainBoxElements.push(item.render());
}
break;
case OptionFilter.Overrides:
for (const override of justOverrides) {
mainBoxElements.push(override.render());
}
break;
case OptionFilter.Recent:
mainBoxElements.push(
<OptionsPaneCategory id="Recent options" title="Recent options" key="Recent options" forceOpen={1}>
{getRecentOptions(allOptions).map((item) => item.render())}
</OptionsPaneCategory>
);
break;
}
}
return (
<div className={styles.wrapper}>
<div className={styles.formBox}>
<div className={styles.formRow}>
<FilterInput width={0} value={searchQuery} onChange={setSearchQuery} placeholder={'Search options'} />
</div>
{!isSearching && (
<div className={styles.formRow}>
<RadioButtonGroup options={optionRadioFilters} value={listMode} fullWidth onChange={setListMode} />
</div>
)}
</div>
<div className={styles.scrollWrapper}>
<CustomScrollbar autoHeightMin="100%">
<div className={styles.mainBox}>{mainBoxElements}</div>
{!isSearching && listMode === OptionFilter.All && (
<div className={styles.overridesBox}>{justOverrides.map((override) => override.render())}</div>
)}
</CustomScrollbar>
</div>
</div>
);
};
function getOptionRadioFilters(): Array<SelectableValue<OptionFilter>> {
return [
{ label: OptionFilter.All, value: OptionFilter.All },
{ label: OptionFilter.Recent, value: OptionFilter.Recent },
{ label: OptionFilter.Overrides, value: OptionFilter.Overrides },
];
}
export enum OptionFilter {
All = 'All',
Overrides = 'Overrides',
Recent = 'Recent',
}
function renderSearchHits(
allOptions: OptionsPaneCategoryDescriptor[],
overrides: OptionsPaneCategoryDescriptor[],
searchQuery: string
) {
const engine = new OptionSearchEngine(allOptions, overrides);
const { optionHits, totalCount, overrideHits } = engine.search(searchQuery);
return (
<div key="search results">
<OptionsPaneCategory
id="Found options"
title={`Matched ${optionHits.length}/${totalCount} options`}
key="Normal options"
forceOpen={1}
>
{optionHits.map((hit) => hit.render(true))}
</OptionsPaneCategory>
{overrideHits.map((override) => override.render(true))}
</div>
);
}
const getStyles = (theme: GrafanaTheme) => ({
wrapper: css`
height: 100%;
display: flex;
flex-direction: column;
flex: 1 1 0;
`,
searchBox: css`
display: flex;
flex-direction: column;
min-height: 0;
`,
formRow: css`
margin-bottom: ${theme.spacing.sm};
`,
formBox: css`
padding: ${theme.spacing.sm};
background: ${theme.colors.bg1};
border: 1px solid ${theme.colors.border1};
border-bottom: none;
`,
closeButton: css`
margin-left: ${theme.spacing.sm};
`,
searchHits: css`
padding: ${theme.spacing.sm} ${theme.spacing.sm} 0 ${theme.spacing.sm};
`,
scrollWrapper: css`
flex-grow: 1;
min-height: 0;
`,
searchNotice: css`
font-size: ${theme.typography.size.sm};
color: ${theme.colors.textWeak};
padding: ${theme.spacing.sm};
text-align: center;
`,
mainBox: css`
background: ${theme.colors.bg1};
margin-bottom: ${theme.spacing.md};
border: 1px solid ${theme.colors.border1};
border-top: none;
`,
overridesBox: css`
background: ${theme.colors.bg1};
border: 1px solid ${theme.colors.border1};
margin-bottom: ${theme.spacing.md};
`,
}); | export const OptionsPaneOptions: React.FC<Props> = (props) => {
const { plugin, dashboard, panel } = props; |
api_meta.go | /*
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document.
API version: 1.0.9-5517
Contact: [email protected]
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package intersight
import (
"bytes"
_context "context"
_ioutil "io/ioutil"
_nethttp "net/http"
_neturl "net/url"
"strings"
)
// Linger please
var (
_ _context.Context
)
// MetaApiService MetaApi service
type MetaApiService service
type ApiDeleteMetaDefinitionRequest struct {
ctx _context.Context
ApiService *MetaApiService
moid string
}
func (r ApiDeleteMetaDefinitionRequest) Execute() (*_nethttp.Response, error) {
return r.ApiService.DeleteMetaDefinitionExecute(r)
}
/*
DeleteMetaDefinition Delete a 'meta.Definition' resource.
@param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
@param moid The unique Moid identifier of a resource instance.
@return ApiDeleteMetaDefinitionRequest
*/
func (a *MetaApiService) DeleteMetaDefinition(ctx _context.Context, moid string) ApiDeleteMetaDefinitionRequest {
return ApiDeleteMetaDefinitionRequest{
ApiService: a,
ctx: ctx,
moid: moid,
}
}
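// A minimal sketch of the request-builder pattern used throughout this file;
// apiClient, ctx, and moid are assumed to already be in scope:
//
//	resp, err := apiClient.MetaApi.DeleteMetaDefinition(ctx, moid).Execute()
//	if err != nil {
//		// resp still carries the raw *http.Response for inspection
//	}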
// Execute executes the request
func (a *MetaApiService) DeleteMetaDefinitionExecute(r ApiDeleteMetaDefinitionRequest) (*_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodDelete
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MetaApiService.DeleteMetaDefinition")
if err != nil {
return nil, GenericOpenAPIError{error: err.Error()}
}
localVarPath := localBasePath + "/api/v1/meta/Definitions/{Moid}"
localVarPath = strings.Replace(localVarPath, "{"+"Moid"+"}", _neturl.PathEscape(parameterToString(r.moid, "")), -1)
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
// to determine the Content-Type header
localVarHTTPContentTypes := []string{}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
return localVarHTTPResponse, err
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
return localVarHTTPResponse, err
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
if localVarHTTPResponse.StatusCode == 400 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarHTTPResponse, newErr
}
newErr.model = v | }
if localVarHTTPResponse.StatusCode == 401 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarHTTPResponse, newErr
}
newErr.model = v
return localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 403 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarHTTPResponse, newErr
}
newErr.model = v
return localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 404 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarHTTPResponse, newErr
}
newErr.model = v
return localVarHTTPResponse, newErr
}
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarHTTPResponse, newErr
}
newErr.model = v
return localVarHTTPResponse, newErr
}
return localVarHTTPResponse, nil
}
type ApiGetMetaDefinitionByMoidRequest struct {
ctx _context.Context
ApiService *MetaApiService
moid string
}
func (r ApiGetMetaDefinitionByMoidRequest) Execute() (MetaDefinition, *_nethttp.Response, error) {
return r.ApiService.GetMetaDefinitionByMoidExecute(r)
}
/*
GetMetaDefinitionByMoid Read a 'meta.Definition' resource.
@param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
@param moid The unique Moid identifier of a resource instance.
@return ApiGetMetaDefinitionByMoidRequest
*/
func (a *MetaApiService) GetMetaDefinitionByMoid(ctx _context.Context, moid string) ApiGetMetaDefinitionByMoidRequest {
return ApiGetMetaDefinitionByMoidRequest{
ApiService: a,
ctx: ctx,
moid: moid,
}
}
// Execute executes the request
// @return MetaDefinition
func (a *MetaApiService) GetMetaDefinitionByMoidExecute(r ApiGetMetaDefinitionByMoidRequest) (MetaDefinition, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
localVarReturnValue MetaDefinition
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MetaApiService.GetMetaDefinitionByMoid")
if err != nil {
return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}
}
localVarPath := localBasePath + "/api/v1/meta/Definitions/{Moid}"
localVarPath = strings.Replace(localVarPath, "{"+"Moid"+"}", _neturl.PathEscape(parameterToString(r.moid, "")), -1)
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
// to determine the Content-Type header
localVarHTTPContentTypes := []string{}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{"application/json", "text/csv", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return localVarReturnValue, nil, err
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
return localVarReturnValue, localVarHTTPResponse, err
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
return localVarReturnValue, localVarHTTPResponse, err
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
if localVarHTTPResponse.StatusCode == 400 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 401 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 403 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 404 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr := GenericOpenAPIError{
body: localVarBody,
error: err.Error(),
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
return localVarReturnValue, localVarHTTPResponse, nil
}
type ApiGetMetaDefinitionListRequest struct {
ctx _context.Context
ApiService *MetaApiService
filter *string
orderby *string
top *int32
skip *int32
select_ *string
expand *string
apply *string
count *bool
inlinecount *string
at *string
tags *string
}
// Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).
func (r ApiGetMetaDefinitionListRequest) Filter(filter string) ApiGetMetaDefinitionListRequest {
r.filter = &filter
return r
}
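// A hedged example (the property name and literal are illustrative only):
//
//	req = req.Filter("Name eq 'example'")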
// Determines what properties are used to sort the collection of resources.
func (r ApiGetMetaDefinitionListRequest) Orderby(orderby string) ApiGetMetaDefinitionListRequest {
r.orderby = &orderby
return r
}
// Specifies the maximum number of resources to return in the response.
func (r ApiGetMetaDefinitionListRequest) Top(top int32) ApiGetMetaDefinitionListRequest {
r.top = &top
return r
}
// Specifies the number of resources to skip in the response.
func (r ApiGetMetaDefinitionListRequest) Skip(skip int32) ApiGetMetaDefinitionListRequest {
r.skip = &skip
return r
}
// Specifies a subset of properties to return.
func (r ApiGetMetaDefinitionListRequest) Select_(select_ string) ApiGetMetaDefinitionListRequest {
r.select_ = &select_
return r
}
// Specify additional attributes or related resources to return in addition to the primary resources.
func (r ApiGetMetaDefinitionListRequest) Expand(expand string) ApiGetMetaDefinitionListRequest {
r.expand = &expand
return r
}
// Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.
func (r ApiGetMetaDefinitionListRequest) Apply(apply string) ApiGetMetaDefinitionListRequest {
r.apply = &apply
return r
}
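// A hedged example combining both transformations (the grouping property is
// illustrative only):
//
//	req = req.Apply("groupby((Owner), aggregate($count as Count))")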
// The $count query specifies the service should return the count of the matching resources, instead of returning the resources.
func (r ApiGetMetaDefinitionListRequest) Count(count bool) ApiGetMetaDefinitionListRequest {
r.count = &count
return r
}
// The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.
func (r ApiGetMetaDefinitionListRequest) Inlinecount(inlinecount string) ApiGetMetaDefinitionListRequest {
r.inlinecount = &inlinecount
return r
}
// Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.
func (r ApiGetMetaDefinitionListRequest) At(at string) ApiGetMetaDefinitionListRequest {
r.at = &at
return r
}
// The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.
func (r ApiGetMetaDefinitionListRequest) Tags(tags string) ApiGetMetaDefinitionListRequest {
r.tags = &tags
return r
}
func (r ApiGetMetaDefinitionListRequest) Execute() (MetaDefinitionResponse, *_nethttp.Response, error) {
return r.ApiService.GetMetaDefinitionListExecute(r)
}
/*
GetMetaDefinitionList Read a list of 'meta.Definition' resources.
@param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
@return ApiGetMetaDefinitionListRequest
*/
func (a *MetaApiService) GetMetaDefinitionList(ctx _context.Context) ApiGetMetaDefinitionListRequest {
return ApiGetMetaDefinitionListRequest{
ApiService: a,
ctx: ctx,
}
}
// Execute executes the request
// @return MetaDefinitionResponse
func (a *MetaApiService) GetMetaDefinitionListExecute(r ApiGetMetaDefinitionListRequest) (MetaDefinitionResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
localVarReturnValue MetaDefinitionResponse
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MetaApiService.GetMetaDefinitionList")
if err != nil {
return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}
}
localVarPath := localBasePath + "/api/v1/meta/Definitions"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
if r.filter != nil {
localVarQueryParams.Add("$filter", parameterToString(*r.filter, ""))
}
if r.orderby != nil {
localVarQueryParams.Add("$orderby", parameterToString(*r.orderby, ""))
}
if r.top != nil {
localVarQueryParams.Add("$top", parameterToString(*r.top, ""))
}
if r.skip != nil {
localVarQueryParams.Add("$skip", parameterToString(*r.skip, ""))
}
if r.select_ != nil {
localVarQueryParams.Add("$select", parameterToString(*r.select_, ""))
}
if r.expand != nil {
localVarQueryParams.Add("$expand", parameterToString(*r.expand, ""))
}
if r.apply != nil {
localVarQueryParams.Add("$apply", parameterToString(*r.apply, ""))
}
if r.count != nil {
localVarQueryParams.Add("$count", parameterToString(*r.count, ""))
}
if r.inlinecount != nil {
localVarQueryParams.Add("$inlinecount", parameterToString(*r.inlinecount, ""))
}
if r.at != nil {
localVarQueryParams.Add("at", parameterToString(*r.at, ""))
}
if r.tags != nil {
localVarQueryParams.Add("tags", parameterToString(*r.tags, ""))
}
// to determine the Content-Type header
localVarHTTPContentTypes := []string{}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{"application/json", "text/csv", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return localVarReturnValue, nil, err
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
return localVarReturnValue, localVarHTTPResponse, err
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
return localVarReturnValue, localVarHTTPResponse, err
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
if localVarHTTPResponse.StatusCode == 400 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 401 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 403 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
if localVarHTTPResponse.StatusCode == 404 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHTTPResponse, newErr
}
err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr := GenericOpenAPIError{
body: localVarBody,
error: err.Error(),
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
return localVarReturnValue, localVarHTTPResponse, nil
} | return localVarHTTPResponse, newErr |
is-defined.ts | export function isDefined<T>(argument: T | undefined): argument is T {
return argument !== undefined
}
export function | (val) {
return Number.isNaN(val) || // val === NaN is always false; NaN never equals itself
val === 'NaN' ||
val === undefined ||
val == null ||
val.length <= 0
? true
: false
}
| isEmpty |
lifecycle_test.go | package support
import (
"bytes"
"errors"
"io"
"io/ioutil"
"reflect"
"sort"
"strings"
"testing"
"time"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/watch"
deployapi "github.com/openshift/origin/pkg/deploy/api"
deploytest "github.com/openshift/origin/pkg/deploy/api/test"
deployv1 "github.com/openshift/origin/pkg/deploy/api/v1"
deployutil "github.com/openshift/origin/pkg/deploy/util"
"github.com/openshift/origin/pkg/util/namer"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
_ "github.com/openshift/origin/pkg/api/install"
)
func nowFunc() *unversioned.Time {
return &unversioned.Time{Time: time.Now().Add(-5 * time.Second)}
}
func newTestClient(config *deployapi.DeploymentConfig) *testclient.Fake {
client := &testclient.Fake{}
// when creating a lifecycle pod, we query the deployer pod for the start time to
// calculate the active deadline seconds for the lifecycle pod.
client.AddReactor("get", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
action := a.(testclient.GetAction)
if strings.HasPrefix(action.GetName(), config.Name) && strings.HasSuffix(action.GetName(), "-deploy") {
return true, &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: "deployer",
},
Status: kapi.PodStatus{
StartTime: nowFunc(),
},
}, nil
}
return true, nil, nil
})
return client
}
func TestHookExecutor_executeExecNewCreatePodFailure(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
dc := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(dc, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
client := newTestClient(dc)
client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("could not create the pod")
})
executor := &HookExecutor{
pods: client,
decoder: kapi.Codecs.UniversalDecoder(),
}
if err := executor.executeExecNewPod(hook, deployment, "hook", "test"); err == nil {
t.Fatalf("expected an error")
}
}
func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
deployment.Spec.Template.Spec.NodeSelector = map[string]string{"labelKey1": "labelValue1", "labelKey2": "labelValue2"}
client := newTestClient(config)
podCreated := make(chan struct{})
var createdPod *kapi.Pod
client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
defer close(podCreated)
action := a.(testclient.CreateAction)
object := action.GetObject()
createdPod = object.(*kapi.Pod)
return true, createdPod, nil
})
podsWatch := watch.NewFake()
client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))
podLogs := &bytes.Buffer{}
// Simulate creation of the lifecycle pod
go func() {
<-podCreated
podsWatch.Add(createdPod)
podCopy, _ := kapi.Scheme.Copy(createdPod)
updatedPod := podCopy.(*kapi.Pod)
updatedPod.Status.Phase = kapi.PodSucceeded
podsWatch.Modify(updatedPod)
}()
executor := &HookExecutor{
pods: client,
out: podLogs,
decoder: kapi.Codecs.UniversalDecoder(),
getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("test")), nil
},
}
err := executor.executeExecNewPod(hook, deployment, "hook", "test")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := "--> test: Running hook pod ...\ntest--> test: Success\n", podLogs.String(); e != a {
t.Fatalf("expected pod logs to be %q, got %q", e, a)
}
if e, a := deployment.Spec.Template.Spec.NodeSelector, createdPod.Spec.NodeSelector; !reflect.DeepEqual(e, a) {
t.Fatalf("expected pod NodeSelector %v, got %v", e, a)
}
if createdPod.Spec.ActiveDeadlineSeconds == nil {
t.Fatalf("expected ActiveDeadlineSeconds to be set on the deployment hook executor pod")
}
if *createdPod.Spec.ActiveDeadlineSeconds >= deployapi.MaxDeploymentDurationSeconds {
t.Fatalf("expected ActiveDeadlineSeconds %+v to be lower than %+v", *createdPod.Spec.ActiveDeadlineSeconds, deployapi.MaxDeploymentDurationSeconds)
}
}
func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
client := newTestClient(config)
podCreated := make(chan struct{})
var createdPod *kapi.Pod
client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
defer close(podCreated)
action := a.(testclient.CreateAction)
object := action.GetObject()
createdPod = object.(*kapi.Pod)
return true, createdPod, nil
})
podsWatch := watch.NewFake()
client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))
go func() {
<-podCreated
podsWatch.Add(createdPod)
podCopy, _ := kapi.Scheme.Copy(createdPod)
updatedPod := podCopy.(*kapi.Pod)
updatedPod.Status.Phase = kapi.PodFailed
podsWatch.Modify(updatedPod)
}()
executor := &HookExecutor{
pods: client,
out: ioutil.Discard,
decoder: kapi.Codecs.UniversalDecoder(),
getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("test")), nil
},
}
err := executor.executeExecNewPod(hook, deployment, "hook", "test")
if err == nil {
t.Fatalf("expected an error, got none")
}
t.Logf("got expected error: %T", err)
}
func TestHookExecutor_makeHookPodInvalidContainerRef(t *testing.T) {
deployerPod := &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: "deployer",
},
Status: kapi.PodStatus{
StartTime: nowFunc(),
},
}
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "undefined",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
_, err := makeHookPod(hook, deployment, deployerPod, &config.Spec.Strategy, "hook")
if err == nil {
t.Fatalf("expected an error")
}
}
func TestHookExecutor_makeHookPod(t *testing.T) {
deploymentName := "deployment-1"
deploymentNamespace := "test"
maxDeploymentDurationSeconds := deployapi.MaxDeploymentDurationSeconds
gracePeriod := int64(10)
deployerPod := &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: "deployer",
},
Status: kapi.PodStatus{
StartTime: nowFunc(),
},
}
tests := []struct {
name string
hook *deployapi.LifecycleHook
expected *kapi.Pod
strategyLabels map[string]string
strategyAnnotations map[string]string
}{
{
name: "overrides",
hook: &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
Command: []string{"overridden"},
Env: []kapi.EnvVar{
{
Name: "name",
Value: "value",
},
{
Name: "ENV1",
Value: "overridden",
},
},
Volumes: []string{"volume-2"},
},
},
expected: &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: namer.GetPodName(deploymentName, "hook"),
Labels: map[string]string{
deployapi.DeploymentPodTypeLabel: "hook",
deployapi.DeployerPodForDeploymentLabel: deploymentName,
},
Annotations: map[string]string{
deployapi.DeploymentAnnotation: deploymentName,
},
},
Spec: kapi.PodSpec{
RestartPolicy: kapi.RestartPolicyNever,
Volumes: []kapi.Volume{
{
Name: "volume-2",
},
},
ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
Containers: []kapi.Container{
{
Name: "lifecycle",
Image: "registry:8080/repo1:ref1",
Command: []string{"overridden"},
Env: []kapi.EnvVar{
{
Name: "name",
Value: "value",
},
{
Name: "ENV1",
Value: "overridden",
},
{
Name: "OPENSHIFT_DEPLOYMENT_NAME",
Value: deploymentName,
},
{
Name: "OPENSHIFT_DEPLOYMENT_NAMESPACE",
Value: deploymentNamespace,
},
},
Resources: kapi.ResourceRequirements{
Limits: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse("10"),
kapi.ResourceMemory: resource.MustParse("10M"),
},
},
VolumeMounts: []kapi.VolumeMount{
{
Name: "volume-2",
ReadOnly: true,
MountPath: "/mnt/volume-2",
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
ImagePullSecrets: []kapi.LocalObjectReference{
{
Name: "secret-1",
},
},
},
},
},
{
name: "no overrides",
hook: &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
},
expected: &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: namer.GetPodName(deploymentName, "hook"),
Labels: map[string]string{
deployapi.DeploymentPodTypeLabel: "hook",
deployapi.DeployerPodForDeploymentLabel: deploymentName,
},
Annotations: map[string]string{
deployapi.DeploymentAnnotation: deploymentName,
},
},
Spec: kapi.PodSpec{
RestartPolicy: kapi.RestartPolicyNever,
ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
Containers: []kapi.Container{
{
Name: "lifecycle",
Image: "registry:8080/repo1:ref1",
Env: []kapi.EnvVar{
{
Name: "ENV1",
Value: "VAL1",
},
{
Name: "OPENSHIFT_DEPLOYMENT_NAME",
Value: deploymentName,
},
{
Name: "OPENSHIFT_DEPLOYMENT_NAMESPACE",
Value: deploymentNamespace,
},
},
Resources: kapi.ResourceRequirements{
Limits: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse("10"),
kapi.ResourceMemory: resource.MustParse("10M"),
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
ImagePullSecrets: []kapi.LocalObjectReference{
{
Name: "secret-1",
},
},
},
},
},
{
name: "labels and annotations",
hook: &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
},
expected: &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: namer.GetPodName(deploymentName, "hook"),
Labels: map[string]string{
deployapi.DeploymentPodTypeLabel: "hook",
deployapi.DeployerPodForDeploymentLabel: deploymentName,
"label1": "value1",
},
Annotations: map[string]string{
deployapi.DeploymentAnnotation: deploymentName,
"annotation2": "value2",
},
},
Spec: kapi.PodSpec{
RestartPolicy: kapi.RestartPolicyNever,
ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
Containers: []kapi.Container{
{
Name: "lifecycle",
Image: "registry:8080/repo1:ref1",
Env: []kapi.EnvVar{
{
Name: "ENV1",
Value: "VAL1",
},
{
Name: "OPENSHIFT_DEPLOYMENT_NAME",
Value: deploymentName,
},
{
Name: "OPENSHIFT_DEPLOYMENT_NAMESPACE",
Value: deploymentNamespace,
},
},
Resources: kapi.ResourceRequirements{
Limits: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse("10"),
kapi.ResourceMemory: resource.MustParse("10M"),
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
ImagePullSecrets: []kapi.LocalObjectReference{
{
Name: "secret-1",
},
},
},
},
strategyLabels: map[string]string{
deployapi.DeployerPodForDeploymentLabel: "ignoredValue",
"label1": "value1",
},
strategyAnnotations: map[string]string{"annotation2": "value2"},
},
}
for _, test := range tests {
t.Logf("evaluating test: %s", test.name)
config, deployment := deployment("deployment", "test", test.strategyLabels, test.strategyAnnotations)
pod, err := makeHookPod(test.hook, deployment, deployerPod, &config.Spec.Strategy, "hook")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
for _, c := range pod.Spec.Containers {
sort.Sort(envByNameAsc(c.Env))
}
for _, c := range test.expected.Spec.Containers {
sort.Sort(envByNameAsc(c.Env))
}
if *pod.Spec.ActiveDeadlineSeconds >= *test.expected.Spec.ActiveDeadlineSeconds {
t.Errorf("expected pod ActiveDeadlineSeconds %+v to be lower than %+v", *pod.Spec.ActiveDeadlineSeconds, *test.expected.Spec.ActiveDeadlineSeconds)
}
		// Copy the ActiveDeadlineSeconds; the deployer pod has already been
		// running for 5 seconds, so the exact value cannot be asserted.
test.expected.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
if !kapi.Semantic.DeepEqual(pod, test.expected) {
t.Errorf("unexpected pod diff: %v", diff.ObjectDiff(pod, test.expected))
}
}
}
func TestHookExecutor_makeHookPodRestart(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyRetry,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
deployerPod := &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: "deployer",
},
Status: kapi.PodStatus{
StartTime: nowFunc(),
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
pod, err := makeHookPod(hook, deployment, deployerPod, &config.Spec.Strategy, "hook")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := kapi.RestartPolicyOnFailure, pod.Spec.RestartPolicy; e != a {
t.Errorf("expected pod restart policy %s, got %s", e, a)
}
}
func TestAcceptNewlyObservedReadyPods_scenarios(t *testing.T) {
scenarios := []struct {
name string
// any pods which are previously accepted
acceptedPods []string
// the current pods which will be in the store; pod name -> ready
currentPods map[string]bool
// whether or not the scenario should result in acceptance
accepted bool
}{
{
name: "all ready, none previously accepted",
accepted: true,
acceptedPods: []string{},
currentPods: map[string]bool{
"pod-1": true,
"pod-2": true,
},
},
{
name: "some ready, none previously accepted",
accepted: false,
acceptedPods: []string{},
currentPods: map[string]bool{
"pod-1": false,
"pod-2": true,
},
},
{
name: "previously accepted has become unready, new are ready",
accepted: true,
acceptedPods: []string{"pod-1"},
currentPods: map[string]bool{
// this pod should be ignored because it was previously accepted
"pod-1": false,
"pod-2": true,
},
},
{
name: "previously accepted all ready, new is unready",
accepted: false,
acceptedPods: []string{"pod-1"},
currentPods: map[string]bool{
"pod-1": true,
"pod-2": false,
},
},
}
for _, s := range scenarios {
t.Logf("running scenario: %s", s.name)
// Populate the store with real pods with the desired ready condition.
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
for podName, ready := range s.currentPods {
status := kapi.ConditionTrue
if !ready {
status = kapi.ConditionFalse
}
pod := &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: podName,
},
Status: kapi.PodStatus{
Conditions: []kapi.PodCondition{
{
Type: kapi.PodReady,
Status: status,
},
},
},
}
store.Add(pod)
}
// Set up accepted pods for the scenario.
acceptedPods := sets.NewString()
for _, podName := range s.acceptedPods {
acceptedPods.Insert(podName)
}
acceptorLogs := &bytes.Buffer{}
acceptor := &AcceptNewlyObservedReadyPods{
out: acceptorLogs,
timeout: 10 * time.Millisecond,
interval: 1 * time.Millisecond,
getDeploymentPodStore: func(deployment *kapi.ReplicationController) (cache.Store, chan struct{}) {
return store, make(chan struct{})
},
acceptedPods: acceptedPods,
}
deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
deployment.Spec.Replicas = 1
acceptor.out = &bytes.Buffer{}
err := acceptor.Accept(deployment)
if s.accepted {
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
} else {
if err == nil {
t.Fatalf("expected an error")
}
t.Logf("got expected error: %s", err)
}
}
}
func deployment(name, namespace string, strategyLabels, strategyAnnotations map[string]string) (*deployapi.DeploymentConfig, *kapi.ReplicationController) {
config := &deployapi.DeploymentConfig{
ObjectMeta: kapi.ObjectMeta{
Name: name,
Namespace: namespace,
},
Status: deployapi.DeploymentConfigStatus{
LatestVersion: 1,
},
Spec: deployapi.DeploymentConfigSpec{
Replicas: 1,
Selector: map[string]string{"a": "b"},
Strategy: deployapi.DeploymentStrategy{
Type: deployapi.DeploymentStrategyTypeRecreate,
Resources: kapi.ResourceRequirements{
Limits: kapi.ResourceList{
kapi.ResourceName(kapi.ResourceCPU): resource.MustParse("10"),
kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
},
},
Labels: strategyLabels,
Annotations: strategyAnnotations,
},
Template: &kapi.PodTemplateSpec{
Spec: kapi.PodSpec{
Containers: []kapi.Container{
{
Name: "container1",
Image: "registry:8080/repo1:ref1",
Env: []kapi.EnvVar{
{
Name: "ENV1",
Value: "VAL1",
},
},
ImagePullPolicy: kapi.PullIfNotPresent,
Resources: kapi.ResourceRequirements{
Limits: kapi.ResourceList{
kapi.ResourceCPU: resource.MustParse("10"),
kapi.ResourceMemory: resource.MustParse("10M"),
},
},
VolumeMounts: []kapi.VolumeMount{
{
Name: "volume-2",
ReadOnly: true,
MountPath: "/mnt/volume-2",
},
},
},
{
Name: "container2",
Image: "registry:8080/repo1:ref2",
ImagePullPolicy: kapi.PullIfNotPresent,
},
},
Volumes: []kapi.Volume{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
RestartPolicy: kapi.RestartPolicyAlways,
DNSPolicy: kapi.DNSClusterFirst,
ImagePullSecrets: []kapi.LocalObjectReference{
{
Name: "secret-1",
},
},
},
ObjectMeta: kapi.ObjectMeta{
Labels: map[string]string{"a": "b"},
},
},
},
}
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
deployment.Namespace = namespace
return config, deployment
}
type envByNameAsc []kapi.EnvVar
func (a envByNameAsc) Len() int {
return len(a)
}
func (a envByNameAsc) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a envByNameAsc) Less(i, j int) bool {
	return a[i].Name < a[j].Name
}
test_stream_xep_0047.py
import unittest
from slixmpp.test import SlixTest
class TestInBandByteStreams(SlixTest):
def setUp(self):
self.stream_start(plugins=['xep_0047', 'xep_0030'])
def tearDown(self):
self.stream_close()
def testOpenStream(self):
"""Test requesting a stream, successfully"""
events = []
def on_stream_start(stream):
events.append('ibb_stream_start')
self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
sid='testing')
self.send("""
<iq type="set" to="tester@localhost/receiver" id="1">
<open xmlns="http://jabber.org/protocol/ibb"
sid="testing"
block-size="4096"
stanza="iq" />
</iq>
""")
self.recv("""
<iq type="result" id="1"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
self.assertEqual(events, ['ibb_stream_start'])
    def testAsyncOpenStream(self):
        """Test requesting a stream asynchronously."""
events = set()
def on_stream_start(stream):
events.add('ibb_stream_start')
def stream_callback(iq):
events.add('callback')
self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
sid='testing',
callback=stream_callback)
self.send("""
<iq type="set" to="tester@localhost/receiver" id="1">
<open xmlns="http://jabber.org/protocol/ibb"
sid="testing"
block-size="4096"
stanza="iq" />
</iq>
""")
self.recv("""
<iq type="result" id="1"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
self.assertEqual(events, {'ibb_stream_start', 'callback'})
    async def testSendData(self):
"""Test sending data over an in-band bytestream."""
streams = []
data = []
def on_stream_start(stream):
streams.append(stream)
def on_stream_data(d):
data.append(d['data'])
self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
self.xmpp.add_event_handler('ibb_stream_data', on_stream_data)
self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
sid='testing')
self.send("""
<iq type="set" to="tester@localhost/receiver" id="1">
<open xmlns="http://jabber.org/protocol/ibb"
sid="testing"
block-size="4096"
stanza="iq" />
</iq>
""")
self.recv("""
<iq type="result" id="1"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
stream = streams[0]
# Test sending data out
await stream.send("Testing")
self.send("""
<iq type="set" id="2"
from="tester@localhost"
to="tester@localhost/receiver">
<data xmlns="http://jabber.org/protocol/ibb"
seq="0"
sid="testing">
VGVzdGluZw==
</data>
</iq>
""")
self.recv("""
<iq type="result" id="2"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
# Test receiving data
self.recv("""
<iq type="set" id="A"
to="tester@localhost"
from="tester@localhost/receiver">
<data xmlns="http://jabber.org/protocol/ibb"
seq="0"
sid="testing">
aXQgd29ya3Mh
</data>
</iq>
""")
self.send("""
<iq type="result" id="A"
to="tester@localhost/receiver" />
""")
self.assertEqual(data, [b'it works!'])
suite = unittest.TestLoader().loadTestsFromTestCase(TestInBandByteStreams)
exec.rs
use nu_protocol::{
ast::Call,
engine::{Command, EngineState, Stack},
Category, Example, PipelineData, ShellError, Signature, SyntaxShape,
};
#[derive(Clone)]
pub struct Exec;
impl Command for Exec {
    fn name(&self) -> &str {
"exec"
}
fn signature(&self) -> Signature {
Signature::build("exec")
.required("command", SyntaxShape::String, "the command to execute")
.rest(
"rest",
SyntaxShape::String,
"any additional arguments for the command",
)
.category(Category::System)
}
fn usage(&self) -> &str {
"Execute a command, replacing the current process."
}
fn extra_usage(&self) -> &str {
"Currently supported only on Unix-based systems."
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
_input: PipelineData,
) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
exec(engine_state, stack, call)
}
fn examples(&self) -> Vec<Example> {
vec![
Example {
description: "Execute external 'ps aux' tool",
example: "exec ps aux",
result: None,
},
Example {
description: "Execute 'nautilus'",
example: "exec nautilus",
result: None,
},
]
}
}
#[cfg(unix)]
fn exec(
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
use std::os::unix::process::CommandExt;
use nu_engine::{current_dir, env_to_strings, CallExt};
use nu_protocol::Spanned;
use super::run_external::ExternalCommand;
let name: Spanned<String> = call.req(engine_state, stack, 0)?;
let name_span = name.span;
let args: Vec<Spanned<String>> = call.rest(engine_state, stack, 1)?;
    let cwd = current_dir(engine_state, stack)?;
    let env_vars = env_to_strings(engine_state, stack)?;
let external_command = ExternalCommand {
name,
args,
env_vars,
redirect_stdout: true,
redirect_stderr: false,
};
let mut command = external_command.spawn_simple_command(&cwd.to_string_lossy().to_string())?;
    command.current_dir(cwd);
let err = command.exec(); // this replaces our process, should not return
Err(ShellError::SpannedLabeledError(
"Error on exec".to_string(),
err.to_string(),
name_span,
))
}
#[cfg(not(unix))]
fn exec(
_engine_state: &EngineState,
_stack: &mut Stack,
call: &Call,
) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
Err(ShellError::SpannedLabeledError(
"Error on exec".to_string(),
"exec is not supported on your platform".to_string(),
call.head,
))
}
| name |
Test.ts
import { TestNodeModule } from "test-nm";
export interface Test extends TestNodeModule {
}
strings.py
""" string constants for office module """
fed_type = 'federal'
leg_type = 'legislative'
state_type = 'state'
loc_gov_type = 'local government'
office_type_list = [fed_type, leg_type, state_type, loc_gov_type]
office_id_str = 'Office ID '
office_key = 'office'
prezzo = 'President'
mca = 'Member of County Assembly'
wr = 'Women Representative'
gov = 'Governor'
sen = 'Senator'
metrics-router.ts
/**
* Copyright 2019 the prism authors
* This file is part of the prism library in the Orbs project.
*
* This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.
* The above notice should be included in all copies or substantial portions of the software.
*/
import { Router } from 'express';
import { Storage } from '../storage/storage';
import httpStatusCodes from 'http-status-codes';
import client from 'prom-client';
export function metricsRouter(storage: Storage) {
const router = Router();
// Manual diagnostics
router.get('/api/health/status', async (req, res) => {
try {
const diagnostics = await storage.getDiagnostics();
res.send(diagnostics);
} catch (e) {
      res.sendStatus(httpStatusCodes.INTERNAL_SERVER_ERROR);
}
});
// Prometheus
router.get('/metrics', (req, res) => {
res.set('Content-Type', client.register.contentType);
res.end(client.register.metrics());
});
return router;
}
condition_with_args.py
from dataclasses import dataclass
from typing import List
from tst.types.condition_opcodes import ConditionOpcode
from tst.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ConditionWithArgs(Streamable):
    """
    This structure is used to store parsed CLVM conditions
    Conditions in CLVM have either format of (opcode, var1) or (opcode, var1, var2)
    """
    opcode: ConditionOpcode
    vars: List[bytes]
logical-operations.py
def logic(a, b):
    print('a and b:', a and b)
    print('a or b:', a or b)
    print('not a:', not a)
change.go
package scyllacdc
import (
"context"
"fmt"
"reflect"
"strconv"
"strings"
"github.com/gocql/gocql"
)
// OperationType corresponds to the cdc$operation column in CDC log, and
// describes the type of the operation given row represents.
//
// For a comprehensive explanation of what each operation type means,
// see Scylla documentation about CDC.
type OperationType int8
const (
	PreImage OperationType = iota
	Update
	Insert
	RowDelete
	PartitionDelete
	RangeDeleteStartInclusive
	RangeDeleteStartExclusive
	RangeDeleteEndInclusive
	RangeDeleteEndExclusive
	PostImage
)
// String is needed to implement the fmt.Stringer interface.
func (ot OperationType) String() string {
switch ot {
case PreImage:
return "PREIMAGE"
case Update:
return "UPDATE"
case Insert:
return "INSERT"
case RowDelete:
return "ROW_DELETE"
case PartitionDelete:
return "PARTITION_DELETE"
case RangeDeleteStartInclusive:
return "RANGE_DELETE_START_INCLUSIVE"
case RangeDeleteStartExclusive:
return "RANGE_DELETE_START_EXCLUSIVE"
case RangeDeleteEndInclusive:
return "RANGE_DELETE_END_INCLUSIVE"
case RangeDeleteEndExclusive:
return "RANGE_DELETE_END_EXCLUSIVE"
case PostImage:
return "POSTIMAGE"
default:
return "(wrong OperationType)"
}
}
// Change represents a group of rows from CDC log with the same cdc$stream_id
// and cdc$time timestamp.
type Change struct {
// Corresponds to cdc$stream_id.
StreamID StreamID
// Corresponds to cdc$time.
Time gocql.UUID
// PreImage rows of the group.
PreImage []*ChangeRow
// Delta rows of the group.
Delta []*ChangeRow
// PostImage rows of the group.
PostImage []*ChangeRow
}
// GetCassandraTimestamp returns a timestamp of the operation
// suitable to put as a TIMESTAMP parameter to a DML statement
// (INSERT, UPDATE, DELETE).
func (c *Change) GetCassandraTimestamp() int64 {
return timeuuidToTimestamp(c.Time)
}
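// Illustrative sketch, not part of the original file: replaying a change into
// a hypothetical target table with the original timestamp. The keyspace, the
// table name "ks.replica", and the single int key are assumptions made for
// the example.
func exampleReplicate(session *gocql.Session, c Change) error {
	// Reuse the source operation's timestamp so the replica orders writes
	// the same way the original table did.
	ts := c.GetCassandraTimestamp()
	return session.Query(
		"INSERT INTO ks.replica (pk) VALUES (?) USING TIMESTAMP ?",
		1, ts,
	).Exec()
}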
// ChangeRow corresponds to a single row from the CDC log.
//
// The ChangeRow uses a slightly different representation of values than gocql's
// MapScan in order to faithfully represent nullability of all values:
//
// Scalar types such as int, text etc. are represented by a pointer to
// their counterpart in gocql (in this case, *int and *string). The only
// exception is the blob, which is encoded as []byte slice - if the column
// was nil, then it will contain a nil slice, if the column was not nil but
// just empty, then the resulting slice will be empty, but not nil.
//
// Tuple types are always represented as an []interface{} slice of values
// in this representation (e.g. tuple<int, text> will contain an *int and
// a *string). If the tuple itself was null, then it will be represented
// as a nil []interface{} slice.
//
// Lists and sets are represented as slices of the corresponding type.
// Because lists and sets cannot contain nils, if a value was to be
// represented as a pointer, it will be represented as a value instead.
// For example, list<int> becomes []int, but list<frozen<tuple<int, text>>
// becomes [][]interface{} because the tuple type cannot be flattened.
//
// Maps are represented as map[K]V, where K and V are in the "flattened" form
// as lists and sets.
//
// UDTs are represented as map[string]interface{}, with values fields being
// represented as described here. For example, a UDT with fields (a int,
// b text) will be represented as a map with two values of types (*int)
// and (*string).
//
// For a comprehensive guide on how to interpret data in the CDC log,
// see Scylla documentation about CDC.
type ChangeRow struct {
fieldNameToIdx map[string]int
data []interface{}
colInfos []gocql.ColumnInfo
cdcCols cdcChangeRowCols
}
// Contains columns specific to a change row batch (rows which have
// the same cdc$stream_id and cdc$time)
type cdcChangeBatchCols struct {
streamID []byte
time gocql.UUID
}
// Contains columns specific to a change row, but independent from
// the base table schema.
type cdcChangeRowCols struct {
batchSeqNo int32
operation int8
ttl int64
endOfBatch bool
}
// AtomicChange represents a change to a column of an atomic or a frozen type.
type AtomicChange struct {
// Value contains the scalar value of the column.
// If the column was not changed or was deleted, it will be nil.
//
// Type: T.
Value interface{}
// IsDeleted tells if this column was set to NULL by this change.
IsDeleted bool
}
// ListChange represents a change to a column of a type list<T>.
type ListChange struct {
// AppendedElements contains values appended to the list in the form
// of map from cell timestamps to values.
//
	// For more information about how to interpret it, see "Advanced column
	// types" in the CDC documentation.
//
// Type: map[gocql.UUID]T
AppendedElements interface{}
// RemovedElements contains indices of the removed elements.
//
	// For more information about how to interpret it, see "Advanced column
	// types" in the CDC documentation.
//
// Type: []gocql.UUID
RemovedElements []gocql.UUID
// IsReset tells if the list value was overwritten instead of being
	// appended to or removed from. If it's true, then AppendedElements will
	// contain the new state of the list (which can be NULL).
IsReset bool
}
// SetChange represents a change to a column of type set<T>.
type SetChange struct {
// AddedElements contains a slice of values which were added to the set
// by the operation. If there were any values added, it will contain
// a slice of form []T, where T is gocql's representation of the element
// type.
//
// Type: []T
AddedElements interface{}
// RemovedElements contains a slice of values which were removed from the set
	// by the operation. Like AddedElements, it's either a slice or a nil
// interface.
//
// Please note that if the operation overwrote the old value of the set
// instead of adding/removing elements, this field _will be nil_.
	// Instead, the IsReset field will be set, and AddedElements will contain
// the new state of the set.
//
// Type: []T
RemovedElements interface{}
// IsReset tells if the set value was overwritten instead of being
	// appended to or removed from. If it's true, then AddedElements will
// contain the new state of the set (which can be NULL).
IsReset bool
}
// MapChange represents a change to a column of type map<K, V>.
type MapChange struct {
// AddedElements contains a map of elements which were added to the map
// by the operation.
//
// Type: map[K]V.
AddedElements interface{}
// RemovedElements contains a slice of keys which were removed from the map
// by the operation.
// Please note that if the operation overwrote the old value of the map
// instead of adding/removing elements, this field _will be nil_.
	// Instead, the IsReset field will be set, and AddedElements will contain
// the new state of the map.
//
// Type: []K
RemovedElements interface{}
// IsReset tells if the map value was overwritten instead of being
	// appended to or removed from. If it's true, then AddedElements will
// contain the new state of the map (which can be NULL).
IsReset bool
}
// UDTChange represents a change to a column of a UDT type.
type UDTChange struct {
	// AddedFields contains a map of fields. A non-null value of a field
	// indicates that the field was written to; otherwise it was not written.
AddedFields map[string]interface{}
// RemovedFields contains names of fields which were set to null
// by this operation.
RemovedFields []string
	// RemovedFieldsIndices contains indices of fields which were set to null
// by this operation.
RemovedFieldsIndices []int16
// IsReset tells if the UDT was overwritten instead of only some fields
// being overwritten. If this flag is true, then nil fields in AddedFields
// will mean that those fields should be set to null.
IsReset bool
}
// GetAtomicChange returns a ScalarChange struct for a given column.
// Results are undefined if the column in the base table was not an atomic type.
func (c *ChangeRow) GetAtomicChange(column string) AtomicChange {
v, _ := c.GetValue(column)
isDeleted, _ := c.IsDeleted(column)
return AtomicChange{
Value: v,
IsDeleted: isDeleted,
}
}
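// Illustrative sketch, not part of the original file: reading an atomic int
// column from a delta row. The column name "v" and its int type are
// assumptions made for the example; the pointer form described above is what
// lets us tell a null apart from a zero.
func exampleReadAtomicInt(row *ChangeRow) {
	ch := row.GetAtomicChange("v")
	if ch.IsDeleted {
		fmt.Println("v was set to null")
	} else if v, ok := ch.Value.(*int); ok && v != nil {
		fmt.Println("v was set to", *v)
	} else {
		fmt.Println("v was not changed by this row")
	}
}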
// GetListChange returns a ListChange struct for a given column.
// Results are undefined if the column in the base table was not a list.
func (c *ChangeRow) GetListChange(column string) ListChange {
v, _ := c.GetValue(column)
isDeleted, _ := c.IsDeleted(column)
deletedElements, _ := c.GetDeletedElements(column)
typedDeletedElements, _ := deletedElements.([]gocql.UUID)
return ListChange{
AppendedElements: v,
RemovedElements: typedDeletedElements,
IsReset: isDeleted,
}
}
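// Illustrative sketch, under the same assumptions as the example above:
// interpreting a ListChange for a hypothetical column "l" of type list<int>.
// Appended elements arrive keyed by timeuuid cell timestamps, and removed
// elements are identified by timeuuid indices.
func exampleReadListChange(row *ChangeRow) {
	lc := row.GetListChange("l")
	if lc.IsReset {
		// The whole list was overwritten; AppendedElements is the new state.
		fmt.Println("list overwritten; new state:", lc.AppendedElements)
		return
	}
	if appended, ok := lc.AppendedElements.(map[gocql.UUID]int); ok {
		for ts, v := range appended {
			fmt.Printf("appended %d at cell timestamp %s\n", v, ts)
		}
	}
	for _, idx := range lc.RemovedElements {
		fmt.Println("removed element with index", idx)
	}
}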
// GetSetChange returns a SetChange struct for a given column.
// Results are undefined if the column in the base table was not a set.
func (c *ChangeRow) GetSetChange(column string) SetChange {
v, _ := c.GetValue(column)
isDeleted, _ := c.IsDeleted(column)
deletedElements, _ := c.GetDeletedElements(column)
return SetChange{
AddedElements: v,
RemovedElements: deletedElements,
IsReset: isDeleted,
}
}
// GetMapChange returns a MapChange struct for a given column.
// Results are undefined if the column in the base table was not a map.
func (c *ChangeRow) GetMapChange(column string) MapChange {
v, _ := c.GetValue(column)
isDeleted, _ := c.IsDeleted(column)
deletedElements, _ := c.GetDeletedElements(column)
return MapChange{
AddedElements: v,
RemovedElements: deletedElements,
IsReset: isDeleted,
}
}
// GetUDTChange returns a UDTChange struct for a given column.
// Results are undefined if the column in the base table was not a UDT.
func (c *ChangeRow) GetUDTChange(column string) UDTChange {
v, _ := c.GetValue(column)
typedV, _ := v.(map[string]interface{})
colType, _ := c.GetType(column)
udtType, _ := colType.(gocql.UDTTypeInfo)
isDeleted, _ := c.IsDeleted(column)
deletedElements, _ := c.GetDeletedElements(column)
typedDeletedElements, _ := deletedElements.([]int16)
deletedNames := make([]string, 0, len(typedDeletedElements))
for i, el := range typedDeletedElements {
if i < len(udtType.Elements) {
deletedNames = append(deletedNames, udtType.Elements[el].Name)
}
}
udtC := UDTChange{
AddedFields: typedV,
RemovedFieldsIndices: typedDeletedElements,
RemovedFields: deletedNames,
IsReset: isDeleted,
}
return udtC
}
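// Illustrative sketch: inspecting a UDTChange for a hypothetical UDT column
// "u". Non-nil entries in AddedFields were written by the change, while
// RemovedFields lists fields explicitly set to null.
func exampleReadUDTChange(row *ChangeRow) {
	uc := row.GetUDTChange("u")
	for name, v := range uc.AddedFields {
		if v != nil {
			fmt.Printf("field %s set to %v\n", name, v)
		}
	}
	for _, name := range uc.RemovedFields {
		fmt.Printf("field %s set to null\n", name)
	}
}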
// GetOperation returns the type of operation this change represents.
func (c *ChangeRow) GetOperation() OperationType {
return OperationType(c.cdcCols.operation)
}
// GetTTL returns TTL for the operation, or 0 if no TTL was used.
func (c *ChangeRow) GetTTL() int64 {
return c.cdcCols.ttl
}
// GetValue returns value that was assigned to this specific column.
func (c *ChangeRow) GetValue(columnName string) (interface{}, bool) {
idx, ok := c.fieldNameToIdx[columnName]
if !ok {
return nil, false
}
return c.data[idx], true
}
// IsDeleted returns a boolean indicating if given column was set to null.
// This only works for clustering columns.
func (c *ChangeRow) IsDeleted(columnName string) (bool, bool) {
v, ok := c.GetValue("cdc$deleted_" + columnName)
if !ok {
return false, false
}
vb := v.(*bool)
return vb != nil && *vb, true
}
// GetDeletedElements returns which elements were deleted from the non-atomic column.
// This function works only for non-atomic columns
func (c *ChangeRow) GetDeletedElements(columnName string) (interface{}, bool) {
v, ok := c.GetValue("cdc$deleted_elements_" + columnName)
return v, ok
}
// Columns returns information about data columns in the cdc log table. It contains
// information about all columns - both with and without cdc$ prefix.
func (c *ChangeRow) Columns() []gocql.ColumnInfo {
return c.colInfos
}
// GetType returns gocql's representation of given column type.
func (c *ChangeRow) GetType(columnName string) (gocql.TypeInfo, bool) {
idx, ok := c.fieldNameToIdx[columnName]
if !ok {
return nil, false
}
return c.colInfos[idx].TypeInfo, true
}
// String is needed to implement the fmt.Stringer interface.
func (c *ChangeRow) String() string {
var b strings.Builder
b.WriteString(OperationType(c.cdcCols.operation).String())
b.WriteString(" ")
b.WriteString(strconv.FormatInt(c.cdcCols.ttl, 10))
b.WriteString(" -> {")
first := true
for _, info := range c.colInfos {
v, hasValue := c.GetValue(info.Name)
isDeleted, hasDeleted := c.IsDeleted(info.Name)
deletedElements, hasDeletedElements := c.GetDeletedElements(info.Name)
if !first {
b.WriteString(", ")
}
first = false
b.WriteString(info.Name)
b.WriteString(":")
if hasValue {
b.WriteString(fmt.Sprintf("%v", v))
} else {
b.WriteString("nil")
}
if hasDeleted {
b.WriteString(", cdc$deleted_")
b.WriteString(info.Name)
b.WriteString(":")
b.WriteString(fmt.Sprintf("%t", isDeleted))
}
if hasDeletedElements {
b.WriteString(", cdc$deleted_elements_")
b.WriteString(info.Name)
b.WriteString(":")
b.WriteString(fmt.Sprintf("%v", deletedElements))
}
}
b.WriteString("}")
return b.String()
}
// CreateChangeConsumerInput represents input to the CreateChangeConsumer function.
type CreateChangeConsumerInput struct {
// Name of the table from which the new ChangeConsumer will receive changes.
TableName string
// ID of the stream from which the new ChangeConsumer will receive changes.
StreamID StreamID
ProgressReporter *ProgressReporter
}
// ChangeConsumerFactory is used by the library to instantiate ChangeConsumer
// objects when the new generation starts.
type ChangeConsumerFactory interface {
// Creates a change consumer with given parameters.
//
// If this method returns an error, the library will stop with an error.
CreateChangeConsumer(ctx context.Context, input CreateChangeConsumerInput) (ChangeConsumer, error)
}
// ChangeConsumer processes changes from a single stream of the CDC log.
type ChangeConsumer interface {
// Processes a change from the CDC log associated with the stream of
// the ChangeConsumer. This method is called in a sequential manner for each
// row that appears in the stream.
//
// If this method returns an error, the library will stop with an error.
Consume(ctx context.Context, change Change) error
// Called after all rows from the stream were consumed, and the reader
// is about to switch to a new generation, or stop execution altogether.
//
// If this method returns an error, the library will stop with an error.
End() error
}
// MakeChangeConsumerFactoryFromFunc can be used if your processing is very
// simple and you don't need to keep any per-stream state or save any progress.
// The function supplied as an argument will be shared by all consumers created
// by this factory, and will be called for each change in the CDC log.
//
// Please note that the consumers created by this factory do not perform
// any synchronization on their own when calling supplied function, therefore
// you need to guarantee that calling `f` is thread safe.
func MakeChangeConsumerFactoryFromFunc(f ChangeConsumerFunc) ChangeConsumerFactory {
return &changeConsumerFuncInstanceFactory{f}
}
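// Illustrative sketch: building a factory from a plain function that prints
// every delta row. The body is an assumption made for the example; note that
// the function is shared by all consumers, so anything it touches must be
// safe for concurrent use, as the comment above warns.
func exampleFactory() ChangeConsumerFactory {
	return MakeChangeConsumerFactoryFromFunc(
		func(ctx context.Context, tableName string, change Change) error {
			for _, row := range change.Delta {
				fmt.Printf("%s: %s\n", tableName, row)
			}
			return nil
		})
}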
type changeConsumerFuncInstanceFactory struct {
f ChangeConsumerFunc
}
// CreateChangeConsumer is needed to implement the ChangeConsumerFactory interface.
func (ccfif *changeConsumerFuncInstanceFactory) CreateChangeConsumer(
ctx context.Context,
input CreateChangeConsumerInput,
) (ChangeConsumer, error) {
return &changeConsumerFuncInstance{
tableName: input.TableName,
f: ccfif.f,
}, nil
}
type changeConsumerFuncInstance struct {
tableName string
f ChangeConsumerFunc
}
func (ccfi *changeConsumerFuncInstance) End() error {
return nil
}
func (ccfi *changeConsumerFuncInstance) Consume(ctx context.Context, change Change) error {
return ccfi.f(ctx, ccfi.tableName, change)
}
// ChangeConsumerFunc can be used in conjunction with MakeChangeConsumerFactoryFromFunc
// if your processing is very simple. For more information, see the description
// of the MakeChangeConsumerFactoryFromFunc function.
type ChangeConsumerFunc func(ctx context.Context, tableName string, change Change) error
type changeRowQuerier struct {
keyspaceName string
tableName string
session *gocql.Session
pkCondition string
bindArgs []interface{}
consistency gocql.Consistency
}
func newChangeRowQuerier(session *gocql.Session, streams []StreamID, keyspaceName, tableName string, consistency gocql.Consistency) *changeRowQuerier {
var pkCondition string
if len(streams) == 1 {
pkCondition = "\"cdc$stream_id\" = ?"
} else {
pkCondition = "\"cdc$stream_id\" IN (?" + strings.Repeat(", ?", len(streams)-1) + ")"
}
bindArgs := make([]interface{}, len(streams)+2)
for i, stream := range streams {
bindArgs[i] = stream
}
return &changeRowQuerier{
keyspaceName: keyspaceName,
tableName: tableName,
session: session,
pkCondition: pkCondition,
bindArgs: bindArgs,
consistency: consistency,
}
}
func (crq *changeRowQuerier) queryRange(start, end gocql.UUID) (*changeRowIterator, error) {
// We need metadata to check if there are any tuples
kmeta, err := crq.session.KeyspaceMetadata(crq.keyspaceName)
if err != nil {
return nil, err
}
tmeta, ok := kmeta.Tables[crq.tableName+cdcTableSuffix]
if !ok {
return nil, fmt.Errorf("no such table: %s.%s", crq.keyspaceName, crq.tableName)
}
var colNames []string
var tupleNames []string
for _, col := range tmeta.Columns {
var ct interface{} = col.Type
var ctStr string
switch ct := ct.(type) {
case string:
ctStr = ct
case fmt.Stringer:
ctStr = ct.String()
}
if strings.HasPrefix(ctStr, "frozen<tuple<") || strings.HasPrefix(ctStr, "tuple<") || strings.HasPrefix(ctStr, "tuple(") {
tupleNames = append(tupleNames, col.Name)
colNames = append(colNames, fmt.Sprintf("writetime(%s)", escapeColumnNameIfNeeded(col.Name)))
}
}
if len(tupleNames) == 0 {
colNames = []string{"*"}
} else {
for name := range tmeta.Columns {
colNames = append(colNames, escapeColumnNameIfNeeded(name))
}
}
queryStr := fmt.Sprintf(
"SELECT %s FROM %s.%s%s WHERE %s AND \"cdc$time\" > ? AND \"cdc$time\" <= ? BYPASS CACHE",
strings.Join(colNames, ", "),
crq.keyspaceName,
crq.tableName,
cdcTableSuffix,
crq.pkCondition,
)
crq.bindArgs[len(crq.bindArgs)-2] = start
crq.bindArgs[len(crq.bindArgs)-1] = end
iter := crq.session.Query(queryStr, crq.bindArgs...).Consistency(crq.consistency).Iter()
return newChangeRowIterator(iter, tupleNames)
}
// For a given range, returns the cdc$time of the earliest rows for each stream.
func (crq *changeRowQuerier) findFirstRowsInRange(start, end gocql.UUID) (map[string]gocql.UUID, error) {
queryStr := fmt.Sprintf(
"SELECT \"cdc$stream_id\", \"cdc$time\" FROM %s.%s%s WHERE %s AND \"cdc$time\" > ? AND \"cdc$time\" <= ? PER PARTITION LIMIT 1 BYPASS CACHE",
crq.keyspaceName,
crq.tableName,
cdcTableSuffix,
crq.pkCondition,
)
crq.bindArgs[len(crq.bindArgs)-2] = start
crq.bindArgs[len(crq.bindArgs)-1] = end
ret := make(map[string]gocql.UUID)
iter := crq.session.Query(queryStr, crq.bindArgs...).Consistency(crq.consistency).Iter()
var (
streamID StreamID
cdcTime gocql.UUID
)
for iter.Scan(&streamID, &cdcTime) {
ret[string(streamID)] = cdcTime
}
if err := iter.Close(); err != nil {
return nil, err
}
return ret, nil
}
// An adapter over gocql.Iterator which chooses representation for row values
// which is more suitable for CDC than the default one.
//
// Gocql has two main methods of retrieving row data:
//
// - If you know what columns will be returned by the query and which types
// to use to represent them, you use (*Iter).Scan(...) function and pass
// a list of pointers to values of types you chose for the representation.
// For example, if `x` is int, `Scan(&x)` will put the value of the column
// directly to the `x` variable, setting it to 0 if the column was null.
// - If you don't know which columns will be returned and what are their
// types, you can use (*Iter).MapScan, which returns a map from column
// name to the column value. Gocql automatically chooses a type which
// will be used to represent the column value.
//
// In our interface, we would like to use an API like MapScan, but there
// are some problems which are addressed by changeRowIterator:
//
// - Gocql's choice of the type used to represent column values is not the best
// for the CDC use case. First and foremost, it's very important to differentiate
// Go's default value for a type from a null. For example, for int columns,
// MapScan chooses Go's int type, and sets it to 0 whether the column was 0
// or null in the table. For CDC, this means completely different things -
// 0 would mean that the 0 value was written to that column, while null would
// mean that this column value was not changed.
// Fortunately, we can solve this issue by using a pointer-to-type (e.g. *int).
// Gocql will set it to null if it was null in the database, and set it
// to a pointer to a proper value if it was not null.
//
// - Similarly to above, UDTs suffer from a similar problem - they are,
// by default, represented by a map[string]interface{} which holds non-pointer
// values of UDT's elements. Fortunately, we can provide a custom type
// which uses pointers to UDT's elements - see udtWithNulls.
//
// - Tuples are handled in a peculiar way - instead of returning, for example,
// an []interface{} which holds tuple values, Scan expects that a pointer
// for each tuple element will be provided, and MapScan puts each tuple
// element under a separate key in the map. This creates a problem - it's
// impossible to differentiate a tuple with all fields set to null from
// a tuple that is just a null. In CDC, the first means an overwrite of the
// column, and the second means that the column should not be changed.
// This is worked around by using the writetime(X) function on the tuple
// column - this function returns null iff column X was null.
// Moreover, tuples are represented as an []interface{} slice containing
// pointers to tuple elements.
type changeRowIterator struct {
iter *gocql.Iter
columnValues []interface{}
cdcChangeBatchCols cdcChangeBatchCols
cdcChangeRowCols cdcChangeRowCols
// Contains information on all columns apart from the writetime() ones
colInfos []gocql.ColumnInfo
// Maps from tuple column names to the index they occupy in columnValues
tupleNameToWritetimeIdx map[string]int
// Maps from column name to index in change slice
fieldNameToIdx map[string]int
tupleWriteTimes []int64
}
func newChangeRowIterator(iter *gocql.Iter, tupleNames []string) (*changeRowIterator, error) {
// TODO: Check how costly is the reflection here
// We could amortize the cost by preparing the dataFields only at the
// beginning of the iteration, and change them only if the fields
// have changed
// This possibility should be looked into
allCols := iter.Columns()
if len(allCols) == 0 {
		// Having no columns indicates an error
return nil, iter.Close()
}
// If there are tuples in the table, the query will have form
// SELECT writetime(X), writetime(Z), X, Y, Z FROM ...
// where X and Z are tuples.
// We need to get the writetime for tuples in order to work around
// an issue in gocql - otherwise we wouldn't be able to differentiate
// a tuple with all columns null, and a tuple which is null itself.
// Assign slots in the beginning for tuples' writetime
tupleNameToWritetimeIdx := make(map[string]int, len(tupleNames))
for i, name := range tupleNames {
tupleNameToWritetimeIdx[name] = i
}
ci := &changeRowIterator{
iter: iter,
columnValues: make([]interface{}, 0, len(allCols)),
colInfos: make([]gocql.ColumnInfo, 0, len(allCols)),
tupleNameToWritetimeIdx: tupleNameToWritetimeIdx,
fieldNameToIdx: make(map[string]int),
tupleWriteTimes: make([]int64, len(tupleNames)),
}
// tupleWriteTimes will receive results of the writetime function
// for each tuple column
for i := range tupleNames {
ci.columnValues = append(ci.columnValues, &ci.tupleWriteTimes[i])
}
ci.colInfos = allCols[len(tupleNames):]
for colIdx, col := range ci.colInfos {
if tupTyp, ok := col.TypeInfo.(gocql.TupleTypeInfo); ok {
// Gocql operates on "flattened" tuples, therefore we need to put
// a separate value for each tuple element.
// To represent a field, use value returned by gocql's TypeInfo.New(),
// but convert it into a pointer
ci.fieldNameToIdx[col.Name] = colIdx
for range tupTyp.Elems {
ci.columnValues = append(ci.columnValues, &withNullUnmarshaler{})
}
} else {
var cval interface{}
// For common cdc column names, we want their values to be placed
// in cdcChangeBatchCols and cdcChangeRowCols structures
switch col.Name {
case "cdc$stream_id":
cval = &ci.cdcChangeBatchCols.streamID
case "cdc$time":
cval = &ci.cdcChangeBatchCols.time
case "cdc$batch_seq_no":
cval = &ci.cdcChangeRowCols.batchSeqNo
case "cdc$ttl":
cval = &ci.cdcChangeRowCols.ttl
case "cdc$operation":
cval = &ci.cdcChangeRowCols.operation
case "cdc$end_of_batch":
cval = &ci.cdcChangeRowCols.endOfBatch
default:
cval = &withNullUnmarshaler{}
}
ci.fieldNameToIdx[col.Name] = colIdx
ci.columnValues = append(ci.columnValues, cval)
}
}
return ci, nil
}
func (ci *changeRowIterator) Next() (cdcChangeBatchCols, *ChangeRow) {
if !ci.iter.Scan(ci.columnValues...) {
return cdcChangeBatchCols{}, nil
}
change := &ChangeRow{
fieldNameToIdx: ci.fieldNameToIdx,
data: make([]interface{}, len(ci.colInfos)),
colInfos: ci.colInfos,
cdcCols: ci.cdcChangeRowCols,
}
	// The beginning of columnValues holds the writetime() results for the
	// tuple columns. Skip over them.
pos := len(ci.tupleWriteTimes)
for idxInSlice, col := range ci.colInfos {
// TODO: Optimize
if strings.HasPrefix(col.Name, "cdc$") && !strings.HasPrefix(col.Name, "cdc$deleted_") {
pos++
continue
}
if tupTyp, ok := col.TypeInfo.(gocql.TupleTypeInfo); ok {
// We deviate from gocql's convention here - we represent a tuple
// as an []interface{}, we don't keep a separate column for each
// tuple element.
// This was made in order to avoid confusion with respect to
// the cdc log table - if we split tuple v into v[0], v[1], ...,
// we would also have to artificially split cdc$deleted_v
// into cdc$deleted_v[0], cdc$deleted_v[1]...
// Check the writetime of the tuple
// If the tuple was null, then the writetime will be null (zero in our case)
// This is a workaround needed because gocql does not differentiate
// null tuples from tuples which have all their elements as null
tupLen := len(tupTyp.Elems)
tupIdx := ci.tupleNameToWritetimeIdx[col.Name]
if ci.tupleWriteTimes[tupIdx] != 0 {
v := make([]interface{}, tupLen)
for i := 0; i < tupLen; i++ {
vv := ci.columnValues[pos+i].(*withNullUnmarshaler).value
v[i] = adjustBytes(vv)
}
change.data[idxInSlice] = v
} else {
change.data[idxInSlice] = ([]interface{})(nil)
}
pos += tupLen
} else {
v, isWithNull := ci.columnValues[pos].(*withNullUnmarshaler)
if isWithNull {
change.data[idxInSlice] = v.value
} else {
change.data[idxInSlice] = dereference(ci.columnValues[pos])
}
pos++
}
}
return ci.cdcChangeBatchCols, change
}
func (ci *changeRowIterator) Close() error {
return ci.iter.Close()
}
func dereference(i interface{}) interface{} {
return reflect.Indirect(reflect.ValueOf(i)).Interface()
}
// Converts a v1 UUID to a Cassandra timestamp.
// UUID timestamp is measured in 100-nanosecond intervals since 00:00:00.00, 15 October 1582.
// Cassandra timestamp is measured in microseconds since 00:00:00.00, 1 January 1970.
func timeuuidToTimestamp(from gocql.UUID) int64 {
return (from.Timestamp() - 0x01b21dd213814000) / 10
}
type withNullUnmarshaler struct {
value interface{}
}
func (wnu *withNullUnmarshaler) UnmarshalCQL(info gocql.TypeInfo, data []byte) error {
switch info.Type() {
case gocql.TypeUDT:
// UDTs are unmarshaled as map[string]interface{}
// Returned map is nil iff the whole UDT value was nil
if data == nil {
wnu.value = (map[string]interface{})(nil)
return nil
}
udtInfo := info.(gocql.UDTTypeInfo)
uwn := udtWithNulls{make(map[string]interface{}, len(udtInfo.Elements))}
if err := gocql.Unmarshal(info, data, &uwn); err != nil {
return err
}
wnu.value = uwn.fields
return nil
case gocql.TypeTuple:
// Tuples are unmarshaled as []interface{}
// Returned slice is nil iff the whole tuple is nil
if data == nil {
wnu.value = ([]interface{})(nil)
return nil
}
		// Make a tuple with withNullUnmarshalers
tupInfo := info.(gocql.TupleTypeInfo)
tupValue := make([]interface{}, len(tupInfo.Elems))
for i := range tupValue {
tupValue[i] = &withNullUnmarshaler{}
}
if err := gocql.Unmarshal(info, data, tupValue); err != nil {
return err
}
// Unwrap tuple values
for i := range tupValue {
tupValue[i] = tupValue[i].(*withNullUnmarshaler).value
}
wnu.value = tupValue
return nil
case gocql.TypeList, gocql.TypeSet:
if data == nil {
wnu.value = reflect.ValueOf(info.New()).Elem().Interface()
return nil
}
		// Make a list with withNullUnmarshalers
var lWnm []withNullUnmarshaler
if err := gocql.Unmarshal(info, data, &lWnm); err != nil {
return err
}
lV := reflect.ValueOf(info.New()).Elem()
for _, wnm := range lWnm {
lV.Set(reflect.Append(lV, reflect.ValueOf(wnm.derefForListOrMap())))
}
wnu.value = lV.Interface()
return nil
case gocql.TypeMap:
if data == nil {
wnu.value = reflect.ValueOf(info.New()).Elem().Interface()
return nil
}
		// Make a map with withNullUnmarshalers
mapInfo := info.(gocql.CollectionType)
keyType := reflect.TypeOf(mapInfo.Key.New()).Elem()
mapWithWnuType := reflect.MapOf(keyType, reflect.TypeOf(withNullUnmarshaler{}))
mapWithWnuPtr := reflect.New(mapWithWnuType)
mapWithWnuPtr.Elem().Set(reflect.MakeMap(mapWithWnuType))
if err := gocql.Unmarshal(info, data, mapWithWnuPtr.Interface()); err != nil {
return err
}
resultMapType := reflect.TypeOf(info.New()).Elem()
resultMap := reflect.MakeMap(resultMapType)
iter := mapWithWnuPtr.Elem().MapRange()
for iter.Next() {
unwrapped := iter.Value().Interface().(withNullUnmarshaler)
resultMap.SetMapIndex(iter.Key(), reflect.ValueOf(unwrapped.derefForListOrMap()))
}
wnu.value = resultMap.Interface()
return nil
case gocql.TypeBlob:
if data == nil {
wnu.value = ([]byte)(nil)
return nil
}
slice := make([]byte, 0)
if err := gocql.Unmarshal(info, data, &slice); err != nil {
return err
}
wnu.value = slice
return nil
default:
vptr := reflect.New(reflect.TypeOf(info.New()))
if err := gocql.Unmarshal(info, data, vptr.Interface()); err != nil {
return err
}
wnu.value = vptr.Elem().Interface()
return nil
}
}
func (wnu *withNullUnmarshaler) derefForListOrMap() interface{} {
v := reflect.ValueOf(wnu.value)
if v.Kind() == reflect.Ptr {
return v.Elem().Interface()
}
return wnu.value
}
// A wrapper over map[string]interface{} which is used to deserialize UDTs.
// Unlike raw map[string]interface{}, it keeps UDT fields as pointers,
// not values, which allows to determine which values in the UDT are null.
// Remember to pass an initialized map; a nil map value won't work.
type udtWithNulls struct {
fields map[string]interface{}
}
func (uwn *udtWithNulls) UnmarshalUDT(name string, info gocql.TypeInfo, data []byte) error {
var wnu withNullUnmarshaler
if err := gocql.Unmarshal(info, data, &wnu); err != nil {
return err
}
if uwn.fields == nil {
uwn.fields = make(map[string]interface{})
}
uwn.fields[name] = wnu.value
return nil
}
func adjustBytes(v interface{}) interface{} {
// Not sure why, but empty slices get deserialized as []byte(nil).
// We need to convert it to []byte{} (non-nil, empty slice).
// This is important because when used in a query,
// a nil empty slice is treated as null, whereas non-nil
// slice is treated as an empty slice, which are distinct
// in CQL.
switch vTyped := v.(type) {
case []byte:
if len(vTyped) == 0 {
v = make([]byte, 0)
}
case *[]byte:
if vTyped != nil && len(*vTyped) == 0 {
vv := make([]byte, 0)
v = &vv
}
}
return v
}
fileutils.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
from ceilometer.openstack.common import excutils
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
    :returns: A tuple of (reloaded, data), where `reloaded` indicates
        whether the file was re-read from disk.
"""
global _FILE_CACHE
if force_reload and filename in _FILE_CACHE:
del _FILE_CACHE[filename]
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
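# A minimal usage sketch (hypothetical path, not part of the original module):
#
#     reloaded, data = read_cached_file('/etc/ceilometer/policy.json')
#     reloaded, data = read_cached_file('/etc/ceilometer/policy.json')
#     # the second call returns reloaded=False unless the file's mtime changed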
def delete_if_exists(path):
"""Delete a file, but ignore file not found error.
:param path: File to delete
"""
try:
os.unlink(path)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
|
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
delete_if_exists(path)
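# A minimal usage sketch (hypothetical path and helper, not part of the
# original module):
#
#     with remove_path_on_error('/tmp/image.part'):
#         fetch('/tmp/image.part')  # hypothetical download helper
#     # if fetch() raises, /tmp/image.part is deleted before the exception
#     # propagates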
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
| raise |
test_migrations.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for database migrations for the API database.
These are "opportunistic" tests which allow testing against all three databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up DBs named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
from alembic import command as alembic_api
from alembic import script as alembic_script
from migrate.versioning import api as migrate_api
import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from oslo_log import log as logging
import testtools
from nova.db.api import models
from nova.db import migration
from nova import test
LOG = logging.getLogger(__name__)
class NovaModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
"""Test that the models match the database after migrations are run."""
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
def db_sync(self, engine):
with mock.patch.object(migration, '_get_engine', return_value=engine):
migration.db_sync(database='api')
def get_engine(self):
return self.engine
def get_metadata(self):
return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
# migrate_version is a sqlalchemy-migrate control table and
# isn't included in the model.
if name == 'migrate_version':
return False
return True
def filter_metadata_diff(self, diff):
# Filter out diffs that shouldn't cause a sync failure.
new_diff = []
# Define a whitelist of ForeignKeys that exist on the model but not in
# the database. They will be removed from the model at a later time.
fkey_whitelist = {'build_requests': ['request_spec_id']}
# Define a whitelist of columns that will be removed from the
# DB at a later release and aren't on a model anymore.
column_whitelist = {
'build_requests': [
'vm_state', 'instance_metadata',
'display_name', 'access_ip_v6', 'access_ip_v4', 'key_name',
'locked_by', 'image_ref', 'progress', 'request_spec_id',
'info_cache', 'user_id', 'task_state', 'security_groups',
'config_drive',
],
'resource_providers': ['can_host'],
}
for element in diff:
if isinstance(element, list):
# modify_nullable is a list
new_diff.append(element)
else:
# tuple with action as first element. Different actions have
# different tuple structures.
if element[0] == 'add_fk':
fkey = element[1]
tablename = fkey.table.name
column_keys = fkey.column_keys
if (
tablename in fkey_whitelist and
column_keys == fkey_whitelist[tablename]
):
continue
elif element[0] == 'remove_column':
tablename = element[2]
column = element[3]
if (
tablename in column_whitelist and
column.name in column_whitelist[tablename]
):
continue
new_diff.append(element)
return new_diff
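    # For reference, an illustrative sketch of the alembic autogenerate diff
    # shapes handled above (values are hypothetical):
    #
    #     ('add_fk', ForeignKeyConstraint(...))   # element[1] is the constraint
    #     ('remove_column', None, 'resource_providers', Column('can_host', ...))
    #     [('modify_nullable', ...)]   # nullability changes arrive as a list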
class TestModelsSyncSQLite(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsSyncMySQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsSyncPostgreSQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
"""Test that the models match the database after old migrations are run."""
def db_sync(self, engine):
# the 'nova.db.migration.db_sync' method will not use the legacy
# sqlalchemy-migrate-based migration flow unless the database is
# already controlled with sqlalchemy-migrate, so we need to manually
# enable version controlling with this tool to test this code path
repository = migration._find_migrate_repo(database='api')
migrate_api.version_control(
engine, repository, migration.MIGRATE_INIT_VERSION['api'])
# now we can apply migrations as expected and the legacy path will be
# followed
super().db_sync(engine)
class TestModelsLegacySyncSQLite(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsLegacySyncMySQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsLegacySyncPostgreSQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('api')
self.init_version = migration.ALEMBIC_INIT_VERSION['api']
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
return
self.assertIsNotNone(
getattr(self, '_check_%s' % revision, None),
(
'API DB Migration %s does not have a test; you must add one'
) % revision,
)
pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
if pre_upgrade:
pre_upgrade(connection)
alembic_api.upgrade(self.config, revision)
post_upgrade = getattr(self, '_check_%s' % revision, None)
if post_upgrade:
post_upgrade(connection)
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
There's no good reason for us to have diverging history, so validate
that only one base revision exists. This will prevent simple errors
        where people forget to specify the base revision. If this fails for your
change, look for migrations that do not have a 'revises' line in them.
"""
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_bases()))
def test_single_head_revision(self):
"""Ensure we only have a single head revision.
There's no good reason for us to have diverging history, so validate
that only one head revision exists. This will prevent merge conflicts
        adding additional head revision points. If this fails for your change,
look for migrations with the same 'revises' line in them.
"""
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_heads()))
def test_walk_versions(self):
with self.engine.begin() as connection:
self.config.attributes['connection'] = connection
script = alembic_script.ScriptDirectory.from_config(self.config)
revisions = [x.revision for x in script.walk_revisions()]
# for some reason, 'walk_revisions' gives us the revisions in
# reverse chronological order so we have to invert this
revisions.reverse()
self.assertEqual(revisions[0], self.init_version)
for revision in revisions:
LOG.info('Testing revision %s', revision)
self._migrate_up(connection, revision)
def | (self):
migration.db_sync(database='api')
script = alembic_script.ScriptDirectory.from_config(self.config)
head = script.get_current_head()
self.assertEqual(head, migration.db_version(database='api'))
class TestMigrationsWalkSQLite(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
pass
class TestMigrationsWalkMySQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestMigrationsWalkPostgreSQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
| test_db_version_alembic |
runtime~main.a4f02cce975f5a06641f.bundle.js | !function(modules){function webpackJsonpCallback(data){for(var moduleId,chunkId,chunkIds=data[0],moreModules=data[1],executeModules=data[2],i=0,resolves=[];i<chunkIds.length;i++)chunkId=chunkIds[i],Object.prototype.hasOwnProperty.call(installedChunks,chunkId)&&installedChunks[chunkId]&&resolves.push(installedChunks[chunkId][0]),installedChunks[chunkId]=0;for(moduleId in moreModules)Object.prototype.hasOwnProperty.call(moreModules,moduleId)&&(modules[moduleId]=moreModules[moduleId]);for(parentJsonpFunction&&parentJsonpFunction(data);resolves.length;)resolves.shift()();return deferredModules.push.apply(deferredModules,executeModules||[]),checkDeferredModules()}function checkDeferredModules(){for(var result,i=0;i<deferredModules.length;i++){for(var deferredModule=deferredModules[i],fulfilled=!0,j=1;j<deferredModule.length;j++){var depId=deferredModule[j];0!==installedChunks[depId]&&(fulfilled=!1)}fulfilled&&(deferredModules.splice(i--,1),result=__webpack_require__(__webpack_require__.s=deferredModule[0]))}return result}var installedModules={},installedChunks={1:0},deferredModules=[];function __webpack_require__(moduleId){if(installedModules[moduleId])return installedModules[moduleId].exports;var module=installedModules[moduleId]={i:moduleId,l:!1,exports:{}};return modules[moduleId].call(module.exports,module,module.exports,__webpack_require__),module.l=!0,module.exports}__webpack_require__.m=modules,__webpack_require__.c=installedModules,__webpack_require__.d=function(exports,name,getter){__webpack_require__.o(exports,name)||Object.defineProperty(exports,name,{enumerable:!0,get:getter})},__webpack_require__.r=function(exports){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(exports,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(exports,"__esModule",{value:!0})},__webpack_require__.t=function(value,mode){if(1&mode&&(value=__webpack_require__(value)),8&mode)return value;if(4&mode&&"object"==typeof value&&value&&value.__esModule)return value;var ns=Object.create(null);if(__webpack_require__.r(ns),Object.defineProperty(ns,"default",{enumerable:!0,value:value}),2&mode&&"string"!=typeof value)for(var key in value)__webpack_require__.d(ns,key,function(key){return value[key]}.bind(null,key));return ns},__webpack_require__.n=function(module){var getter=module&&module.__esModule?function getDefault(){return module.default}:function getModuleExports(){return module};return __webpack_require__.d(getter,"a",getter),getter},__webpack_require__.o=function(object,property){return Object.prototype.hasOwnProperty.call(object,property)},__webpack_require__.p="";var jsonpArray=window.webpackJsonp=window.webpackJsonp||[],oldJsonpFunction=jsonpArray.push.bind(jsonpArray);jsonpArray.push=webpackJsonpCallback,jsonpArray=jsonpArray.slice();for(var i=0;i<jsonpArray.length;i++)webpackJsonpCallback(jsonpArray[i]);var parentJsonpFunction=oldJsonpFunction;checkDeferredModules()}([]); | //# sourceMappingURL=runtime~main.a4f02cce975f5a06641f.bundle.js.map | |
npy.rs | use ndarray::IntoDimension;
use numpy::{
npyffi::{self, flags, types::npy_intp},
ToNpyDims, PY_ARRAY_API,
};
use numpy::{Element, PyArray1};
use polars::chunked_array::builder::memory;
use pyo3::prelude::*;
use std::{mem, ptr};
/// Create an empty numpy array with Arrow's 64-byte alignment
///
/// # Safety
/// All elements in the array are uninitialized
///
/// The array is also writable from Python.
pub unsafe fn aligned_array<T: Element>(py: Python<'_>, size: usize) -> (&PyArray1<T>, *mut T) {
let t_size = std::mem::size_of::<T>();
let capacity = size * t_size;
let ptr = memory::allocate_aligned(capacity).as_ptr() as *mut T;
    // Vec capacity is counted in elements, not bytes
    let mut buf = Vec::from_raw_parts(ptr, 0, size);
buf.set_len(size);
// modified from
// numpy-0.10.0/src/array.rs:375
let len = buf.len();
let buffer_ptr = buf.as_mut_ptr();
let dims = [len].into_dimension();
let strides = [mem::size_of::<T>() as npy_intp];
| dims.as_dims_ptr(),
T::npy_type() as i32,
strides.as_ptr() as *mut _, // strides
buffer_ptr as _, // data
mem::size_of::<T>() as i32, // itemsize
flags::NPY_ARRAY_OUT_ARRAY, // flag
ptr::null_mut(), //obj
);
mem::forget(buf);
(PyArray1::from_owned_ptr(py, ptr), buffer_ptr)
}
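// A minimal usage sketch (hypothetical caller, not part of the original file):
//
//     let (arr, ptr) = unsafe { aligned_array::<f64>(py, 1024) };
//     // SAFETY: every element must be written before the array is read
//     for i in 0..1024 {
//         unsafe { ptr.add(i).write(0.0) };
//     }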
pub unsafe fn vec_from_ptr<T>(ptr: usize, len: usize) -> Vec<T> {
let ptr = ptr as *mut T;
Vec::from_raw_parts(ptr, len, len)
} | let ptr = PY_ARRAY_API.PyArray_New(
PY_ARRAY_API.get_type_object(npyffi::NpyTypes::PyArray_Type),
dims.ndim_cint(), |
util.py | import os
RED = 31
GREEN = 32
BLUE = 34
MAGENTA = 35
def color(code, string):
return '\033[' + str(code) + 'm' + string + '\033[0m'
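# e.g. color(RED, 'fail') == '\033[31mfail\033[0m', which ANSI terminals
# render as red text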
def display_path(path):
return color(MAGENTA, path)
def colon():
return color(BLUE, ':')
EXCLUDE_DIRS = ['.git', '.vagrant']
def project_path():
# One dirname for tests dir, another for project dir
project_dir = os.path.dirname(os.path.dirname(__file__))
common = os.path.commonpath([project_dir, os.getcwd()])
return project_dir.replace(common, '.', 1) # Only replace once
def paths():
for root, dirs, files in os.walk(project_path(), topdown=True):
for exclude_dir in EXCLUDE_DIRS:
if exclude_dir in dirs:
dirs.remove(exclude_dir)
for filename in files:
yield os.path.join(root, filename)
class TestResult(object):
pass
class Success(TestResult):
def __init__(self, message):
self.message = message
def is_success(self):
return True
def is_failure(self):
return False
class Failure(TestResult):
def __init__(self, message, output):
self.message = message
self.output = output
def is_success(self): |
def is_failure(self):
return True | return False |
image.go | package ui
import (
"image"
"image/draw"
"time"
)
type ImageProps struct {
ImagePath string
}
var _ Component = (*Image)(nil)
type Image struct {
SharedComponent
props ImageProps
prevImagePath string
image image.Image
}
func (i *Image) Paint(painter *Painter, destLayer draw.Image, offset Offset) {
if i.image == nil {
return
}
if i.hasChanged {
i.initContentLayer()
painter.drawImage(i.image, i.image.Bounds(), i.contentLayer, image.Point{
X: 0,
Y: 0,
})
}
painter.drawImage(i.contentLayer, i.contentLayer.Bounds(), destLayer, image.Point{
X: offset.x,
Y: offset.y,
})
}
func (i Image) ComputeLeafSize(_ Constraints) Size {
if i.image == nil {
return Size{}
}
imageBound := i.image.Bounds()
width := imageBound.Max.X - imageBound.Min.X
style := i.getStyle()
if style.Width != nil {
width = *style.Width
}
height := imageBound.Max.Y - imageBound.Min.Y
if style.Height != nil {
height = *style.Height
}
return Size{
width: width,
height: height,
}
}
func (i *Image) Update(timeElapsed time.Duration, screenOffset Offset, deps *UpdateDeps) {
i.SharedComponent.Update(timeElapsed, screenOffset, deps)
if i.props.ImagePath != i.prevImagePath {
if len(i.props.ImagePath) > 0 {
i.image = deps.assets.GetImage(i.props.ImagePath)
}
i.hasChanged = true
i.prevImagePath = i.props.ImagePath
}
if i.StatefulStyle.HasChanged() {
i.hasChanged = true
}
}
func NewImage(props *ImageProps, statefulStyle *StatefulStyle) *Image | {
if props == nil {
props = &ImageProps{}
}
if statefulStyle == nil {
statefulStyle = NewStatefulStyle()
}
return &Image{
props: *props,
SharedComponent: SharedComponent{
Name: "Image",
States: map[State]struct{}{},
StatefulStyle: statefulStyle,
},
}
} |
|
root.go | package cmd
import (
"errors"
"io"
"os"
"path/filepath"
"github.com/spf13/cast"
"github.com/spf13/cobra"
tmcli "github.com/tendermint/tendermint/libs/cli"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/debug"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/keys"
"github.com/cosmos/cosmos-sdk/client/rpc"
"github.com/cosmos/cosmos-sdk/server"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/simapp/params"
"github.com/cosmos/cosmos-sdk/snapshots"
"github.com/cosmos/cosmos-sdk/store"
sdk "github.com/cosmos/cosmos-sdk/types"
authclient "github.com/cosmos/cosmos-sdk/x/auth/client"
authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
"github.com/cosmos/cosmos-sdk/x/auth/types"
vestingcli "github.com/cosmos/cosmos-sdk/x/auth/vesting/client/cli"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/cosmos/cosmos-sdk/x/crisis"
genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
)
// NewRootCmd creates a new root command for simd. It is called once in the
// main function.
func NewRootCmd() (*cobra.Command, params.EncodingConfig) {
encodingConfig := simapp.MakeTestEncodingConfig()
initClientCtx := client.Context{}.
WithJSONMarshaler(encodingConfig.Marshaler).
WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
WithTxConfig(encodingConfig.TxConfig).
WithLegacyAmino(encodingConfig.Amino).
WithInput(os.Stdin).
WithAccountRetriever(types.AccountRetriever{}).
WithBroadcastMode(flags.BroadcastBlock).
WithHomeDir(simapp.DefaultNodeHome)
rootCmd := &cobra.Command{
Use: "simd",
Short: "simulation app",
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
if err := client.SetCmdClientContextHandler(initClientCtx, cmd); err != nil {
return err
}
return server.InterceptConfigsPreRunHandler(cmd)
},
}
initRootCmd(rootCmd, encodingConfig)
return rootCmd, encodingConfig
}
func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) {
authclient.Codec = encodingConfig.Marshaler
rootCmd.AddCommand(
genutilcli.InitCmd(simapp.ModuleBasics, simapp.DefaultNodeHome),
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome),
genutilcli.MigrateGenesisCmd(),
genutilcli.GenTxCmd(simapp.ModuleBasics, encodingConfig.TxConfig, banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome),
genutilcli.ValidateGenesisCmd(simapp.ModuleBasics),
AddGenesisAccountCmd(simapp.DefaultNodeHome),
tmcli.NewCompletionCmd(rootCmd, true),
testnetCmd(simapp.ModuleBasics, banktypes.GenesisBalancesIterator{}),
debug.Cmd(),
)
a := appCreator{encodingConfig}
server.AddCommands(rootCmd, simapp.DefaultNodeHome, a.newApp, a.appExport, addModuleInitFlags)
// add keybase, auxiliary RPC, query, and tx child commands
rootCmd.AddCommand(
rpc.StatusCommand(),
queryCommand(),
txCommand(),
keys.Commands(simapp.DefaultNodeHome),
)
// add rosetta
rootCmd.AddCommand(server.RosettaCommand(encodingConfig.InterfaceRegistry, encodingConfig.Marshaler))
}
func addModuleInitFlags(startCmd *cobra.Command) {
crisis.AddModuleInitFlags(startCmd)
}
func queryCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "query",
Aliases: []string{"q"},
Short: "Querying subcommands",
DisableFlagParsing: true,
SuggestionsMinimumDistance: 2,
RunE: client.ValidateCmd,
}
cmd.AddCommand(
authcmd.GetAccountCmd(),
rpc.ValidatorCommand(),
rpc.BlockCommand(),
authcmd.QueryTxsByEventsCmd(),
authcmd.QueryTxCmd(),
)
simapp.ModuleBasics.AddQueryCommands(cmd)
cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID")
return cmd
}
func txCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "tx",
Short: "Transactions subcommands",
DisableFlagParsing: true,
SuggestionsMinimumDistance: 2,
RunE: client.ValidateCmd,
}
cmd.AddCommand(
authcmd.GetSignCommand(),
authcmd.GetSignBatchCommand(),
authcmd.GetMultiSignCommand(),
authcmd.GetValidateSignaturesCommand(),
flags.LineBreak,
authcmd.GetBroadcastCommand(),
authcmd.GetEncodeCommand(),
authcmd.GetDecodeCommand(),
flags.LineBreak,
vestingcli.GetTxCmd(),
)
simapp.ModuleBasics.AddTxCommands(cmd)
cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID")
return cmd
}
type appCreator struct {
encCfg params.EncodingConfig
}
// newApp is an AppCreator
func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, appOpts servertypes.AppOptions) servertypes.Application {
var cache sdk.MultiStorePersistentCache
if cast.ToBool(appOpts.Get(server.FlagInterBlockCache)) {
cache = store.NewCommitKVStoreCacheManager()
}
skipUpgradeHeights := make(map[int64]bool)
for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) {
skipUpgradeHeights[int64(h)] = true
}
pruningOpts, err := server.GetPruningOptionsFromFlags(appOpts)
if err != nil |
snapshotDir := filepath.Join(cast.ToString(appOpts.Get(flags.FlagHome)), "data", "snapshots")
snapshotDB, err := sdk.NewLevelDB("metadata", snapshotDir)
if err != nil {
panic(err)
}
snapshotStore, err := snapshots.NewStore(snapshotDB, snapshotDir)
if err != nil {
panic(err)
}
return simapp.NewSimApp(
logger, db, traceStore, true, skipUpgradeHeights,
cast.ToString(appOpts.Get(flags.FlagHome)),
cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
a.encCfg,
appOpts,
baseapp.SetPruning(pruningOpts),
baseapp.SetMinGasPrices(cast.ToString(appOpts.Get(server.FlagMinGasPrices))),
baseapp.SetHaltHeight(cast.ToUint64(appOpts.Get(server.FlagHaltHeight))),
baseapp.SetHaltTime(cast.ToUint64(appOpts.Get(server.FlagHaltTime))),
baseapp.SetMinRetainBlocks(cast.ToUint64(appOpts.Get(server.FlagMinRetainBlocks))),
baseapp.SetInterBlockCache(cache),
baseapp.SetTrace(cast.ToBool(appOpts.Get(server.FlagTrace))),
baseapp.SetIndexEvents(cast.ToStringSlice(appOpts.Get(server.FlagIndexEvents))),
baseapp.SetSnapshotStore(snapshotStore),
baseapp.SetSnapshotInterval(cast.ToUint64(appOpts.Get(server.FlagStateSyncSnapshotInterval))),
baseapp.SetSnapshotKeepRecent(cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent))),
)
}
// appExport creates a new simapp (optionally at a given height)
// and exports state.
func (a appCreator) appExport(
logger log.Logger, db dbm.DB, traceStore io.Writer, height int64, forZeroHeight bool, jailAllowedAddrs []string,
appOpts servertypes.AppOptions) (servertypes.ExportedApp, error) {
var simApp *simapp.SimApp
homePath, ok := appOpts.Get(flags.FlagHome).(string)
if !ok || homePath == "" {
return servertypes.ExportedApp{}, errors.New("application home not set")
}
if height != -1 {
simApp = simapp.NewSimApp(logger, db, traceStore, false, map[int64]bool{}, homePath, uint(1), a.encCfg, appOpts)
if err := simApp.LoadHeight(height); err != nil {
return servertypes.ExportedApp{}, err
}
} else {
simApp = simapp.NewSimApp(logger, db, traceStore, true, map[int64]bool{}, homePath, uint(1), a.encCfg, appOpts)
}
return simApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs)
}
| {
panic(err)
} |
flowaggregatequeryfilter.go | package platformclientv2
import (
"github.com/leekchan/timeutil"
"encoding/json"
"strconv"
"strings"
)
// Flowaggregatequeryfilter
type Flowaggregatequeryfilter struct {
// VarType - Boolean operation to apply to the provided predicates and clauses
VarType *string `json:"type,omitempty"`
	// Clauses - Boolean 'and/or' logic with up to two levels of nesting
Clauses *[]Flowaggregatequeryclause `json:"clauses,omitempty"`
// Predicates - Like a three-word sentence: (attribute-name) (operator) (target-value).
Predicates *[]Flowaggregatequerypredicate `json:"predicates,omitempty"`
}
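// An illustrative filter document (predicate field names are hypothetical;
// see Flowaggregatequerypredicate for the actual shape):
//
//	{
//	  "type": "and",
//	  "predicates": [
//	    { "dimension": "mediaType", "operator": "matches", "value": "voice" }
//	  ]
//	}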
func (o *Flowaggregatequeryfilter) MarshalJSON() ([]byte, error) {
// Redundant initialization to avoid unused import errors for models with no Time values
_ = timeutil.Timedelta{}
type Alias Flowaggregatequeryfilter
return json.Marshal(&struct {
VarType *string `json:"type,omitempty"`
Clauses *[]Flowaggregatequeryclause `json:"clauses,omitempty"`
Predicates *[]Flowaggregatequerypredicate `json:"predicates,omitempty"`
*Alias
}{
VarType: o.VarType,
Clauses: o.Clauses,
Predicates: o.Predicates,
Alias: (*Alias)(o),
})
}
func (o *Flowaggregatequeryfilter) UnmarshalJSON(b []byte) error {
var FlowaggregatequeryfilterMap map[string]interface{}
err := json.Unmarshal(b, &FlowaggregatequeryfilterMap)
if err != nil {
return err
}
if VarType, ok := FlowaggregatequeryfilterMap["type"].(string); ok {
o.VarType = &VarType
}
if Clauses, ok := FlowaggregatequeryfilterMap["clauses"].([]interface{}); ok {
ClausesString, _ := json.Marshal(Clauses)
json.Unmarshal(ClausesString, &o.Clauses)
}
if Predicates, ok := FlowaggregatequeryfilterMap["predicates"].([]interface{}); ok {
PredicatesString, _ := json.Marshal(Predicates)
json.Unmarshal(PredicatesString, &o.Predicates)
}
return nil
}
// String returns a JSON representation of the model
func (o *Flowaggregatequeryfilter) String() string {
j, _ := json.Marshal(o) | return str
} | str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\u`, `\u`, -1))
|
main.go | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"os/signal"
"strconv"
"strings"
"sync/atomic"
"syscall"
"time"
"github.com/hazelcast/hazelcast-go-client/serialization"
"github.com/hazelcast/hazelcast-go-client/predicate"
"github.com/hazelcast/hazelcast-go-client"
)
const displayDur = 10 * time.Second
const entryCount = 10_000
const goroutineCount = 10
var remainingGoroutines = int32(goroutineCount)
var tic time.Time
type Stats struct {
Id int
opCount int64
}
func (s *Stats) IncOpCount() {
atomic.AddInt64(&s.opCount, 1)
}
func (s *Stats) OpCountAndReset() int64 {
return atomic.SwapInt64(&s.opCount, 0)
}
const simpleEntryProcessorFactoryID = 66
const simpleEntryProcessorClassID = 1
type SimpleEntryProcessor struct {
value string
}
func (s SimpleEntryProcessor) FactoryID() int32 {
return simpleEntryProcessorFactoryID
}
func (s SimpleEntryProcessor) ClassID() int32 {
return simpleEntryProcessorClassID
}
func (s SimpleEntryProcessor) WriteData(output serialization.DataOutput) {
output.WriteString(s.value)
}
func (s *SimpleEntryProcessor) ReadData(input serialization.DataInput) {
s.value = input.ReadString()
}
type Factory struct {
}
func (f Factory) Create(id int32) serialization.IdentifiedDataSerializable {
if id == simpleEntryProcessorClassID {
return &SimpleEntryProcessor{}
}
panic(fmt.Sprintf("unknown class ID: %d", id))
}
func (f Factory) FactoryID() int32 {
return simpleEntryProcessorFactoryID
}
func displayStats(sts []*Stats) {
log.Println(strings.Repeat("*", 40))
totalOp := int64(0)
hanged := []int{}
toc := time.Now()
period := toc.Sub(tic)
tic = toc
for _, st := range sts {
total := st.OpCountAndReset()
totalOp += total
if total == 0 {
hanged = append(hanged, st.Id)
}
}
if len(hanged) == 0 {
log.Println("All goroutines worked without blocking")
} else {
log.Printf("%d goroutines hanged with ids: %v", len(hanged), hanged)
}
log.Println(strings.Repeat("-", 40))
log.Printf("OPS: %.2f (%d / %.2f)", float64(totalOp)/period.Seconds(), totalOp, period.Seconds())
}
func loadConfig(path string, cfg *hazelcast.Config) error {
text, err := ioutil.ReadFile(path)
if err != nil {
return err
}
if err = json.Unmarshal(text, cfg); err != nil {
return err
}
return nil
}
func createClient(ctx context.Context, configPath string) *hazelcast.Client {
config := hazelcast.NewConfig()
if configPath != "" {
log.Println("Configuration Path : ", configPath)
if err := loadConfig(configPath, &config); err != nil {
log.Fatal(err)
}
}
config.Serialization.SetIdentifiedDataSerializableFactories(&Factory{})
client, err := hazelcast.StartNewClientWithConfig(ctx, config)
if err != nil {
log.Fatal(err)
}
return client
}
func run(ctx context.Context, m *hazelcast.Map, st *Stats) {
var err error
for {
key := strconv.Itoa(rand.Intn(entryCount))
value := strconv.Itoa(rand.Intn(entryCount))
op := rand.Intn(100)
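		// Weighted op mix: ~30% Get, ~30% Put, ~20% predicate query,
		// ~20% entry processor.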
if op < 30 {
_, err = m.Get(ctx, key)
} else if op < 60 {
_, err = m.Put(ctx, key, value)
} else if op < 80 {
_, err = m.GetValuesWithPredicate(ctx, predicate.Between("this", 0, 10))
} else {
_, err = m.ExecuteOnEntries(ctx, &SimpleEntryProcessor{value: "test"})
}
if err != nil {
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
break
}
log.Fatal(err)
}
st.IncOpCount()
}
log.Printf("Goroutine %d exited", st.Id)
atomic.AddInt32(&remainingGoroutines, -1)
}
func | () {
configPath := flag.String("c", "", "Path of the JSON configuration file.")
durStr := flag.String("d", "48h",
"Running time. You can use the following prefixes: s -> seconds, m -> minutes, h -> hours and their combinations, e.g., 2h30m")
flag.Parse()
dur, err := time.ParseDuration(*durStr)
if err != nil {
log.Fatal(err)
}
log.Println("Duration : ", dur)
log.Println("Goroutine Count : ", goroutineCount)
ctx, cancel := context.WithTimeout(context.Background(), dur)
defer cancel()
client := createClient(ctx, *configPath)
testMap, err := client.GetMap(ctx, "-test-map")
if err != nil {
log.Fatal(err)
}
tic = time.Now()
sts := make([]*Stats, goroutineCount)
for i := 0; i < goroutineCount; i++ {
st := &Stats{Id: i}
sts[i] = st
go run(ctx, testMap, st)
}
go func(ctx context.Context, sts []*Stats) {
ticker := time.NewTicker(displayDur)
defer ticker.Stop()
for {
select {
case <-ticker.C:
displayStats(sts)
case <-ctx.Done():
return
}
}
}(ctx, sts)
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
select {
case <-ctx.Done():
case <-c:
}
displayStats(sts)
time.Sleep(10 * time.Second)
client.Shutdown(ctx)
log.Println(strings.Repeat("*", 40))
	resultText := "SUCCESS"
	remaining := atomic.LoadInt32(&remainingGoroutines) // goroutines may still be running; read atomically
	if remaining > 0 {
		resultText = "FAILURE"
	}
	log.Printf("Soak test finished with %s.\n", resultText)
	log.Printf("Remaining goroutine count: %d", remaining)
log.Println(strings.Repeat("-", 40))
}
| main |
context.py | import os | import sys
sys.path.insert(0, os.path.abspath('../..'))
from algolib.disjoint_set import DisjointSet |
|
datatype.py | #coding=utf-8 |
integer = 123
print integer
_float = 12.34
print _float
string = 'hello \'python\' '
print string
string1 = r'hello \\\\'
print string1
boolean = True
print boolean
print
# logical operations
print True and True
print True and False
print True or False
print False or False
print not True
print
print None
# constants are written in uppercase by convention
PI = 3.141592653
print PI
print 10/3
print 10.0/3 | # datatype.py |
simplify.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
//
// A test that checks various simplifications
use mirai_annotations::*;
pub fn f1(b: bool) {
verify!(b || !b);
verify!(!b || b);
}
pub fn f2(x: bool, y: bool) {
let z = (x || y) && x;
verify!(z == x);
let z = (x || y) && y;
verify!(z == y);
}
pub fn f3(x: bool, y: bool) {
let z = (x || y) && (!x);
verify!(z == y && !x); //~ possible false verification condition
}
pub fn f4(x: bool, y: bool) {
let z = (x || y) && (!y);
verify!(z == x && !y); //~ possible false verification condition
}
pub fn f5(x: bool, y: bool) {
let z = (x && y) || x;
verify!(z == x);
}
pub fn f6(x: bool, y: bool) {
let z = (x && y) || y;
verify!(z == y);
}
pub fn f7(x: bool, y: bool) {
let z = (x && !y) || y;
verify!(z == (y || x));
}
pub fn | (x: bool, y: bool, a: i32, b: i32) {
let z = if x || y {
if x {
a
} else {
b
}
} else {
b
};
verify!(z == if x { a } else { b });
}
pub fn f9(x: bool, y: bool, a: i32, b: i32) {
let z = if x || y {
if y {
a
} else {
b
}
} else {
b
};
verify!(z == if y { a } else { b });
}
pub fn f10(x: bool, y: bool, a: i32, b: i32) {
let z = if x || y {
if x {
a
} else {
b
}
} else {
a
};
verify!(z == if y { b } else { a }); //~ possible false verification condition
}
pub fn f11(x: bool, y: bool, a: i32, b: i32) {
let z = if x || y {
if y {
a
} else {
b
}
} else {
a
};
verify!(z == if x { b } else { a }); //~ possible false verification condition
}
pub fn main() {}
| f8 |
test_package.py | import importlib
def | ():
"""The package imports correctly."""
# Capturing the exception and then asserting for it makes the failure mode
# look normal; if we call pytest.fail() inside the except block, we get
# a long traceback with exceptions raised during outer exception handling.
exception = None
try:
importlib.import_module("bpp_speech")
except ModuleNotFoundError as err:
exception = err
assert exception is None
| test_import |
lib.rs | #![deny(clippy::all)]
#[macro_use]
extern crate napi_derive;
#[macro_use]
extern crate serde_derive;
mod error;
mod file;
use napi::{JsObject, Result};
#[cfg(all(
any(windows, unix),
target_arch = "x86_64",
not(target_env = "musl"),
not(debug_assertions)
))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[module_exports]
fn init(mut exports: JsObject) -> Result<()> | {
exports.create_named_method("createFileClass", file::create_file_class)?;
Ok(())
} |
|
views.py | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.views.decorators.cache import cache_page
from django.db.models import Count
from .forms import CreatePost, CreateComment
from .models import Post, User, Comment, Follow
def _create_paginator(request, post):
paginator = Paginator(post, 10)
page_number = request.GET.get("page")
page = paginator.get_page(page_number)
return page, paginator
def _search_text(request):
keyword = request.GET.get("q", None)
posts_list = Post.objects.select_related(
"author", "group").filter(
text__contains=keyword
).prefetch_related("comments")
data_paginator = _create_paginator(request, posts_list)
return data_paginator
@cache_page(20, key_prefix="index_page")
def index(request):
if request.GET.get("q") is None:
posts_list = Post.objects.order_by("-pub_date")\
.all()\
.select_related("author", "group", )\
.prefetch_related("comments",)
data_paginator = _create_paginator(request, posts_list)
else:
data_paginator = _search_text(request)
return render(request, "index.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"title": "Последние обновления",
"description": "Последние обновления на сайте",
"changing_it": "index"})
@login_required
def new_post(request):
content = {"title_name": "Новый пост", "btn_name": "Добавить пост"}
if request.method == "POST":
form = CreatePost(request.POST, files=request.FILES or None)
if form.is_valid():
author = request.user
form.cleaned_data['author'] = author
date_clean = form.cleaned_data
post = Post.objects.create(**date_clean)
messages.success(request, "Пост добавлен")
return redirect("index")
else:
form = CreatePost()
return render(request, "add_post.html", {"form": form, "content": content})
def profile(request, username):
user_name = get_object_or_404(User, username=username)
following = None
    if request.user.is_authenticated:
        following = Follow.objects.filter(user=request.user, author=user_name)
posts = Post.objects.filter(author_id__username=user_name)\
.select_related("author", "group")\
.prefetch_related("comments")
data_paginator = _create_paginator(request, posts)
return render(request, "profile.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"author": user_name,
"following": following})
def post_view(request, username, post_id):
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
# comments = select_post.comments.all()
comments = list(Comment.objects.filter(post_id=post_id).select_related("author", "post"))
return render(request, "post.html", {"user_post": select_post,
"author": profile_person,
"comments": comments})
def post_edit(request, username, post_id):
content = {"title_name": "Редактировать запись", "btn_name": "Сохранить"}
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
if request.user != profile_person:
return redirect("post", username=username, post_id=post_id)
form = CreatePost(request.POST or None,
instance=select_post,
files=request.FILES or None)
if form.is_valid():
form.save()
print("Post can editable")
return redirect("post", username=username, post_id=post_id)
return render(request, "add_post.html", {"form": form,
"selected_post": select_post,
"content": content})
def page_not_found(request, exeption):
return render(request, "misc/404.html", {"path": request.path}, status=404)
def server_error(request):
return render(request, "misc/500.html", status=500)
@login_required
def add_comment(request, username, post_id):
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person)
if request.method == "POST":
form = CreateComment(request.POST)
if form.is_valid():
author = request.user
form.cleaned_data["post"] = select_post
form.cleaned_data["author"] = author
data_clean = form.cleaned_data
comment = Comment.objects.create(**data_clean)
messages.success(request, "Коммент поставлен")
return redirect("post", username=username, post_id=post_id)
else:
form = CreateComment()
return render(request, "comments.html", {"form": form})
@login_required
def follow_index(request):
my_follow = Post.objects.filter(author__following__user=request.user)\
.select_related("author", "group")\
.prefetch_related("comments")
data_paginator = _create_paginator(request, my_follow)
return render(request, "index.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"title": "Подписки",
"description": "Последние обновления твоих людей",
"changing_it": "follow"})
@login_required
def profile_follow(request, username):
author = get_object_or_404(User, username=username)
if request.user != author:
Follow.objects.get_or_create(author=author, user=request.user)
return redirect("profile", username=username)
@login_required
def profile_unfollow(request, username):
author = get_object_or_404(User, username=username)
if request.user != author:
Follow.objects.filter(autho | equest.user).delete()
return redirect('profile', username=username) | r=author, user=r |
struct.rs | use std::cell::RefCell;
use std::collections::HashMap;
use super::{ToTypeDef, TypeDef};
use crate::{ty_to_ts_type, NapiImpl, NapiStruct, NapiStructKind};
thread_local! {
pub(crate) static TASK_STRUCTS: RefCell<HashMap<String, String>> = Default::default();
}
impl ToTypeDef for NapiStruct {
fn to_type_def(&self) -> TypeDef {
TypeDef {
kind: String::from(if self.kind == NapiStructKind::Object {
"interface"
} else {
"struct"
}),
name: self.js_name.to_owned(),
def: self.gen_ts_class(),
}
}
}
impl ToTypeDef for NapiImpl {
fn to_type_def(&self) -> TypeDef |
}
impl NapiStruct {
fn gen_ts_class(&self) -> String {
let mut ctor_args = vec![];
let def = self
.fields
.iter()
.filter(|f| f.getter)
.map(|f| {
let mut field_str = String::from("");
if !f.setter {
field_str.push_str("readonly ")
}
let arg = format!("{}: {}", &f.js_name, ty_to_ts_type(&f.ty, false));
if self.kind == NapiStructKind::Constructor {
ctor_args.push(arg.clone());
}
field_str.push_str(&arg);
field_str
})
.collect::<Vec<_>>()
.join("\\n");
if self.kind == NapiStructKind::Constructor {
format!("{}\\nconstructor({})", def, ctor_args.join(", "))
} else {
def
}
}
}
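// Illustrative output sketch (hypothetical struct, not from the original
// crate): for a Constructor-kind struct with a read/write field `a: u32`
// and a read-only field `b: String`, gen_ts_class would yield
//
//     a: number\nreadonly b: string\nconstructor(a: number, b: string)
//
// where the literal `\n` sequences separate lines of the TypeScript
// definition when it is later rendered.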
| {
if let Some(output_type) = &self.task_output_type {
TASK_STRUCTS.with(|t| {
t.borrow_mut()
.insert(self.js_name.clone(), ty_to_ts_type(output_type, false));
});
}
TypeDef {
kind: "impl".to_owned(),
name: self.js_name.to_owned(),
def: self
.items
.iter()
.map(|f| f.to_type_def().def)
.collect::<Vec<_>>()
.join("\\n"),
}
} |
wallet.go | package zcncore
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"math"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/0chain/errors"
"github.com/0chain/gosdk/core/common"
"github.com/0chain/gosdk/core/logger"
"github.com/0chain/gosdk/core/util"
"github.com/0chain/gosdk/core/version"
"github.com/0chain/gosdk/core/zcncrypto"
"github.com/0chain/gosdk/zboxcore/zboxutil"
)
type ChainConfig struct {
ChainID string `json:"chain_id,omitempty"`
BlockWorker string `json:"block_worker"`
Miners []string `json:"miners"`
Sharders []string `json:"sharders"`
SignatureScheme string `json:"signature_scheme"`
MinSubmit int `json:"min_submit"`
MinConfirmation int `json:"min_confirmation"`
ConfirmationChainLength int `json:"confirmation_chain_length"`
EthNode string `json:"eth_node"`
}
var defaultLogLevel = logger.DEBUG
var Logger logger.Logger
const (
REGISTER_CLIENT = `/v1/client/put`
GET_CLIENT = `/v1/client/get`
PUT_TRANSACTION = `/v1/transaction/put`
TXN_VERIFY_URL = `/v1/transaction/get/confirmation?hash=`
GET_BALANCE = `/v1/client/get/balance?client_id=`
GET_LOCK_CONFIG = `/v1/screst/` + InterestPoolSmartContractAddress + `/getLockConfig`
GET_LOCKED_TOKENS = `/v1/screst/` + InterestPoolSmartContractAddress + `/getPoolsStats?client_id=`
GET_BLOCK_INFO = `/v1/block/get?`
GET_MAGIC_BLOCK_INFO = `/v1/block/magic/get?`
GET_LATEST_FINALIZED = `/v1/block/get/latest_finalized`
GET_LATEST_FINALIZED_MAGIC_BLOCK = `/v1/block/get/latest_finalized_magic_block`
GET_CHAIN_STATS = `/v1/chain/get/stats`
// vesting SC
VESTINGSC_PFX = `/v1/screst/` + VestingSmartContractAddress
GET_VESTING_CONFIG = VESTINGSC_PFX + `/getConfig`
GET_VESTING_POOL_INFO = VESTINGSC_PFX + `/getPoolInfo`
GET_VESTING_CLIENT_POOLS = VESTINGSC_PFX + `/getClientPools`
// miner SC
MINERSC_PFX = `/v1/screst/` + MinerSmartContractAddress
GET_MINERSC_NODE = MINERSC_PFX + "/nodeStat"
GET_MINERSC_POOL = MINERSC_PFX + "/nodePoolStat"
GET_MINERSC_CONFIG = MINERSC_PFX + "/configs"
GET_MINERSC_USER = MINERSC_PFX + "/getUserPools"
GET_MINERSC_MINERS = MINERSC_PFX + "/getMinerList"
GET_MINERSC_SHARDERS = MINERSC_PFX + "/getSharderList"
// storage SC
STORAGESC_PFX = "/v1/screst/" + StorageSmartContractAddress
STORAGESC_GET_SC_CONFIG = STORAGESC_PFX + "/getConfig"
STORAGESC_GET_CHALLENGE_POOL_INFO = STORAGESC_PFX + "/getChallengePoolStat"
STORAGESC_GET_ALLOCATION = STORAGESC_PFX + "/allocation"
STORAGESC_GET_ALLOCATIONS = STORAGESC_PFX + "/allocations"
STORAGESC_GET_READ_POOL_INFO = STORAGESC_PFX + "/getReadPoolStat"
STORAGESC_GET_STAKE_POOL_INFO = STORAGESC_PFX + "/getStakePoolStat"
STORAGESC_GET_STAKE_POOL_USER_INFO = STORAGESC_PFX + "/getUserStakePoolStat"
STORAGESC_GET_BLOBBERS = STORAGESC_PFX + "/getblobbers"
STORAGESC_GET_BLOBBER = STORAGESC_PFX + "/getBlobber"
STORAGESC_GET_WRITE_POOL_INFO = STORAGESC_PFX + "/getWritePoolStat"
)
const (
StorageSmartContractAddress = `6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d7`
VestingSmartContractAddress = `2bba5b05949ea59c80aed3ac3474d7379d3be737e8eb5a968c52295e48333ead`
FaucetSmartContractAddress = `6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d3`
InterestPoolSmartContractAddress = `cf8d0df9bd8cc637a4ff4e792ffe3686da6220c45f0e1103baa609f3f1751ef4`
MultiSigSmartContractAddress = `27b5ef7120252b79f9dd9c05505dd28f328c80f6863ee446daede08a84d651a7`
MinerSmartContractAddress = `6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d9`
MultiSigRegisterFuncName = "register"
MultiSigVoteFuncName = "vote"
)
// In percentage
const consensusThresh = float32(25.0)
const (
defaultMinSubmit = int(50)
defaultMinConfirmation = int(50)
defaultConfirmationChainLength = int(3)
defaultTxnExpirationSeconds = 60
defaultWaitSeconds = (3 * time.Second)
)
const (
StatusSuccess int = 0
StatusNetworkError int = 1
// TODO: Change to specific error
StatusError int = 2
StatusRejectedByUser int = 3
StatusInvalidSignature int = 4
StatusAuthError int = 5
StatusAuthVerifyFailed int = 6
StatusAuthTimeout int = 7
StatusUnknown int = -1
)
const TOKEN_UNIT int64 = 1e10
const (
OpGetTokenLockConfig int = iota
OpGetLockedTokens
OpGetUserPools
OpGetUserPoolDetail
// storage SC ops
OpStorageSCGetConfig
OpStorageSCGetChallengePoolInfo
OpStorageSCGetAllocation
OpStorageSCGetAllocations
OpStorageSCGetReadPoolInfo
OpStorageSCGetStakePoolInfo
OpStorageSCGetBlobbers
OpStorageSCGetBlobber
OpStorageSCGetWritePoolInfo
)
// WalletCallback needs to be implemented for wallet creation.
type WalletCallback interface {
OnWalletCreateComplete(status int, wallet string, err string)
}
// GetBalanceCallback needs to be implemented by the caller of GetBalance() to get the status
type GetBalanceCallback interface {
OnBalanceAvailable(status int, value int64, info string)
}
// GetInfoCallback needs to be implemented by the caller of GetLockTokenConfig() and GetLockedTokens()
type GetInfoCallback interface {
// OnInfoAvailable will be called when GetLockTokenConfig is complete
// if status == StatusSuccess then info is valid
// is status != StatusSuccess then err will give the reason
OnInfoAvailable(op int, status int, info string, err string)
}
// GetUSDInfoCallback needs to be implemented by the caller of GetZcnUSDInfo()
type GetUSDInfoCallback interface {
// This will be called when GetZcnUSDInfo completes.
// if status == StatusSuccess then info is valid
	// if status != StatusSuccess then err will give the reason
OnUSDInfoAvailable(status int, info string, err string)
}
// AuthCallback needs to be implemented by the caller SetupAuth()
type AuthCallback interface {
	// This callback gives the status of the two-factor authenticator (zauth) setup.
OnSetupComplete(status int, err string)
}
type regInfo struct {
ID string `json:"id"`
PublicKey string `json:"public_key"`
}
type httpResponse struct {
status string
body []byte
err error
}
type localConfig struct {
chain ChainConfig
wallet zcncrypto.Wallet
authUrl string
isConfigured bool
isValidWallet bool
isSplitWallet bool
}
// Singleton
var _config localConfig
func init() {
Logger.Init(defaultLogLevel, "0chain-core-sdk")
}
func checkSdkInit() error {
if !_config.isConfigured || len(_config.chain.Miners) < 1 || len(_config.chain.Sharders) < 1 {
return errors.New("", "SDK not initialized")
}
return nil
}
func checkWalletConfig() error {
if !_config.isValidWallet || _config.wallet.ClientID == "" {
Logger.Error("wallet info not found. returning error.")
return errors.New("", "wallet info not found. set wallet info")
}
return nil
}
func checkConfig() error {
err := checkSdkInit()
if err != nil {
return err
}
err = checkWalletConfig()
if err != nil {
return err
}
return nil
}
func assertConfig() {
if _config.chain.MinSubmit <= 0 {
_config.chain.MinSubmit = defaultMinSubmit
}
if _config.chain.MinConfirmation <= 0 {
_config.chain.MinConfirmation = defaultMinConfirmation
}
if _config.chain.ConfirmationChainLength <= 0 {
_config.chain.ConfirmationChainLength = defaultConfirmationChainLength
}
}
func getMinMinersSubmit() int {
minMiners := util.MaxInt(calculateMinRequired(float64(_config.chain.MinSubmit), float64(len(_config.chain.Miners))/100), 1)
Logger.Info("Minimum miners used for submit :", minMiners)
return minMiners
}
func GetMinShardersVerify() int {
return getMinShardersVerify()
}
func getMinShardersVerify() int {
minSharders := util.MaxInt(calculateMinRequired(float64(_config.chain.MinConfirmation), float64(len(_config.chain.Sharders))/100), 1)
Logger.Info("Minimum sharders used for verify :", minSharders)
return minSharders
}
func getMinRequiredChainLength() int64 {
return int64(_config.chain.ConfirmationChainLength)
}
func calculateMinRequired(minRequired, percent float64) int {
return int(math.Ceil(minRequired * percent))
}
// GetVersion - returns version string
func GetVersion() string {
return version.VERSIONSTR
}
// SetLogLevel set the log level.
// lvl - 0 disabled; higher number (up to 4) more verbosity
func SetLogLevel(lvl int) {
Logger.SetLevel(lvl)
}
// SetLogFile - sets file path to write log
// verbose - true - console output; false - no console output
func SetLogFile(logFile string, verbose bool) {
f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return
}
Logger.SetLogFile(f, verbose)
Logger.Info("******* Wallet SDK Version:", version.VERSIONSTR, " *******")
}
func GetLogger() *logger.Logger {
return &Logger
}
// CloseLog closes log file
func CloseLog() {
Logger.Close()
}
// Init initializes the SDK with the miners, sharders and signature scheme
// provided in the JSON configuration.
func Init(c string) error {
err := json.Unmarshal([]byte(c), &_config.chain)
if err == nil {
// Check signature scheme is supported
if _config.chain.SignatureScheme != "ed25519" && _config.chain.SignatureScheme != "bls0chain" {
return errors.New("", "invalid/unsupported signature scheme")
}
err := UpdateNetworkDetails()
if err != nil {
return err
}
go UpdateNetworkDetailsWorker(context.Background())
assertConfig()
_config.isConfigured = true
}
Logger.Info("******* Wallet SDK Version:", version.VERSIONSTR, " *******")
return err
}
func WithChainID(id string) func(c *ChainConfig) error {
return func(c *ChainConfig) error {
c.ChainID = id
return nil
}
}
func WithMinSubmit(m int) func(c *ChainConfig) error {
return func(c *ChainConfig) error {
c.MinSubmit = m
return nil
}
}
func WithMinConfirmation(m int) func(c *ChainConfig) error {
return func(c *ChainConfig) error {
c.MinConfirmation = m
return nil
}
}
func WithConfirmationChainLength(m int) func(c *ChainConfig) error {
return func(c *ChainConfig) error {
c.ConfirmationChainLength = m
return nil
}
}
// InitZCNSDK initializes the SDK with miner, sharder and signature scheme provided.
func InitZCNSDK(blockWorker string, signscheme string, configs ...func(*ChainConfig) error) error {
if signscheme != "ed25519" && signscheme != "bls0chain" {
return errors.New("", "invalid/unsupported signature scheme")
}
_config.chain.BlockWorker = blockWorker
_config.chain.SignatureScheme = signscheme
err := UpdateNetworkDetails()
if err != nil {
return err
}
go UpdateNetworkDetailsWorker(context.Background())
for _, conf := range configs {
err := conf(&_config.chain)
if err != nil {
return errors.Wrap(err, "invalid/unsupported options.")
}
}
assertConfig()
_config.isConfigured = true
Logger.Info("******* Wallet SDK Version:", version.VERSIONSTR, " *******")
return nil
}
func GetNetwork() *Network {
return &Network{
Miners: _config.chain.Miners,
Sharders: _config.chain.Sharders,
}
}
func SetNetwork(miners []string, sharders []string) {
_config.chain.Miners = miners
_config.chain.Sharders = sharders
}
func GetNetworkJSON() string {
network := GetNetwork()
networkBytes, _ := json.Marshal(network)
return string(networkBytes)
}
// CreateWallet creates a wallet for the configured signature scheme.
// It also registers the wallet on the blockchain.
func CreateWallet(statusCb WalletCallback) error {
if len(_config.chain.Miners) < 1 || len(_config.chain.Sharders) < 1 {
return errors.New("", "SDK not initialized")
}
go func() {
sigScheme := zcncrypto.NewSignatureScheme(_config.chain.SignatureScheme)
wallet, err := sigScheme.GenerateKeys()
if err != nil {
statusCb.OnWalletCreateComplete(StatusError, "", fmt.Sprintf("%s", err.Error()))
return
}
err = RegisterToMiners(wallet, statusCb)
if err != nil {
statusCb.OnWalletCreateComplete(StatusError, "", fmt.Sprintf("%s", err.Error()))
return
}
}()
return nil
}
// RecoverWallet recovers the previously generated wallet using the mnemonic.
// It also registers the wallet on the blockchain again.
func RecoverWallet(mnemonic string, statusCb WalletCallback) error {
	if !zcncrypto.IsMnemonicValid(mnemonic) {
return errors.New("", "Invalid mnemonic")
}
go func() {
sigScheme := zcncrypto.NewSignatureScheme(_config.chain.SignatureScheme)
wallet, err := sigScheme.RecoverKeys(mnemonic)
if err != nil {
statusCb.OnWalletCreateComplete(StatusError, "", fmt.Sprintf("%s", err.Error()))
return
}
err = RegisterToMiners(wallet, statusCb)
if err != nil {
statusCb.OnWalletCreateComplete(StatusError, "", fmt.Sprintf("%s", err.Error()))
return
}
}()
return nil
}
// Split keys from the primary master key
func SplitKeys(privateKey string, numSplits int) (string, error) {
if _config.chain.SignatureScheme != "bls0chain" {
return "", errors.New("", "signature key doesn't support split key")
}
sigScheme := zcncrypto.NewBLS0ChainScheme()
err := sigScheme.SetPrivateKey(privateKey)
if err != nil {
return "", errors.Wrap(err, "set private key failed")
}
w, err := sigScheme.SplitKeys(numSplits)
if err != nil {
return "", errors.Wrap(err, "split key failed.")
}
wStr, err := w.Marshal()
if err != nil {
return "", errors.Wrap(err, "wallet encoding failed.")
}
return wStr, nil
}
// RegisterToMiners can be used to register the wallet.
func RegisterToMiners(wallet *zcncrypto.Wallet, statusCb WalletCallback) error {
result := make(chan *util.PostResponse)
defer close(result)
for _, miner := range _config.chain.Miners {
go func(minerurl string) {
url := minerurl + REGISTER_CLIENT
Logger.Info(url)
regData := map[string]string{
"id": wallet.ClientID,
"public_key": wallet.ClientKey,
}
req, err := util.NewHTTPPostRequest(url, regData)
if err != nil {
Logger.Error(minerurl, "new post request failed. ", err.Error())
return
}
res, err := req.Post()
if err != nil {
Logger.Error(minerurl, "send error. ", err.Error())
}
result <- res
return
}(miner)
}
consensus := float32(0)
for range _config.chain.Miners {
select {
case rsp := <-result:
Logger.Debug(rsp.Url, rsp.Status)
if rsp.StatusCode == http.StatusOK {
consensus++
} else {
Logger.Debug(rsp.Body)
}
}
}
rate := consensus * 100 / float32(len(_config.chain.Miners))
if rate < consensusThresh {
return fmt.Errorf("Register consensus not met. Consensus: %f, Expected: %f", rate, consensusThresh)
}
w, err := wallet.Marshal()
if err != nil {
return errors.Wrap(err, "wallet encoding failed")
}
statusCb.OnWalletCreateComplete(StatusSuccess, w, "")
return nil
}
type GetClientResponse struct {
ID string `json:"id"`
Version string `json:"version"`
CreationDate int `json:"creation_date"`
PublicKey string `json:"public_key"`
}
func GetClientDetails(clientID string) (*GetClientResponse, error) {
minerurl := util.GetRandom(_config.chain.Miners, 1)[0]
url := minerurl + GET_CLIENT
url = fmt.Sprintf("%v?id=%v", url, clientID)
req, err := util.NewHTTPGetRequest(url)
if err != nil {
Logger.Error(minerurl, "new get request failed. ", err.Error())
return nil, err
}
res, err := req.Get()
if err != nil {
Logger.Error(minerurl, "send error. ", err.Error())
return nil, err
}
var clientDetails GetClientResponse
err = json.Unmarshal([]byte(res.Body), &clientDetails)
if err != nil {
return nil, err
}
return &clientDetails, nil
}
// IsMnemonicValid is a utility function that checks whether a mnemonic is valid
func IsMnemonicValid(mnemonic string) bool {
return zcncrypto.IsMnemonicValid(mnemonic)
}
// SetWalletInfo should be called before any transaction or client-specific APIs
// splitKeyWallet parameter is valid only if SignatureScheme is "BLS0Chain"
func SetWalletInfo(w string, splitKeyWallet bool) error {
err := json.Unmarshal([]byte(w), &_config.wallet)
if err == nil {
if _config.chain.SignatureScheme == "bls0chain" {
_config.isSplitWallet = splitKeyWallet
}
_config.isValidWallet = true
}
return err
}
// SetAuthUrl will be called by the app to set the zauth URL in the SDK.
func SetAuthUrl(url string) error {
if !_config.isSplitWallet {
return errors.New("", "wallet type is not split key")
}
if url == "" {
return errors.New("", "invalid auth url")
}
_config.authUrl = strings.TrimRight(url, "/")
return nil
}
// GetBalance retrieves wallet balance from sharders
func GetBalance(cb GetBalanceCallback) error {
err := checkConfig()
if err != nil {
return err
}
go func() {
value, info, err := getBalanceFromSharders(_config.wallet.ClientID)
if err != nil {
Logger.Error(err)
cb.OnBalanceAvailable(StatusError, 0, info)
return
}
cb.OnBalanceAvailable(StatusSuccess, value, info)
}()
return nil
}
// GetBalanceWallet retrieves the balance of the given serialized wallet from sharders
func GetBalanceWallet(walletStr string, cb GetBalanceCallback) error {
w, err := GetWallet(walletStr)
if err != nil {
fmt.Printf("Error while parsing the wallet. %v\n", err)
return err
}
go func() {
value, info, err := getBalanceFromSharders(w.ClientID)
if err != nil {
Logger.Error(err)
cb.OnBalanceAvailable(StatusError, 0, info)
return
}
cb.OnBalanceAvailable(StatusSuccess, value, info)
}()
return nil
}
func getBalanceFromSharders(clientID string) (int64, string, error) {
result := make(chan *util.GetResponse)
defer close(result)
// getMinShardersVerify
var numSharders = len(_config.chain.Sharders) // overwrite, use all
queryFromSharders(numSharders, fmt.Sprintf("%v%v", GET_BALANCE, clientID), result)
consensus := float32(0)
balMap := make(map[int64]float32)
winBalance := int64(0)
var winInfo string
var winError string
for i := 0; i < numSharders; i++ {
select {
case rsp := <-result:
Logger.Debug(rsp.Url, rsp.Status)
if rsp.StatusCode != http.StatusOK {
Logger.Error(rsp.Body)
winError = rsp.Body
continue
}
Logger.Debug(rsp.Body)
var objmap map[string]json.RawMessage
err := json.Unmarshal([]byte(rsp.Body), &objmap)
if err != nil {
continue
}
if v, ok := objmap["balance"]; ok {
bal, err := strconv.ParseInt(string(v), 10, 64)
if err != nil {
continue
}
balMap[bal]++
if balMap[bal] > consensus {
consensus = balMap[bal]
winBalance = bal
winInfo = rsp.Body
}
}
}
}
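	// For intuition: if 3 sharders report balances 10, 10 and 12, balMap makes
	// 10 the winning balance with consensus 2, so rate = 2 * 100 / 3 ≈ 66.7 and
	// the call fails unless that rate reaches consensusThresh.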
rate := consensus * 100 / float32(len(_config.chain.Sharders))
if rate < consensusThresh {
return 0, winError, errors.New("", "get balance failed. consensus not reached")
}
return winBalance, winInfo, nil
}
// ConvertToToken converts the value to ZCN tokens
func ConvertToToken(value int64) float64 {
return float64(value) / float64(TOKEN_UNIT)
}
// ConvertToValue converts ZCN tokens to value
func ConvertToValue(token float64) int64 {
return int64(token * float64(TOKEN_UNIT))
}
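
// Example (sketch): assuming TOKEN_UNIT = 1e10 (its actual value is defined
// elsewhere in this package), ConvertToToken(25_000_000_000) returns 2.5 and
// ConvertToValue(2.5) returns 25_000_000_000.
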
func ConvertTokenToUSD(token float64) (float64, error) {
zcnRate, err := getTokenUSDRate()
if err != nil {
return 0, err
}
return token * zcnRate, nil
}
func ConvertUSDToToken(usd float64) (float64, error) {
zcnRate, err := getTokenUSDRate()
if err != nil {
return 0, err
}
return usd * (1 / zcnRate), nil
}
func getTokenUSDRate() (float64, error) {
return getTokenRateByCurrency("usd")
}
func getTokenRateByCurrency(currency string) (float64, error) {
var CoinGeckoResponse struct {
ID string `json:"id"`
Symbol string `json:"symbol"`
MarketData struct {
CurrentPrice map[string]float64 `json:"current_price"`
} `json:"market_data"`
}
req, err := util.NewHTTPGetRequest("https://api.coingecko.com/api/v3/coins/0chain?localization=false")
if err != nil {
Logger.Error("new get request failed." + err.Error())
return 0, err
}
res, err := req.Get()
if err != nil {
Logger.Error("get error. ", err.Error())
return 0, err
}
if res.StatusCode != http.StatusOK {
Logger.Error("Response status not OK. ", res.StatusCode)
return 0, errors.New("invalid_res_status_code", "Response status code is not OK")
}
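	// For reference, the relevant slice of the CoinGecko payload looks like
	// {"market_data": {"current_price": {"usd": 0.12, ...}}} (numbers are
	// illustrative), so CurrentPrice[currency] picks out the quoted fiat price.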
err = json.Unmarshal([]byte(res.Body), &CoinGeckoResponse)
if err != nil {
return 0, err
}
return CoinGeckoResponse.MarketData.CurrentPrice[currency], nil
}
func getInfoFromSharders(urlSuffix string, op int, cb GetInfoCallback) {
result := make(chan *util.GetResponse)
defer close(result)
// getMinShardersVerify()
var numSharders = len(_config.chain.Sharders) // overwrite, use all
queryFromSharders(numSharders, urlSuffix, result)
consensus := float32(0)
resultMap := make(map[int]float32)
var winresult *util.GetResponse
for i := 0; i < numSharders; i++ {
select {
case rsp := <-result:
Logger.Debug(rsp.Url, rsp.Status)
resultMap[rsp.StatusCode]++
if resultMap[rsp.StatusCode] > consensus {
consensus = resultMap[rsp.StatusCode]
winresult = rsp
}
}
}
rate := consensus * 100 / float32(len(_config.chain.Sharders))
if rate < consensusThresh {
newerr := fmt.Sprintf(`{"code": "consensus_failed", "error": "consensus failed on sharders.", "server_error": "%v"}`, winresult.Body)
cb.OnInfoAvailable(op, StatusError, "", newerr)
return
}
if winresult.StatusCode != http.StatusOK {
cb.OnInfoAvailable(op, StatusError, "", winresult.Body)
} else {
cb.OnInfoAvailable(op, StatusSuccess, winresult.Body, "")
}
}
// GetLockConfig returns the lock token configuration information such as interest rate from blockchain
func GetLockConfig(cb GetInfoCallback) error {
err := checkSdkInit()
if err != nil {
return err
}
go getInfoFromSharders(GET_LOCK_CONFIG, OpGetTokenLockConfig, cb)
return nil
}
// GetLockedTokens returns the earlier locked token pool stats
func GetLockedTokens(cb GetInfoCallback) error {
err := checkConfig()
if err != nil {
return err
}
go func() {
urlSuffix := fmt.Sprintf("%v%v", GET_LOCKED_TOKENS, _config.wallet.ClientID)
getInfoFromSharders(urlSuffix, OpGetLockedTokens, cb)
}()
return nil
}
// GetWallet gets a wallet object from a wallet string
func GetWallet(walletStr string) (*zcncrypto.Wallet, error) {
var w zcncrypto.Wallet
err := json.Unmarshal([]byte(walletStr), &w)
if err != nil {
fmt.Printf("error while parsing wallet string.\n%v\n", err)
return nil, err
}
return &w, nil
}
// GetWalletClientID returns the ClientID for a given wallet string
func GetWalletClientID(walletStr string) (string, error) {
w, err := GetWallet(walletStr)
if err != nil {
return "", err
}
return w.ClientID, nil
}
// GetZcnUSDInfo returns the USD value for the ZCN token from CoinGecko
func GetZcnUSDInfo(cb GetUSDInfoCallback) error {
go func() {
req, err := util.NewHTTPGetRequest("https://api.coingecko.com/api/v3/coins/0chain?localization=false")
if err != nil {
Logger.Error("new get request failed." + err.Error())
cb.OnUSDInfoAvailable(StatusError, "", "new get request failed."+err.Error())
return
}
res, err := req.Get()
if err != nil {
Logger.Error("get error. ", err.Error())
cb.OnUSDInfoAvailable(StatusError, "", "get error"+err.Error())
return
}
if res.StatusCode != http.StatusOK {
cb.OnUSDInfoAvailable(StatusError, "", fmt.Sprintf("%s: %s", res.Status, res.Body))
return
}
cb.OnUSDInfoAvailable(StatusSuccess, res.Body, "")
}()
return nil
}
// SetupAuth prepares the auth app with the client ID, client key, a public/private
// keypair, and the local public key of the app running on PC/Mac.
func SetupAuth(authHost, clientID, clientKey, publicKey, privateKey, localPublicKey string, cb AuthCallback) error {
go func() {
authHost = strings.TrimRight(authHost, "/")
data := map[string]string{"client_id": clientID, "client_key": clientKey, "public_key": publicKey, "private_key": privateKey, "peer_public_key": localPublicKey}
req, err := util.NewHTTPPostRequest(authHost+"/setup", data)
if err != nil {
Logger.Error("new post request failed. ", err.Error())
return
}
		res, err := req.Post()
		if err != nil {
			Logger.Error(authHost, " send error. ", err.Error())
			cb.OnSetupComplete(StatusError, err.Error())
			return
		}
if res.StatusCode != http.StatusOK {
cb.OnSetupComplete(StatusError, res.Body)
return
}
cb.OnSetupComplete(StatusSuccess, "")
}()
return nil
}
func GetIdForUrl(url string) string {
url = strings.TrimRight(url, "/")
url = fmt.Sprintf("%v/_nh/whoami", url)
req, err := util.NewHTTPGetRequest(url)
if err != nil {
Logger.Error(url, "new get request failed. ", err.Error())
return ""
}
res, err := req.Get()
if err != nil {
Logger.Error(url, "get error. ", err.Error())
return ""
}
s := strings.Split(res.Body, ",")
	if len(s) >= 4 {
		return s[3]
	}
return ""
}
//
// vesting pool
//
type VestingDestInfo struct {
ID common.Key `json:"id"` // identifier
Wanted common.Balance `json:"wanted"` // wanted amount for entire period
Earned common.Balance `json:"earned"` // can unlock
Vested common.Balance `json:"vested"` // already vested
Last common.Timestamp `json:"last"` // last time unlocked
}
type VestingPoolInfo struct {
ID common.Key `json:"pool_id"` // pool ID
Balance common.Balance `json:"balance"` // real pool balance
Left common.Balance `json:"left"` // owner can unlock
Description string `json:"description"` // description
StartTime common.Timestamp `json:"start_time"` // from
ExpireAt common.Timestamp `json:"expire_at"` // until
Destinations []*VestingDestInfo `json:"destinations"` // receivers
ClientID common.Key `json:"client_id"` // owner
}
type Params map[string]string
func (p Params) Query() string {
if len(p) == 0 {
return ""
}
var params = make(url.Values)
for k, v := range p {
params[k] = []string{v}
}
return "?" + params.Encode()
}
func withParams(uri string, params Params) string {
return uri + params.Query()
}
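
// Example (sketch): withParams("/v1/pool", Params{"pool_id": "abc"}) yields
// "/v1/pool?pool_id=abc" (the path is illustrative); url.Values.Encode takes
// care of URL-escaping the parameter values.
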
func GetVestingPoolInfo(poolID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
getInfoFromSharders(withParams(GET_VESTING_POOL_INFO, Params{
"pool_id": poolID,
}), 0, cb)
return
}
type VestingClientList struct {
Pools []common.Key `json:"pools"`
}
func GetVestingClientList(clientID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
if clientID == "" {
		clientID = _config.wallet.ClientID // use the current wallet's client ID if blank
}
go getInfoFromSharders(withParams(GET_VESTING_CLIENT_POOLS, Params{
"client_id": clientID,
}), 0, cb)
return
}
type VestingSCConfig struct {
MinLock common.Balance `json:"min_lock"`
MinDuration time.Duration `json:"min_duration"`
MaxDuration time.Duration `json:"max_duration"`
MaxDestinations int `json:"max_destinations"`
MaxDescriptionLength int `json:"max_description_length"`
}
func GetVestingSCConfig(cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
go getInfoFromSharders(GET_VESTING_CONFIG, 0, cb)
return
}
//
// miner SC
//
type Miner struct {
ID string `json:"id"`
N2NHost string `json:"n2n_host"`
Host string `json:"host"`
Port int `json:"port"`
PublicKey string `json:"public_key"`
ShortName string `json:"short_name"`
BuildTag string `json:"build_tag"`
TotalStake int `json:"total_stake"`
DelegateWallet string `json:"delegate_wallet"`
ServiceCharge float64 `json:"service_charge"`
NumberOfDelegates int `json:"number_of_delegates"`
MinStake int64 `json:"min_stake"`
MaxStake int64 `json:"max_stake"`
Stat interface{} `json:"stat"`
}
type Node struct {
Miner Miner `json:"simple_miner"`
}
type MinerSCNodes struct {
Nodes []Node `json:"Nodes"`
}
// GetMiners obtains list of all active miners.
func GetMiners(cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = GET_MINERSC_MINERS
go getInfoFromSharders(url, 0, cb)
return
}
// GetSharders obtains list of all active sharders.
func GetSharders(cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = GET_MINERSC_SHARDERS
go getInfoFromSharders(url, 0, cb)
return
}
func GetMinerSCNodeInfo(id string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
go getInfoFromSharders(withParams(GET_MINERSC_NODE, Params{
"id": id,
}), 0, cb)
return
}
func GetMinerSCNodePool(id, poolID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
go getInfoFromSharders(withParams(GET_MINERSC_POOL, Params{
"id": id,
"pool_id": poolID,
}), 0, cb)
return
}
type MinerSCDelegatePoolInfo struct {
ID common.Key `json:"id"` // pool ID
Balance common.Balance `json:"balance"` //
InterestPaid common.Balance `json:"interest_paid"` //
RewardPaid common.Balance `json:"reward_paid"` //
Status string `json:"status"` //
High common.Balance `json:"high"` // }
Low common.Balance `json:"low"` // }
}
type MinerSCUserPoolsInfo struct {
Pools map[string]map[string][]*MinerSCDelegatePoolInfo `json:"pools"`
}
func GetMinerSCUserInfo(clientID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
if clientID == "" {
clientID = _config.wallet.ClientID
}
go getInfoFromSharders(withParams(GET_MINERSC_USER, Params{
"client_id": clientID,
}), 0, cb)
return
}
type MinerSCConfig struct {
ViewChange int64 `json:"view_change"`
MaxN int `json:"max_n"`
MinN int `json:"min_n"`
MinS int `json:"min_s"`
MaxS int `json:"max_s"`
TPercent float64 `json:"t_percent"`
KPercent float64 `json:"k_percent"`
LastRound int64 `json:"last_round"`
MaxStake common.Balance `json:"max_stake"`
MinStake common.Balance `json:"min_stake"`
InterestRate float64 `json:"interest_rate"`
RewardRate float64 `json:"reward_rate"`
ShareRatio float64 `json:"share_ratio"`
BlockReward common.Balance `json:"block_reward"`
MaxCharge float64 `json:"max_charge"`
Epoch int64 `json:"epoch"`
RewardDeclineRate float64 `json:"reward_decline_rate"`
InterestDeclineRate float64 `json:"interest_decline_rate"`
MaxMint common.Balance `json:"max_mint"`
Minted common.Balance `json:"minted"`
MaxDelegates int `json:"max_delegates"`
}
func GetMinerSCConfig(cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
go getInfoFromSharders(GET_MINERSC_CONFIG, 0, cb)
return
}
//
// Storage SC
//
// GetStorageSCConfig obtains Storage SC configurations.
func GetStorageSCConfig(cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = STORAGESC_GET_SC_CONFIG
go getInfoFromSharders(url, OpStorageSCGetConfig, cb)
return
}
// GetChallengePoolInfo obtains challenge pool information for an allocation.
func GetChallengePoolInfo(allocID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = withParams(STORAGESC_GET_CHALLENGE_POOL_INFO, Params{
"allocation_id": allocID,
})
go getInfoFromSharders(url, OpStorageSCGetChallengePoolInfo, cb)
return
}
// GetAllocation obtains allocation information.
func GetAllocation(allocID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = withParams(STORAGESC_GET_ALLOCATION, Params{
"allocation": allocID,
})
go getInfoFromSharders(url, OpStorageSCGetAllocation, cb)
return
}
// GetAllocations obtains list of allocations of a user.
func GetAllocations(clientID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
if clientID == "" {
clientID = _config.wallet.ClientID
}
var url = withParams(STORAGESC_GET_ALLOCATIONS, Params{
"client": clientID,
})
go getInfoFromSharders(url, OpStorageSCGetAllocations, cb)
return
}
// GetReadPoolInfo obtains information about read pool of a user.
func GetReadPoolInfo(clientID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
if clientID == "" {
clientID = _config.wallet.ClientID
}
var url = withParams(STORAGESC_GET_READ_POOL_INFO, Params{
"client_id": clientID,
})
go getInfoFromSharders(url, OpStorageSCGetReadPoolInfo, cb)
return
}
// GetStakePoolInfo obtains information about stake pool of a blobber and
// related validator.
func GetStakePoolInfo(blobberID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = withParams(STORAGESC_GET_STAKE_POOL_INFO, Params{
"blobber_id": blobberID,
})
go getInfoFromSharders(url, OpStorageSCGetStakePoolInfo, cb)
return
}
// GetStakePoolUserInfo for a user.
func GetStakePoolUserInfo(clientID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
if clientID == "" {
clientID = _config.wallet.ClientID
}
var url = withParams(STORAGESC_GET_STAKE_POOL_USER_INFO, Params{
"client_id": clientID,
})
go getInfoFromSharders(url, OpStorageSCGetStakePoolInfo, cb)
return
}
// GetBlobbers obtains list of all active blobbers.
func GetBlobbers(cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = STORAGESC_GET_BLOBBERS
go getInfoFromSharders(url, OpStorageSCGetBlobbers, cb)
return
}
// GetBlobber obtains blobber information.
func GetBlobber(blobberID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
var url = withParams(STORAGESC_GET_BLOBBER, Params{
"blobber_id": blobberID,
})
go getInfoFromSharders(url, OpStorageSCGetBlobber, cb)
return
}
// GetWritePoolInfo obtains information about all write pools of a user.
// If given clientID is empty, then current user used.
func GetWritePoolInfo(clientID string, cb GetInfoCallback) (err error) {
if err = checkConfig(); err != nil {
return
}
if clientID == "" {
clientID = _config.wallet.ClientID
}
var url = withParams(STORAGESC_GET_WRITE_POOL_INFO, Params{
"client_id": clientID,
})
go getInfoFromSharders(url, OpStorageSCGetWritePoolInfo, cb)
return
}
func Encrypt(key, text string) (string, error) {
keyBytes := []byte(key)
textBytes := []byte(text)
response, err := zboxutil.Encrypt(keyBytes, textBytes)
if err != nil {
return "", err
}
return hex.EncodeToString(response), nil
}
func Decrypt(key, text string) (string, error) {
keyBytes := []byte(key)
textBytes, _ := hex.DecodeString(text)
response, err := zboxutil.Decrypt(keyBytes, textBytes)
if err != nil {
return "", err
}
return string(response), nil
}
android.go | package commit_msg
import "text/template"
const (
// TmplNameAndroid is the name of the commit message template used by
// rollers which roll into Android.
TmplNameAndroid = "android"
)
var (
// TmplAndroid is the commit message template used by rollers which roll
// into Android. It can be referenced in config files using tmplNameAndroid.
tmplAndroid = template.Must(parseCommitMsgTemplate(tmplCommitMsg, TmplNameAndroid,
`{{- define "footer" -}}
{{ if .IncludeTbrLine -}}
Tbr: {{ stringsJoin .Reviewers "," }}
{{ end -}}
Test: Presubmit checks will test this change.
Exempt-From-Owner-Approval: The autoroll bot does not require owner approval.
{{ if .BugProject -}}
{{ range .Bugs }}Bug: {{ . }}
{{ end }}
{{- end -}}
{{- if .IncludeTests -}}
{{ range .Tests }}Test: {{ . }}
{{- end}}
{{- end -}}
{{- end -}}`))
)
decoder.go | package simulation
import (
"bytes"
"fmt"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/x/nft/internal/types"
)
// DecodeStore unmarshals the KVPair's Value to the corresponding nft type
func DecodeStore(cdc *codec.Codec, kvA, kvB cmn.KVPair) string {
switch {
case bytes.Equal(kvA.Key[:1], types.CollectionsKeyPrefix):
var collectionA, collectionB types.Collection
cdc.MustUnmarshalBinaryLengthPrefixed(kvA.Value, &collectionA)
cdc.MustUnmarshalBinaryLengthPrefixed(kvB.Value, &collectionB)
return fmt.Sprintf("%v\n%v", collectionA, collectionB)
case bytes.Equal(kvA.Key[:1], types.OwnersKeyPrefix):
var idCollectionA, idCollectionB types.IDCollection
cdc.MustUnmarshalBinaryLengthPrefixed(kvA.Value, &idCollectionA)
cdc.MustUnmarshalBinaryLengthPrefixed(kvB.Value, &idCollectionB)
return fmt.Sprintf("%v\n%v", idCollectionA, idCollectionB)
default:
panic(fmt.Sprintf("invalid %s key prefix %X", types.ModuleName, kvA.Key[:1]))
}
}
challenge.py | DATA = [
{
'name': 'Facundo',
'age': 72,
'organization': 'Platzi',
'position': 'Technical Mentor',
'language': 'python',
},
{
'name': 'Luisana',
'age': 33,
'organization': 'Globant',
'position': 'UX Designer',
'language': 'javascript',
},
{
'name': 'Héctor',
'age': 19,
'organization': 'Platzi',
'position': 'Associate',
'language': 'ruby',
},
{
'name': 'Gabriel',
'age': 20,
'organization': 'Platzi',
'position': 'Associate',
'language': 'javascript',
},
{
'name': 'Mariandrea',
'age': 30,
'organization': 'Platzi',
'position': 'QA Manager',
'language': 'java',
},
{
'name': 'Karo',
'age': 23,
'organization': 'Everis',
'position': 'Backend Developer',
'language': 'python',
},
{
'name': 'Ariel',
'age': 32,
'organization': 'Rappi',
'position': 'Support',
'language': '',
},
{
'name': 'Juan',
'age': 17,
'organization': '',
'position': 'Student',
'language': 'go',
},
{
'name': 'Pablo',
'age': 32,
'organization': 'Master',
'position': 'Human Resources Manager',
'language': 'python',
},
{
'name': 'Lorena',
'age': 56,
'organization': 'Python Organization',
'position': 'Language Maker',
'language': 'python',
},
]
def homeless(data):
person = data.copy()
if data['organization'] == '':
person['homeless'] = True
else:
person['homeless'] = False
return person
def old(data):
person = data.copy()
if data['age'] > 30:
person['old'] = True
else:
        person['old'] = False
return person
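
# Example (sketch): old({'age': 31}) returns {'age': 31, 'old': True}, while
# old({'age': 25}) returns {'age': 25, 'old': False}.
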
def run():
all_python_devs = list(filter(lambda dev: dev['language'] == 'python' , DATA))
all_Platzi_workers = list(filter(lambda worker: worker['organization'] == 'Platzi', DATA))
adults = list(filter(lambda person: person['age'] > 18, DATA))
workers = list(map(homeless, DATA))
old_people = list(map(old, DATA))
print('Python devs: ')
for dev in all_python_devs:
print(dev['name'])
print('\n\n')
print('Platzi workers: ')
for worker in all_Platzi_workers:
print(worker['name'])
print('\n\n')
print('Adults: ')
for adult in adults:
print(adult['name'])
print('\n\n')
print(workers)
print('\n\n')
print(old_people)
print('\n\n')
# Remember: when possible, use lambdas
if __name__ == '__main__':
    run()
textarea.js | import phaser from 'phaser/src/phaser.js';
import UIPlugin from '../../templates/ui/ui-plugin.js';
const COLOR_PRIMARY = 0x4e342e;
const COLOR_LIGHT = 0x7b5e57;
const COLOR_DARK = 0x260e04;
class Demo extends Phaser.Scene {
constructor() {
super({
key: 'examples'
})
}
preload() { }
create() {
var textArea = this.rexUI.add.textArea({
x: 400,
y: 300,
width: 220,
height: 260,
background: this.rexUI.add.roundRectangle(0, 0, 2, 2, 0, COLOR_PRIMARY),
// text: this.add.text(),
text: this.rexUI.add.BBCodeText(),
// textMask: false,
slider: {
track: this.rexUI.add.roundRectangle(0, 0, 20, 10, 10, COLOR_DARK),
thumb: this.rexUI.add.roundRectangle(0, 0, 0, 0, 13, COLOR_LIGHT),
},
space: {
left: 0,
right: 0,
top: 0,
bottom: 0,
text: 10,
                // text: {
                //     top: 20,
// left: 20,
// right: 20,
// },
header: 0,
footer: 0,
},
mouseWheelScroller: {
focus: false,
speed: 0.1
},
header: this.rexUI.add.label({
height: 30,
orientation: 0,
background: this.rexUI.add.roundRectangle(0, 0, 20, 20, 0, COLOR_DARK),
text: this.add.text(0, 0, 'Header'),
}),
footer: this.rexUI.add.label({
height: 30,
orientation: 0,
background: this.rexUI.add.roundRectangle(0, 0, 20, 20, 0, COLOR_DARK),
text: this.add.text(0, 0, 'Footer'),
}),
content: CreateContent(10000),
})
.layout()
.drawBounds(this.add.graphics(), 0xff0000);
//textArea.setText(CreateContent(10000));
}
update() { }
}
var content = `Phaser is a fast, free, and fun open source HTML5 game framework that offers WebGL and Canvas rendering across desktop and mobile web browsers. Games can be compiled to iOS, Android and native apps by using 3rd party tools. You can use JavaScript or TypeScript for development.`;
var CreateContent = function (linesCount) {
var numbers = [];
for (var i = 0; i < linesCount; i++) {
numbers.push('[color=' + ((i % 2) ? 'green' : 'yellow') + ']' + i.toString() + '[/color]');
}
return content + '\n' + numbers.join('\n');
}
var config = {
type: Phaser.AUTO,
parent: 'phaser-example',
width: 800,
height: 600,
scale: {
mode: Phaser.Scale.FIT,
autoCenter: Phaser.Scale.CENTER_BOTH,
},
scene: Demo,
plugins: {
scene: [{
key: 'rexUI',
plugin: UIPlugin,
mapping: 'rexUI'
}]
}
};
var game = new Phaser.Game(config);
OrbitControls.js | /**
* @author qiao / https://github.com/qiao
* @author mrdoob / http://mrdoob.com
* @author alteredq / http://alteredqualia.com/
* @author WestLangley / http://github.com/WestLangley
* @author erich666 / http://erichaines.com
* @author ScieCode / http://github.com/sciecode
*/
import {
EventDispatcher,
MOUSE,
Quaternion,
Spherical,
TOUCH,
Vector2,
Vector3
} from "./three.module.js";
// This set of controls performs orbiting, dollying (zooming), and panning.
// Unlike TrackballControls, it maintains the "up" direction object.up (+Y by default).
//
// Orbit - left mouse / touch: one-finger move
// Zoom - middle mouse, or mousewheel / touch: two-finger spread or squish
// Pan - right mouse, or left mouse + ctrl/meta/shiftKey, or arrow keys / touch: two-finger move
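//
// Usage sketch (assumes an existing `camera`, `scene` and `renderer`):
//
//   var controls = new OrbitControls( camera, renderer.domElement );
//   controls.enableDamping = true; // damping needs controls.update() each frame
//   function animate() {
//     requestAnimationFrame( animate );
//     controls.update();
//     renderer.render( scene, camera );
//   }
//   animate();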
var OrbitControls = function ( object, domElement ) {
if ( domElement === undefined ) console.warn( 'THREE.OrbitControls: The second parameter "domElement" is now mandatory.' );
if ( domElement === document ) console.error( 'THREE.OrbitControls: "document" should not be used as the target "domElement". Please use "renderer.domElement" instead.' );
this.object = object;
this.domElement = domElement;
// Set to false to disable this control
this.enabled = true;
// "target" sets the location of focus, where the object orbits around
this.target = new Vector3();
// How far you can dolly in and out ( PerspectiveCamera only )
this.minDistance = 0;
this.maxDistance = Infinity;
// How far you can zoom in and out ( OrthographicCamera only )
this.minZoom = 0;
this.maxZoom = Infinity;
// How far you can orbit vertically, upper and lower limits.
// Range is 0 to Math.PI radians.
this.minPolarAngle = 0; // radians
this.maxPolarAngle = Math.PI; // radians
// How far you can orbit horizontally, upper and lower limits.
// If set, must be a sub-interval of the interval [ - Math.PI, Math.PI ].
this.minAzimuthAngle = - Infinity; // radians
this.maxAzimuthAngle = Infinity; // radians
// Set to true to enable damping (inertia)
// If damping is enabled, you must call controls.update() in your animation loop
this.enableDamping = false;
this.dampingFactor = 0.05;
// This option actually enables dollying in and out; left as "zoom" for backwards compatibility.
// Set to false to disable zooming
this.enableZoom = true;
this.zoomSpeed = 1.0;
// Set to false to disable rotating
this.enableRotate = true;
this.rotateSpeed = 1.0;
// Set to false to disable panning
this.enablePan = true;
this.panSpeed = 1.0;
this.screenSpacePanning = false; // if true, pan in screen-space
this.keyPanSpeed = 7.0; // pixels moved per arrow key push
// Set to true to automatically rotate around the target
// If auto-rotate is enabled, you must call controls.update() in your animation loop
this.autoRotate = false;
this.autoRotateSpeed = 2.0; // 30 seconds per round when fps is 60
// Set to false to disable use of the keys
this.enableKeys = true;
// The four arrow keys
this.keys = { LEFT: 37, UP: 38, RIGHT: 39, BOTTOM: 40 };
// Mouse buttons
this.mouseButtons = { LEFT: MOUSE.ROTATE, MIDDLE: MOUSE.DOLLY, RIGHT: MOUSE.PAN };
// Touch fingers
this.touches = { ONE: TOUCH.ROTATE, TWO: TOUCH.DOLLY_PAN };
// for reset
this.target0 = this.target.clone();
this.position0 = this.object.position.clone();
this.zoom0 = this.object.zoom;
//
// public methods
//
this.getPolarAngle = function () {
return spherical.phi;
};
this.getAzimuthalAngle = function () {
return spherical.theta;
};
this.saveState = function () {
scope.target0.copy( scope.target );
scope.position0.copy( scope.object.position );
scope.zoom0 = scope.object.zoom;
};
this.reset = function () {
scope.target.copy( scope.target0 );
scope.object.position.copy( scope.position0 );
scope.object.zoom = scope.zoom0;
scope.object.updateProjectionMatrix();
scope.dispatchEvent( changeEvent );
scope.update();
state = STATE.NONE;
};
// this method is exposed, but perhaps it would be better if we can make it private...
this.update = function () {
var offset = new Vector3();
// so camera.up is the orbit axis
var quat = new Quaternion().setFromUnitVectors( object.up, new Vector3( 0, 1, 0 ) );
var quatInverse = quat.clone().inverse();
var lastPosition = new Vector3();
var lastQuaternion = new Quaternion();
return function update() {
var position = scope.object.position;
offset.copy( position ).sub( scope.target );
// rotate offset to "y-axis-is-up" space
offset.applyQuaternion( quat );
// angle from z-axis around y-axis
spherical.setFromVector3( offset );
if ( scope.autoRotate && state === STATE.NONE ) {
rotateLeft( getAutoRotationAngle() );
}
if ( scope.enableDamping ) {
spherical.theta += sphericalDelta.theta * scope.dampingFactor;
spherical.phi += sphericalDelta.phi * scope.dampingFactor;
} else {
spherical.theta += sphericalDelta.theta;
spherical.phi += sphericalDelta.phi;
}
// restrict theta to be between desired limits
spherical.theta = Math.max( scope.minAzimuthAngle, Math.min( scope.maxAzimuthAngle, spherical.theta ) );
// restrict phi to be between desired limits
spherical.phi = Math.max( scope.minPolarAngle, Math.min( scope.maxPolarAngle, spherical.phi ) );
spherical.makeSafe();
spherical.radius *= scale;
// restrict radius to be between desired limits
spherical.radius = Math.max( scope.minDistance, Math.min( scope.maxDistance, spherical.radius ) );
// move target to panned location
if ( scope.enableDamping === true ) {
scope.target.addScaledVector( panOffset, scope.dampingFactor );
} else {
scope.target.add( panOffset );
}
offset.setFromSpherical( spherical );
// rotate offset back to "camera-up-vector-is-up" space
offset.applyQuaternion( quatInverse );
position.copy( scope.target ).add( offset );
scope.object.lookAt( scope.target );
if ( scope.enableDamping === true ) {
sphericalDelta.theta *= ( 1 - scope.dampingFactor );
sphericalDelta.phi *= ( 1 - scope.dampingFactor );
panOffset.multiplyScalar( 1 - scope.dampingFactor );
} else {
sphericalDelta.set( 0, 0, 0 );
panOffset.set( 0, 0, 0 );
}
scale = 1;
// update condition is:
// min(camera displacement, camera rotation in radians)^2 > EPS
// using small-angle approximation cos(x/2) = 1 - x^2 / 8
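			// (for unit quaternions q1·q2 = cos(θ/2), so with cos(θ/2) ≈ 1 - θ²/8
			// the expression 8 * ( 1 - lastQuaternion.dot( ... ) ) approximates θ²)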
if ( zoomChanged ||
lastPosition.distanceToSquared( scope.object.position ) > EPS ||
8 * ( 1 - lastQuaternion.dot( scope.object.quaternion ) ) > EPS ) {
scope.dispatchEvent( changeEvent );
lastPosition.copy( scope.object.position );
lastQuaternion.copy( scope.object.quaternion );
zoomChanged = false;
return true;
}
return false;
};
}();
this.dispose = function () {
scope.domElement.removeEventListener( 'contextmenu', onContextMenu, false );
scope.domElement.removeEventListener( 'mousedown', onMouseDown, false );
scope.domElement.removeEventListener( 'wheel', onMouseWheel, false );
scope.domElement.removeEventListener( 'touchstart', onTouchStart, false );
scope.domElement.removeEventListener( 'touchend', onTouchEnd, false );
scope.domElement.removeEventListener( 'touchmove', onTouchMove, false );
document.removeEventListener( 'mousemove', onMouseMove, false );
document.removeEventListener( 'mouseup', onMouseUp, false );
scope.domElement.removeEventListener( 'keydown', onKeyDown, false );
//scope.dispatchEvent( { type: 'dispose' } ); // should this be added here?
};
//
// internals
//
var scope = this;
var changeEvent = { type: 'change' };
var startEvent = { type: 'start' };
var endEvent = { type: 'end' };
var STATE = {
NONE: - 1,
ROTATE: 0,
DOLLY: 1,
PAN: 2,
TOUCH_ROTATE: 3,
TOUCH_PAN: 4,
TOUCH_DOLLY_PAN: 5,
TOUCH_DOLLY_ROTATE: 6
};
var state = STATE.NONE;
var EPS = 0.000001;
// current position in spherical coordinates
var spherical = new Spherical();
var sphericalDelta = new Spherical();
var scale = 1;
var panOffset = new Vector3();
var zoomChanged = false;
var rotateStart = new Vector2();
var rotateEnd = new Vector2();
var rotateDelta = new Vector2();
var panStart = new Vector2();
var panEnd = new Vector2();
var panDelta = new Vector2();
var dollyStart = new Vector2();
var dollyEnd = new Vector2();
var dollyDelta = new Vector2();
function getAutoRotationAngle() {
return 2 * Math.PI / 60 / 60 * scope.autoRotateSpeed;
}
function getZoomScale() {
return Math.pow( 0.95, scope.zoomSpeed );
}
function rotateLeft( angle ) {
sphericalDelta.theta -= angle;
}
function rotateUp( angle ) {
sphericalDelta.phi -= angle;
}
var panLeft = function () {
var v = new Vector3();
return function panLeft( distance, objectMatrix ) {
v.setFromMatrixColumn( objectMatrix, 0 ); // get X column of objectMatrix
v.multiplyScalar( - distance );
panOffset.add( v );
};
}();
var panUp = function () {
var v = new Vector3();
return function panUp( distance, objectMatrix ) {
if ( scope.screenSpacePanning === true ) {
v.setFromMatrixColumn( objectMatrix, 1 );
} else {
v.setFromMatrixColumn( objectMatrix, 0 );
v.crossVectors( scope.object.up, v );
}
v.multiplyScalar( distance );
panOffset.add( v );
};
}();
// deltaX and deltaY are in pixels; right and down are positive
var pan = function () {
var offset = new Vector3();
return function pan( deltaX, deltaY ) {
var element = scope.domElement;
if ( scope.object.isPerspectiveCamera ) {
// perspective
var position = scope.object.position;
offset.copy( position ).sub( scope.target );
var targetDistance = offset.length();
// half of the fov is center to top of screen
targetDistance *= Math.tan( ( scope.object.fov / 2 ) * Math.PI / 180.0 );
// we use only clientHeight here so aspect ratio does not distort speed
panLeft( 2 * deltaX * targetDistance / element.clientHeight, scope.object.matrix );
panUp( 2 * deltaY * targetDistance / element.clientHeight, scope.object.matrix );
} else if ( scope.object.isOrthographicCamera ) {
// orthographic
panLeft( deltaX * ( scope.object.right - scope.object.left ) / scope.object.zoom / element.clientWidth, scope.object.matrix );
panUp( deltaY * ( scope.object.top - scope.object.bottom ) / scope.object.zoom / element.clientHeight, scope.object.matrix );
} else {
// camera neither orthographic nor perspective
console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - pan disabled.' );
scope.enablePan = false;
}
};
}();
function dollyOut( dollyScale ) {
if ( scope.object.isPerspectiveCamera ) {
scale /= dollyScale;
} else if ( scope.object.isOrthographicCamera ) {
scope.object.zoom = Math.max( scope.minZoom, Math.min( scope.maxZoom, scope.object.zoom * dollyScale ) );
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - dolly/zoom disabled.' );
scope.enableZoom = false;
}
}
function dollyIn( dollyScale ) {
if ( scope.object.isPerspectiveCamera ) {
scale *= dollyScale;
} else if ( scope.object.isOrthographicCamera ) {
scope.object.zoom = Math.max( scope.minZoom, Math.min( scope.maxZoom, scope.object.zoom / dollyScale ) );
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - dolly/zoom disabled.' );
scope.enableZoom = false;
}
}
//
// event callbacks - update the object state
//
function handleMouseDownRotate( event ) {
rotateStart.set( event.clientX, event.clientY );
}
function handleMouseDownDolly( event ) {
dollyStart.set( event.clientX, event.clientY );
}
function handleMouseDownPan( event ) {
panStart.set( event.clientX, event.clientY );
}
function handleMouseMoveRotate( event ) {
rotateEnd.set( event.clientX, event.clientY );
rotateDelta.subVectors( rotateEnd, rotateStart ).multiplyScalar( scope.rotateSpeed );
var element = scope.domElement;
rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientHeight ); // yes, height
rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight );
rotateStart.copy( rotateEnd );
scope.update();
}
	function handleMouseMoveDolly( event ) {
dollyEnd.set( event.clientX, event.clientY );
dollyDelta.subVectors( dollyEnd, dollyStart );
if ( dollyDelta.y > 0 ) {
dollyOut( getZoomScale() );
} else if ( dollyDelta.y < 0 ) {
dollyIn( getZoomScale() );
}
dollyStart.copy( dollyEnd );
scope.update();
}
function handleMouseMovePan( event ) {
panEnd.set( event.clientX, event.clientY );
panDelta.subVectors( panEnd, panStart ).multiplyScalar( scope.panSpeed );
pan( panDelta.x, panDelta.y );
panStart.copy( panEnd );
scope.update();
}
function handleMouseUp( /*event*/ ) {
// no-op
}
function handleMouseWheel( event ) {
if ( event.deltaY < 0 ) {
dollyIn( getZoomScale() );
} else if ( event.deltaY > 0 ) {
dollyOut( getZoomScale() );
}
scope.update();
}
function handleKeyDown( event ) {
var needsUpdate = false;
switch ( event.keyCode ) {
case scope.keys.UP:
pan( 0, scope.keyPanSpeed );
needsUpdate = true;
break;
case scope.keys.BOTTOM:
pan( 0, - scope.keyPanSpeed );
needsUpdate = true;
break;
case scope.keys.LEFT:
pan( scope.keyPanSpeed, 0 );
needsUpdate = true;
break;
case scope.keys.RIGHT:
pan( - scope.keyPanSpeed, 0 );
needsUpdate = true;
break;
}
if ( needsUpdate ) {
// prevent the browser from scrolling on cursor keys
event.preventDefault();
scope.update();
}
}
function handleTouchStartRotate( event ) {
if ( event.touches.length == 1 ) {
rotateStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
} else {
var x = 0.5 * ( event.touches[ 0 ].pageX + event.touches[ 1 ].pageX );
var y = 0.5 * ( event.touches[ 0 ].pageY + event.touches[ 1 ].pageY );
rotateStart.set( x, y );
}
}
function handleTouchStartPan( event ) {
if ( event.touches.length == 1 ) {
panStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
} else {
var x = 0.5 * ( event.touches[ 0 ].pageX + event.touches[ 1 ].pageX );
var y = 0.5 * ( event.touches[ 0 ].pageY + event.touches[ 1 ].pageY );
panStart.set( x, y );
}
}
function handleTouchStartDolly( event ) {
var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX;
var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY;
var distance = Math.sqrt( dx * dx + dy * dy );
dollyStart.set( 0, distance );
}
function handleTouchStartDollyPan( event ) {
if ( scope.enableZoom ) handleTouchStartDolly( event );
if ( scope.enablePan ) handleTouchStartPan( event );
}
function handleTouchStartDollyRotate( event ) {
if ( scope.enableZoom ) handleTouchStartDolly( event );
if ( scope.enableRotate ) handleTouchStartRotate( event );
}
function handleTouchMoveRotate( event ) {
if ( event.touches.length == 1 ) {
rotateEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
} else {
var x = 0.5 * ( event.touches[ 0 ].pageX + event.touches[ 1 ].pageX );
var y = 0.5 * ( event.touches[ 0 ].pageY + event.touches[ 1 ].pageY );
rotateEnd.set( x, y );
}
rotateDelta.subVectors( rotateEnd, rotateStart ).multiplyScalar( scope.rotateSpeed );
var element = scope.domElement;
rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientHeight ); // yes, height
rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight );
rotateStart.copy( rotateEnd );
}
function handleTouchMovePan( event ) {
if ( event.touches.length == 1 ) {
panEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY );
} else {
var x = 0.5 * ( event.touches[ 0 ].pageX + event.touches[ 1 ].pageX );
var y = 0.5 * ( event.touches[ 0 ].pageY + event.touches[ 1 ].pageY );
panEnd.set( x, y );
}
panDelta.subVectors( panEnd, panStart ).multiplyScalar( scope.panSpeed );
pan( panDelta.x, panDelta.y );
panStart.copy( panEnd );
}
function handleTouchMoveDolly( event ) {
var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX;
var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY;
var distance = Math.sqrt( dx * dx + dy * dy );
dollyEnd.set( 0, distance );
dollyDelta.set( 0, Math.pow( dollyEnd.y / dollyStart.y, scope.zoomSpeed ) );
dollyOut( dollyDelta.y );
dollyStart.copy( dollyEnd );
}
function handleTouchMoveDollyPan( event ) {
if ( scope.enableZoom ) handleTouchMoveDolly( event );
if ( scope.enablePan ) handleTouchMovePan( event );
}
function handleTouchMoveDollyRotate( event ) {
if ( scope.enableZoom ) handleTouchMoveDolly( event );
if ( scope.enableRotate ) handleTouchMoveRotate( event );
}
function handleTouchEnd( /*event*/ ) {
// no-op
}
//
// event handlers - FSM: listen for events and reset state
//
function onMouseDown( event ) {
if ( scope.enabled === false ) return;
// Prevent the browser from scrolling.
event.preventDefault();
// Manually set the focus since calling preventDefault above
// prevents the browser from setting it automatically.
scope.domElement.focus ? scope.domElement.focus() : window.focus();
var mouseAction;
switch ( event.button ) {
case 0:
mouseAction = scope.mouseButtons.LEFT;
break;
case 1:
mouseAction = scope.mouseButtons.MIDDLE;
break;
case 2:
mouseAction = scope.mouseButtons.RIGHT;
break;
default:
mouseAction = - 1;
}
switch ( mouseAction ) {
case MOUSE.DOLLY:
if ( scope.enableZoom === false ) return;
handleMouseDownDolly( event );
state = STATE.DOLLY;
break;
case MOUSE.ROTATE:
if ( event.ctrlKey || event.metaKey || event.shiftKey ) {
if ( scope.enablePan === false ) return;
handleMouseDownPan( event );
state = STATE.PAN;
} else {
if ( scope.enableRotate === false ) return;
handleMouseDownRotate( event );
state = STATE.ROTATE;
}
break;
case MOUSE.PAN:
if ( event.ctrlKey || event.metaKey || event.shiftKey ) {
if ( scope.enableRotate === false ) return;
handleMouseDownRotate( event );
state = STATE.ROTATE;
} else {
if ( scope.enablePan === false ) return;
handleMouseDownPan( event );
state = STATE.PAN;
}
break;
default:
state = STATE.NONE;
}
if ( state !== STATE.NONE ) {
document.addEventListener( 'mousemove', onMouseMove, false );
document.addEventListener( 'mouseup', onMouseUp, false );
scope.dispatchEvent( startEvent );
}
}
function onMouseMove( event ) {
if ( scope.enabled === false ) return;
event.preventDefault();
switch ( state ) {
case STATE.ROTATE:
if ( scope.enableRotate === false ) return;
handleMouseMoveRotate( event );
break;
case STATE.DOLLY:
if ( scope.enableZoom === false ) return;
handleMouseMoveDolly( event );
break;
case STATE.PAN:
if ( scope.enablePan === false ) return;
handleMouseMovePan( event );
break;
}
}
function onMouseUp( event ) {
if ( scope.enabled === false ) return;
handleMouseUp( event );
document.removeEventListener( 'mousemove', onMouseMove, false );
document.removeEventListener( 'mouseup', onMouseUp, false );
scope.dispatchEvent( endEvent );
state = STATE.NONE;
}
function onMouseWheel( event ) {
if ( scope.enabled === false || scope.enableZoom === false || ( state !== STATE.NONE && state !== STATE.ROTATE ) ) return;
event.preventDefault();
event.stopPropagation();
scope.dispatchEvent( startEvent );
handleMouseWheel( event );
scope.dispatchEvent( endEvent );
}
function onKeyDown( event ) {
if ( scope.enabled === false || scope.enableKeys === false || scope.enablePan === false ) return;
handleKeyDown( event );
}
function onTouchStart( event ) {
if ( scope.enabled === false ) return;
event.preventDefault(); // prevent scrolling
switch ( event.touches.length ) {
case 1:
switch ( scope.touches.ONE ) {
case TOUCH.ROTATE:
if ( scope.enableRotate === false ) return;
handleTouchStartRotate( event );
state = STATE.TOUCH_ROTATE;
break;
case TOUCH.PAN:
if ( scope.enablePan === false ) return;
handleTouchStartPan( event );
state = STATE.TOUCH_PAN;
break;
default:
state = STATE.NONE;
}
break;
case 2:
switch ( scope.touches.TWO ) {
case TOUCH.DOLLY_PAN:
if ( scope.enableZoom === false && scope.enablePan === false ) return;
handleTouchStartDollyPan( event );
state = STATE.TOUCH_DOLLY_PAN;
break;
case TOUCH.DOLLY_ROTATE:
if ( scope.enableZoom === false && scope.enableRotate === false ) return;
handleTouchStartDollyRotate( event );
state = STATE.TOUCH_DOLLY_ROTATE;
break;
default:
state = STATE.NONE;
}
break;
default:
state = STATE.NONE;
}
if ( state !== STATE.NONE ) {
scope.dispatchEvent( startEvent );
}
}
function onTouchMove( event ) {
if ( scope.enabled === false ) return;
event.preventDefault(); // prevent scrolling
event.stopPropagation();
switch ( state ) {
case STATE.TOUCH_ROTATE:
if ( scope.enableRotate === false ) return;
handleTouchMoveRotate( event );
scope.update();
break;
case STATE.TOUCH_PAN:
if ( scope.enablePan === false ) return;
handleTouchMovePan( event );
scope.update();
break;
case STATE.TOUCH_DOLLY_PAN:
if ( scope.enableZoom === false && scope.enablePan === false ) return;
handleTouchMoveDollyPan( event );
scope.update();
break;
case STATE.TOUCH_DOLLY_ROTATE:
if ( scope.enableZoom === false && scope.enableRotate === false ) return;
handleTouchMoveDollyRotate( event );
scope.update();
break;
default:
state = STATE.NONE;
}
}
function onTouchEnd( event ) {
if ( scope.enabled === false ) return;
handleTouchEnd( event );
scope.dispatchEvent( endEvent );
state = STATE.NONE;
}
function onContextMenu( event ) {
if ( scope.enabled === false ) return;
event.preventDefault();
}
//
scope.domElement.addEventListener( 'contextmenu', onContextMenu, false );
scope.domElement.addEventListener( 'mousedown', onMouseDown, false );
scope.domElement.addEventListener( 'wheel', onMouseWheel, false );
scope.domElement.addEventListener( 'touchstart', onTouchStart, false );
scope.domElement.addEventListener( 'touchend', onTouchEnd, false );
scope.domElement.addEventListener( 'touchmove', onTouchMove, false );
scope.domElement.addEventListener( 'keydown', onKeyDown, false );
// make sure element can receive keys.
if ( scope.domElement.tabIndex === - 1 ) {
scope.domElement.tabIndex = 0;
}
// force an update at start
this.update();
};
OrbitControls.prototype = Object.create( EventDispatcher.prototype );
OrbitControls.prototype.constructor = OrbitControls;
// This set of controls performs orbiting, dollying (zooming), and panning.
// Unlike TrackballControls, it maintains the "up" direction object.up (+Y by default).
// This is very similar to OrbitControls, another set of touch behavior
//
// Orbit - right mouse, or left mouse + ctrl/meta/shiftKey / touch: two-finger rotate
// Zoom - middle mouse, or mousewheel / touch: two-finger spread or squish
// Pan - left mouse, or arrow keys / touch: one-finger move
var MapControls = function ( object, domElement ) {
OrbitControls.call( this, object, domElement );
this.mouseButtons.LEFT = MOUSE.PAN;
this.mouseButtons.RIGHT = MOUSE.ROTATE;
this.touches.ONE = TOUCH.PAN;
this.touches.TWO = TOUCH.DOLLY_ROTATE;
};
MapControls.prototype = Object.create( EventDispatcher.prototype );
MapControls.prototype.constructor = MapControls;
export { OrbitControls, MapControls };
0001_load_initial_data.py | from django.db import migrations
def create_site(apps, schema_editor):
    Site = apps.get_model("sites", "Site")
    custom_domain = "share-34244.botics.co"
    site_params = {
        "name": "SHARE",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_site),
    ]
index.ts | import {SeleniumPluginConfig} from '../types';
import {IBrowserProxyPlugin, WindowFeaturesConfig} from '@testring/types';
import {ChildProcess} from 'child_process';
import {Config, BrowserObject, remote} from 'webdriverio';
import * as deepmerge from 'deepmerge';
import {spawn} from '@testring/child-process';
import {loggerClient} from '@testring/logger';
import {absoluteExtensionPath} from '@testring/devtool-extension';
import Cookie = WebdriverIO.Cookie;
import ClickOptions = WebdriverIO.ClickOptions;
// Stupidly needed thing for making our own requests
const _webdriverReq = require('webdriver/build/request');
const WebDriverRequest = _webdriverReq.default;
type BrowserObjectCustom = BrowserObject & {
deleteSessionId: (sessionId: string) => Promise<void>;
};
type browserClientItem = {
client: BrowserObjectCustom;
sessionId: string;
initTime: number;
};
const DEFAULT_CONFIG: SeleniumPluginConfig = {
recorderExtension: false,
clientCheckInterval: 5 * 1000,
clientTimeout: 15 * 60 * 1000,
port: 4444,
logLevel: 'warn',
capabilities: {
browserName: 'chrome',
'goog:chromeOptions': {
// for local ChromeDriver
args: [] as string[],
},
},
};
function delay(timeout) {
return new Promise<void>((resolve) => setTimeout(() => resolve(), timeout));
}
function stringifyWindowFeatures(windowFeatures: WindowFeaturesConfig) {
let result;
if (typeof windowFeatures === 'string') {
result = windowFeatures;
} else {
result = Object.keys(windowFeatures)
.map((key) => `${key}=${windowFeatures[key]}`)
.join(',');
}
return result;
}
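
// Example (sketch): stringifyWindowFeatures({width: 300, height: 200}) returns
// 'width=300,height=200', while a string argument is passed through unchanged.
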
export class SeleniumPlugin implements IBrowserProxyPlugin {
private logger = loggerClient.withPrefix('[selenium-browser-process]');
private clientCheckInterval: NodeJS.Timer;
private expiredBrowserClients: Set<string> = new Set();
private browserClients: Map<string, browserClientItem> = new Map();
private waitForReadyState: Promise<void> = Promise.resolve();
private localSelenium: ChildProcess;
private config: SeleniumPluginConfig;
private incrementWinId = 0;
constructor(config: Partial<SeleniumPluginConfig> = {}) {
this.config = this.createConfig(config);
if (this.config.host === undefined) {
this.runLocalSelenium();
}
this.initIntervals();
}
private getDevelopmentConfigAdditions(): Partial<SeleniumPluginConfig> {
return {
capabilities: {
'goog:chromeOptions': {
args: [`load-extension=${absoluteExtensionPath}`],
},
},
} as any;
}
private createConfig(
config: Partial<SeleniumPluginConfig>,
): SeleniumPluginConfig {
let mergedConfig = deepmerge.all<SeleniumPluginConfig>(
[DEFAULT_CONFIG, config],
{
clone: true,
},
);
if (mergedConfig.recorderExtension) {
mergedConfig = deepmerge.all<SeleniumPluginConfig>(
[mergedConfig, this.getDevelopmentConfigAdditions()],
{
customMerge: (mergeKey) => {
if (mergeKey === 'goog:chromeOptions') {
return (itemA, itemB) => {
if (!itemA.args || !itemB.args) {
return deepmerge(itemA, itemB);
}
const res: Record<string, any> = {};
res.args = deepmerge(itemA.args, itemB.args);
let ext: string[] = res.args.filter(
(argItem: string) =>
argItem.startsWith('load-extension'),
);
if (ext.length === 2) {
ext = [
`load-extension=${ext[0]
.split('=', 2)
.pop()},${ext[1]
.split('=', 2)
.pop()}`,
];
}
res.args = [
...ext,
...res.args.filter(
(argItem: string) =>
!argItem.startsWith(
'load-extension',
),
),
];
Object.keys(itemA).forEach((key) => {
if (key === 'args') {
return;
}
if (itemB[key] !== undefined) {
res[key] = deepmerge(
itemA[key],
itemB[key],
);
} else {
res[key] = itemA[key];
}
});
Object.keys(itemB).forEach((key) => {
if (
key === 'args' ||
res[key] !== undefined
) {
return;
}
res[key] = itemB[key];
});
return res;
};
}
},
},
);
}
if (!mergedConfig.hostname && mergedConfig.host) {
mergedConfig.hostname = mergedConfig.host;
}
return mergedConfig;
}
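
    // Example (sketch): when both the default capabilities and the development
    // additions carry a `load-extension` argument, e.g. ['load-extension=/ext/a']
    // and ['load-extension=/ext/b'], the custom merge above collapses them into a
    // single ['load-extension=/ext/a,/ext/b'] entry (paths are illustrative).
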
private initIntervals() {
if (this.config.clientCheckInterval > 0) {
this.clientCheckInterval = setInterval(
() => this.checkClientsTimeout(),
this.config.clientCheckInterval,
);
}
process.on('exit', () => {
clearInterval(this.clientCheckInterval);
this.stopAllSessions().catch((err) => {
this.logger.error('Clean process exit failed', err);
});
});
}
private stopAllSessions() {
const clientsRequests: Promise<any>[] = [];
for (const [applicant] of this.browserClients) {
this.logger.debug(
`Stopping sessions before process exit for applicant ${applicant}.`,
);
clientsRequests.push(
this.end(applicant).catch((err) => {
this.logger.error(
`Session stop before process exit error for applicant ${applicant}: \n`,
err,
);
}),
);
}
return Promise.all(clientsRequests);
}
private getChromeDriverArgs() {
let chromeDriverPath;
if (this.config.chromeDriverPath) {
chromeDriverPath = this.config.chromeDriverPath;
} else {
chromeDriverPath = require('chromedriver').path;
}
return [`-Dwebdriver.chrome.driver=${chromeDriverPath}`];
}
private async runLocalSelenium() {
const seleniumServer = require('selenium-server');
const seleniumJarPath = seleniumServer.path;
this.logger.debug('Init local selenium server');
try {
this.localSelenium = spawn('java', [
...this.getChromeDriverArgs(),
'-jar',
seleniumJarPath,
'-port',
this.config.port,
]);
this.waitForReadyState = new Promise((resolve, reject) => {
if (this.localSelenium.stderr) {
this.localSelenium.stderr.on('data', (data) => {
const message = data.toString();
this.logger.verbose(message);
if (message.includes('SeleniumServer.boot')) {
delay(500).then(resolve);
}
});
} else {
reject(new Error('There is no STDERR on selenium worker'));
}
});
} catch (err) {
this.logger.error('Local selenium server init failed', err);
}
}
private getApplicantSessionId(applicant): string | undefined {
const item = this.browserClients.get(applicant);
if (item) {
return item.sessionId;
}
}
private hasBrowserClient(applicant): boolean {
return this.browserClients.has(applicant);
}
private getBrowserClient(applicant): BrowserObjectCustom {
const item = this.browserClients.get(applicant);
if (item) {
return item.client;
}
throw new Error('Browser client is not found');
}
private async pingClients() {
for (const [applicant] of this.browserClients) {
try {
await this.execute(applicant, '(function () {})()', []);
} catch (e) {
/* ignore */
}
}
}
private async closeExpiredClients() {
const timeLimit = Date.now() - this.config.clientTimeout;
for (const [applicant, clientData] of this.browserClients) {
if (clientData.initTime < timeLimit) {
this.logger.warn(
`Session applicant ${applicant} marked as expired`,
);
try {
await this.end(applicant);
} catch (e) {
this.logger.error(
`Session applicant ${applicant} failed to stop`,
e,
);
}
this.expiredBrowserClients.add(applicant);
}
}
}
private async checkClientsTimeout() {
if (this.config.clientTimeout === 0) {
await this.pingClients();
} else {
await this.closeExpiredClients();
}
}
private async createClient(applicant: string): Promise<void> {
await this.waitForReadyState;
const clientData = this.browserClients.get(applicant);
if (clientData) {
this.browserClients.set(applicant, {
...clientData,
initTime: Date.now(),
});
return;
}
if (this.expiredBrowserClients.has(applicant)) {
throw Error(
`This session expired in ${this.config.clientTimeout}ms`,
);
}
const client = await remote(this.config);
let sessionId: string;
if (client.sessionId) {
sessionId = client.sessionId;
} else {
throw Error('Session can not be null');
}
        const customClient = this.addCustomMethods(client);
this.browserClients.set(applicant, {
client: customClient,
sessionId,
initTime: Date.now(),
});
this.logger.debug(
`Started session for applicant: ${applicant}. Session id: ${sessionId}`,
);
}
    protected addCustomMethods(client: BrowserObject): BrowserObjectCustom {
        // Creating our own delete-session command so we can close the session
        // even if its id changed while the test was running
client.addCommand(
'deleteSessionId',
function (sessionId) {
const {
w3cCaps,
jsonwpCaps,
} = this.options.requestedCapabilities;
const sessionDeleteRequest = new WebDriverRequest(
'DELETE',
'/session/:sessionId',
{
capabilities: w3cCaps, // W3C compliant
desiredCapabilities: jsonwpCaps, // JSONWP compliant
},
);
return sessionDeleteRequest.makeRequest(
this.options,
sessionId,
);
},
false,
);
return client as BrowserObjectCustom;
}
public async end(applicant: string) {
await this.waitForReadyState;
if (!this.hasBrowserClient(applicant)) {
this.logger.warn(`No ${applicant} is registered`);
return;
}
const client = this.getBrowserClient(applicant);
try {
await this.alertDismiss(applicant);
} catch {
/* ignore */
}
const startingSessionID = this.getApplicantSessionId(applicant);
const sessionID = client.sessionId;
if (startingSessionID === sessionID) {
this.logger.debug(
`Stopping sessions for applicant ${applicant}. Session id: ${sessionID}`,
);
await client.deleteSession();
} else {
await this.logger.stepWarning(
`Stopping sessions for applicant warning ${applicant}. ` +
`Session ids are not equal, started with - ${startingSessionID}, ended with - ${sessionID}`,
async () => {
try {
if (startingSessionID) {
await client.deleteSessionId(startingSessionID);
}
} catch (err) {
this.logger.error(
`Old session ${startingSessionID} delete error`,
err,
);
}
try {
await client.deleteSession();
} catch (err) {
this.logger.error(
`New session ${client.sessionId} delete error`,
err,
);
}
},
);
}
this.browserClients.delete(applicant);
}
public async kill() {
this.logger.debug('Kill command is called');
for (const applicant of this.browserClients.keys()) {
try {
await this.end(applicant);
} catch (e) {
this.logger.error(e);
}
}
if (this.localSelenium) {
this.localSelenium.kill();
}
}
public async refresh(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.refresh();
}
public async click(
applicant: string,
selector: string,
options?: ClickOptions,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
        const element = await client.$(selector);
        return element.click(options);
}
public async getSize(applicant: string, selector: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const element = await client.$(selector);
return element.getSize();
}
public async url(applicant: string, val: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (!val) {
return client.getUrl();
}
return client.url(val);
}
generateWinId() {
this.incrementWinId++;
return `window-${this.incrementWinId}`;
}
public async newWindow(
applicant: string,
val: string,
windowName: string,
windowFeatures: WindowFeaturesConfig = {},
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const args = stringifyWindowFeatures(windowFeatures);
return client.newWindow(val, windowName || this.generateWinId(), args);
}
public async waitForExist(
applicant: string,
xpath: string,
timeout: number,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.waitForExist(timeout);
}
public async waitForVisible(
applicant: string,
xpath: string,
timeout: number,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.waitForDisplayed(timeout);
}
public async isVisible(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.isDisplayed();
}
public async moveToObject(
applicant: string,
xpath: string,
x: number,
y: number,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.moveTo(x || 0, y || 0);
}
public async execute(applicant: string, fn: any, args: Array<any>) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.execute(fn, ...args);
}
public async executeAsync(applicant: string, fn: any, args: Array<any>) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.executeAsync(fn, ...args);
}
public async getTitle(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.getTitle();
}
public async clearElement(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.clearValue();
}
public async keys(applicant: string, value: any) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.keys(value);
}
public async elementIdText(applicant: string, elementId: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.getElementText(elementId);
}
public async elements(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const elements = (await client.findElements('xpath', xpath)) as unknown;
return (elements as Array<Record<string, string>>).map((o) => {
const keys = Object.keys(o);
return {ELEMENT: o[keys[0]]};
});
}
public async frame(applicant: string, frameID: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.switchToFrame(frameID);
}
public async frameParent(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.switchToParentFrame();
}
public async getValue(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.getValue();
}
public async setValue(applicant: string, xpath: string, value: any) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.setValue(value);
}
public async selectByIndex(applicant: string, xpath: string, value: any) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.selectByIndex(value);
}
public async selectByValue(applicant: string, xpath: string, value: any) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.selectByAttribute('value', value);
}
public async selectByVisibleText(
applicant: string,
xpath: string,
str: string,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.selectByVisibleText(str);
}
public async getAttribute(
applicant: string,
xpath: string,
attr: string,
): Promise<any> {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.getAttribute(attr);
}
public async windowHandleMaximize(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.maximizeWindow();
}
public async isEnabled(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.isEnabled();
}
public async scroll(
applicant: string,
xpath: string,
x: number,
y: number,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const element = await client.$(xpath);
await element.scrollIntoView();
return element.moveTo(x, y);
}
public async scrollIntoView(
applicant: string,
xpath: string,
scrollIntoViewOptions?: boolean | null,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const element = await client.$(xpath);
await element.scrollIntoView(
scrollIntoViewOptions !== null ? scrollIntoViewOptions : undefined,
);
}
public async isAlertOpen(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.isAlertOpen();
}
public async alertAccept(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (await this.isAlertOpen(applicant)) {
return client.acceptAlert();
}
throw Error('There is no open alert');
}
public async alertDismiss(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (await this.isAlertOpen(applicant)) {
return client.dismissAlert();
}
throw Error('There is no open alert');
}
public async alertText(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (await this.isAlertOpen(applicant)) {
return client.getAlertText();
}
throw Error('There is no open alert');
}
public async dragAndDrop(
applicant: string,
xpathSource: string,
xpathDestination: string,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const sourceElement = await client.$(xpathSource);
const destinationElement = await client.$(xpathDestination);
return sourceElement.dragAndDrop(destinationElement);
}
public async setCookie(applicant: string, cookieObj: Cookie) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return await client.setCookies(cookieObj);
}
public async getCookie(applicant: string, cookieName?: string | null) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (cookieName) {
try {
const cookies = await client.getCookies([cookieName]);
return cookies[0]?.value;
} catch (e) {
return undefined;
}
}
return client.getAllCookies();
}
public async deleteCookie(applicant: string, cookieName?: string | null) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (cookieName) {
return client.deleteCookie(cookieName);
}
return client.deleteAllCookies();
}
public async getHTML(applicant: string, xpath: string, b: any) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.getHTML(b);
}
public async getCurrentTabId(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.getWindowHandle();
}
public async getTabIds(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.getWindowHandles();
}
    // @deprecated Use getTabIds instead.
public async windowHandles(applicant: string) {
return this.getTabIds(applicant);
}
public async window(applicant: string, tabId: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.switchToWindow(tabId);
}
public async switchTab(applicant: string, tabId: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const result = await client.switchToWindow(tabId);
const body = await client.$('body');
await client.waitUntil(async () => body.isExisting(), 10000);
return result;
}
public async close(applicant: string, tabId: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const tabs = await this.getTabIds(applicant);
if (tabs.length === 1 && tabs[0] === tabId) {
return this.end(applicant);
}
await client.switchToWindow(tabId);
return client.closeWindow();
}
public async getTagName(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.getTagName();
}
public async isSelected(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.isSelected();
}
public async getText(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.getText();
}
public async elementIdSelected(applicant: string, id: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.isElementSelected(id);
}
public async makeScreenshot(applicant: string): Promise<string | void> {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.takeScreenshot();
}
public async uploadFile(
applicant: string,
filePath: string,
): Promise<string | void> {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.uploadFile(filePath);
}
public async getCssProperty(
applicant: string,
xpath: string,
cssProperty: string,
): Promise<any> {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const element = await client.$(xpath);
const property = await element.getCSSProperty(cssProperty);
return property.value;
}
public async getSource(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.getPageSource();
}
public async isExisting(applicant: string, xpath: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.isExisting();
}
public async waitForValue(
applicant: string,
xpath: string,
timeout: number,
reverse: boolean,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.waitUntil(async () => {
const elemValue = await (await client.$(xpath)).getValue();
return reverse ? !elemValue : !!elemValue;
}, timeout);
}
public async waitForSelected(
applicant: string,
xpath: string,
timeout: number,
reverse: boolean,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.waitUntil(async () => {
const isSelected = await (await client.$(xpath)).isSelected();
return reverse ? !isSelected : isSelected;
}, timeout);
}
public async waitUntil(
applicant: string,
condition: () => Promise<boolean>,
timeout?: number,
timeoutMsg?: string,
interval?: number,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
return client.waitUntil(condition, timeout, timeoutMsg, interval);
}
public async selectByAttribute(
applicant: string,
xpath: string,
attribute: string,
value: string,
) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const selector = await client.$(xpath);
return selector.selectByAttribute(attribute, value);
}
public async gridProxyDetails(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (this.localSelenium) {
return {
localSelenium: true,
};
}
return client.gridProxyDetails(client.sessionId);
}
public async gridTestSession(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
if (this.localSelenium) {
return {
localSelenium: true,
};
}
return client.gridTestSession(client.sessionId);
}
public async getGridNodeDetails(applicant: string) {
await this.createClient(applicant);
const client = this.getBrowserClient(applicant);
const testSession = await this.gridTestSession(applicant);
if (!testSession.localSelenium) {
            const proxyDetails = await client.gridProxyDetails(client.sessionId);
delete testSession.msg;
delete testSession.success;
delete proxyDetails.msg;
delete proxyDetails.success;
delete proxyDetails.id;
return {...testSession, ...proxyDetails};
}
return testSession;
}
}
export default function | (config: Config) {
return new SeleniumPlugin(config);
}
| seleniumProxy |
expr-alt.rs |
// -*- rust -*-
// Tests for using match as an expression
fn test_basic() {
let mut rs: bool = match true { true => { true } false => { false } };
assert (rs);
rs = match false { true => { false } false => { true } };
assert (rs);
}
fn test_inference() {
let mut rs = match true { true => { true } false => { false } };
assert (rs);
}
fn | () {
// Yeah, this is kind of confusing ...
let rs =
match match false { true => { true } false => { false } } {
true => { false }
false => { true }
};
assert (rs);
}
fn test_alt_as_block_result() {
let rs =
match false {
true => { false }
false => { match true { true => { true } false => { false } } }
};
assert (rs);
}
fn main() {
test_basic();
    test_inference();
test_alt_as_alt_head();
test_alt_as_block_result();
}
| test_alt_as_alt_head |
init_container.go | /*
Copyright The Stash Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbac
import (
"context"
"strings"
"stash.appscode.dev/apimachinery/apis"
api "stash.appscode.dev/apimachinery/apis/stash/v1alpha1"
api_v1beta1 "stash.appscode.dev/apimachinery/apis/stash/v1beta1"
"github.com/appscode/go/log"
core "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
kerr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
core_util "kmodules.xyz/client-go/core/v1"
meta_util "kmodules.xyz/client-go/meta"
rbac_util "kmodules.xyz/client-go/rbac/v1"
wapi "kmodules.xyz/webhook-runtime/apis/workload/v1"
)
func EnsureRestoreInitContainerRBAC(kubeClient kubernetes.Interface, owner *metav1.OwnerReference, namespace, sa string, labels map[string]string) error |
func ensureRestoreInitContainerClusterRole(kc kubernetes.Interface, labels map[string]string) error {
meta := metav1.ObjectMeta{
Name: apis.StashRestoreInitContainerClusterRole,
Labels: labels,
}
_, _, err := rbac_util.CreateOrPatchClusterRole(context.TODO(), kc, meta, func(in *rbac.ClusterRole) *rbac.ClusterRole {
in.Rules = []rbac.PolicyRule{
{
APIGroups: []string{api_v1beta1.SchemeGroupVersion.Group},
Resources: []string{"*"},
Verbs: []string{"*"},
},
{
APIGroups: []string{api.SchemeGroupVersion.Group},
Resources: []string{"*"},
Verbs: []string{"*"},
},
{
APIGroups: []string{core.GroupName},
Resources: []string{"configmaps"},
Verbs: []string{"create", "update", "get"},
},
{
APIGroups: []string{core.GroupName},
Resources: []string{"pods"},
Verbs: []string{"get"},
},
{
APIGroups: []string{core.GroupName},
Resources: []string{"pods/exec"},
Verbs: []string{"get", "create"},
},
{
APIGroups: []string{core.GroupName},
Resources: []string{"events"},
Verbs: []string{"create"},
},
}
return in
}, metav1.PatchOptions{})
return err
}
func ensureRestoreInitContainerRoleBinding(kc kubernetes.Interface, owner *metav1.OwnerReference, namespace, sa string, labels map[string]string) error {
meta := metav1.ObjectMeta{
Namespace: namespace,
Name: getRestoreInitContainerRoleBindingName(owner.Kind, owner.Name),
Labels: labels,
}
_, _, err := rbac_util.CreateOrPatchRoleBinding(context.TODO(), kc, meta, func(in *rbac.RoleBinding) *rbac.RoleBinding {
core_util.EnsureOwnerReference(&in.ObjectMeta, owner)
if in.Annotations == nil {
in.Annotations = map[string]string{}
}
in.RoleRef = rbac.RoleRef{
APIGroup: rbac.GroupName,
Kind: apis.KindClusterRole,
Name: apis.StashRestoreInitContainerClusterRole,
}
in.Subjects = []rbac.Subject{
{
Kind: rbac.ServiceAccountKind,
Name: sa,
Namespace: namespace,
},
}
return in
}, metav1.PatchOptions{})
return err
}
func getRestoreInitContainerRoleBindingName(kind, name string) string {
return meta_util.ValidNameWithPefixNSuffix(apis.StashRestoreInitContainerClusterRole, strings.ToLower(kind), name)
}
func ensureRestoreInitContainerRoleBindingDeleted(kubeClient kubernetes.Interface, w *wapi.Workload) error {
err := kubeClient.RbacV1().RoleBindings(w.Namespace).Delete(
context.TODO(),
getRestoreInitContainerRoleBindingName(w.Kind, w.Name),
metav1.DeleteOptions{})
if err != nil && !kerr.IsNotFound(err) {
return err
}
if err == nil {
log.Infof("RoleBinding %s/%s has been deleted", w.Namespace, getRestoreInitContainerRoleBindingName(w.Kind, w.Name))
}
return nil
}
| {
// ensure ClusterRole for restore init container
err := ensureRestoreInitContainerClusterRole(kubeClient, labels)
if err != nil {
return err
}
// ensure RoleBinding for restore init container
err = ensureRestoreInitContainerRoleBinding(kubeClient, owner, namespace, sa, labels)
if err != nil {
return err
}
return nil
} |
MotorModule.py | from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import time
import atexit
MH = Adafruit_MotorHAT(0x60)
STEPRES = 1.8 # Step resolution in units of degrees/step
DIRECTIONS = { "ccw":Adafruit_MotorHAT.FORWARD,
"cw":Adafruit_MotorHAT.BACKWARD}
STEPTYPES = { "single":Adafruit_MotorHAT.SINGLE,
"double":Adafruit_MotorHAT.DOUBLE,
"interleave":Adafruit_MotorHAT.INTERLEAVE,
"micro":Adafruit_MotorHAT.MICROSTEP}
MOTORS = {"horizontal":1,"vertical":2}
class MotorController:
    def __init__(self, motor, steps=200, addr=0x60):
motorPort = MOTORS[motor]
self.motorPort = motorPort
self.steps = steps
self.hatAddress = addr
global MH
MH = Adafruit_MotorHAT(addr)
self.stepperMotor = MH.getStepper(steps, motorPort)
self.stepperMotor.setSpeed(180)
    def rotateMotor(self, degree, dir="cw", step="single"):
        """
        Rotate the motor by the given number of degrees from its current
        position, in the specified direction.
        Inputs: degree - Degrees to rotate
                dir - "cw" or "ccw" rotation
                step - Type of step the motor should make for the
                       rotation. By default it is set to 'single'.
                       'double' provides the highest torque the motor
                       is able to deliver; the other step types are
                       'interleave' and 'micro'.
        """
# print("ROTATING MOTOR")
x = 0
if step == "interleave":
x = int(degree/STEPRES)*2
else:
x = int(degree/STEPRES)
self.stepperMotor.step(x,DIRECTIONS[dir],STEPTYPES[step])
def | ():
"""
Turn off all motors
"""
MH.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
MH.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
MH.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
MH.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
# recommended for auto-disabling motors on shutdown!
atexit.register(turnOffMotors)
if __name__ == '__main__':
m = MotorController(motor="vertical")
m.rotateMotor(degree=1000,step="double")
# m.rotateMotor(degree=360,step="double",dir="ccw")
| turnOffMotors |
icon_text_format.rs |
pub struct IconTextFormat {
props: crate::Props,
}
impl yew::Component for IconTextFormat {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
|
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M5 17v2h14v-2H5zm4.5-4.2h5l.9 2.2h2.1L12.75 4h-1.5L6.5 15h2.1l.9-2.2zM12 5.98L13.87 11h-3.74L12 5.98z"/></svg>
</svg>
}
}
}
| {
true
} |
unboxed-closures-move-mutable.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unboxed_closures)]
#![deny(unused_mut)]
// Test that mutating a mutable upvar in a capture-by-value unboxed
// closure does not ice (issue #18238) and marks the upvar as used
// mutably so we do not get a spurious warning about it not needing to
// be declared mutable (issue #18336).
fn main() | {
{
let mut x = 0u;
move |&mut:| x += 1;
}
{
let mut x = 0u;
move |:| x += 1;
}
} |
|
traits.rs | // Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ops::{Mul, Add};
use num::Wrapping;
/// Conversion from an `Iterator`.
///
/// By implementing `FromIterator` for a type, you define how it will be
/// created from an iterator. This is common for types which describe a
/// collection of some kind.
///
/// `FromIterator`'s [`from_iter`] is rarely called explicitly, and is instead
/// used through [`Iterator`]'s [`collect`] method. See [`collect`]'s
/// documentation for more examples.
///
/// [`from_iter`]: #tymethod.from_iter
/// [`Iterator`]: trait.Iterator.html
/// [`collect`]: trait.Iterator.html#method.collect
///
/// See also: [`IntoIterator`].
///
/// [`IntoIterator`]: trait.IntoIterator.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::iter::FromIterator;
///
/// let five_fives = std::iter::repeat(5).take(5);
///
/// let v = Vec::from_iter(five_fives);
///
/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
/// ```
///
/// Using [`collect`] to implicitly use `FromIterator`:
///
/// ```
/// let five_fives = std::iter::repeat(5).take(5);
///
/// let v: Vec<i32> = five_fives.collect();
///
/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
/// ```
///
/// Implementing `FromIterator` for your type:
///
/// ```
/// use std::iter::FromIterator;
///
/// // A sample collection, that's just a wrapper over Vec<T>
/// #[derive(Debug)]
/// struct MyCollection(Vec<i32>);
///
/// // Let's give it some methods so we can create one and add things
/// // to it.
/// impl MyCollection {
/// fn new() -> MyCollection {
/// MyCollection(Vec::new())
/// }
///
/// fn add(&mut self, elem: i32) {
/// self.0.push(elem);
/// }
/// }
///
/// // and we'll implement FromIterator
/// impl FromIterator<i32> for MyCollection {
/// fn from_iter<I: IntoIterator<Item=i32>>(iter: I) -> Self {
/// let mut c = MyCollection::new();
///
/// for i in iter {
/// c.add(i);
/// }
///
/// c
/// }
/// }
///
/// // Now we can make a new iterator...
/// let iter = (0..5).into_iter();
///
/// // ... and make a MyCollection out of it
/// let c = MyCollection::from_iter(iter);
///
/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]);
///
/// // collect works too!
///
/// let iter = (0..5).into_iter();
/// let c: MyCollection = iter.collect();
///
/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented="a collection of type `{Self}` cannot be \
built from an iterator over elements of type `{A}`"]
pub trait FromIterator<A>: Sized {
/// Creates a value from an iterator.
///
/// See the [module-level documentation] for more.
///
/// [module-level documentation]: trait.FromIterator.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::iter::FromIterator;
///
/// let five_fives = std::iter::repeat(5).take(5);
///
/// let v = Vec::from_iter(five_fives);
///
/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn from_iter<T: IntoIterator<Item=A>>(iter: T) -> Self;
}
/// Conversion into an `Iterator`.
///
/// By implementing `IntoIterator` for a type, you define how it will be
/// converted to an iterator. This is common for types which describe a
/// collection of some kind.
///
/// One benefit of implementing `IntoIterator` is that your type will [work
/// with Rust's `for` loop syntax](index.html#for-loops-and-intoiterator).
///
/// See also: [`FromIterator`].
///
/// [`FromIterator`]: trait.FromIterator.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let v = vec![1, 2, 3];
///
/// let mut iter = v.into_iter();
///
/// let n = iter.next();
/// assert_eq!(Some(1), n);
///
/// let n = iter.next();
/// assert_eq!(Some(2), n);
///
/// let n = iter.next();
/// assert_eq!(Some(3), n);
///
/// let n = iter.next();
/// assert_eq!(None, n);
/// ```
///
/// Implementing `IntoIterator` for your type:
///
/// ```
/// // A sample collection, that's just a wrapper over Vec<T>
/// #[derive(Debug)]
/// struct MyCollection(Vec<i32>);
///
/// // Let's give it some methods so we can create one and add things
/// // to it.
/// impl MyCollection {
/// fn new() -> MyCollection {
/// MyCollection(Vec::new())
/// }
///
/// fn add(&mut self, elem: i32) {
/// self.0.push(elem);
/// }
/// }
///
/// // and we'll implement IntoIterator
/// impl IntoIterator for MyCollection {
/// type Item = i32;
/// type IntoIter = ::std::vec::IntoIter<i32>;
///
/// fn into_iter(self) -> Self::IntoIter {
/// self.0.into_iter()
/// }
/// }
///
/// // Now we can make a new collection...
/// let mut c = MyCollection::new();
///
/// // ... add some stuff to it ...
/// c.add(0);
/// c.add(1);
/// c.add(2);
///
/// // ... and then turn it into an Iterator:
/// for (i, n) in c.into_iter().enumerate() {
/// assert_eq!(i as i32, n);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait IntoIterator {
/// The type of the elements being iterated over.
#[stable(feature = "rust1", since = "1.0.0")]
type Item;
/// Which kind of iterator are we turning this into?
#[stable(feature = "rust1", since = "1.0.0")]
type IntoIter: Iterator<Item=Self::Item>;
/// Creates an iterator from a value.
///
/// See the [module-level documentation] for more.
///
/// [module-level documentation]: trait.IntoIterator.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let v = vec![1, 2, 3];
///
/// let mut iter = v.into_iter();
///
/// let n = iter.next();
/// assert_eq!(Some(1), n);
///
/// let n = iter.next();
/// assert_eq!(Some(2), n);
///
/// let n = iter.next();
/// assert_eq!(Some(3), n);
///
/// let n = iter.next();
/// assert_eq!(None, n);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn into_iter(self) -> Self::IntoIter;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator> IntoIterator for I {
type Item = I::Item;
type IntoIter = I;
fn into_iter(self) -> I {
self
}
}
/// Extend a collection with the contents of an iterator.
///
/// Iterators produce a series of values, and collections can also be thought
/// of as a series of values. The `Extend` trait bridges this gap, allowing you
/// to extend a collection by including the contents of that iterator. When
/// extending a collection with an already existing key, that entry is updated
/// or, in the case of collections that permit multiple entries with equal
/// keys, that entry is inserted.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // You can extend a String with some chars:
/// let mut message = String::from("The first three letters are: ");
///
/// message.extend(&['a', 'b', 'c']);
///
/// assert_eq!("abc", &message[29..32]);
/// ```
///
/// Implementing `Extend`:
///
/// ```
/// // A sample collection, that's just a wrapper over Vec<T>
/// #[derive(Debug)]
/// struct MyCollection(Vec<i32>);
///
/// // Let's give it some methods so we can create one and add things
/// // to it.
/// impl MyCollection {
/// fn new() -> MyCollection {
/// MyCollection(Vec::new())
/// }
///
/// fn add(&mut self, elem: i32) {
/// self.0.push(elem);
/// }
/// }
///
/// // since MyCollection has a list of i32s, we implement Extend for i32
/// impl Extend<i32> for MyCollection {
///
/// // This is a bit simpler with the concrete type signature: we can call
/// // extend on anything which can be turned into an Iterator which gives
/// // us i32s. Because we need i32s to put into MyCollection.
/// fn extend<T: IntoIterator<Item=i32>>(&mut self, iter: T) {
///
/// // The implementation is very straightforward: loop through the
/// // iterator, and add() each element to ourselves.
/// for elem in iter {
/// self.add(elem);
/// }
/// }
/// }
///
/// let mut c = MyCollection::new();
///
/// c.add(5);
/// c.add(6);
/// c.add(7);
///
/// // let's extend our collection with three more numbers
/// c.extend(vec![1, 2, 3]);
///
/// // we've added these elements onto the end
/// assert_eq!("MyCollection([5, 6, 7, 1, 2, 3])", format!("{:?}", c));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Extend<A> {
/// Extends a collection with the contents of an iterator.
///
/// As this is the only method for this trait, the [trait-level] docs
/// contain more details.
///
/// [trait-level]: trait.Extend.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // You can extend a String with some chars:
/// let mut message = String::from("abc");
///
/// message.extend(['d', 'e', 'f'].iter());
///
/// assert_eq!("abcdef", &message);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn extend<T: IntoIterator<Item=A>>(&mut self, iter: T);
}
/// An iterator able to yield elements from both ends.
///
/// Something that implements `DoubleEndedIterator` has one extra capability
/// over something that implements [`Iterator`]: the ability to also take
/// `Item`s from the back, as well as the front.
///
/// It is important to note that both back and forth work on the same range,
/// and do not cross: iteration is over when they meet in the middle.
///
/// In a similar fashion to the [`Iterator`] protocol, once a
/// `DoubleEndedIterator` returns `None` from a `next_back()`, calling it again
/// may or may not ever return `Some` again. `next()` and `next_back()` are
/// interchangeable for this purpose.
///
/// [`Iterator`]: trait.Iterator.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let numbers = vec![1, 2, 3, 4, 5, 6];
///
/// let mut iter = numbers.iter();
///
/// assert_eq!(Some(&1), iter.next());
/// assert_eq!(Some(&6), iter.next_back());
/// assert_eq!(Some(&5), iter.next_back());
/// assert_eq!(Some(&2), iter.next());
/// assert_eq!(Some(&3), iter.next());
/// assert_eq!(Some(&4), iter.next());
/// assert_eq!(None, iter.next());
/// assert_eq!(None, iter.next_back());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait DoubleEndedIterator: Iterator {
/// Removes and returns an element from the end of the iterator.
///
/// Returns `None` when there are no more elements.
///
/// The [trait-level] docs contain more details.
///
/// [trait-level]: trait.DoubleEndedIterator.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let numbers = vec![1, 2, 3, 4, 5, 6];
///
/// let mut iter = numbers.iter();
///
/// assert_eq!(Some(&1), iter.next());
/// assert_eq!(Some(&6), iter.next_back());
/// assert_eq!(Some(&5), iter.next_back());
/// assert_eq!(Some(&2), iter.next());
/// assert_eq!(Some(&3), iter.next());
/// assert_eq!(Some(&4), iter.next());
/// assert_eq!(None, iter.next());
/// assert_eq!(None, iter.next_back());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn next_back(&mut self) -> Option<Self::Item>;
/// Searches for an element of an iterator from the right that satisfies a predicate.
///
/// `rfind()` takes a closure that returns `true` or `false`. It applies
/// this closure to each element of the iterator, starting at the end, and if any
/// of them return `true`, then `rfind()` returns [`Some(element)`]. If they all return
/// `false`, it returns [`None`].
///
/// `rfind()` is short-circuiting; in other words, it will stop processing
/// as soon as the closure returns `true`.
///
/// Because `rfind()` takes a reference, and many iterators iterate over
/// references, this leads to a possibly confusing situation where the
/// argument is a double reference. You can see this effect in the
/// examples below, with `&&x`.
///
/// [`Some(element)`]: ../../std/option/enum.Option.html#variant.Some
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(iter_rfind)]
///
/// let a = [1, 2, 3];
///
/// assert_eq!(a.iter().rfind(|&&x| x == 2), Some(&2));
///
/// assert_eq!(a.iter().rfind(|&&x| x == 5), None);
/// ```
///
/// Stopping at the first `true`:
///
/// ```
/// #![feature(iter_rfind)]
///
/// let a = [1, 2, 3];
///
/// let mut iter = a.iter();
///
/// assert_eq!(iter.rfind(|&&x| x == 2), Some(&2));
///
/// // we can still use `iter`, as there are more elements.
/// assert_eq!(iter.next_back(), Some(&1));
/// ```
#[inline]
#[unstable(feature = "iter_rfind", issue = "39480")]
fn rfind<P>(&mut self, mut predicate: P) -> Option<Self::Item> where
Self: Sized,
P: FnMut(&Self::Item) -> bool
{
while let Some(x) = self.next_back() {
if predicate(&x) { return Some(x) }
}
None
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I {
fn next_back(&mut self) -> Option<I::Item> { (**self).next_back() }
}
/// An iterator that knows its exact length.
///
/// Many [`Iterator`]s don't know how many times they will iterate, but some do.
/// If an iterator knows how many times it can iterate, providing access to
/// that information can be useful. For example, if you want to iterate
/// backwards, a good start is to know where the end is.
///
/// When implementing an `ExactSizeIterator`, you must also implement
/// [`Iterator`]. When doing so, the implementation of [`size_hint`] *must*
/// return the exact size of the iterator.
///
/// [`Iterator`]: trait.Iterator.html
/// [`size_hint`]: trait.Iterator.html#method.size_hint
///
/// The [`len`] method has a default implementation, so you usually shouldn't
/// implement it. However, you may be able to provide a more performant
/// implementation than the default, so overriding it in this case makes sense.
///
/// [`len`]: #method.len
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // a finite range knows exactly how many times it will iterate
/// let five = 0..5;
///
/// assert_eq!(5, five.len());
/// ```
///
/// In the [module level docs][moddocs], we implemented an [`Iterator`],
/// `Counter`. Let's implement `ExactSizeIterator` for it as well:
///
/// [moddocs]: index.html
///
/// ```
/// # struct Counter {
/// # count: usize,
/// # }
/// # impl Counter {
/// # fn new() -> Counter {
/// # Counter { count: 0 }
/// # }
/// # }
/// # impl Iterator for Counter {
/// # type Item = usize;
/// # fn next(&mut self) -> Option<usize> {
/// # self.count += 1;
/// # if self.count < 6 {
/// # Some(self.count)
/// # } else {
/// # None
/// # }
/// # }
/// # }
/// impl ExactSizeIterator for Counter {
/// // We can easily calculate the remaining number of iterations.
/// fn len(&self) -> usize {
/// 5 - self.count
/// }
/// }
///
/// // And now we can use it!
///
/// let counter = Counter::new();
///
/// assert_eq!(5, counter.len());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ExactSizeIterator: Iterator {
/// Returns the exact number of times the iterator will iterate.
///
/// This method has a default implementation, so you usually should not
/// implement it directly. However, if you can provide a more efficient
/// implementation, you can do so. See the [trait-level] docs for an
/// example.
///
/// This function has the same safety guarantees as the [`size_hint`]
/// function.
///
/// [trait-level]: trait.ExactSizeIterator.html
/// [`size_hint`]: trait.Iterator.html#method.size_hint
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // a finite range knows exactly how many times it will iterate
/// let five = 0..5;
///
/// assert_eq!(5, five.len());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn len(&self) -> usize {
let (lower, upper) = self.size_hint();
// Note: This assertion is overly defensive, but it checks the invariant
// guaranteed by the trait. If this trait were rust-internal,
// we could use debug_assert!; assert_eq! will check all Rust user
// implementations too.
assert_eq!(upper, Some(lower));
lower
}
/// Returns whether the iterator is empty.
///
/// This method has a default implementation using `self.len()`, so you
/// don't need to implement it yourself.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(exact_size_is_empty)]
///
/// let mut one_element = 0..1;
/// assert!(!one_element.is_empty());
///
/// assert_eq!(one_element.next(), Some(0));
/// assert!(one_element.is_empty());
///
/// assert_eq!(one_element.next(), None);
/// ```
#[inline]
#[unstable(feature = "exact_size_is_empty", issue = "35428")]
fn is_empty(&self) -> bool {
self.len() == 0
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for &'a mut I {
fn len(&self) -> usize {
(**self).len()
}
fn is_empty(&self) -> bool {
(**self).is_empty()
}
}
/// Trait to represent types that can be created by summing up an iterator.
///
/// This trait is used to implement the [`sum`] method on iterators. Types which
/// implement the trait can be generated by the [`sum`] method. Like
/// [`FromIterator`] this trait should rarely be called directly and instead
/// interacted with through [`Iterator::sum`].
///
/// [`sum`]: ../../std/iter/trait.Sum.html#tymethod.sum
/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html
/// [`Iterator::sum`]: ../../std/iter/trait.Iterator.html#method.sum
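///
/// # Examples
///
/// A short sketch of the usual route to this trait, through
/// [`Iterator::sum`]:
///
/// ```
/// let numbers = vec![1, 2, 3, 4, 5];
///
/// // `i32` implements `Sum<&i32>`, so borrowed items can be summed too.
/// let total: i32 = numbers.iter().sum();
///
/// assert_eq!(total, 15);
/// ```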
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
pub trait Sum<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// "summing up" the items.
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
fn sum<I: Iterator<Item=A>>(iter: I) -> Self;
}
/// Trait to represent types that can be created by multiplying elements of an
/// iterator.
///
/// This trait is used to implement the [`product`] method on iterators. Types
/// which implement the trait can be generated by the [`product`] method. Like
/// [`FromIterator`] this trait should rarely be called directly and instead
/// interacted with through [`Iterator::product`].
///
/// [`product`]: ../../std/iter/trait.Product.html#tymethod.product
/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html
/// [`Iterator::product`]: ../../std/iter/trait.Iterator.html#method.product
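///
/// # Examples
///
/// A short sketch of the usual route to this trait, through
/// [`Iterator::product`]:
///
/// ```
/// let factors = vec![1, 2, 3, 4];
///
/// // `u32` implements `Product<&u32>`, so borrowed items multiply fine.
/// let result: u32 = factors.iter().product();
///
/// assert_eq!(result, 24);
/// ```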
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
pub trait Product<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// multiplying the items.
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
fn product<I: Iterator<Item=A>>(iter: I) -> Self;
}
// NB: explicitly use Add and Mul here to inherit overflow checks
macro_rules! integer_sum_product {
(@impls $zero:expr, $one:expr, #[$attr:meta], $($a:ty)*) => ($(
#[$attr]
impl Sum for $a {
fn sum<I: Iterator<Item=$a>>(iter: I) -> $a {
iter.fold($zero, Add::add)
}
}
#[$attr]
impl Product for $a {
fn product<I: Iterator<Item=$a>>(iter: I) -> $a {
iter.fold($one, Mul::mul)
}
}
#[$attr]
impl<'a> Sum<&'a $a> for $a {
fn sum<I: Iterator<Item=&'a $a>>(iter: I) -> $a {
iter.fold($zero, Add::add)
}
}
#[$attr]
impl<'a> Product<&'a $a> for $a {
fn product<I: Iterator<Item=&'a $a>>(iter: I) -> $a {
iter.fold($one, Mul::mul)
}
}
)*);
($($a:ty)*) => (
integer_sum_product!(@impls 0, 1,
#[stable(feature = "iter_arith_traits", since = "1.12.0")],
$($a)+);
integer_sum_product!(@impls Wrapping(0), Wrapping(1),
#[stable(feature = "wrapping_iter_arith", since = "1.14.0")],
$(Wrapping<$a>)+);
);
}
| fn sum<I: Iterator<Item=$a>>(iter: I) -> $a {
iter.fold(0.0, |a, b| a + b)
}
}
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
impl Product for $a {
fn product<I: Iterator<Item=$a>>(iter: I) -> $a {
iter.fold(1.0, |a, b| a * b)
}
}
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
impl<'a> Sum<&'a $a> for $a {
fn sum<I: Iterator<Item=&'a $a>>(iter: I) -> $a {
iter.fold(0.0, |a, b| a + *b)
}
}
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
impl<'a> Product<&'a $a> for $a {
fn product<I: Iterator<Item=&'a $a>>(iter: I) -> $a {
iter.fold(1.0, |a, b| a * *b)
}
}
)*)
}
integer_sum_product! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
float_sum_product! { f32 f64 }
/// An iterator adapter that produces output as long as the underlying
/// iterator produces `Result::Ok` values.
///
/// If an error is encountered, the iterator stops and the error is
/// stored. The error may be recovered later via `reconstruct`.
struct ResultShunt<I, E> {
iter: I,
error: Option<E>,
}
impl<I, T, E> ResultShunt<I, E>
where I: Iterator<Item = Result<T, E>>
{
/// Process the given iterator as if it yielded a `T` instead of a
/// `Result<T, _>`. Any errors will stop the inner iterator and
/// the overall result will be an error.
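    ///
    /// For example (illustrative only; this adapter is private to the
    /// module): processing `[Ok(1), Err("boom"), Ok(2)]` with a summing
    /// closure stops at the error and reconstructs `Err("boom")`.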
pub fn process<F, U>(iter: I, mut f: F) -> Result<U, E>
where F: FnMut(&mut Self) -> U
{
let mut shunt = ResultShunt::new(iter);
let value = f(shunt.by_ref());
shunt.reconstruct(value)
}
fn new(iter: I) -> Self {
ResultShunt {
iter: iter,
error: None,
}
}
/// Consume the adapter and rebuild a `Result` value. This should
/// *always* be called, otherwise any potential error would be
/// lost.
fn reconstruct<U>(self, val: U) -> Result<U, E> {
match self.error {
None => Ok(val),
Some(e) => Err(e),
}
}
}
impl<I, T, E> Iterator for ResultShunt<I, E>
where I: Iterator<Item = Result<T, E>>
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(Ok(v)) => Some(v),
Some(Err(e)) => {
self.error = Some(e);
None
}
None => None,
}
}
}
#[stable(feature = "iter_arith_traits_result", since="1.16.0")]
impl<T, U, E> Sum<Result<U, E>> for Result<T, E>
where T: Sum<U>,
{
fn sum<I>(iter: I) -> Result<T, E>
where I: Iterator<Item = Result<U, E>>,
{
ResultShunt::process(iter, |i| i.sum())
}
}
#[stable(feature = "iter_arith_traits_result", since="1.16.0")]
impl<T, U, E> Product<Result<U, E>> for Result<T, E>
where T: Product<U>,
{
fn product<I>(iter: I) -> Result<T, E>
where I: Iterator<Item = Result<U, E>>,
{
ResultShunt::process(iter, |i| i.product())
}
}
/// An iterator that always continues to yield `None` when exhausted.
///
/// Calling next on a fused iterator that has returned `None` once is guaranteed
/// to return [`None`] again. This trait should be implemented by all iterators
/// that behave this way because it allows for some significant optimizations.
///
/// Note: In general, you should not use `FusedIterator` in generic bounds if
/// you need a fused iterator. Instead, you should just call [`Iterator::fuse`]
/// on the iterator. If the iterator is already fused, the additional [`Fuse`]
/// wrapper will be a no-op with no performance penalty.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse
/// [`Fuse`]: ../../std/iter/struct.Fuse.html
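///
/// # Examples
///
/// A small sketch of the guarantee, using [`Iterator::fuse`] (whose
/// result is always fused):
///
/// ```
/// let mut iter = vec![1].into_iter().fuse();
///
/// assert_eq!(iter.next(), Some(1));
/// assert_eq!(iter.next(), None);
///
/// // Once exhausted, a fused iterator keeps returning `None`.
/// assert_eq!(iter.next(), None);
/// ```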
#[unstable(feature = "fused", issue = "35602")]
pub trait FusedIterator: Iterator {}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, I: FusedIterator + ?Sized> FusedIterator for &'a mut I {}
/// An iterator that reports an accurate length using size_hint.
///
/// The iterator reports a size hint where it is either exact
/// (lower bound is equal to upper bound), or the upper bound is [`None`].
/// The upper bound must only be [`None`] if the actual iterator length is
/// larger than [`usize::MAX`].
///
/// The iterator must produce exactly the number of elements it reported.
///
/// # Safety
///
/// This trait must only be implemented when the contract is upheld.
/// Consumers of this trait must inspect [`.size_hint`]’s upper bound.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`usize::MAX`]: ../../std/usize/constant.MAX.html
/// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint
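///
/// # Examples
///
/// A sketch of how a consumer might lean on the contract; the
/// `exact_len` helper below is illustrative, not part of the library:
///
/// ```
/// #![feature(trusted_len)]
/// use std::iter::TrustedLen;
///
/// // For a `TrustedLen` iterator, an upper bound of `Some(n)` is exact.
/// fn exact_len<I: TrustedLen>(iter: I) -> Option<usize> {
///     iter.size_hint().1
/// }
///
/// assert_eq!(exact_len(0..5), Some(5));
/// ```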
#[unstable(feature = "trusted_len", issue = "37572")]
pub unsafe trait TrustedLen : Iterator {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<'a, I: TrustedLen + ?Sized> TrustedLen for &'a mut I {} | macro_rules! float_sum_product {
($($a:ident)*) => ($(
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
impl Sum for $a { |
552.b3033c40.js | "use strict";(self["webpackChunkecommerce_admin"]=self["webpackChunkecommerce_admin"]||[]).push([[552],{5552:(e,c,s)=>{s.r(c),s.d(c,{default:()=>g});var a=s(3673);const n={class:"col-xs-12 col-sm-6"},o={class:"col-xs-12 col-sm-6"};function | (e,c,s,r,m,t){const d=(0,a.up)("config-form"),l=(0,a.up)("change-password-form"),i=(0,a.up)("q-card-section"),u=(0,a.up)("q-card"),f=(0,a.up)("q-page");return(0,a.wg)(),(0,a.j4)(f,{padding:""},{default:(0,a.w5)((()=>[(0,a.Wm)(u,{class:"text-grey-9"},{default:(0,a.w5)((()=>[(0,a.Wm)(i,{class:"row q-col-gutter-sm"},{default:(0,a.w5)((()=>[(0,a._)("div",n,[(0,a.Wm)(d)]),(0,a._)("div",o,[(0,a.Wm)(l)])])),_:1})])),_:1})])),_:1})}const m=(0,a.aZ)({name:"ConfigPage",components:{"config-form":(0,a.RC)((()=>Promise.all([s.e(736),s.e(741)]).then(s.bind(s,8741)))),"change-password-form":(0,a.RC)((()=>Promise.all([s.e(736),s.e(906)]).then(s.bind(s,7906))))}});var t=s(4260),d=s(4379),l=s(151),i=s(5589),u=s(7518),f=s.n(u);const p=(0,t.Z)(m,[["render",r]]),g=p;f()(m,"components",{QPage:d.Z,QCard:l.Z,QCardSection:i.Z})}}]); | r |
task.component.ts | import {Component, OnDestroy, OnInit} from '@angular/core';
import {Validators} from '@angular/forms';
import {ActivatedRoute, Router} from '@angular/router';
import {Task, TaskCreateDto, TaskPage, TaskPageCreateDto, TaskPageSwapDto} from 'app/_dto/task';
import {Subscription} from 'rxjs/Subscription';
import {ConfirmDialogComponent} from '../_dialog/confirm-dialog/confirm-dialog.component';
import {TextPromptDialogComponent} from '../_dialog/prompt-dialog/text-prompt-dialog.component';
import {UserEnrollment, UserRole} from '../_dto/user';
import {DialogService} from '../_service/dialog.service';
import {TaskService} from '../_service/task.service';
import {UserAuthenticationService} from '../_service/user-authentication.service';
import {ChapterGroupService} from "../_service/chapter-group.service";
import {Chapter, ChapterGroup} from "../_dto/chapter";
import {ChapterService} from "../_service/chapter.service";
import {combineLatest} from "rxjs/observable/combineLatest";
import {ChapterGroupEnrollmentService} from "../_service/chapter-group-enrollment.service";
@Component({
selector: 'app-task',
templateUrl: './task.component.html',
styleUrls: ['./task.component.scss']
})
export class TaskComponent implements OnInit, OnDestroy {
user: UserEnrollment;
task: Task;
taskPages: TaskPage[];
chapter: Chapter;
chapterGroup: ChapterGroup;
valid: boolean = true;
loading: boolean = false;
serviceSubscriptions: Subscription[] = [];
showExamples: boolean = false;
nextTasks: Task[] = [];
constructor(
private dialogService: DialogService,
private route: ActivatedRoute,
private router: Router,
private userAuthenticationService: UserAuthenticationService,
private chapterService: ChapterService,
private chapterGroupService: ChapterGroupService,
private taskService: TaskService,
private enrollmentService: ChapterGroupEnrollmentService
) {
}
ngOnInit() {
combineLatest(this.userAuthenticationService.getCurrentUser(), this.route.params)
.subscribe(([user, params]) => {
const chapterId = +params['chapterId'];
const taskId = +params['taskId'];
const chapterGroupId = +params['chapterGroupId'];
this.loading = true;
this.serviceSubscriptions.push(
this.chapterService.getChapter(chapterId)
.subscribe(chapter => this.chapter = chapter)
);
if (chapterGroupId !== 0 && !Number.isNaN(chapterGroupId)) {
this.serviceSubscriptions.push(
this.chapterGroupService.getChapterGroup(chapterId, chapterGroupId)
.subscribe(chapterGroup => this.chapterGroup = chapterGroup)
);
}
if (user.role === UserRole.STUDENT) {
this.enrollmentService.getEnrollment(chapterId, chapterGroupId, user.id)
.subscribe(e => {
//Merge enrollment into user to create a UserEnrollment.
this.user = Object.assign(
{enrollment: e},
user
);
}, () => { //ignore error.
});
} else {
//When we're not a Student, we're either a Teacher or Anonymous. In both cases
        //we won't need the actual enrollment as this is only used to determine if the
//user is an assistant.
this.user = Object.assign(
{enrollment: undefined},
user);
}
this.serviceSubscriptions.push(this.taskService.getTask(chapterId, taskId)
.subscribe(task => {
this.task = task;
this.getTaskPages();
},
() => {
this.valid = false;
this.loading = false;
})
);
this.serviceSubscriptions.push(this.taskService.getNextTasks(chapterId, taskId)
.subscribe(tasks => this.nextTasks = tasks)
);
});
}
/**
   * Unsubscribes from all service subscriptions when leaving the page.
*/
ngOnDestroy() {
this.serviceSubscriptions.forEach(sub => sub.unsubscribe());
}
/**
* True if the user's role is ANONYMOUS.
* @returns {boolean}
*/
isAnonymous(): boolean {
    return this.user && this.user.role === UserRole.ANONYMOUS;
}
/**
* True if the user's role is TEACHER.
* @returns {boolean}
*/
isTeacher(): boolean {
    return this.user && this.user.role === UserRole.TEACHER;
}
/**
* True if the user's role is STUDENT.
* @returns {boolean}
*/
isStudent(): boolean {
    return this.user && this.user.role === UserRole.STUDENT;
}
/**
* True if the user's role is TA
* @returns {boolean}
*/
isTa(): boolean {
return this.user && this.user.enrollment && this.user.enrollment.assistant;
}
/**
* Shows the example link when the given parameter is true.
* @param {boolean} show
*/
setShowExamples(show: boolean) {
this.showExamples = show;
}
/**
* Returns true when the whole page should be editable, false if not.
* @returns {boolean}
*/
isEditable() {
return this.isTeacher() && !this.chapterGroup;
}
getTrackClass() {
return this.task.track.toString().toLowerCase();
}
/**
* Retrieves all taskpages.
*/
getTaskPages() {
this.serviceSubscriptions.push(
this.taskService.getTaskPages(this.task.chapterId, this.task.id).subscribe(
taskPages => {
this.taskPages = taskPages;
},
() => {
this.valid = false;
}, () => this.loading = false
));
}
/**
* Adds an empty taskpage.
*/
addTaskPage() {
TextPromptDialogComponent.create(this.dialogService,
'New step',
'Please insert the title for the new step',
'',
[Validators.required, 'required', 'A name is required'],
[Validators.minLength(1), 'minlength', 'The name should be at least 1 character long.'],
[Validators.maxLength(100), 'maxlength', 'The name must be shorter than 100 characters.']
).subscribe(title => {
if (!!title) {
const createDto = new TaskPageCreateDto();
createDto.title = title;
this.taskService.addTaskPage(this.task, createDto).subscribe(() => {
this.getTaskPages();
});
}
});
}
/**
   * Deletes the task after the user confirms the action. Navigates back to the assignments page
* after deleting.
*/
deleteTask() {
ConfirmDialogComponent.create(
this.dialogService,
'Please confirm',
'Are you sure you want to delete this task?'
).subscribe(confirm => {
if (confirm) {
this.taskService.deleteTask(this.task).subscribe(() => {
this.router.navigateByUrl('/assignments');
})
}
});
}
/**
   * Edits the task when given a valid name (which must not be empty)
*/
editTask() {
TextPromptDialogComponent.create(this.dialogService,
'Edit task',
'Please insert a new name for this task.',
this.task.name,
[Validators.required, 'required', 'A name is required'],
[Validators.minLength(1), 'minlength', 'The name should be at least 1 character long.'],
[Validators.maxLength(32), 'maxlength', 'The name must be shorter than 32 characters.']
).subscribe(title => {
if (!!title) {
const createDto = new TaskCreateDto();
createDto.name = title;
createDto.track = this.task.track;
createDto.slot = this.task.slot;
this.taskService.editTask(this.task, createDto).subscribe(task => this.task = task);
}
});
}
/**
* Deletes a task page and retrieves all task pages afterwards.
* @param {TaskPage} tp
*/
deleteTaskPage(tp: TaskPage) {
ConfirmDialogComponent.create(
this.dialogService,
'Please confirm',
'Are you sure you want to delete this step?'
).subscribe(confirm => {
if (confirm) {
this.taskService.deleteTaskPage(this.task, tp).subscribe(() => {
this.getTaskPages();
})
}
});
}
/**
* Swaps the selected taskpage with the one below, when possible.
* @param {number} index
*/
swapDown(index: number) {
if (index >= this.taskPages.length - 1) {
return;
}
const swapDto = new TaskPageSwapDto();
swapDto.firstTaskPage = this.taskPages[index].id;
swapDto.secondTaskPage = this.taskPages[index + 1].id;
this.taskService.swapTaskPages(this.task, swapDto).subscribe(() => this.getTaskPages());
}
/**
* Swaps the selected taskpage with the one above, when possible. | return;
}
const swapDto = new TaskPageSwapDto();
swapDto.firstTaskPage = this.taskPages[index - 1].id;
swapDto.secondTaskPage = this.taskPages[index].id;
this.taskService.swapTaskPages(this.task, swapDto).subscribe(() => this.getTaskPages());
}
getTaskRouterLink(next: Task): (string|number)[] {
if (!this.chapterGroup) {
return ['/courses', next.chapterId, 'tasks', next.id];
}
return ['/courses', next.chapterId, 'editions', this.chapterGroup.id, 'tasks', next.id];
}
} | * @param {number} index
*/
swapUp(index: number) {
    if (index === 0) {
rpc_signmessage.py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The CounosH Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for signing and verifying messages."""
from test_framework.test_framework import CounosHTestFramework
from test_framework.util import assert_equal
class SignMessagesTest(CounosHTestFramework):
def set_test_params(self):
|
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
message = 'This is just a test message'
self.log.info('test signing with priv_key')
priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
expected_signature = 'INbVnW4e6PeRmsv2Qgu8NuopvrVjkcxob+sX8OcZG0SALhWybUjzMLPdAsXI46YZGb0KQTRii+wWIQzRpG/U+S0='
signature = self.nodes[0].signmessagewithprivkey(priv_key, message)
assert_equal(expected_signature, signature)
assert self.nodes[0].verifymessage(address, signature, message)
self.log.info('test signing with an address with wallet')
address = self.nodes[0].getnewaddress()
signature = self.nodes[0].signmessage(address, message)
assert self.nodes[0].verifymessage(address, signature, message)
self.log.info('test verifying with another address should not work')
other_address = self.nodes[0].getnewaddress()
other_signature = self.nodes[0].signmessage(other_address, message)
assert not self.nodes[0].verifymessage(other_address, signature, message)
assert not self.nodes[0].verifymessage(address, other_signature, message)
if __name__ == '__main__':
SignMessagesTest().main()
| self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-addresstype=legacy"]] |
aws_security_configuration_change.py | SECURITY_CONFIG_ACTIONS = [
'DeleteAccountPublicAccessBlock',
'DeleteDeliveryChannel',
'DeleteDetector',
'DeleteFlowLogs',
'DeleteRule',
'DeleteTrail',
'DisableEbsEncryptionByDefault',
'DisableRule',
'StopConfigurationRecorder',
'StopLogging',
]
def rule(event):
| if event['eventName'] == 'UpdateDetector':
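        # An UpdateDetector call only counts as a security-config change
        # when it disables detection; `enable` defaults to True if absent.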
return not event['requestParameters'].get('enable', True)
return event['eventName'] in SECURITY_CONFIG_ACTIONS |
|
geo.go | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package geo contains the base types for spatial data type operations.
package geo
import (
"encoding/binary"
"github.com/cockroachdb/cockroach/pkg/geo/geopb"
"github.com/cockroachdb/errors"
"github.com/golang/geo/s2"
"github.com/twpayne/go-geom"
"github.com/twpayne/go-geom/encoding/ewkb"
)
// EWKBEncodingFormat is the encoding format for EWKB.
var EWKBEncodingFormat = binary.LittleEndian
//
// Geometry
//
// Geometry is a planar spatial object.
type Geometry struct {
geopb.SpatialObject
}
// NewGeometry returns a new Geometry. Assumes the input EWKB is validated and in little endian.
func NewGeometry(spatialObject geopb.SpatialObject) *Geometry {
return &Geometry{SpatialObject: spatialObject}
}
// ParseGeometry parses a Geometry from a given text.
func ParseGeometry(str string) (*Geometry, error) {
spatialObject, err := parseAmbiguousText(str, geopb.DefaultGeometrySRID)
if err != nil {
return nil, err
}
return NewGeometry(spatialObject), nil
}
// MustParseGeometry behaves as ParseGeometry, but panics if there is an error.
func MustParseGeometry(str string) *Geometry {
g, err := ParseGeometry(str)
if err != nil {
panic(err)
}
return g
}
// ParseGeometryFromEWKT parses the EWKT into a Geometry.
func ParseGeometryFromEWKT(
ewkt geopb.EWKT, srid geopb.SRID, defaultSRIDOverwriteSetting defaultSRIDOverwriteSetting,
) (*Geometry, error) {
g, err := parseEWKT(ewkt, srid, defaultSRIDOverwriteSetting)
if err != nil {
return nil, err
}
return NewGeometry(g), nil
}
// ParseGeometryFromEWKB parses the EWKB into a Geometry.
func ParseGeometryFromEWKB(ewkb geopb.EWKB) (*Geometry, error) {
g, err := parseEWKB(ewkb, geopb.DefaultGeometrySRID, DefaultSRIDIsHint)
if err != nil {
return nil, err
}
return NewGeometry(g), nil
}
// ParseGeometryFromWKB parses the WKB into a given Geometry.
func ParseGeometryFromWKB(wkb geopb.WKB, srid geopb.SRID) (*Geometry, error) {
g, err := parseWKB(wkb, srid)
if err != nil {
return nil, err
}
return NewGeometry(g), nil
}
// ParseGeometryFromGeoJSON parses the GeoJSON into a given Geometry.
func ParseGeometryFromGeoJSON(json []byte) (*Geometry, error) {
g, err := parseGeoJSON(json, geopb.DefaultGeometrySRID)
if err != nil {
return nil, err
}
return NewGeometry(g), nil
}
// ParseGeometryFromEWKBRaw returns a new Geometry from an EWKB, without any SRID checks.
// You should only do this if you trust the EWKB is set up correctly.
// You most likely want geo.ParseGeometryFromEWKB instead.
func ParseGeometryFromEWKBRaw(ewkb geopb.EWKB) (*Geometry, error) {
base, err := parseEWKBRaw(ewkb)
if err != nil {
return nil, err
}
return &Geometry{SpatialObject: base}, nil
}
// MustParseGeometryFromEWKBRaw behaves as ParseGeometryFromEWKBRaw, but panics if an error occurs.
func MustParseGeometryFromEWKBRaw(ewkb geopb.EWKB) *Geometry {
ret, err := ParseGeometryFromEWKBRaw(ewkb)
if err != nil {
panic(err)
}
return ret
}
// AsGeography converts a given Geometry to its Geography form.
func (g *Geometry) AsGeography() (*Geography, error) {
if g.SRID() != 0 {
// TODO(otan): check SRID is latlng
return NewGeography(g.SpatialObject), nil
}
// Set a default SRID if one is not already set.
t, err := ewkb.Unmarshal(g.EWKB())
if err != nil {
return nil, err
}
adjustGeomSRID(t, geopb.DefaultGeographySRID)
spatialObject, err := spatialObjectFromGeom(t)
if err != nil {
return nil, err
}
return NewGeography(spatialObject), nil
}
// AsGeomT returns the geometry as a geom.T object.
func (g *Geometry) AsGeomT() (geom.T, error) {
return ewkb.Unmarshal(g.SpatialObject.EWKB)
}
// EWKB returns the EWKB representation of the Geometry.
func (g *Geometry) EWKB() geopb.EWKB {
return g.SpatialObject.EWKB
}
// SRID returns the SRID representation of the Geometry.
func (g *Geometry) SRID() geopb.SRID {
return g.SpatialObject.SRID
}
// Shape returns the shape of the Geometry.
func (g *Geometry) Shape() geopb.Shape {
return g.SpatialObject.Shape
}
//
// Geography
//
// Geography is a spherical spatial object.
type Geography struct {
geopb.SpatialObject
}
// NewGeography returns a new Geography. Assumes the input EWKB is validated and in little endian.
func NewGeography(spatialObject geopb.SpatialObject) *Geography {
return &Geography{SpatialObject: spatialObject}
}
// ParseGeography parses a Geography from a given text.
func ParseGeography(str string) (*Geography, error) {
spatialObject, err := parseAmbiguousText(str, geopb.DefaultGeographySRID)
if err != nil {
return nil, err
}
return NewGeography(spatialObject), nil
}
// ParseGeographyFromEWKT parses the EWKT into a Geography.
func ParseGeographyFromEWKT(
ewkt geopb.EWKT, srid geopb.SRID, defaultSRIDOverwriteSetting defaultSRIDOverwriteSetting,
) (*Geography, error) {
g, err := parseEWKT(ewkt, srid, defaultSRIDOverwriteSetting)
if err != nil {
return nil, err
}
return NewGeography(g), nil
}
// ParseGeographyFromEWKB parses the EWKB into a Geography.
func ParseGeographyFromEWKB(ewkb geopb.EWKB) (*Geography, error) {
g, err := parseEWKB(ewkb, geopb.DefaultGeographySRID, DefaultSRIDIsHint)
if err != nil {
return nil, err
}
return NewGeography(g), nil
}
// ParseGeographyFromWKB parses the WKB into a given Geography.
func ParseGeographyFromWKB(wkb geopb.WKB, srid geopb.SRID) (*Geography, error) {
g, err := parseWKB(wkb, srid)
if err != nil {
return nil, err
}
return NewGeography(g), nil
}
// ParseGeographyFromGeoJSON parses the GeoJSON into a given Geography.
func ParseGeographyFromGeoJSON(json []byte) (*Geography, error) {
g, err := parseGeoJSON(json, geopb.DefaultGeographySRID)
if err != nil {
return nil, err
}
return NewGeography(g), nil
}
// ParseGeographyFromEWKBRaw returns a new Geography from an EWKB, without any SRID checks.
// You should only do this if you trust the EWKB is set up correctly.
// You most likely want ParseGeographyFromEWKB instead.
func ParseGeographyFromEWKBRaw(ewkb geopb.EWKB) (*Geography, error) {
base, err := parseEWKBRaw(ewkb)
if err != nil {
return nil, err
}
return &Geography{SpatialObject: base}, nil
}
// MustParseGeographyFromEWKBRaw behaves as ParseGeographyFromEWKBRaw, but panics if an error occurs.
func MustParseGeographyFromEWKBRaw(ewkb geopb.EWKB) *Geography {
ret, err := ParseGeographyFromEWKBRaw(ewkb)
if err != nil {
panic(err)
}
return ret
}
// AsGeometry converts a given Geography to its Geometry form.
func (g *Geography) AsGeometry() *Geometry {
return NewGeometry(g.SpatialObject)
}
// AsGeomT returns the Geography as a geom.T object.
func (g *Geography) AsGeomT() (geom.T, error) {
return ewkb.Unmarshal(g.SpatialObject.EWKB)
}
// EWKB returns the EWKB representation of the Geography.
func (g *Geography) EWKB() geopb.EWKB {
return g.SpatialObject.EWKB
}
// SRID returns the SRID representation of the Geography.
func (g *Geography) SRID() geopb.SRID {
return g.SpatialObject.SRID
}
// AsS2 converts a given Geography into its S2 form.
func (g *Geography) AsS2() ([]s2.Region, error) {
geomRepr, err := g.AsGeomT()
if err != nil {
return nil, err
}
// TODO(otan): convert by reading from EWKB to S2 directly.
return S2RegionsFromGeom(geomRepr), nil
}
// isLinearRingCCW returns whether a given linear ring is counterclockwise.
// See 2.07 of http://www.faqs.org/faqs/graphics/algorithms-faq/.
// "Find the lowest vertex (or, if there is more than one vertex with the same lowest coordinate,
// the rightmost of those vertices) and then take the cross product of the edges fore and aft of it."
func isLinearRingCCW(linearRing *geom.LinearRing) bool {
smallestIdx := 0
smallest := linearRing.Coord(0)
for pointIdx := 1; pointIdx < linearRing.NumCoords()-1; pointIdx++ {
curr := linearRing.Coord(pointIdx)
if curr.Y() < smallest.Y() || (curr.Y() == smallest.Y() && curr.X() > smallest.X()) {
smallestIdx = pointIdx
smallest = curr
}
}
// prevIdx is the previous point. If we are at the 0th point, the last coordinate
// is also the 0th point, so take the second last point.
// Note we don't have to apply this for "nextIdx" as we cap the search above at the
// second last vertex.
prevIdx := smallestIdx - 1
if smallestIdx == 0 {
prevIdx = linearRing.NumCoords() - 2
}
a := linearRing.Coord(prevIdx)
b := smallest
c := linearRing.Coord(smallestIdx + 1)
// We could do the cross product, but we are only interested in the sign.
// To find the sign, reorganize into the orientation matrix:
// 1 x_a y_a
// 1 x_b y_b
// 1 x_c y_c
// and find the determinant.
// https://en.wikipedia.org/wiki/Curve_orientation#Orientation_of_a_simple_polygon
areaSign := a.X()*b.Y() - a.Y()*b.X() +
a.Y()*c.X() - a.X()*c.Y() +
b.X()*c.Y() - c.X()*b.Y()
// Note having an area sign of 0 means it is a flat polygon, which is invalid.
return areaSign > 0
}
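// Worked example (a sketch): for the CCW triangle ring (0,0) -> (1,0) -> (0,1),
// closed back at (0,0), the lowest-then-rightmost vertex is b=(1,0), with
// a=(0,0) before it and c=(0,1) after it. The determinant above evaluates to
// 0*0 - 0*1 + 0*0 - 0*1 + 1*1 - 0*0 = 1 > 0, so the ring is reported CCW.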
// S2RegionsFromGeom converts a geom representation of an object
// to s2 regions.
func S2RegionsFromGeom(geomRepr geom.T) []s2.Region {
var regions []s2.Region
switch repr := geomRepr.(type) {
case *geom.Point:
regions = []s2.Region{
s2.PointFromLatLng(s2.LatLngFromDegrees(repr.Y(), repr.X())),
}
case *geom.LineString:
latLngs := make([]s2.LatLng, repr.NumCoords()) | latLngs[i] = s2.LatLngFromDegrees(p.Y(), p.X())
}
regions = []s2.Region{
s2.PolylineFromLatLngs(latLngs),
}
case *geom.Polygon:
loops := make([]*s2.Loop, repr.NumLinearRings())
// All loops must be oriented CCW for S2.
for ringIdx := 0; ringIdx < repr.NumLinearRings(); ringIdx++ {
linearRing := repr.LinearRing(ringIdx)
points := make([]s2.Point, linearRing.NumCoords())
isCCW := isLinearRingCCW(linearRing)
for pointIdx := 0; pointIdx < linearRing.NumCoords(); pointIdx++ {
p := linearRing.Coord(pointIdx)
pt := s2.PointFromLatLng(s2.LatLngFromDegrees(p.Y(), p.X()))
if isCCW {
points[pointIdx] = pt
} else {
points[len(points)-pointIdx-1] = pt
}
}
loops[ringIdx] = s2.LoopFromPoints(points)
}
regions = []s2.Region{
s2.PolygonFromLoops(loops),
}
case *geom.GeometryCollection:
for _, geom := range repr.Geoms() {
regions = append(regions, S2RegionsFromGeom(geom)...)
}
case *geom.MultiPoint:
for i := 0; i < repr.NumPoints(); i++ {
regions = append(regions, S2RegionsFromGeom(repr.Point(i))...)
}
case *geom.MultiLineString:
for i := 0; i < repr.NumLineStrings(); i++ {
regions = append(regions, S2RegionsFromGeom(repr.LineString(i))...)
}
case *geom.MultiPolygon:
for i := 0; i < repr.NumPolygons(); i++ {
regions = append(regions, S2RegionsFromGeom(repr.Polygon(i))...)
}
}
return regions
}
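// For instance (a sketch), a *geom.Point with X=1, Y=2 (longitude 1, latitude 2)
// yields a single s2.Region built from s2.PointFromLatLng(s2.LatLngFromDegrees(2, 1)).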
//
// common
//
// spatialObjectFromGeom creates a geopb.SpatialObject from a geom.T.
func spatialObjectFromGeom(t geom.T) (geopb.SpatialObject, error) {
ret, err := ewkb.Marshal(t, EWKBEncodingFormat)
if err != nil {
return geopb.SpatialObject{}, err
}
var shape geopb.Shape
switch t := t.(type) {
case *geom.Point:
shape = geopb.Shape_Point
case *geom.LineString:
shape = geopb.Shape_LineString
case *geom.Polygon:
shape = geopb.Shape_Polygon
case *geom.MultiPoint:
shape = geopb.Shape_MultiPoint
case *geom.MultiLineString:
shape = geopb.Shape_MultiLineString
case *geom.MultiPolygon:
shape = geopb.Shape_MultiPolygon
case *geom.GeometryCollection:
shape = geopb.Shape_GeometryCollection
default:
return geopb.SpatialObject{}, errors.Newf("unknown shape: %T", t)
}
if t.Layout() != geom.XY {
return geopb.SpatialObject{}, errors.Newf("only 2D objects are currently supported")
}
return geopb.SpatialObject{
EWKB: geopb.EWKB(ret),
SRID: geopb.SRID(t.SRID()),
Shape: shape,
}, nil
} | for i := 0; i < repr.NumCoords(); i++ {
p := repr.Coord(i) |
upload.rs | // Copyright (c) 2016-2018 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::key::download::download_public_encryption_key;
use api_client::Client;
use common::ui::{Status, UIWriter, UI};
use error::{Error, Result};
use hcore::crypto::BoxKeyPair;
use std::path::Path;
use {PRODUCT, VERSION};
pub fn | (
ui: &mut UI,
bldr_url: &str,
token: &str,
origin: &str,
key: &str,
secret: &str,
cache: &Path,
) -> Result<()> {
let api_client = Client::new(bldr_url, PRODUCT, VERSION, None).map_err(Error::APIClient)?;
let encryption_key = match BoxKeyPair::get_latest_pair_for(origin, cache) {
Ok(key) => key,
Err(_) => {
debug!("Didn't find public encryption key in cache path");
download_public_encryption_key(ui, &api_client, origin, token, cache)?;
BoxKeyPair::get_latest_pair_for(origin, cache)?
}
};
ui.status(Status::Encrypting, format!("value for key {}.", key))?;
let encrypted_secret_bytes = encryption_key.encrypt(secret.as_bytes(), None)?;
let encrypted_secret_string = match String::from_utf8(encrypted_secret_bytes) {
Ok(string_from_bytes) => string_from_bytes,
Err(_) => {
return Err(Error::ArgumentError(
"Failed to convert encrypted bytes to string",
))
}
};
ui.status(Status::Encrypted, format!("{}=[REDACTED].", key))?;
ui.status(Status::Uploading, format!("secret for key {}.", key))?;
api_client
.create_origin_secret(origin, token, key, &encrypted_secret_string)
.map_err(Error::APIClient)?;
ui.status(Status::Uploaded, format!("secret for {}.", key))?;
Ok(())
}
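// A rough invocation sketch (all argument values below are hypothetical):
//   start(&mut ui, "https://bldr.habitat.sh", "<auth-token>", "core",
//         "DB_PASSWORD", "s3cr3t", &cache_dir)?;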
| start |
controller.py | from abc import ABC
def route(rule, **options):
"""Decorator for defining routes of FlaskController classes.
Acts in the same way as @app.route.
Can also be used on a class to set a base route.
Args:
rule (str): The rule (path) of the newly defined route
options: refer to Flask's docs for these; all of them can be used
"""
def decorator(f):
f._route = (rule, options)
return f
return decorator
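# A minimal usage sketch (the controller and app names below are illustrative):
#
#   class HelloController(FlaskController):
#       @route('/hello')
#       def hello(self):
#           return 'Hello!'
#
#   HelloController().register(app)  # app is a Flask instance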
class FlaskController(ABC):
"""Baseclass for the Controller Classes.
Extend tis class and use it in conjunction with the route decoractor
to define routes for your flask app.
Use the register method to add your defined routes to a flask app.
"""
def __init__(self):
super(FlaskController, self).__init__()
def register(self, app):
"""Adds the routes of a Controller to a Flask instance.
Args:
app (Flask)
"""
members = dir(self)
routes = []
for member in members:
if hasattr(getattr(self, member), "_route"):
if member != "__class__":
routes.append(member)
self._register_routes(routes, app)
def _register_routes(self, routes, app):
for route in routes:
func = getattr(self, route)
real_route = self._generate_route(func._route[0])
options = func._route[1]
app.add_url_rule(real_route, route + real_route, func, **options)
def _generate_route(self, route):
base_route = ""
if hasattr(self, "_route"): | base_route = self._route[0]
return base_route + route |
|
php.rs | use crate::traits::{ Instruction, VirtualCpu };
use crate::types::{ Byte };
/// Php: PHP
pub struct | { }
impl Instruction for Php {
fn opcode (&self) -> &'static str { "PHP"}
fn hexcode(&self) -> Byte { 0x08 }
fn execute(&self, _cpu: &mut dyn VirtualCpu) -> std::io::Result<()> {
panic!("opcode PHP (Php) not implemented!");
// Ok(())
}
}
| Php |
main.go | package main
import (
"fmt"
"log"
"net/http"
)
func main() {
http.HandleFunc("/", rootHandler)
err := http.ListenAndServe(":8000", nil)
if err != nil {
log.Fatal(err)
}
} | } |
func rootHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "Hello World") |
names.js | 'use strict';
var hasOwnProperty = Object.prototype.hasOwnProperty;
var keywords = Object.create(null);
var properties = Object.create(null);
var HYPHENMINUS = 45; // '-'.charCodeAt()
function isVariable(str, offset) {
return str.charCodeAt(offset) === HYPHENMINUS &&
str.charCodeAt(offset + 1) === HYPHENMINUS;
} | // vendor should contain at least one letter
var secondDashIndex = str.indexOf('-', offset + 2);
if (secondDashIndex !== -1) {
return str.substring(offset, secondDashIndex + 1);
}
}
return '';
}
function getKeywordInfo(keyword) {
if (hasOwnProperty.call(keywords, keyword)) {
return keywords[keyword];
}
var name = keyword.toLowerCase();
if (hasOwnProperty.call(keywords, name)) {
return keywords[keyword] = keywords[name];
}
var vendor = !isVariable(name, 0) ? getVendorPrefix(name, 0) : '';
return keywords[keyword] = Object.freeze({
vendor: vendor,
prefix: vendor,
name: name.substr(vendor.length)
});
}
function getPropertyInfo(property) {
if (hasOwnProperty.call(properties, property)) {
return properties[property];
}
var name = property;
var hack = property[0];
if (hack === '/' && property[1] === '/') {
hack = '//';
} else if (hack !== '_' &&
hack !== '*' &&
hack !== '$' &&
hack !== '#' &&
hack !== '+') {
hack = '';
}
var variable = isVariable(name, hack.length);
if (!variable) {
name = name.toLowerCase();
if (hasOwnProperty.call(properties, name)) {
return properties[property] = properties[name];
}
}
var vendor = !variable ? getVendorPrefix(name, hack.length) : '';
return properties[property] = Object.freeze({
hack: hack,
vendor: vendor,
prefix: hack + vendor,
name: name.substr(hack.length + vendor.length),
variable: variable
});
}
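// Illustrative results (a sketch; the values follow from the parsing above):
//   getPropertyInfo('-webkit-transform') -> { hack: '', vendor: '-webkit-', prefix: '-webkit-', name: 'transform', variable: false }
//   getPropertyInfo('*color')            -> { hack: '*', vendor: '', prefix: '*', name: 'color', variable: false }
//   getPropertyInfo('--main-color')      -> { hack: '', vendor: '', prefix: '', name: '--main-color', variable: true }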
module.exports = {
keyword: getKeywordInfo,
property: getPropertyInfo
}; |
function getVendorPrefix(str, offset) {
if (str.charCodeAt(offset) === HYPHENMINUS) { |
whale_alert_view.py | """Whale Alert view"""
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.cryptocurrency.onchain import whale_alert_model
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_WHALE_ALERT_KEY"])
def display_whales_transactions(
min_value: int = 800000,
top: int = 100,
sortby: str = "date",
descend: bool = False,
show_address: bool = False,
export: str = "",
) -> None:
| """Display huge value transactions from major blockchains. [Source: https://docs.whale-alert.io/]
Parameters
----------
min_value: int
Minimum value of trade to track.
top: int
Limit of transactions. Maximum 100
sortby: str
Key to sort by.
descend: bool
Sort in descending order.
show_address: bool
Flag to show addresses of transactions.
export : str
Export dataframe data to csv,json,xlsx file
"""
df = whale_alert_model.get_whales_transactions(min_value)
if df.empty:
console.print("Failed to retrieve data.")
return
df_data = df.copy()
df = df.sort_values(by=sortby, ascending=not descend)
if not show_address:
df = df.drop(["from_address", "to_address"], axis=1)
else:
df = df.drop(["from", "to", "blockchain"], axis=1)
for col in ["amount_usd", "amount"]:
df[col] = df[col].apply(lambda x: lambda_long_number_format(x))
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Large Value Transactions",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"whales",
df_data,
) |
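# Example call (hypothetical arguments; requires a valid Whale Alert API key):
#   display_whales_transactions(min_value=1_000_000, top=10, sortby="amount_usd", export="csv")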
|
SR4DFlowNet.py | import tensorflow as tf
class SR4DFlowNet():
def __init__(self, res_increase):
self.res_increase = res_increase
def build_network(self, u, v, w, u_mag, v_mag, w_mag, low_resblock=8, hi_resblock=4, channel_nr=64):
channel_nr = 64
speed = (u ** 2 + v ** 2 + w ** 2) ** 0.5
mag = (u_mag ** 2 + v_mag ** 2 + w_mag ** 2) ** 0.5
pcmr = mag * speed
phase = tf.keras.layers.concatenate([u,v,w])
pc = tf.keras.layers.concatenate([pcmr, mag, speed])
pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')
pc = conv3d(pc,3,channel_nr, 'SYMMETRIC', 'relu')
phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')
phase = conv3d(phase,3,channel_nr, 'SYMMETRIC', 'relu')
concat_layer = tf.keras.layers.concatenate([phase, pc])
concat_layer = conv3d(concat_layer, 1, channel_nr, 'SYMMETRIC', 'relu')
concat_layer = conv3d(concat_layer, 3, channel_nr, 'SYMMETRIC', 'relu')
# res blocks
rb = concat_layer
for i in range(low_resblock):
rb = resnet_block(rb, "ResBlock", channel_nr, pad='SYMMETRIC')
rb = upsample3d(rb, self.res_increase)
# refinement in HR
for i in range(hi_resblock):
rb = resnet_block(rb, "ResBlock", channel_nr, pad='SYMMETRIC')
# 3 separate path version
u_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')
u_path = conv3d(u_path, 3, 1, 'SYMMETRIC', None)
v_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')
v_path = conv3d(v_path, 3, 1, 'SYMMETRIC', None)
w_path = conv3d(rb, 3, channel_nr, 'SYMMETRIC', 'relu')
w_path = conv3d(w_path, 3, 1, 'SYMMETRIC', None)
b_out = tf.keras.layers.concatenate([u_path, v_path, w_path])
return b_out
def | (input_tensor, res_increase):
"""
Resize the image by linearly interpolating the input
using TF's 'resize_bilinear' function.
:param input_tensor: 2D/3D image tensor, with shape:
'batch, X, Y, Z, Channels'
:return: interpolated volume
Original source: https://niftynet.readthedocs.io/en/dev/_modules/niftynet/layer/linear_resize.html
"""
# We need this option for the bilinear resize to prevent shifting bug
align = True
b_size, x_size, y_size, z_size, c_size = input_tensor.shape
x_size_new, y_size_new, z_size_new = x_size * res_increase, y_size * res_increase, z_size * res_increase
if res_increase == 1:
# already in the target shape
return input_tensor
# resize y-z
squeeze_b_x = tf.reshape(input_tensor, [-1, y_size, z_size, c_size], name='reshape_bx')
resize_b_x = tf.compat.v1.image.resize_bilinear(squeeze_b_x, [y_size_new, z_size_new], align_corners=align)
resume_b_x = tf.reshape(resize_b_x, [-1, x_size, y_size_new, z_size_new, c_size], name='resume_bx')
# Reorient
reoriented = tf.transpose(resume_b_x, [0, 3, 2, 1, 4])
# squeeze and 2d resize
squeeze_b_z = tf.reshape(reoriented, [-1, y_size_new, x_size, c_size], name='reshape_bz')
resize_b_z = tf.compat.v1.image.resize_bilinear(squeeze_b_z, [y_size_new, x_size_new], align_corners=align)
resume_b_z = tf.reshape(resize_b_z, [-1, z_size_new, y_size_new, x_size_new, c_size], name='resume_bz')
output_tensor = tf.transpose(resume_b_z, [0, 3, 2, 1, 4])
return output_tensor
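# Shape sketch (assuming res_increase=2): an input of shape (batch, 8, 8, 8, c)
# becomes (batch, 8, 16, 16, c) after the y-z resize, (batch, 16, 16, 8, c)
# after the reorienting transpose, (batch, 16, 16, 16, c) after the y-x resize,
# and the final transpose restores the (batch, x, y, z, c) axis order.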
def conv3d(x, kernel_size, filters, padding='SYMMETRIC', activation=None, initialization=None, use_bias=True):
"""
Based on: https://github.com/gitlimlab/CycleGAN-Tensorflow/blob/master/ops.py
For tf padding, refer to: https://www.tensorflow.org/api_docs/python/tf/pad
"""
reg_l2 = tf.keras.regularizers.l2(5e-7)
if padding == 'SYMMETRIC' or padding == 'REFLECT':
p = (kernel_size - 1) // 2
x = tf.pad(x, [[0,0],[p,p],[p,p], [p,p],[0,0]], padding)
x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x)
else:
assert padding in ['SAME', 'VALID']
x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x)
return x
def resnet_block(x, block_name='ResBlock', channel_nr=64, scale = 1, pad='SAME'):
tmp = conv3d(x, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)
tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)
tmp = conv3d(tmp, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)
tmp = x + tmp * scale
tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)
return tmp
| upsample3d |
image-augmentation.py | # coding: utf-8
# # Image Augmentation
# - Check images/sample-train
# - Check images/sample-confirm is empty
#
# In[15]:
import numpy as np
# In[16]:
from keras.preprocessing.image import ImageDataGenerator,array_to_img,img_to_array,load_img
from keras.applications.inception_v3 import preprocess_input
# **Check that sample-confirm is empty**
# In[17]:
train_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
test_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True
)
jf_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input, |
# ## Check on a sample to see the image generators work in the way we expect
# In[18]:
train_generator = train_datagen.flow_from_directory('images/sample-train/',target_size=(150,150), save_to_dir='images/sample-confirm/')
# In[19]:
i=0
for batch in train_datagen.flow_from_directory('images/sample-train/', target_size=(150,150), save_to_dir='images/sample-confirm/'):
i+=1
if (i>10):
break
# In[20]:
j=0
for batch in jf_datagen.flow_from_directory('images/sample-train/', target_size=(150,150), save_to_dir='images/sample-confirm/'):
j+=1
if ( j > 10):
break | horizontal_flip=True
) |
test_json.rs | mod json_report_handler {
use miette::{Diagnostic, MietteError, NamedSource, Report, SourceSpan};
use miette::JSONReportHandler;
use thiserror::Error;
fn fmt_report(diag: Report) -> String {
let mut out = String::new();
JSONReportHandler::new()
.render_report(&mut out, diag.as_ref())
.unwrap();
out
}
#[test]
fn single_line_with_wide_char() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label("this bit here")]
highlight: SourceSpan,
}
let src = "source\n 👼🏼text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight: (9, 6).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 9,
"length": 6
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn single_line_highlight() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label("this bit here")]
highlight: SourceSpan,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight: (9, 4).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 9,
"length": 4
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn single_line_highlight_offset_zero() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label("this bit here")]
highlight: SourceSpan,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight: (0, 0).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 0,
"length": 0
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn single_line_highlight_with_empty_span() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label("this bit here")]
highlight: SourceSpan,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight: (9, 0).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 9,
"length": 0
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn single_line_highlight_no_label() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label]
highlight: SourceSpan,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight: (9, 4).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"span": {
"offset": 9,
"length": 4
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn single_line_highlight_at_line_start() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad | #[source_code]
src: NamedSource,
#[label("this bit here")]
highlight: SourceSpan,
}
let src = "source\ntext\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight: (7, 4).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 7,
"length": 4
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn multiple_same_line_highlights() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label = "x"]
highlight1: SourceSpan,
#[label = "y"]
highlight2: SourceSpan,
#[label = "z"]
highlight3: SourceSpan,
}
let src = "source\n text text text text text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight1: (9, 4).into(),
highlight2: (14, 4).into(),
highlight3: (24, 4).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "x",
"span": {
"offset": 9,
"length": 4
}
},
{
"label": "y",
"span": {
"offset": 14,
"length": 4
}
},
{
"label": "z",
"span": {
"offset": 24,
"length": 4
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn multiline_highlight_adjacent() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label = "these two lines"]
highlight: SourceSpan,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight: (9, 11).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "these two lines",
"span": {
"offset": 9,
"length": 11
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn multiline_highlight_flyby() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label = "block 1"]
highlight1: SourceSpan,
#[label = "block 2"]
highlight2: SourceSpan,
}
let src = r#"line1
line2
line3
line4
line5
"#
.to_string();
let len = src.len();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight1: (0, len).into(),
highlight2: (10, 9).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "block 1",
"span": {
"offset": 0,
"length": 50
}
},
{
"label": "block 2",
"span": {
"offset": 10,
"length": 9
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn multiline_highlight_no_label() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("wtf?!\nit broke :(")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source]
source: Inner,
#[source_code]
src: NamedSource,
#[label = "block 1"]
highlight1: SourceSpan,
#[label]
highlight2: SourceSpan,
}
#[derive(Debug, Error)]
#[error("something went wrong\n\nHere's a more detailed explanation of everything that actually went wrong because it's actually important.\n")]
struct Inner(#[source] InnerInner);
#[derive(Debug, Error)]
#[error("very much went wrong")]
struct InnerInner;
let src = r#"line1
line2
line3
line4
line5
"#
.to_string();
let len = src.len();
let err = MyBad {
source: Inner(InnerInner),
src: NamedSource::new("bad_file.rs", src),
highlight1: (0, len).into(),
highlight2: (10, 9).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "wtf?!\nit broke :(",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "block 1",
"span": {
"offset": 0,
"length": 50
}
},
{
"span": {
"offset": 10,
"length": 9
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn multiple_multiline_highlights_adjacent() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label = "this bit here"]
highlight1: SourceSpan,
#[label = "also this bit"]
highlight2: SourceSpan,
}
let src = "source\n text\n here\nmore here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight1: (0, 10).into(),
highlight2: (20, 6).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 0,
"length": 10
}
},
{
"label": "also this bit",
"span": {
"offset": 20,
"length": 6
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn multiple_multiline_highlights_overlapping_lines() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label = "this bit here"]
highlight1: SourceSpan,
#[label = "also this bit"]
highlight2: SourceSpan,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight1: (0, 8).into(),
highlight2: (9, 10).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 0,
"length": 8
}
},
{
"label": "also this bit",
"span": {
"offset": 9,
"length": 10
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn multiple_multiline_highlights_overlapping_offsets() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label = "this bit here"]
highlight1: SourceSpan,
#[label = "also this bit"]
highlight2: SourceSpan,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src),
highlight1: (0, 8).into(),
highlight2: (10, 10).into(),
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 0,
"length": 8
}
},
{
"label": "also this bit",
"span": {
"offset": 10,
"length": 10
}
}
],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn url() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(help("try doing it better next time?"), url("https://example.com"))]
struct MyBad;
let err = MyBad;
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"severity": "error",
"url": "https://example.com",
"help": "try doing it better next time?",
"labels": [],
"related": []
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
#[test]
fn related() -> Result<(), MietteError> {
#[derive(Debug, Diagnostic, Error)]
#[error("oops!")]
#[diagnostic(code(oops::my::bad), help("try doing it better next time?"))]
struct MyBad {
#[source_code]
src: NamedSource,
#[label("this bit here")]
highlight: SourceSpan,
#[related]
related: Vec<MyBad>,
}
let src = "source\n text\n here".to_string();
let err = MyBad {
src: NamedSource::new("bad_file.rs", src.clone()),
highlight: (9, 4).into(),
related: vec![
MyBad {
src: NamedSource::new("bad_file2.rs", src.clone()),
highlight: (0, 6).into(),
related: vec![],
},
MyBad {
src: NamedSource::new("bad_file3.rs", src),
highlight: (0, 6).into(),
related: vec![],
},
],
};
let out = fmt_report(err.into());
println!("Error: {}", out);
let expected: String = r#"
{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 9,
"length": 4
}
}
],
"related": [{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file2.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 0,
"length": 6
}
}
],
"related": []
},{
"message": "oops!",
"code": "oops::my::bad",
"severity": "error",
"help": "try doing it better next time?",
"filename": "bad_file3.rs",
"labels": [
{
"label": "this bit here",
"span": {
"offset": 0,
"length": 6
}
}
],
"related": []
}]
}"#
.lines()
.into_iter()
.map(|s| s.trim_matches(|c| c == ' ' || c == '\n'))
.collect();
assert_eq!(expected, out);
Ok(())
}
}
| {
|
executor_fairness_test.rs | #![no_std]
#![no_main]
#![feature(min_type_alias_impl_trait)]
#![feature(impl_trait_in_bindings)]
#![feature(type_alias_impl_trait)]
#![allow(incomplete_features)]
#[path = "../example_common.rs"]
mod example_common;
use example_common::*;
use core::task::Poll;
use defmt::panic;
use embassy::executor::Spawner;
use embassy::time::{Duration, Instant, Timer};
use embassy_nrf::{interrupt, Peripherals};
#[embassy::task]
async fn | () {
loop {
info!("DING DONG");
Timer::after(Duration::from_ticks(16000)).await;
}
}
#[embassy::task]
async fn run2() {
loop {
Timer::at(Instant::from_ticks(0)).await;
}
}
#[embassy::task]
async fn run3() {
futures::future::poll_fn(|cx| {
cx.waker().wake_by_ref();
Poll::<()>::Pending
})
.await;
}
#[embassy::main]
async fn main(spawner: Spawner, _p: Peripherals) {
unwrap!(spawner.spawn(run1()));
unwrap!(spawner.spawn(run2()));
unwrap!(spawner.spawn(run3()));
}
| run1 |
user.service.ts | // /* eslint-disable prettier/prettier */
// import { Injectable } from '@nestjs/common';
// import { InjectRepository } from '@nestjs/typeorm';
// import { Repository } from 'typeorm';
// import { addUserDto } from './dto/add-user.dto';
// import { User } from './entities/user.entity';
// @Injectable()
// export class UserService {
// constructor(
// @InjectRepository(User)
// private readonly userRepository: Repository<User>,
// ) {}
// async read(id: number): Promise<User> {
// const user = await this.findById(id);
// delete user.password;
// return user;
// }
// async findById(id: number) {
// return await User.findOne(id);
// }
// async findByEmail(email: string) {
// return await this.userRepository.findOne({ where: { email:email } });
// }
// async findAll(): Promise<User[]> {
// return await this.userRepository.find();
// }
// async create(addUserDto: addUserDto) { | // userEntity.nom = addUserDto.nom;
// userEntity.email = addUserDto.email;
// userEntity.telephone = addUserDto.telephone;
// userEntity.role = addUserDto.role;
// userEntity.password = addUserDto.password;
// const user = this.userRepository.create(userEntity);
// await this.userRepository.save(user);
// delete user.password;
// return user;
// }
// async update(id: number, data: Partial<addUserDto>) {
// await this.userRepository.update({ id }, data);
// const user = await this.userRepository.findOne({ where: { id } });
// delete user.password;
// return user;
// }
// async delete(id: number) {
// const user = await this.userRepository.findOne({ where: { id } });
// await this.userRepository.delete({ id });
// return user;
// }
// } | // const userEntity = new User();
// userEntity.prenom = addUserDto.prenom; |
test_toposort.py | import unittest
from libconda.toposort import toposort, pop_key
class TopoSortTests(unittest.TestCase):
def test_pop_key(self):
key = pop_key({'a':{'b', 'c'}, 'b':{'c'}})
self.assertEqual(key, 'b')
key = pop_key({'a':{'b'}, 'b':{'c', 'a'}})
self.assertEqual(key, 'a')
key = pop_key({'a':{'b'}, 'b':{'a'}})
self.assertEqual(key, 'a')
def test_simple(self):
data = {'a':'bc', 'b':'c'}
results = toposort(data, safe=True)
self.assertEqual(results, ['c', 'b', 'a'])
results = toposort(data, safe=False)
self.assertEqual(results, ['c', 'b', 'a'])
def | (self):
data = {'a':'b', 'b':'a'}
with self.assertRaises(ValueError):
toposort(data, False)
results = toposort(data)
# Results do not have a guaranteed order
self.assertEqual(set(results), {'b', 'a'})
def test_cycle_best_effort(self):
data = {'a':'bc', 'b':'c', '1':'2', '2':'1'}
results = toposort(data)
self.assertEqual(results[:3], ['c', 'b', 'a'])
# Cycles come last
# Results do not have an guaranteed order
self.assertEqual(set(results[3:]), {'1', '2'})
def test_python_is_prioritized(self):
"""
This test checks a special invariant related to 'python' specifically.
Python is part of a cycle (pip <--> python), which can cause it to be
installed *after* packages that need python (possibly in
post-install.sh).
A special case in toposort() breaks the cycle, to ensure that python
isn't installed too late. Here, we verify that it works.
"""
# This is the actual dependency graph for python (as of the time of this writing, anyway)
data = {'python' : ['pip', 'openssl', 'readline', 'sqlite', 'tk', 'xz', 'zlib'],
'pip': ['python', 'setuptools', 'wheel'],
'setuptools' : ['python'],
'wheel' : ['python'],
'openssl' : [],
'readline' : [],
'sqlite' : [],
'tk' : [],
'xz' : [],
'zlib' : []}
# Here are some extra pure-python libs, just for good measure.
data.update({'psutil' : ['python'],
'greenlet' : ['python'],
'futures' : ['python'],
'six' : ['python']})
results = toposort(data)
# Python always comes before things that need it!
self.assertLess(results.index('python'), results.index('setuptools'))
self.assertLess(results.index('python'), results.index('wheel'))
self.assertLess(results.index('python'), results.index('pip'))
self.assertLess(results.index('python'), results.index('psutil'))
self.assertLess(results.index('python'), results.index('greenlet'))
self.assertLess(results.index('python'), results.index('futures'))
self.assertLess(results.index('python'), results.index('six'))
if __name__ == '__main__':
unittest.main()
| test_cycle |
decoders.py | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.base_modules import BaseRNN
from convlab2.policy.larl.multiwoz.latent_dialog.utils import cast_type, LONG, FLOAT
from convlab2.policy.larl.multiwoz.latent_dialog.corpora import DECODING_MASKED_TOKENS, EOS
TEACH_FORCE = 'teacher_forcing'
TEACH_GEN = 'teacher_gen'
GEN = 'gen'
GEN_VALID = 'gen_valid'
class Attention(nn.Module):
def __init__(self, dec_cell_size, ctx_cell_size, attn_mode, project):
super(Attention, self).__init__()
self.dec_cell_size = dec_cell_size
self.ctx_cell_size = ctx_cell_size
self.attn_mode = attn_mode
if project:
self.linear_out = nn.Linear(
dec_cell_size+ctx_cell_size, dec_cell_size)
else:
self.linear_out = None
if attn_mode == 'general':
self.dec_w = nn.Linear(dec_cell_size, ctx_cell_size)
elif attn_mode == 'cat':
self.dec_w = nn.Linear(dec_cell_size, dec_cell_size)
self.attn_w = nn.Linear(ctx_cell_size, dec_cell_size)
self.query_w = nn.Linear(dec_cell_size, 1)
def forward(self, output, context):
# output: (batch_size, output_seq_len, dec_cell_size)
# context: (batch_size, max_ctx_len, ctx_cell_size)
batch_size = output.size(0)
max_ctx_len = context.size(1)
if self.attn_mode == 'dot':
# (batch_size, output_seq_len, max_ctx_len)
attn = th.bmm(output, context.transpose(1, 2))
elif self.attn_mode == 'general':
# (batch_size, output_seq_len, ctx_cell_size)
mapped_output = self.dec_w(output)
# (batch_size, output_seq_len, max_ctx_len)
attn = th.bmm(mapped_output, context.transpose(1, 2))
elif self.attn_mode == 'cat':
# (batch_size, output_seq_len, dec_cell_size)
mapped_output = self.dec_w(output)
# (batch_size, max_ctx_len, dec_cell_size)
mapped_attn = self.attn_w(context)
# (batch_size, output_seq_len, max_ctx_len, dec_cell_size)
tiled_output = mapped_output.unsqueeze(
2).repeat(1, 1, max_ctx_len, 1)
# (batch_size, 1, max_ctx_len, dec_cell_size)
tiled_attn = mapped_attn.unsqueeze(1)
# (batch_size, output_seq_len, max_ctx_len, dec_cell_size)
fc1 = F.tanh(tiled_output+tiled_attn)
# (batch_size, output_seq_len, max_ctx_len)
attn = self.query_w(fc1).squeeze(-1)
else:
raise ValueError('Unknown attention mode')
# TODO mask
# if self.mask is not None:
# (batch_size, output_seq_len, max_ctx_len)
attn = F.softmax(attn.view(-1, max_ctx_len),
dim=1).view(batch_size, -1, max_ctx_len)
# (batch_size, output_seq_len, ctx_cell_size)
mix = th.bmm(attn, context)
# (batch_size, output_seq_len, dec_cell_size+ctx_cell_size)
combined = th.cat((mix, output), dim=2)
if self.linear_out is None:
return combined, attn
else:
output = F.tanh(
self.linear_out(combined.view(-1, self.dec_cell_size+self.ctx_cell_size))).view(
batch_size, -1, self.dec_cell_size) # (batch_size, output_seq_len, dec_cell_size)
return output, attn
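# A minimal usage sketch (sizes are assumptions for illustration):
#   attn = Attention(dec_cell_size=256, ctx_cell_size=256, attn_mode='dot', project=True)
#   out, scores = attn(output, context)
#   # output: (batch, dec_len, 256), context: (batch, ctx_len, 256)
#   # out:    (batch, dec_len, 256), scores:  (batch, dec_len, ctx_len)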
class DecoderRNN(BaseRNN):
def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p,
bidirectional, vocab_size, use_attn, ctx_cell_size, attn_mode, sys_id, eos_id, use_gpu,
max_dec_len, embedding=None):
super(DecoderRNN, self).__init__(input_dropout_p=input_dropout_p,
rnn_cell=rnn_cell,
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
output_dropout_p=output_dropout_p,
bidirectional=bidirectional)
# TODO embedding is None or not
if embedding is None:
self.embedding = nn.Embedding(vocab_size, input_size)
else:
self.embedding = embedding
# share parameters between encoder and decoder
# self.rnn = ctx_encoder.rnn
# self.FC = nn.Linear(input_size, utt_encoder.output_size)
self.use_attn = use_attn
if self.use_attn:
self.attention = Attention(dec_cell_size=hidden_size,
ctx_cell_size=ctx_cell_size,
attn_mode=attn_mode,
project=True)
self.dec_cell_size = hidden_size
self.output_size = vocab_size
self.project = nn.Linear(self.dec_cell_size, self.output_size)
self.log_softmax = F.log_softmax
self.sys_id = sys_id
self.eos_id = eos_id
self.use_gpu = use_gpu
self.max_dec_len = max_dec_len
def forward(self, batch_size, dec_inputs, dec_init_state, attn_context, mode, gen_type, beam_size, goal_hid=None):
# dec_inputs: (batch_size, response_size-1)
# attn_context: (batch_size, max_ctx_len, ctx_cell_size)
# goal_hid: (batch_size, goal_nhid)
ret_dict = dict()
if self.use_attn:
ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()
if mode == GEN:
dec_inputs = None
if gen_type != 'beam':
beam_size = 1
if dec_inputs is not None:
decoder_input = dec_inputs
else:
# prepare the BOS inputs
with th.no_grad():
bos_var = Variable(th.LongTensor([self.sys_id]))
bos_var = cast_type(bos_var, LONG, self.use_gpu)
decoder_input = bos_var.expand(
batch_size*beam_size, 1) # (batch_size, 1)
if mode == GEN and gen_type == 'beam':
# TODO if beam search, repeat the initial states of the RNN
pass
else:
decoder_hidden_state = dec_init_state
# list of logprob | max_dec_len*(batch_size, 1, vocab_size)
prob_outputs = []
symbol_outputs = [] # list of word ids | max_dec_len*(batch_size, 1)
# back_pointers = []
# lengths = blabla...
def decode(step, cum_sum, step_output, step_attn):
prob_outputs.append(step_output)
step_output_slice = step_output.squeeze(
1) # (batch_size, vocab_size)
if self.use_attn:
ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)
if gen_type == 'greedy':
_, symbols = step_output_slice.topk(1) # (batch_size, 1)
elif gen_type == 'sample':
# TODO FIXME
# symbols = self.gumbel_max(step_output_slice)
pass
elif gen_type == 'beam':
# TODO
pass
else:
raise ValueError('Unsupported decoding mode')
symbol_outputs.append(symbols)
return cum_sum, symbols
if mode == TEACH_FORCE:
prob_outputs, decoder_hidden_state, attn = self.forward_step(
input_var=decoder_input, hidden_state=decoder_hidden_state, encoder_outputs=attn_context, goal_hid=goal_hid)
else:
# do free running here
cum_sum = None
for step in range(self.max_dec_len):
# Input:
# decoder_input: (batch_size, 1)
# decoder_hidden_state: tuple: (h, c)
# attn_context: (batch_size, max_ctx_len, ctx_cell_size)
# goal_hid: (batch_size, goal_nhid)
# Output:
# decoder_output: (batch_size, 1, vocab_size)
# decoder_hidden_state: tuple: (h, c)
# step_attn: (batch_size, 1, max_ctx_len)
decoder_output, decoder_hidden_state, step_attn = self.forward_step(
decoder_input, decoder_hidden_state, attn_context, goal_hid=goal_hid)
cum_sum, symbols = decode(
step, cum_sum, decoder_output, step_attn)
decoder_input = symbols
# (batch_size, max_dec_len, vocab_size)
prob_outputs = th.cat(prob_outputs, dim=1)
# back tracking to recover the 1-best in beam search
# if gen_type == 'beam':
ret_dict[DecoderRNN.KEY_SEQUENCE] = symbol_outputs
# prob_outputs: (batch_size, max_dec_len, vocab_size)
# decoder_hidden_state: tuple: (h, c)
# ret_dict[DecoderRNN.KEY_ATTN_SCORE]: max_dec_len*(batch_size, 1, max_ctx_len)
# ret_dict[DecoderRNN.KEY_SEQUENCE]: max_dec_len*(batch_size, 1)
return prob_outputs, decoder_hidden_state, ret_dict
def | (self, input_var, hidden_state, encoder_outputs, goal_hid):
# input_var: (batch_size, response_size-1 i.e. output_seq_len)
# hidden_state: tuple: (h, c)
# encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
# goal_hid: (batch_size, goal_nhid)
batch_size, output_seq_len = input_var.size()
# (batch_size, output_seq_len, embedding_dim)
embedded = self.embedding(input_var)
# add goals
if goal_hid is not None:
# (batch_size, 1, goal_nhid)
goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1))
# (batch_size, output_seq_len, goal_nhid)
goal_rep = goal_hid.repeat(1, output_seq_len, 1)
# (batch_size, output_seq_len, embedding_dim+goal_nhid)
embedded = th.cat([embedded, goal_rep], dim=2)
embedded = self.input_dropout(embedded)
# ############
# embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)
# output: (batch_size, output_seq_len, dec_cell_size)
# hidden: tuple: (h, c)
output, hidden_s = self.rnn(embedded, hidden_state)
attn = None
if self.use_attn:
# output: (batch_size, output_seq_len, dec_cell_size)
# encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
# attn: (batch_size, output_seq_len, max_ctx_len)
output, attn = self.attention(output, encoder_outputs)
# (batch_size*output_seq_len, vocab_size)
logits = self.project(output.contiguous().view(-1, self.dec_cell_size))
prediction = self.log_softmax(logits, dim=logits.dim(
)-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
return prediction, hidden_s, attn
# special for rl
def _step(self, input_var, hidden_state, encoder_outputs, goal_hid):
# input_var: (1, 1)
# hidden_state: tuple: (h, c)
# encoder_outputs: (1, max_dlg_len, dlg_cell_size)
# goal_hid: (1, goal_nhid)
batch_size, output_seq_len = input_var.size()
embedded = self.embedding(input_var) # (1, 1, embedding_dim)
if goal_hid is not None:
goal_hid = goal_hid.view(goal_hid.size(
0), 1, goal_hid.size(1)) # (1, 1, goal_nhid)
goal_rep = goal_hid.repeat(
1, output_seq_len, 1) # (1, 1, goal_nhid)
# (1, 1, embedding_dim+goal_nhid)
embedded = th.cat([embedded, goal_rep], dim=2)
embedded = self.input_dropout(embedded)
# ############
# embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)
# output: (1, 1, dec_cell_size)
# hidden: tuple: (h, c)
output, hidden_s = self.rnn(embedded, hidden_state)
attn = None
if self.use_attn:
# output: (1, 1, dec_cell_size)
# encoder_outputs: (1, max_dlg_len, dlg_cell_size)
# attn: (1, 1, max_dlg_len)
output, attn = self.attention(output, encoder_outputs)
# (1*1, vocab_size)
logits = self.project(output.view(-1, self.dec_cell_size))
prediction = logits.view(
batch_size, output_seq_len, -1) # (1, 1, vocab_size)
# prediction = self.log_softmax(logits, dim=logits.dim()-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
return prediction, hidden_s
# special for rl
def write(self, input_var, hidden_state, encoder_outputs, max_words, vocab, stop_tokens, goal_hid=None, mask=True,
decoding_masked_tokens=DECODING_MASKED_TOKENS):
# input_var: (1, 1)
# hidden_state: tuple: (h, c)
# encoder_outputs: max_dlg_len*(1, 1, dlg_cell_size)
# goal_hid: (1, goal_nhid)
logprob_outputs = [] # list of logprob | max_dec_len*(1, )
symbol_outputs = [] # list of word ids | max_dec_len*(1, )
decoder_input = input_var
decoder_hidden_state = hidden_state
if type(encoder_outputs) is list:
# (1, max_dlg_len, dlg_cell_size)
encoder_outputs = th.cat(encoder_outputs, 1)
# print('encoder_outputs.size() = {}'.format(encoder_outputs.size()))
if mask:
special_token_mask = Variable(th.FloatTensor(
[-999. if token in decoding_masked_tokens else 0. for token in vocab]))
special_token_mask = cast_type(
special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )
def _sample(dec_output, num_i):
# dec_output: (1, 1, vocab_size), need to softmax and log_softmax
dec_output = dec_output.view(-1) # (vocab_size, )
# TODO temperature
prob = F.softmax(dec_output/0.6, dim=0) # (vocab_size, )
logprob = F.log_softmax(dec_output, dim=0) # (vocab_size, )
symbol = prob.multinomial(num_samples=1).detach() # (1, )
# _, symbol = prob.topk(1) # (1, )
_, tmp_symbol = prob.topk(1) # (1, )
# print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))
# print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))
logprob = logprob.gather(0, symbol) # (1, )
return logprob, symbol
for i in range(max_words):
decoder_output, decoder_hidden_state = self._step(
decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
# disable special tokens from being generated in a normal turn
if mask:
decoder_output += special_token_mask.expand(1, 1, -1)
logprob, symbol = _sample(decoder_output, i)
logprob_outputs.append(logprob)
symbol_outputs.append(symbol)
decoder_input = symbol.view(1, -1)
if vocab[symbol.item()] in stop_tokens:
break
assert len(logprob_outputs) == len(symbol_outputs)
# logprob_list = [t.item() for t in logprob_outputs]
logprob_list = logprob_outputs
symbol_list = [t.item() for t in symbol_outputs]
return logprob_list, symbol_list
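# A minimal usage sketch for write(); the variable names are illustrative,
# not from the original code:
#   logprobs, word_ids = decoder.write(bos_var, dec_init_state, enc_outs,
#                                      max_words=40, vocab=vocab,
#                                      stop_tokens=['<eos>'])
# Note that logprob_list keeps graph-connected tensors (the .item() version
# above is commented out), so the values can back a REINFORCE-style loss.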
# For MultiWoz RL
def forward_rl(self, batch_size, dec_init_state, attn_context, vocab, max_words, goal_hid=None, mask=True, temp=0.1):
# prepare the BOS inputs
with th.no_grad():
bos_var = Variable(th.LongTensor([self.sys_id]))
bos_var = cast_type(bos_var, LONG, self.use_gpu)
decoder_input = bos_var.expand(batch_size, 1)  # (batch_size, 1)
decoder_hidden_state = dec_init_state  # tuple: (h, c)
encoder_outputs = attn_context  # (batch_size, ctx_len, ctx_cell_size)
logprob_outputs = [] # list of logprob | max_dec_len*(1, )
symbol_outputs = [] # list of word ids | max_dec_len*(1, )
if mask:
special_token_mask = Variable(th.FloatTensor(
[-999. if token in DECODING_MASKED_TOKENS else 0. for token in vocab]))
special_token_mask = cast_type(
special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )
def _sample(dec_output, num_i):
# dec_output: (1, 1, vocab_size), need to softmax and log_softmax
# (batch_size, vocab_size, )
dec_output = dec_output.view(batch_size, -1)
# (batch_size, vocab_size, )
prob = F.softmax(dec_output/temp, dim=1)
# (batch_size, vocab_size, )
logprob = F.log_softmax(dec_output, dim=1)
symbol = prob.multinomial(num_samples=1).detach()  # (batch_size, 1)
# _, symbol = prob.topk(1) # (1, )
_, tmp_symbol = prob.topk(1) # (1, )
# print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))
# print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))
logprob = logprob.gather(1, symbol)  # (batch_size, 1)
return logprob, symbol
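# Note the asymmetry above: sampling draws from the temperature-scaled
# distribution (dec_output/temp) while the returned logprob is evaluated
# under the unscaled distribution, i.e. gradients score the original policy.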
stopped_samples = set()
for i in range(max_words):
decoder_output, decoder_hidden_state = self._step(
decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
# disable special tokens from being generated in a normal turn
if mask:
decoder_output += special_token_mask.expand(1, 1, -1)
logprob, symbol = _sample(decoder_output, i)
logprob_outputs.append(logprob)
symbol_outputs.append(symbol)
decoder_input = symbol.view(batch_size, -1)
for b_id in range(batch_size):
if vocab[symbol[b_id].item()] == EOS:
stopped_samples.add(b_id)
if len(stopped_samples) == batch_size:
break
assert len(logprob_outputs) == len(symbol_outputs)
symbol_outputs = th.cat(symbol_outputs, dim=1).cpu().data.numpy().tolist()
logprob_outputs = th.cat(logprob_outputs, dim=1)
logprob_list = []
symbol_list = []
for b_id in range(batch_size):
b_logprob = []
b_symbol = []
for t_id in range(logprob_outputs.shape[1]):
symbol = symbol_outputs[b_id][t_id]
if vocab[symbol] == EOS and t_id != 0:
break
b_symbol.append(symbol_outputs[b_id][t_id])
b_logprob.append(logprob_outputs[b_id][t_id])
logprob_list.append(b_logprob)
symbol_list.append(b_symbol)
# TODO backward compatible, if batch_size == 1, we remove the nested structure
if batch_size == 1:
logprob_list = logprob_list[0]
symbol_list = symbol_list[0]
return logprob_list, symbol_list
| forward_step |
inject.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inject
import (
"bufio"
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net"
"os"
"path"
"reflect"
"strconv"
"strings"
"text/template"
"github.com/ghodss/yaml"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
multierror "github.com/hashicorp/go-multierror"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/batch/v2alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
yamlDecoder "k8s.io/apimachinery/pkg/util/yaml"
"istio.io/api/annotation"
"istio.io/api/label"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/validation"
"istio.io/istio/pkg/util/gogoprotomarshal"
"istio.io/pkg/log"
)
type annotationValidationFunc func(value string) error
// per-sidecar policy and status
var (
alwaysValidFunc = func(value string) error {
return nil
}
AnnotationValidation = map[string]annotationValidationFunc{
annotation.SidecarInject.Name: alwaysValidFunc,
annotation.SidecarStatus.Name: alwaysValidFunc,
annotation.SidecarRewriteAppHTTPProbers.Name: alwaysValidFunc,
annotation.SidecarControlPlaneAuthPolicy.Name: alwaysValidFunc,
annotation.SidecarDiscoveryAddress.Name: alwaysValidFunc,
annotation.SidecarProxyImage.Name: alwaysValidFunc,
annotation.SidecarProxyCPU.Name: alwaysValidFunc,
annotation.SidecarProxyMemory.Name: alwaysValidFunc,
annotation.SidecarInterceptionMode.Name: validateInterceptionMode,
annotation.SidecarBootstrapOverride.Name: alwaysValidFunc,
annotation.SidecarStatsInclusionPrefixes.Name: alwaysValidFunc,
annotation.SidecarStatsInclusionSuffixes.Name: alwaysValidFunc,
annotation.SidecarStatsInclusionRegexps.Name: alwaysValidFunc,
annotation.SidecarUserVolume.Name: alwaysValidFunc,
annotation.SidecarUserVolumeMount.Name: alwaysValidFunc,
annotation.SidecarEnableCoreDump.Name: validateBool,
annotation.SidecarStatusPort.Name: validateStatusPort,
annotation.SidecarStatusReadinessInitialDelaySeconds.Name: validateUInt32,
annotation.SidecarStatusReadinessPeriodSeconds.Name: validateUInt32,
annotation.SidecarStatusReadinessFailureThreshold.Name: validateUInt32,
annotation.SidecarTrafficIncludeOutboundIPRanges.Name: ValidateIncludeIPRanges,
annotation.SidecarTrafficExcludeOutboundIPRanges.Name: ValidateExcludeIPRanges,
annotation.SidecarTrafficIncludeInboundPorts.Name: ValidateIncludeInboundPorts,
annotation.SidecarTrafficExcludeInboundPorts.Name: ValidateExcludeInboundPorts,
annotation.SidecarTrafficExcludeOutboundPorts.Name: ValidateExcludeOutboundPorts,
annotation.SidecarTrafficKubevirtInterfaces.Name: alwaysValidFunc,
annotation.PrometheusMergeMetrics.Name: validateBool,
annotation.ProxyConfig.Name: validateProxyConfig,
"k8s.v1.cni.cncf.io/networks": alwaysValidFunc,
}
)
func validateProxyConfig(value string) error {
config := mesh.DefaultProxyConfig()
if err := gogoprotomarshal.ApplyYAML(value, &config); err != nil {
return fmt.Errorf("failed to convert to apply proxy config: %v", err)
}
return validation.ValidateProxyConfig(&config)
}
func validateAnnotations(annotations map[string]string) (err error) {
for name, value := range annotations {
if v, ok := AnnotationValidation[name]; ok {
if e := v(value); e != nil {
err = multierror.Append(err, fmt.Errorf("invalid value '%s' for annotation '%s': %v", value, name, e))
}
} else if strings.Contains(name, "istio") {
log.Warnf("Potentially misspelled annotation '%s' with value '%s' encountered", name, value)
}
}
return
}
// InjectionPolicy determines the policy for injecting the
// sidecar proxy into the watched namespace(s).
type InjectionPolicy string
const (
// InjectionPolicyDisabled specifies that the sidecar injector
// will not inject the sidecar into resources by default for the
// namespace(s) being watched. Resources can enable injection
// using the "sidecar.istio.io/inject" annotation with value of
// true.
InjectionPolicyDisabled InjectionPolicy = "disabled"
// InjectionPolicyEnabled specifies that the sidecar injector will
// inject the sidecar into resources by default for the
// namespace(s) being watched. Resources can disable injection
// using the "sidecar.istio.io/inject" annotation with value of
// false.
InjectionPolicyEnabled InjectionPolicy = "enabled"
)
const (
// ProxyContainerName is used by e2e integration tests for fetching logs
ProxyContainerName = "istio-proxy"
// ValidationContainerName is the name of the init container that validates
// if CNI has made the necessary changes to iptables
ValidationContainerName = "istio-validation"
)
// SidecarInjectionSpec collects all container types and volumes for
// sidecar mesh injection
type SidecarInjectionSpec struct {
// RewriteHTTPProbe indicates whether Kubernetes HTTP prober in the PodSpec
// will be rewritten to be redirected by pilot agent.
PodRedirectAnnot map[string]string `yaml:"podRedirectAnnot"`
RewriteAppHTTPProbe bool `yaml:"rewriteAppHTTPProbe"`
HoldApplicationUntilProxyStarts bool `yaml:"holdApplicationUntilProxyStarts"`
InitContainers []corev1.Container `yaml:"initContainers"`
Containers []corev1.Container `yaml:"containers"`
Volumes []corev1.Volume `yaml:"volumes"`
DNSConfig *corev1.PodDNSConfig `yaml:"dnsConfig"`
ImagePullSecrets []corev1.LocalObjectReference `yaml:"imagePullSecrets"`
}
// SidecarTemplateData is the data object to which the templated
// version of `SidecarInjectionSpec` is applied.
type SidecarTemplateData struct {
TypeMeta *metav1.TypeMeta
DeploymentMeta *metav1.ObjectMeta
ObjectMeta *metav1.ObjectMeta
Spec *corev1.PodSpec
ProxyConfig *meshconfig.ProxyConfig
MeshConfig *meshconfig.MeshConfig
Values map[string]interface{}
}
// Config specifies the sidecar injection configuration This includes
// the sidecar template and cluster-side injection policy. It is used
// by kube-inject, sidecar injector, and http endpoint.
type Config struct {
Policy InjectionPolicy `json:"policy"`
// Template is the templated version of `SidecarInjectionSpec` prior to
// expansion over the `SidecarTemplateData`.
Template string `json:"template"`
// NeverInjectSelector: Refuses the injection on pods whose labels match this selector.
// It's an array of label selectors, that will be OR'ed, meaning we will iterate
// over it and stop at the first match
// Takes precedence over AlwaysInjectSelector.
NeverInjectSelector []metav1.LabelSelector `json:"neverInjectSelector"`
// AlwaysInjectSelector: Forces the injection on pods whose labels match this selector.
// It's an array of label selectors, that will be OR'ed, meaning we will iterate
// over it and stop at the first match
AlwaysInjectSelector []metav1.LabelSelector `json:"alwaysInjectSelector"`
// InjectedAnnotations are additional annotations that will be added to the pod spec after injection
// This is primarily to support PSP annotations.
InjectedAnnotations map[string]string `json:"injectedAnnotations"`
}
func validateCIDRList(cidrs string) error {
if len(cidrs) > 0 {
for _, cidr := range strings.Split(cidrs, ",") {
if _, _, err := net.ParseCIDR(cidr); err != nil {
return fmt.Errorf("failed parsing cidr '%s': %v", cidr, err)
}
}
}
return nil
}
func splitPorts(portsString string) []string {
return strings.Split(portsString, ",")
}
func parsePort(portStr string) (int, error) {
port, err := strconv.ParseUint(strings.TrimSpace(portStr), 10, 16)
if err != nil {
return 0, fmt.Errorf("failed parsing port '%s': %v", portStr, err)
}
return int(port), nil
}
func parsePorts(portsString string) ([]int, error) {
portsString = strings.TrimSpace(portsString)
ports := make([]int, 0)
if len(portsString) > 0 {
for _, portStr := range splitPorts(portsString) {
port, err := parsePort(portStr)
if err != nil {
return nil, fmt.Errorf("failed parsing port '%s': %v", portStr, err)
}
ports = append(ports, port)
}
}
return ports, nil
}
func validatePortList(parameterName, ports string) error {
if _, err := parsePorts(ports); err != nil {
return fmt.Errorf("%s invalid: %v", parameterName, err)
}
return nil
}
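// For example, validatePortList("includeInboundPorts", "80, 8080") succeeds,
// while "80,foo" or "99999" fail because every entry must parse as a uint16.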
// validateInterceptionMode validates the interceptionMode annotation
func validateInterceptionMode(mode string) error {
switch mode {
case meshconfig.ProxyConfig_REDIRECT.String():
case meshconfig.ProxyConfig_TPROXY.String():
case string(model.InterceptionNone): // not a global mesh config - must be enabled for each sidecar
default:
return fmt.Errorf("interceptionMode invalid, use REDIRECT,TPROXY,NONE: %v", mode)
}
return nil
}
// ValidateIncludeIPRanges validates the includeIPRanges parameter
func ValidateIncludeIPRanges(ipRanges string) error {
if ipRanges != "*" {
if e := validateCIDRList(ipRanges); e != nil {
return fmt.Errorf("includeIPRanges invalid: %v", e)
}
}
return nil
}
// ValidateExcludeIPRanges validates the excludeIPRanges parameter
func ValidateExcludeIPRanges(ipRanges string) error {
if e := validateCIDRList(ipRanges); e != nil {
return fmt.Errorf("excludeIPRanges invalid: %v", e)
}
return nil
}
// ValidateIncludeInboundPorts validates the includeInboundPorts parameter
func | (ports string) error {
if ports != "*" {
return validatePortList("includeInboundPorts", ports)
}
return nil
}
// ValidateExcludeInboundPorts validates the excludeInboundPorts parameter
func ValidateExcludeInboundPorts(ports string) error {
return validatePortList("excludeInboundPorts", ports)
}
// ValidateExcludeOutboundPorts validates the excludeOutboundPorts parameter
func ValidateExcludeOutboundPorts(ports string) error {
return validatePortList("excludeOutboundPorts", ports)
}
// validateStatusPort validates the statusPort parameter
func validateStatusPort(port string) error {
if _, e := parsePort(port); e != nil {
return fmt.Errorf("excludeInboundPorts invalid: %v", e)
}
return nil
}
// validateUInt32 validates that the given annotation value parses as a uint32.
func validateUInt32(value string) error {
_, err := strconv.ParseUint(value, 10, 32)
return err
}
// validateBool validates that the given annotation value is a boolean.
func validateBool(value string) error {
_, err := strconv.ParseBool(value)
return err
}
func injectRequired(ignored []string, config *Config, podSpec *corev1.PodSpec, metadata *metav1.ObjectMeta) bool { // nolint: lll
// Skip injection when host networking is enabled. The problem is
// that the iptables changes are assumed to be within the pod when,
// in fact, they are changing the routing at the host level. This
// often results in routing failures within a node which can
// affect the network provider within the cluster causing
// additional pod failures.
if podSpec.HostNetwork {
return false
}
// skip special kubernetes system namespaces
for _, namespace := range ignored {
if metadata.Namespace == namespace {
return false
}
}
annos := metadata.GetAnnotations()
if annos == nil {
annos = map[string]string{}
}
var useDefault bool
var inject bool
switch strings.ToLower(annos[annotation.SidecarInject.Name]) {
// http://yaml.org/type/bool.html
case "y", "yes", "true", "on":
inject = true
case "":
useDefault = true
}
// If an annotation is not explicitly given, check the LabelSelectors, starting with NeverInject
if useDefault {
for _, neverSelector := range config.NeverInjectSelector {
selector, err := metav1.LabelSelectorAsSelector(&neverSelector)
if err != nil {
log.Warnf("Invalid selector for NeverInjectSelector: %v (%v)", neverSelector, err)
} else if !selector.Empty() && selector.Matches(labels.Set(metadata.Labels)) {
log.Debugf("Explicitly disabling injection for pod %s/%s due to pod labels matching NeverInjectSelector config map entry.",
metadata.Namespace, potentialPodName(metadata))
inject = false
useDefault = false
break
}
}
}
// If there's no annotation nor a NeverInjectSelector, check the AlwaysInject one
if useDefault {
for _, alwaysSelector := range config.AlwaysInjectSelector {
selector, err := metav1.LabelSelectorAsSelector(&alwaysSelector)
if err != nil {
log.Warnf("Invalid selector for AlwaysInjectSelector: %v (%v)", alwaysSelector, err)
} else if !selector.Empty() && selector.Matches(labels.Set(metadata.Labels)) {
log.Debugf("Explicitly enabling injection for pod %s/%s due to pod labels matching AlwaysInjectSelector config map entry.",
metadata.Namespace, potentialPodName(metadata))
inject = true
useDefault = false
break
}
}
}
var required bool
switch config.Policy {
default: // unknown InjectionPolicy
log.Errorf("Illegal value for autoInject:%s, must be one of [%s,%s]. Auto injection disabled!",
config.Policy, InjectionPolicyDisabled, InjectionPolicyEnabled)
required = false
case InjectionPolicyDisabled:
if useDefault {
required = false
} else {
required = inject
}
case InjectionPolicyEnabled:
if useDefault {
required = true
} else {
required = inject
}
}
if log.DebugEnabled() {
// Build a log message for the annotations.
annotationStr := ""
for name := range AnnotationValidation {
value, ok := annos[name]
if !ok {
value = "(unset)"
}
annotationStr += fmt.Sprintf("%s:%s ", name, value)
}
log.Debugf("Sidecar injection policy for %v/%v: namespacePolicy:%v useDefault:%v inject:%v required:%v %s",
metadata.Namespace,
potentialPodName(metadata),
config.Policy,
useDefault,
inject,
required,
annotationStr)
}
return required
}
func formatDuration(in *types.Duration) string {
dur, err := types.DurationFromProto(in)
if err != nil {
return "1s"
}
return dur.String()
}
func isset(m map[string]string, key string) bool {
_, ok := m[key]
return ok
}
func directory(filepath string) string {
dir, _ := path.Split(filepath)
return dir
}
func flippedContains(needle, haystack string) bool {
return strings.Contains(haystack, needle)
}
// InjectionData renders sidecarTemplate with valuesConfig.
func InjectionData(sidecarTemplate, valuesConfig, version string, typeMetadata *metav1.TypeMeta, deploymentMetadata *metav1.ObjectMeta, spec *corev1.PodSpec,
metadata *metav1.ObjectMeta, meshConfig *meshconfig.MeshConfig, path string) (
*SidecarInjectionSpec, string, error) {
// If DNSPolicy is not ClusterFirst, the Envoy sidecar may not be able to connect to Istio Pilot.
if spec.DNSPolicy != "" && spec.DNSPolicy != corev1.DNSClusterFirst {
podName := potentialPodName(metadata)
log.Warnf("%q's DNSPolicy is not %q. The Envoy sidecar may not be able to connect to Istio Pilot",
metadata.Namespace+"/"+podName, corev1.DNSClusterFirst)
}
if err := validateAnnotations(metadata.GetAnnotations()); err != nil {
log.Errorf("Injection failed due to invalid annotations: %v", err)
return nil, "", err
}
values := map[string]interface{}{}
if err := yaml.Unmarshal([]byte(valuesConfig), &values); err != nil {
log.Infof("Failed to parse values config: %v [%v]\n", err, valuesConfig)
return nil, "", multierror.Prefix(err, "could not parse configuration values:")
}
if pca, f := metadata.GetAnnotations()[annotation.ProxyConfig.Name]; f {
var merr error
meshConfig, merr = mesh.ApplyProxyConfig(pca, *meshConfig)
if merr != nil {
return nil, "", merr
}
}
data := SidecarTemplateData{
TypeMeta: typeMetadata,
DeploymentMeta: deploymentMetadata,
ObjectMeta: metadata,
Spec: spec,
ProxyConfig: meshConfig.GetDefaultConfig(),
MeshConfig: meshConfig,
Values: values,
}
funcMap := template.FuncMap{
"formatDuration": formatDuration,
"isset": isset,
"excludeInboundPort": excludeInboundPort,
"includeInboundPorts": includeInboundPorts,
"kubevirtInterfaces": kubevirtInterfaces,
"applicationPorts": applicationPorts,
"annotation": getAnnotation,
"valueOrDefault": valueOrDefault,
"toJSON": toJSON,
"toJson": toJSON, // Used by, e.g. Istio 1.0.5 template sidecar-injector-configmap.yaml
"fromJSON": fromJSON,
"structToJSON": structToJSON,
"protoToJSON": protoToJSON,
"toYaml": toYaml,
"indent": indent,
"directory": directory,
"contains": flippedContains,
"toLower": strings.ToLower,
"appendMultusNetwork": appendMultusNetwork,
}
// Allows the template to use env variables from istiod.
// Istiod will use a custom template, without 'values.yaml', and the pod will have
// an optional 'vendor' configmap where additional settings can be defined.
funcMap["env"] = func(key string, def string) string {
val := os.Getenv(key)
if val == "" {
return def
}
return val
}
// Need to use FuncMap and SidecarTemplateData context
funcMap["render"] = func(template string) string {
bbuf, err := parseTemplate(template, funcMap, data)
if err != nil {
return ""
}
return bbuf.String()
}
bbuf, err := parseTemplate(sidecarTemplate, funcMap, data)
if err != nil {
return nil, "", err
}
var sic SidecarInjectionSpec
if err := yaml.Unmarshal(bbuf.Bytes(), &sic); err != nil {
// This usually means an invalid injector template; we can't check
// the template itself because it is merely a string.
log.Warnf("Failed to unmarshal template %v\n %s", err, bbuf.String())
return nil, "", multierror.Prefix(err, "failed parsing generated injected YAML (check Istio sidecar injector configuration):")
}
// set sidecar --concurrency
applyConcurrency(sic.Containers)
// overwrite cluster name and network if needed
overwriteClusterInfo(sic.Containers, path)
status := &SidecarInjectionStatus{Version: version}
for _, c := range sic.InitContainers {
status.InitContainers = append(status.InitContainers, c.Name)
}
for _, c := range sic.Containers {
status.Containers = append(status.Containers, c.Name)
}
for _, c := range sic.Volumes {
status.Volumes = append(status.Volumes, c.Name)
}
for _, c := range sic.ImagePullSecrets {
status.ImagePullSecrets = append(status.ImagePullSecrets, c.Name)
}
statusAnnotationValue, err := json.Marshal(status)
if err != nil {
return nil, "", fmt.Errorf("error encoded injection status: %v", err)
}
sic.HoldApplicationUntilProxyStarts, _, _ = unstructured.NestedBool(data.Values, "global", "proxy", "holdApplicationUntilProxyStarts")
return &sic, string(statusAnnotationValue), nil
}
func parseTemplate(tmplStr string, funcMap map[string]interface{}, data SidecarTemplateData) (bytes.Buffer, error) {
var tmpl bytes.Buffer
temp := template.New("inject")
t, err := temp.Funcs(funcMap).Parse(tmplStr)
if err != nil {
log.Infof("Failed to parse template: %v %v\n", err, tmplStr)
return bytes.Buffer{}, err
}
if err := t.Execute(&tmpl, &data); err != nil {
log.Infof("Invalid template: %v %v\n", err, tmplStr)
return bytes.Buffer{}, err
}
return tmpl, nil
}
// IntoResourceFile injects the istio proxy into the specified
// kubernetes YAML file.
// nolint: lll
func IntoResourceFile(sidecarTemplate string, valuesConfig string, revision string, meshconfig *meshconfig.MeshConfig, in io.Reader, out io.Writer, warningHandler func(string)) error {
reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096))
for {
raw, err := reader.Read()
if err == io.EOF {
break
}
if err != nil {
return err
}
obj, err := FromRawToObject(raw)
if err != nil && !runtime.IsNotRegisteredError(err) {
return err
}
var updated []byte
if err == nil {
outObject, err := IntoObject(sidecarTemplate, valuesConfig, revision, meshconfig, obj, warningHandler) // nolint: vetshadow
if err != nil {
return err
}
if updated, err = yaml.Marshal(outObject); err != nil {
return err
}
} else {
updated = raw // unchanged
}
if _, err = out.Write(updated); err != nil {
return err
}
if _, err = fmt.Fprint(out, "---\n"); err != nil {
return err
}
}
return nil
}
// FromRawToObject is used to convert from raw to the runtime object
func FromRawToObject(raw []byte) (runtime.Object, error) {
var typeMeta metav1.TypeMeta
if err := yaml.Unmarshal(raw, &typeMeta); err != nil {
return nil, err
}
gvk := schema.FromAPIVersionAndKind(typeMeta.APIVersion, typeMeta.Kind)
obj, err := injectScheme.New(gvk)
if err != nil {
return nil, err
}
if err = yaml.Unmarshal(raw, obj); err != nil {
return nil, err
}
return obj, nil
}
// IntoObject convert the incoming resources into Injected resources
// nolint: lll
func IntoObject(sidecarTemplate string, valuesConfig string, revision string, meshconfig *meshconfig.MeshConfig, in runtime.Object, warningHandler func(string)) (interface{}, error) {
out := in.DeepCopyObject()
var deploymentMetadata *metav1.ObjectMeta
var metadata *metav1.ObjectMeta
var podSpec *corev1.PodSpec
var typeMeta *metav1.TypeMeta
// Handle Lists
if list, ok := out.(*corev1.List); ok {
result := list
for i, item := range list.Items {
obj, err := FromRawToObject(item.Raw)
if runtime.IsNotRegisteredError(err) {
continue
}
if err != nil {
return nil, err
}
r, err := IntoObject(sidecarTemplate, valuesConfig, revision, meshconfig, obj, warningHandler) // nolint: vetshadow
if err != nil {
return nil, err
}
re := runtime.RawExtension{}
re.Object = r.(runtime.Object)
result.Items[i] = re
}
return result, nil
}
// CronJobs have JobTemplates in them, instead of Templates, so we
// special case them.
switch v := out.(type) {
case *v2alpha1.CronJob:
job := v
typeMeta = &job.TypeMeta
metadata = &job.Spec.JobTemplate.ObjectMeta
deploymentMetadata = &job.ObjectMeta
podSpec = &job.Spec.JobTemplate.Spec.Template.Spec
case *corev1.Pod:
pod := v
typeMeta = &pod.TypeMeta
metadata = &pod.ObjectMeta
deploymentMetadata = &pod.ObjectMeta
podSpec = &pod.Spec
case *appsv1.Deployment: // Added to be explicit about the most expected case
deploy := v
typeMeta = &deploy.TypeMeta
deploymentMetadata = &deploy.ObjectMeta
metadata = &deploy.Spec.Template.ObjectMeta
podSpec = &deploy.Spec.Template.Spec
default:
// `in` is a pointer to an Object. Dereference it.
outValue := reflect.ValueOf(out).Elem()
typeMeta = outValue.FieldByName("TypeMeta").Addr().Interface().(*metav1.TypeMeta)
deploymentMetadata = outValue.FieldByName("ObjectMeta").Addr().Interface().(*metav1.ObjectMeta)
templateValue := outValue.FieldByName("Spec").FieldByName("Template")
// `Template` is defined as a pointer in some older API
// definitions, e.g. ReplicationController
if templateValue.Kind() == reflect.Ptr {
if templateValue.IsNil() {
return out, fmt.Errorf("spec.template is a required value")
}
templateValue = templateValue.Elem()
}
metadata = templateValue.FieldByName("ObjectMeta").Addr().Interface().(*metav1.ObjectMeta)
podSpec = templateValue.FieldByName("Spec").Addr().Interface().(*corev1.PodSpec)
}
name := metadata.Name
if name == "" {
name = deploymentMetadata.Name
}
// Skip injection when host networking is enabled. The problem is
// that the iptable changes are assumed to be within the pod when,
// in fact, they are changing the routing at the host level. This
// often results in routing failures within a node which can
// affect the network provider within the cluster causing
// additional pod failures.
if podSpec.HostNetwork {
warningHandler(fmt.Sprintf("===> Skipping injection because %q has host networking enabled\n",
name))
return out, nil
}
// skip injection for injected pods
if len(podSpec.Containers) > 1 {
for _, c := range podSpec.Containers {
if c.Name == ProxyContainerName {
warningHandler(fmt.Sprintf("===> Skipping injection because %q has injected %q sidecar already\n",
name, ProxyContainerName))
return out, nil
}
}
}
spec, status, err := InjectionData(
sidecarTemplate,
valuesConfig,
sidecarTemplateVersionHash(sidecarTemplate),
typeMeta,
deploymentMetadata,
podSpec,
metadata,
meshconfig,
"")
if err != nil {
return nil, err
}
podSpec.InitContainers = append(podSpec.InitContainers, spec.InitContainers...)
podSpec.Containers = injectContainers(podSpec.Containers, spec)
podSpec.Volumes = append(podSpec.Volumes, spec.Volumes...)
podSpec.DNSConfig = spec.DNSConfig
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, spec.ImagePullSecrets...)
// Modify application containers' HTTP probe after appending injected containers.
// Because we need to extract istio-proxy's statusPort.
rewriteAppHTTPProbe(metadata.Annotations, podSpec, spec, meshconfig.DefaultConfig.GetStatusPort())
// due to bug https://github.com/kubernetes/kubernetes/issues/57923,
// k8s sa jwt token volume mount file is only accessible to root user, not istio-proxy(the user that istio proxy runs as).
// workaround by https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
var grp = int64(1337)
if podSpec.SecurityContext == nil {
podSpec.SecurityContext = &corev1.PodSecurityContext{
FSGroup: &grp,
}
} else {
podSpec.SecurityContext.FSGroup = &grp
}
if metadata.Annotations == nil {
metadata.Annotations = make(map[string]string)
}
if len(spec.PodRedirectAnnot) != 0 {
rewriteCniPodSpec(metadata.Annotations, spec)
}
metadata.Annotations[annotation.SidecarStatus.Name] = status
if metadata.Labels == nil {
metadata.Labels = make(map[string]string)
}
// This function, IntoObject(), is only used on the 'istioctl kube-inject' path, which
// doesn't use Pilot bootstrap variables.
metadata.Labels[label.IstioRev] = revision
if status != "" && metadata.Labels[label.TLSMode] == "" {
metadata.Labels[label.TLSMode] = model.IstioMutualTLSModeLabel
}
return out, nil
}
func injectContainers(target []corev1.Container, sic *SidecarInjectionSpec) []corev1.Container {
containersToInject := sic.Containers
if sic.HoldApplicationUntilProxyStarts {
// inject sidecar at start of spec.containers
proxyIndex := -1
for i, c := range containersToInject {
if c.Name == ProxyContainerName {
proxyIndex = i
break
}
}
if proxyIndex != -1 {
result := make([]corev1.Container, 1, len(target)+len(containersToInject))
result[0] = containersToInject[proxyIndex]
result = append(result, target...)
result = append(result, containersToInject[:proxyIndex]...)
result = append(result, containersToInject[proxyIndex+1:]...)
return result
}
}
return append(target, containersToInject...)
}
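// For example, injectContainers(target=[app], sic.Containers=[istio-proxy, extra])
// with HoldApplicationUntilProxyStarts=true yields [istio-proxy, app, extra];
// with the flag unset the sidecars are simply appended: [app, istio-proxy, extra].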
func getPortsForContainer(container corev1.Container) []string {
parts := make([]string, 0)
for _, p := range container.Ports {
if p.Protocol == corev1.ProtocolUDP || p.Protocol == corev1.ProtocolSCTP {
continue
}
parts = append(parts, strconv.Itoa(int(p.ContainerPort)))
}
return parts
}
func getContainerPorts(containers []corev1.Container, shouldIncludePorts func(corev1.Container) bool) string {
parts := make([]string, 0)
for _, c := range containers {
if shouldIncludePorts(c) {
parts = append(parts, getPortsForContainer(c)...)
}
}
return strings.Join(parts, ",")
}
// this function is no longer used by the template but kept around for backwards compatibility
func applicationPorts(containers []corev1.Container) string {
return getContainerPorts(containers, func(c corev1.Container) bool {
return c.Name != ProxyContainerName
})
}
func includeInboundPorts(containers []corev1.Container) string {
// Include the ports from all containers in the deployment.
return getContainerPorts(containers, func(corev1.Container) bool { return true })
}
func kubevirtInterfaces(s string) string {
return s
}
func structToJSON(v interface{}) string {
if v == nil {
return "{}"
}
ba, err := json.Marshal(v)
if err != nil {
log.Warnf("Unable to marshal %v", v)
return "{}"
}
return string(ba)
}
func protoToJSON(v proto.Message) string {
v = cleanProxyConfig(v)
if v == nil {
return "{}"
}
m := jsonpb.Marshaler{}
ba, err := m.MarshalToString(v)
if err != nil {
log.Warnf("Unable to marshal %v: %v", v, err)
return "{}"
}
return ba
}
// Rather than dump the entire proxy config, we remove fields that are default
// This makes the pod spec much smaller
// This is not comprehensive code, but nothing will break if this misses some fields
func cleanProxyConfig(msg proto.Message) proto.Message {
originalProxyConfig, ok := msg.(*meshconfig.ProxyConfig)
if !ok || originalProxyConfig == nil {
return msg
}
pc := *originalProxyConfig
defaults := mesh.DefaultProxyConfig()
if pc.ConfigPath == defaults.ConfigPath {
pc.ConfigPath = ""
}
if pc.BinaryPath == defaults.BinaryPath {
pc.BinaryPath = ""
}
if pc.ControlPlaneAuthPolicy == defaults.ControlPlaneAuthPolicy {
pc.ControlPlaneAuthPolicy = 0
}
if pc.ServiceCluster == defaults.ServiceCluster {
pc.ServiceCluster = ""
}
if reflect.DeepEqual(pc.DrainDuration, defaults.DrainDuration) {
pc.DrainDuration = nil
}
if reflect.DeepEqual(pc.TerminationDrainDuration, defaults.TerminationDrainDuration) {
pc.TerminationDrainDuration = nil
}
if reflect.DeepEqual(pc.ParentShutdownDuration, defaults.ParentShutdownDuration) {
pc.ParentShutdownDuration = nil
}
if pc.DiscoveryAddress == defaults.DiscoveryAddress {
pc.DiscoveryAddress = ""
}
if reflect.DeepEqual(pc.EnvoyMetricsService, defaults.EnvoyMetricsService) {
pc.EnvoyMetricsService = nil
}
if reflect.DeepEqual(pc.EnvoyAccessLogService, defaults.EnvoyAccessLogService) {
pc.EnvoyAccessLogService = nil
}
if reflect.DeepEqual(pc.Tracing, defaults.Tracing) {
pc.Tracing = nil
}
if pc.ProxyAdminPort == defaults.ProxyAdminPort {
pc.ProxyAdminPort = 0
}
if pc.StatNameLength == defaults.StatNameLength {
pc.StatNameLength = 0
}
if pc.StatusPort == defaults.StatusPort {
pc.StatusPort = 0
}
if reflect.DeepEqual(pc.Concurrency, defaults.Concurrency) {
pc.Concurrency = nil
}
return proto.Message(&pc)
}
func toJSON(m map[string]string) string {
if m == nil {
return "{}"
}
ba, err := json.Marshal(m)
if err != nil {
log.Warnf("Unable to marshal %v", m)
return "{}"
}
return string(ba)
}
func fromJSON(j string) interface{} {
var m interface{}
err := json.Unmarshal([]byte(j), &m)
if err != nil {
log.Warnf("Unable to unmarshal %s", j)
return "{}"
}
log.Warnf("%v", m)
return m
}
func indent(spaces int, source string) string {
res := strings.Split(source, "\n")
for i, line := range res {
if i > 0 {
res[i] = fmt.Sprintf(fmt.Sprintf("%% %ds%%s", spaces), "", line)
}
}
return strings.Join(res, "\n")
}
func toYaml(value interface{}) string {
y, err := yaml.Marshal(value)
if err != nil {
log.Warnf("Unable to marshal %v", value)
return ""
}
return string(y)
}
func getAnnotation(meta metav1.ObjectMeta, name string, defaultValue interface{}) string {
value, ok := meta.Annotations[name]
if !ok {
value = fmt.Sprint(defaultValue)
}
return value
}
func appendMultusNetwork(existingValue, istioCniNetwork string) string {
if existingValue == "" {
return istioCniNetwork
}
i := strings.LastIndex(existingValue, "]")
isJSON := i != -1
if isJSON {
return existingValue[0:i] + fmt.Sprintf(`, {"name": "%s"}`, istioCniNetwork) + existingValue[i:]
}
return existingValue + ", " + istioCniNetwork
}
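// For example, appendMultusNetwork(`[{"name": "macvlan"}]`, "istio-cni")
// yields `[{"name": "macvlan"}, {"name": "istio-cni"}]`, while the plain
// list form appendMultusNetwork("macvlan", "istio-cni") yields "macvlan, istio-cni".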
func excludeInboundPort(port interface{}, excludedInboundPorts string) string {
portStr := strings.TrimSpace(fmt.Sprint(port))
if len(portStr) == 0 || portStr == "0" {
// Nothing to do.
return excludedInboundPorts
}
// Exclude the readiness port if not already excluded.
ports := splitPorts(excludedInboundPorts)
outPorts := make([]string, 0, len(ports))
for _, port := range ports {
if port == portStr {
// The port is already excluded.
return excludedInboundPorts
}
port = strings.TrimSpace(port)
if len(port) > 0 {
outPorts = append(outPorts, port)
}
}
// The port was not already excluded - exclude it now.
outPorts = append(outPorts, portStr)
return strings.Join(outPorts, ",")
}
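// For example, excludeInboundPort(15020, "80,443") returns "80,443,15020",
// while excludeInboundPort(15020, "80,15020") returns the list unchanged.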
func valueOrDefault(value interface{}, defaultValue interface{}) interface{} {
if value == "" || value == nil {
return defaultValue
}
return value
}
// SidecarInjectionStatus contains basic information about the
// injected sidecar. This includes the names of added containers and
// volumes.
type SidecarInjectionStatus struct {
Version string `json:"version"`
InitContainers []string `json:"initContainers"`
Containers []string `json:"containers"`
Volumes []string `json:"volumes"`
ImagePullSecrets []string `json:"imagePullSecrets"`
}
// helper function to generate a template version identifier from a
// hash of the un-executed template contents.
func sidecarTemplateVersionHash(in string) string {
hash := sha256.Sum256([]byte(in))
return hex.EncodeToString(hash[:])
}
func potentialPodName(metadata *metav1.ObjectMeta) string {
if metadata.Name != "" {
return metadata.Name
}
if metadata.GenerateName != "" {
return metadata.GenerateName + "***** (actual name not yet known)"
}
return ""
}
// rewriteCniPodSpec will check if values from the sidecar injector Helm
// values need to be inserted as Pod annotations so the CNI will apply
// the proper redirection rules.
func rewriteCniPodSpec(annotations map[string]string, spec *SidecarInjectionSpec) {
if spec == nil {
return
}
if len(spec.PodRedirectAnnot) == 0 {
return
}
for k := range AnnotationValidation {
if spec.PodRedirectAnnot[k] != "" {
if annotations[k] == spec.PodRedirectAnnot[k] {
continue
}
annotations[k] = spec.PodRedirectAnnot[k]
}
}
}
// overwriteClusterInfo updates cluster name and network from url path
// This is needed when the webhook config runs on a different cluster than the webhook
func overwriteClusterInfo(containers []corev1.Container, path string) {
res := strings.Split(path, "/")
if len(res) >= 5 {
// if len is less than 5, not enough length for /cluster/X/net/Y
clusterName, clusterNetwork := "", ""
clusterName = res[len(res)-3]
clusterNetwork = res[len(res)-1]
log.Debugf("Updating cluster info based on clusterName: %s clusterNetwork: %s\n", clusterName, clusterNetwork)
for i, c := range containers {
if c.Name == ProxyContainerName {
updateClusterInfo(&containers[i], clusterName, clusterNetwork)
}
}
}
}
func updateClusterInfo(container *corev1.Container, clusterName, clusterNetwork string) {
envVars := make([]corev1.EnvVar, 0)
for _, env := range container.Env {
if env.Name != "ISTIO_META_CLUSTER_ID" && env.Name != "ISTIO_META_NETWORK" {
envVars = append(envVars, env)
}
}
log.Debugf("Appending env ISTIO_META_CLUSTER_ID: %s and ISTIO_META_NETWORK: %s\n", clusterName, clusterNetwork)
envVars = append(envVars,
corev1.EnvVar{Name: "ISTIO_META_CLUSTER_ID", Value: clusterName, ValueFrom: nil},
corev1.EnvVar{Name: "ISTIO_META_NETWORK", Value: clusterNetwork, ValueFrom: nil})
container.Env = envVars
}
| ValidateIncludeInboundPorts |
resources.go | package main
import (
"fmt"
"sync"
"time"
kh "github.com/Comcast/kuberhealthy/v2/pkg/checks/external/checkclient"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Job struct containing namespace object used throughout code to grab all resources in all namespaces
type Job struct {
namespace string
}
func runResourceQuotaCheck() {
// List all namespaces in the cluster.
allNamespaces, err := client.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
err = fmt.Errorf("error occurred listing namespaces from the cluster: %v", err)
reportErr := kh.ReportFailure([]string{err.Error()})
if reportErr != nil {
log.Fatalln("error reporting failure to kuberhealthy:", reportErr.Error())
}
return
}
select {
case rqErrors := <-examineResourceQuotas(allNamespaces):
if len(rqErrors) != 0 {
log.Infoln("This check created", len(rqErrors), "errors and warnings.")
log.Debugln("Errors and warnings:")
for _, err := range rqErrors {
log.Debugln(err)
}
log.Infoln("Reporting failures to kuberhealthy.")
reportErr := kh.ReportFailure(rqErrors)
if reportErr != nil {
log.Fatalln("error reporting failures to kuberhealthy:", reportErr.Error())
}
return
}
log.Infoln("No errors or warnings were created during this check!")
case <-ctx.Done():
log.Infoln("Exiting and shutting down from interrupt.")
return
case <-time.After(checkTimeLimit):
err := fmt.Errorf("Check took too long and timed out.")
log.Infoln("Reporting failure to kuberhealthy.")
reportErr := kh.ReportFailure([]string{err.Error()})
if reportErr != nil {
log.Fatalln("error reporting failures to kuberhealthy:", reportErr.Error())
}
return
}
log.Infoln("Reporting success to kuberhealthy.")
reportErr := kh.ReportSuccess()
if reportErr != nil {
log.Fatalln("error reporting success to kuberhealthy:", reportErr.Error())
}
}
// examineResourceQuotas looks at the resource quotas and makes reports on namespaces that meet or pass the threshold.
func examineResourceQuotas(namespaceList *v1.NamespaceList) chan []string {
resultChan := make(chan []string)
resourceQuotasJobChan := make(chan *Job, len(namespaceList.Items))
resourceQuotaErrorsChan := make(chan string, len(namespaceList.Items))
go fillJobChan(namespaceList, resourceQuotasJobChan)
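// Fan out one worker goroutine per namespace job, funnel their findings into
// the shared results channel, and close it once the WaitGroup drains so the
// consolidated error slice can be delivered on resultChan.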
go func(jobs chan *Job, results chan string) {
errors := make([]string, 0)
waitGroup := sync.WaitGroup{}
for job := range jobs {
waitGroup.Add(1)
log.Debugln("Starting worker for", job.namespace, "namespace.")
go createWorkerForNamespaceResourceQuotaCheck(job.namespace, results, &waitGroup)
}
go func(wg *sync.WaitGroup) {
log.Debugln("Waiting for workers to complete.")
wg.Wait()
log.Debugln("Workers done. Closing resource quota examination channel.")
close(results)
}(&waitGroup)
for err := range results {
errors = append(errors, err)
}
resultChan <- errors
return
}(resourceQuotasJobChan, resourceQuotaErrorsChan)
return resultChan
}
// createWorkerForNamespaceResourceQuotaCheck looks at the resource quotas for a given namespace and creates error messages
// if usage is over a threshold.
/*
if blacklist is specified, and whitelist is not, then we simply operate on a blacklist
if blacklist is specified, and whitelist is also specified, then we operate on the whitelist unless the item is in the blacklist
if blacklist is not specified, but whitelist is, then we operate on a whitelist
if neither a blacklist or whitelist is specified, then all namespaces are targeted
*/
func createWorkerForNamespaceResourceQuotaCheck(namespace string, quotasChan chan string, wg *sync.WaitGroup) {
defer wg.Done()
defer log.Debugln("worker for", namespace, "namespace is done!")
// Prioritize blacklist over the whitelist.
if len(blacklist) > 0 {
if contains(namespace, blacklist) |
}
if len(whitelist) > 0 {
if !contains(namespace, whitelist) {
log.Infoln("Skipping", namespace, "namespace (Whitelist).")
return
}
}
examineResourceQuotasForNamespace(namespace, quotasChan)
}
// examineResourceQuotasForNamespace looks at resource quotas and sends error messages on threshold violations.
func examineResourceQuotasForNamespace(namespace string, c chan<- string) {
log.Infoln("Looking at resource quotas for", namespace, "namespace.")
quotas, err := client.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{})
if err != nil {
err = fmt.Errorf("error occurred listing resource quotas for %s namespace %v", namespace, err)
c <- err.Error()
return
}
// Check if usage is at a certain threshold (percentage) of the limit.
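// For example, with threshold=0.9, 900m of CPU used against a 1000m hard
// limit gives percentCPUUsed=0.90, which meets the threshold and is reported.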
for _, rq := range quotas.Items {
limits := rq.Status.Hard
status := rq.Status.Used
percentCPUUsed := float64(status.Cpu().MilliValue()) / float64(limits.Cpu().MilliValue())
percentMemoryUsed := float64(status.Memory().MilliValue()) / float64(limits.Memory().MilliValue())
log.Debugln("Current used for", namespace, "CPU:", status.Cpu().MilliValue(), "Memory:", status.Memory().MilliValue())
log.Debugln("Limits for", namespace, "CPU:", limits.Cpu().MilliValue(), "Memory:", limits.Memory().MilliValue())
if percentCPUUsed >= threshold {
err := fmt.Errorf("cpu for %s namespace has reached threshold of %4.2f: USED: %d LIMIT: %d PERCENT_USED: %6.3f",
namespace, threshold, status.Cpu().MilliValue(), limits.Cpu().MilliValue(), percentCPUUsed)
c <- err.Error()
}
if percentMemoryUsed >= threshold {
err := fmt.Errorf("memory for %s namespace has reached threshold of %4.2f: USED: %d LIMIT: %d PERCENT_USED: %6.3f",
namespace, threshold, status.Memory().MilliValue(), limits.Memory().MilliValue(), percentMemoryUsed)
c <- err.Error()
}
}
}
// fillJobChan fills the job channel with namespace jobs.
func fillJobChan(namespaces *v1.NamespaceList, c chan<- *Job) {
defer close(c)
log.Infoln(len(namespaces.Items), "namespaces to look at.")
for _, ns := range namespaces.Items {
log.Debugln("Creating job for", ns.GetName(), "namespace.")
c <- &Job{
namespace: ns.GetName(),
}
}
return
}
// contains returns a boolean value based on whether or not a slice of strings contains
// a string.
func contains(s string, list []string) bool {
for _, str := range list {
if s == str {
return true
}
}
return false
}
| {
log.Infoln("Skipping", namespace, "namespace (Blacklist).")
return
} |
simplewal.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
// Package simplewal is a basic WAL implementation meant to be the first 'real' WAL
// option for mirbft. More sophisticated WALs with checksums, byte alignments, etc.
// may be produced in the future, but this is just a simple place to start.
package simplewal
import (
"sync"
"github.com/IBM/mirbft/pkg/pb/msgs"
"github.com/pkg/errors"
"github.com/tidwall/wal"
"google.golang.org/protobuf/proto"
)
type WAL struct {
mutex sync.Mutex
log *wal.Log
}
func Open(path string) (*WAL, error) {
log, err := wal.Open(path, &wal.Options{
NoSync: true,
NoCopy: true,
})
if err != nil {
return nil, errors.WithMessage(err, "could not open WAL")
}
return &WAL{
log: log,
}, nil
}
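// Note: the Options above trade safety for speed. NoSync skips the fsync on
// every Write, so durability requires an explicit Sync() call (exposed below);
// NoCopy hands out the log's internal buffers on Read, which is fine here
// because LoadAll unmarshals each entry into a fresh proto message right away.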
func (w *WAL) IsEmpty() (bool, error) {
firstIndex, err := w.log.FirstIndex()
if err != nil {
return false, errors.WithMessage(err, "could not read first index")
}
return firstIndex == 0, nil
}
func (w *WAL) LoadAll(forEach func(index uint64, p *msgs.Persistent)) error {
w.mutex.Lock()
defer w.mutex.Unlock()
firstIndex, err := w.log.FirstIndex()
if err != nil {
return errors.WithMessage(err, "could not read first index")
}
if firstIndex == 0 {
// WAL is empty
return nil
}
lastIndex, err := w.log.LastIndex()
if err != nil {
return errors.WithMessage(err, "could not read first index")
}
for i := firstIndex; i <= lastIndex; i++ {
data, err := w.log.Read(i)
if err != nil {
return errors.WithMessagef(err, "could not read index %d", i)
}
result := &msgs.Persistent{}
err = proto.Unmarshal(data, result)
if err != nil |
forEach(i, result)
}
return nil
}
func (w *WAL) Write(index uint64, p *msgs.Persistent) error {
data, err := proto.Marshal(p)
if err != nil {
return errors.WithMessage(err, "could not marshal")
}
w.mutex.Lock()
defer w.mutex.Unlock()
return w.log.Write(index, data)
}
func (w *WAL) Truncate(index uint64) error {
w.mutex.Lock()
defer w.mutex.Unlock()
return w.log.TruncateFront(index)
}
func (w *WAL) Sync() error {
return w.log.Sync()
}
func (w *WAL) Close() error {
return w.log.Close()
}
| {
return errors.WithMessage(err, "error decoding to proto, is the WAL corrupt?")
} |
rpc.py | """
Generic RPC functions for labby
"""
import asyncio
from cgi import print_exception
import os
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Type, Union
import yaml
from attr import attrib, attrs
from autobahn.wamp.exception import ApplicationError
from labby.console import Console
from labby.resource import LabbyResource, NetworkSerialPort, PowerAction, power_resources, power_resource_from_name
from .labby_error import (LabbyError, failed, invalid_parameter,
not_found)
from .labby_types import (ExporterName, GroupName, LabbyPlace, PlaceName, PowerState, Resource,
ResourceName, Session, Place)
from .labby_util import flatten
def _check_not_none(*args, **kwargs) -> Optional[LabbyError]:
# vars() here would only expose 'args'/'kwargs' themselves, so inspect kwargs directly
return next((invalid_parameter(f"Missing required parameter: {name}.") for name, val in kwargs.items() if val is None), None)
@attrs()
class RPCDesc():
name: str = attrib(default=None)
endpoint: str = attrib(default=None)
remote_endpoint: str = attrib(default=None)
info: Optional[str] = attrib(default=None)
parameter: Optional[List[Dict[str, str]]] = attrib(default=None)
return_type: Optional[str] = attrib(default=None)
def _localfile(path):
return Path(os.path.dirname(os.path.realpath(__file__))).joinpath(path)
FUNCTION_INFO = {}
with open(_localfile('rpc_desc.yaml'), 'r', encoding='utf-8') as file:
FUNCTION_INFO = {key: RPCDesc(**val) for key, val in yaml.load(file,
yaml.loader.FullLoader).items() if val is not None}
# non exhaustive list of serializable primitive types
_serializable_primitive: List[Type] = [int, float, str, bool]
def invalidates_cache(attribute, *rec_args, reconstitute: Optional[Callable] = None):
"""
on call clear attribute (e.g. set to None)
"""
def decorator(func: Callable):
def wrapped(self: Session, *args, **kwargs):
setattr(self, attribute, None)
return func(self, *args, **kwargs)
return wrapped
return decorator
def cached(attribute: str):
"""
Decorator definition to cache data in labby context and fetch data from server
"""
assert attribute is not None
def decorator(func: Callable):
async def wrapped(context: Session, *args, **kwargs):
assert context is not None
if not hasattr(context, attribute):
context.__dict__.update({attribute: None})
data = None
else:
data: Optional[Dict] = context.__getattribute__(attribute)
if data is None:
data: Optional[Dict] = await func(context, *args, **kwargs)
if not isinstance(data, LabbyError):
context.__setattr__(attribute, data)
return data
return wrapped
return decorator
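# Usage sketch (mirrors fetch_peers below): decorating an async fetcher with
# @cached("peers") stores its result on the Session, so the coordinator is
# only called while context.peers is None; LabbyError results are passed
# through but never cached.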
def labby_serialized(func):
"""
Custom serializer decorator for labby rpc functions
to make sure returned values are cbor/json serializable
"""
async def wrapped(*args, **kwargs) -> Union[None, List, Dict, int, float, str, bool]:
ret = await func(*args, **kwargs)
if ret is None:
return None
if isinstance(ret, LabbyError):
return ret.to_json()
if isinstance(ret, LabbyPlace):
return ret.to_json()
if isinstance(ret, (dict, list)) or type(ret) in _serializable_primitive:
return ret
raise NotImplementedError(
f"{type(ret)} can currently not be serialized!")
return wrapped
async def fetch(context: Session, attribute: str, endpoint: str, *args, **kwargs) -> Any:
"""
QoL function to fetch data from the Coordinator and store it in the attribute member on the Session
"""
assert context is not None
assert attribute is not None
assert endpoint is not None
data: Optional[Dict] = getattr(context, attribute)
if data is None:
data: Optional[Dict] = await context.call(endpoint, *args, **kwargs)
setattr(context, attribute, data)
return data
async def fetch_places(context: Session,
place: Optional[PlaceName]) -> Union[Dict[PlaceName, Place], LabbyError]:
"""
Fetch places from coordinator, update if missing and handle possible errors
"""
assert context is not None
_data = await context.places.get(context) # type: ignore
if _data is None:
if place is None:
return not_found("Could not find any places.")
return not_found(f"Could not find place with name {place}.")
if place is not None:
if place in _data.keys():
return {place: _data[place]}
return not_found(f"Could not find place with name {place}.")
return _data
async def fetch_resources(context: Session,
place: Optional[PlaceName],
resource_key: Optional[ResourceName]) -> Union[Dict, LabbyError]:
"""
Fetch resources from coordinator, update if missing and handle possible errors
"""
assert context is not None
data: Optional[Dict] = await context.resources.get(context)
if data is None:
if place is None:
return not_found("Could not find any resources.")
return not_found(f"No resources found for place {place}.")
if place is not None:
data = {exporter: {k: v for k, v in exporter_data.items() if k == place and v}
for exporter, exporter_data in data.items()}
if resource_key is not None:
data = {exporter:
{place_name:
{k: v for k, v in place_res.items() if k == resource_key if v}
for place_name, place_res in exporter_data.items() if place_res}
for exporter, exporter_data in data.items()}
return data
@cached("peers")
async def fetch_peers(context: Session) -> Union[Dict, LabbyError]:
session_ids = await context.call("wamp.session.list")
sessions = {}
for sess in session_ids: # ['exact']:
tmp = await context.call("wamp.session.get", sess)
if tmp and 'authid' in tmp:
sessions[tmp['authid']] = tmp
return sessions
async def get_exporters(context: Session) -> Union[List[ExporterName], LabbyError]:
peers = await fetch_peers(context)
if isinstance(peers, LabbyError):
return peers
assert peers is not None
return [x.replace('exporter/', '') for x in peers if x.startswith('exporter')]
def _calc_power_for_place(place_name, resources: Iterable[Dict]):
pstate = False
for res in resources:
acquired = res['acquired']
# str is Iterable too, so test for it first to keep the name match exact
if isinstance(acquired, str) or not isinstance(acquired, Iterable):
pstate |= acquired == place_name
else:
pstate |= place_name in acquired
return pstate
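# For example, a resource whose 'acquired' field equals "place-a" (or is a
# list containing "place-a") marks place-a as powered; a single match is
# enough since pstate is OR-accumulated across all resources.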
@cached("power_states")
async def | (context: Session,
place: Optional[PlaceName]) -> Union[PowerState, LabbyError]:
"""
Use fetch resource to determine power state, this may update context.resource
"""
_resources = await fetch_resources(context=context, place=place, resource_key=None)
if isinstance(_resources, LabbyError):
return _resources
if len(_resources) > 0:
_resources = flatten(_resources)
_places = await fetch_places(context, place)
if isinstance(_places, LabbyError):
return _places
power_states = {}
assert _places
for place_name, place_data in _places.items():
if 'acquired_resources' in place_data:
if len(place_data['acquired_resources']) == 0 or place_name not in _resources:
power_states[place_name] = {'power_state': False}
continue
resources_to_check = ((v for k, v in _resources[place_name].items() if any(
(k in a for a in place_data['acquired_resources']))))
power_states[place_name] = {
'power_state': _calc_power_for_place(place_name, resources_to_check)}
return power_states
@labby_serialized
async def places(context: Session,
place: Optional[PlaceName] = None) -> Union[List[LabbyPlace], LabbyError]:
"""
returns registered places as dict of lists
"""
context.log.info("Fetching places.")
data = await fetch_places(context, place)
if isinstance(data, LabbyError):
return data
power_states = await fetch_power_state(context=context, place=place)
assert power_states is not None
if isinstance(power_states, LabbyError):
return power_states
await get_reservations(context)
def token_from_place(name):
return next((token for token, x in context.reservations.items()
if x['filters']['main']['name'] == name), None)
place_res = []
assert data
for place_name, place_data in data.items():
# append the place to acquired places if
# it has been acquired in a previous session
if (place_data and place_data['acquired'] == context.user_name
and place_name not in context.acquired_places
):
context.acquired_places.add(place_name)
if place is not None and place_name != place:
continue
# ??? (Kevin) what if there are more than one or no matches
if len(place_data["matches"]) > 0 and 'exporter' in place_data["matches"]:
exporter = place_data["matches"][0]["exporter"]
else:
exporter = None
place_data.update({
"name": place_name,
"exporter": exporter,
"power_state": power_states.get(place_name, {}).get('power_state', None),
"reservation": token_from_place(place_name)
})
place_res.append(place_data)
return place_res
@labby_serialized
async def list_places(context: Session) -> List[PlaceName]:
"""
Return all place names
"""
await fetch_places(context, None)
return list(context.places.get_soft().keys()) if context.places else []
@labby_serialized
async def resource(context: Session,
place: Optional[PlaceName] = None,
resource_key=None
) -> Union[Dict[ResourceName, Resource], LabbyError]:
"""
rpc: returns resources registered for given place
"""
context.log.info(f"Fetching resources for {place}.")
resource_data = await fetch_resources(context=context, place=place, resource_key=resource_key)
if isinstance(resource_data, LabbyError):
return resource_data
if place is None:
return resource_data
if len(flatten(resource_data)) == 0:
return not_found(f"Place {place} not found.")
return resource_data
@labby_serialized
async def power_state(context: Session,
place: PlaceName,
) -> Union[PowerState, LabbyError]:
"""
rpc: return power state for a given place
"""
if place is None:
return invalid_parameter("Missing required parameter: place.").to_json()
power_data = await fetch_power_state(context=context, place=place)
assert power_data is not None
if isinstance(power_data, LabbyError):
return power_data
if place not in power_data.keys():
return not_found(f"Place {place} not found on Coordinator.").to_json()
return power_data[place]
@labby_serialized
async def resource_overview(context: Session,
place: Optional[PlaceName] = None,
) -> Union[List[Resource], LabbyError]:
"""
rpc: returns list of all resources on target
"""
context.log.info(f"Fetching resources overview for {place}.")
targets = await fetch_resources(context=context, place=place, resource_key=None)
if isinstance(targets, LabbyError):
return targets
ret = []
for exporter, resources in targets.items():
for res_place, res in resources.items():
if place is None or place == res_place:
ret.extend({'name': key, 'target': exporter,
'place': res_place, **values} for key, values in res.items())
return ret
@labby_serialized
async def resource_by_name(context: Session,
name: ResourceName, # filter by name
) -> Union[List[Resource], LabbyError]:
"""
rpc: returns list of all resources of given name on target
"""
if name is None:
return invalid_parameter("Missing required parameter: name.")
resource_data = await fetch_resources(context, place=None, resource_key=None)
if isinstance(resource_data, LabbyError):
return resource_data
ret = []
for target, resources in resource_data.items():
for place, res in resources.items():
ret.extend(
{'name': key, 'target': target, 'place': place, **values}
for key, values in res.items()
if name == key
)
return ret
@labby_serialized
async def resource_names(context: Session) -> List[Dict[str, str]]:
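    """
    Return a flat list of {exporter, group, class, name} records for every
    resource known to the coordinator
    """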
await fetch_resources(context, None, None)
data = context.resources or {}
def it(x): return x.items()
return [
{'exporter': exporter,
'group': grp_name,
'class': x.get('cls'),
'name': name,
}
for exporter, group in it(data) for grp_name, res in it(group) for name, x in it(res)
]
@labby_serialized
async def acquire(context: Session,
place: PlaceName) -> Union[bool, LabbyError]:
"""
rpc for acquiring places
"""
if place is None:
return invalid_parameter("Missing required parameter: place.")
if place in context.acquired_places:
return failed(f"Already acquired place {place}.")
context.log.info(f"Acquiring place {place}.")
try:
acquire_successful = await context.call("org.labgrid.coordinator.acquire_place", place)
except ApplicationError as err:
return failed(f"Got exception while trying to call org.labgrid.coordinator.acquire_place. {err}")
if acquire_successful:
context.acquired_places.add(place)
        # remove the reservation if there was one
        if token := next((token for token, x in context.reservations.items()
                          if x['filters']['main']['name'] == place), None):
            ret = await cancel_reservation(context, place)
            if isinstance(ret, LabbyError):
                context.log.error(f"Could not cancel reservation after acquire: {ret}")
            context.reservations.pop(token, None)  # cancel_reservation may already have removed it
return acquire_successful
@labby_serialized
async def release(context: Session,
place: PlaceName) -> Union[bool, LabbyError]:
"""
rpc for releasing 'acquired' places
"""
if place is None:
return invalid_parameter("Missing required parameter: place.")
if place not in context.acquired_places:
return failed(f"Place {place} is not acquired")
context.log.info(f"Releasing place {place}.")
try:
release_successful = await context.call('org.labgrid.coordinator.release_place', place)
if place in context.acquired_places: # place update was quicker
context.acquired_places.remove(place)
except ApplicationError as err:
return failed(f"Got exception while trying to call org.labgrid.coordinator.release_place. {err}")
return release_successful
@labby_serialized
async def info(_context=None, func_key: Optional[str] = None) -> Union[List[Dict], LabbyError]:
"""
RPC call for general info for RPC function usage
"""
if func_key is None:
return [desc.__dict__ for desc in globals()["FUNCTION_INFO"].values()]
if func_key not in globals()["FUNCTION_INFO"]:
return not_found(f"Function {func_key} not found in registry.")
return globals()["FUNCTION_INFO"][func_key].__dict__
async def get_reservations(context: Session) -> Dict:
"""
RPC call to list current reservations on the Coordinator
"""
reservation_data: Dict = await context.call("org.labgrid.coordinator.get_reservations")
for token, data in reservation_data.items():
if (data['state'] in ('waiting', 'allocated', 'acquired')
and data['owner'] == context.user_name):
context.to_refresh.add(token)
context.reservations.update(**reservation_data)
return reservation_data
@labby_serialized
async def create_reservation(context: Session, place: PlaceName, priority: float = 0.) -> Union[Dict, LabbyError]:
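    """
    rpc: create a reservation for a place on the coordinator
    """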
# TODO figure out filters, priorities, etc
# TODO should multiple reservations be allowed?
if place is None:
return invalid_parameter("Missing required parameter: place.")
await get_reservations(context) # get current state from coordinator
    if any(place == x['filters']['main']['name']
           for x in context.reservations.values()
           if 'name' in x['filters']['main'] and x['state'] not in ('expired', 'invalid')):
return failed(f"Place {place} is already reserved.")
reservation = await context.call("org.labgrid.coordinator.create_reservation",
f"name={place}",
prio=priority)
if not reservation:
return failed("Failed to create reservation")
context.reservations.update(reservation)
    context.to_refresh.add(next(iter(reservation)))
return reservation
async def refresh_reservations(context: Session):
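    """
    Background task: periodically sync reservation state with the
    coordinator, poll reservations that are still waiting, and acquire
    places once their reservation has been allocated.
    """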
while True:
to_remove = set()
context.reservations = await context.call("org.labgrid.coordinator.get_reservations")
for token in context.to_refresh:
if token in context.reservations:
# context.log.info(f"Refreshing reservation {token}")
state = context.reservations[token]['state']
place_name = context.reservations[token]['filters']['main']['name']
                if state == 'waiting':
                    ret = await context.call("org.labgrid.coordinator.poll_reservation", token)
                    if not ret:
                        context.log.error(
                            f"Failed to poll reservation {token}.")
                        continue
                    context.reservations[token] = ret
                # acquire the place once its reservation has been allocated by the coordinator
elif (context.reservations[token]['state'] == 'allocated'
or (context.reservations[token]['state'] == 'acquired' and place_name not in context.acquired_places)
):
ret = await acquire(context, place_name)
await cancel_reservation(context, place_name)
if not ret:
context.log.error(
f"Could not acquire reserved place {token}: {place_name}")
to_remove.add(token)
else:
to_remove.add(token)
else:
to_remove.add(token)
for token in to_remove:
context.to_refresh.remove(token)
await asyncio.sleep(1.) # !! TODO set to 10s
@labby_serialized
async def cancel_reservation(context: Session, place: PlaceName) -> Union[bool, LabbyError]:
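    """
    rpc: cancel an existing reservation for a place
    """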
if place is None:
return invalid_parameter("Missing required parameter: place.")
await get_reservations(context) # get current state from coordinator
token = next((token for token, x in context.reservations.items()
if x['filters']['main']['name'] == place), None)
if token is None:
return failed(f"No reservations available for place {place}.")
del context.reservations[token]
return await context.call("org.labgrid.coordinator.cancel_reservation", token)
@labby_serialized
async def poll_reservation(context: Session, place: PlaceName) -> Union[Dict, LabbyError]:
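    """
    rpc: poll the coordinator for the current state of a place's reservation
    """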
if place is None:
return invalid_parameter("Missing required parameter: place.")
token = next((token for token, x in context.reservations.items()
if x['filters']['main']['name'] == place), None)
    if token is None:
        return failed(f"No reservations available for place {place}.")
reservation = await context.call("org.labgrid.coordinator.poll_reservation", token)
context.reservations[token] = reservation
return reservation
@labby_serialized
async def reset(context: Session, place: PlaceName) -> Union[bool, LabbyError]:
"""
Send a reset request to a place matching a given place name
Note
"""
    check = _check_not_none(**vars())
if isinstance(check, LabbyError):
return check
context.log.info(f"Resetting place {place}")
release_later = False
if place not in context.acquired_places:
release_later = True
acq = await acquire(context, place)
        if isinstance(acq, LabbyError):
return acq
if not acq:
return failed(f"Could not acquire place {place}.")
res = await fetch_resources(context, place, None)
if isinstance(res, LabbyError):
return failed(f"Failed to get resources for place {place}.")
res = flatten(res, 2) # remove exporter and group from res
for resname, resdata in res.items():
if resname in power_resources:
try:
context.log.info(f"Resetting {place}/{resname}.")
power_resource = power_resource_from_name(resname, resdata)
url = power_resource.power(PowerAction.cycle)
assert (ssh_session := context.ssh_session) is not None
assert ssh_session.client
(_, _, serr) = ssh_session.client.exec_command(
command=f"curl -Ss '{url}' > /dev/null"
)
if len(msg := serr.read()) > 0:
context.log.error(
f"Got error while resetting console. {msg}")
            except ValueError:
                pass  # not a valid power resource after all
            except Exception:
                raise  # propagate any other error
if release_later:
rel = await release(context, place)
if isinstance(rel, LabbyError) or not rel:
return failed(f"Failed to release place {place} after reset.")
return True
@labby_serialized
async def console(context: Session, place: PlaceName):
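    """
    rpc: open a console for a place's network serial port and publish its
    output on the localhost.consoles.<place> topic
    """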
# TODO allow selection of resource to connect console to
if place is None:
return invalid_parameter("Missing required parameter: place.")
if place not in context.acquired_places:
ret = await acquire(context, place)
if isinstance(ret, LabbyError):
return ret
if not ret:
return failed("Failed to acquire Place (It may already have been acquired).")
if place in context.open_consoles:
return failed(f"There is already a console open for {place}.")
# check that place has a console
_resources = await fetch_resources(context, place, resource_key=None)
if isinstance(_resources, LabbyError):
return _resources
if len(_resources) == 0:
return failed(f"No resources on {place}.")
_resources = flatten(_resources, depth=2) # remove exporter and place
_resource: Optional[LabbyResource] = next(
(
NetworkSerialPort(
cls=data['cls'],
port=data['params']['port'],
host=data['params']['host'],
speed=data['params']['speed'],
protocol=data['params'].get('protocol', 'rfc2217'),
)
for _, data in _resources.items()
if 'cls' in data and data['cls'] == 'NetworkSerialPort'
),
None,
)
if _resource is None:
return failed(f"No network serial port on {place}.")
assert isinstance(_resource, NetworkSerialPort)
assert context.ssh_session.client
context.open_consoles[place] = (_con := Console(host=_resource.host or 'localhost',
speed=_resource.speed,
port=_resource.port,
ssh_session=context.ssh_session.client))
    async def _read(read_fn):
while place in context.open_consoles:
try:
data = await read_fn()
assert context.frontend
context.frontend.publish(f"localhost.consoles.{place}", data)
except (OSError, EOFError):
print_exception()
context.log.error(f"Console closed read on {place}.")
_con.close()
if place in context.open_consoles:
del context.open_consoles[place]
print("Closing read.")
            except Exception:
print_exception()
context.log.error(f"Console on {place} read failed.")
_con.close()
if place in context.open_consoles:
del context.open_consoles[place]
print("Closing read exc.")
asyncio.run_coroutine_threadsafe(
_read(_con.read_stdout), asyncio.get_event_loop())
asyncio.run_coroutine_threadsafe(
_read(_con.read_stderr), asyncio.get_event_loop())
return True
@labby_serialized
async def console_write(context: Session, place: PlaceName, data: str) -> Union[bool, LabbyError]:
if place not in context.acquired_places:
return failed(f"Place {place} is not acquired.")
if not (_console := context.open_consoles.get(place)):
return failed(f"Place {place} has no open consoles.")
if not data:
# data was empty
return failed(f"Could not write to Console {place}. Data was empty")
try:
_console.write_to_stdin(data)
except Exception as e:
context.log.exception(e)
return failed(f"Failed to write to Console {place}.")
context.log.info(f"Console on {place} received: {data}.")
return True
@labby_serialized
async def console_close(context: Session, place: PlaceName) -> Optional[LabbyError]:
if place not in context.acquired_places:
return failed(f"Place {place} is not acquired.")
if not context.open_consoles.get(place):
return failed(f"Place {place} has no open consoles.")
context.log.info(f"Closing console on {place}.")
context.open_consoles[place].close()
del context.open_consoles[place]
async def video(context: Session, *args):
pass
@labby_serialized
async def forward(context: Session, *args):
"""
Forward a rpc call to the labgrid coordinator
"""
return await context.call(*args)
@labby_serialized
async def create_place(context: Session, place: PlaceName) -> Union[bool, LabbyError]:
"""
Create a new place on the coordinator
"""
if place is None:
return invalid_parameter("Missing required parameter: place.")
_places = await fetch_places(context, place=None)
if isinstance(_places, LabbyError):
return _places
assert _places
if place in _places:
return failed(f"Place {place} already exists.")
return await context.call("org.labgrid.coordinator.add_place", place)
@labby_serialized
async def delete_place(context: Session, place: PlaceName) -> Union[bool, LabbyError]:
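    """
    Delete an existing place from the coordinator
    """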
if place is None:
return invalid_parameter("Missing required parameter: place.")
_places = await fetch_places(context, place)
assert context.places # should have been set with fetch_places
if isinstance(_places, LabbyError):
return _places
return await context.call("org.labgrid.coordinator.del_place", place)
@labby_serialized
async def create_resource(context: Session, group_name: GroupName, resource_name: ResourceName) -> Union[bool, LabbyError]:
    # TODO (Kevin) Find a way to do this without being an exporter / delegating to the exporter
if group_name is None:
return invalid_parameter("Missing required parameter: group_name.")
if resource_name is None:
return invalid_parameter("Missing required parameter: resource_name.")
ret = await context.call("org.labgrid.coordinator.set_resource", group_name, resource_name, {})
return ret
@labby_serialized
async def delete_resource(context: Session, group_name: GroupName, resource_name: ResourceName) -> Union[bool, LabbyError]:
    # TODO (Kevin) Find a way to do this without being an exporter / delegating to the exporter
if group_name is None:
return invalid_parameter("Missing required parameter: group_name.")
if resource_name is None:
return invalid_parameter("Missing required parameter: resource_name.")
ret = await context.call("org.labgrid.coordinator.update_resource", group_name, resource_name, None)
return ret
@labby_serialized
async def places_names(context: Session) -> Union[List[PlaceName], LabbyError]:
_places = await fetch_places(context, None)
if isinstance(_places, LabbyError):
return _places
assert _places
return list(_places.keys())
@labby_serialized
async def get_alias(context: Session, place: PlaceName) -> Union[List[str], LabbyError]:
if place is None:
return invalid_parameter("Missing required parameter: place.")
data = await fetch_places(context, place)
if isinstance(data, LabbyError):
return data
assert data
if len(data) == 0:
return []
return [a for x in data.values() for a in x['aliases']]
@labby_serialized
async def add_match(context: Session,
place: PlaceName,
exporter: ExporterName,
group: GroupName,
cls: ResourceName,
name: ResourceName) -> Union[bool, LabbyError]:
    check = _check_not_none(**vars())
    if isinstance(check, LabbyError):
        return check
try:
return await context.call("org.labgrid.coordinator.add_place_match", place, f"{exporter}/{group}/{cls}/{name}")
    except Exception:
        return failed(f"Failed to add match {exporter}/{group}/{cls}/{name} to place {place}.")
@labby_serialized
async def del_match(context: Session,
place: PlaceName,
exporter: ExporterName,
group: GroupName,
cls: ResourceName,
name: ResourceName) -> Union[bool, LabbyError]:
    check = _check_not_none(**vars())
    if isinstance(check, LabbyError):
        return check
try:
return await context.call("org.labgrid.coordinator.del_place_match", place, f"{exporter}/{group}/{cls}/{name}")
    except Exception:
        return failed(f"Failed to delete match {exporter}/{group}/{cls}/{name} from place {place}.")
@labby_serialized
async def acquire_resource(context: Session,
place_name: PlaceName,
exporter: ExporterName,
group_name: GroupName,
resource_name: ResourceName) -> Union[bool, LabbyError]:
    check = _check_not_none(**vars())
    if isinstance(check, LabbyError):
        return check
try:
procedure = f"org.labgrid.exporter.{exporter}.acquire"
return await context.call(procedure, group_name, resource_name, place_name)
    except Exception:
return failed(f"Failed to acquire resource {exporter}/{place_name}/{resource_name}.")
@labby_serialized
async def release_resource(context: Session,
place_name: PlaceName,
exporter: ExporterName,
group_name: GroupName,
resource_name: ResourceName) -> Union[bool, LabbyError]:
    check = _check_not_none(**vars())
    if isinstance(check, LabbyError):
        return check
try:
procedure = f"org.labgrid.exporter.{exporter}.release"
return await context.call(procedure, group_name, resource_name, place_name)
except Exception:
return failed(f"Failed to release resource {exporter}/{place_name}/{resource_name}.")
@labby_serialized
async def cli_command(context: Session, command: str) -> Union[str, LabbyError]:
if command is None or not command:
return failed("Command must not be empty.")
assert (ssh_session := context.ssh_session).client
context.log.info(
f"Issuing labgrid-client command: labgrid-client {command}")
try:
(_, sout, serr) = ssh_session.client.exec_command(
command=f"export LG_USERNAME={context.user_name}; labgrid-client {command}")
so = str(sout.read(), encoding='utf-8')
if se := str(serr.read(), encoding='utf-8'):
so += f"\n\n{se}"
return so
except Exception:
return failed("Failed to execute cli command.")
@labby_serialized
async def username(context: Session) -> Union[str, LabbyError]:
return context.user_name or failed("Username has not been set correctly.")
| fetch_power_state |
messagejson.go | package main
// MessageBody provides the JSON encoding for the message body sent to Messenger.
type MessageBody struct {
MessagingType string `json:"messaging_type"`
Recipient Ident `json:"recipient"`
	Mes Mess `json:"message"`
}
func NewResponseMessage(to, reply string) *MessageBody {
m := new(MessageBody)
m.Mes.Text = reply
m.Recipient.ID = to
m.MessagingType = "RESPONSE"
return m
}
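// Illustrative sketch (not part of the original file): constructing a reply
// and encoding it with encoding/json. It assumes the Ident and Mess types,
// defined elsewhere in this package, expose the ID and Text fields used by
// NewResponseMessage above; the JSON field names shown are likewise assumed.
//
//	msg := NewResponseMessage("<recipient-id>", "Hello!")
//	payload, err := json.Marshal(msg)
//	// payload: {"messaging_type":"RESPONSE","recipient":{...},"message":{...}}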
line_ops.rs | /*
This code is part of the WhiteboxTools geospatial analysis library.
Authors: Dr. John Lindsay
Created: 15/10/2018
Last Modified: 15/10/2018
License: MIT
*/
use crate::structures::{BoundingBox, LineSegment, Point2D, Polyline};
// pub fn lines_are_equal(line1: &[Point2D], line2: &[Point2D]) -> bool {
// if line1.len() == line2.len() {
// let (reverse, early_return) = if line1[0].x == line2[0].x && line1[0].y == line2[0].y {
// (false, false)
// } else if line1[0].x == line2[line2.len() - 1].x && line1[0].y == line2[line2.len() - 1].y {
// (true, false)
// } else {
// (false, true)
// };
// if early_return {
// return false;
// }
// // if !reverse {
// // for p in 0..line1.len() {
// // if !(line1[p].nearly_equals(&line2[p])) {
// // return false;
// // }
// // }
// // return true;
// // } else {
// // for p in 0..line1.len() {
// // if !(line1[p].nearly_equals(&line2[line2.len() - 1 - p])) {
// // return false;
// // }
// // }
// // return true;
// // }
// return false;
// }
// false
// }
/// Perpendicular distance from a point to a line
pub fn point_line_distance(point: &Point2D, start: &Point2D, end: &Point2D) -> f64 {
if start == end {
return point.distance(&start);
} else {
let numerator = ((end.x - start.x) * (start.y - point.y)
- (start.x - point.x) * (end.y - start.y))
.abs();
let denominator = start.distance(&end);
numerator / denominator
}
}
/// An implementation of the Ramer–Douglas–Peucker line-simplification algorithm.
/// Based on the RDP crate.
///
/// References:
/// Douglas, D.H., Peucker, T.K., 1973. Algorithms for the reduction of the number of points required to
/// represent a digitized line or its caricature. Cartographica: The International Journal for Geographic
/// Information and Geovisualization 10, 112–122.
///
/// Ramer, U., 1972. An iterative procedure for the polygonal approximation of plane curves. Computer
/// Graphics and Image Processing 1, 244–256.
pub fn simplify_rdp(points: &[Point2D], epsilon: &f64) -> Vec<Point2D> {
if points.is_empty() || points.len() == 1 {
return points.to_vec();
}
let mut dmax = 0.0;
let mut index: usize = 0;
let mut distance: f64;
for (i, _) in points.iter().enumerate().take(points.len() - 1).skip(1) {
distance = point_line_distance(
&points[i],
&*points.first().unwrap(),
&*points.last().unwrap(),
);
if distance > dmax {
index = i;
dmax = distance;
}
}
if dmax > *epsilon {
let mut intermediate = simplify_rdp(&points[..index + 1], &*epsilon);
intermediate.pop();
// recur!
intermediate.extend_from_slice(&simplify_rdp(&points[index..], &*epsilon));
intermediate
} else {
vec![*points.first().unwrap(), *points.last().unwrap()]
}
}
pub fn find_line_intersections(line1: &[Point2D], line2: &[Point2D]) -> Vec<LineSegment> {
let mut ret: Vec<LineSegment> = vec![];
let box1 = BoundingBox::from_points(&line1);
let box2 = BoundingBox::from_points(&line2);
    if box1.overlaps(box2) {
        let mut ls1: LineSegment;
        let mut ls2: LineSegment;
        for a in 0..line1.len() - 1 {
            ls1 = LineSegment::new(line1[a], line1[a + 1]);
for b in 0..line2.len() - 1 {
ls2 = LineSegment::new(line2[b], line2[b + 1]);
match ls1.get_intersection(&ls2) {
Some(p) => ret.push(p),
                    None => {} // do nothing, they don't intersect
}
}
}
}
ret
}
pub fn do_polylines_intersect(line1: &Polyline, line2: &Polyline) -> bool {
let box1 = line1.get_bounding_box();
let box2 = line2.get_bounding_box();
if box1.overlaps(box2) {
let mut ls1: LineSegment;
let mut ls2: LineSegment;
for a in 0..line1.len() - 1 {
ls1 = LineSegment::new(line1[a], line1[a + 1]);
for b in 0..line2.len() - 1 {
ls2 = LineSegment::new(line2[b], line2[b + 1]);
match ls1.get_intersection(&ls2) {
Some(_) => {
return true;
}
                    None => {} // do nothing, they don't intersect
}
}
}
}
false
}
pub fn find_split_points_at_line_intersections(line1: &mut Polyline, line2: &mut Polyline) {
let box1 = line1.get_bounding_box();
let box2 = line2.get_bounding_box();
if box1.overlaps(box2) {
let mut ls1: LineSegment;
let mut ls2: LineSegment;
for a in 0..line1.len() - 1 {
ls1 = LineSegment::new(line1[a], line1[a + 1]);
for b in 0..line2.len() - 1 {
ls2 = LineSegment::new(line2[b], line2[b + 1]);
match ls1.get_intersection(&ls2) {
Some(ls) => {
line1.insert_split_point(
a as f64
+ ls.p1.distance_squared(&ls1.p1)
/ ls1.p2.distance_squared(&ls1.p1), //(ls.p1.x - ls1.p1.x) / (ls1.p2.x - ls1.p1.x),
ls.p1,
);
line2.insert_split_point(
b as f64
+ ls.p1.distance_squared(&ls2.p1)
/ ls2.p2.distance_squared(&ls2.p1), //(ls.p1.x - ls2.p1.x) / (ls2.p2.x - ls2.p1.x),
ls.p1,
);
if ls.p1 != ls.p2 {
line1.insert_split_point(
a as f64
+ ls.p2.distance_squared(&ls1.p1)
/ ls1.p2.distance_squared(&ls1.p1), //(ls.p2.x - ls1.p1.x) / (ls1.p2.x - ls1.p1.x),
ls.p2,
);
line2.insert_split_point(
b as f64
+ ls.p2.distance_squared(&ls2.p1)
/ ls2.p2.distance_squared(&ls2.p1), //(ls.p2.x - ls2.p1.x) / (ls2.p2.x - ls2.p1.x),
ls.p2,
);
}
}
                    None => {} // do nothing, they don't intersect
}
}
}
}
}
#[cfg(test)]
mod test {
use super::find_line_intersections;
use crate::structures::{LineSegment, Point2D};
#[test]
fn test_find_line_intersections() {
let line1 = vec![
Point2D::new(0.0, 0.0),
Point2D::new(10.0, 10.0),
Point2D::new(12.0, 6.0),
Point2D::new(6.0, 0.0),
];
let line2 = vec![
Point2D::new(-1.0, 5.0),
Point2D::new(6.0, 5.0),
Point2D::new(6.0, 2.0),
Point2D::new(12.0, 2.0),
];
let intersections = find_line_intersections(&line1, &line2);
let intersections_should_be = vec![
LineSegment::new(Point2D::new(5.0, 5.0), Point2D::new(5.0, 5.0)),
LineSegment::new(Point2D::new(8.0, 2.0), Point2D::new(8.0, 2.0)),
];
assert_eq!(intersections, intersections_should_be);
}
#[test]
fn test_no_lines_intersections() {
let line1 = vec![
Point2D::new(0.0, 0.0),
Point2D::new(10.0, 10.0),
Point2D::new(12.0, 6.0),
Point2D::new(6.0, 0.0),
];
let line2 = vec![Point2D::new(-1.0, -5.0), Point2D::new(-6.0, -5.0)];
let intersections = find_line_intersections(&line1, &line2);
assert_eq!(intersections.len(), 0);
}
#[test]
fn test_vertical_lines_intersections() {
let line1 = vec![Point2D::new(0.0, 0.0), Point2D::new(10.0, 10.0)];
let line2 = vec![Point2D::new(5.0, 1.0), Point2D::new(5.0, 10.0)];
let intersections = find_line_intersections(&line1, &line2);
let intersections_should_be = vec![LineSegment::new(
Point2D::new(5.0, 5.0),
Point2D::new(5.0, 5.0),
)];
assert_eq!(intersections, intersections_should_be);
}
#[test]
fn test_coincident_lines_intersections() {
let line1 = vec![Point2D::new(0.0, 0.0), Point2D::new(10.0, 10.0)];
let line2 = vec![Point2D::new(5.0, 5.0), Point2D::new(18.0, 18.0)];
let intersections = find_line_intersections(&line1, &line2);
let intersections_should_be = vec![LineSegment::new(
Point2D::new(5.0, 5.0),
Point2D::new(10.0, 10.0),
)];
assert_eq!(intersections, intersections_should_be);
}
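    // Illustrative addition (not in the original file): a minimal check of
    // simplify_rdp, assuming Point2D implements PartialEq and Debug as the
    // assertions above already require. The nearly-colinear middle vertex
    // lies within epsilon of the chord from first to last point, so it is
    // dropped.
    #[test]
    fn test_simplify_rdp_drops_near_colinear_vertex() {
        use super::simplify_rdp;
        let points = vec![
            Point2D::new(0.0, 0.0),
            Point2D::new(5.0, 5.1),
            Point2D::new(10.0, 10.0),
        ];
        let simplified = simplify_rdp(&points, &1.0);
        assert_eq!(
            simplified,
            vec![Point2D::new(0.0, 0.0), Point2D::new(10.0, 10.0)]
        );
    }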
}
index.tsx | export {default} from './Alert';
export * from './Alert';
test-https-max-header-size-per-stream.js | 'use strict';
const common = require('../common');
if (!common.hasCrypto) {
common.skip('missing crypto');
}
const fixtures = require('../common/fixtures');
const assert = require('assert');
const https = require('https');
const http = require('http');
const tls = require('tls');
const MakeDuplexPair = require('../common/duplexpair');
const { finished } = require('stream');
const certFixture = {
key: fixtures.readKey('agent1-key.pem'),
cert: fixtures.readKey('agent1-cert.pem'),
ca: fixtures.readKey('ca1-cert.pem'),
};
// Test that setting the `maxHeaderSize` option works on a per-stream-basis.
// Test 1: The server sends larger headers than what would otherwise be allowed.
{
const { clientSide, serverSide } = MakeDuplexPair();
const req = https.request({
createConnection: common.mustCall(() => clientSide),
maxHeaderSize: http.maxHeaderSize * 4
}, common.mustCall((res) => {
assert.strictEqual(res.headers.hello, 'A'.repeat(http.maxHeaderSize * 3));
res.resume(); // We don’t actually care about contents.
res.on('end', common.mustCall());
}));
req.end();
serverSide.resume(); // Dump the request
serverSide.end('HTTP/1.1 200 OK\r\n' +
'Hello: ' + 'A'.repeat(http.maxHeaderSize * 3) + '\r\n' +
'Content-Length: 0\r\n' +
'\r\n\r\n');
}
// Test 2: The same as Test 1 except without the option, to make sure it fails.
{
const { clientSide, serverSide } = MakeDuplexPair();
const req = https.request({
createConnection: common.mustCall(() => clientSide)
}, common.mustNotCall());
req.end();
req.on('error', common.mustCall());
serverSide.resume(); // Dump the request
serverSide.end('HTTP/1.1 200 OK\r\n' +
'Hello: ' + 'A'.repeat(http.maxHeaderSize * 3) + '\r\n' +
'Content-Length: 0\r\n' +
'\r\n\r\n');
}
// Test 3: The client sends larger headers than what would otherwise be allowed.
{
const testData = 'Hello, World!\n';
const server = https.createServer(
{ maxHeaderSize: http.maxHeaderSize * 4,
...certFixture },
common.mustCall((req, res) => {
res.statusCode = 200;
res.setHeader('Content-Type', 'text/plain');
res.end(testData);
}));
server.on('clientError', common.mustNotCall());
server.listen(0, common.mustCall(() => {
const client = tls.connect({
port: server.address().port,
rejectUnauthorized: false
});
client.write(
'GET / HTTP/1.1\r\n' +
'Hello: ' + 'A'.repeat(http.maxHeaderSize * 3) + '\r\n' +
'\r\n\r\n');
client.end();
client.on('data', () => {});
finished(client, common.mustCall(() => {
server.close();
}));
}));
}
// Test 4: The same as Test 3 except without the option, to make sure it fails.
{
const server = https.createServer({ ...certFixture }, common.mustNotCall());
// clientError may be emitted multiple times when header is larger than
// maxHeaderSize.
server.on('clientError', common.mustCallAtLeast(() => {}, 1));
server.listen(0, common.mustCall(() => {
const client = tls.connect({
port: server.address().port,
rejectUnauthorized: false
});
client.write(
'GET / HTTP/1.1\r\n' +
'Hello: ' + 'A'.repeat(http.maxHeaderSize * 3) + '\r\n' +
      '\r\n\r\n');
    client.end();
client.on('data', () => {});
finished(client, common.mustCall(() => {
server.close();
}));
}));
}
VideoDetailsUC.ts | import { VideosDetailsGateway } from "../gateway/VideosDetailsGateway";
export interface VideoDetailsUCUnput {
    videoId: string
}
export interface VideoDetailsUCOutput {
videosDetails: any
}
export class VideoDetailsUC {
constructor(private videoDetaisGw: VideosDetailsGateway){}
public async execute(input: VideoDetailsUCUnput): Promise<VideoDetailsUCOutput>{
const videosDetails = await this.videoDetaisGw.getVideosDetails(input.videoId)
return ({videosDetails})
}
}
factor_tools.py | from fractions import Fraction
import math
def compute_factors(n):
"""
Return a list of all factors (proper divisors) of a number n, including the factor 1
"""
factors = [1]
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
factors.append(i)
factors.append(n // i)
return factors
def is_prime(n, prime_cache=None, prime_cache_max=None):
"""
Return true if n is prime (n>1)
If prime_cache is given, it should be a set of consecutive primes from 2 to prime_cache_max
(and prime_cache_max must also be given).
Then if n <= prime_cache_max, this test will use set lookup rather than factorization
"""
# Optimizations to quickly reject known non-primes
if n in [2, 3, 5, 7]:
return True
if (n % 10) not in [1, 3, 7, 9] or n == 1:
return False
if prime_cache and n <= prime_cache_max:
return n in prime_cache
return len(compute_factors(n)) == 1
def next_prime(previous):
"""
Get the next prime after previous
"""
i = previous + 1
while True:
if is_prime(i):
return i
i += 1
def prime_factors(n, primes=None):
"""
Compute all prime factors of a number n
Some prime factors may be repeated e.g. 12 has prime factors [2, 2, 3]
primes: if supplied, primes up to sqrt(n) should be available
"""
if not primes:
primes = get_primes(int(math.sqrt(n)))
factors = []
remainder = n
for prime in primes:
# Divide by the current prime as many times as we can
        while remainder % prime == 0:
            factors.append(prime)
            remainder //= prime
# We can bail out once we've finished factorizing
if remainder == 1:
break
return factors
def get_primes(up_to):
    """
    Get all primes up to (but not including) up_to
    """
    primes = [2]
    while primes[-1] < up_to:
        primes.append(next_prime(primes[-1]))
    return primes[:-1]
def totient(n, primes):
"""
Compute totient function with precomputed primes
primes must include all (ordered) primes from 2 up to at least n
"""
product = n
for p in primes:
if p > n:
break
if n % p == 0:
            product *= (1 - Fraction(1, p))
    return int(product)  # the product is always an integer for integer n
def get_coprimes(n, primes):
"""
Get list of numbers coprime to n
primes: list of prime numbers up to at least sqrt(n)
"""
factors = set(prime_factors(n, primes))
# Now sieve out the factors
coprime = [True for i in range(n)]
coprime[0] = False
coprime[1] = False
for factor in factors:
for multiplier in range(1, n // factor):
coprime[factor * multiplier] = False
    # And we have the coprimes!
    return [i for i, c in enumerate(coprime) if c]
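# Illustrative sketch (not part of the original module): exercising the
# helpers above with small, hand-checked inputs.
if __name__ == "__main__":
    primes = get_primes(20)
    assert primes == [2, 3, 5, 7, 11, 13, 17, 19]
    assert sorted(compute_factors(28)) == [1, 2, 4, 7, 14]
    assert prime_factors(12, primes) == [2, 2, 3]
    assert totient(9, primes) == 6  # phi(9) = 9 * (1 - 1/3)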
| """
Get all primes up to (but not including) up_to
"""
primes = [2]
while primes[-1] < up_to:
primes.append(next_prime(primes[-1]))
return primes[:-1] |
Layout.js | /**
* @version 0.43
* @author yura
*/
import React from 'react'
import Modal from 'react-native-modal'
import { View, StyleSheet, Dimensions } from 'react-native'
import { hideModal } from '@app/appstores/Stores/Modal/ModalActions'
import { ThemeContext } from '@app/theme/ThemeProvider'
const { height: WINDOW_HEIGHT, width: WINDOW_WIDTH } = Dimensions.get('window')
let windowHeight, windowWidth
if (WINDOW_HEIGHT < WINDOW_WIDTH) {
windowHeight = WINDOW_WIDTH
windowWidth = WINDOW_HEIGHT
} else {
windowHeight = WINDOW_HEIGHT
windowWidth = WINDOW_WIDTH
}
class ModalLayout extends React.PureComponent {
render() {
const { colors } = this.context
return (
<Modal style={styles.modal}
hasBackdrop={true}
backdropOpacity={0.4}
isVisible={this.props.visible === true || this.props.visible === 'true'}
onBackdropPress={() => {this.props.noBackdropPress === true ? null : hideModal()}}
useNativeDriver={true}
>
<View style={{...styles.container, backgroundColor: colors.common.background, minHeight: this.props.notifications ? 170 : 290 }}>
<View>
{this.props.children}
</View>
</View>
</Modal>
)
}
}
ModalLayout.contextType = ThemeContext
export default ModalLayout
const styles = StyleSheet.create({
modal: {
margin: 0,
padding: 0,
justifyContent: 'center',
backgroundColor: 'transparent',
width: windowWidth,
height: windowHeight
},
container: {
justifyContent: 'flex-end',
position: 'relative',
left: (windowWidth - 313) / 2,
width: 313,
marginVertical: 5,
borderRadius: 16,
zIndex: 1
}
})
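// Illustrative sketch (not part of the original file): typical usage of the
// modal layout. The child component and the `visible` flag are placeholders.
//
// <ModalLayout visible={true} noBackdropPress={false} notifications={false}>
//     <SomeModalContent />
// </ModalLayout>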
mod.rs | use serde::{ser, Deserialize, Serialize};
use std::io;
use crate::error::{Error, Result};
use crate::extensions::Extensions;
mod value;
/// Serializes `value` into `writer`
pub fn to_writer<W, T>(writer: W, value: &T) -> Result<()>
where
W: io::Write,
T: Serialize,
{
let mut s = Serializer::new(writer, None, false)?;
value.serialize(&mut s)
}
/// Serializes `value` and returns it as string.
///
/// This function does not generate any newlines or nice formatting;
/// if you want that, you can use `to_string_pretty` instead.
pub fn to_string<T>(value: &T) -> Result<String>
where
T: Serialize,
{
let buf = Vec::new();
let mut s = Serializer::new(buf, None, false)?;
value.serialize(&mut s)?;
Ok(String::from_utf8(s.output).expect("Ron should be utf-8"))
}
/// Serializes `value` in the recommended RON layout in a pretty way.
pub fn to_string_pretty<T>(value: &T, config: PrettyConfig) -> Result<String>
where
T: Serialize,
{
let buf = Vec::new();
let mut s = Serializer::new(buf, Some(config), false)?;
value.serialize(&mut s)?;
Ok(String::from_utf8(s.output).expect("Ron should be utf-8"))
}
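// Illustrative sketch (not part of the original file): pretty-printing a
// serializable value with the config builder defined below.
//
//     let config = PrettyConfig::new().indentor("  ".to_owned());
//     let text = to_string_pretty(&vec![1, 2, 3], config).unwrap();
//     // text spans multiple lines, one element per line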
/// Pretty serializer state
struct Pretty {
indent: usize,
sequence_index: Vec<usize>,
}
/// Pretty serializer configuration.
///
/// # Examples
///
/// ```
/// use bevy_ron::ser::PrettyConfig;
///
/// let my_config = PrettyConfig::new()
/// .depth_limit(4)
/// // definitely superior (okay, just joking)
/// .indentor("\t".to_owned());
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PrettyConfig {
/// Limit the pretty-ness up to the given depth.
#[serde(default = "default_depth_limit")]
pub depth_limit: usize,
/// New line string
#[serde(default = "default_new_line")]
pub new_line: String,
/// Indentation string
#[serde(default = "default_indentor")]
pub indentor: String,
/// Separate tuple members with indentation
#[serde(default = "default_separate_tuple_members")]
pub separate_tuple_members: bool,
/// Enumerate array items in comments
#[serde(default = "default_enumerate_arrays")]
pub enumerate_arrays: bool,
/// Always include the decimal in floats
#[serde(default = "default_decimal_floats")]
pub decimal_floats: bool,
/// Enable extensions. Only configures 'implicit_some' for now.
pub extensions: Extensions,
/// Private field to ensure adding a field is non-breaking.
#[serde(skip)]
_future_proof: (),
}
impl PrettyConfig {
/// Creates a default `PrettyConfig`.
pub fn new() -> Self {
Default::default()
}
/// Limits the pretty-formatting based on the number of indentations.
/// I.e., with a depth limit of 5, starting with an element of depth
/// (indentation level) 6, everything will be put into the same line,
/// without pretty formatting.
///
/// Default: [std::usize::MAX]
pub fn depth_limit(mut self, depth_limit: usize) -> Self {
self.depth_limit = depth_limit;
self
}
/// Configures the newlines used for serialization.
///
/// Default: `\r\n` on Windows, `\n` otherwise
pub fn new_line(mut self, new_line: String) -> Self {
self.new_line = new_line;
self
}
/// Configures the string sequence used for indentation.
///
/// Default: 4 spaces
pub fn indentor(mut self, indentor: String) -> Self {
self.indentor = indentor;
self
}
/// Configures whether tuples are single- or multi-line.
/// If set to `true`, tuples will have their fields indented and in new
/// lines. If set to `false`, tuples will be serialized without any
/// newlines or indentations.
///
/// Default: `false`
pub fn separate_tuple_members(mut self, separate_tuple_members: bool) -> Self {
self.separate_tuple_members = separate_tuple_members;
self
}
/// Configures whether a comment shall be added to every array element,
/// indicating the index.
///
/// Default: `false`
pub fn enumerate_arrays(mut self, enumerate_arrays: bool) -> Self {
self.enumerate_arrays = enumerate_arrays;
self
}
/// Configures whether floats should always include a decimal.
/// When false `1.0` will serialize as `1`
/// When true `1.0` will serialize as `1.0`
///
/// Default: `false`
pub fn decimal_floats(mut self, decimal_floats: bool) -> Self {
self.decimal_floats = decimal_floats;
self
}
/// Configures extensions
///
/// Default: Extensions::empty()
pub fn extensions(mut self, extensions: Extensions) -> Self {
self.extensions = extensions;
self
}
}
fn default_depth_limit() -> usize {
!0
}
fn default_new_line() -> String {
#[cfg(not(target_os = "windows"))]
let new_line = "\n".to_string();
#[cfg(target_os = "windows")]
let new_line = "\r\n".to_string();
new_line
}
fn default_decimal_floats() -> bool {
false
}
fn default_indentor() -> String {
" ".to_string()
}
fn default_separate_tuple_members() -> bool {
false
}
fn default_enumerate_arrays() -> bool {
false
}
impl Default for PrettyConfig {
fn default() -> Self {
PrettyConfig {
depth_limit: default_depth_limit(),
new_line: default_new_line(),
indentor: default_indentor(),
separate_tuple_members: default_separate_tuple_members(),
enumerate_arrays: default_enumerate_arrays(),
extensions: Extensions::default(),
decimal_floats: default_decimal_floats(),
_future_proof: (),
}
}
}
/// The RON serializer.
///
/// You can just use `to_string` for serializing a value.
/// If you want it pretty-printed, take a look at the `pretty` module.
pub struct Serializer<W: io::Write> {
output: W,
pretty: Option<(PrettyConfig, Pretty)>,
struct_names: bool,
is_empty: Option<bool>,
}
impl<W: io::Write> Serializer<W> {
/// Creates a new `Serializer`.
///
/// Most of the time you can just use `to_string` or `to_string_pretty`.
pub fn new(mut writer: W, config: Option<PrettyConfig>, struct_names: bool) -> Result<Self> {
if let Some(conf) = &config {
if conf.extensions.contains(Extensions::IMPLICIT_SOME) {
writer.write_all(b"#![enable(implicit_some)]")?;
writer.write_all(conf.new_line.as_bytes())?;
};
};
Ok(Serializer {
output: writer,
pretty: config.map(|conf| {
(
conf,
Pretty {
indent: 0,
sequence_index: Vec::new(),
},
)
}),
struct_names,
is_empty: None,
})
}
fn is_pretty(&self) -> bool {
match self.pretty {
Some((ref config, ref pretty)) => pretty.indent <= config.depth_limit,
None => false,
}
}
fn separate_tuple_members(&self) -> bool {
self.pretty
.as_ref()
.map_or(false, |&(ref config, _)| config.separate_tuple_members)
}
fn decimal_floats(&self) -> bool {
self.pretty
.as_ref()
.map_or(false, |&(ref config, _)| config.decimal_floats)
}
fn extensions(&self) -> Extensions {
self.pretty
.as_ref()
.map_or(Extensions::empty(), |&(ref config, _)| config.extensions)
}
fn start_indent(&mut self) -> Result<()> {
if let Some((ref config, ref mut pretty)) = self.pretty {
pretty.indent += 1;
if pretty.indent <= config.depth_limit {
let is_empty = self.is_empty.unwrap_or(false);
if !is_empty {
self.output.write_all(config.new_line.as_bytes())?;
}
}
}
Ok(())
}
fn indent(&mut self) -> io::Result<()> {
if let Some((ref config, ref pretty)) = self.pretty {
if pretty.indent <= config.depth_limit {
for _ in 0..pretty.indent {
self.output.write_all(config.indentor.as_bytes())?;
}
}
}
Ok(())
}
fn end_indent(&mut self) -> io::Result<()> {
if let Some((ref config, ref mut pretty)) = self.pretty {
if pretty.indent <= config.depth_limit {
let is_empty = self.is_empty.unwrap_or(false);
if !is_empty {
for _ in 1..pretty.indent {
self.output.write_all(config.indentor.as_bytes())?;
}
}
}
pretty.indent -= 1;
self.is_empty = None;
}
Ok(())
}
fn serialize_escaped_str(&mut self, value: &str) -> io::Result<()> {
self.output.write_all(b"\"")?;
let mut scalar = [0u8; 4];
for c in value.chars().flat_map(|c| c.escape_debug()) {
self.output
.write_all(c.encode_utf8(&mut scalar).as_bytes())?;
}
self.output.write_all(b"\"")?;
Ok(())
}
}
impl<'a, W: io::Write> ser::Serializer for &'a mut Serializer<W> {
type Error = Error;
type Ok = ();
type SerializeMap = Compound<'a, W>;
type SerializeSeq = Compound<'a, W>;
type SerializeStruct = Compound<'a, W>;
type SerializeStructVariant = Compound<'a, W>;
type SerializeTuple = Compound<'a, W>;
type SerializeTupleStruct = Compound<'a, W>;
type SerializeTupleVariant = Compound<'a, W>;
fn serialize_bool(self, v: bool) -> Result<()> {
self.output.write_all(if v { b"true" } else { b"false" })?;
Ok(())
}
fn serialize_i8(self, v: i8) -> Result<()> {
self.serialize_i128(v as i128)
}
fn serialize_i16(self, v: i16) -> Result<()> {
self.serialize_i128(v as i128)
}
fn serialize_i32(self, v: i32) -> Result<()> {
self.serialize_i128(v as i128)
}
fn serialize_i64(self, v: i64) -> Result<()> {
self.serialize_i128(v as i128)
}
fn serialize_i128(self, v: i128) -> Result<()> {
// TODO optimize
write!(self.output, "{}", v)?;
Ok(())
}
fn serialize_u8(self, v: u8) -> Result<()> {
self.serialize_u128(v as u128)
}
fn serialize_u16(self, v: u16) -> Result<()> {
self.serialize_u128(v as u128)
}
fn serialize_u32(self, v: u32) -> Result<()> {
self.serialize_u128(v as u128)
}
fn serialize_u64(self, v: u64) -> Result<()> {
self.serialize_u128(v as u128)
}
fn serialize_u128(self, v: u128) -> Result<()> {
write!(self.output, "{}", v)?;
Ok(())
}
fn serialize_f32(self, v: f32) -> Result<()> {
write!(self.output, "{}", v)?;
// TODO: use f32::EPSILON when minimum supported rust version is 1.43
pub const EPSILON: f32 = 1.19209290e-07_f32;
if self.decimal_floats() && (v - v.floor()).abs() < EPSILON {
write!(self.output, ".0")?;
}
Ok(())
}
fn serialize_f64(self, v: f64) -> Result<()> {
write!(self.output, "{}", v)?;
// TODO: use f64::EPSILON when minimum supported rust version is 1.43
pub const EPSILON: f64 = 2.2204460492503131e-16_f64;
if self.decimal_floats() && (v - v.floor()).abs() < EPSILON {
write!(self.output, ".0")?;
}
Ok(())
}
fn serialize_char(self, v: char) -> Result<()> {
self.output.write_all(b"'")?;
if v == '\\' || v == '\'' {
self.output.write_all(b"\\")?;
}
write!(self.output, "{}", v)?;
self.output.write_all(b"'")?;
Ok(())
}
fn serialize_str(self, v: &str) -> Result<()> {
self.serialize_escaped_str(v)?;
Ok(())
}
fn serialize_bytes(self, v: &[u8]) -> Result<()> {
self.serialize_str(base64::encode(v).as_str())
}
fn serialize_none(self) -> Result<()> {
self.output.write_all(b"None")?;
Ok(())
}
fn serialize_some<T>(self, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
let implicit_some = self.extensions().contains(Extensions::IMPLICIT_SOME);
if !implicit_some {
self.output.write_all(b"Some(")?;
}
value.serialize(&mut *self)?;
if !implicit_some {
self.output.write_all(b")")?;
}
Ok(())
}
fn serialize_unit(self) -> Result<()> {
self.output.write_all(b"()")?;
Ok(())
}
fn serialize_unit_struct(self, name: &'static str) -> Result<()> {
if self.struct_names {
self.output.write_all(name.as_bytes())?;
Ok(())
} else {
self.serialize_unit()
}
}
fn serialize_unit_variant(self, _: &'static str, _: u32, variant: &'static str) -> Result<()> {
self.output.write_all(variant.as_bytes())?;
Ok(())
}
fn serialize_newtype_struct<T>(self, name: &'static str, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
if self.struct_names {
self.output.write_all(name.as_bytes())?;
}
self.output.write_all(b"(")?;
value.serialize(&mut *self)?;
self.output.write_all(b")")?;
Ok(())
}
fn serialize_newtype_variant<T>(
self,
_: &'static str,
_: u32,
variant: &'static str,
value: &T,
) -> Result<()>
where
T: ?Sized + Serialize,
{
self.output.write_all(variant.as_bytes())?;
self.output.write_all(b"(")?;
value.serialize(&mut *self)?;
self.output.write_all(b")")?;
Ok(())
}
fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq> {
self.output.write_all(b"[")?;
if let Some(len) = len {
self.is_empty = Some(len == 0);
}
self.start_indent()?;
if let Some((_, ref mut pretty)) = self.pretty {
pretty.sequence_index.push(0);
}
Ok(Compound {
ser: self,
state: State::First,
})
}
    fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple> {
self.output.write_all(b"(")?;
if self.separate_tuple_members() {
self.is_empty = Some(len == 0);
self.start_indent()?;
}
Ok(Compound {
ser: self,
state: State::First,
})
}
fn serialize_tuple_struct(
self,
name: &'static str,
len: usize,
) -> Result<Self::SerializeTupleStruct> {
if self.struct_names {
self.output.write_all(name.as_bytes())?;
}
self.serialize_tuple(len)
}
fn serialize_tuple_variant(
self,
_: &'static str,
_: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeTupleVariant> {
self.output.write_all(variant.as_bytes())?;
self.output.write_all(b"(")?;
if self.separate_tuple_members() {
self.is_empty = Some(len == 0);
self.start_indent()?;
}
Ok(Compound {
ser: self,
state: State::First,
})
}
fn serialize_map(self, len: Option<usize>) -> Result<Self::SerializeMap> {
self.output.write_all(b"{")?;
if let Some(len) = len {
self.is_empty = Some(len == 0);
}
self.start_indent()?;
Ok(Compound {
ser: self,
state: State::First,
})
}
fn serialize_struct(self, name: &'static str, len: usize) -> Result<Self::SerializeStruct> {
if self.struct_names {
self.output.write_all(name.as_bytes())?;
}
self.output.write_all(b"(")?;
self.is_empty = Some(len == 0);
self.start_indent()?;
Ok(Compound {
ser: self,
state: State::First,
})
}
fn serialize_struct_variant(
self,
_: &'static str,
_: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeStructVariant> {
self.output.write_all(variant.as_bytes())?;
self.output.write_all(b"(")?;
self.is_empty = Some(len == 0);
self.start_indent()?;
Ok(Compound {
ser: self,
state: State::First,
})
}
}
pub enum State {
First,
Rest,
}
pub struct Compound<'a, W: io::Write> {
ser: &'a mut Serializer<W>,
state: State,
}
impl<'a, W: io::Write> ser::SerializeSeq for Compound<'a, W> {
type Error = Error;
type Ok = ();
fn serialize_element<T>(&mut self, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
if let State::First = self.state {
self.state = State::Rest;
} else {
self.ser.output.write_all(b",")?;
if let Some((ref config, ref mut pretty)) = self.ser.pretty {
if pretty.indent <= config.depth_limit {
if config.enumerate_arrays {
assert!(config.new_line.contains('\n'));
let index = pretty.sequence_index.last_mut().unwrap();
//TODO: when /**/ comments are supported, prepend the index
// to an element instead of appending it.
write!(self.ser.output, "// [{}]", index).unwrap();
*index += 1;
}
self.ser.output.write_all(config.new_line.as_bytes())?;
}
}
}
self.ser.indent()?;
value.serialize(&mut *self.ser)?;
Ok(())
}
fn end(self) -> Result<()> {
if let State::Rest = self.state {
if let Some((ref config, ref mut pretty)) = self.ser.pretty {
if pretty.indent <= config.depth_limit {
self.ser.output.write_all(b",")?;
self.ser.output.write_all(config.new_line.as_bytes())?;
}
}
}
self.ser.end_indent()?;
if let Some((_, ref mut pretty)) = self.ser.pretty {
pretty.sequence_index.pop();
}
self.ser.output.write_all(b"]")?;
Ok(())
}
}
impl<'a, W: io::Write> ser::SerializeTuple for Compound<'a, W> {
type Error = Error;
type Ok = ();
fn serialize_element<T>(&mut self, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
if let State::First = self.state {
self.state = State::Rest;
} else {
self.ser.output.write_all(b",")?;
if let Some((ref config, ref pretty)) = self.ser.pretty {
if pretty.indent <= config.depth_limit {
self.ser
.output
.write_all(if self.ser.separate_tuple_members() {
config.new_line.as_bytes()
} else {
b" "
})?;
}
}
}
if self.ser.separate_tuple_members() {
self.ser.indent()?;
}
value.serialize(&mut *self.ser)?;
Ok(())
}
fn end(self) -> Result<()> {
if let State::Rest = self.state {
if let Some((ref config, ref pretty)) = self.ser.pretty {
if self.ser.separate_tuple_members() && pretty.indent <= config.depth_limit {
self.ser.output.write_all(b",")?;
self.ser.output.write_all(config.new_line.as_bytes())?;
}
}
}
if self.ser.separate_tuple_members() {
self.ser.end_indent()?;
}
self.ser.output.write_all(b")")?;
Ok(())
}
}
// Same thing but for tuple structs.
impl<'a, W: io::Write> ser::SerializeTupleStruct for Compound<'a, W> {
type Error = Error;
type Ok = ();
fn serialize_field<T>(&mut self, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
ser::SerializeTuple::serialize_element(self, value)
}
fn end(self) -> Result<()> {
ser::SerializeTuple::end(self)
}
}
impl<'a, W: io::Write> ser::SerializeTupleVariant for Compound<'a, W> {
type Error = Error;
type Ok = ();
fn serialize_field<T>(&mut self, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
ser::SerializeTuple::serialize_element(self, value)
}
fn end(self) -> Result<()> {
ser::SerializeTuple::end(self)
}
}
impl<'a, W: io::Write> ser::SerializeMap for Compound<'a, W> {
type Error = Error;
type Ok = ();
fn serialize_key<T>(&mut self, key: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
if let State::First = self.state {
self.state = State::Rest;
} else {
self.ser.output.write_all(b",")?;
if let Some((ref config, ref pretty)) = self.ser.pretty {
if pretty.indent <= config.depth_limit {
self.ser.output.write_all(config.new_line.as_bytes())?;
}
}
}
self.ser.indent()?;
key.serialize(&mut *self.ser)
}
fn serialize_value<T>(&mut self, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
self.ser.output.write_all(b":")?;
if self.ser.is_pretty() {
self.ser.output.write_all(b" ")?;
}
value.serialize(&mut *self.ser)?;
Ok(())
}
fn end(self) -> Result<()> {
if let State::Rest = self.state {
if let Some((ref config, ref pretty)) = self.ser.pretty {
if pretty.indent <= config.depth_limit {
self.ser.output.write_all(b",")?;
self.ser.output.write_all(config.new_line.as_bytes())?;
}
}
}
self.ser.end_indent()?;
self.ser.output.write_all(b"}")?;
Ok(())
}
}
impl<'a, W: io::Write> ser::SerializeStruct for Compound<'a, W> {
type Error = Error;
type Ok = ();
fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
if let State::First = self.state {
self.state = State::Rest;
} else {
self.ser.output.write_all(b",")?;
if let Some((ref config, ref pretty)) = self.ser.pretty {
if pretty.indent <= config.depth_limit {
self.ser.output.write_all(config.new_line.as_bytes())?;
}
}
}
self.ser.indent()?;
self.ser.output.write_all(key.as_bytes())?;
self.ser.output.write_all(b":")?;
if self.ser.is_pretty() {
self.ser.output.write_all(b" ")?;
}
value.serialize(&mut *self.ser)?;
Ok(())
}
fn end(self) -> Result<()> {
if let State::Rest = self.state {
if let Some((ref config, ref pretty)) = self.ser.pretty {
if pretty.indent <= config.depth_limit {
self.ser.output.write_all(b",")?;
self.ser.output.write_all(config.new_line.as_bytes())?;
}
}
}
self.ser.end_indent()?;
self.ser.output.write_all(b")")?;
Ok(())
}
}
impl<'a, W: io::Write> ser::SerializeStructVariant for Compound<'a, W> {
type Error = Error;
type Ok = ();
fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
where
T: ?Sized + Serialize,
{
ser::SerializeStruct::serialize_field(self, key, value)
}
fn end(self) -> Result<()> {
ser::SerializeStruct::end(self)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Serialize)]
struct EmptyStruct1;
#[derive(Serialize)]
struct EmptyStruct2 {}
#[derive(Serialize)]
struct MyStruct {
x: f32,
y: f32,
}
#[derive(Serialize)]
enum MyEnum {
A,
B(bool),
C(bool, f32),
D { a: i32, b: i32 },
}
#[test]
fn test_empty_struct() {
assert_eq!(to_string(&EmptyStruct1).unwrap(), "()");
assert_eq!(to_string(&EmptyStruct2 {}).unwrap(), "()");
}
#[test]
fn test_struct() {
let my_struct = MyStruct { x: 4.0, y: 7.0 };
assert_eq!(to_string(&my_struct).unwrap(), "(x:4,y:7)");
#[derive(Serialize)]
struct NewType(i32);
assert_eq!(to_string(&NewType(42)).unwrap(), "(42)");
#[derive(Serialize)]
struct TupleStruct(f32, f32);
assert_eq!(to_string(&TupleStruct(2.0, 5.0)).unwrap(), "(2,5)");
}
#[test]
fn test_option() {
assert_eq!(to_string(&Some(1u8)).unwrap(), "Some(1)");
assert_eq!(to_string(&None::<u8>).unwrap(), "None");
}
#[test]
fn test_enum() {
assert_eq!(to_string(&MyEnum::A).unwrap(), "A");
assert_eq!(to_string(&MyEnum::B(true)).unwrap(), "B(true)");
assert_eq!(to_string(&MyEnum::C(true, 3.5)).unwrap(), "C(true,3.5)");
assert_eq!(to_string(&MyEnum::D { a: 2, b: 3 }).unwrap(), "D(a:2,b:3)");
}
#[test]
fn test_array() {
let empty: [i32; 0] = [];
assert_eq!(to_string(&empty).unwrap(), "()");
        let empty_ref: &[i32] = &empty;
assert_eq!(to_string(&empty_ref).unwrap(), "[]");
assert_eq!(to_string(&[2, 3, 4i32]).unwrap(), "(2,3,4)");
assert_eq!(to_string(&(&[2, 3, 4i32] as &[i32])).unwrap(), "[2,3,4]");
}
#[test]
fn test_map() {
use std::collections::HashMap;
let mut map = HashMap::new();
map.insert((true, false), 4);
map.insert((false, false), 123);
let s = to_string(&map).unwrap();
        assert!(s.starts_with('{'));
        assert!(s.contains("(true,false):4"));
        assert!(s.contains("(false,false):123"));
        assert!(s.ends_with('}'));
}
#[test]
fn test_string() {
assert_eq!(to_string(&"Some string").unwrap(), "\"Some string\"");
}
#[test]
fn test_char() {
assert_eq!(to_string(&'c').unwrap(), "'c'");
}
#[test]
fn test_escape() {
assert_eq!(to_string(&r#""Quoted""#).unwrap(), r#""\"Quoted\"""#);
}
#[test]
fn test_byte_stream() {
use serde_bytes;
let small: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
assert_eq!(
to_string(&small).unwrap(),
"(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)"
);
let large = vec![255u8; 64];
let large = serde_bytes::Bytes::new(&large);
assert_eq!(
to_string(&large).unwrap(),
concat!(
"\"/////////////////////////////////////////",
"////////////////////////////////////////////w==\""
)
);
}
}
register_predicates.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaults
import (
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
)
func init() {
// Register functions that extract metadata used by predicates computations.
	scheduler.RegisterPredicateMetadataProducerFactory(
		func(args scheduler.PluginFactoryArgs) predicates.MetadataProducer {
			f := &predicates.MetadataProducerFactory{}
			return f.GetPredicateMetadata
})
// IMPORTANT NOTES for predicate developers:
// Registers predicates and priorities that are not enabled by default, but user can pick when creating their
// own set of priorities/predicates.
// PodFitsPorts has been replaced by PodFitsHostPorts for better user understanding.
// For backwards compatibility with 1.0, PodFitsPorts is registered as well.
scheduler.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsHostPorts)
// Fit is defined based on the absence of port conflicts.
// This predicate is actually a default predicate, because it is invoked from
// predicates.GeneralPredicates()
scheduler.RegisterFitPredicate(predicates.PodFitsHostPortsPred, predicates.PodFitsHostPorts)
// Fit is determined by resource availability.
// This predicate is actually a default predicate, because it is invoked from
// predicates.GeneralPredicates()
scheduler.RegisterFitPredicate(predicates.PodFitsResourcesPred, predicates.PodFitsResources)
// Fit is determined by the presence of the Host parameter and a string match
// This predicate is actually a default predicate, because it is invoked from
// predicates.GeneralPredicates()
scheduler.RegisterFitPredicate(predicates.HostNamePred, predicates.PodFitsHost)
// Fit is determined by node selector query.
scheduler.RegisterFitPredicate(predicates.MatchNodeSelectorPred, predicates.PodMatchNodeSelector)
// Fit is determined by volume zone requirements.
scheduler.RegisterFitPredicateFactory(
predicates.NoVolumeZoneConflictPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewVolumeZonePredicate(args.PVLister, args.PVCLister, args.StorageClassLister)
},
)
// Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node
scheduler.RegisterFitPredicateFactory(
predicates.MaxEBSVolumeCountPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.CSINodeLister, args.StorageClassLister, args.PVLister, args.PVCLister)
},
)
// Fit is determined by whether or not there would be too many GCE PD volumes attached to the node
scheduler.RegisterFitPredicateFactory(
predicates.MaxGCEPDVolumeCountPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.CSINodeLister, args.StorageClassLister, args.PVLister, args.PVCLister)
},
)
// Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node
scheduler.RegisterFitPredicateFactory(
predicates.MaxAzureDiskVolumeCountPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.CSINodeLister, args.StorageClassLister, args.PVLister, args.PVCLister)
},
)
scheduler.RegisterFitPredicateFactory(
predicates.MaxCSIVolumeCountPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewCSIMaxVolumeLimitPredicate(args.CSINodeLister, args.PVLister, args.PVCLister, args.StorageClassLister)
},
)
scheduler.RegisterFitPredicateFactory(
predicates.MaxCinderVolumeCountPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, args.CSINodeLister, args.StorageClassLister, args.PVLister, args.PVCLister)
},
)
// Fit is determined by inter-pod affinity.
scheduler.RegisterFitPredicateFactory(
predicates.MatchInterPodAffinityPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewPodAffinityPredicate(args.NodeInfoLister, args.PodLister)
},
)
// Fit is determined by non-conflicting disk volumes.
scheduler.RegisterFitPredicate(predicates.NoDiskConflictPred, predicates.NoDiskConflict)
// GeneralPredicates are the predicates that are enforced by all Kubernetes components
// (e.g. kubelet and all schedulers)
scheduler.RegisterFitPredicate(predicates.GeneralPred, predicates.GeneralPredicates)
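// Note: the mandatory fit predicates registered below are always enforced,
// even when a user-defined policy omits them.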
// Fit is determined based on whether a pod can tolerate all of the node's taints
scheduler.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints)
// Fit is determined based on whether a pod can tolerate the node's unschedulable status
scheduler.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate)
// Fit is determined by volume topology requirements.
scheduler.RegisterFitPredicateFactory(
predicates.CheckVolumeBindingPred,
func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewVolumeBindingPredicate(args.VolumeBinder)
},
)
} | func(args scheduler.PluginFactoryArgs) predicates.MetadataProducer {
f := &predicates.MetadataProducerFactory{} |
test_raw_node.rs | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use protobuf::{self, ProtobufEnum};
use raft::eraftpb::*;
use raft::storage::MemStorage;
use raft::*;
use test_util::*;
fn new_peer(id: u64) -> Peer {
Peer {
id,
..Default::default()
}
}
fn entry(t: EntryType, term: u64, i: u64, data: Option<Vec<u8>>) -> Entry {
let mut e = Entry::new();
e.set_index(i);
e.set_term(term);
if let Some(d) = data {
e.set_data(d);
}
e.set_entry_type(t);
e
}
fn conf_change(t: ConfChangeType, node_id: u64) -> ConfChange {
let mut cc = ConfChange::new();
cc.set_change_type(t);
cc.set_node_id(node_id);
cc
}
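// Checks a Ready against the expected soft state, hard state, entries,
// committed entries and must_sync flag; read states, snapshot and messages
// are expected to be empty.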
fn cmp_ready(
r: &Ready,
ss: &Option<SoftState>,
hs: &Option<HardState>,
entries: &[Entry],
committed_entries: Vec<Entry>,
must_sync: bool,
) -> bool {
r.ss() == ss.as_ref()
&& r.hs() == hs.as_ref()
&& r.entries() == entries
&& r.committed_entries == Some(committed_entries)
&& r.must_sync() == must_sync
&& r.read_states().is_empty()
&& r.snapshot() == &Snapshot::default()
&& r.messages.is_empty()
}
fn new_raw_node(
id: u64,
peers: Vec<u64>,
election: usize,
heartbeat: usize,
storage: MemStorage,
peer_nodes: Vec<Peer>,
) -> RawNode<MemStorage> {
RawNode::new(
&new_test_config(id, peers, election, heartbeat),
storage,
peer_nodes,
)
.unwrap()
}
// test_raw_node_step ensures that RawNode.Step ignore local message.
#[test]
fn test_raw_node_step() {
setup_for_test();
for msg_t in MessageType::values() {
let mut raw_node = new_raw_node(1, vec![], 10, 1, new_storage(), vec![new_peer(1)]);
let res = raw_node.step(new_message(0, 0, *msg_t, 0));
// local msg should be ignored.
if vec![
MessageType::MsgBeat,
MessageType::MsgHup,
MessageType::MsgUnreachable,
MessageType::MsgSnapStatus,
]
.contains(msg_t)
{
assert_eq!(res, Err(Error::StepLocalMsg));
}
}
}
// test_raw_node_read_index_to_old_leader ensures that a MsgReadIndex to the old leader gets
// forwarded to the new leader, and that the 'send' method does not attach its term.
#[test]
fn test_raw_node_read_index_to_old_leader() {
setup_for_test();
let r1 = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage());
let r2 = new_test_raft(2, vec![1, 2, 3], 10, 1, new_storage());
let r3 = new_test_raft(3, vec![1, 2, 3], 10, 1, new_storage());
let mut nt = Network::new(vec![Some(r1), Some(r2), Some(r3)]);
// elect r1 as leader
nt.send(vec![new_message(1, 1, MessageType::MsgHup, 0)]);
let mut test_entries = Entry::new();
test_entries.set_data(b"testdata".to_vec());
// send readindex request to r2(follower)
let _ = nt.peers.get_mut(&2).unwrap().step(new_message_with_entries(
2,
2,
MessageType::MsgReadIndex,
vec![test_entries.clone()],
));
// verify r2(follower) forwards this message to r1(leader) with term not set
assert_eq!(nt.peers[&2].msgs.len(), 1);
let read_index_msg1 =
new_message_with_entries(2, 1, MessageType::MsgReadIndex, vec![test_entries.clone()]);
assert_eq!(read_index_msg1, nt.peers[&2].msgs[0]);
// send readindex request to r3(follower)
let _ = nt.peers.get_mut(&3).unwrap().step(new_message_with_entries(
3,
3,
MessageType::MsgReadIndex,
vec![test_entries.clone()],
));
// verify r3(follower) forwards this message to r1(leader) with term not set as well.
assert_eq!(nt.peers[&3].msgs.len(), 1);
let read_index_msg2 =
new_message_with_entries(3, 1, MessageType::MsgReadIndex, vec![test_entries.clone()]);
assert_eq!(nt.peers[&3].msgs[0], read_index_msg2);
// now elect r3 as leader
nt.send(vec![new_message(3, 3, MessageType::MsgHup, 0)]);
// let r1 step the two messages we previously got from r2, r3
let _ = nt.peers.get_mut(&1).unwrap().step(read_index_msg1);
let _ = nt.peers.get_mut(&1).unwrap().step(read_index_msg2);
// verify r1(follower) forwards these messages again to r3(new leader)
assert_eq!(nt.peers[&1].msgs.len(), 2);
let read_index_msg3 =
new_message_with_entries(1, 3, MessageType::MsgReadIndex, vec![test_entries.clone()]);
assert_eq!(nt.peers[&1].msgs[0], read_index_msg3);
assert_eq!(nt.peers[&1].msgs[1], read_index_msg3);
}
// test_raw_node_propose_and_conf_change ensures that RawNode.propose and
// RawNode.propose_conf_change send the given proposal and ConfChange to the underlying raft.
#[test]
fn test_raw_node_propose_and_conf_change() {
setup_for_test();
let s = new_storage();
let mut raw_node = new_raw_node(1, vec![], 10, 1, s.clone(), vec![new_peer(1)]);
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
raw_node.advance(rd);
raw_node.campaign().expect("");
let mut proposed = false;
let mut last_index;
let mut ccdata = vec![];
loop {
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
// Once we are the leader, propose a command and a ConfChange.
if !proposed && rd.ss().is_some() && rd.ss().unwrap().leader_id == raw_node.raft.id {
raw_node.propose(vec![], b"somedata".to_vec()).expect("");
let cc = conf_change(ConfChangeType::AddNode, 1);
ccdata = protobuf::Message::write_to_bytes(&cc).unwrap();
raw_node.propose_conf_change(vec![], cc).expect("");
proposed = true;
}
raw_node.advance(rd);
// Exit when we have four entries: one ConfChange, one no-op for the election,
// our proposed command and proposed ConfChange.
last_index = s.last_index().unwrap();
if last_index >= 4 {
break;
}
}
let entries = s.entries(last_index - 1, last_index + 1, NO_LIMIT).unwrap();
assert_eq!(entries.len(), 2);
assert_eq!(entries[0].get_data(), b"somedata");
assert_eq!(entries[1].get_entry_type(), EntryType::EntryConfChange);
assert_eq!(entries[1].get_data(), &*ccdata);
}
// test_raw_node_propose_add_duplicate_node ensures that proposing to add the same node twice
// does not affect a later proposal to add a new node.
#[test]
fn test_raw_node_propose_add_duplicate_node() {
setup_for_test();
let s = new_storage();
let mut raw_node = new_raw_node(1, vec![], 10, 1, s.clone(), vec![new_peer(1)]);
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
raw_node.advance(rd);
raw_node.campaign().expect("");
loop {
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
if rd.ss().is_some() && rd.ss().unwrap().leader_id == raw_node.raft.id {
raw_node.advance(rd);
break;
}
raw_node.advance(rd);
}
let mut propose_conf_change_and_apply = |cc| {
raw_node.propose_conf_change(vec![], cc).expect("");
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
for e in rd.committed_entries.as_ref().unwrap() {
if e.get_entry_type() == EntryType::EntryConfChange {
let conf_change = protobuf::parse_from_bytes(e.get_data()).unwrap();
raw_node.apply_conf_change(&conf_change);
}
}
raw_node.advance(rd);
};
let cc1 = conf_change(ConfChangeType::AddNode, 1);
let ccdata1 = protobuf::Message::write_to_bytes(&cc1).unwrap();
propose_conf_change_and_apply(cc1.clone());
// try to add the same node again
propose_conf_change_and_apply(cc1);
// the new node join should be ok
let cc2 = conf_change(ConfChangeType::AddNode, 2);
let ccdata2 = protobuf::Message::write_to_bytes(&cc2).unwrap();
propose_conf_change_and_apply(cc2);
let last_index = s.last_index().unwrap();
// the last three entries should be: ConfChange cc1, cc1, cc2
let mut entries = s.entries(last_index - 2, last_index + 1, NO_LIMIT).unwrap();
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].take_data(), ccdata1);
assert_eq!(entries[2].take_data(), ccdata2);
}
#[test]
fn test_raw_node_propose_add_learner_node() {
setup_for_test();
let s = new_storage();
let mut raw_node = new_raw_node(1, vec![], 10, 1, s.clone(), vec![new_peer(1)]);
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
raw_node.advance(rd);
raw_node.campaign().expect("");
loop {
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
if rd.ss().is_some() && rd.ss().unwrap().leader_id == raw_node.raft.id {
raw_node.advance(rd);
break;
}
raw_node.advance(rd);
}
// propose add learner node and check apply state
let cc = conf_change(ConfChangeType::AddLearnerNode, 2);
raw_node.propose_conf_change(vec![], cc).expect("");
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
assert!(
rd.committed_entries.is_some() && rd.committed_entries.as_ref().unwrap().len() == 1,
"should committed the conf change entry"
);
let e = &rd.committed_entries.as_ref().unwrap()[0];
let conf_change = protobuf::parse_from_bytes(e.get_data()).unwrap();
let conf_state = raw_node.apply_conf_change(&conf_change);
assert_eq!(conf_state.nodes, vec![1]);
assert_eq!(conf_state.learners, vec![2]);
}
// test_raw_node_read_index ensures that RawNode.read_index sends the MsgReadIndex message
// to the underlying raft. It also ensures that ReadState can be read out.
#[test]
fn test_raw_node_read_index() {
setup_for_test();
let wrequest_ctx = b"somedata".to_vec();
let wrs = vec![ReadState {
index: 2u64,
request_ctx: wrequest_ctx.clone(),
}];
let s = new_storage();
let mut raw_node = new_raw_node(1, vec![], 10, 1, s.clone(), vec![new_peer(1)]);
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
raw_node.advance(rd);
raw_node.campaign().expect("");
loop {
let rd = raw_node.ready();
s.wl().append(rd.entries()).expect("");
if rd.ss().map_or(false, |ss| ss.leader_id == raw_node.raft.id) {
raw_node.advance(rd);
// Once we are the leader, issue a read index request
raw_node.read_index(wrequest_ctx.clone());
break;
}
raw_node.advance(rd);
}
// ensure the read_states can be read out
assert!(!raw_node.raft.read_states.is_empty());
assert!(raw_node.has_ready());
let rd = raw_node.ready();
assert_eq!(rd.read_states(), wrs.as_slice());
s.wl().append(&rd.entries()).expect("");
raw_node.advance(rd);
// ensure raft.read_states is reset after advance
assert!(!raw_node.has_ready());
assert!(raw_node.raft.read_states.is_empty());
}
// test_raw_node_start ensures that a node can be started correctly. The node should
// start with correct configuration change entries, and can accept and commit
// proposals.
#[test]
fn test_raw_node_start() {
setup_for_test();
let cc = conf_change(ConfChangeType::AddNode, 1);
let ccdata = protobuf::Message::write_to_bytes(&cc).unwrap();
let store = new_storage();
let mut raw_node = new_raw_node(1, vec![], 10, 1, store.clone(), vec![new_peer(1)]);
let rd = raw_node.ready();
info!("rd {:?}", &rd);
assert!(cmp_ready(
&rd,
&None,
&Some(hard_state(1, 1, 0)),
&[entry(
EntryType::EntryConfChange,
1,
1,
Some(ccdata.clone()),
)],
vec![entry(
EntryType::EntryConfChange,
1,
1,
Some(ccdata.clone()),
)],
true,
));
store.wl().append(rd.entries()).expect("");
raw_node.advance(rd);
let rd = raw_node.ready();
store.wl().append(rd.entries()).expect("");
raw_node.advance(rd);
raw_node.campaign().expect("");
let rd = raw_node.ready();
store.wl().append(rd.entries()).expect("");
raw_node.advance(rd);
raw_node.propose(vec![], b"foo".to_vec()).expect("");
let rd = raw_node.ready();
assert!(cmp_ready(
&rd,
&None,
&Some(hard_state(2, 3, 1)),
&[new_entry(2, 3, Some("foo"))],
vec![new_entry(2, 3, Some("foo"))],
false, | assert!(!raw_node.has_ready());
}
#[test]
fn test_raw_node_restart() {
setup_for_test();
let entries = vec![empty_entry(1, 1), new_entry(1, 2, Some("foo"))];
let st = hard_state(1, 1, 0);
let store = new_storage();
store.wl().set_hardstate(st);
store.wl().append(&entries).expect("");
let mut raw_node = new_raw_node(1, vec![], 10, 1, store, vec![]);
let rd = raw_node.ready();
assert!(cmp_ready(
&rd,
&None,
&None,
&[],
entries[..1].to_vec(),
false
));
raw_node.advance(rd);
assert!(!raw_node.has_ready());
}
#[test]
fn test_raw_node_restart_from_snapshot() {
setup_for_test();
let snap = new_snapshot(2, 1, vec![1, 2]);
let entries = vec![new_entry(1, 3, Some("foo"))];
let st = hard_state(1, 3, 0);
let s = new_storage();
s.wl().set_hardstate(st);
s.wl().apply_snapshot(snap).expect("");
s.wl().append(&entries).expect("");
let mut raw_node = new_raw_node(1, vec![], 10, 1, s, vec![]);
let rd = raw_node.ready();
assert!(cmp_ready(&rd, &None, &None, &[], entries.clone(), false));
raw_node.advance(rd);
assert!(!raw_node.has_ready());
}
// test_skip_bcast_commit ensures that empty commit message is not sent out
// when skip_bcast_commit is true.
#[test]
fn test_skip_bcast_commit() {
setup_for_test();
let mut config = new_test_config(1, vec![1, 2, 3], 10, 1);
config.skip_bcast_commit = true;
let r1 = new_test_raft_with_config(&config, new_storage());
let r2 = new_test_raft(2, vec![1, 2, 3], 10, 1, new_storage());
let r3 = new_test_raft(3, vec![1, 2, 3], 10, 1, new_storage());
let mut nt = Network::new(vec![Some(r1), Some(r2), Some(r3)]);
// elect r1 as leader
nt.send(vec![new_message(1, 1, MessageType::MsgHup, 0)]);
// Without bcast commit, followers will not update its commit index immediately.
let mut test_entries = Entry::new();
test_entries.set_data(b"testdata".to_vec());
let msg = new_message_with_entries(1, 1, MessageType::MsgPropose, vec![test_entries.clone()]);
nt.send(vec![msg.clone()]);
assert_eq!(nt.peers[&1].raft_log.committed, 2);
assert_eq!(nt.peers[&2].raft_log.committed, 1);
assert_eq!(nt.peers[&3].raft_log.committed, 1);
// After bcast heartbeat, followers will be informed the actual commit index.
for _ in 0..nt.peers[&1].get_randomized_election_timeout() {
nt.peers.get_mut(&1).unwrap().tick();
}
nt.send(vec![new_message(1, 1, MessageType::MsgHup, 0)]);
assert_eq!(nt.peers[&2].raft_log.committed, 2);
assert_eq!(nt.peers[&3].raft_log.committed, 2);
// The feature should be able to be adjusted at run time.
nt.peers.get_mut(&1).unwrap().skip_bcast_commit(false);
nt.send(vec![msg.clone()]);
assert_eq!(nt.peers[&1].raft_log.committed, 3);
assert_eq!(nt.peers[&2].raft_log.committed, 3);
assert_eq!(nt.peers[&3].raft_log.committed, 3);
nt.peers.get_mut(&1).unwrap().skip_bcast_commit(true);
// Later proposal should commit former proposal.
nt.send(vec![msg.clone()]);
nt.send(vec![msg]);
assert_eq!(nt.peers[&1].raft_log.committed, 5);
assert_eq!(nt.peers[&2].raft_log.committed, 4);
assert_eq!(nt.peers[&3].raft_log.committed, 4);
// When committing conf change, leader should always bcast commit.
let mut cc_entry = Entry::new();
cc_entry.set_entry_type(EntryType::EntryConfChange);
nt.send(vec![new_message_with_entries(
1,
1,
MessageType::MsgPropose,
vec![cc_entry],
)]);
assert!(nt.peers[&1].should_bcast_commit());
assert!(nt.peers[&2].should_bcast_commit());
assert!(nt.peers[&3].should_bcast_commit());
assert_eq!(nt.peers[&1].raft_log.committed, 6);
assert_eq!(nt.peers[&2].raft_log.committed, 6);
assert_eq!(nt.peers[&3].raft_log.committed, 6);
} | ));
store.wl().append(rd.entries()).expect("");
raw_node.advance(rd); |
test_service_init.go | /*
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test_init
import (
"testing"
"magma/orc8r/cloud/go/blobstore"
"magma/orc8r/cloud/go/orc8r"
"magma/orc8r/cloud/go/services/directoryd"
"magma/orc8r/cloud/go/services/directoryd/servicers"
"magma/orc8r/cloud/go/services/directoryd/storage"
"magma/orc8r/cloud/go/sqorc"
"magma/orc8r/cloud/go/test_utils"
"magma/orc8r/lib/go/protos"
"github.com/stretchr/testify/assert"
)
func StartTestService(t *testing.T) | {
// Create service
srv, lis := test_utils.NewTestService(t, orc8r.ModuleName, directoryd.ServiceName)
// Init storage
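// An in-memory SQLite store keeps the test hermetic; nothing persists across runs.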
db, err := sqorc.Open("sqlite3", ":memory:")
assert.NoError(t, err)
fact := blobstore.NewSQLBlobStorageFactory(storage.DirectorydTableBlobstore, db, sqorc.GetSqlBuilder())
err = fact.InitializeFactory()
assert.NoError(t, err)
store := storage.NewDirectorydBlobstore(fact)
// Add servicers
directoryServicer, err := servicers.NewDirectoryLookupServicer(store)
assert.NoError(t, err)
protos.RegisterDirectoryLookupServer(srv.GrpcServer, directoryServicer)
protos.RegisterGatewayDirectoryServiceServer(srv.GrpcServer, servicers.NewDirectoryUpdateServicer())
// Run service
go srv.RunTest(lis)
} |
|
Monitoring.ts | /// <amd-module name="Router/_Builder/_Bootstrap/DataAggregators/Monitoring" />
| import { IDataAggregatorModule, ICollectedDeps, IRenderOptions, IFullData } from 'Router/_Builder/_Bootstrap/Interface';
/**
* Monitoring scripts
*/
export class Monitoring implements IDataAggregatorModule {
execute(deps: ICollectedDeps, options?: IRenderOptions): Partial<IFullData> | null {
const HeadAPI = AppHead.getInstance();
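// Head API singleton, used below to append monitoring <script> tags to the page head.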
const errorMonitoringScript = AppEnv.getStore('ErrorMonitoringScript') || '';
/** If the store holds nothing, a default IStore comes back, but we still want a string. */
if (!!errorMonitoringScript && typeof errorMonitoringScript === 'string') {
HeadAPI.createTag('script', {type: 'text/javascript'}, errorMonitoringScript);
}
const CDNMonitoringScript = AppEnv.getStore('CDNMonitoringScript') || '';
/** If the store holds nothing, a default IStore comes back, but we still want a string. */
if (options.sbisCDN && !!CDNMonitoringScript && typeof CDNMonitoringScript === 'string') {
HeadAPI.createTag('script', {type: 'text/javascript', important: 'true'}, CDNMonitoringScript);
}
return null;
}
} | import * as AppEnv from 'Application/Env';
import { Head as AppHead } from 'Application/Page'; |
borrowck-lend-flow-if.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Note: the borrowck analysis is currently flow-insensitive.
// Therefore, some of these errors are marked as spurious and could be
// corrected by a simple change to the analysis. The others are
// either genuine or would require more advanced changes. The latter
// cases are noted.
fn borrow(_v: &int) {}
fn borrow_mut(_v: &mut int) |
fn cond() -> bool { panic!() }
fn for_func<F>(_f: F) where F: FnOnce() -> bool { panic!() }
fn produce<T>() -> T { panic!(); }
fn inc(v: &mut Box<int>) {
*v = box() (**v + 1);
}
fn pre_freeze_cond() {
// In this instance, the freeze is conditional and starts before
// the mut borrow.
let mut v = box 3;
let _w;
if cond() {
_w = &v;
}
borrow_mut(&mut *v); //~ ERROR cannot borrow
}
fn pre_freeze_else() {
// In this instance, the freeze and mut borrow are on separate sides
// of the if.
let mut v = box 3;
let _w;
if cond() {
_w = &v;
} else {
borrow_mut(&mut *v);
}
}
fn main() {}
| {} |
logger.py | """
Provides logging utilities.
"""
import argparse
import difflib
import os
from dataclasses import dataclass
import sys
from types import TracebackType
from typing import Any, Optional, Type, cast
import fora
@dataclass
class State:
"""Global state for logging."""
indentation_level: int = 0
"""The current global indentation level."""
state: State = State()
"""The global logger state."""
def use_color() -> bool:
"""Returns true if color should be used."""
if not isinstance(cast(Any, fora.args), argparse.Namespace):
return os.getenv("NO_COLOR") is None
return not fora.args.no_color
def col(color_code: str) -> str:
"""Returns the given argument only if color is enabled."""
return color_code if use_color() else ""
class IndentationContext:
"""A context manager to modify the indentation level."""
def __enter__(self) -> None:
state.indentation_level += 1
def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
_ = (exc_type, exc, traceback)
state.indentation_level -= 1
def ellipsis(s: str, width: int) -> str:
"""
Shrinks the given string to width (including an ellipsis character).
Parameters
----------
s
The string.
width
The maximum width.
Returns
-------
str
A modified string with at most `width` characters.
"""
if len(s) > width:
s = s[:width - 1] + "…"
return s
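# e.g. ellipsis("indentation", 6) == "inden…" (the ellipsis character counts toward the width).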
def indent() -> IndentationContext:
"" | def indent_prefix() -> str:
"""Returns the indentation prefix for the current indentation level."""
if not use_color():
return " " * state.indentation_level
ret = ""
for i in range(state.indentation_level):
if i % 2 == 0:
ret += "[90m│[m "
else:
ret += "[90m╵[m "
return ret
def debug(msg: str) -> None:
"""Prints the given message only in debug mode."""
if not fora.args.debug:
return
print(f" [1;34mDEBUG[m: {msg}", file=sys.stderr)
def debug_args(msg: str, args: dict[str, Any]) -> None:
"""Prints all given arguments when in debug mode."""
if not fora.args.debug:
return
str_args = ""
args = {k: v for k,v in args.items() if k != "self"}
if len(args) > 0:
str_args = " " + ", ".join(f"{k}={v}" for k,v in args.items())
print(f" [1;34mDEBUG[m: {msg}{str_args}", file=sys.stderr)
def print_indented(msg: str, **kwargs: Any) -> None:
"""Same as print(), but prefixes the message with the indentation prefix."""
print(f"{indent_prefix()}{msg}", **kwargs)
def connection_init(connector: Any) -> None:
"""Prints connection initialization information."""
print_indented(f"{col('[1;34m')}host{col('[m')} {connector.host.name} via {col('[1;33m')}{connector.host.url}{col('[m')}", flush=True)
def connection_failed(error_msg: str) -> None:
"""Signals that an error has occurred while establishing the connection."""
print(col("[1;31m") + "ERR" + col("[m"))
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{error_msg}{col('[m')}")
def connection_established() -> None:
"""Signals that the connection has been successfully established."""
#print(col("[1;32m") + "OK" + col("[m"))
def run_script(script: str, name: Optional[str] = None) -> None:
"""Prints the script file and name that is being executed next."""
if name is not None:
print_indented(f"{col('[33;1m')}script{col('[m')} {script} {col('[90m')}({name}){col('[m')}")
else:
print_indented(f"{col('[33;1m')}script{col('[m')} {script}")
def print_operation_title(op: Any, title_color: str, end: str = "\n") -> None:
"""Prints the operation title and description."""
name_if_given = (" " + col('[90m') + f"({op.name})" + col('[m')) if op.name is not None else ""
dry_run_info = f" {col('[90m')}(dry){col('[m')}" if fora.args.dry else ""
print_indented(f"{title_color}{op.op_name}{col('[m')}{dry_run_info} {op.description}{name_if_given}", end=end, flush=True)
def print_operation_early(op: Any) -> None:
"""Prints the operation title and description before the final status is known."""
title_color = col("[1;33m")
# Only overwrite status later if debugging is not enabled.
print_operation_title(op, title_color, end=" (early status)\n" if fora.args.debug else "")
def decode_escape(data: bytes, encoding: str = 'utf-8') -> str:
"""
Tries to decode the given data with the given encoding, but replaces all non-decodeable
and non-printable characters with backslash escape sequences.
Example:
```python
>>> decode_escape(b'It is Wednesday\\nmy dudes\\r\\n🐸\\xff\\0')
'It is Wednesday\\\\nmy dudes\\\\r\\\\n🐸\\\\xff\\\\0'
```
Parameters
----------
data
The content that should be decoded and escaped.
encoding
The encoding that should be tried. To preserve utf-8 symbols, use 'utf-8',
to replace any non-ascii character with an escape sequence use 'ascii'.
Returns
-------
str
The decoded and escaped string.
"""
def escape_char(c: str) -> str:
special = {'\x00': '\\0', '\n': '\\n', '\r': '\\r', '\t': '\\t'}
if c in special:
return special[c]
num = ord(c)
if not c.isprintable() and num <= 0xff:
return f"\\x{num:02x}"
return c
return ''.join([escape_char(c) for c in data.decode(encoding, 'backslashreplace')])
def diff(filename: str, old: Optional[bytes], new: Optional[bytes], color: bool = True) -> list[str]:
"""
Creates a diff between the old and new content of the given filename,
that can be printed to the console. This function returns the diff
output as an array of lines. The lines in the output array are not
terminated by newlines.
If color is True, the diff is colored using ANSI escape sequences.
If you want to provide an alternative diffing function, beware that
the input can theoretically contain any bytes and therefore should
be decoded as utf-8 if possible, but non-decodeable
or non-printable characters should be replaced with human readable
variants such as `\\x00`, `^@` or similar representations.
Your diffing function should still be able to work on the raw bytes
representation, after you acquire the diff and before you apply colors,
your output should be made printable with a function such as `fora.logger.decode_escape`:
```python
# First decode and escape
line = logger.decode_escape(byteline)
# Add coloring afterwards so ANSI escape sequences are not escaped
```
Parameters
----------
filename
The filename of the file that is being diffed.
old
The old content, or None if the file didn't exist before.
new
The new content, or None if the file was deleted.
color
Whether the output should be colored (with ANSI color sequences).
Returns
-------
list[str]
The lines of the diff output. The individual lines will not have a terminating newline.
"""
bdiff = list(difflib.diff_bytes(difflib.unified_diff,
a=[] if old is None else old.split(b'\n'),
b=[] if new is None else new.split(b'\n'),
lineterm=b''))
# Strip file name header and decode diff to be human readable.
difflines = map(decode_escape, bdiff[2:])
# Create custom file name header
action = 'created' if old is None else 'deleted' if new is None else 'modified'
title = f"{action}: {filename}"
N = len(title)
header = ['─' * N, title, '─' * N]
# Apply coloring if desired
if color:
def apply_color(line: str) -> str:
linecolor = {
'+': '[32m',
'-': '[31m',
'@': '[34m',
}
return linecolor.get(line[0], '[90m') + line + '[m'
# Apply color to diff
difflines = map(apply_color, difflines)
# Apply color to header
header = list(map(lambda line: f"[33m{line}[m", header))
return header + list(difflines)
# TODO: move functions to operation api. cleaner and has type access.
def _operation_state_infos(result: Any) -> list[str]:
def to_str(v: Any) -> str:
return v.hex() if isinstance(v, bytes) else str(v)
# Print "key: value" pairs with changes
state_infos: list[str] = []
for k,final_v in result.final.items():
if final_v is None:
continue
initial_v = result.initial[k]
str_initial_v = to_str(initial_v)
str_final_v = to_str(final_v)
# Add ellipsis on long strings, if we are not in verbose mode
if fora.args.verbose == 0:
k = ellipsis(k, 12)
str_initial_v = ellipsis(to_str(initial_v), 9)
str_final_v = ellipsis(to_str(final_v), 9+3+9 if initial_v is None else 9)
if initial_v == final_v:
if fora.args.verbose >= 1:
# TODO = instead of : for better readability
entry_str = f"{col('[90m')}{k}: {str_initial_v}{col('[m')}"
state_infos.append(entry_str)
else:
if initial_v is None:
entry_str = f"{col('[33m')}{k}: {col('[32m')}{str_final_v}{col('[m')}"
else:
entry_str = f"{col('[33m')}{k}: {col('[31m')}{str_initial_v}{col('[33m')} → {col('[32m')}{str_final_v}{col('[m')}"
state_infos.append(entry_str)
return state_infos
def print_operation(op: Any, result: Any) -> None:
"""Prints the operation summary after it has finished execution."""
if result.success:
title_color = col("[1;32m") if result.changed else col("[1;90m")
else:
title_color = col("[1;31m")
# Print title and name, overwriting the transitive status
print("\r", end="")
print_operation_title(op, title_color)
if not result.success:
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{result.failure_message}{col('[m')}")
return
if not fora.args.changes:
return
# Cache number of upcoming diffs to determine what box character to print
n_diffs = len(op.diffs) if fora.args.diff else 0
box_char = '└' if n_diffs == 0 else '├'
# Print "key: value" pairs with changes
state_infos = _operation_state_infos(result)
if len(state_infos) > 0:
print_indented(f"{col('[90m')}{box_char}{col('[m')} " + f"{col('[90m')},{col('[m')} ".join(state_infos))
if fora.args.diff:
diff_lines = []
# Generate diffs
for file, old, new in op.diffs:
diff_lines.extend(diff(file, old, new))
# Print diffs with block character line
if len(diff_lines) > 0:
for l in diff_lines[:-1]:
print_indented(f"{col('[90m')}│ {col('[m')}" + l)
print_indented(f"{col('[90m')}└ {col('[m')}" + diff_lines[-1])
| "Retruns a context manager that increases the indentation level."""
return IndentationContext()
|
process_engine_dto.rs | /*
* Camunda BPM REST API
*
* OpenApi Spec for Camunda BPM REST API.
*
* The version of the OpenAPI document: 7.14.0
*
* Generated by: https://openapi-generator.tech
*/
| pub struct ProcessEngineDto {
/// The name of the process engine.
#[serde(rename = "name", skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
impl ProcessEngineDto {
pub fn new() -> ProcessEngineDto {
ProcessEngineDto {
name: None,
}
}
} |
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] |
Sprint3 Creating Redshift Cluster.py |
import pandas as pd
import boto3
import json
import configparser
config = configparser.ConfigParser()
config.read_file(open('dwh.cfg'))
KEY = config.get('AWS','KEY')
SECRET = config.get('AWS','SECRET')
DWH_CLUSTER_TYPE = config.get("DWH","DWH_CLUSTER_TYPE")
DWH_NUM_NODES = config.get("DWH","DWH_NUM_NODES")
DWH_NODE_TYPE = config.get("DWH","DWH_NODE_TYPE")
DWH_CLUSTER_IDENTIFIER = config.get("DWH","DWH_CLUSTER_IDENTIFIER")
DWH_DB = config.get("DWH","DWH_DB")
DWH_DB_USER = config.get("DWH","DWH_DB_USER")
DWH_DB_PASSWORD = config.get("DWH","DWH_DB_PASSWORD")
DWH_PORT = config.get("DWH","DWH_PORT")
DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")
(DWH_DB_USER, DWH_DB_PASSWORD, DWH_DB)
pd.DataFrame({"Param":
["DWH_CLUSTER_TYPE", "DWH_NUM_NODES", "DWH_NODE_TYPE", "DWH_CLUSTER_IDENTIFIER", "DWH_DB", "DWH_DB_USER", "DWH_DB_PASSWORD", "DWH_PORT", "DWH_IAM_ROLE_NAME"],
"Value":
[DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE, DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER, DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME]
})
# # Create clients for IAM, EC2, S3 and Redshift
# In[69]:
ec2 = boto3.resource('ec2',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
s3 = boto3.resource('s3',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
iam = boto3.client('iam',aws_access_key_id=KEY,
aws_secret_access_key=SECRET,
region_name='us-west-2'
)
redshift = boto3.client('redshift',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
sampleDbBucket = s3.Bucket("awssampledbuswest2")
for obj in sampleDbBucket.objects.filter(Prefix="ssbgz"):
print(obj)
from botocore.exceptions import ClientError
#1.1 Create the role,
try:
print("1.1 Creating a new IAM Role")
dwhRole = iam.create_role(
Path='/',
RoleName=DWH_IAM_ROLE_NAME,
Description = "Allows Redshift clusters to call AWS services on your behalf.",
AssumeRolePolicyDocument=json.dumps(
{'Statement': [{'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'redshift.amazonaws.com'}}],
'Version': '2012-10-17'})
)
except Exception as e:
print(e)
print("1.2 Attaching Policy")
iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)['ResponseMetadata']['HTTPStatusCode']
print("1.3 Get the IAM role ARN")
roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
print(roleArn)
# # STEP 2: Redshift Cluster
#
# - Create a Redshift cluster
# In[83]:
try:
response = redshift.create_cluster(
#HW
ClusterType=DWH_CLUSTER_TYPE,
NodeType=DWH_NODE_TYPE,
NumberOfNodes=int(DWH_NUM_NODES),
#Identifiers & Credentials
DBName=DWH_DB,
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
MasterUsername=DWH_DB_USER,
MasterUserPassword=DWH_DB_PASSWORD,
#Roles (for s3 access)
IamRoles=[roleArn]
)
except Exception as e:
print(e)
# ## 2.1 *Describe* the cluster to see its status
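# prettyRedshiftProps renders a few interesting cluster properties as a two-column DataFrame.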
def prettyRedshiftProps(props):
|
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
prettyRedshiftProps(myClusterProps)
# ## 2.2 Take note of the cluster endpoint and role ARN
DWH_ENDPOINT = myClusterProps['Endpoint']['Address']
DWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']
print("DWH_ENDPOINT :: ", endpoint)
print("DWH_ROLE_ARN :: ", roleArn)
# ## STEP 3: Open an incoming TCP port to access the cluster ednpoint
# In[84]:
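# Note: CidrIp 0.0.0.0/0 opens the port to the whole internet; fine for a
# throwaway sandbox cluster, far too permissive for anything real.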
try:
vpc = ec2.Vpc(id=myClusterProps['VpcId'])
defaultSg = list(vpc.security_groups.all())[0]
print(defaultSg)
defaultSg.authorize_ingress(
GroupName=defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(DWH_PORT),
ToPort=int(DWH_PORT)
)
except Exception as e:
print(e)
# # STEP 4: Make sure you can connect to the cluster
get_ipython().run_line_magic('load_ext', 'sql')
conn_string="postgresql://{}:{}@{}:{}/{}".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB)
print(conn_string)
get_ipython().run_line_magic('sql', '$conn_string')
| pd.set_option('display.max_colwidth', -1)
keysToShow = ["ClusterIdentifier", "NodeType", "ClusterStatus", "MasterUsername", "DBName", "Endpoint", "NumberOfNodes", 'VpcId']
x = [(k, v) for k,v in props.items() if k in keysToShow]
return pd.DataFrame(data=x, columns=["Key", "Value"]) |
issue-52113.rs | //
#![allow(warnings)]
#![feature(nll)]
trait Bazinga {}
impl<F> Bazinga for F {}
fn produce1<'a>(data: &'a u32) -> impl Bazinga + 'a {
let x = move || {
let _data: &'a u32 = data;
};
x
}
fn produce2<'a>(data: &'a mut Vec<&'a u32>, value: &'a u32) -> impl Bazinga + 'a {
let x = move || {
let value: &'a u32 = value;
data.push(value);
};
x
}
fn produce3<'a, 'b: 'a>(data: &'a mut Vec<&'a u32>, value: &'b u32) -> impl Bazinga + 'a {
let x = move || {
let value: &'a u32 = value;
data.push(value);
};
x
}
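// produce_err fails: pushing a `&'a u32` into a `Vec<&'b u32>` requires
// `'a: 'b`, but only `'b: 'a` is declared.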
fn produce_err<'a, 'b: 'a>(data: &'b mut Vec<&'b u32>, value: &'a u32) -> impl Bazinga + 'b |
fn main() {}
| {
let x = move || {
let value: &'a u32 = value;
data.push(value);
};
x //~ ERROR lifetime may not live long enough
} |
oob.py | """
Copyright (c) 2016-17 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes.base import TemplateNode
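# Template node that wraps its resolved children in <oob>...</oob> (out-of-band) tags.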
class TemplateOOBNode(TemplateNode):
def __init__(self):
TemplateNode.__init__(self)
def resolve_to_string(self, bot, clientid):
resolved = self.resolve_children_to_string(bot, clientid)
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
return "<oob>" + resolved + "</oob>"
def resolve(self, bot, clientid):
try:
return self.resolve_to_string(bot, clientid)
except Exception as excep:
logging.exception(excep)
return ""
def to_string(self):
return "OOB"
def to_xml(self, bot, clientid):
xml = "<oob>"
xml += self.children_to_xml(bot, clientid)
xml += "</oob>"
return xml
def parse_expression(self, graph, expression):
head_text = self.get_text_from_element(expression)
self.parse_text(graph, head_text)
for child in expression:
graph.parse_tag_expression(child, self)
| tail_text = self.get_tail_from_element(child)
self.parse_text(graph, tail_text) |
|
get.rs | use std::str;
use async_trait::async_trait;
use clap::{App, Arg, ArgMatches, SubCommand};
use dimension_client::Error;
use dimension_node::rpcs::info::GetDeploy;
use crate::{command::ClientCommand, common, Success};
/// This enum defines the order in which the args are shown for this subcommand's help message.
enum DisplayOrder {
Verbose,
NodeAddress,
RpcId,
DeployHash,
}
/// Handles providing the arg for and retrieval of the deploy hash.
mod deploy_hash {
use super::*;
const ARG_NAME: &str = "deploy-hash";
const ARG_VALUE_NAME: &str = "HEX STRING";
const ARG_HELP: &str = "Hex-encoded deploy hash";
pub(super) fn arg() -> Arg<'static, 'static> |
pub(super) fn get<'a>(matches: &'a ArgMatches) -> &'a str {
matches
.value_of(ARG_NAME)
.unwrap_or_else(|| panic!("should have {} arg", ARG_NAME))
}
}
#[async_trait]
impl<'a, 'b> ClientCommand<'a, 'b> for GetDeploy {
const NAME: &'static str = "get-deploy";
const ABOUT: &'static str = "Retrieves a deploy from the network";
fn build(display_order: usize) -> App<'a, 'b> {
SubCommand::with_name(Self::NAME)
.about(Self::ABOUT)
.display_order(display_order)
.arg(common::verbose::arg(DisplayOrder::Verbose as usize))
.arg(common::node_address::arg(
DisplayOrder::NodeAddress as usize,
))
.arg(common::rpc_id::arg(DisplayOrder::RpcId as usize))
.arg(deploy_hash::arg())
}
async fn run(matches: &ArgMatches<'a>) -> Result<Success, Error> {
let maybe_rpc_id = common::rpc_id::get(matches);
let node_address = common::node_address::get(matches);
let verbosity_level = common::verbose::get(matches);
let deploy_hash = deploy_hash::get(matches);
dimension_client::get_deploy(maybe_rpc_id, node_address, verbosity_level, deploy_hash)
.await
.map(Success::from)
}
}
| {
Arg::with_name(ARG_NAME)
.required(true)
.value_name(ARG_VALUE_NAME)
.help(ARG_HELP)
.display_order(DisplayOrder::DeployHash as usize)
} |
SimpleServer.py | import MiniNero
import ed25519
import binascii
import PaperWallet
import cherrypy
import os
import time
import bitmonerod
import SimpleXMR2
lasttime = 0
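# Derives the hex-encoded ed25519 public key from a hex-encoded secret scalar.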
def HexSigningPubKey(s):
return binascii.hexlify(ed25519.publickey(ed25519.encodeint(MiniNero.hexToInt(s))))
def | (m, sk):
#note this seems to return nicely sized version of the signature
#contrast with, i.e. tweetnacl..
sk2 = ed25519.encodeint(MiniNero.hexToInt(sk))
pk = ed25519.publickey(sk2)
return binascii.hexlify(ed25519.signature(m, sk2, pk))
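# Verifies a hex-encoded ed25519 signature over message m against a hex-encoded public key.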
def Verify(sig, m, pk):
return ed25519.checkvalid(binascii.unhexlify(sig), m, binascii.unhexlify(pk))
class MiniNeroServer:
exposed = True
def GET(self, id=None):
times = str(int(time.time()))
return (times)
def POST(self, signature, Type, timestamp, amount=None, destination=None, pid=None, mixin=None):
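# Every request must carry an ed25519 signature over its type, timestamp and payload;
# requests with more than 30 seconds of clock skew are rejected.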
times= int(time.time())
pubkey = MiniNeroPk
global lasttime
if (abs(times - int(timestamp)) > 30):
ver = False
return ('fail based on timestamp too old')
else:
if Type == 'address':
message = Type+timestamp
ver = Verify(signature.encode("utf8"), message.encode("utf8"), pubkey)
if (ver):
print("getting address")
address = bitmonerod.myAddress()
return (str(address))
if Type == 'balance':
message = Type+timestamp
ver = Verify(signature.encode("utf8"), message.encode("utf8"), pubkey)
if (ver):
print("getting balance")
balance = bitmonerod.balance()
return (str(float(balance)/1000000000000))
if Type == 'send':
message = Type+amount.replace('.', 'd')+timestamp+destination
ver = Verify(signature.encode("utf8"), message.encode("utf8"), pubkey)
if (ver) and (abs(times - lasttime) > 30):
#create xmr2 order async, return uuid
uuid, xmr_amount, xmr_addr, xmr_pid = SimpleXMR2.btc2xmr(destination, amount)
bitmonerod.send(xmr_addr, float(xmr_amount), xmr_pid, 3)
lasttime = times
return ('order uuid: '+uuid)
if Type == 'sendXMR':
message = Type+amount.replace('.', 'd')+timestamp+destination
ver = Verify(signature.encode("utf8"), message.encode("utf8"), pubkey)
if (ver) and (abs(times - lasttime) > 30):
#create xmr2 order async, return uuid
#uuid, xmr_amount, xmr_addr, xmr_pid = SimpleXMR2.btc2xmr(destination, amount)
lasttime = times
xmr_amount = amount
xmr_addr = destination
xmr_pid = pid
bitmonerod.send(xmr_addr, float(xmr_amount), xmr_pid, 3)
return ('sent')
if __name__ == '__main__':
#check if api pubkey is created, if not create it:
if(os.path.isfile('MiniNeroPubKey.py')):
from MiniNeroPubKey import *
try:
MiniNeroPk
except NameError:
MiniNeroSk= PaperWallet.skGen()
MiniNeroPk= HexSigningPubKey(MiniNeroSk)
print("Your new api secret key is:")
print(MiniNeroSk)
print("You should save this in a password manager")
print("Your pubkey will be stored in MiniNeroPubKey.py")
f = open('MiniNeroPubKey.py', 'w')
f.write("MiniNeroPk = \'"+MiniNeroPk+"\'")
print("Your MiniNeroServer PubKey is:")
print(MiniNeroPk)
lasttime = 0
#Launch Cherry Server
cherrypy.tree.mount(
MiniNeroServer(), '/api/mininero',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher()}
}
)
cherrypy.server.socket_host = '0.0.0.0' #run on metal
cherrypy.engine.start()
cherrypy.engine.block()
| Signature |
node.rs | // This is an attempt at an implementation following the ideal
//
// ```
// struct BTreeMap<K, V> {
// height: usize,
// root: Option<Box<Node<K, V, height>>>
// }
//
// struct Node<K, V, height: usize> {
// keys: [K; 2 * B - 1],
// vals: [V; 2 * B - 1],
// edges: if height > 0 {
// [Box<Node<K, V, height - 1>>; 2 * B]
// } else { () },
// parent: *const Node<K, V, height + 1>,
// parent_idx: u16,
// len: u16,
// }
// ```
//
// Since Rust doesn't actually have dependent types and polymorphic recursion,
// we make do with lots of unsafety.
// A major goal of this module is to avoid complexity by treating the tree as a generic (if
// weirdly shaped) container and avoiding dealing with most of the B-Tree invariants. As such,
// this module doesn't care whether the entries are sorted, which nodes can be underfull, or
// even what underfull means. However, we do rely on a few invariants:
//
// - Trees must have uniform depth/height. This means that every path down to a leaf from a
// given node has exactly the same length.
// - A node of length `n` has `n` keys, `n` values, and (in an internal node) `n + 1` edges.
// This implies that even an empty internal node has at least one edge.
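// For example, with B = 6 a node stores at most CAPACITY = 2 * B - 1 = 11
// key/value pairs, and a full internal node has CAPACITY + 1 = 12 edges.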
use core::cmp::Ordering;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ptr::{self, NonNull, Unique};
use core::slice;
use crate::alloc::{AllocRef, Global, Layout};
use crate::boxed::Box;
const B: usize = 6;
pub const MIN_LEN: usize = B - 1;
pub const CAPACITY: usize = 2 * B - 1;
/// The underlying representation of leaf nodes.
#[repr(C)]
struct LeafNode<K, V> {
/// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`.
/// This either points to an actual node or is null.
parent: *const InternalNode<K, V>,
/// This node's index into the parent node's `edges` array.
/// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`.
/// This is only guaranteed to be initialized when `parent` is non-null.
parent_idx: MaybeUninit<u16>,
/// The number of keys and values this node stores.
///
/// This is next to `parent_idx` to encourage the compiler to join `len` and
/// `parent_idx` into the same 32-bit word, reducing space overhead.
len: u16,
/// The arrays storing the actual data of the node. Only the first `len` elements of each
/// array are initialized and valid.
keys: [MaybeUninit<K>; CAPACITY],
vals: [MaybeUninit<V>; CAPACITY],
}
impl<K, V> LeafNode<K, V> {
/// Creates a new `LeafNode`. Unsafe because all nodes should really be hidden behind
/// `BoxedNode`, preventing accidental dropping of uninitialized keys and values.
unsafe fn new() -> Self {
LeafNode {
// As a general policy, we leave fields uninitialized if they can be, as this should
// be both slightly faster and easier to track in Valgrind.
keys: [MaybeUninit::UNINIT; CAPACITY],
vals: [MaybeUninit::UNINIT; CAPACITY],
parent: ptr::null(),
parent_idx: MaybeUninit::uninit(),
len: 0,
}
}
}
/// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden
/// behind `BoxedNode`s to prevent dropping uninitialized keys and values. Any pointer to an
/// `InternalNode` can be directly casted to a pointer to the underlying `LeafNode` portion of the
/// node, allowing code to act on leaf and internal nodes generically without having to even check
/// which of the two a pointer is pointing at. This property is enabled by the use of `repr(C)`.
#[repr(C)]
struct InternalNode<K, V> {
data: LeafNode<K, V>,
/// The pointers to the children of this node. `len + 1` of these are considered
/// initialized and valid. Although during the process of `into_iter` or `drop`,
/// some pointers are dangling while others still need to be traversed.
edges: [MaybeUninit<BoxedNode<K, V>>; 2 * B],
}
impl<K, V> InternalNode<K, V> {
/// Creates a new `InternalNode`.
///
/// This is unsafe for two reasons. First, it returns an `InternalNode` by value, risking
/// dropping of uninitialized fields. Second, an invariant of internal nodes is that `len + 1`
/// edges are initialized and valid, meaning that even when the node is empty (having a
/// `len` of 0), there must be one initialized and valid edge. This function does not set up
/// such an edge.
unsafe fn new() -> Self {
InternalNode { data: unsafe { LeafNode::new() }, edges: [MaybeUninit::UNINIT; 2 * B] }
}
}
/// A managed, non-null pointer to a node. This is either an owned pointer to
/// `LeafNode<K, V>` or an owned pointer to `InternalNode<K, V>`.
///
/// However, `BoxedNode` contains no information as to which of the two types
/// of nodes it actually contains, and, partially due to this lack of information,
/// has no destructor.
struct BoxedNode<K, V> {
ptr: Unique<LeafNode<K, V>>,
}
impl<K, V> BoxedNode<K, V> {
fn from_leaf(node: Box<LeafNode<K, V>>) -> Self {
BoxedNode { ptr: Box::into_unique(node) }
}
fn from_internal(node: Box<InternalNode<K, V>>) -> Self {
BoxedNode { ptr: Box::into_unique(node).cast() }
}
unsafe fn from_ptr(ptr: NonNull<LeafNode<K, V>>) -> Self {
BoxedNode { ptr: unsafe { Unique::new_unchecked(ptr.as_ptr()) } }
}
fn as_ptr(&self) -> NonNull<LeafNode<K, V>> {
NonNull::from(self.ptr)
}
}
/// An owned tree.
///
/// Note that this does not have a destructor, and must be cleaned up manually.
pub struct Root<K, V> {
node: BoxedNode<K, V>,
/// The number of levels below the root node.
height: usize,
}
unsafe impl<K: Sync, V: Sync> Sync for Root<K, V> {}
unsafe impl<K: Send, V: Send> Send for Root<K, V> {}
impl<K, V> Root<K, V> {
/// Returns the number of levels below the root.
pub fn height(&self) -> usize {
self.height
}
/// Returns a new owned tree, with its own root node that is initially empty.
pub fn new_leaf() -> Self {
Root { node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })), height: 0 }
}
/// Borrows and returns an immutable reference to the node owned by the root.
pub fn node_as_ref(&self) -> NodeRef<marker::Immut<'_>, K, V, marker::LeafOrInternal> {
NodeRef {
height: self.height,
node: self.node.as_ptr(),
root: ptr::null(),
_marker: PhantomData,
}
}
/// Borrows and returns a mutable reference to the node owned by the root.
pub fn node_as_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, marker::LeafOrInternal> {
NodeRef {
height: self.height,
node: self.node.as_ptr(),
root: self as *mut _,
_marker: PhantomData,
}
}
pub fn into_ref(self) -> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
NodeRef {
height: self.height,
node: self.node.as_ptr(),
root: ptr::null(),
_marker: PhantomData,
}
}
/// Adds a new internal node with a single edge, pointing to the previous root, and make that
/// new node the root. This increases the height by 1 and is the opposite of
/// `pop_internal_level`.
pub fn push_internal_level(&mut self) -> NodeRef<marker::Mut<'_>, K, V, marker::Internal> {
let mut new_node = Box::new(unsafe { InternalNode::new() });
new_node.edges[0].write(unsafe { BoxedNode::from_ptr(self.node.as_ptr()) });
self.node = BoxedNode::from_internal(new_node);
self.height += 1;
let mut ret = NodeRef {
height: self.height,
node: self.node.as_ptr(),
root: self as *mut _,
_marker: PhantomData,
};
unsafe {
ret.reborrow_mut().first_edge().correct_parent_link();
}
ret
}
/// Removes the internal root node, using its first child as the new root.
/// As it is intended only to be called when the root has only one child,
/// no cleanup is done on any of the other children of the root.
/// This decreases the height by 1 and is the opposite of `push_internal_level`.
/// Panics if there is no internal level, i.e. if the root is a leaf.
pub fn pop_internal_level(&mut self) {
assert!(self.height > 0);
let top = self.node.ptr;
self.node = unsafe {
BoxedNode::from_ptr(
self.node_as_mut().cast_unchecked::<marker::Internal>().first_edge().descend().node,
)
};
self.height -= 1;
unsafe {
(*self.node_as_mut().as_leaf_mut()).parent = ptr::null();
}
unsafe {
Global.dealloc(NonNull::from(top).cast(), Layout::new::<InternalNode<K, V>>());
}
}
}
// N.B. `NodeRef` is always covariant in `K` and `V`, even when the `BorrowType`
// is `Mut`. This is technically wrong, but cannot result in any unsafety due to
// internal use of `NodeRef` because we stay completely generic over `K` and `V`.
// However, whenever a public type wraps `NodeRef`, make sure that it has the
// correct variance.
/// A reference to a node.
///
/// This type has a number of parameters that controls how it acts:
/// - `BorrowType`: This can be `Immut<'a>` or `Mut<'a>` for some `'a` or `Owned`.
/// When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`,
/// when this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`,
/// and when this is `Owned`, the `NodeRef` acts roughly like `Box<Node>`.
/// - `K` and `V`: These control what types of things are stored in the nodes.
/// - `Type`: This can be `Leaf`, `Internal`, or `LeafOrInternal`. When this is
/// `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the
/// `NodeRef` points to an internal node, and when this is `LeafOrInternal` the
/// `NodeRef` could be pointing to either type of node.
pub struct NodeRef<BorrowType, K, V, Type> {
/// The number of levels below the node.
height: usize,
node: NonNull<LeafNode<K, V>>,
// `root` is null unless the borrow type is `Mut`
root: *const Root<K, V>,
_marker: PhantomData<(BorrowType, Type)>,
}
impl<'a, K: 'a, V: 'a, Type> Copy for NodeRef<marker::Immut<'a>, K, V, Type> {}
impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> {
fn clone(&self) -> Self {
*self
}
}
unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {}
unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {}
unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
fn as_internal(&self) -> &InternalNode<K, V> {
unsafe { &*(self.node.as_ptr() as *mut InternalNode<K, V>) }
}
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
fn as_internal_mut(&mut self) -> &mut InternalNode<K, V> {
unsafe { &mut *(self.node.as_ptr() as *mut InternalNode<K, V>) }
}
}
impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
/// Finds the length of the node. This is the number of keys or values. In an
/// internal node, the number of edges is `len() + 1`.
/// For any node, the number of possible edge handles is also `len() + 1`.
/// Note that, despite being safe, calling this function can have the side effect
/// of invalidating mutable references that unsafe code has created.
pub fn len(&self) -> usize {
self.as_leaf().len as usize
}
/// Returns the height of this node in the whole tree. Zero height denotes the
/// leaf level.
pub fn height(&self) -> usize {
self.height
}
/// Temporarily takes out another, immutable reference to the same node.
fn reborrow(&self) -> NodeRef<marker::Immut<'_>, K, V, Type> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
/// Exposes the leaf "portion" of any leaf or internal node.
/// If the node is a leaf, this function simply opens up its data.
/// If the node is an internal node, so not a leaf, it does have all the data a leaf has
/// (header, keys and values), and this function exposes that.
fn as_leaf(&self) -> &LeafNode<K, V> {
// The node must be valid for at least the LeafNode portion.
// This is not a reference in the NodeRef type because we don't know if
// it should be unique or shared.
unsafe { self.node.as_ref() }
}
/// Borrows a view into the keys stored in the node.
pub fn keys(&self) -> &[K] {
self.reborrow().into_key_slice()
}
/// Borrows a view into the values stored in the node.
fn vals(&self) -> &[V] {
self.reborrow().into_val_slice()
}
/// Finds the parent of the current node. Returns `Ok(handle)` if the current
/// node actually has a parent, where `handle` points to the edge of the parent
/// that points to the current node. Returns `Err(self)` if the current node has
/// no parent, giving back the original `NodeRef`.
///
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn ascend(
self,
) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
let parent_as_leaf = self.as_leaf().parent as *const LeafNode<K, V>;
if let Some(non_zero) = NonNull::new(parent_as_leaf as *mut _) {
Ok(Handle {
node: NodeRef {
height: self.height + 1,
node: non_zero,
root: self.root,
_marker: PhantomData,
},
idx: unsafe { usize::from(*self.as_leaf().parent_idx.as_ptr()) },
_marker: PhantomData,
})
} else {
Err(self)
}
}
pub fn first_edge(self) -> Handle<Self, marker::Edge> {
unsafe { Handle::new_edge(self, 0) }
}
pub fn last_edge(self) -> Handle<Self, marker::Edge> {
let len = self.len();
unsafe { Handle::new_edge(self, len) }
}
/// Note that `self` must be nonempty.
pub fn first_kv(self) -> Handle<Self, marker::KV> {
let len = self.len();
assert!(len > 0);
unsafe { Handle::new_kv(self, 0) }
}
/// Note that `self` must be nonempty.
pub fn last_kv(self) -> Handle<Self, marker::KV> {
let len = self.len();
assert!(len > 0);
unsafe { Handle::new_kv(self, len - 1) }
}
}
impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
/// Similar to `ascend`, gets a reference to a node's parent node, but also
/// deallocates the current node in the process. This is unsafe because the
/// current node will still be accessible despite being deallocated.
pub unsafe fn deallocate_and_ascend(
self,
) -> Option<Handle<NodeRef<marker::Owned, K, V, marker::Internal>, marker::Edge>> {
let height = self.height;
let node = self.node;
let ret = self.ascend().ok();
unsafe {
Global.dealloc(
node.cast(),
if height > 0 {
Layout::new::<InternalNode<K, V>>()
} else {
Layout::new::<LeafNode<K, V>>()
},
);
}
ret
}
}
impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
/// Unsafely asserts to the compiler some static information about whether this
/// node is a `Leaf` or an `Internal`.
unsafe fn cast_unchecked<NewType>(&mut self) -> NodeRef<marker::Mut<'_>, K, V, NewType> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
/// Temporarily takes out another, mutable reference to the same node. Beware, as
/// this method is very dangerous, doubly so since it may not immediately appear
/// dangerous.
///
/// Because mutable pointers can roam anywhere around the tree and can even (through
/// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut`
/// can easily be used to make the original mutable pointer dangling, or, in the case
/// of a reborrowed handle, out of bounds.
// FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts
// the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety.
unsafe fn reborrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
/// Exposes the leaf "portion" of any leaf or internal node for writing.
/// If the node is a leaf, this function simply opens up its data.
/// If the node is an internal node, so not a leaf, it does have all the data a leaf has
/// (header, keys and values), and this function exposes that.
///
/// Returns a raw ptr to avoid asserting exclusive access to the entire node.
fn as_leaf_mut(&mut self) -> *mut LeafNode<K, V> {
self.node.as_ptr()
}
fn keys_mut(&mut self) -> &mut [K] {
// SAFETY: the caller will not be able to call further methods on self
// until the key slice reference is dropped, as we have unique access
// for the lifetime of the borrow.
unsafe { self.reborrow_mut().into_key_slice_mut() }
}
fn vals_mut(&mut self) -> &mut [V] {
// SAFETY: the caller will not be able to call further methods on self
// until the value slice reference is dropped, as we have unique access
// for the lifetime of the borrow.
unsafe { self.reborrow_mut().into_val_slice_mut() }
}
}
impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> {
fn into_key_slice(self) -> &'a [K] {
unsafe { slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().keys), self.len()) }
}
fn into_val_slice(self) -> &'a [V] {
unsafe { slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().vals), self.len()) }
}
}
impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
/// Gets a mutable reference to the root itself. This is useful primarily when the
/// height of the tree needs to be adjusted. Never call this on a reborrowed pointer.
pub fn into_root_mut(self) -> &'a mut Root<K, V> {
unsafe { &mut *(self.root as *mut Root<K, V>) }
}
fn into_key_slice_mut(mut self) -> &'a mut [K] {
// SAFETY: The keys of a node must always be initialized up to length.
unsafe {
slice::from_raw_parts_mut(
MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).keys),
self.len(),
)
}
}
fn into_val_slice_mut(mut self) -> &'a mut [V] {
// SAFETY: The values of a node must always be initialized up to length.
unsafe {
slice::from_raw_parts_mut(
MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).vals),
self.len(),
)
}
}
fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) {
// We cannot use the getters here, because calling the second one
// invalidates the reference returned by the first.
// More precisely, it is the call to `len` that is the culprit,
// because that creates a shared reference to the header, which *can*
// overlap with the keys (and even the values, for ZST keys).
let len = self.len();
let leaf = self.as_leaf_mut();
// SAFETY: The keys and values of a node must always be initialized up to length.
let keys = unsafe {
slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).keys), len)
};
let vals = unsafe {
slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).vals), len)
};
(keys, vals)
}
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
/// Adds a key/value pair to the end of the node.
pub fn push(&mut self, key: K, val: V) {
assert!(self.len() < CAPACITY);
let idx = self.len();
unsafe {
ptr::write(self.keys_mut().get_unchecked_mut(idx), key);
ptr::write(self.vals_mut().get_unchecked_mut(idx), val);
(*self.as_leaf_mut()).len += 1;
}
}
/// Adds a key/value pair to the beginning of the node.
pub fn push_front(&mut self, key: K, val: V) {
assert!(self.len() < CAPACITY);
unsafe {
slice_insert(self.keys_mut(), 0, key);
slice_insert(self.vals_mut(), 0, val);
(*self.as_leaf_mut()).len += 1;
}
}
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
/// Adds a key/value pair and an edge to go to the right of that pair to
/// the end of the node.
pub fn push(&mut self, key: K, val: V, edge: Root<K, V>) {
assert!(edge.height == self.height - 1);
assert!(self.len() < CAPACITY);
let idx = self.len();
unsafe {
ptr::write(self.keys_mut().get_unchecked_mut(idx), key);
ptr::write(self.vals_mut().get_unchecked_mut(idx), val);
self.as_internal_mut().edges.get_unchecked_mut(idx + 1).write(edge.node);
(*self.as_leaf_mut()).len += 1;
Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link();
}
}
// Unsafe because `first` and `after_last` must be in range
unsafe fn correct_childrens_parent_links(&mut self, first: usize, after_last: usize) {
debug_assert!(first <= self.len());
debug_assert!(after_last <= self.len() + 1);
for i in first..after_last {
unsafe { Handle::new_edge(self.reborrow_mut(), i) }.correct_parent_link();
}
}
fn correct_all_childrens_parent_links(&mut self) {
let len = self.len();
unsafe { self.correct_childrens_parent_links(0, len + 1) };
}
/// Adds a key/value pair and an edge to go to the left of that pair to
/// the beginning of the node.
pub fn push_front(&mut self, key: K, val: V, edge: Root<K, V>) {
assert!(edge.height == self.height - 1);
assert!(self.len() < CAPACITY);
unsafe {
slice_insert(self.keys_mut(), 0, key);
slice_insert(self.vals_mut(), 0, val);
slice_insert(
slice::from_raw_parts_mut(
MaybeUninit::first_ptr_mut(&mut self.as_internal_mut().edges),
self.len() + 1,
),
0,
edge.node,
);
(*self.as_leaf_mut()).len += 1;
self.correct_all_childrens_parent_links();
}
}
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
/// Removes a key/value pair from the end of this node and returns the pair.
/// If this is an internal node, also removes the edge that was to the right
/// of that pair and returns the orphaned node that this edge owned with its
/// parent erased.
pub fn pop(&mut self) -> (K, V, Option<Root<K, V>>) {
assert!(self.len() > 0);
let idx = self.len() - 1;
unsafe {
let key = ptr::read(self.keys().get_unchecked(idx));
let val = ptr::read(self.vals().get_unchecked(idx));
let edge = match self.reborrow_mut().force() {
ForceResult::Leaf(_) => None,
ForceResult::Internal(internal) => {
let edge =
ptr::read(internal.as_internal().edges.get_unchecked(idx + 1).as_ptr());
let mut new_root = Root { node: edge, height: internal.height - 1 };
(*new_root.node_as_mut().as_leaf_mut()).parent = ptr::null();
Some(new_root)
}
};
(*self.as_leaf_mut()).len -= 1;
(key, val, edge)
}
}
/// Removes a key/value pair from the beginning of this node. If this is an internal node,
/// also removes the edge that was to the left of that pair.
pub fn pop_front(&mut self) -> (K, V, Option<Root<K, V>>) {
assert!(self.len() > 0);
let old_len = self.len();
unsafe {
let key = slice_remove(self.keys_mut(), 0);
let val = slice_remove(self.vals_mut(), 0);
let edge = match self.reborrow_mut().force() {
ForceResult::Leaf(_) => None,
ForceResult::Internal(mut internal) => {
let edge = slice_remove(
slice::from_raw_parts_mut(
MaybeUninit::first_ptr_mut(&mut internal.as_internal_mut().edges),
old_len + 1,
),
0,
);
let mut new_root = Root { node: edge, height: internal.height - 1 };
(*new_root.node_as_mut().as_leaf_mut()).parent = ptr::null();
for i in 0..old_len {
Handle::new_edge(internal.reborrow_mut(), i).correct_parent_link();
}
Some(new_root)
}
};
(*self.as_leaf_mut()).len -= 1;
(key, val, edge)
}
}
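/// Returns raw pointers to the start of the node's key and value buffers,
/// for use in bulk moves via `move_kv`.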
fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) {
(self.keys_mut().as_mut_ptr(), self.vals_mut().as_mut_ptr())
}
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
/// Checks whether a node is an `Internal` node or a `Leaf` node.
pub fn force(
self,
) -> ForceResult<
NodeRef<BorrowType, K, V, marker::Leaf>,
NodeRef<BorrowType, K, V, marker::Internal>,
> {
if self.height == 0 {
ForceResult::Leaf(NodeRef {
height: self.height,
node: self.node,
root: self.root,
_marker: PhantomData,
})
} else {
ForceResult::Internal(NodeRef {
height: self.height,
node: self.node,
root: self.root,
_marker: PhantomData,
})
}
}
}
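// A minimal sketch (hypothetical helper, not part of the original API) of how
// callers typically dispatch on `force`: walk the leftmost spine of the tree
// down to a leaf and report its length.
//
//     fn leftmost_leaf_len<K, V>(
//         node: NodeRef<marker::Immut<'_>, K, V, marker::LeafOrInternal>,
//     ) -> usize {
//         match node.force() {
//             ForceResult::Leaf(leaf) => leaf.len(),
//             ForceResult::Internal(internal) => {
//                 leftmost_leaf_len(internal.first_edge().descend())
//             }
//         }
//     }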
/// A reference to a specific key/value pair or edge within a node. The `Node` parameter
/// must be a `NodeRef`, while the `Type` can either be `KV` (signifying a handle on a key/value
/// pair) or `Edge` (signifying a handle on an edge).
///
/// Note that even `Leaf` nodes can have `Edge` handles. Instead of representing a pointer to
/// a child node, these represent the spaces where child pointers would go between the key/value
/// pairs. For example, in a node with length 2, there would be 3 possible edge locations - one
/// to the left of the node, one between the two pairs, and one at the right of the node.
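///
/// Schematically, that node of length 2 has handle positions as follows,
/// where `e*` are edge indices and `kv*` are key/value indices:
///
/// ```text
/// e0 kv0 e1 kv1 e2
/// ```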
pub struct Handle<Node, Type> {
node: Node,
idx: usize,
_marker: PhantomData<Type>,
}
impl<Node: Copy, Type> Copy for Handle<Node, Type> {}
// We don't need the full generality of `#[derive(Clone)]`, as the only time `Node` will be
// `Clone`able is when it is an immutable reference and therefore `Copy`.
impl<Node: Copy, Type> Clone for Handle<Node, Type> {
fn clone(&self) -> Self {
*self
}
}
impl<Node, Type> Handle<Node, Type> {
/// Retrieves the node that contains the edge or key/value pair this handle points to.
pub fn into_node(self) -> Node {
self.node
}
/// Returns the position of this handle in the node.
pub fn idx(&self) -> usize {
self.idx
}
}
impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV> {
/// Creates a new handle to a key/value pair in `node`.
/// Unsafe because the caller must ensure that `idx < node.len()`.
pub unsafe fn new_kv(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
debug_assert!(idx < node.len());
Handle { node, idx, _marker: PhantomData }
}
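/// Returns a handle to the edge immediately to the left of this key/value pair.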
pub fn left_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
unsafe { Handle::new_edge(self.node, self.idx) }
}
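/// Returns a handle to the edge immediately to the right of this key/value pair.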
pub fn right_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
unsafe { Handle::new_edge(self.node, self.idx + 1) }
}
}
impl<BorrowType, K, V, NodeType, HandleType> PartialEq
for Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType>
{
fn eq(&self, other: &Self) -> bool {
self.node.node == other.node.node && self.idx == other.idx
}
}
impl<BorrowType, K, V, NodeType, HandleType> PartialOrd
for Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType>
{
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
if self.node.node == other.node.node { Some(self.idx.cmp(&other.idx)) } else { None }
}
}
impl<BorrowType, K, V, NodeType, HandleType>
Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType>
{
/// Temporarily takes out another, immutable handle on the same location.
pub fn reborrow(&self) -> Handle<NodeRef<marker::Immut<'_>, K, V, NodeType>, HandleType> {
// We can't use Handle::new_kv or Handle::new_edge because we don't know our type
Handle { node: self.node.reborrow(), idx: self.idx, _marker: PhantomData }
}
}
impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> {
/// Temporarily takes out another, mutable handle on the same location. Beware, as
/// this method is very dangerous, doubly so since it may not immediately appear
/// dangerous.
///
/// Because mutable pointers can roam anywhere around the tree and can even (through
/// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut`
/// can easily be used to make the original mutable pointer dangling, or, in the case
/// of a reborrowed handle, out of bounds.
// FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts
// the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety.
pub unsafe fn reborrow_mut(
&mut self,
) -> Handle<NodeRef<marker::Mut<'_>, K, V, NodeType>, HandleType> {
// We can't use Handle::new_kv or Handle::new_edge because we don't know our type
Handle { node: unsafe { self.node.reborrow_mut() }, idx: self.idx, _marker: PhantomData }
}
}
impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
/// Creates a new handle to an edge in `node`.
/// Unsafe because the caller must ensure that `idx <= node.len()`.
pub unsafe fn new_edge(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
debug_assert!(idx <= node.len());
Handle { node, idx, _marker: PhantomData }
}
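/// Returns a handle to the key/value pair immediately to the left of this edge,
/// or `Err(self)` if this is the leftmost edge of the node.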
pub fn left_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {
if self.idx > 0 {
Ok(unsafe { Handle::new_kv(self.node, self.idx - 1) })
} else {
Err(self)
}
}
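/// Returns a handle to the key/value pair immediately to the right of this edge,
/// or `Err(self)` if this is the rightmost edge of the node.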
pub fn right_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {
if self.idx < self.node.len() {
Ok(unsafe { Handle::new_kv(self.node, self.idx) })
} else {
Err(self)
}
}
}
impl<'a, K, V, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::Edge> {
/// Helps implementations of `insert_fit` for a particular `NodeType`,
/// by taking care of leaf data.
/// Inserts a new key/value pair between the key/value pairs to the right and left of
/// this edge. This method assumes that there is enough space in the node for the new
/// pair to fit.
fn leafy_insert_fit(&mut self, key: K, val: V) {
// Necessary for correctness, but in a private module
debug_assert!(self.node.len() < CAPACITY);
unsafe {
slice_insert(self.node.keys_mut(), self.idx, key);
slice_insert(self.node.vals_mut(), self.idx, val);
(*self.node.as_leaf_mut()).len += 1;
}
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Inserts a new key/value pair between the key/value pairs to the right and left of
/// this edge. This method assumes that there is enough space in the node for the new
/// pair to fit.
///
/// The returned pointer points to the inserted value.
fn insert_fit(&mut self, key: K, val: V) -> *mut V {
self.leafy_insert_fit(key, val);
unsafe { self.node.vals_mut().get_unchecked_mut(self.idx) }
}
/// Inserts a new key/value pair between the key/value pairs to the right and left of
/// this edge. This method splits the node if there isn't enough room.
///
/// The returned pointer points to the inserted value.
fn insert(mut self, key: K, val: V) -> (InsertResult<'a, K, V, marker::Leaf>, *mut V) {
if self.node.len() < CAPACITY {
let ptr = self.insert_fit(key, val);
let kv = unsafe { Handle::new_kv(self.node, self.idx) };
(InsertResult::Fit(kv), ptr)
} else {
let middle = unsafe { Handle::new_kv(self.node, B) };
let (mut left, k, v, mut right) = middle.split();
let ptr = if self.idx <= B {
unsafe { Handle::new_edge(left.reborrow_mut(), self.idx).insert_fit(key, val) }
} else {
unsafe {
Handle::new_edge(
right.node_as_mut().cast_unchecked::<marker::Leaf>(),
self.idx - (B + 1),
)
.insert_fit(key, val)
}
};
(InsertResult::Split(SplitResult { left: left.forget_type(), k, v, right }), ptr)
}
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
/// Fixes the parent pointer and index in the child node below this edge. This is useful
/// when the ordering of edges has been changed, such as in the various `insert` methods.
fn correct_parent_link(mut self) {
let idx = self.idx as u16;
let ptr = self.node.as_internal_mut() as *mut _;
let mut child = self.descend();
unsafe {
(*child.as_leaf_mut()).parent = ptr;
(*child.as_leaf_mut()).parent_idx.write(idx);
}
}
/// Inserts a new key/value pair and an edge that will go to the right of that new pair
/// between this edge and the key/value pair to the right of this edge. This method assumes
/// that there is enough space in the node for the new pair to fit.
fn insert_fit(&mut self, key: K, val: V, edge: Root<K, V>) {
// Necessary for correctness, but in a private module
debug_assert!(self.node.len() < CAPACITY);
debug_assert!(edge.height == self.node.height - 1);
unsafe {
self.leafy_insert_fit(key, val);
slice_insert(
slice::from_raw_parts_mut(
MaybeUninit::first_ptr_mut(&mut self.node.as_internal_mut().edges),
self.node.len(),
),
self.idx + 1,
edge.node,
);
for i in (self.idx + 1)..(self.node.len() + 1) {
Handle::new_edge(self.node.reborrow_mut(), i).correct_parent_link();
}
}
}
/// Inserts a new key/value pair and an edge that will go to the right of that new pair
/// between this edge and the key/value pair to the right of this edge. This method splits
/// the node if there isn't enough room.
fn insert(
mut self,
key: K,
val: V,
edge: Root<K, V>,
) -> InsertResult<'a, K, V, marker::Internal> {
assert!(edge.height == self.node.height - 1);
if self.node.len() < CAPACITY {
self.insert_fit(key, val, edge);
let kv = unsafe { Handle::new_kv(self.node, self.idx) };
InsertResult::Fit(kv)
} else {
let middle = unsafe { Handle::new_kv(self.node, B) };
let (mut left, k, v, mut right) = middle.split();
if self.idx <= B {
unsafe {
Handle::new_edge(left.reborrow_mut(), self.idx).insert_fit(key, val, edge);
}
} else {
unsafe {
Handle::new_edge(
right.node_as_mut().cast_unchecked::<marker::Internal>(),
self.idx - (B + 1),
)
.insert_fit(key, val, edge);
}
}
InsertResult::Split(SplitResult { left: left.forget_type(), k, v, right })
}
}
}
impl<'a, K: 'a, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Inserts a new key/value pair between the key/value pairs to the right and left of
/// this edge. This method splits the node if there isn't enough room, and tries to
/// insert the split off portion into the parent node recursively, until the root is reached.
///
/// If the returned result is a `Fit`, its handle's node can be this edge's node or an ancestor.
/// If the returned result is a `Split`, the `left` field will be the root node.
/// The returned pointer points to the inserted value.
pub fn insert_recursing(
self,
key: K,
value: V,
) -> (InsertResult<'a, K, V, marker::LeafOrInternal>, *mut V) {
let (mut split, val_ptr) = match self.insert(key, value) {
(InsertResult::Fit(handle), ptr) => {
return (InsertResult::Fit(handle.forget_node_type()), ptr);
}
(InsertResult::Split(split), val_ptr) => (split, val_ptr),
};
loop {
split = match split.left.ascend() {
Ok(parent) => match parent.insert(split.k, split.v, split.right) {
InsertResult::Fit(handle) => {
return (InsertResult::Fit(handle.forget_node_type()), val_ptr);
}
InsertResult::Split(split) => split,
},
Err(root) => {
return (InsertResult::Split(SplitResult { left: root, ..split }), val_ptr);
}
};
}
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge> {
/// Finds the node pointed to by this edge.
///
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
NodeRef {
height: self.node.height - 1,
node: unsafe {
(&*self.node.as_internal().edges.get_unchecked(self.idx).as_ptr()).as_ptr()
},
root: self.node.root,
_marker: PhantomData,
}
}
}
impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Immut<'a>, K, V, NodeType>, marker::KV> {
pub fn into_kv(self) -> (&'a K, &'a V) {
let keys = self.node.into_key_slice();
let vals = self.node.into_val_slice();
unsafe { (keys.get_unchecked(self.idx), vals.get_unchecked(self.idx)) }
}
}
impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
pub fn into_kv_mut(self) -> (&'a mut K, &'a mut V) {
unsafe {
let (keys, vals) = self.node.into_slices_mut();
(keys.get_unchecked_mut(self.idx), vals.get_unchecked_mut(self.idx))
}
}
}
impl<'a, K, V, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
pub fn kv_mut(&mut self) -> (&mut K, &mut V) {
unsafe {
let (keys, vals) = self.node.reborrow_mut().into_slices_mut();
(keys.get_unchecked_mut(self.idx), vals.get_unchecked_mut(self.idx))
}
}
/// Helps implementations of `split` for a particular `NodeType`,
/// by taking care of leaf data.
fn leafy_split(&mut self, new_node: &mut LeafNode<K, V>) -> (K, V, usize) {
unsafe {
let k = ptr::read(self.node.keys().get_unchecked(self.idx));
let v = ptr::read(self.node.vals().get_unchecked(self.idx));
let new_len = self.node.len() - self.idx - 1;
ptr::copy_nonoverlapping(
self.node.keys().as_ptr().add(self.idx + 1),
new_node.keys.as_mut_ptr() as *mut K,
new_len,
);
ptr::copy_nonoverlapping(
self.node.vals().as_ptr().add(self.idx + 1),
new_node.vals.as_mut_ptr() as *mut V,
new_len,
);
(*self.node.as_leaf_mut()).len = self.idx as u16;
new_node.len = new_len as u16;
(k, v, new_len)
}
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
/// Splits the underlying node into three parts:
///
/// - The node is truncated to only contain the key/value pairs to the left of
/// this handle.
/// - The key and value pointed to by this handle are extracted.
/// - All the key/value pairs to the right of this handle are put into a newly
/// allocated node.
pub fn split(mut self) -> (NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, K, V, Root<K, V>) {
unsafe {
let mut new_node = Box::new(LeafNode::new());
let (k, v, _) = self.leafy_split(&mut new_node);
(self.node, k, v, Root { node: BoxedNode::from_leaf(new_node), height: 0 })
}
}
/// Removes the key/value pair pointed to by this handle and returns it, along with the edge
/// between the now adjacent key/value pairs (if any) to the left and right of this handle.
pub fn remove(
mut self,
) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
unsafe {
let k = slice_remove(self.node.keys_mut(), self.idx);
let v = slice_remove(self.node.vals_mut(), self.idx);
(*self.node.as_leaf_mut()).len -= 1;
((k, v), self.left_edge())
}
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
/// Splits the underlying node into three parts:
///
/// - The node is truncated to only contain the edges and key/value pairs to the
/// left of this handle.
/// - The key and value pointed to by this handle are extracted.
/// - All the edges and key/value pairs to the right of this handle are put into
/// a newly allocated node.
pub fn split(mut self) -> (NodeRef<marker::Mut<'a>, K, V, marker::Internal>, K, V, Root<K, V>) {
unsafe {
let mut new_node = Box::new(InternalNode::new());
let (k, v, new_len) = self.leafy_split(&mut new_node.data);
let height = self.node.height;
ptr::copy_nonoverlapping(
self.node.as_internal().edges.as_ptr().add(self.idx + 1),
new_node.edges.as_mut_ptr(),
new_len + 1,
);
let mut new_root = Root { node: BoxedNode::from_internal(new_node), height };
for i in 0..(new_len + 1) {
Handle::new_edge(new_root.node_as_mut().cast_unchecked(), i).correct_parent_link();
}
(self.node, k, v, new_root)
}
}
/// Returns `true` if it is valid to call `.merge()`, i.e., whether there is enough room in
/// a node to hold the combination of the nodes to the left and right of this handle along
/// with the key/value pair at this handle.
pub fn can_merge(&self) -> bool {
(self.reborrow().left_edge().descend().len()
+ self.reborrow().right_edge().descend().len()
+ 1)
<= CAPACITY
}
/// Combines the node immediately to the left of this handle, the key/value pair pointed
/// to by this handle, and the node immediately to the right of this handle into one new
/// child of the underlying node, returning an edge referencing that new child.
///
/// Assumes that this edge `.can_merge()`.
pub fn merge(
mut self,
) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
let self1 = unsafe { ptr::read(&self) };
let self2 = unsafe { ptr::read(&self) };
let mut left_node = self1.left_edge().descend();
let left_len = left_node.len();
let mut right_node = self2.right_edge().descend();
let right_len = right_node.len();
// necessary for correctness, but in a private module
assert!(left_len + right_len < CAPACITY);
unsafe {
ptr::write(
left_node.keys_mut().get_unchecked_mut(left_len),
slice_remove(self.node.keys_mut(), self.idx),
);
ptr::copy_nonoverlapping(
right_node.keys().as_ptr(),
left_node.keys_mut().as_mut_ptr().add(left_len + 1),
right_len,
);
ptr::write(
left_node.vals_mut().get_unchecked_mut(left_len),
slice_remove(self.node.vals_mut(), self.idx),
);
ptr::copy_nonoverlapping(
right_node.vals().as_ptr(),
left_node.vals_mut().as_mut_ptr().add(left_len + 1),
right_len,
);
slice_remove(&mut self.node.as_internal_mut().edges, self.idx + 1);
for i in self.idx + 1..self.node.len() {
Handle::new_edge(self.node.reborrow_mut(), i).correct_parent_link();
}
(*self.node.as_leaf_mut()).len -= 1;
(*left_node.as_leaf_mut()).len += right_len as u16 + 1;
let layout = if self.node.height > 1 {
ptr::copy_nonoverlapping(
right_node.cast_unchecked().as_internal().edges.as_ptr(),
left_node
.cast_unchecked()
.as_internal_mut()
.edges
.as_mut_ptr()
.add(left_len + 1),
right_len + 1,
);
for i in left_len + 1..left_len + right_len + 2 {
Handle::new_edge(left_node.cast_unchecked().reborrow_mut(), i)
.correct_parent_link();
}
Layout::new::<InternalNode<K, V>>()
} else {
Layout::new::<LeafNode<K, V>>()
};
Global.dealloc(right_node.node.cast(), layout);
Handle::new_edge(self.node, self.idx)
}
}
/// This removes a key/value pair from the left child and places it in the key/value storage
/// pointed to by this handle while pushing the old key/value pair of this handle into the right
/// child.
pub fn steal_left(&mut self) {
unsafe {
let (k, v, edge) = self.reborrow_mut().left_edge().descend().pop();
let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k);
let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v);
match self.reborrow_mut().right_edge().descend().force() {
ForceResult::Leaf(mut leaf) => leaf.push_front(k, v),
ForceResult::Internal(mut internal) => internal.push_front(k, v, edge.unwrap()),
}
}
}
/// This removes a key/value pair from the right child and places it in the key/value storage
/// pointed to by this handle while pushing the old key/value pair of this handle into the left
/// child.
pub fn steal_right(&mut self) {
unsafe {
let (k, v, edge) = self.reborrow_mut().right_edge().descend().pop_front();
let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k);
let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v);
match self.reborrow_mut().left_edge().descend().force() {
ForceResult::Leaf(mut leaf) => leaf.push(k, v),
ForceResult::Internal(mut internal) => internal.push(k, v, edge.unwrap()),
}
}
}
/// This does stealing similar to `steal_left` but steals multiple elements at once.
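/// For example, with `count == 2`: the left child gives up its two rightmost
/// pairs; the leftmost of those replaces the key/value pair at this handle,
/// while the other one and the old pair from this handle become the two
/// leftmost pairs of the right child, in that order.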
pub fn bulk_steal_left(&mut self, count: usize) {
unsafe {
let mut left_node = ptr::read(self).left_edge().descend();
let left_len = left_node.len();
let mut right_node = ptr::read(self).right_edge().descend();
let right_len = right_node.len();
// Make sure that we may steal safely.
assert!(right_len + count <= CAPACITY);
assert!(left_len >= count);
let new_left_len = left_len - count;
// Move data.
{
let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
let parent_kv = {
let kv = self.reborrow_mut().into_kv_mut();
(kv.0 as *mut K, kv.1 as *mut V)
};
// Make room for stolen elements in the right child.
ptr::copy(right_kv.0, right_kv.0.add(count), right_len);
ptr::copy(right_kv.1, right_kv.1.add(count), right_len);
// Move elements from the left child to the right one.
move_kv(left_kv, new_left_len + 1, right_kv, 0, count - 1);
// Move parent's key/value pair to the right child.
move_kv(parent_kv, 0, right_kv, count - 1, 1);
// Move the left-most stolen pair to the parent.
move_kv(left_kv, new_left_len, parent_kv, 0, 1);
}
(*left_node.reborrow_mut().as_leaf_mut()).len -= count as u16;
(*right_node.reborrow_mut().as_leaf_mut()).len += count as u16;
match (left_node.force(), right_node.force()) {
(ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
// Make room for stolen edges.
let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
ptr::copy(right_edges, right_edges.add(count), right_len + 1);
right.correct_childrens_parent_links(count, count + right_len + 1);
move_edges(left, new_left_len + 1, right, 0, count);
}
(ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
_ => {
unreachable!();
}
}
}
}
/// The symmetric clone of `bulk_steal_left`.
pub fn bulk_steal_right(&mut self, count: usize) {
unsafe {
let mut left_node = ptr::read(self).left_edge().descend();
let left_len = left_node.len();
let mut right_node = ptr::read(self).right_edge().descend();
let right_len = right_node.len();
// Make sure that we may steal safely.
assert!(left_len + count <= CAPACITY);
assert!(right_len >= count);
let new_right_len = right_len - count;
// Move data.
{
let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
let parent_kv = {
let kv = self.reborrow_mut().into_kv_mut();
(kv.0 as *mut K, kv.1 as *mut V)
};
// Move parent's key/value pair to the left child.
move_kv(parent_kv, 0, left_kv, left_len, 1);
// Move elements from the right child to the left one.
move_kv(right_kv, 0, left_kv, left_len + 1, count - 1);
// Move the right-most stolen pair to the parent.
move_kv(right_kv, count - 1, parent_kv, 0, 1);
// Fix right indexing
ptr::copy(right_kv.0.add(count), right_kv.0, new_right_len);
ptr::copy(right_kv.1.add(count), right_kv.1, new_right_len);
}
(*left_node.reborrow_mut().as_leaf_mut()).len += count as u16;
(*right_node.reborrow_mut().as_leaf_mut()).len -= count as u16;
match (left_node.force(), right_node.force()) {
(ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
move_edges(right.reborrow_mut(), 0, left, left_len + 1, count);
// Fix right indexing.
let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
ptr::copy(right_edges.add(count), right_edges, new_right_len + 1);
right.correct_childrens_parent_links(0, new_right_len + 1);
}
(ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
_ => {
unreachable!();
}
}
}
}
}
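/// Moves `count` key/value pairs from one raw key/value buffer pair to
/// another. Unsafe because the caller must ensure both ranges are in bounds
/// of their nodes' buffers and that the source and destination regions do
/// not overlap (the copies are nonoverlapping).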
unsafe fn move_kv<K, V>(
source: (*mut K, *mut V),
source_offset: usize,
dest: (*mut K, *mut V),
dest_offset: usize,
count: usize,
) {
unsafe {
ptr::copy_nonoverlapping(source.0.add(source_offset), dest.0.add(dest_offset), count);
ptr::copy_nonoverlapping(source.1.add(source_offset), dest.1.add(dest_offset), count);
}
}
// Source and destination must have the same height.
unsafe fn move_edges<K, V>(
mut source: NodeRef<marker::Mut<'_>, K, V, marker::Internal>,
source_offset: usize,
mut dest: NodeRef<marker::Mut<'_>, K, V, marker::Internal>,
dest_offset: usize,
count: usize,
) {
let source_ptr = source.as_internal_mut().edges.as_mut_ptr();
let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr();
unsafe {
ptr::copy_nonoverlapping(source_ptr.add(source_offset), dest_ptr.add(dest_offset), count);
dest.correct_childrens_parent_links(dest_offset, dest_offset + count);
}
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Leaf> {
/// Removes any static information asserting that this node is a `Leaf` node.
pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
/// Removes any static information asserting that this node is an `Internal` node.
pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
pub fn forget_node_type(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::Edge> {
unsafe { Handle::new_edge(self.node.forget_type(), self.idx) }
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge> {
pub fn forget_node_type(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::Edge> {
unsafe { Handle::new_edge(self.node.forget_type(), self.idx) }
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::KV> {
pub fn forget_node_type(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> {
unsafe { Handle::new_kv(self.node.forget_type(), self.idx) }
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV> {
pub fn forget_node_type(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> {
unsafe { Handle::new_kv(self.node.forget_type(), self.idx) }
}
}
impl<BorrowType, K, V, HandleType>
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, HandleType>
{
/// Checks whether the underlying node is an `Internal` node or a `Leaf` node.
pub fn force(
self,
) -> ForceResult<
Handle<NodeRef<BorrowType, K, V, marker::Leaf>, HandleType>,
Handle<NodeRef<BorrowType, K, V, marker::Internal>, HandleType>,
> {
match self.node.force() {
ForceResult::Leaf(node) => {
ForceResult::Leaf(Handle { node, idx: self.idx, _marker: PhantomData })
}
ForceResult::Internal(node) => {
ForceResult::Internal(Handle { node, idx: self.idx, _marker: PhantomData })
}
}
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
/// Move the suffix after `self` from one node to another one. `right` must be empty.
/// The first edge of `right` remains unchanged.
pub fn move_suffix(
&mut self,
right: &mut NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
) {
unsafe {
let left_new_len = self.idx;
let mut left_node = self.reborrow_mut().into_node();
let right_new_len = left_node.len() - left_new_len;
let mut right_node = right.reborrow_mut();
assert!(right_node.len() == 0);
assert!(left_node.height == right_node.height);
if right_new_len > 0 {
let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
move_kv(left_kv, left_new_len, right_kv, 0, right_new_len);
(*left_node.reborrow_mut().as_leaf_mut()).len = left_new_len as u16;
(*right_node.reborrow_mut().as_leaf_mut()).len = right_new_len as u16;
match (left_node.force(), right_node.force()) {
(ForceResult::Internal(left), ForceResult::Internal(right)) => {
move_edges(left, left_new_len + 1, right, 1, right_new_len);
}
(ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
_ => {
unreachable!();
}
}
}
}
}
}
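/// The leaf-typed or internal-typed view of a node or handle, as produced by
/// the `force` methods.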
pub enum ForceResult<Leaf, Internal> {
Leaf(Leaf),
Internal(Internal),
}
/// Result of insertion, when a node needed to expand beyond its capacity.
/// Does not distinguish between `Leaf` and `Internal` because `Root` doesn't.
pub struct SplitResult<'a, K, V> {
// Altered node in existing tree with elements and edges that belong to the left of `k`.
pub left: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
// Some key and value split off, to be inserted elsewhere.
pub k: K,
pub v: V,
// Owned, unattached, new node with elements and edges that belong to the right of `k`.
pub right: Root<K, V>,
}
pub enum InsertResult<'a, K, V, Type> {
Fit(Handle<NodeRef<marker::Mut<'a>, K, V, Type>, marker::KV>),
Split(SplitResult<'a, K, V>),
}
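/// Zero-sized marker types that encode, at the type level, how a `NodeRef` is
/// borrowed (`Owned`, `Immut`, `Mut`), which kind of node it points to
/// (`Leaf`, `Internal`, `LeafOrInternal`), and what a `Handle` points at
/// (`KV`, `Edge`).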
pub mod marker {
use core::marker::PhantomData;
pub enum Leaf {}
pub enum Internal {}
pub enum LeafOrInternal {}
pub enum Owned {}
pub struct Immut<'a>(PhantomData<&'a ()>);
pub struct Mut<'a>(PhantomData<&'a mut ()>);
pub enum KV {}
pub enum Edge {}
}
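/// Inserts `val` at `idx`, shifting `slice[idx..]` one position to the right.
/// Unsafe because the buffer behind the slice must have room for one element
/// past `slice.len()`; the element previously at `slice[len - 1]` ends up
/// just beyond the end of the slice.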
unsafe fn slice_insert<T>(slice: &mut [T], idx: usize, val: T) {
unsafe {
ptr::copy(slice.as_ptr().add(idx), slice.as_mut_ptr().add(idx + 1), slice.len() - idx);
ptr::write(slice.get_unchecked_mut(idx), val);
}
}
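/// Removes and returns the element at `idx`, shifting `slice[idx + 1..]` one
/// position to the left. Unsafe because `idx` must be in bounds; afterwards
/// the slot at `slice[len - 1]` is logically uninitialized.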
unsafe fn slice_remove<T>(slice: &mut [T], idx: usize) -> T {
unsafe {
let ret = ptr::read(slice.get_unchecked(idx));
ptr::copy(slice.as_ptr().add(idx + 1), slice.as_mut_ptr().add(idx), slice.len() - idx - 1);
ret
}
}
admin-hike-images-feature.module.ts
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';

@NgModule({
  imports: [CommonModule],
})
export class AdminHikeImagesFeatureModule {}