file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
urls.py |
import logging
from django.conf.urls.defaults import *
from piston.resource import Resource as R
from piston.authentication import HttpBasicAuthentication
import handlers
l = logging.getLogger(__name__)
class Auth(HttpBasicAuthentication):
    """HTTP Basic authentication that additionally requires the
    'dreamuserdb.api' permission on the authenticated user.

    Returns the user object on success, or False to reject the request.
    """

    # NOTE: the method name was missing in the source dump; it must be
    # `is_authenticated` — the body delegates to
    # super().is_authenticated(), the piston authentication hook.
    def is_authenticated(self, request):
        # Let piston validate the Basic-auth credentials first.
        user = super(Auth, self).is_authenticated(request)
        # Accept only users that also hold the API permission.
        if user and request.user.has_perm('dreamuserdb.api'):
            return user
        return False
# Shared Basic-auth instance protecting every API resource below.
auth = Auth(realm='UserDB API')
# Piston REST resources, one per handler, all guarded by `auth`.
organisation = R(handlers.Organisation, auth)
group = R(handlers.Group, auth)
role = R(handlers.Role, auth)
user = R(handlers.User, auth)
authenticate = R(handlers.Authenticate, auth)
# URL routes. Each resource exposes a list form (trailing /$) and a
# detail-by-id form; role/group/user additionally accept a filtered
# list form such as /role/organisation/<id>/, where the `filter`
# group names the relation to filter by.
urlpatterns = patterns('',
url(r'^organisation/(?P<id>[^/]+)/$', organisation),
url(r'^organisation/$', organisation),
url(r'^role/(?P<filter>organisation)/(?P<id>[^/]+)/$', role),
url(r'^role/(?P<id>[^/]+)/$', role),
url(r'^role/$', role),
url(r'^group/(?P<filter>organisation)/(?P<id>[^/]+)/$', group),
url(r'^group/(?P<id>[^/]+)/$', group),
url(r'^group/$', group),
url(r'^user/(?P<filter>organisation)/(?P<id>[^/]+)/$', user),
url(r'^user/(?P<filter>role)/(?P<id>[^/]+)/$', user),
url(r'^user/(?P<filter>group)/(?P<id>[^/]+)/$', user),
url(r'^user/(?P<id>[^/]+)/$', user),
url(r'^user/$', user),
# Authenticate
url(r'^authenticate/$', authenticate),
)
| is_authenticated |
radial.go | // Package radial provides conversion functions for angle units.
package radial
import (
"math"
)
// DegToRad converts degrees to radians.
func DegToRad(deg float64) float64 {
return (deg * math.Pi) / 180
}
// RadToDeg converts radians to degrees.
func RadToDeg(rad float64) float64 | {
return (rad * 180) / math.Pi
} |
|
main.rs | use crate::exercise::{Exercise, ExerciseList};
use crate::run::run;
use crate::verify::verify;
use clap::{crate_version, App, Arg, SubCommand};
use notify::DebouncedEvent;
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::ffi::OsStr;
use std::fs;
use std::path::Path;
use std::sync::mpsc::channel; | mod run;
mod verify;
// Entry point for the rustlings CLI: parses the subcommand
// (run / verify / watch), loads the exercise list from info.toml,
// and dispatches to the matching action.
fn main() {
// Build the clap CLI definition: three subcommands, where `run`
// takes a required file argument and an optional --test flag.
let matches = App::new("rustlings")
.version(crate_version!())
.author("Olivia Hugger, Carol Nichols")
.about("Rustlings is a collection of small exercises to get you used to writing and reading Rust code")
.subcommand(SubCommand::with_name("verify").alias("v").about("Verifies all exercises according to the recommended order"))
.subcommand(SubCommand::with_name("watch").alias("w").about("Reruns `verify` when files were edited"))
.subcommand(
SubCommand::with_name("run")
.alias("r")
.about("Runs/Tests a single exercise")
.arg(Arg::with_name("file").required(true).index(1))
.arg(Arg::with_name("test").short("t").long("test").help("Run the file as a test")),
)
.get_matches();
// No subcommand given: print the ASCII-art welcome banner.
if None == matches.subcommand_name() {
println!();
println!(r#" welcome to... "#);
println!(r#" _ _ _ "#);
println!(r#" _ __ _ _ ___| |_| (_)_ __ __ _ ___ "#);
println!(r#" | '__| | | / __| __| | | '_ \ / _` / __| "#);
println!(r#" | | | |_| \__ \ |_| | | | | | (_| \__ \ "#);
println!(r#" |_| \__,_|___/\__|_|_|_| |_|\__, |___/ "#);
println!(r#" |___/ "#);
println!();
}
// The tool must be started from the repository root, which is
// detected by the presence of info.toml in the working directory.
if !Path::new("info.toml").exists() {
println!(
"{} must be run from the rustlings directory",
std::env::current_exe().unwrap().to_str().unwrap()
);
println!("Try `cd rustlings/`!");
std::process::exit(1);
}
// Load the exercise metadata from info.toml.
let toml_str = &fs::read_to_string("info.toml").unwrap();
let exercises = toml::from_str::<ExerciseList>(toml_str).unwrap().exercises;
// `run`: find the exercise whose path matches the given file and run it.
if let Some(ref matches) = matches.subcommand_matches("run") {
let filename = matches.value_of("file").unwrap_or_else(|| {
println!("Please supply a file name!");
std::process::exit(1);
});
// Match by comparing the canonicalized user path against each
// exercise's relative path suffix.
let matching_exercise = |e: &&Exercise| {
Path::new(filename)
.canonicalize()
.map(|p| p.ends_with(&e.path))
.unwrap_or(false)
};
let exercise = exercises.iter().find(matching_exercise).unwrap_or_else(|| {
println!("No exercise found for your file name!");
std::process::exit(1)
});
// Propagate a failed run as a non-zero exit code.
run(&exercise).unwrap_or_else(|_| std::process::exit(1));
}
// `verify`: check all exercises in the recommended order.
if matches.subcommand_matches("verify").is_some() {
verify(&exercises).unwrap_or_else(|_| std::process::exit(1));
}
// `watch`: re-run verification whenever exercise files change.
if matches.subcommand_matches("watch").is_some() {
watch(&exercises).unwrap();
}
// No subcommand: after the banner, print the default help text.
if matches.subcommand_name().is_none() {
let text = fs::read_to_string("default_out.txt").unwrap();
println!("{}", text);
}
}
// Watches ./exercises recursively and, whenever a .rs file is created,
// chmod-ed, or written, re-runs `verify` starting from the changed
// exercise (earlier exercises are skipped). Loops forever; only
// watcher-setup errors are returned via `?`.
fn watch(exercises: &[Exercise]) -> notify::Result<()> {
let (tx, rx) = channel();
// Debounce filesystem events with a 2-second window.
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2))?;
watcher.watch(Path::new("./exercises"), RecursiveMode::Recursive)?;
// Initial verification pass; its result is deliberately ignored so
// watching continues even when exercises currently fail.
let _ignored = verify(exercises.iter());
loop {
match rx.recv() {
Ok(event) => match event {
DebouncedEvent::Create(b) | DebouncedEvent::Chmod(b) | DebouncedEvent::Write(b) => {
// Only react to Rust source files.
if b.extension() == Some(OsStr::new("rs")) {
println!("----------**********----------\n");
let filepath = b.as_path().canonicalize().unwrap();
// Skip every exercise before the changed file, then
// verify from that point onward.
let exercise = exercises
.iter()
.skip_while(|e| !filepath.ends_with(&e.path));
let _ignored = verify(exercise);
}
}
_ => {}
},
Err(e) => println!("watch error: {:?}", e),
}
}
} | use std::time::Duration;
mod exercise; |
pdpb.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: pdpb.proto
package pdpb
import (
"fmt"
"io"
"math"
proto "github.com/golang/protobuf/proto"
_ "github.com/gogo/protobuf/gogoproto"
eraftpb "github.com/pingcap/kvproto/pkg/eraftpb"
metapb "github.com/pingcap/kvproto/pkg/metapb"
replication_modepb "github.com/pingcap/kvproto/pkg/replication_modepb"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ErrorType int32
const (
ErrorType_OK ErrorType = 0
ErrorType_UNKNOWN ErrorType = 1
ErrorType_NOT_BOOTSTRAPPED ErrorType = 2
ErrorType_STORE_TOMBSTONE ErrorType = 3
ErrorType_ALREADY_BOOTSTRAPPED ErrorType = 4
ErrorType_INCOMPATIBLE_VERSION ErrorType = 5
ErrorType_REGION_NOT_FOUND ErrorType = 6
)
var ErrorType_name = map[int32]string{
0: "OK",
1: "UNKNOWN",
2: "NOT_BOOTSTRAPPED",
3: "STORE_TOMBSTONE",
4: "ALREADY_BOOTSTRAPPED",
5: "INCOMPATIBLE_VERSION",
6: "REGION_NOT_FOUND",
}
var ErrorType_value = map[string]int32{
"OK": 0,
"UNKNOWN": 1,
"NOT_BOOTSTRAPPED": 2,
"STORE_TOMBSTONE": 3,
"ALREADY_BOOTSTRAPPED": 4,
"INCOMPATIBLE_VERSION": 5,
"REGION_NOT_FOUND": 6,
}
func (x ErrorType) String() string {
return proto.EnumName(ErrorType_name, int32(x))
}
func (ErrorType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{0}
}
type CheckPolicy int32
const (
CheckPolicy_SCAN CheckPolicy = 0
CheckPolicy_APPROXIMATE CheckPolicy = 1
CheckPolicy_USEKEY CheckPolicy = 2
)
var CheckPolicy_name = map[int32]string{
0: "SCAN",
1: "APPROXIMATE",
2: "USEKEY",
}
var CheckPolicy_value = map[string]int32{
"SCAN": 0,
"APPROXIMATE": 1,
"USEKEY": 2,
}
func (x CheckPolicy) String() string {
return proto.EnumName(CheckPolicy_name, int32(x))
}
func (CheckPolicy) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{1}
}
type OperatorStatus int32
const (
OperatorStatus_SUCCESS OperatorStatus = 0
OperatorStatus_TIMEOUT OperatorStatus = 1
OperatorStatus_CANCEL OperatorStatus = 2
OperatorStatus_REPLACE OperatorStatus = 3
OperatorStatus_RUNNING OperatorStatus = 4
)
var OperatorStatus_name = map[int32]string{
0: "SUCCESS",
1: "TIMEOUT",
2: "CANCEL",
3: "REPLACE",
4: "RUNNING",
}
var OperatorStatus_value = map[string]int32{
"SUCCESS": 0,
"TIMEOUT": 1,
"CANCEL": 2,
"REPLACE": 3,
"RUNNING": 4,
}
func (x OperatorStatus) String() string {
return proto.EnumName(OperatorStatus_name, int32(x))
}
func (OperatorStatus) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{2}
}
type QueryKind int32
const (
QueryKind_Others QueryKind = 0
QueryKind_GC QueryKind = 1
QueryKind_Get QueryKind = 2
QueryKind_Scan QueryKind = 3
QueryKind_Coprocessor QueryKind = 4
QueryKind_Delete QueryKind = 5
QueryKind_DeleteRange QueryKind = 6
QueryKind_Put QueryKind = 7
QueryKind_Prewrite QueryKind = 8
QueryKind_AcquirePessimisticLock QueryKind = 9
QueryKind_Commit QueryKind = 10
QueryKind_Rollback QueryKind = 11
)
var QueryKind_name = map[int32]string{
0: "Others",
1: "GC",
2: "Get",
3: "Scan",
4: "Coprocessor",
5: "Delete",
6: "DeleteRange",
7: "Put",
8: "Prewrite",
9: "AcquirePessimisticLock",
10: "Commit",
11: "Rollback",
}
var QueryKind_value = map[string]int32{
"Others": 0,
"GC": 1,
"Get": 2,
"Scan": 3,
"Coprocessor": 4,
"Delete": 5,
"DeleteRange": 6,
"Put": 7,
"Prewrite": 8,
"AcquirePessimisticLock": 9,
"Commit": 10,
"Rollback": 11,
}
func (x QueryKind) String() string {
return proto.EnumName(QueryKind_name, int32(x))
}
func (QueryKind) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{3}
}
type RequestHeader struct {
// cluster_id is the ID of the cluster which be sent to.
ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
// sender_id is the ID of the sender server, also member ID or etcd ID.
SenderId uint64 `protobuf:"varint,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RequestHeader) Reset() { *m = RequestHeader{} }
func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
func (*RequestHeader) ProtoMessage() {}
func (*RequestHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{0}
}
func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *RequestHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_RequestHeader.Merge(dst, src)
}
func (m *RequestHeader) XXX_Size() int {
return m.Size()
}
func (m *RequestHeader) XXX_DiscardUnknown() {
xxx_messageInfo_RequestHeader.DiscardUnknown(m)
}
var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
func (m *RequestHeader) GetClusterId() uint64 {
if m != nil {
return m.ClusterId
}
return 0
}
func (m *RequestHeader) GetSenderId() uint64 {
if m != nil {
return m.SenderId
}
return 0
}
type ResponseHeader struct {
// cluster_id is the ID of the cluster which sent the response.
ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
func (*ResponseHeader) ProtoMessage() {}
func (*ResponseHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{1}
}
func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ResponseHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResponseHeader.Merge(dst, src)
}
func (m *ResponseHeader) XXX_Size() int {
return m.Size()
}
func (m *ResponseHeader) XXX_DiscardUnknown() {
xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
}
var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
func (m *ResponseHeader) GetClusterId() uint64 {
if m != nil {
return m.ClusterId
}
return 0
}
func (m *ResponseHeader) GetError() *Error {
if m != nil {
return m.Error
}
return nil
}
type Error struct {
Type ErrorType `protobuf:"varint,1,opt,name=type,proto3,enum=pdpb.ErrorType" json:"type,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{2}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(dst, src)
}
func (m *Error) XXX_Size() int {
return m.Size()
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetType() ErrorType {
if m != nil {
return m.Type
}
return ErrorType_OK
}
func (m *Error) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
type TsoRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
DcLocation string `protobuf:"bytes,3,opt,name=dc_location,json=dcLocation,proto3" json:"dc_location,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TsoRequest) Reset() { *m = TsoRequest{} }
func (m *TsoRequest) String() string { return proto.CompactTextString(m) }
func (*TsoRequest) ProtoMessage() {}
func (*TsoRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{3}
}
func (m *TsoRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TsoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_TsoRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *TsoRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_TsoRequest.Merge(dst, src)
}
func (m *TsoRequest) XXX_Size() int {
return m.Size()
}
func (m *TsoRequest) XXX_DiscardUnknown() {
xxx_messageInfo_TsoRequest.DiscardUnknown(m)
}
var xxx_messageInfo_TsoRequest proto.InternalMessageInfo
func (m *TsoRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *TsoRequest) GetCount() uint32 {
if m != nil {
return m.Count
}
return 0
}
func (m *TsoRequest) GetDcLocation() string {
if m != nil {
return m.DcLocation
}
return ""
}
type Timestamp struct {
Physical int64 `protobuf:"varint,1,opt,name=physical,proto3" json:"physical,omitempty"`
Logical int64 `protobuf:"varint,2,opt,name=logical,proto3" json:"logical,omitempty"`
// Number of suffix bits used for global distinction,
// PD client will use this to compute a TSO's logical part.
SuffixBits uint32 `protobuf:"varint,3,opt,name=suffix_bits,json=suffixBits,proto3" json:"suffix_bits,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{4}
}
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(dst, src)
}
func (m *Timestamp) XXX_Size() int {
return m.Size()
}
func (m *Timestamp) XXX_DiscardUnknown() {
xxx_messageInfo_Timestamp.DiscardUnknown(m)
}
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *Timestamp) GetPhysical() int64 {
if m != nil {
return m.Physical
}
return 0
}
func (m *Timestamp) GetLogical() int64 {
if m != nil {
return m.Logical
}
return 0
}
func (m *Timestamp) GetSuffixBits() uint32 {
if m != nil {
return m.SuffixBits
}
return 0
}
type TsoResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
Timestamp *Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TsoResponse) Reset() { *m = TsoResponse{} }
func (m *TsoResponse) String() string { return proto.CompactTextString(m) }
func (*TsoResponse) ProtoMessage() {}
func (*TsoResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{5}
}
func (m *TsoResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TsoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_TsoResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *TsoResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_TsoResponse.Merge(dst, src)
}
func (m *TsoResponse) XXX_Size() int {
return m.Size()
}
func (m *TsoResponse) XXX_DiscardUnknown() {
xxx_messageInfo_TsoResponse.DiscardUnknown(m)
}
var xxx_messageInfo_TsoResponse proto.InternalMessageInfo
func (m *TsoResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *TsoResponse) GetCount() uint32 {
if m != nil {
return m.Count
}
return 0
}
func (m *TsoResponse) GetTimestamp() *Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
type BootstrapRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"`
Region *metapb.Region `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BootstrapRequest) Reset() { *m = BootstrapRequest{} }
func (m *BootstrapRequest) String() string { return proto.CompactTextString(m) }
func (*BootstrapRequest) ProtoMessage() {}
func (*BootstrapRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{6}
}
func (m *BootstrapRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BootstrapRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BootstrapRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *BootstrapRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_BootstrapRequest.Merge(dst, src)
}
func (m *BootstrapRequest) XXX_Size() int {
return m.Size()
}
func (m *BootstrapRequest) XXX_DiscardUnknown() {
xxx_messageInfo_BootstrapRequest.DiscardUnknown(m)
}
var xxx_messageInfo_BootstrapRequest proto.InternalMessageInfo
func (m *BootstrapRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *BootstrapRequest) GetStore() *metapb.Store {
if m != nil {
return m.Store
}
return nil
}
func (m *BootstrapRequest) GetRegion() *metapb.Region {
if m != nil {
return m.Region
}
return nil
}
type BootstrapResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
ReplicationStatus *replication_modepb.ReplicationStatus `protobuf:"bytes,2,opt,name=replication_status,json=replicationStatus" json:"replication_status,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BootstrapResponse) Reset() { *m = BootstrapResponse{} }
func (m *BootstrapResponse) String() string { return proto.CompactTextString(m) }
func (*BootstrapResponse) ProtoMessage() {}
func (*BootstrapResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{7}
}
func (m *BootstrapResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BootstrapResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BootstrapResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *BootstrapResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_BootstrapResponse.Merge(dst, src)
}
func (m *BootstrapResponse) XXX_Size() int {
return m.Size()
}
func (m *BootstrapResponse) XXX_DiscardUnknown() {
xxx_messageInfo_BootstrapResponse.DiscardUnknown(m)
}
var xxx_messageInfo_BootstrapResponse proto.InternalMessageInfo
func (m *BootstrapResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *BootstrapResponse) GetReplicationStatus() *replication_modepb.ReplicationStatus {
if m != nil {
return m.ReplicationStatus
}
return nil
}
type IsBootstrappedRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IsBootstrappedRequest) Reset() { *m = IsBootstrappedRequest{} }
func (m *IsBootstrappedRequest) String() string { return proto.CompactTextString(m) }
func (*IsBootstrappedRequest) ProtoMessage() {}
func (*IsBootstrappedRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{8}
}
func (m *IsBootstrappedRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *IsBootstrappedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_IsBootstrappedRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *IsBootstrappedRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_IsBootstrappedRequest.Merge(dst, src)
}
func (m *IsBootstrappedRequest) XXX_Size() int {
return m.Size()
}
func (m *IsBootstrappedRequest) XXX_DiscardUnknown() {
xxx_messageInfo_IsBootstrappedRequest.DiscardUnknown(m)
}
var xxx_messageInfo_IsBootstrappedRequest proto.InternalMessageInfo
func (m *IsBootstrappedRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
type IsBootstrappedResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Bootstrapped bool `protobuf:"varint,2,opt,name=bootstrapped,proto3" json:"bootstrapped,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IsBootstrappedResponse) Reset() { *m = IsBootstrappedResponse{} }
func (m *IsBootstrappedResponse) String() string { return proto.CompactTextString(m) }
func (*IsBootstrappedResponse) ProtoMessage() {}
func (*IsBootstrappedResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{9}
}
func (m *IsBootstrappedResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *IsBootstrappedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_IsBootstrappedResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *IsBootstrappedResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_IsBootstrappedResponse.Merge(dst, src)
}
func (m *IsBootstrappedResponse) XXX_Size() int {
return m.Size()
}
func (m *IsBootstrappedResponse) XXX_DiscardUnknown() {
xxx_messageInfo_IsBootstrappedResponse.DiscardUnknown(m)
}
var xxx_messageInfo_IsBootstrappedResponse proto.InternalMessageInfo
func (m *IsBootstrappedResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *IsBootstrappedResponse) GetBootstrapped() bool {
if m != nil {
return m.Bootstrapped
}
return false
}
type AllocIDRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AllocIDRequest) Reset() { *m = AllocIDRequest{} }
func (m *AllocIDRequest) String() string { return proto.CompactTextString(m) }
func (*AllocIDRequest) ProtoMessage() {}
func (*AllocIDRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{10}
}
func (m *AllocIDRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *AllocIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_AllocIDRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *AllocIDRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AllocIDRequest.Merge(dst, src)
}
func (m *AllocIDRequest) XXX_Size() int {
return m.Size()
}
func (m *AllocIDRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AllocIDRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AllocIDRequest proto.InternalMessageInfo
func (m *AllocIDRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
type AllocIDResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AllocIDResponse) Reset() { *m = AllocIDResponse{} }
func (m *AllocIDResponse) String() string { return proto.CompactTextString(m) }
func (*AllocIDResponse) ProtoMessage() {}
func (*AllocIDResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{11}
}
func (m *AllocIDResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *AllocIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_AllocIDResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *AllocIDResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_AllocIDResponse.Merge(dst, src)
}
func (m *AllocIDResponse) XXX_Size() int {
return m.Size()
}
func (m *AllocIDResponse) XXX_DiscardUnknown() {
xxx_messageInfo_AllocIDResponse.DiscardUnknown(m)
}
var xxx_messageInfo_AllocIDResponse proto.InternalMessageInfo
func (m *AllocIDResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *AllocIDResponse) GetId() uint64 {
if m != nil {
return m.Id
}
return 0
}
type GetStoreRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetStoreRequest) Reset() { *m = GetStoreRequest{} }
func (m *GetStoreRequest) String() string { return proto.CompactTextString(m) }
func (*GetStoreRequest) ProtoMessage() {}
func (*GetStoreRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{12}
}
func (m *GetStoreRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetStoreRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetStoreRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetStoreRequest.Merge(dst, src)
}
func (m *GetStoreRequest) XXX_Size() int {
return m.Size()
}
func (m *GetStoreRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetStoreRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetStoreRequest proto.InternalMessageInfo
func (m *GetStoreRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetStoreRequest) GetStoreId() uint64 {
if m != nil {
return m.StoreId
}
return 0
}
type GetStoreResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"`
Stats *StoreStats `protobuf:"bytes,3,opt,name=stats" json:"stats,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetStoreResponse) Reset() { *m = GetStoreResponse{} }
func (m *GetStoreResponse) String() string { return proto.CompactTextString(m) }
func (*GetStoreResponse) ProtoMessage() {}
func (*GetStoreResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{13}
}
func (m *GetStoreResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetStoreResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetStoreResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetStoreResponse.Merge(dst, src)
}
func (m *GetStoreResponse) XXX_Size() int {
return m.Size()
}
func (m *GetStoreResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetStoreResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetStoreResponse proto.InternalMessageInfo
func (m *GetStoreResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetStoreResponse) GetStore() *metapb.Store {
if m != nil {
return m.Store
}
return nil
}
func (m *GetStoreResponse) GetStats() *StoreStats {
if m != nil {
return m.Stats
}
return nil
}
type PutStoreRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PutStoreRequest) Reset() { *m = PutStoreRequest{} }
func (m *PutStoreRequest) String() string { return proto.CompactTextString(m) }
func (*PutStoreRequest) ProtoMessage() {}
func (*PutStoreRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{14}
}
func (m *PutStoreRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PutStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PutStoreRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PutStoreRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PutStoreRequest.Merge(dst, src)
}
func (m *PutStoreRequest) XXX_Size() int {
return m.Size()
}
func (m *PutStoreRequest) XXX_DiscardUnknown() {
xxx_messageInfo_PutStoreRequest.DiscardUnknown(m)
}
var xxx_messageInfo_PutStoreRequest proto.InternalMessageInfo
func (m *PutStoreRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *PutStoreRequest) GetStore() *metapb.Store {
if m != nil {
return m.Store
}
return nil
}
type PutStoreResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
ReplicationStatus *replication_modepb.ReplicationStatus `protobuf:"bytes,2,opt,name=replication_status,json=replicationStatus" json:"replication_status,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PutStoreResponse) Reset() { *m = PutStoreResponse{} }
func (m *PutStoreResponse) String() string { return proto.CompactTextString(m) }
func (*PutStoreResponse) ProtoMessage() {}
func (*PutStoreResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{15}
}
func (m *PutStoreResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PutStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PutStoreResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PutStoreResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PutStoreResponse.Merge(dst, src)
}
func (m *PutStoreResponse) XXX_Size() int {
return m.Size()
}
func (m *PutStoreResponse) XXX_DiscardUnknown() {
xxx_messageInfo_PutStoreResponse.DiscardUnknown(m)
}
var xxx_messageInfo_PutStoreResponse proto.InternalMessageInfo
func (m *PutStoreResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *PutStoreResponse) GetReplicationStatus() *replication_modepb.ReplicationStatus {
if m != nil {
return m.ReplicationStatus
}
return nil
}
type GetAllStoresRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
// Do NOT return tombstone stores if set to true.
ExcludeTombstoneStores bool `protobuf:"varint,2,opt,name=exclude_tombstone_stores,json=excludeTombstoneStores,proto3" json:"exclude_tombstone_stores,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetAllStoresRequest) Reset() { *m = GetAllStoresRequest{} }
func (m *GetAllStoresRequest) String() string { return proto.CompactTextString(m) }
func (*GetAllStoresRequest) ProtoMessage() {}
func (*GetAllStoresRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{16}
}
func (m *GetAllStoresRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetAllStoresRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetAllStoresRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetAllStoresRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetAllStoresRequest.Merge(dst, src)
}
func (m *GetAllStoresRequest) XXX_Size() int {
return m.Size()
}
func (m *GetAllStoresRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetAllStoresRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetAllStoresRequest proto.InternalMessageInfo
func (m *GetAllStoresRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetAllStoresRequest) GetExcludeTombstoneStores() bool {
if m != nil {
return m.ExcludeTombstoneStores
}
return false
}
type GetAllStoresResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Stores []*metapb.Store `protobuf:"bytes,2,rep,name=stores" json:"stores,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetAllStoresResponse) Reset() { *m = GetAllStoresResponse{} }
func (m *GetAllStoresResponse) String() string { return proto.CompactTextString(m) }
func (*GetAllStoresResponse) ProtoMessage() {}
func (*GetAllStoresResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{17}
}
func (m *GetAllStoresResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetAllStoresResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetAllStoresResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetAllStoresResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetAllStoresResponse.Merge(dst, src)
}
func (m *GetAllStoresResponse) XXX_Size() int {
return m.Size()
}
func (m *GetAllStoresResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetAllStoresResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetAllStoresResponse proto.InternalMessageInfo
func (m *GetAllStoresResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetAllStoresResponse) GetStores() []*metapb.Store {
if m != nil {
return m.Stores
}
return nil
}
type GetRegionRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
RegionKey []byte `protobuf:"bytes,2,opt,name=region_key,json=regionKey,proto3" json:"region_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetRegionRequest) Reset() { *m = GetRegionRequest{} }
func (m *GetRegionRequest) String() string { return proto.CompactTextString(m) }
func (*GetRegionRequest) ProtoMessage() {}
func (*GetRegionRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{18}
}
func (m *GetRegionRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetRegionRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetRegionRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetRegionRequest.Merge(dst, src)
}
func (m *GetRegionRequest) XXX_Size() int {
return m.Size()
}
func (m *GetRegionRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetRegionRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetRegionRequest proto.InternalMessageInfo
func (m *GetRegionRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetRegionRequest) GetRegionKey() []byte {
if m != nil {
return m.RegionKey
}
return nil
}
type GetRegionResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"`
Leader *metapb.Peer `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"`
// Leader considers that these peers are down.
DownPeers []*PeerStats `protobuf:"bytes,5,rep,name=down_peers,json=downPeers" json:"down_peers,omitempty"`
// Pending peers are the peers that the leader can't consider as
// working followers.
PendingPeers []*metapb.Peer `protobuf:"bytes,6,rep,name=pending_peers,json=pendingPeers" json:"pending_peers,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetRegionResponse) Reset() { *m = GetRegionResponse{} }
func (m *GetRegionResponse) String() string { return proto.CompactTextString(m) }
func (*GetRegionResponse) ProtoMessage() {}
func (*GetRegionResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{19}
}
func (m *GetRegionResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetRegionResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetRegionResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetRegionResponse.Merge(dst, src)
}
func (m *GetRegionResponse) XXX_Size() int {
return m.Size()
}
func (m *GetRegionResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetRegionResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetRegionResponse proto.InternalMessageInfo
func (m *GetRegionResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetRegionResponse) GetRegion() *metapb.Region {
if m != nil {
return m.Region
}
return nil
}
func (m *GetRegionResponse) GetLeader() *metapb.Peer {
if m != nil {
return m.Leader
}
return nil
}
func (m *GetRegionResponse) GetDownPeers() []*PeerStats {
if m != nil {
return m.DownPeers
}
return nil
}
func (m *GetRegionResponse) GetPendingPeers() []*metapb.Peer {
if m != nil {
return m.PendingPeers
}
return nil
}
type GetRegionByIDRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetRegionByIDRequest) Reset() { *m = GetRegionByIDRequest{} }
func (m *GetRegionByIDRequest) String() string { return proto.CompactTextString(m) }
func (*GetRegionByIDRequest) ProtoMessage() {}
func (*GetRegionByIDRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{20}
}
func (m *GetRegionByIDRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetRegionByIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetRegionByIDRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetRegionByIDRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetRegionByIDRequest.Merge(dst, src)
}
func (m *GetRegionByIDRequest) XXX_Size() int {
return m.Size()
}
func (m *GetRegionByIDRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetRegionByIDRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetRegionByIDRequest proto.InternalMessageInfo
func (m *GetRegionByIDRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetRegionByIDRequest) GetRegionId() uint64 {
if m != nil {
return m.RegionId
}
return 0
}
type ScanRegionsRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
Limit int32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
EndKey []byte `protobuf:"bytes,4,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanRegionsRequest) Reset() { *m = ScanRegionsRequest{} }
func (m *ScanRegionsRequest) String() string { return proto.CompactTextString(m) }
func (*ScanRegionsRequest) ProtoMessage() {}
func (*ScanRegionsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{21}
}
func (m *ScanRegionsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScanRegionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScanRegionsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ScanRegionsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanRegionsRequest.Merge(dst, src)
}
func (m *ScanRegionsRequest) XXX_Size() int {
return m.Size()
}
func (m *ScanRegionsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ScanRegionsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ScanRegionsRequest proto.InternalMessageInfo
func (m *ScanRegionsRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *ScanRegionsRequest) GetStartKey() []byte {
if m != nil {
return m.StartKey
}
return nil
}
func (m *ScanRegionsRequest) GetLimit() int32 {
if m != nil {
return m.Limit
}
return 0
}
func (m *ScanRegionsRequest) GetEndKey() []byte {
if m != nil {
return m.EndKey
}
return nil
}
type Region struct {
Region *metapb.Region `protobuf:"bytes,1,opt,name=region" json:"region,omitempty"`
Leader *metapb.Peer `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"`
// Leader considers that these peers are down.
DownPeers []*PeerStats `protobuf:"bytes,3,rep,name=down_peers,json=downPeers" json:"down_peers,omitempty"`
// Pending peers are the peers that the leader can't consider as
// working followers.
PendingPeers []*metapb.Peer `protobuf:"bytes,4,rep,name=pending_peers,json=pendingPeers" json:"pending_peers,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Region) Reset() { *m = Region{} }
func (m *Region) String() string { return proto.CompactTextString(m) }
func (*Region) ProtoMessage() {}
func (*Region) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{22}
}
func (m *Region) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Region) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Region.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Region) XXX_Merge(src proto.Message) {
xxx_messageInfo_Region.Merge(dst, src)
}
func (m *Region) XXX_Size() int {
return m.Size()
}
func (m *Region) XXX_DiscardUnknown() {
xxx_messageInfo_Region.DiscardUnknown(m)
}
var xxx_messageInfo_Region proto.InternalMessageInfo
func (m *Region) GetRegion() *metapb.Region {
if m != nil {
return m.Region
}
return nil
}
func (m *Region) GetLeader() *metapb.Peer {
if m != nil {
return m.Leader
}
return nil
}
func (m *Region) GetDownPeers() []*PeerStats {
if m != nil {
return m.DownPeers
}
return nil
}
func (m *Region) GetPendingPeers() []*metapb.Peer {
if m != nil {
return m.PendingPeers
}
return nil
}
type ScanRegionsResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
// Keep for backword compatibability.
RegionMetas []*metapb.Region `protobuf:"bytes,2,rep,name=region_metas,json=regionMetas" json:"region_metas,omitempty"`
Leaders []*metapb.Peer `protobuf:"bytes,3,rep,name=leaders" json:"leaders,omitempty"`
// Extended region info with down/pending peers.
Regions []*Region `protobuf:"bytes,4,rep,name=regions" json:"regions,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanRegionsResponse) Reset() { *m = ScanRegionsResponse{} }
func (m *ScanRegionsResponse) String() string { return proto.CompactTextString(m) }
func (*ScanRegionsResponse) ProtoMessage() {}
func (*ScanRegionsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{23}
}
func (m *ScanRegionsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScanRegionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScanRegionsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ScanRegionsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanRegionsResponse.Merge(dst, src)
}
func (m *ScanRegionsResponse) XXX_Size() int {
return m.Size()
}
func (m *ScanRegionsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ScanRegionsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ScanRegionsResponse proto.InternalMessageInfo
func (m *ScanRegionsResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *ScanRegionsResponse) GetRegionMetas() []*metapb.Region {
if m != nil {
return m.RegionMetas
}
return nil
}
func (m *ScanRegionsResponse) GetLeaders() []*metapb.Peer {
if m != nil {
return m.Leaders
}
return nil
}
func (m *ScanRegionsResponse) GetRegions() []*Region {
if m != nil {
return m.Regions
}
return nil
}
type GetClusterConfigRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetClusterConfigRequest) Reset() { *m = GetClusterConfigRequest{} }
func (m *GetClusterConfigRequest) String() string { return proto.CompactTextString(m) }
func (*GetClusterConfigRequest) ProtoMessage() {}
func (*GetClusterConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{24}
}
func (m *GetClusterConfigRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetClusterConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetClusterConfigRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetClusterConfigRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetClusterConfigRequest.Merge(dst, src)
}
func (m *GetClusterConfigRequest) XXX_Size() int {
return m.Size()
}
func (m *GetClusterConfigRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetClusterConfigRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetClusterConfigRequest proto.InternalMessageInfo
func (m *GetClusterConfigRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
type GetClusterConfigResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetClusterConfigResponse) Reset() { *m = GetClusterConfigResponse{} }
func (m *GetClusterConfigResponse) String() string { return proto.CompactTextString(m) }
func (*GetClusterConfigResponse) ProtoMessage() {}
func (*GetClusterConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{25}
}
func (m *GetClusterConfigResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetClusterConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetClusterConfigResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetClusterConfigResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetClusterConfigResponse.Merge(dst, src)
}
func (m *GetClusterConfigResponse) XXX_Size() int {
return m.Size()
}
func (m *GetClusterConfigResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetClusterConfigResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetClusterConfigResponse proto.InternalMessageInfo
func (m *GetClusterConfigResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetClusterConfigResponse) GetCluster() *metapb.Cluster {
if m != nil {
return m.Cluster
}
return nil
}
type PutClusterConfigRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PutClusterConfigRequest) Reset() { *m = PutClusterConfigRequest{} }
func (m *PutClusterConfigRequest) String() string { return proto.CompactTextString(m) }
func (*PutClusterConfigRequest) ProtoMessage() {}
func (*PutClusterConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{26}
}
func (m *PutClusterConfigRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PutClusterConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PutClusterConfigRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PutClusterConfigRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PutClusterConfigRequest.Merge(dst, src)
}
func (m *PutClusterConfigRequest) XXX_Size() int {
return m.Size()
}
func (m *PutClusterConfigRequest) XXX_DiscardUnknown() {
xxx_messageInfo_PutClusterConfigRequest.DiscardUnknown(m)
}
var xxx_messageInfo_PutClusterConfigRequest proto.InternalMessageInfo
func (m *PutClusterConfigRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *PutClusterConfigRequest) GetCluster() *metapb.Cluster {
if m != nil {
return m.Cluster
}
return nil
}
type PutClusterConfigResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PutClusterConfigResponse) Reset() { *m = PutClusterConfigResponse{} }
func (m *PutClusterConfigResponse) String() string { return proto.CompactTextString(m) }
func (*PutClusterConfigResponse) ProtoMessage() {}
func (*PutClusterConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{27}
}
func (m *PutClusterConfigResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PutClusterConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PutClusterConfigResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PutClusterConfigResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PutClusterConfigResponse.Merge(dst, src)
}
func (m *PutClusterConfigResponse) XXX_Size() int {
return m.Size()
}
func (m *PutClusterConfigResponse) XXX_DiscardUnknown() {
xxx_messageInfo_PutClusterConfigResponse.DiscardUnknown(m)
}
var xxx_messageInfo_PutClusterConfigResponse proto.InternalMessageInfo
func (m *PutClusterConfigResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
type Member struct {
// name is the name of the PD member.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// member_id is the unique id of the PD member.
MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
PeerUrls []string `protobuf:"bytes,3,rep,name=peer_urls,json=peerUrls" json:"peer_urls,omitempty"`
ClientUrls []string `protobuf:"bytes,4,rep,name=client_urls,json=clientUrls" json:"client_urls,omitempty"`
LeaderPriority int32 `protobuf:"varint,5,opt,name=leader_priority,json=leaderPriority,proto3" json:"leader_priority,omitempty"`
DeployPath string `protobuf:"bytes,6,opt,name=deploy_path,json=deployPath,proto3" json:"deploy_path,omitempty"`
BinaryVersion string `protobuf:"bytes,7,opt,name=binary_version,json=binaryVersion,proto3" json:"binary_version,omitempty"`
GitHash string `protobuf:"bytes,8,opt,name=git_hash,json=gitHash,proto3" json:"git_hash,omitempty"`
DcLocation string `protobuf:"bytes,9,opt,name=dc_location,json=dcLocation,proto3" json:"dc_location,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Member) Reset() { *m = Member{} }
func (m *Member) String() string { return proto.CompactTextString(m) }
func (*Member) ProtoMessage() {}
func (*Member) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{28}
}
func (m *Member) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Member.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Member) XXX_Merge(src proto.Message) {
xxx_messageInfo_Member.Merge(dst, src)
}
func (m *Member) XXX_Size() int {
return m.Size()
}
func (m *Member) XXX_DiscardUnknown() {
xxx_messageInfo_Member.DiscardUnknown(m)
}
var xxx_messageInfo_Member proto.InternalMessageInfo
func (m *Member) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Member) GetMemberId() uint64 {
if m != nil {
return m.MemberId
}
return 0
}
func (m *Member) GetPeerUrls() []string {
if m != nil {
return m.PeerUrls
}
return nil
}
func (m *Member) GetClientUrls() []string {
if m != nil {
return m.ClientUrls
}
return nil
}
func (m *Member) GetLeaderPriority() int32 {
if m != nil {
return m.LeaderPriority
}
return 0
}
func (m *Member) GetDeployPath() string {
if m != nil {
return m.DeployPath
}
return ""
}
func (m *Member) GetBinaryVersion() string {
if m != nil {
return m.BinaryVersion
}
return ""
}
func (m *Member) GetGitHash() string {
if m != nil {
return m.GitHash
}
return ""
}
func (m *Member) GetDcLocation() string {
if m != nil {
return m.DcLocation
}
return ""
}
type GetMembersRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetMembersRequest) Reset() { *m = GetMembersRequest{} }
func (m *GetMembersRequest) String() string { return proto.CompactTextString(m) }
func (*GetMembersRequest) ProtoMessage() {}
func (*GetMembersRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{29}
}
func (m *GetMembersRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetMembersRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetMembersRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetMembersRequest.Merge(dst, src)
}
func (m *GetMembersRequest) XXX_Size() int {
return m.Size()
}
func (m *GetMembersRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetMembersRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetMembersRequest proto.InternalMessageInfo
func (m *GetMembersRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
type GetMembersResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
Leader *Member `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"`
EtcdLeader *Member `protobuf:"bytes,4,opt,name=etcd_leader,json=etcdLeader" json:"etcd_leader,omitempty"`
TsoAllocatorLeaders map[string]*Member `protobuf:"bytes,5,rep,name=tso_allocator_leaders,json=tsoAllocatorLeaders" json:"tso_allocator_leaders,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetMembersResponse) Reset() { *m = GetMembersResponse{} }
func (m *GetMembersResponse) String() string { return proto.CompactTextString(m) }
func (*GetMembersResponse) ProtoMessage() {}
func (*GetMembersResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{30}
}
func (m *GetMembersResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetMembersResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetMembersResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetMembersResponse.Merge(dst, src)
}
func (m *GetMembersResponse) XXX_Size() int {
return m.Size()
}
func (m *GetMembersResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetMembersResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetMembersResponse proto.InternalMessageInfo
func (m *GetMembersResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *GetMembersResponse) GetMembers() []*Member {
if m != nil {
return m.Members
}
return nil
}
func (m *GetMembersResponse) GetLeader() *Member {
if m != nil {
return m.Leader
}
return nil
}
func (m *GetMembersResponse) GetEtcdLeader() *Member {
if m != nil {
return m.EtcdLeader
}
return nil
}
func (m *GetMembersResponse) GetTsoAllocatorLeaders() map[string]*Member {
if m != nil {
return m.TsoAllocatorLeaders
}
return nil
}
type PeerStats struct {
Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
DownSeconds uint64 `protobuf:"varint,2,opt,name=down_seconds,json=downSeconds,proto3" json:"down_seconds,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PeerStats) Reset() { *m = PeerStats{} }
func (m *PeerStats) String() string { return proto.CompactTextString(m) }
func (*PeerStats) ProtoMessage() {}
func (*PeerStats) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{31}
}
func (m *PeerStats) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PeerStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PeerStats.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
// Tail of the generated plumbing for the PeerStats message (its struct
// definition and XXX_Marshal begin earlier in the file): merge/size hooks
// plus nil-safe field accessors. This whole section is protoc-generated —
// do not edit by hand; regenerate from pdpb.proto instead.
func (dst *PeerStats) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PeerStats.Merge(dst, src)
}
func (m *PeerStats) XXX_Size() int {
	return m.Size()
}
func (m *PeerStats) XXX_DiscardUnknown() {
	xxx_messageInfo_PeerStats.DiscardUnknown(m)
}
var xxx_messageInfo_PeerStats proto.InternalMessageInfo
// Generated nil-safe getters for PeerStats; safe to call on a nil receiver.
func (m *PeerStats) GetPeer() *metapb.Peer {
	if m != nil {
		return m.Peer
	}
	return nil
}
func (m *PeerStats) GetDownSeconds() uint64 {
	if m != nil {
		return m.DownSeconds
	}
	return 0
}
// RegionHeartbeatRequest is the periodic per-region report a leader peer
// sends: the region descriptor, its leader, down/pending peers, and
// read/write throughput counters for the reported interval.
type RegionHeartbeatRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"`
	// Leader Peer sending the heartbeat.
	Leader *metapb.Peer `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"`
	// Leader considers that these peers are down.
	DownPeers []*PeerStats `protobuf:"bytes,4,rep,name=down_peers,json=downPeers" json:"down_peers,omitempty"`
	// Pending peers are the peers that the leader can't consider as
	// working followers.
	PendingPeers []*metapb.Peer `protobuf:"bytes,5,rep,name=pending_peers,json=pendingPeers" json:"pending_peers,omitempty"`
	// Bytes read/written during this period.
	BytesWritten uint64 `protobuf:"varint,6,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"`
	BytesRead uint64 `protobuf:"varint,7,opt,name=bytes_read,json=bytesRead,proto3" json:"bytes_read,omitempty"`
	// Keys read/written during this period.
	KeysWritten uint64 `protobuf:"varint,8,opt,name=keys_written,json=keysWritten,proto3" json:"keys_written,omitempty"`
	KeysRead uint64 `protobuf:"varint,9,opt,name=keys_read,json=keysRead,proto3" json:"keys_read,omitempty"`
	// Approximate region size.
	ApproximateSize uint64 `protobuf:"varint,10,opt,name=approximate_size,json=approximateSize,proto3" json:"approximate_size,omitempty"`
	// Actually reported time interval
	Interval *TimeInterval `protobuf:"bytes,12,opt,name=interval" json:"interval,omitempty"`
	// Approximate number of keys.
	ApproximateKeys uint64 `protobuf:"varint,13,opt,name=approximate_keys,json=approximateKeys,proto3" json:"approximate_keys,omitempty"`
	// Term is the term of raft group.
	Term uint64 `protobuf:"varint,14,opt,name=term,proto3" json:"term,omitempty"`
	ReplicationStatus *replication_modepb.RegionReplicationStatus `protobuf:"bytes,15,opt,name=replication_status,json=replicationStatus" json:"replication_status,omitempty"`
	// QueryStats reported write query stats, and there are read query stats in store heartbeat
	QueryStats *QueryStats `protobuf:"bytes,16,opt,name=query_stats,json=queryStats" json:"query_stats,omitempty"`
	// cpu_usage is the CPU time usage of the leader region since the last heartbeat,
	// which is calculated by cpu_time_delta/heartbeat_reported_interval.
	CpuUsage uint64 `protobuf:"varint,17,opt,name=cpu_usage,json=cpuUsage,proto3" json:"cpu_usage,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for RegionHeartbeatRequest
// (reset/stringify/descriptor lookup and marshal/unmarshal hooks).
func (m *RegionHeartbeatRequest) Reset() { *m = RegionHeartbeatRequest{} }
func (m *RegionHeartbeatRequest) String() string { return proto.CompactTextString(m) }
func (*RegionHeartbeatRequest) ProtoMessage() {}
func (*RegionHeartbeatRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{32}
}
func (m *RegionHeartbeatRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RegionHeartbeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RegionHeartbeatRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RegionHeartbeatRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RegionHeartbeatRequest.Merge(dst, src)
}
func (m *RegionHeartbeatRequest) XXX_Size() int {
	return m.Size()
}
func (m *RegionHeartbeatRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RegionHeartbeatRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RegionHeartbeatRequest proto.InternalMessageInfo
// Generated nil-safe getters for RegionHeartbeatRequest; each returns the
// field value, or the zero value when the receiver is nil.
func (m *RegionHeartbeatRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetRegion() *metapb.Region {
	if m != nil {
		return m.Region
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetLeader() *metapb.Peer {
	if m != nil {
		return m.Leader
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetDownPeers() []*PeerStats {
	if m != nil {
		return m.DownPeers
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetPendingPeers() []*metapb.Peer {
	if m != nil {
		return m.PendingPeers
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetBytesWritten() uint64 {
	if m != nil {
		return m.BytesWritten
	}
	return 0
}
func (m *RegionHeartbeatRequest) GetBytesRead() uint64 {
	if m != nil {
		return m.BytesRead
	}
	return 0
}
func (m *RegionHeartbeatRequest) GetKeysWritten() uint64 {
	if m != nil {
		return m.KeysWritten
	}
	return 0
}
func (m *RegionHeartbeatRequest) GetKeysRead() uint64 {
	if m != nil {
		return m.KeysRead
	}
	return 0
}
func (m *RegionHeartbeatRequest) GetApproximateSize() uint64 {
	if m != nil {
		return m.ApproximateSize
	}
	return 0
}
func (m *RegionHeartbeatRequest) GetInterval() *TimeInterval {
	if m != nil {
		return m.Interval
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetApproximateKeys() uint64 {
	if m != nil {
		return m.ApproximateKeys
	}
	return 0
}
func (m *RegionHeartbeatRequest) GetTerm() uint64 {
	if m != nil {
		return m.Term
	}
	return 0
}
func (m *RegionHeartbeatRequest) GetReplicationStatus() *replication_modepb.RegionReplicationStatus {
	if m != nil {
		return m.ReplicationStatus
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetQueryStats() *QueryStats {
	if m != nil {
		return m.QueryStats
	}
	return nil
}
func (m *RegionHeartbeatRequest) GetCpuUsage() uint64 {
	if m != nil {
		return m.CpuUsage
	}
	return 0
}
// ChangePeer describes a single peer-membership change: the target peer
// plus the raft conf-change kind to apply (eraftpb.ConfChangeType).
// Generated protobuf code — do not edit by hand.
type ChangePeer struct {
	Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
	ChangeType eraftpb.ConfChangeType `protobuf:"varint,2,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for ChangePeer.
func (m *ChangePeer) Reset() { *m = ChangePeer{} }
func (m *ChangePeer) String() string { return proto.CompactTextString(m) }
func (*ChangePeer) ProtoMessage() {}
func (*ChangePeer) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{33}
}
func (m *ChangePeer) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ChangePeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ChangePeer.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ChangePeer) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ChangePeer.Merge(dst, src)
}
func (m *ChangePeer) XXX_Size() int {
	return m.Size()
}
func (m *ChangePeer) XXX_DiscardUnknown() {
	xxx_messageInfo_ChangePeer.DiscardUnknown(m)
}
var xxx_messageInfo_ChangePeer proto.InternalMessageInfo
// Generated nil-safe getters for ChangePeer.
func (m *ChangePeer) GetPeer() *metapb.Peer {
	if m != nil {
		return m.Peer
	}
	return nil
}
func (m *ChangePeer) GetChangeType() eraftpb.ConfChangeType {
	if m != nil {
		return m.ChangeType
	}
	return eraftpb.ConfChangeType_AddNode
}
// ChangePeerV2 batches multiple peer changes to be applied atomically
// (joint consensus); an empty Changes list means "exit joint state".
type ChangePeerV2 struct {
	// If changes is empty, it means that to exit joint state.
	Changes []*ChangePeer `protobuf:"bytes,1,rep,name=changes" json:"changes,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for ChangePeerV2.
func (m *ChangePeerV2) Reset() { *m = ChangePeerV2{} }
func (m *ChangePeerV2) String() string { return proto.CompactTextString(m) }
func (*ChangePeerV2) ProtoMessage() {}
func (*ChangePeerV2) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{34}
}
func (m *ChangePeerV2) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ChangePeerV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ChangePeerV2.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ChangePeerV2) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ChangePeerV2.Merge(dst, src)
}
func (m *ChangePeerV2) XXX_Size() int {
	return m.Size()
}
func (m *ChangePeerV2) XXX_DiscardUnknown() {
	xxx_messageInfo_ChangePeerV2.DiscardUnknown(m)
}
var xxx_messageInfo_ChangePeerV2 proto.InternalMessageInfo
func (m *ChangePeerV2) GetChanges() []*ChangePeer {
	if m != nil {
		return m.Changes
	}
	return nil
}
// TransferLeader names the peer that should become the region's leader.
type TransferLeader struct {
	Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for TransferLeader.
func (m *TransferLeader) Reset() { *m = TransferLeader{} }
func (m *TransferLeader) String() string { return proto.CompactTextString(m) }
func (*TransferLeader) ProtoMessage() {}
func (*TransferLeader) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{35}
}
func (m *TransferLeader) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *TransferLeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_TransferLeader.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *TransferLeader) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TransferLeader.Merge(dst, src)
}
func (m *TransferLeader) XXX_Size() int {
	return m.Size()
}
func (m *TransferLeader) XXX_DiscardUnknown() {
	xxx_messageInfo_TransferLeader.DiscardUnknown(m)
}
var xxx_messageInfo_TransferLeader proto.InternalMessageInfo
func (m *TransferLeader) GetPeer() *metapb.Peer {
	if m != nil {
		return m.Peer
	}
	return nil
}
// Merge carries the target region that the current region should merge into.
type Merge struct {
	Target *metapb.Region `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for Merge.
func (m *Merge) Reset() { *m = Merge{} }
func (m *Merge) String() string { return proto.CompactTextString(m) }
func (*Merge) ProtoMessage() {}
func (*Merge) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{36}
}
func (m *Merge) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Merge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Merge.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *Merge) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Merge.Merge(dst, src)
}
func (m *Merge) XXX_Size() int {
	return m.Size()
}
func (m *Merge) XXX_DiscardUnknown() {
	xxx_messageInfo_Merge.DiscardUnknown(m)
}
var xxx_messageInfo_Merge proto.InternalMessageInfo
func (m *Merge) GetTarget() *metapb.Region {
	if m != nil {
		return m.Target
	}
	return nil
}
// SplitRegion instructs a region split, carrying the check policy and the
// explicit split keys (if any).
type SplitRegion struct {
	Policy CheckPolicy `protobuf:"varint,1,opt,name=policy,proto3,enum=pdpb.CheckPolicy" json:"policy,omitempty"`
	Keys [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for SplitRegion.
func (m *SplitRegion) Reset() { *m = SplitRegion{} }
func (m *SplitRegion) String() string { return proto.CompactTextString(m) }
func (*SplitRegion) ProtoMessage() {}
func (*SplitRegion) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{37}
}
func (m *SplitRegion) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SplitRegion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SplitRegion.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SplitRegion) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitRegion.Merge(dst, src)
}
func (m *SplitRegion) XXX_Size() int {
	return m.Size()
}
func (m *SplitRegion) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitRegion.DiscardUnknown(m)
}
var xxx_messageInfo_SplitRegion proto.InternalMessageInfo
// Generated nil-safe getters for SplitRegion.
func (m *SplitRegion) GetPolicy() CheckPolicy {
	if m != nil {
		return m.Policy
	}
	return CheckPolicy_SCAN
}
func (m *SplitRegion) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
// RegionHeartbeatResponse is PD's reply to a region heartbeat. At most one
// scheduling directive (change peer, transfer leader, merge, split, ...) is
// expected per response. Generated protobuf code — do not edit by hand.
type RegionHeartbeatResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// Notice, Pd only allows handling reported epoch >= current pd's.
	// Leader peer reports region status with RegionHeartbeatRequest
	// to pd regularly, pd will determine whether this region
	// should do ChangePeer or not.
	// E,g, max peer number is 3, region A, first only peer 1 in A.
	// 1. Pd region state -> Peers (1), ConfVer (1).
	// 2. Leader peer 1 reports region state to pd, pd finds the
	// peer number is < 3, so first changes its current region
	// state -> Peers (1, 2), ConfVer (1), and returns ChangePeer Adding 2.
	// 3. Leader does ChangePeer, then reports Peers (1, 2), ConfVer (2),
	// pd updates its state -> Peers (1, 2), ConfVer (2).
	// 4. Leader may report old Peers (1), ConfVer (1) to pd before ConfChange
	// finished, pd stills responses ChangePeer Adding 2, of course, we must
	// guarantee the second ChangePeer can't be applied in TiKV.
	ChangePeer *ChangePeer `protobuf:"bytes,2,opt,name=change_peer,json=changePeer" json:"change_peer,omitempty"`
	// Pd can return transfer_leader to let TiKV does leader transfer itself.
	TransferLeader *TransferLeader `protobuf:"bytes,3,opt,name=transfer_leader,json=transferLeader" json:"transfer_leader,omitempty"`
	// ID of the region
	RegionId uint64 `protobuf:"varint,4,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
	RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,5,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"`
	// Leader of the region at the moment of the corresponding request was made.
	TargetPeer *metapb.Peer `protobuf:"bytes,6,opt,name=target_peer,json=targetPeer" json:"target_peer,omitempty"`
	Merge *Merge `protobuf:"bytes,7,opt,name=merge" json:"merge,omitempty"`
	// PD sends split_region to let TiKV split a region into two regions.
	SplitRegion *SplitRegion `protobuf:"bytes,8,opt,name=split_region,json=splitRegion" json:"split_region,omitempty"`
	// Multiple change peer operations atomically.
	// Note: PD can use both ChangePeer and ChangePeerV2 at the same time
	// (not in the same RegionHeartbeatResponse).
	// Now, PD use ChangePeerV2 only for replacing peers.
	ChangePeerV2 *ChangePeerV2 `protobuf:"bytes,9,opt,name=change_peer_v2,json=changePeerV2" json:"change_peer_v2,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for RegionHeartbeatResponse.
func (m *RegionHeartbeatResponse) Reset() { *m = RegionHeartbeatResponse{} }
func (m *RegionHeartbeatResponse) String() string { return proto.CompactTextString(m) }
func (*RegionHeartbeatResponse) ProtoMessage() {}
func (*RegionHeartbeatResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{38}
}
func (m *RegionHeartbeatResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RegionHeartbeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RegionHeartbeatResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RegionHeartbeatResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RegionHeartbeatResponse.Merge(dst, src)
}
func (m *RegionHeartbeatResponse) XXX_Size() int {
	return m.Size()
}
func (m *RegionHeartbeatResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RegionHeartbeatResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RegionHeartbeatResponse proto.InternalMessageInfo
// Generated nil-safe getters for RegionHeartbeatResponse.
func (m *RegionHeartbeatResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *RegionHeartbeatResponse) GetChangePeer() *ChangePeer {
	if m != nil {
		return m.ChangePeer
	}
	return nil
}
func (m *RegionHeartbeatResponse) GetTransferLeader() *TransferLeader {
	if m != nil {
		return m.TransferLeader
	}
	return nil
}
func (m *RegionHeartbeatResponse) GetRegionId() uint64 {
	if m != nil {
		return m.RegionId
	}
	return 0
}
func (m *RegionHeartbeatResponse) GetRegionEpoch() *metapb.RegionEpoch {
	if m != nil {
		return m.RegionEpoch
	}
	return nil
}
func (m *RegionHeartbeatResponse) GetTargetPeer() *metapb.Peer {
	if m != nil {
		return m.TargetPeer
	}
	return nil
}
func (m *RegionHeartbeatResponse) GetMerge() *Merge {
	if m != nil {
		return m.Merge
	}
	return nil
}
func (m *RegionHeartbeatResponse) GetSplitRegion() *SplitRegion {
	if m != nil {
		return m.SplitRegion
	}
	return nil
}
func (m *RegionHeartbeatResponse) GetChangePeerV2() *ChangePeerV2 {
	if m != nil {
		return m.ChangePeerV2
	}
	return nil
}
// AskSplitRequest asks PD to allocate IDs for splitting the given region.
type AskSplitRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for AskSplitRequest.
func (m *AskSplitRequest) Reset() { *m = AskSplitRequest{} }
func (m *AskSplitRequest) String() string { return proto.CompactTextString(m) }
func (*AskSplitRequest) ProtoMessage() {}
func (*AskSplitRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{39}
}
func (m *AskSplitRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *AskSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_AskSplitRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *AskSplitRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AskSplitRequest.Merge(dst, src)
}
func (m *AskSplitRequest) XXX_Size() int {
	return m.Size()
}
func (m *AskSplitRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AskSplitRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AskSplitRequest proto.InternalMessageInfo
func (m *AskSplitRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *AskSplitRequest) GetRegion() *metapb.Region {
	if m != nil {
		return m.Region
	}
	return nil
}
// AskSplitResponse carries the allocated region ID and peer IDs for the
// new region produced by the split.
type AskSplitResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// We split the region into two, first uses the origin
	// parent region id, and the second uses the new_region_id.
	// We must guarantee that the new_region_id is global unique.
	NewRegionId uint64 `protobuf:"varint,2,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"`
	// The peer ids for the new split region.
	NewPeerIds []uint64 `protobuf:"varint,3,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for AskSplitResponse.
func (m *AskSplitResponse) Reset() { *m = AskSplitResponse{} }
func (m *AskSplitResponse) String() string { return proto.CompactTextString(m) }
func (*AskSplitResponse) ProtoMessage() {}
func (*AskSplitResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{40}
}
func (m *AskSplitResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *AskSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_AskSplitResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *AskSplitResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AskSplitResponse.Merge(dst, src)
}
func (m *AskSplitResponse) XXX_Size() int {
	return m.Size()
}
func (m *AskSplitResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AskSplitResponse.DiscardUnknown(m)
}
var xxx_messageInfo_AskSplitResponse proto.InternalMessageInfo
// Generated nil-safe getters for AskSplitResponse.
func (m *AskSplitResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *AskSplitResponse) GetNewRegionId() uint64 {
	if m != nil {
		return m.NewRegionId
	}
	return 0
}
func (m *AskSplitResponse) GetNewPeerIds() []uint64 {
	if m != nil {
		return m.NewPeerIds
	}
	return nil
}
// ReportSplitRequest reports a completed split: the resulting left and
// right regions. Generated protobuf code — do not edit by hand.
type ReportSplitRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	Left *metapb.Region `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"`
	Right *metapb.Region `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for ReportSplitRequest.
func (m *ReportSplitRequest) Reset() { *m = ReportSplitRequest{} }
func (m *ReportSplitRequest) String() string { return proto.CompactTextString(m) }
func (*ReportSplitRequest) ProtoMessage() {}
func (*ReportSplitRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{41}
}
func (m *ReportSplitRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ReportSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReportSplitRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ReportSplitRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReportSplitRequest.Merge(dst, src)
}
func (m *ReportSplitRequest) XXX_Size() int {
	return m.Size()
}
func (m *ReportSplitRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ReportSplitRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReportSplitRequest proto.InternalMessageInfo
// Generated nil-safe getters for ReportSplitRequest.
func (m *ReportSplitRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *ReportSplitRequest) GetLeft() *metapb.Region {
	if m != nil {
		return m.Left
	}
	return nil
}
func (m *ReportSplitRequest) GetRight() *metapb.Region {
	if m != nil {
		return m.Right
	}
	return nil
}
// ReportSplitResponse acknowledges a split report; it carries only the
// standard response header.
type ReportSplitResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for ReportSplitResponse.
func (m *ReportSplitResponse) Reset() { *m = ReportSplitResponse{} }
func (m *ReportSplitResponse) String() string { return proto.CompactTextString(m) }
func (*ReportSplitResponse) ProtoMessage() {}
func (*ReportSplitResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{42}
}
func (m *ReportSplitResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ReportSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReportSplitResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ReportSplitResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReportSplitResponse.Merge(dst, src)
}
func (m *ReportSplitResponse) XXX_Size() int {
	return m.Size()
}
func (m *ReportSplitResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ReportSplitResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ReportSplitResponse proto.InternalMessageInfo
func (m *ReportSplitResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
// AskBatchSplitRequest asks PD to allocate IDs for splitting a region into
// split_count pieces.
type AskBatchSplitRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"`
	SplitCount uint32 `protobuf:"varint,3,opt,name=split_count,json=splitCount,proto3" json:"split_count,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for AskBatchSplitRequest.
func (m *AskBatchSplitRequest) Reset() { *m = AskBatchSplitRequest{} }
func (m *AskBatchSplitRequest) String() string { return proto.CompactTextString(m) }
func (*AskBatchSplitRequest) ProtoMessage() {}
func (*AskBatchSplitRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{43}
}
func (m *AskBatchSplitRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *AskBatchSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_AskBatchSplitRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *AskBatchSplitRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AskBatchSplitRequest.Merge(dst, src)
}
func (m *AskBatchSplitRequest) XXX_Size() int {
	return m.Size()
}
func (m *AskBatchSplitRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AskBatchSplitRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AskBatchSplitRequest proto.InternalMessageInfo
// Generated nil-safe getters for AskBatchSplitRequest.
func (m *AskBatchSplitRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *AskBatchSplitRequest) GetRegion() *metapb.Region {
	if m != nil {
		return m.Region
	}
	return nil
}
func (m *AskBatchSplitRequest) GetSplitCount() uint32 {
	if m != nil {
		return m.SplitCount
	}
	return 0
}
// SplitID is one allocated (region ID, peer IDs) tuple for a batch split.
type SplitID struct {
	NewRegionId uint64 `protobuf:"varint,1,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"`
	NewPeerIds []uint64 `protobuf:"varint,2,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated protobuf lifecycle methods for SplitID.
func (m *SplitID) Reset() { *m = SplitID{} }
func (m *SplitID) String() string { return proto.CompactTextString(m) }
func (*SplitID) ProtoMessage() {}
func (*SplitID) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{44}
}
func (m *SplitID) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SplitID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SplitID.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SplitID) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitID.Merge(dst, src)
}
func (m *SplitID) XXX_Size() int {
	return m.Size()
}
func (m *SplitID) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitID.DiscardUnknown(m)
}
var xxx_messageInfo_SplitID proto.InternalMessageInfo
// Generated nil-safe getters for SplitID.
func (m *SplitID) GetNewRegionId() uint64 {
	if m != nil {
		return m.NewRegionId
	}
	return 0
}
func (m *SplitID) GetNewPeerIds() []uint64 {
	if m != nil {
		return m.NewPeerIds
	}
	return nil
}
type AskBatchSplitResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Ids []*SplitID `protobuf:"bytes,2,rep,name=ids" json:"ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AskBatchSplitResponse) Reset() { *m = AskBatchSplitResponse{} }
func (m *AskBatchSplitResponse) String() string { return proto.CompactTextString(m) }
func (*AskBatchSplitResponse) ProtoMessage() {}
func (*AskBatchSplitResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{45}
}
func (m *AskBatchSplitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *AskBatchSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_AskBatchSplitResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *AskBatchSplitResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_AskBatchSplitResponse.Merge(dst, src)
}
func (m *AskBatchSplitResponse) XXX_Size() int {
return m.Size()
}
func (m *AskBatchSplitResponse) XXX_DiscardUnknown() {
xxx_messageInfo_AskBatchSplitResponse.DiscardUnknown(m)
}
var xxx_messageInfo_AskBatchSplitResponse proto.InternalMessageInfo
func (m *AskBatchSplitResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *AskBatchSplitResponse) GetIds() []*SplitID {
if m != nil {
return m.Ids
}
return nil
}
type ReportBatchSplitRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
Regions []*metapb.Region `protobuf:"bytes,2,rep,name=regions" json:"regions,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReportBatchSplitRequest) Reset() { *m = ReportBatchSplitRequest{} }
func (m *ReportBatchSplitRequest) String() string { return proto.CompactTextString(m) }
func (*ReportBatchSplitRequest) ProtoMessage() {}
func (*ReportBatchSplitRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{46}
}
func (m *ReportBatchSplitRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReportBatchSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReportBatchSplitRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ReportBatchSplitRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReportBatchSplitRequest.Merge(dst, src)
}
func (m *ReportBatchSplitRequest) XXX_Size() int {
return m.Size()
}
func (m *ReportBatchSplitRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReportBatchSplitRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReportBatchSplitRequest proto.InternalMessageInfo
func (m *ReportBatchSplitRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *ReportBatchSplitRequest) GetRegions() []*metapb.Region {
if m != nil {
return m.Regions
}
return nil
}
type ReportBatchSplitResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReportBatchSplitResponse) Reset() { *m = ReportBatchSplitResponse{} }
func (m *ReportBatchSplitResponse) String() string { return proto.CompactTextString(m) }
func (*ReportBatchSplitResponse) ProtoMessage() {}
func (*ReportBatchSplitResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{47}
}
func (m *ReportBatchSplitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ReportBatchSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ReportBatchSplitResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ReportBatchSplitResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReportBatchSplitResponse.Merge(dst, src)
}
func (m *ReportBatchSplitResponse) XXX_Size() int {
return m.Size()
}
func (m *ReportBatchSplitResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ReportBatchSplitResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ReportBatchSplitResponse proto.InternalMessageInfo
func (m *ReportBatchSplitResponse) GetHeader() *ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
type TimeInterval struct {
// The unix timestamp in seconds of the start of this period.
StartTimestamp uint64 `protobuf:"varint,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
// The unix timestamp in seconds of the end of this period.
EndTimestamp uint64 `protobuf:"varint,2,opt,name=end_timestamp,json=endTimestamp,proto3" json:"end_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TimeInterval) Reset() { *m = TimeInterval{} }
func (m *TimeInterval) String() string { return proto.CompactTextString(m) }
func (*TimeInterval) ProtoMessage() {}
func (*TimeInterval) Descriptor() ([]byte, []int) {
return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{48}
}
func (m *TimeInterval) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *TimeInterval) XXX_Merge(src proto.Message) {
xxx_messageInfo_TimeInterval.Merge(dst, src)
}
func (m *TimeInterval) XXX_Size() int {
return m.Size()
}
func (m *TimeInterval) XXX_DiscardUnknown() {
xxx_messageInfo_TimeInterval.DiscardUnknown(m)
}
var xxx_messageInfo_TimeInterval proto.InternalMessageInfo
func (m *TimeInterval) GetStartTimestamp() uint64 {
if m != nil {
return m.StartTimestamp
}
return 0
}
func (m *TimeInterval) GetEndTimestamp() uint64 {
if m != nil {
return m.EndTimestamp
}
return 0
}
// RecordPair is a generated string-key / uint64-value pair message
// (used below in StoreStats for named metrics such as CPU usage and I/O rates).
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type RecordPair struct {
	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *RecordPair) Reset() { *m = RecordPair{} }
func (m *RecordPair) String() string { return proto.CompactTextString(m) }
func (*RecordPair) ProtoMessage() {}
func (*RecordPair) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{49}
}
func (m *RecordPair) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RecordPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RecordPair.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RecordPair) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecordPair.Merge(dst, src)
}
func (m *RecordPair) XXX_Size() int {
	return m.Size()
}
func (m *RecordPair) XXX_DiscardUnknown() {
	xxx_messageInfo_RecordPair.DiscardUnknown(m)
}
var xxx_messageInfo_RecordPair proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *RecordPair) GetKey() string {
	if m != nil {
		return m.Key
	}
	return ""
}
func (m *RecordPair) GetValue() uint64 {
	if m != nil {
		return m.Value
	}
	return 0
}
// PeerStat carries per-region peer statistics (read keys/bytes and query stats)
// reported for a region, keyed by RegionId.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type PeerStat struct {
	RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
	ReadKeys uint64 `protobuf:"varint,2,opt,name=read_keys,json=readKeys,proto3" json:"read_keys,omitempty"`
	ReadBytes uint64 `protobuf:"varint,3,opt,name=read_bytes,json=readBytes,proto3" json:"read_bytes,omitempty"`
	QueryStats *QueryStats `protobuf:"bytes,4,opt,name=query_stats,json=queryStats" json:"query_stats,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *PeerStat) Reset() { *m = PeerStat{} }
func (m *PeerStat) String() string { return proto.CompactTextString(m) }
func (*PeerStat) ProtoMessage() {}
func (*PeerStat) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{50}
}
func (m *PeerStat) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *PeerStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_PeerStat.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *PeerStat) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PeerStat.Merge(dst, src)
}
func (m *PeerStat) XXX_Size() int {
	return m.Size()
}
func (m *PeerStat) XXX_DiscardUnknown() {
	xxx_messageInfo_PeerStat.DiscardUnknown(m)
}
var xxx_messageInfo_PeerStat proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *PeerStat) GetRegionId() uint64 {
	if m != nil {
		return m.RegionId
	}
	return 0
}
func (m *PeerStat) GetReadKeys() uint64 {
	if m != nil {
		return m.ReadKeys
	}
	return 0
}
func (m *PeerStat) GetReadBytes() uint64 {
	if m != nil {
		return m.ReadBytes
	}
	return 0
}
func (m *PeerStat) GetQueryStats() *QueryStats {
	if m != nil {
		return m.QueryStats
	}
	return nil
}
// StoreStats is the per-store statistics payload carried by a store heartbeat:
// capacity/usage, region and snapshot counts, read/write throughput over the
// reported Interval, per-thread CPU and I/O rates, hot-peer stats, and a slow
// score. Field-level comments below come from the source .proto.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type StoreStats struct {
	StoreId uint64 `protobuf:"varint,1,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"`
	// Capacity for the store.
	Capacity uint64 `protobuf:"varint,2,opt,name=capacity,proto3" json:"capacity,omitempty"`
	// Available size for the store.
	Available uint64 `protobuf:"varint,3,opt,name=available,proto3" json:"available,omitempty"`
	// Total region count in this store.
	RegionCount uint32 `protobuf:"varint,4,opt,name=region_count,json=regionCount,proto3" json:"region_count,omitempty"`
	// Current sending snapshot count.
	SendingSnapCount uint32 `protobuf:"varint,5,opt,name=sending_snap_count,json=sendingSnapCount,proto3" json:"sending_snap_count,omitempty"`
	// Current receiving snapshot count.
	ReceivingSnapCount uint32 `protobuf:"varint,6,opt,name=receiving_snap_count,json=receivingSnapCount,proto3" json:"receiving_snap_count,omitempty"`
	// When the store is started (unix timestamp in seconds).
	StartTime uint32 `protobuf:"varint,7,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// How many region is applying snapshot.
	ApplyingSnapCount uint32 `protobuf:"varint,8,opt,name=applying_snap_count,json=applyingSnapCount,proto3" json:"applying_snap_count,omitempty"`
	// If the store is busy
	IsBusy bool `protobuf:"varint,9,opt,name=is_busy,json=isBusy,proto3" json:"is_busy,omitempty"`
	// Actually used space by db
	UsedSize uint64 `protobuf:"varint,10,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"`
	// Bytes written for the store during this period.
	BytesWritten uint64 `protobuf:"varint,11,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"`
	// Keys written for the store during this period.
	KeysWritten uint64 `protobuf:"varint,12,opt,name=keys_written,json=keysWritten,proto3" json:"keys_written,omitempty"`
	// Bytes read for the store during this period.
	BytesRead uint64 `protobuf:"varint,13,opt,name=bytes_read,json=bytesRead,proto3" json:"bytes_read,omitempty"`
	// Keys read for the store during this period.
	KeysRead uint64 `protobuf:"varint,14,opt,name=keys_read,json=keysRead,proto3" json:"keys_read,omitempty"`
	// Actually reported time interval
	Interval *TimeInterval `protobuf:"bytes,15,opt,name=interval" json:"interval,omitempty"`
	// Threads' CPU usages in the store
	CpuUsages []*RecordPair `protobuf:"bytes,16,rep,name=cpu_usages,json=cpuUsages" json:"cpu_usages,omitempty"`
	// Threads' read disk I/O rates in the store
	ReadIoRates []*RecordPair `protobuf:"bytes,17,rep,name=read_io_rates,json=readIoRates" json:"read_io_rates,omitempty"`
	// Threads' write disk I/O rates in the store
	WriteIoRates []*RecordPair `protobuf:"bytes,18,rep,name=write_io_rates,json=writeIoRates" json:"write_io_rates,omitempty"`
	// Operations' latencies in the store
	OpLatencies []*RecordPair `protobuf:"bytes,19,rep,name=op_latencies,json=opLatencies" json:"op_latencies,omitempty"`
	// Hot peer stat in the store
	PeerStats []*PeerStat `protobuf:"bytes,20,rep,name=peer_stats,json=peerStats" json:"peer_stats,omitempty"`
	// Store query stats
	QueryStats *QueryStats `protobuf:"bytes,21,opt,name=query_stats,json=queryStats" json:"query_stats,omitempty"`
	// Score that represents the speed of the store, ranges in [1, 100], lower is better.
	SlowScore uint64 `protobuf:"varint,22,opt,name=slow_score,json=slowScore,proto3" json:"slow_score,omitempty"`
	// Damaged regions on the store that need to be removed by PD.
	DamagedRegionsId []uint64 `protobuf:"varint,23,rep,packed,name=damaged_regions_id,json=damagedRegionsId" json:"damaged_regions_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *StoreStats) Reset() { *m = StoreStats{} }
func (m *StoreStats) String() string { return proto.CompactTextString(m) }
func (*StoreStats) ProtoMessage() {}
func (*StoreStats) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{51}
}
func (m *StoreStats) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *StoreStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_StoreStats.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *StoreStats) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StoreStats.Merge(dst, src)
}
func (m *StoreStats) XXX_Size() int {
	return m.Size()
}
func (m *StoreStats) XXX_DiscardUnknown() {
	xxx_messageInfo_StoreStats.DiscardUnknown(m)
}
var xxx_messageInfo_StoreStats proto.InternalMessageInfo

// Nil-safe field accessors: zero value / nil on a nil receiver.
func (m *StoreStats) GetStoreId() uint64 {
	if m != nil {
		return m.StoreId
	}
	return 0
}
func (m *StoreStats) GetCapacity() uint64 {
	if m != nil {
		return m.Capacity
	}
	return 0
}
func (m *StoreStats) GetAvailable() uint64 {
	if m != nil {
		return m.Available
	}
	return 0
}
func (m *StoreStats) GetRegionCount() uint32 {
	if m != nil {
		return m.RegionCount
	}
	return 0
}
func (m *StoreStats) GetSendingSnapCount() uint32 {
	if m != nil {
		return m.SendingSnapCount
	}
	return 0
}
func (m *StoreStats) GetReceivingSnapCount() uint32 {
	if m != nil {
		return m.ReceivingSnapCount
	}
	return 0
}
func (m *StoreStats) GetStartTime() uint32 {
	if m != nil {
		return m.StartTime
	}
	return 0
}
func (m *StoreStats) GetApplyingSnapCount() uint32 {
	if m != nil {
		return m.ApplyingSnapCount
	}
	return 0
}
func (m *StoreStats) GetIsBusy() bool {
	if m != nil {
		return m.IsBusy
	}
	return false
}
func (m *StoreStats) GetUsedSize() uint64 {
	if m != nil {
		return m.UsedSize
	}
	return 0
}
func (m *StoreStats) GetBytesWritten() uint64 {
	if m != nil {
		return m.BytesWritten
	}
	return 0
}
func (m *StoreStats) GetKeysWritten() uint64 {
	if m != nil {
		return m.KeysWritten
	}
	return 0
}
func (m *StoreStats) GetBytesRead() uint64 {
	if m != nil {
		return m.BytesRead
	}
	return 0
}
func (m *StoreStats) GetKeysRead() uint64 {
	if m != nil {
		return m.KeysRead
	}
	return 0
}
func (m *StoreStats) GetInterval() *TimeInterval {
	if m != nil {
		return m.Interval
	}
	return nil
}
func (m *StoreStats) GetCpuUsages() []*RecordPair {
	if m != nil {
		return m.CpuUsages
	}
	return nil
}
func (m *StoreStats) GetReadIoRates() []*RecordPair {
	if m != nil {
		return m.ReadIoRates
	}
	return nil
}
func (m *StoreStats) GetWriteIoRates() []*RecordPair {
	if m != nil {
		return m.WriteIoRates
	}
	return nil
}
func (m *StoreStats) GetOpLatencies() []*RecordPair {
	if m != nil {
		return m.OpLatencies
	}
	return nil
}
func (m *StoreStats) GetPeerStats() []*PeerStat {
	if m != nil {
		return m.PeerStats
	}
	return nil
}
func (m *StoreStats) GetQueryStats() *QueryStats {
	if m != nil {
		return m.QueryStats
	}
	return nil
}
func (m *StoreStats) GetSlowScore() uint64 {
	if m != nil {
		return m.SlowScore
	}
	return 0
}
func (m *StoreStats) GetDamagedRegionsId() []uint64 {
	if m != nil {
		return m.DamagedRegionsId
	}
	return nil
}
// StoreHeartbeatRequest wraps a StoreStats payload with the common RPC header.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type StoreHeartbeatRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	Stats *StoreStats `protobuf:"bytes,2,opt,name=stats" json:"stats,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *StoreHeartbeatRequest) Reset() { *m = StoreHeartbeatRequest{} }
func (m *StoreHeartbeatRequest) String() string { return proto.CompactTextString(m) }
func (*StoreHeartbeatRequest) ProtoMessage() {}
func (*StoreHeartbeatRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{52}
}
func (m *StoreHeartbeatRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *StoreHeartbeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_StoreHeartbeatRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *StoreHeartbeatRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StoreHeartbeatRequest.Merge(dst, src)
}
func (m *StoreHeartbeatRequest) XXX_Size() int {
	return m.Size()
}
func (m *StoreHeartbeatRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_StoreHeartbeatRequest.DiscardUnknown(m)
}
var xxx_messageInfo_StoreHeartbeatRequest proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *StoreHeartbeatRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *StoreHeartbeatRequest) GetStats() *StoreStats {
	if m != nil {
		return m.Stats
	}
	return nil
}
// StoreHeartbeatResponse answers a store heartbeat, optionally carrying the
// cluster's replication-mode status and version string back to the store.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type StoreHeartbeatResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	ReplicationStatus *replication_modepb.ReplicationStatus `protobuf:"bytes,2,opt,name=replication_status,json=replicationStatus" json:"replication_status,omitempty"`
	ClusterVersion string `protobuf:"bytes,3,opt,name=cluster_version,json=clusterVersion,proto3" json:"cluster_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *StoreHeartbeatResponse) Reset() { *m = StoreHeartbeatResponse{} }
func (m *StoreHeartbeatResponse) String() string { return proto.CompactTextString(m) }
func (*StoreHeartbeatResponse) ProtoMessage() {}
func (*StoreHeartbeatResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{53}
}
func (m *StoreHeartbeatResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *StoreHeartbeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_StoreHeartbeatResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *StoreHeartbeatResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StoreHeartbeatResponse.Merge(dst, src)
}
func (m *StoreHeartbeatResponse) XXX_Size() int {
	return m.Size()
}
func (m *StoreHeartbeatResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_StoreHeartbeatResponse.DiscardUnknown(m)
}
var xxx_messageInfo_StoreHeartbeatResponse proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *StoreHeartbeatResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *StoreHeartbeatResponse) GetReplicationStatus() *replication_modepb.ReplicationStatus {
	if m != nil {
		return m.ReplicationStatus
	}
	return nil
}
func (m *StoreHeartbeatResponse) GetClusterVersion() string {
	if m != nil {
		return m.ClusterVersion
	}
	return ""
}
// ScatterRegionRequest asks PD to scatter one or more regions; RegionsId
// supersedes the deprecated single RegionId field. Field comments below come
// from the source .proto.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type ScatterRegionRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` // Deprecated: Do not use.
	// PD will use these region information if it can't find the region.
	// For example, the region is just split and hasn't report to PD yet.
	Region *metapb.Region `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	Leader *metapb.Peer `protobuf:"bytes,4,opt,name=leader" json:"leader,omitempty"`
	// If group is defined, the regions with the same group would be scattered as a whole group.
	// If not defined, the regions would be scattered in a cluster level.
	Group string `protobuf:"bytes,5,opt,name=group,proto3" json:"group,omitempty"`
	// If regions_id is defined, the region_id would be ignored.
	RegionsId []uint64 `protobuf:"varint,6,rep,packed,name=regions_id,json=regionsId" json:"regions_id,omitempty"`
	RetryLimit uint64 `protobuf:"varint,7,opt,name=retry_limit,json=retryLimit,proto3" json:"retry_limit,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *ScatterRegionRequest) Reset() { *m = ScatterRegionRequest{} }
func (m *ScatterRegionRequest) String() string { return proto.CompactTextString(m) }
func (*ScatterRegionRequest) ProtoMessage() {}
func (*ScatterRegionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{54}
}
func (m *ScatterRegionRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ScatterRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ScatterRegionRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ScatterRegionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ScatterRegionRequest.Merge(dst, src)
}
func (m *ScatterRegionRequest) XXX_Size() int {
	return m.Size()
}
func (m *ScatterRegionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ScatterRegionRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ScatterRegionRequest proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *ScatterRegionRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
// Deprecated: Do not use.
func (m *ScatterRegionRequest) GetRegionId() uint64 {
	if m != nil {
		return m.RegionId
	}
	return 0
}
func (m *ScatterRegionRequest) GetRegion() *metapb.Region {
	if m != nil {
		return m.Region
	}
	return nil
}
func (m *ScatterRegionRequest) GetLeader() *metapb.Peer {
	if m != nil {
		return m.Leader
	}
	return nil
}
func (m *ScatterRegionRequest) GetGroup() string {
	if m != nil {
		return m.Group
	}
	return ""
}
func (m *ScatterRegionRequest) GetRegionsId() []uint64 {
	if m != nil {
		return m.RegionsId
	}
	return nil
}
func (m *ScatterRegionRequest) GetRetryLimit() uint64 {
	if m != nil {
		return m.RetryLimit
	}
	return 0
}
// ScatterRegionResponse reports the completion percentage of a scatter request.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type ScatterRegionResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	FinishedPercentage uint64 `protobuf:"varint,2,opt,name=finished_percentage,json=finishedPercentage,proto3" json:"finished_percentage,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *ScatterRegionResponse) Reset() { *m = ScatterRegionResponse{} }
func (m *ScatterRegionResponse) String() string { return proto.CompactTextString(m) }
func (*ScatterRegionResponse) ProtoMessage() {}
func (*ScatterRegionResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{55}
}
func (m *ScatterRegionResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ScatterRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ScatterRegionResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ScatterRegionResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ScatterRegionResponse.Merge(dst, src)
}
func (m *ScatterRegionResponse) XXX_Size() int {
	return m.Size()
}
func (m *ScatterRegionResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ScatterRegionResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ScatterRegionResponse proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *ScatterRegionResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *ScatterRegionResponse) GetFinishedPercentage() uint64 {
	if m != nil {
		return m.FinishedPercentage
	}
	return 0
}
// GetGCSafePointRequest is a header-only request for the current GC safe point.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type GetGCSafePointRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *GetGCSafePointRequest) Reset() { *m = GetGCSafePointRequest{} }
func (m *GetGCSafePointRequest) String() string { return proto.CompactTextString(m) }
func (*GetGCSafePointRequest) ProtoMessage() {}
func (*GetGCSafePointRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{56}
}
func (m *GetGCSafePointRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetGCSafePointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetGCSafePointRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GetGCSafePointRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetGCSafePointRequest.Merge(dst, src)
}
func (m *GetGCSafePointRequest) XXX_Size() int {
	return m.Size()
}
func (m *GetGCSafePointRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetGCSafePointRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetGCSafePointRequest proto.InternalMessageInfo

// Nil-safe field accessor.
func (m *GetGCSafePointRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
// GetGCSafePointResponse returns the current GC safe point (a TSO-like uint64).
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type GetGCSafePointResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *GetGCSafePointResponse) Reset() { *m = GetGCSafePointResponse{} }
func (m *GetGCSafePointResponse) String() string { return proto.CompactTextString(m) }
func (*GetGCSafePointResponse) ProtoMessage() {}
func (*GetGCSafePointResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{57}
}
func (m *GetGCSafePointResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetGCSafePointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetGCSafePointResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GetGCSafePointResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetGCSafePointResponse.Merge(dst, src)
}
func (m *GetGCSafePointResponse) XXX_Size() int {
	return m.Size()
}
func (m *GetGCSafePointResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_GetGCSafePointResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetGCSafePointResponse proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *GetGCSafePointResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *GetGCSafePointResponse) GetSafePoint() uint64 {
	if m != nil {
		return m.SafePoint
	}
	return 0
}
// UpdateGCSafePointRequest proposes a new GC safe point value.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type UpdateGCSafePointRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *UpdateGCSafePointRequest) Reset() { *m = UpdateGCSafePointRequest{} }
func (m *UpdateGCSafePointRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateGCSafePointRequest) ProtoMessage() {}
func (*UpdateGCSafePointRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{58}
}
func (m *UpdateGCSafePointRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *UpdateGCSafePointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_UpdateGCSafePointRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *UpdateGCSafePointRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateGCSafePointRequest.Merge(dst, src)
}
func (m *UpdateGCSafePointRequest) XXX_Size() int {
	return m.Size()
}
func (m *UpdateGCSafePointRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateGCSafePointRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UpdateGCSafePointRequest proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *UpdateGCSafePointRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *UpdateGCSafePointRequest) GetSafePoint() uint64 {
	if m != nil {
		return m.SafePoint
	}
	return 0
}
// UpdateGCSafePointResponse returns the safe point in effect after the update.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type UpdateGCSafePointResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	NewSafePoint uint64 `protobuf:"varint,2,opt,name=new_safe_point,json=newSafePoint,proto3" json:"new_safe_point,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *UpdateGCSafePointResponse) Reset() { *m = UpdateGCSafePointResponse{} }
func (m *UpdateGCSafePointResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateGCSafePointResponse) ProtoMessage() {}
func (*UpdateGCSafePointResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{59}
}
func (m *UpdateGCSafePointResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *UpdateGCSafePointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_UpdateGCSafePointResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *UpdateGCSafePointResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateGCSafePointResponse.Merge(dst, src)
}
func (m *UpdateGCSafePointResponse) XXX_Size() int {
	return m.Size()
}
func (m *UpdateGCSafePointResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateGCSafePointResponse.DiscardUnknown(m)
}
var xxx_messageInfo_UpdateGCSafePointResponse proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *UpdateGCSafePointResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *UpdateGCSafePointResponse) GetNewSafePoint() uint64 {
	if m != nil {
		return m.NewSafePoint
	}
	return 0
}
// UpdateServiceGCSafePointRequest registers/refreshes a per-service GC safe
// point identified by ServiceId, with a TTL in seconds (presumably — confirm
// against the .proto) and the proposed safe point.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type UpdateServiceGCSafePointRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	ServiceId []byte `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
	TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
	SafePoint uint64 `protobuf:"varint,4,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *UpdateServiceGCSafePointRequest) Reset() { *m = UpdateServiceGCSafePointRequest{} }
func (m *UpdateServiceGCSafePointRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateServiceGCSafePointRequest) ProtoMessage() {}
func (*UpdateServiceGCSafePointRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{60}
}
func (m *UpdateServiceGCSafePointRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *UpdateServiceGCSafePointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_UpdateServiceGCSafePointRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *UpdateServiceGCSafePointRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateServiceGCSafePointRequest.Merge(dst, src)
}
func (m *UpdateServiceGCSafePointRequest) XXX_Size() int {
	return m.Size()
}
func (m *UpdateServiceGCSafePointRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateServiceGCSafePointRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UpdateServiceGCSafePointRequest proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *UpdateServiceGCSafePointRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *UpdateServiceGCSafePointRequest) GetServiceId() []byte {
	if m != nil {
		return m.ServiceId
	}
	return nil
}
func (m *UpdateServiceGCSafePointRequest) GetTTL() int64 {
	if m != nil {
		return m.TTL
	}
	return 0
}
func (m *UpdateServiceGCSafePointRequest) GetSafePoint() uint64 {
	if m != nil {
		return m.SafePoint
	}
	return 0
}
// UpdateServiceGCSafePointResponse echoes the service id/TTL and reports the
// minimum safe point across all registered services.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type UpdateServiceGCSafePointResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	ServiceId []byte `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
	TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
	MinSafePoint uint64 `protobuf:"varint,4,opt,name=min_safe_point,json=minSafePoint,proto3" json:"min_safe_point,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *UpdateServiceGCSafePointResponse) Reset() { *m = UpdateServiceGCSafePointResponse{} }
func (m *UpdateServiceGCSafePointResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateServiceGCSafePointResponse) ProtoMessage() {}
func (*UpdateServiceGCSafePointResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{61}
}
func (m *UpdateServiceGCSafePointResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *UpdateServiceGCSafePointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_UpdateServiceGCSafePointResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *UpdateServiceGCSafePointResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateServiceGCSafePointResponse.Merge(dst, src)
}
func (m *UpdateServiceGCSafePointResponse) XXX_Size() int {
	return m.Size()
}
func (m *UpdateServiceGCSafePointResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateServiceGCSafePointResponse.DiscardUnknown(m)
}
var xxx_messageInfo_UpdateServiceGCSafePointResponse proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *UpdateServiceGCSafePointResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *UpdateServiceGCSafePointResponse) GetServiceId() []byte {
	if m != nil {
		return m.ServiceId
	}
	return nil
}
func (m *UpdateServiceGCSafePointResponse) GetTTL() int64 {
	if m != nil {
		return m.TTL
	}
	return 0
}
func (m *UpdateServiceGCSafePointResponse) GetMinSafePoint() uint64 {
	if m != nil {
		return m.MinSafePoint
	}
	return 0
}
// RegionStat holds per-region read/write throughput (bytes and keys) for the
// reporting period; used by SyncRegionResponse below.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type RegionStat struct {
	// Bytes read/written during this period.
	BytesWritten uint64 `protobuf:"varint,1,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"`
	BytesRead uint64 `protobuf:"varint,2,opt,name=bytes_read,json=bytesRead,proto3" json:"bytes_read,omitempty"`
	// Keys read/written during this period.
	KeysWritten uint64 `protobuf:"varint,3,opt,name=keys_written,json=keysWritten,proto3" json:"keys_written,omitempty"`
	KeysRead uint64 `protobuf:"varint,4,opt,name=keys_read,json=keysRead,proto3" json:"keys_read,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *RegionStat) Reset() { *m = RegionStat{} }
func (m *RegionStat) String() string { return proto.CompactTextString(m) }
func (*RegionStat) ProtoMessage() {}
func (*RegionStat) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{62}
}
func (m *RegionStat) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RegionStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RegionStat.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RegionStat) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RegionStat.Merge(dst, src)
}
func (m *RegionStat) XXX_Size() int {
	return m.Size()
}
func (m *RegionStat) XXX_DiscardUnknown() {
	xxx_messageInfo_RegionStat.DiscardUnknown(m)
}
var xxx_messageInfo_RegionStat proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *RegionStat) GetBytesWritten() uint64 {
	if m != nil {
		return m.BytesWritten
	}
	return 0
}
func (m *RegionStat) GetBytesRead() uint64 {
	if m != nil {
		return m.BytesRead
	}
	return 0
}
func (m *RegionStat) GetKeysWritten() uint64 {
	if m != nil {
		return m.KeysWritten
	}
	return 0
}
func (m *RegionStat) GetKeysRead() uint64 {
	if m != nil {
		return m.KeysRead
	}
	return 0
}
// SyncRegionRequest is sent by a follower PD to request region changes from
// the leader starting at StartIndex. Field comments below come from the .proto.
// NOTE(review): generated protobuf code — do not hand-edit; regenerate from the .proto.
type SyncRegionRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"`
	// the follower PD will use the start index to locate historical changes
	// that require synchronization.
	StartIndex uint64 `protobuf:"varint,3,opt,name=start_index,json=startIndex,proto3" json:"start_index,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *SyncRegionRequest) Reset() { *m = SyncRegionRequest{} }
func (m *SyncRegionRequest) String() string { return proto.CompactTextString(m) }
func (*SyncRegionRequest) ProtoMessage() {}
func (*SyncRegionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{63}
}
func (m *SyncRegionRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SyncRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SyncRegionRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SyncRegionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SyncRegionRequest.Merge(dst, src)
}
func (m *SyncRegionRequest) XXX_Size() int {
	return m.Size()
}
func (m *SyncRegionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SyncRegionRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SyncRegionRequest proto.InternalMessageInfo

// Nil-safe field accessors.
func (m *SyncRegionRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *SyncRegionRequest) GetMember() *Member {
	if m != nil {
		return m.Member
	}
	return nil
}
func (m *SyncRegionRequest) GetStartIndex() uint64 {
	if m != nil {
		return m.StartIndex
	}
	return 0
}
// SyncRegionResponse is the leader PD's reply on the region-sync stream: the
// batch of changed regions (with per-region stats and leaders) and the index
// of the first record in the batch.
type SyncRegionResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// the leader PD will send the repsonds include
	// changed regions records and the index of the first record.
	Regions []*metapb.Region `protobuf:"bytes,2,rep,name=regions" json:"regions,omitempty"`
	StartIndex uint64 `protobuf:"varint,3,opt,name=start_index,json=startIndex,proto3" json:"start_index,omitempty"`
	RegionStats []*RegionStat `protobuf:"bytes,4,rep,name=region_stats,json=regionStats" json:"region_stats,omitempty"`
	RegionLeaders []*metapb.Peer `protobuf:"bytes,5,rep,name=region_leaders,json=regionLeaders" json:"region_leaders,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *SyncRegionResponse) Reset() { *m = SyncRegionResponse{} }
func (m *SyncRegionResponse) String() string { return proto.CompactTextString(m) }
func (*SyncRegionResponse) ProtoMessage() {}
func (*SyncRegionResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{64}
}
func (m *SyncRegionResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SyncRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SyncRegionResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SyncRegionResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SyncRegionResponse.Merge(dst, src)
}
func (m *SyncRegionResponse) XXX_Size() int {
	return m.Size()
}
func (m *SyncRegionResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SyncRegionResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SyncRegionResponse proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *SyncRegionResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *SyncRegionResponse) GetRegions() []*metapb.Region {
	if m != nil {
		return m.Regions
	}
	return nil
}
func (m *SyncRegionResponse) GetStartIndex() uint64 {
	if m != nil {
		return m.StartIndex
	}
	return 0
}
func (m *SyncRegionResponse) GetRegionStats() []*RegionStat {
	if m != nil {
		return m.RegionStats
	}
	return nil
}
func (m *SyncRegionResponse) GetRegionLeaders() []*metapb.Peer {
	if m != nil {
		return m.RegionLeaders
	}
	return nil
}
// GetOperatorRequest asks PD for the operator currently attached to the
// region identified by RegionId.
type GetOperatorRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *GetOperatorRequest) Reset() { *m = GetOperatorRequest{} }
func (m *GetOperatorRequest) String() string { return proto.CompactTextString(m) }
func (*GetOperatorRequest) ProtoMessage() {}
func (*GetOperatorRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{65}
}
func (m *GetOperatorRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetOperatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetOperatorRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GetOperatorRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetOperatorRequest.Merge(dst, src)
}
func (m *GetOperatorRequest) XXX_Size() int {
	return m.Size()
}
func (m *GetOperatorRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetOperatorRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetOperatorRequest proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *GetOperatorRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *GetOperatorRequest) GetRegionId() uint64 {
	if m != nil {
		return m.RegionId
	}
	return 0
}
// GetOperatorResponse describes a region's operator: a human-readable Desc,
// its OperatorStatus, and a Kind byte string. Desc/Kind semantics are defined
// by the PD scheduler — confirm against the .proto / PD docs.
type GetOperatorResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
	Desc []byte `protobuf:"bytes,3,opt,name=desc,proto3" json:"desc,omitempty"`
	Status OperatorStatus `protobuf:"varint,4,opt,name=status,proto3,enum=pdpb.OperatorStatus" json:"status,omitempty"`
	Kind []byte `protobuf:"bytes,5,opt,name=kind,proto3" json:"kind,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *GetOperatorResponse) Reset() { *m = GetOperatorResponse{} }
func (m *GetOperatorResponse) String() string { return proto.CompactTextString(m) }
func (*GetOperatorResponse) ProtoMessage() {}
func (*GetOperatorResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{66}
}
func (m *GetOperatorResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetOperatorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetOperatorResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GetOperatorResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetOperatorResponse.Merge(dst, src)
}
func (m *GetOperatorResponse) XXX_Size() int {
	return m.Size()
}
func (m *GetOperatorResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_GetOperatorResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetOperatorResponse proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver. Note the
// Status getter's zero value is OperatorStatus_SUCCESS (enum value 0).
func (m *GetOperatorResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *GetOperatorResponse) GetRegionId() uint64 {
	if m != nil {
		return m.RegionId
	}
	return 0
}
func (m *GetOperatorResponse) GetDesc() []byte {
	if m != nil {
		return m.Desc
	}
	return nil
}
func (m *GetOperatorResponse) GetStatus() OperatorStatus {
	if m != nil {
		return m.Status
	}
	return OperatorStatus_SUCCESS
}
func (m *GetOperatorResponse) GetKind() []byte {
	if m != nil {
		return m.Kind
	}
	return nil
}
// SyncMaxTSRequest proposes a max timestamp to synchronize; when SkipCheck is
// set the receiver writes MaxTs without checking whether it is bigger (see
// the field comment below).
type SyncMaxTSRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	MaxTs *Timestamp `protobuf:"bytes,2,opt,name=max_ts,json=maxTs" json:"max_ts,omitempty"`
	// If skip_check is true, the sync will try to write the max_ts without checking whether it's bigger.
	SkipCheck bool `protobuf:"varint,3,opt,name=skip_check,json=skipCheck,proto3" json:"skip_check,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *SyncMaxTSRequest) Reset() { *m = SyncMaxTSRequest{} }
func (m *SyncMaxTSRequest) String() string { return proto.CompactTextString(m) }
func (*SyncMaxTSRequest) ProtoMessage() {}
func (*SyncMaxTSRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{67}
}
func (m *SyncMaxTSRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SyncMaxTSRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SyncMaxTSRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SyncMaxTSRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SyncMaxTSRequest.Merge(dst, src)
}
func (m *SyncMaxTSRequest) XXX_Size() int {
	return m.Size()
}
func (m *SyncMaxTSRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SyncMaxTSRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SyncMaxTSRequest proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *SyncMaxTSRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *SyncMaxTSRequest) GetMaxTs() *Timestamp {
	if m != nil {
		return m.MaxTs
	}
	return nil
}
func (m *SyncMaxTSRequest) GetSkipCheck() bool {
	if m != nil {
		return m.SkipCheck
	}
	return false
}
// SyncMaxTSResponse reports the receiver's max local timestamp and the list
// of DC names that were synced.
type SyncMaxTSResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	MaxLocalTs *Timestamp `protobuf:"bytes,2,opt,name=max_local_ts,json=maxLocalTs" json:"max_local_ts,omitempty"`
	SyncedDcs []string `protobuf:"bytes,3,rep,name=synced_dcs,json=syncedDcs" json:"synced_dcs,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *SyncMaxTSResponse) Reset() { *m = SyncMaxTSResponse{} }
func (m *SyncMaxTSResponse) String() string { return proto.CompactTextString(m) }
func (*SyncMaxTSResponse) ProtoMessage() {}
func (*SyncMaxTSResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{68}
}
func (m *SyncMaxTSResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SyncMaxTSResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SyncMaxTSResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SyncMaxTSResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SyncMaxTSResponse.Merge(dst, src)
}
func (m *SyncMaxTSResponse) XXX_Size() int {
	return m.Size()
}
func (m *SyncMaxTSResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SyncMaxTSResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SyncMaxTSResponse proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *SyncMaxTSResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *SyncMaxTSResponse) GetMaxLocalTs() *Timestamp {
	if m != nil {
		return m.MaxLocalTs
	}
	return nil
}
func (m *SyncMaxTSResponse) GetSyncedDcs() []string {
	if m != nil {
		return m.SyncedDcs
	}
	return nil
}
// SplitRegionsRequest asks PD to split regions at the given raw split keys,
// retrying up to RetryLimit times.
type SplitRegionsRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	SplitKeys [][]byte `protobuf:"bytes,2,rep,name=split_keys,json=splitKeys" json:"split_keys,omitempty"`
	RetryLimit uint64 `protobuf:"varint,3,opt,name=retry_limit,json=retryLimit,proto3" json:"retry_limit,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *SplitRegionsRequest) Reset() { *m = SplitRegionsRequest{} }
func (m *SplitRegionsRequest) String() string { return proto.CompactTextString(m) }
func (*SplitRegionsRequest) ProtoMessage() {}
func (*SplitRegionsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{69}
}
func (m *SplitRegionsRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SplitRegionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SplitRegionsRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SplitRegionsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitRegionsRequest.Merge(dst, src)
}
func (m *SplitRegionsRequest) XXX_Size() int {
	return m.Size()
}
func (m *SplitRegionsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitRegionsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SplitRegionsRequest proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *SplitRegionsRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *SplitRegionsRequest) GetSplitKeys() [][]byte {
	if m != nil {
		return m.SplitKeys
	}
	return nil
}
func (m *SplitRegionsRequest) GetRetryLimit() uint64 {
	if m != nil {
		return m.RetryLimit
	}
	return 0
}
// SplitRegionsResponse reports the split progress (FinishedPercentage) and
// the IDs of the regions produced.
type SplitRegionsResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	FinishedPercentage uint64 `protobuf:"varint,2,opt,name=finished_percentage,json=finishedPercentage,proto3" json:"finished_percentage,omitempty"`
	RegionsId []uint64 `protobuf:"varint,3,rep,packed,name=regions_id,json=regionsId" json:"regions_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *SplitRegionsResponse) Reset() { *m = SplitRegionsResponse{} }
func (m *SplitRegionsResponse) String() string { return proto.CompactTextString(m) }
func (*SplitRegionsResponse) ProtoMessage() {}
func (*SplitRegionsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{70}
}
func (m *SplitRegionsResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SplitRegionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SplitRegionsResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *SplitRegionsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitRegionsResponse.Merge(dst, src)
}
func (m *SplitRegionsResponse) XXX_Size() int {
	return m.Size()
}
func (m *SplitRegionsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitRegionsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SplitRegionsResponse proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *SplitRegionsResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *SplitRegionsResponse) GetFinishedPercentage() uint64 {
	if m != nil {
		return m.FinishedPercentage
	}
	return 0
}
func (m *SplitRegionsResponse) GetRegionsId() []uint64 {
	if m != nil {
		return m.RegionsId
	}
	return nil
}
// GetDCLocationInfoRequest asks for timestamp-oracle info about the named
// data-center location.
type GetDCLocationInfoRequest struct {
	Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	DcLocation string `protobuf:"bytes,2,opt,name=dc_location,json=dcLocation,proto3" json:"dc_location,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *GetDCLocationInfoRequest) Reset() { *m = GetDCLocationInfoRequest{} }
func (m *GetDCLocationInfoRequest) String() string { return proto.CompactTextString(m) }
func (*GetDCLocationInfoRequest) ProtoMessage() {}
func (*GetDCLocationInfoRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{71}
}
func (m *GetDCLocationInfoRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetDCLocationInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetDCLocationInfoRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GetDCLocationInfoRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetDCLocationInfoRequest.Merge(dst, src)
}
func (m *GetDCLocationInfoRequest) XXX_Size() int {
	return m.Size()
}
func (m *GetDCLocationInfoRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetDCLocationInfoRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetDCLocationInfoRequest proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *GetDCLocationInfoRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *GetDCLocationInfoRequest) GetDcLocation() string {
	if m != nil {
		return m.DcLocation
	}
	return ""
}
// GetDCLocationInfoResponse carries the DC location's suffix sign and,
// optionally, a max timestamp (sent when the number of max suffix bits
// changes — see the field comments below).
type GetDCLocationInfoResponse struct {
	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// suffix sign
	Suffix int32 `protobuf:"varint,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
	// max_ts will be included into this response if PD leader think the receiver needs,
	// which it's set when the number of the max suffix bits changes.
	MaxTs *Timestamp `protobuf:"bytes,3,opt,name=max_ts,json=maxTs" json:"max_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *GetDCLocationInfoResponse) Reset() { *m = GetDCLocationInfoResponse{} }
func (m *GetDCLocationInfoResponse) String() string { return proto.CompactTextString(m) }
func (*GetDCLocationInfoResponse) ProtoMessage() {}
func (*GetDCLocationInfoResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{72}
}
func (m *GetDCLocationInfoResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetDCLocationInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetDCLocationInfoResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GetDCLocationInfoResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetDCLocationInfoResponse.Merge(dst, src)
}
func (m *GetDCLocationInfoResponse) XXX_Size() int {
	return m.Size()
}
func (m *GetDCLocationInfoResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_GetDCLocationInfoResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetDCLocationInfoResponse proto.InternalMessageInfo
// Nil-safe getters: each returns the zero value on a nil receiver.
func (m *GetDCLocationInfoResponse) GetHeader() *ResponseHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
func (m *GetDCLocationInfoResponse) GetSuffix() int32 {
	if m != nil {
		return m.Suffix
	}
	return 0
}
func (m *GetDCLocationInfoResponse) GetMaxTs() *Timestamp {
	if m != nil {
		return m.MaxTs
	}
	return nil
}
// QueryStats holds one uint64 counter per query kind; the field names mirror
// the QueryKind enum registered in init below. Presumably cumulative counts —
// confirm against the pdpb.proto definition.
type QueryStats struct {
	GC uint64 `protobuf:"varint,1,opt,name=GC,proto3" json:"GC,omitempty"`
	Get uint64 `protobuf:"varint,2,opt,name=Get,proto3" json:"Get,omitempty"`
	Scan uint64 `protobuf:"varint,3,opt,name=Scan,proto3" json:"Scan,omitempty"`
	Coprocessor uint64 `protobuf:"varint,4,opt,name=Coprocessor,proto3" json:"Coprocessor,omitempty"`
	Delete uint64 `protobuf:"varint,5,opt,name=Delete,proto3" json:"Delete,omitempty"`
	DeleteRange uint64 `protobuf:"varint,6,opt,name=DeleteRange,proto3" json:"DeleteRange,omitempty"`
	Put uint64 `protobuf:"varint,7,opt,name=Put,proto3" json:"Put,omitempty"`
	Prewrite uint64 `protobuf:"varint,8,opt,name=Prewrite,proto3" json:"Prewrite,omitempty"`
	AcquirePessimisticLock uint64 `protobuf:"varint,9,opt,name=AcquirePessimisticLock,proto3" json:"AcquirePessimisticLock,omitempty"`
	Commit uint64 `protobuf:"varint,10,opt,name=Commit,proto3" json:"Commit,omitempty"`
	Rollback uint64 `protobuf:"varint,11,opt,name=Rollback,proto3" json:"Rollback,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Standard generated plumbing: Reset/String/ProtoMessage/Descriptor plus the
// XXX_* hooks that delegate to the proto runtime's table-driven codec.
func (m *QueryStats) Reset() { *m = QueryStats{} }
func (m *QueryStats) String() string { return proto.CompactTextString(m) }
func (*QueryStats) ProtoMessage() {}
func (*QueryStats) Descriptor() ([]byte, []int) {
	return fileDescriptor_pdpb_4500a4488ac1eb3b, []int{73}
}
func (m *QueryStats) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *QueryStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_QueryStats.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *QueryStats) XXX_Merge(src proto.Message) {
	xxx_messageInfo_QueryStats.Merge(dst, src)
}
func (m *QueryStats) XXX_Size() int {
	return m.Size()
}
func (m *QueryStats) XXX_DiscardUnknown() {
	xxx_messageInfo_QueryStats.DiscardUnknown(m)
}
var xxx_messageInfo_QueryStats proto.InternalMessageInfo
// Nil-safe getters: each returns 0 on a nil receiver.
func (m *QueryStats) GetGC() uint64 {
	if m != nil {
		return m.GC
	}
	return 0
}
func (m *QueryStats) GetGet() uint64 {
	if m != nil {
		return m.Get
	}
	return 0
}
func (m *QueryStats) GetScan() uint64 {
	if m != nil {
		return m.Scan
	}
	return 0
}
func (m *QueryStats) GetCoprocessor() uint64 {
	if m != nil {
		return m.Coprocessor
	}
	return 0
}
func (m *QueryStats) GetDelete() uint64 {
	if m != nil {
		return m.Delete
	}
	return 0
}
func (m *QueryStats) GetDeleteRange() uint64 {
	if m != nil {
		return m.DeleteRange
	}
	return 0
}
func (m *QueryStats) GetPut() uint64 {
	if m != nil {
		return m.Put
	}
	return 0
}
func (m *QueryStats) GetPrewrite() uint64 {
	if m != nil {
		return m.Prewrite
	}
	return 0
}
func (m *QueryStats) GetAcquirePessimisticLock() uint64 {
	if m != nil {
		return m.AcquirePessimisticLock
	}
	return 0
}
func (m *QueryStats) GetCommit() uint64 {
	if m != nil {
		return m.Commit
	}
	return 0
}
func (m *QueryStats) GetRollback() uint64 {
	if m != nil {
		return m.Rollback
	}
	return 0
}
// init registers every pdpb message type, the one map type, and the four
// enums with the proto runtime under their fully-qualified proto names.
// The names and the set of registrations must stay in sync with pdpb.proto;
// do not hand-edit — regenerate instead.
func init() {
	proto.RegisterType((*RequestHeader)(nil), "pdpb.RequestHeader")
	proto.RegisterType((*ResponseHeader)(nil), "pdpb.ResponseHeader")
	proto.RegisterType((*Error)(nil), "pdpb.Error")
	proto.RegisterType((*TsoRequest)(nil), "pdpb.TsoRequest")
	proto.RegisterType((*Timestamp)(nil), "pdpb.Timestamp")
	proto.RegisterType((*TsoResponse)(nil), "pdpb.TsoResponse")
	proto.RegisterType((*BootstrapRequest)(nil), "pdpb.BootstrapRequest")
	proto.RegisterType((*BootstrapResponse)(nil), "pdpb.BootstrapResponse")
	proto.RegisterType((*IsBootstrappedRequest)(nil), "pdpb.IsBootstrappedRequest")
	proto.RegisterType((*IsBootstrappedResponse)(nil), "pdpb.IsBootstrappedResponse")
	proto.RegisterType((*AllocIDRequest)(nil), "pdpb.AllocIDRequest")
	proto.RegisterType((*AllocIDResponse)(nil), "pdpb.AllocIDResponse")
	proto.RegisterType((*GetStoreRequest)(nil), "pdpb.GetStoreRequest")
	proto.RegisterType((*GetStoreResponse)(nil), "pdpb.GetStoreResponse")
	proto.RegisterType((*PutStoreRequest)(nil), "pdpb.PutStoreRequest")
	proto.RegisterType((*PutStoreResponse)(nil), "pdpb.PutStoreResponse")
	proto.RegisterType((*GetAllStoresRequest)(nil), "pdpb.GetAllStoresRequest")
	proto.RegisterType((*GetAllStoresResponse)(nil), "pdpb.GetAllStoresResponse")
	proto.RegisterType((*GetRegionRequest)(nil), "pdpb.GetRegionRequest")
	proto.RegisterType((*GetRegionResponse)(nil), "pdpb.GetRegionResponse")
	proto.RegisterType((*GetRegionByIDRequest)(nil), "pdpb.GetRegionByIDRequest")
	proto.RegisterType((*ScanRegionsRequest)(nil), "pdpb.ScanRegionsRequest")
	proto.RegisterType((*Region)(nil), "pdpb.Region")
	proto.RegisterType((*ScanRegionsResponse)(nil), "pdpb.ScanRegionsResponse")
	proto.RegisterType((*GetClusterConfigRequest)(nil), "pdpb.GetClusterConfigRequest")
	proto.RegisterType((*GetClusterConfigResponse)(nil), "pdpb.GetClusterConfigResponse")
	proto.RegisterType((*PutClusterConfigRequest)(nil), "pdpb.PutClusterConfigRequest")
	proto.RegisterType((*PutClusterConfigResponse)(nil), "pdpb.PutClusterConfigResponse")
	proto.RegisterType((*Member)(nil), "pdpb.Member")
	proto.RegisterType((*GetMembersRequest)(nil), "pdpb.GetMembersRequest")
	proto.RegisterType((*GetMembersResponse)(nil), "pdpb.GetMembersResponse")
	proto.RegisterMapType((map[string]*Member)(nil), "pdpb.GetMembersResponse.TsoAllocatorLeadersEntry")
	proto.RegisterType((*PeerStats)(nil), "pdpb.PeerStats")
	proto.RegisterType((*RegionHeartbeatRequest)(nil), "pdpb.RegionHeartbeatRequest")
	proto.RegisterType((*ChangePeer)(nil), "pdpb.ChangePeer")
	proto.RegisterType((*ChangePeerV2)(nil), "pdpb.ChangePeerV2")
	proto.RegisterType((*TransferLeader)(nil), "pdpb.TransferLeader")
	proto.RegisterType((*Merge)(nil), "pdpb.Merge")
	proto.RegisterType((*SplitRegion)(nil), "pdpb.SplitRegion")
	proto.RegisterType((*RegionHeartbeatResponse)(nil), "pdpb.RegionHeartbeatResponse")
	proto.RegisterType((*AskSplitRequest)(nil), "pdpb.AskSplitRequest")
	proto.RegisterType((*AskSplitResponse)(nil), "pdpb.AskSplitResponse")
	proto.RegisterType((*ReportSplitRequest)(nil), "pdpb.ReportSplitRequest")
	proto.RegisterType((*ReportSplitResponse)(nil), "pdpb.ReportSplitResponse")
	proto.RegisterType((*AskBatchSplitRequest)(nil), "pdpb.AskBatchSplitRequest")
	proto.RegisterType((*SplitID)(nil), "pdpb.SplitID")
	proto.RegisterType((*AskBatchSplitResponse)(nil), "pdpb.AskBatchSplitResponse")
	proto.RegisterType((*ReportBatchSplitRequest)(nil), "pdpb.ReportBatchSplitRequest")
	proto.RegisterType((*ReportBatchSplitResponse)(nil), "pdpb.ReportBatchSplitResponse")
	proto.RegisterType((*TimeInterval)(nil), "pdpb.TimeInterval")
	proto.RegisterType((*RecordPair)(nil), "pdpb.RecordPair")
	proto.RegisterType((*PeerStat)(nil), "pdpb.PeerStat")
	proto.RegisterType((*StoreStats)(nil), "pdpb.StoreStats")
	proto.RegisterType((*StoreHeartbeatRequest)(nil), "pdpb.StoreHeartbeatRequest")
	proto.RegisterType((*StoreHeartbeatResponse)(nil), "pdpb.StoreHeartbeatResponse")
	proto.RegisterType((*ScatterRegionRequest)(nil), "pdpb.ScatterRegionRequest")
	proto.RegisterType((*ScatterRegionResponse)(nil), "pdpb.ScatterRegionResponse")
	proto.RegisterType((*GetGCSafePointRequest)(nil), "pdpb.GetGCSafePointRequest")
	proto.RegisterType((*GetGCSafePointResponse)(nil), "pdpb.GetGCSafePointResponse")
	proto.RegisterType((*UpdateGCSafePointRequest)(nil), "pdpb.UpdateGCSafePointRequest")
	proto.RegisterType((*UpdateGCSafePointResponse)(nil), "pdpb.UpdateGCSafePointResponse")
	proto.RegisterType((*UpdateServiceGCSafePointRequest)(nil), "pdpb.UpdateServiceGCSafePointRequest")
	proto.RegisterType((*UpdateServiceGCSafePointResponse)(nil), "pdpb.UpdateServiceGCSafePointResponse")
	proto.RegisterType((*RegionStat)(nil), "pdpb.RegionStat")
	proto.RegisterType((*SyncRegionRequest)(nil), "pdpb.SyncRegionRequest")
	proto.RegisterType((*SyncRegionResponse)(nil), "pdpb.SyncRegionResponse")
	proto.RegisterType((*GetOperatorRequest)(nil), "pdpb.GetOperatorRequest")
	proto.RegisterType((*GetOperatorResponse)(nil), "pdpb.GetOperatorResponse")
	proto.RegisterType((*SyncMaxTSRequest)(nil), "pdpb.SyncMaxTSRequest")
	proto.RegisterType((*SyncMaxTSResponse)(nil), "pdpb.SyncMaxTSResponse")
	proto.RegisterType((*SplitRegionsRequest)(nil), "pdpb.SplitRegionsRequest")
	proto.RegisterType((*SplitRegionsResponse)(nil), "pdpb.SplitRegionsResponse")
	proto.RegisterType((*GetDCLocationInfoRequest)(nil), "pdpb.GetDCLocationInfoRequest")
	proto.RegisterType((*GetDCLocationInfoResponse)(nil), "pdpb.GetDCLocationInfoResponse")
	proto.RegisterType((*QueryStats)(nil), "pdpb.QueryStats")
	proto.RegisterEnum("pdpb.ErrorType", ErrorType_name, ErrorType_value)
	proto.RegisterEnum("pdpb.CheckPolicy", CheckPolicy_name, CheckPolicy_value)
	proto.RegisterEnum("pdpb.OperatorStatus", OperatorStatus_name, OperatorStatus_value)
	proto.RegisterEnum("pdpb.QueryKind", QueryKind_name, QueryKind_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// (Referencing grpc.SupportPackageIsVersion4 fails to compile against
// older grpc-go releases that do not declare it.)
const _ = grpc.SupportPackageIsVersion4
// Client API for PD service
//
// PDClient is the generated client-side interface for the pdpb.PD gRPC
// service. Most RPCs are unary; Tso, RegionHeartbeat and SyncRegions are
// bidirectional streams (their methods return stream handles instead of
// taking a request message).
type PDClient interface {
	// GetMembers get the member list of this cluster. It does not require
	// the cluster_id in request matchs the id of this cluster.
	GetMembers(ctx context.Context, in *GetMembersRequest, opts ...grpc.CallOption) (*GetMembersResponse, error)
	// Tso opens a bidirectional TsoRequest/TsoResponse stream.
	Tso(ctx context.Context, opts ...grpc.CallOption) (PD_TsoClient, error)
	Bootstrap(ctx context.Context, in *BootstrapRequest, opts ...grpc.CallOption) (*BootstrapResponse, error)
	IsBootstrapped(ctx context.Context, in *IsBootstrappedRequest, opts ...grpc.CallOption) (*IsBootstrappedResponse, error)
	AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error)
	GetStore(ctx context.Context, in *GetStoreRequest, opts ...grpc.CallOption) (*GetStoreResponse, error)
	PutStore(ctx context.Context, in *PutStoreRequest, opts ...grpc.CallOption) (*PutStoreResponse, error)
	GetAllStores(ctx context.Context, in *GetAllStoresRequest, opts ...grpc.CallOption) (*GetAllStoresResponse, error)
	StoreHeartbeat(ctx context.Context, in *StoreHeartbeatRequest, opts ...grpc.CallOption) (*StoreHeartbeatResponse, error)
	// RegionHeartbeat opens a bidirectional heartbeat stream.
	RegionHeartbeat(ctx context.Context, opts ...grpc.CallOption) (PD_RegionHeartbeatClient, error)
	GetRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error)
	GetPrevRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error)
	GetRegionByID(ctx context.Context, in *GetRegionByIDRequest, opts ...grpc.CallOption) (*GetRegionResponse, error)
	ScanRegions(ctx context.Context, in *ScanRegionsRequest, opts ...grpc.CallOption) (*ScanRegionsResponse, error)
	AskSplit(ctx context.Context, in *AskSplitRequest, opts ...grpc.CallOption) (*AskSplitResponse, error)
	ReportSplit(ctx context.Context, in *ReportSplitRequest, opts ...grpc.CallOption) (*ReportSplitResponse, error)
	AskBatchSplit(ctx context.Context, in *AskBatchSplitRequest, opts ...grpc.CallOption) (*AskBatchSplitResponse, error)
	ReportBatchSplit(ctx context.Context, in *ReportBatchSplitRequest, opts ...grpc.CallOption) (*ReportBatchSplitResponse, error)
	GetClusterConfig(ctx context.Context, in *GetClusterConfigRequest, opts ...grpc.CallOption) (*GetClusterConfigResponse, error)
	PutClusterConfig(ctx context.Context, in *PutClusterConfigRequest, opts ...grpc.CallOption) (*PutClusterConfigResponse, error)
	ScatterRegion(ctx context.Context, in *ScatterRegionRequest, opts ...grpc.CallOption) (*ScatterRegionResponse, error)
	GetGCSafePoint(ctx context.Context, in *GetGCSafePointRequest, opts ...grpc.CallOption) (*GetGCSafePointResponse, error)
	UpdateGCSafePoint(ctx context.Context, in *UpdateGCSafePointRequest, opts ...grpc.CallOption) (*UpdateGCSafePointResponse, error)
	UpdateServiceGCSafePoint(ctx context.Context, in *UpdateServiceGCSafePointRequest, opts ...grpc.CallOption) (*UpdateServiceGCSafePointResponse, error)
	// SyncRegions opens a bidirectional region-sync stream.
	SyncRegions(ctx context.Context, opts ...grpc.CallOption) (PD_SyncRegionsClient, error)
	GetOperator(ctx context.Context, in *GetOperatorRequest, opts ...grpc.CallOption) (*GetOperatorResponse, error)
	SyncMaxTS(ctx context.Context, in *SyncMaxTSRequest, opts ...grpc.CallOption) (*SyncMaxTSResponse, error)
	SplitRegions(ctx context.Context, in *SplitRegionsRequest, opts ...grpc.CallOption) (*SplitRegionsResponse, error)
	GetDCLocationInfo(ctx context.Context, in *GetDCLocationInfoRequest, opts ...grpc.CallOption) (*GetDCLocationInfoResponse, error)
}
// pDClient is the concrete PDClient implementation; it issues every RPC over
// the wrapped *grpc.ClientConn.
type pDClient struct {
	cc *grpc.ClientConn
}
// NewPDClient wraps an established connection in a PDClient.
func NewPDClient(cc *grpc.ClientConn) PDClient {
	return &pDClient{cc}
}
// GetMembers performs the unary /pdpb.PD/GetMembers RPC.
func (c *pDClient) GetMembers(ctx context.Context, in *GetMembersRequest, opts ...grpc.CallOption) (*GetMembersResponse, error) {
	out := new(GetMembersResponse)
	err := c.cc.Invoke(ctx, "/pdpb.PD/GetMembers", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// Tso opens the bidirectional /pdpb.PD/Tso stream (stream descriptor 0) and
// returns a typed client-stream wrapper.
func (c *pDClient) Tso(ctx context.Context, opts ...grpc.CallOption) (PD_TsoClient, error) {
	stream, err := c.cc.NewStream(ctx, &_PD_serviceDesc.Streams[0], "/pdpb.PD/Tso", opts...)
	if err != nil {
		return nil, err
	}
	x := &pDTsoClient{stream}
	return x, nil
}
// PD_TsoClient is the typed client view of the Tso bidirectional stream:
// send TsoRequest messages, receive TsoResponse messages.
type PD_TsoClient interface {
	Send(*TsoRequest) error
	Recv() (*TsoResponse, error)
	grpc.ClientStream
}
// pDTsoClient adapts a raw grpc.ClientStream to PD_TsoClient.
type pDTsoClient struct {
	grpc.ClientStream
}
func (x *pDTsoClient) Send(m *TsoRequest) error {
	return x.ClientStream.SendMsg(m)
}
func (x *pDTsoClient) Recv() (*TsoResponse, error) {
	m := new(TsoResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
func (c *pDClient) Bootstrap(ctx context.Context, in *BootstrapRequest, opts ...grpc.CallOption) (*BootstrapResponse, error) {
out := new(BootstrapResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/Bootstrap", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) IsBootstrapped(ctx context.Context, in *IsBootstrappedRequest, opts ...grpc.CallOption) (*IsBootstrappedResponse, error) {
out := new(IsBootstrappedResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/IsBootstrapped", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error) {
out := new(AllocIDResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/AllocID", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) GetStore(ctx context.Context, in *GetStoreRequest, opts ...grpc.CallOption) (*GetStoreResponse, error) {
out := new(GetStoreResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetStore", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) PutStore(ctx context.Context, in *PutStoreRequest, opts ...grpc.CallOption) (*PutStoreResponse, error) {
out := new(PutStoreResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/PutStore", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) GetAllStores(ctx context.Context, in *GetAllStoresRequest, opts ...grpc.CallOption) (*GetAllStoresResponse, error) {
out := new(GetAllStoresResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetAllStores", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) StoreHeartbeat(ctx context.Context, in *StoreHeartbeatRequest, opts ...grpc.CallOption) (*StoreHeartbeatResponse, error) {
out := new(StoreHeartbeatResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/StoreHeartbeat", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// RegionHeartbeat opens the "/pdpb.PD/RegionHeartbeat" stream (descriptor
// index 1 in _PD_serviceDesc; bidirectional) and wraps it in a typed
// pDRegionHeartbeatClient. Same template as Tso above.
func (c *pDClient) RegionHeartbeat(ctx context.Context, opts ...grpc.CallOption) (PD_RegionHeartbeatClient, error) {
	stream, err := c.cc.NewStream(ctx, &_PD_serviceDesc.Streams[1], "/pdpb.PD/RegionHeartbeat", opts...)
	if err != nil {
		return nil, err
	}
	x := &pDRegionHeartbeatClient{stream}
	return x, nil
}
type PD_RegionHeartbeatClient interface {
Send(*RegionHeartbeatRequest) error
Recv() (*RegionHeartbeatResponse, error)
grpc.ClientStream
}
type pDRegionHeartbeatClient struct {
grpc.ClientStream
}
func (x *pDRegionHeartbeatClient) Send(m *RegionHeartbeatRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *pDRegionHeartbeatClient) Recv() (*RegionHeartbeatResponse, error) {
m := new(RegionHeartbeatResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *pDClient) GetRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) {
out := new(GetRegionResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetRegion", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) GetPrevRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) {
out := new(GetRegionResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetPrevRegion", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) GetRegionByID(ctx context.Context, in *GetRegionByIDRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) {
out := new(GetRegionResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetRegionByID", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) ScanRegions(ctx context.Context, in *ScanRegionsRequest, opts ...grpc.CallOption) (*ScanRegionsResponse, error) {
out := new(ScanRegionsResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/ScanRegions", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Deprecated: Do not use.
func (c *pDClient) AskSplit(ctx context.Context, in *AskSplitRequest, opts ...grpc.CallOption) (*AskSplitResponse, error) {
out := new(AskSplitResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/AskSplit", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Deprecated: Do not use.
func (c *pDClient) ReportSplit(ctx context.Context, in *ReportSplitRequest, opts ...grpc.CallOption) (*ReportSplitResponse, error) {
out := new(ReportSplitResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/ReportSplit", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) AskBatchSplit(ctx context.Context, in *AskBatchSplitRequest, opts ...grpc.CallOption) (*AskBatchSplitResponse, error) {
out := new(AskBatchSplitResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/AskBatchSplit", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) ReportBatchSplit(ctx context.Context, in *ReportBatchSplitRequest, opts ...grpc.CallOption) (*ReportBatchSplitResponse, error) {
out := new(ReportBatchSplitResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/ReportBatchSplit", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) GetClusterConfig(ctx context.Context, in *GetClusterConfigRequest, opts ...grpc.CallOption) (*GetClusterConfigResponse, error) {
out := new(GetClusterConfigResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetClusterConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) PutClusterConfig(ctx context.Context, in *PutClusterConfigRequest, opts ...grpc.CallOption) (*PutClusterConfigResponse, error) {
out := new(PutClusterConfigResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/PutClusterConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) ScatterRegion(ctx context.Context, in *ScatterRegionRequest, opts ...grpc.CallOption) (*ScatterRegionResponse, error) {
out := new(ScatterRegionResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/ScatterRegion", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) GetGCSafePoint(ctx context.Context, in *GetGCSafePointRequest, opts ...grpc.CallOption) (*GetGCSafePointResponse, error) {
out := new(GetGCSafePointResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetGCSafePoint", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) UpdateGCSafePoint(ctx context.Context, in *UpdateGCSafePointRequest, opts ...grpc.CallOption) (*UpdateGCSafePointResponse, error) {
out := new(UpdateGCSafePointResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/UpdateGCSafePoint", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) UpdateServiceGCSafePoint(ctx context.Context, in *UpdateServiceGCSafePointRequest, opts ...grpc.CallOption) (*UpdateServiceGCSafePointResponse, error) {
out := new(UpdateServiceGCSafePointResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/UpdateServiceGCSafePoint", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// SyncRegions opens the "/pdpb.PD/SyncRegions" stream (descriptor index 2
// in _PD_serviceDesc; bidirectional) and wraps it in a typed
// pDSyncRegionsClient. Same template as Tso above.
func (c *pDClient) SyncRegions(ctx context.Context, opts ...grpc.CallOption) (PD_SyncRegionsClient, error) {
	stream, err := c.cc.NewStream(ctx, &_PD_serviceDesc.Streams[2], "/pdpb.PD/SyncRegions", opts...)
	if err != nil {
		return nil, err
	}
	x := &pDSyncRegionsClient{stream}
	return x, nil
}
type PD_SyncRegionsClient interface {
Send(*SyncRegionRequest) error
Recv() (*SyncRegionResponse, error)
grpc.ClientStream
}
type pDSyncRegionsClient struct {
grpc.ClientStream
}
func (x *pDSyncRegionsClient) Send(m *SyncRegionRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *pDSyncRegionsClient) Recv() (*SyncRegionResponse, error) {
m := new(SyncRegionResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *pDClient) GetOperator(ctx context.Context, in *GetOperatorRequest, opts ...grpc.CallOption) (*GetOperatorResponse, error) {
out := new(GetOperatorResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetOperator", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) SyncMaxTS(ctx context.Context, in *SyncMaxTSRequest, opts ...grpc.CallOption) (*SyncMaxTSResponse, error) {
out := new(SyncMaxTSResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/SyncMaxTS", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) SplitRegions(ctx context.Context, in *SplitRegionsRequest, opts ...grpc.CallOption) (*SplitRegionsResponse, error) {
out := new(SplitRegionsResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/SplitRegions", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pDClient) GetDCLocationInfo(ctx context.Context, in *GetDCLocationInfoRequest, opts ...grpc.CallOption) (*GetDCLocationInfoResponse, error) {
out := new(GetDCLocationInfoResponse)
err := c.cc.Invoke(ctx, "/pdpb.PD/GetDCLocationInfo", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for PD service
//
// PDServer is the interface a server implementation must satisfy and pass
// to RegisterPDServer. Unary RPCs take (context, request) and return
// (response, error); the three streaming RPCs (Tso, RegionHeartbeat,
// SyncRegions) instead receive a typed stream handle and block until the
// stream ends.
type PDServer interface {
	// GetMembers get the member list of this cluster. It does not require
	// the cluster_id in request matchs the id of this cluster.
	GetMembers(context.Context, *GetMembersRequest) (*GetMembersResponse, error)
	Tso(PD_TsoServer) error
	Bootstrap(context.Context, *BootstrapRequest) (*BootstrapResponse, error)
	IsBootstrapped(context.Context, *IsBootstrappedRequest) (*IsBootstrappedResponse, error)
	AllocID(context.Context, *AllocIDRequest) (*AllocIDResponse, error)
	GetStore(context.Context, *GetStoreRequest) (*GetStoreResponse, error)
	PutStore(context.Context, *PutStoreRequest) (*PutStoreResponse, error)
	GetAllStores(context.Context, *GetAllStoresRequest) (*GetAllStoresResponse, error)
	StoreHeartbeat(context.Context, *StoreHeartbeatRequest) (*StoreHeartbeatResponse, error)
	RegionHeartbeat(PD_RegionHeartbeatServer) error
	GetRegion(context.Context, *GetRegionRequest) (*GetRegionResponse, error)
	GetPrevRegion(context.Context, *GetRegionRequest) (*GetRegionResponse, error)
	GetRegionByID(context.Context, *GetRegionByIDRequest) (*GetRegionResponse, error)
	ScanRegions(context.Context, *ScanRegionsRequest) (*ScanRegionsResponse, error)
	AskSplit(context.Context, *AskSplitRequest) (*AskSplitResponse, error)
	ReportSplit(context.Context, *ReportSplitRequest) (*ReportSplitResponse, error)
	AskBatchSplit(context.Context, *AskBatchSplitRequest) (*AskBatchSplitResponse, error)
	ReportBatchSplit(context.Context, *ReportBatchSplitRequest) (*ReportBatchSplitResponse, error)
	GetClusterConfig(context.Context, *GetClusterConfigRequest) (*GetClusterConfigResponse, error)
	PutClusterConfig(context.Context, *PutClusterConfigRequest) (*PutClusterConfigResponse, error)
	ScatterRegion(context.Context, *ScatterRegionRequest) (*ScatterRegionResponse, error)
	GetGCSafePoint(context.Context, *GetGCSafePointRequest) (*GetGCSafePointResponse, error)
	UpdateGCSafePoint(context.Context, *UpdateGCSafePointRequest) (*UpdateGCSafePointResponse, error)
	UpdateServiceGCSafePoint(context.Context, *UpdateServiceGCSafePointRequest) (*UpdateServiceGCSafePointResponse, error)
	SyncRegions(PD_SyncRegionsServer) error
	GetOperator(context.Context, *GetOperatorRequest) (*GetOperatorResponse, error)
	SyncMaxTS(context.Context, *SyncMaxTSRequest) (*SyncMaxTSResponse, error)
	SplitRegions(context.Context, *SplitRegionsRequest) (*SplitRegionsResponse, error)
	GetDCLocationInfo(context.Context, *GetDCLocationInfoRequest) (*GetDCLocationInfoResponse, error)
}
// RegisterPDServer wires a PDServer implementation into the given
// grpc.Server using the generated _PD_serviceDesc method/stream tables.
func RegisterPDServer(s *grpc.Server, srv PDServer) {
	s.RegisterService(&_PD_serviceDesc, srv)
}
// _PD_GetMembers_Handler is the server-side dispatch shim for the unary
// GetMembers RPC: it decodes the request, then either calls the PDServer
// method directly or routes the call through the server's unary
// interceptor. Every unary handler below follows this exact template.
func _PD_GetMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetMembersRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(PDServer).GetMembers(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pdpb.PD/GetMembers",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(PDServer).GetMembers(ctx, req.(*GetMembersRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _PD_Tso_Handler adapts the raw grpc.ServerStream into the typed
// PD_TsoServer and hands it to the user's Tso implementation.
func _PD_Tso_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(PDServer).Tso(&pDTsoServer{stream})
}
// PD_TsoServer is the server-side view of the Tso stream: Recv requests,
// Send responses; direction is mirrored relative to PD_TsoClient.
type PD_TsoServer interface {
	Send(*TsoResponse) error
	Recv() (*TsoRequest, error)
	grpc.ServerStream
}
// pDTsoServer is the concrete PD_TsoServer wrapping the raw ServerStream.
type pDTsoServer struct {
	grpc.ServerStream
}
// Send writes one TsoResponse to the client via the untyped SendMsg.
func (x *pDTsoServer) Send(m *TsoResponse) error {
	return x.ServerStream.SendMsg(m)
}
// Recv blocks for the next TsoRequest from the client, decoding via the
// untyped RecvMsg into a freshly allocated message.
func (x *pDTsoServer) Recv() (*TsoRequest, error) {
	m := new(TsoRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
func _PD_Bootstrap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BootstrapRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).Bootstrap(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/Bootstrap",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).Bootstrap(ctx, req.(*BootstrapRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_IsBootstrapped_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(IsBootstrappedRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).IsBootstrapped(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/IsBootstrapped",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).IsBootstrapped(ctx, req.(*IsBootstrappedRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_AllocID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AllocIDRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).AllocID(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/AllocID",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).AllocID(ctx, req.(*AllocIDRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_GetStore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetStoreRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetStore(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetStore",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetStore(ctx, req.(*GetStoreRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_PutStore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PutStoreRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).PutStore(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/PutStore",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).PutStore(ctx, req.(*PutStoreRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_GetAllStores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetAllStoresRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetAllStores(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetAllStores",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetAllStores(ctx, req.(*GetAllStoresRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_StoreHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StoreHeartbeatRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).StoreHeartbeat(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/StoreHeartbeat",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).StoreHeartbeat(ctx, req.(*StoreHeartbeatRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_RegionHeartbeat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(PDServer).RegionHeartbeat(&pDRegionHeartbeatServer{stream})
}
type PD_RegionHeartbeatServer interface {
Send(*RegionHeartbeatResponse) error
Recv() (*RegionHeartbeatRequest, error)
grpc.ServerStream
}
type pDRegionHeartbeatServer struct {
grpc.ServerStream
}
func (x *pDRegionHeartbeatServer) Send(m *RegionHeartbeatResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *pDRegionHeartbeatServer) Recv() (*RegionHeartbeatRequest, error) {
m := new(RegionHeartbeatRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _PD_GetRegion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetRegionRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetRegion(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetRegion",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetRegion(ctx, req.(*GetRegionRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_GetPrevRegion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetRegionRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetPrevRegion(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetPrevRegion",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetPrevRegion(ctx, req.(*GetRegionRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_GetRegionByID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetRegionByIDRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetRegionByID(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetRegionByID",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetRegionByID(ctx, req.(*GetRegionByIDRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_ScanRegions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ScanRegionsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).ScanRegions(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/ScanRegions",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).ScanRegions(ctx, req.(*ScanRegionsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_AskSplit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AskSplitRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).AskSplit(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/AskSplit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).AskSplit(ctx, req.(*AskSplitRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_ReportSplit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportSplitRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).ReportSplit(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/ReportSplit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).ReportSplit(ctx, req.(*ReportSplitRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_AskBatchSplit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AskBatchSplitRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).AskBatchSplit(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/AskBatchSplit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).AskBatchSplit(ctx, req.(*AskBatchSplitRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_ReportBatchSplit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportBatchSplitRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).ReportBatchSplit(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/ReportBatchSplit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).ReportBatchSplit(ctx, req.(*ReportBatchSplitRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_GetClusterConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetClusterConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetClusterConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetClusterConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetClusterConfig(ctx, req.(*GetClusterConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_PutClusterConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PutClusterConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).PutClusterConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/PutClusterConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).PutClusterConfig(ctx, req.(*PutClusterConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_ScatterRegion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ScatterRegionRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).ScatterRegion(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/ScatterRegion",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).ScatterRegion(ctx, req.(*ScatterRegionRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_GetGCSafePoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetGCSafePointRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetGCSafePoint(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetGCSafePoint",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetGCSafePoint(ctx, req.(*GetGCSafePointRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_UpdateGCSafePoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateGCSafePointRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).UpdateGCSafePoint(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/UpdateGCSafePoint",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).UpdateGCSafePoint(ctx, req.(*UpdateGCSafePointRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_UpdateServiceGCSafePoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateServiceGCSafePointRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).UpdateServiceGCSafePoint(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/UpdateServiceGCSafePoint",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).UpdateServiceGCSafePoint(ctx, req.(*UpdateServiceGCSafePointRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_SyncRegions_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(PDServer).SyncRegions(&pDSyncRegionsServer{stream})
}
type PD_SyncRegionsServer interface {
Send(*SyncRegionResponse) error
Recv() (*SyncRegionRequest, error)
grpc.ServerStream
}
type pDSyncRegionsServer struct {
grpc.ServerStream
}
func (x *pDSyncRegionsServer) Send(m *SyncRegionResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *pDSyncRegionsServer) Recv() (*SyncRegionRequest, error) {
m := new(SyncRegionRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _PD_GetOperator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetOperatorRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetOperator(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetOperator",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetOperator(ctx, req.(*GetOperatorRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_SyncMaxTS_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SyncMaxTSRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).SyncMaxTS(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/SyncMaxTS",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).SyncMaxTS(ctx, req.(*SyncMaxTSRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_SplitRegions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SplitRegionsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).SplitRegions(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/SplitRegions",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).SplitRegions(ctx, req.(*SplitRegionsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PD_GetDCLocationInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetDCLocationInfoRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PDServer).GetDCLocationInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pdpb.PD/GetDCLocationInfo",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PDServer).GetDCLocationInfo(ctx, req.(*GetDCLocationInfoRequest))
}
return interceptor(ctx, in, info, handler)
}
// _PD_serviceDesc describes the pdpb.PD service for grpc.Server: the unary
// method dispatch table plus the three streams (Tso, RegionHeartbeat,
// SyncRegions, all declared bidirectional). RegisterPDServer passes this
// descriptor to RegisterService. Stream order here fixes the indices used
// by the client stream openers (Streams[0..2]).
var _PD_serviceDesc = grpc.ServiceDesc{
	ServiceName: "pdpb.PD",
	HandlerType: (*PDServer)(nil),
	// Unary methods, one entry per RPC, each pointing at its handler shim.
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetMembers",
			Handler:    _PD_GetMembers_Handler,
		},
		{
			MethodName: "Bootstrap",
			Handler:    _PD_Bootstrap_Handler,
		},
		{
			MethodName: "IsBootstrapped",
			Handler:    _PD_IsBootstrapped_Handler,
		},
		{
			MethodName: "AllocID",
			Handler:    _PD_AllocID_Handler,
		},
		{
			MethodName: "GetStore",
			Handler:    _PD_GetStore_Handler,
		},
		{
			MethodName: "PutStore",
			Handler:    _PD_PutStore_Handler,
		},
		{
			MethodName: "GetAllStores",
			Handler:    _PD_GetAllStores_Handler,
		},
		{
			MethodName: "StoreHeartbeat",
			Handler:    _PD_StoreHeartbeat_Handler,
		},
		{
			MethodName: "GetRegion",
			Handler:    _PD_GetRegion_Handler,
		},
		{
			MethodName: "GetPrevRegion",
			Handler:    _PD_GetPrevRegion_Handler,
		},
		{
			MethodName: "GetRegionByID",
			Handler:    _PD_GetRegionByID_Handler,
		},
		{
			MethodName: "ScanRegions",
			Handler:    _PD_ScanRegions_Handler,
		},
		{
			MethodName: "AskSplit",
			Handler:    _PD_AskSplit_Handler,
		},
		{
			MethodName: "ReportSplit",
			Handler:    _PD_ReportSplit_Handler,
		},
		{
			MethodName: "AskBatchSplit",
			Handler:    _PD_AskBatchSplit_Handler,
		},
		{
			MethodName: "ReportBatchSplit",
			Handler:    _PD_ReportBatchSplit_Handler,
		},
		{
			MethodName: "GetClusterConfig",
			Handler:    _PD_GetClusterConfig_Handler,
		},
		{
			MethodName: "PutClusterConfig",
			Handler:    _PD_PutClusterConfig_Handler,
		},
		{
			MethodName: "ScatterRegion",
			Handler:    _PD_ScatterRegion_Handler,
		},
		{
			MethodName: "GetGCSafePoint",
			Handler:    _PD_GetGCSafePoint_Handler,
		},
		{
			MethodName: "UpdateGCSafePoint",
			Handler:    _PD_UpdateGCSafePoint_Handler,
		},
		{
			MethodName: "UpdateServiceGCSafePoint",
			Handler:    _PD_UpdateServiceGCSafePoint_Handler,
		},
		{
			MethodName: "GetOperator",
			Handler:    _PD_GetOperator_Handler,
		},
		{
			MethodName: "SyncMaxTS",
			Handler:    _PD_SyncMaxTS_Handler,
		},
		{
			MethodName: "SplitRegions",
			Handler:    _PD_SplitRegions_Handler,
		},
		{
			MethodName: "GetDCLocationInfo",
			Handler:    _PD_GetDCLocationInfo_Handler,
		},
	},
	// Streaming RPCs; index order matters (see client NewStream calls).
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "Tso",
			Handler:       _PD_Tso_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
		{
			StreamName:    "RegionHeartbeat",
			Handler:       _PD_RegionHeartbeat_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
		{
			StreamName:    "SyncRegions",
			Handler:       _PD_SyncRegions_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "pdpb.proto",
}
// Marshal serializes m into a freshly allocated buffer sized exactly by
// m.Size(), delegating the actual encoding to MarshalTo. Every message's
// Marshal in this file follows this template.
func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire encoding of m into dAtA starting at offset 0
// and returns the number of bytes written. Fields are emitted in
// field-number order with precomputed tag bytes (0x8 = field 1 varint,
// 0x10 = field 2 varint); zero-valued fields are skipped, and any unknown
// fields preserved in XXX_unrecognized are appended verbatim at the end.
// The caller must size dAtA from m.Size() (see Marshal above).
func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.ClusterId != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.ClusterId))
	}
	if m.SenderId != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.SenderId))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.ClusterId != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ClusterId))
}
if m.Error != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Error.Size()))
n1, err := m.Error.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Error) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Error) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Type != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Type))
}
if len(m.Message) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.Message)))
i += copy(dAtA[i:], m.Message)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *TsoRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TsoRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n2, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
if m.Count != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Count))
}
if len(m.DcLocation) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.DcLocation)))
i += copy(dAtA[i:], m.DcLocation)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo writes the TSO timestamp in protobuf wire format into dAtA and
// returns the number of bytes written. All three fields are varints and are
// omitted when zero.
func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Physical != 0 {
		// Field 1, varint: tag byte 0x8 == 1<<3 | 0.
		dAtA[i] = 0x8
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.Physical))
	}
	if m.Logical != 0 {
		// Field 2, varint: tag byte 0x10 == 2<<3 | 0.
		dAtA[i] = 0x10
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.Logical))
	}
	if m.SuffixBits != 0 {
		// Field 3, varint: tag byte 0x18 == 3<<3 | 0.
		dAtA[i] = 0x18
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.SuffixBits))
	}
	if m.XXX_unrecognized != nil {
		// Round-trip unknown fields captured during unmarshal.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *TsoResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TsoResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n3, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n3
}
if m.Count != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Count))
}
if m.Timestamp != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Timestamp.Size()))
n4, err := m.Timestamp.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n4
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *BootstrapRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BootstrapRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n5, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n5
}
if m.Store != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Store.Size()))
n6, err := m.Store.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n6
}
if m.Region != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Region.Size()))
n7, err := m.Region.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n7
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *BootstrapResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BootstrapResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n8, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n8
}
if m.ReplicationStatus != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ReplicationStatus.Size()))
n9, err := m.ReplicationStatus.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n9
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *IsBootstrappedRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *IsBootstrappedRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n10, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n10
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *IsBootstrappedResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo writes m in protobuf wire format into dAtA and returns the
// number of bytes written. Demonstrates the embedded-message and bool
// encodings used throughout this file.
func (m *IsBootstrappedResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Header != nil {
		// Field 1, length-delimited embedded message: tag 0xa == 1<<3 | 2,
		// then the submessage's varint size and its serialized bytes.
		dAtA[i] = 0xa
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
		n11, err := m.Header.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n11
	}
	if m.Bootstrapped {
		// Field 2, varint bool: tag 0x10 == 2<<3 | 0, payload is a single
		// 0/1 byte. Only written when true (proto3 default elision).
		dAtA[i] = 0x10
		i++
		if m.Bootstrapped {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		// Round-trip unknown fields captured during unmarshal.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *AllocIDRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AllocIDRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n12, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n12
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *AllocIDResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AllocIDResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n13, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n13
}
if m.Id != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Id))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetStoreRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetStoreRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n14, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n14
}
if m.StoreId != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.StoreId))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetStoreResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetStoreResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n15, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n15
}
if m.Store != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Store.Size()))
n16, err := m.Store.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n16
}
if m.Stats != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Stats.Size()))
n17, err := m.Stats.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n17
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *PutStoreRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PutStoreRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n18, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n18
}
if m.Store != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Store.Size()))
n19, err := m.Store.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n19
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *PutStoreResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PutStoreResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n20, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n20
}
if m.ReplicationStatus != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ReplicationStatus.Size()))
n21, err := m.ReplicationStatus.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n21
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetAllStoresRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetAllStoresRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n22, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n22
}
if m.ExcludeTombstoneStores {
dAtA[i] = 0x10
i++
if m.ExcludeTombstoneStores {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetAllStoresResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo writes m in protobuf wire format into dAtA and returns the
// number of bytes written. Demonstrates the repeated-message encoding used
// throughout this file: the field tag is re-emitted before every element.
func (m *GetAllStoresResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Header != nil {
		// Field 1, length-delimited embedded message (tag 0xa == 1<<3 | 2).
		dAtA[i] = 0xa
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
		n23, err := m.Header.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n23
	}
	if len(m.Stores) > 0 {
		// Field 2, repeated message: each element gets its own
		// tag (0x12 == 2<<3 | 2), varint length, and payload.
		for _, msg := range m.Stores {
			dAtA[i] = 0x12
			i++
			i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		// Round-trip unknown fields captured during unmarshal.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *GetRegionRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetRegionRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n24, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n24
}
if len(m.RegionKey) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.RegionKey)))
i += copy(dAtA[i:], m.RegionKey)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetRegionResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetRegionResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n25, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n25
}
if m.Region != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Region.Size()))
n26, err := m.Region.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n26
}
if m.Leader != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Leader.Size()))
n27, err := m.Leader.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n27
}
if len(m.DownPeers) > 0 {
for _, msg := range m.DownPeers {
dAtA[i] = 0x2a
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.PendingPeers) > 0 {
for _, msg := range m.PendingPeers {
dAtA[i] = 0x32
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetRegionByIDRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetRegionByIDRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n28, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n28
}
if m.RegionId != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RegionId))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ScanRegionsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScanRegionsRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n29, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n29
}
if len(m.StartKey) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.StartKey)))
i += copy(dAtA[i:], m.StartKey)
}
if m.Limit != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Limit))
}
if len(m.EndKey) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.EndKey)))
i += copy(dAtA[i:], m.EndKey)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Region) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Region) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Region != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Region.Size()))
n30, err := m.Region.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n30
}
if m.Leader != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Leader.Size()))
n31, err := m.Leader.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n31
}
if len(m.DownPeers) > 0 {
for _, msg := range m.DownPeers {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.PendingPeers) > 0 {
for _, msg := range m.PendingPeers {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ScanRegionsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScanRegionsResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n32, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n32
}
if len(m.RegionMetas) > 0 {
for _, msg := range m.RegionMetas {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.Leaders) > 0 {
for _, msg := range m.Leaders {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.Regions) > 0 {
for _, msg := range m.Regions {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetClusterConfigRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetClusterConfigRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n33, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n33
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetClusterConfigResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetClusterConfigResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n34, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n34
}
if m.Cluster != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Cluster.Size()))
n35, err := m.Cluster.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n35
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *PutClusterConfigRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PutClusterConfigRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n36, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n36
}
if m.Cluster != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Cluster.Size()))
n37, err := m.Cluster.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n37
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *PutClusterConfigResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PutClusterConfigResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n38, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n38
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Member) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Member) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if m.MemberId != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.MemberId))
}
if len(m.PeerUrls) > 0 {
for _, s := range m.PeerUrls {
dAtA[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if len(m.ClientUrls) > 0 {
for _, s := range m.ClientUrls {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if m.LeaderPriority != 0 {
dAtA[i] = 0x28
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.LeaderPriority))
}
if len(m.DeployPath) > 0 {
dAtA[i] = 0x32
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.DeployPath)))
i += copy(dAtA[i:], m.DeployPath)
}
if len(m.BinaryVersion) > 0 {
dAtA[i] = 0x3a
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.BinaryVersion)))
i += copy(dAtA[i:], m.BinaryVersion)
}
if len(m.GitHash) > 0 {
dAtA[i] = 0x42
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.GitHash)))
i += copy(dAtA[i:], m.GitHash)
}
if len(m.DcLocation) > 0 {
dAtA[i] = 0x4a
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.DcLocation)))
i += copy(dAtA[i:], m.DcLocation)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetMembersRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetMembersRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n39, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n39
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetMembersResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetMembersResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n40, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n40
}
if len(m.Members) > 0 {
for _, msg := range m.Members {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.Leader != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Leader.Size()))
n41, err := m.Leader.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n41
}
if m.EtcdLeader != nil {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.EtcdLeader.Size()))
n42, err := m.EtcdLeader.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n42
}
if len(m.TsoAllocatorLeaders) > 0 {
for k, _ := range m.TsoAllocatorLeaders {
dAtA[i] = 0x2a
i++
v := m.TsoAllocatorLeaders[k]
msgSize := 0
if v != nil {
msgSize = v.Size()
msgSize += 1 + sovPdpb(uint64(msgSize))
}
mapSize := 1 + len(k) + sovPdpb(uint64(len(k))) + msgSize
i = encodeVarintPdpb(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
if v != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(v.Size()))
n43, err := v.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n43
}
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *PeerStats) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PeerStats) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Peer != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Peer.Size()))
n44, err := m.Peer.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n44
}
if m.DownSeconds != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.DownSeconds))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *RegionHeartbeatRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RegionHeartbeatRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n45, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n45
}
if m.Region != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Region.Size()))
n46, err := m.Region.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n46
}
if m.Leader != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Leader.Size()))
n47, err := m.Leader.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n47
}
if len(m.DownPeers) > 0 {
for _, msg := range m.DownPeers {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.PendingPeers) > 0 {
for _, msg := range m.PendingPeers {
dAtA[i] = 0x2a
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.BytesWritten != 0 {
dAtA[i] = 0x30
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.BytesWritten))
}
if m.BytesRead != 0 {
dAtA[i] = 0x38
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.BytesRead))
}
if m.KeysWritten != 0 {
dAtA[i] = 0x40
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.KeysWritten))
}
if m.KeysRead != 0 {
dAtA[i] = 0x48
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.KeysRead))
}
if m.ApproximateSize != 0 {
dAtA[i] = 0x50
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ApproximateSize))
}
if m.Interval != nil {
dAtA[i] = 0x62
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Interval.Size()))
n48, err := m.Interval.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n48
}
if m.ApproximateKeys != 0 {
dAtA[i] = 0x68
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ApproximateKeys))
}
if m.Term != 0 {
dAtA[i] = 0x70
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Term))
}
if m.ReplicationStatus != nil {
dAtA[i] = 0x7a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ReplicationStatus.Size()))
n49, err := m.ReplicationStatus.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n49
}
if m.QueryStats != nil {
dAtA[i] = 0x82
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.QueryStats.Size()))
n50, err := m.QueryStats.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n50
}
if m.CpuUsage != 0 {
dAtA[i] = 0x88
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.CpuUsage))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ChangePeer) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ChangePeer) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Peer != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Peer.Size()))
n51, err := m.Peer.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n51
}
if m.ChangeType != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ChangeType))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ChangePeerV2) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ChangePeerV2) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Changes) > 0 {
for _, msg := range m.Changes {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *TransferLeader) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TransferLeader) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Peer != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Peer.Size()))
n52, err := m.Peer.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n52
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Merge) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Merge) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Target != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Target.Size()))
n53, err := m.Target.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n53
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *SplitRegion) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo writes m in protobuf wire format into dAtA and returns the
// number of bytes written. Demonstrates the repeated-bytes encoding.
func (m *SplitRegion) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Policy != 0 {
		// Field 1, varint (enum value): tag byte 0x8 == 1<<3 | 0.
		dAtA[i] = 0x8
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.Policy))
	}
	if len(m.Keys) > 0 {
		// Field 2, repeated bytes: every element is independently
		// length-delimited with its own tag (0x12 == 2<<3 | 2).
		for _, b := range m.Keys {
			dAtA[i] = 0x12
			i++
			i = encodeVarintPdpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.XXX_unrecognized != nil {
		// Round-trip unknown fields captured during unmarshal.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *RegionHeartbeatResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *RegionHeartbeatResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n54, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n54
}
if m.ChangePeer != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ChangePeer.Size()))
n55, err := m.ChangePeer.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n55
}
if m.TransferLeader != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.TransferLeader.Size()))
n56, err := m.TransferLeader.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n56
}
if m.RegionId != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RegionId))
}
if m.RegionEpoch != nil {
dAtA[i] = 0x2a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RegionEpoch.Size()))
n57, err := m.RegionEpoch.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n57
}
if m.TargetPeer != nil {
dAtA[i] = 0x32
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.TargetPeer.Size()))
n58, err := m.TargetPeer.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n58
}
if m.Merge != nil {
dAtA[i] = 0x3a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Merge.Size()))
n59, err := m.Merge.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n59
}
if m.SplitRegion != nil {
dAtA[i] = 0x42
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.SplitRegion.Size()))
n60, err := m.SplitRegion.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n60
}
if m.ChangePeerV2 != nil {
dAtA[i] = 0x4a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ChangePeerV2.Size()))
n61, err := m.ChangePeerV2.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n61
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *AskSplitRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AskSplitRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n62, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n62
}
if m.Region != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Region.Size()))
n63, err := m.Region.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n63
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *AskSplitResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AskSplitResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n64, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n64
}
if m.NewRegionId != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.NewRegionId))
}
if len(m.NewPeerIds) > 0 {
dAtA66 := make([]byte, len(m.NewPeerIds)*10)
var j65 int
for _, num := range m.NewPeerIds {
for num >= 1<<7 {
dAtA66[j65] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j65++
}
dAtA66[j65] = uint8(num)
j65++
}
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(j65))
i += copy(dAtA[i:], dAtA66[:j65])
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ReportSplitRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReportSplitRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n67, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n67
}
if m.Left != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Left.Size()))
n68, err := m.Left.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n68
}
if m.Right != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Right.Size()))
n69, err := m.Right.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n69
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ReportSplitResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReportSplitResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n70, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n70
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *AskBatchSplitRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AskBatchSplitRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n71, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n71
}
if m.Region != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Region.Size()))
n72, err := m.Region.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n72
}
if m.SplitCount != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.SplitCount))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *SplitID) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SplitID) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.NewRegionId != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.NewRegionId))
}
if len(m.NewPeerIds) > 0 {
dAtA74 := make([]byte, len(m.NewPeerIds)*10)
var j73 int
for _, num := range m.NewPeerIds {
for num >= 1<<7 {
dAtA74[j73] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j73++
}
dAtA74[j73] = uint8(num)
j73++
}
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(j73))
i += copy(dAtA[i:], dAtA74[:j73])
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *AskBatchSplitResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AskBatchSplitResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n75, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n75
}
if len(m.Ids) > 0 {
for _, msg := range m.Ids {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ReportBatchSplitRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReportBatchSplitRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n76, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n76
}
if len(m.Regions) > 0 {
for _, msg := range m.Regions {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ReportBatchSplitResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReportBatchSplitResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n77, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n77
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
// Marshal encodes m into a freshly allocated buffer using the protobuf
// wire format and returns the encoded bytes. The buffer is sized up
// front via Size, then trimmed to the bytes actually written.
func (m *TimeInterval) Marshal() (dAtA []byte, err error) {
	dAtA = make([]byte, m.Size())
	var n int
	if n, err = m.MarshalTo(dAtA); err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the protobuf wire encoding of m into the start of
// dAtA (which the caller must size via m.Size()) and returns the number
// of bytes written. Zero-valued fields are omitted, per proto3 rules.
func (m *TimeInterval) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.StartTimestamp != 0 {
		dAtA[i] = 0x8 // tag: field 1, wire type 0 (varint)
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.StartTimestamp))
	}
	if m.EndTimestamp != 0 {
		dAtA[i] = 0x10 // tag: field 2, wire type 0 (varint)
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.EndTimestamp))
	}
	if m.XXX_unrecognized != nil {
		// Re-emit unknown fields captured during unmarshaling, verbatim.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *RecordPair) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo writes the protobuf wire encoding of m into the start of
// dAtA (which the caller must size via m.Size()) and returns the number
// of bytes written. Empty/zero fields are omitted, per proto3 rules.
func (m *RecordPair) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Key) > 0 {
		dAtA[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.Value != 0 {
		dAtA[i] = 0x10 // tag: field 2, wire type 0 (varint)
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.Value))
	}
	if m.XXX_unrecognized != nil {
		// Re-emit unknown fields captured during unmarshaling, verbatim.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *PeerStat) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PeerStat) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.RegionId != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RegionId))
}
if m.ReadKeys != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ReadKeys))
}
if m.ReadBytes != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ReadBytes))
}
if m.QueryStats != nil {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.QueryStats.Size()))
n78, err := m.QueryStats.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n78
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *StoreStats) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *StoreStats) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.StoreId != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.StoreId))
}
if m.Capacity != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Capacity))
}
if m.Available != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Available))
}
if m.RegionCount != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RegionCount))
}
if m.SendingSnapCount != 0 {
dAtA[i] = 0x28
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.SendingSnapCount))
}
if m.ReceivingSnapCount != 0 {
dAtA[i] = 0x30
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ReceivingSnapCount))
}
if m.StartTime != 0 {
dAtA[i] = 0x38
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.StartTime))
}
if m.ApplyingSnapCount != 0 {
dAtA[i] = 0x40
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ApplyingSnapCount))
}
if m.IsBusy {
dAtA[i] = 0x48
i++
if m.IsBusy {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.UsedSize != 0 {
dAtA[i] = 0x50
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.UsedSize))
}
if m.BytesWritten != 0 {
dAtA[i] = 0x58
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.BytesWritten))
}
if m.KeysWritten != 0 {
dAtA[i] = 0x60
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.KeysWritten))
}
if m.BytesRead != 0 {
dAtA[i] = 0x68
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.BytesRead))
}
if m.KeysRead != 0 {
dAtA[i] = 0x70
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.KeysRead))
}
if m.Interval != nil {
dAtA[i] = 0x7a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Interval.Size()))
n79, err := m.Interval.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n79
}
if len(m.CpuUsages) > 0 {
for _, msg := range m.CpuUsages {
dAtA[i] = 0x82
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.ReadIoRates) > 0 {
for _, msg := range m.ReadIoRates {
dAtA[i] = 0x8a
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.WriteIoRates) > 0 {
for _, msg := range m.WriteIoRates {
dAtA[i] = 0x92
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.OpLatencies) > 0 {
for _, msg := range m.OpLatencies {
dAtA[i] = 0x9a
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.PeerStats) > 0 {
for _, msg := range m.PeerStats {
dAtA[i] = 0xa2
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.QueryStats != nil {
dAtA[i] = 0xaa
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.QueryStats.Size()))
n80, err := m.QueryStats.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n80
}
if m.SlowScore != 0 {
dAtA[i] = 0xb0
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.SlowScore))
}
if len(m.DamagedRegionsId) > 0 {
dAtA82 := make([]byte, len(m.DamagedRegionsId)*10)
var j81 int
for _, num := range m.DamagedRegionsId {
for num >= 1<<7 {
dAtA82[j81] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j81++
}
dAtA82[j81] = uint8(num)
j81++
}
dAtA[i] = 0xba
i++
dAtA[i] = 0x1
i++
i = encodeVarintPdpb(dAtA, i, uint64(j81))
i += copy(dAtA[i:], dAtA82[:j81])
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *StoreHeartbeatRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *StoreHeartbeatRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n83, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n83
}
if m.Stats != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Stats.Size()))
n84, err := m.Stats.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n84
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *StoreHeartbeatResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *StoreHeartbeatResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n85, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n85
}
if m.ReplicationStatus != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.ReplicationStatus.Size()))
n86, err := m.ReplicationStatus.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n86
}
if len(m.ClusterVersion) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.ClusterVersion)))
i += copy(dAtA[i:], m.ClusterVersion)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ScatterRegionRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScatterRegionRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n87, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n87
}
if m.RegionId != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RegionId))
}
if m.Region != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Region.Size()))
n88, err := m.Region.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n88
}
if m.Leader != nil {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Leader.Size()))
n89, err := m.Leader.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n89
}
if len(m.Group) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.Group)))
i += copy(dAtA[i:], m.Group)
}
if len(m.RegionsId) > 0 {
dAtA91 := make([]byte, len(m.RegionsId)*10)
var j90 int
for _, num := range m.RegionsId {
for num >= 1<<7 {
dAtA91[j90] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j90++
}
dAtA91[j90] = uint8(num)
j90++
}
dAtA[i] = 0x32
i++
i = encodeVarintPdpb(dAtA, i, uint64(j90))
i += copy(dAtA[i:], dAtA91[:j90])
}
if m.RetryLimit != 0 {
dAtA[i] = 0x38
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RetryLimit))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *ScatterRegionResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScatterRegionResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n92, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n92
}
if m.FinishedPercentage != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.FinishedPercentage))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetGCSafePointRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetGCSafePointRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n93, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n93
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetGCSafePointResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetGCSafePointResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n94, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n94
}
if m.SafePoint != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.SafePoint))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *UpdateGCSafePointRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UpdateGCSafePointRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n95, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n95
}
if m.SafePoint != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.SafePoint))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *UpdateGCSafePointResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UpdateGCSafePointResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n96, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n96
}
if m.NewSafePoint != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.NewSafePoint))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *UpdateServiceGCSafePointRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UpdateServiceGCSafePointRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n97, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n97
}
if len(m.ServiceId) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.ServiceId)))
i += copy(dAtA[i:], m.ServiceId)
}
if m.TTL != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.TTL))
}
if m.SafePoint != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.SafePoint))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *UpdateServiceGCSafePointResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UpdateServiceGCSafePointResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n98, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n98
}
if len(m.ServiceId) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.ServiceId)))
i += copy(dAtA[i:], m.ServiceId)
}
if m.TTL != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.TTL))
}
if m.MinSafePoint != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.MinSafePoint))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *RegionStat) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo writes the protobuf wire encoding of m into the start of
// dAtA (which the caller must size via m.Size()) and returns the number
// of bytes written. All four counters are varint fields (wire type 0)
// and are omitted when zero, per proto3 rules.
func (m *RegionStat) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.BytesWritten != 0 {
		dAtA[i] = 0x8 // tag: field 1
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.BytesWritten))
	}
	if m.BytesRead != 0 {
		dAtA[i] = 0x10 // tag: field 2
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.BytesRead))
	}
	if m.KeysWritten != 0 {
		dAtA[i] = 0x18 // tag: field 3
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.KeysWritten))
	}
	if m.KeysRead != 0 {
		dAtA[i] = 0x20 // tag: field 4
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.KeysRead))
	}
	if m.XXX_unrecognized != nil {
		// Re-emit unknown fields captured during unmarshaling, verbatim.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *SyncRegionRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SyncRegionRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n99, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n99
}
if m.Member != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Member.Size()))
n100, err := m.Member.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n100
}
if m.StartIndex != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.StartIndex))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *SyncRegionResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SyncRegionResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n101, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n101
}
if len(m.Regions) > 0 {
for _, msg := range m.Regions {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.StartIndex != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.StartIndex))
}
if len(m.RegionStats) > 0 {
for _, msg := range m.RegionStats {
dAtA[i] = 0x22
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.RegionLeaders) > 0 {
for _, msg := range m.RegionLeaders {
dAtA[i] = 0x2a
i++
i = encodeVarintPdpb(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetOperatorRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo writes the protobuf wire encoding of m into the start of
// dAtA (which the caller must size via m.Size()) and returns the number
// of bytes written. Nil/zero fields are omitted, per proto3 rules.
func (m *GetOperatorRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Header != nil {
		// Embedded message: tag (field 1, wire type 2), then length, then body.
		dAtA[i] = 0xa
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
		n102, err := m.Header.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n102
	}
	if m.RegionId != 0 {
		dAtA[i] = 0x10 // tag: field 2, wire type 0 (varint)
		i++
		i = encodeVarintPdpb(dAtA, i, uint64(m.RegionId))
	}
	if m.XXX_unrecognized != nil {
		// Re-emit unknown fields captured during unmarshaling, verbatim.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
func (m *GetOperatorResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetOperatorResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n103, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n103
}
if m.RegionId != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RegionId))
}
if len(m.Desc) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.Desc)))
i += copy(dAtA[i:], m.Desc)
}
if m.Status != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Status))
}
if len(m.Kind) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.Kind)))
i += copy(dAtA[i:], m.Kind)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *SyncMaxTSRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SyncMaxTSRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n104, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n104
}
if m.MaxTs != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.MaxTs.Size()))
n105, err := m.MaxTs.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n105
}
if m.SkipCheck {
dAtA[i] = 0x18
i++
if m.SkipCheck {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *SyncMaxTSResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SyncMaxTSResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n106, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n106
}
if m.MaxLocalTs != nil {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.MaxLocalTs.Size()))
n107, err := m.MaxLocalTs.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n107
}
if len(m.SyncedDcs) > 0 {
for _, s := range m.SyncedDcs {
dAtA[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *SplitRegionsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SplitRegionsRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n108, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n108
}
if len(m.SplitKeys) > 0 {
for _, b := range m.SplitKeys {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(b)))
i += copy(dAtA[i:], b)
}
}
if m.RetryLimit != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.RetryLimit))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *SplitRegionsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SplitRegionsResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n109, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n109
}
if m.FinishedPercentage != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.FinishedPercentage))
}
if len(m.RegionsId) > 0 {
dAtA111 := make([]byte, len(m.RegionsId)*10)
var j110 int
for _, num := range m.RegionsId {
for num >= 1<<7 {
dAtA111[j110] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j110++
}
dAtA111[j110] = uint8(num)
j110++
}
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(j110))
i += copy(dAtA[i:], dAtA111[:j110])
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetDCLocationInfoRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetDCLocationInfoRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n112, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n112
}
if len(m.DcLocation) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintPdpb(dAtA, i, uint64(len(m.DcLocation)))
i += copy(dAtA[i:], m.DcLocation)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *GetDCLocationInfoResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GetDCLocationInfoResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Header.Size()))
n113, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n113
}
if m.Suffix != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Suffix))
}
if m.MaxTs != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.MaxTs.Size()))
n114, err := m.MaxTs.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n114
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *QueryStats) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryStats) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.GC != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.GC))
}
if m.Get != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Get))
}
if m.Scan != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Scan))
}
if m.Coprocessor != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Coprocessor))
}
if m.Delete != 0 {
dAtA[i] = 0x28
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Delete))
}
if m.DeleteRange != 0 {
dAtA[i] = 0x30
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.DeleteRange))
}
if m.Put != 0 {
dAtA[i] = 0x38
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Put))
}
if m.Prewrite != 0 {
dAtA[i] = 0x40
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Prewrite))
}
if m.AcquirePessimisticLock != 0 {
dAtA[i] = 0x48
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.AcquirePessimisticLock))
}
if m.Commit != 0 {
dAtA[i] = 0x50
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Commit))
}
if m.Rollback != 0 {
dAtA[i] = 0x58
i++
i = encodeVarintPdpb(dAtA, i, uint64(m.Rollback))
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
// encodeVarintPdpb writes v at dAtA[offset:] as a base-128 varint
// (7 payload bits per byte, MSB set on every byte except the last) and
// returns the offset just past the final byte written. The caller must
// ensure dAtA has room for up to 10 bytes.
func encodeVarintPdpb(dAtA []byte, offset int, v uint64) int {
	for ; v >= 0x80; v >>= 7 {
		// Low 7 bits of v with the continuation bit set.
		dAtA[offset] = uint8(v) | 0x80
		offset++
	}
	dAtA[offset] = uint8(v) // final byte: continuation bit clear
	return offset + 1
}
// Size returns the exact number of bytes the protobuf wire encoding of
// m will occupy; Marshal uses it to allocate the output buffer. The
// per-field cost mirrors MarshalTo: zero fields cost nothing.
func (m *RequestHeader) Size() (n int) {
	var l int
	_ = l
	if m.ClusterId != 0 {
		n += 1 + sovPdpb(uint64(m.ClusterId)) // 1 tag byte + varint payload
	}
	if m.SenderId != 0 {
		n += 1 + sovPdpb(uint64(m.SenderId)) // 1 tag byte + varint payload
	}
	if m.XXX_unrecognized != nil {
		// Unknown fields are re-emitted verbatim.
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the exact number of bytes the protobuf wire encoding of
// m will occupy; Marshal uses it to allocate the output buffer. The
// per-field cost mirrors MarshalTo: zero/nil fields cost nothing.
func (m *ResponseHeader) Size() (n int) {
	var l int
	_ = l
	if m.ClusterId != 0 {
		n += 1 + sovPdpb(uint64(m.ClusterId)) // 1 tag byte + varint payload
	}
	if m.Error != nil {
		// Embedded message: tag byte + length varint + message body.
		l = m.Error.Size()
		n += 1 + l + sovPdpb(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		// Unknown fields are re-emitted verbatim.
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the exact number of bytes the protobuf wire encoding of
// m will occupy; Marshal uses it to allocate the output buffer. The
// per-field cost mirrors MarshalTo: zero/empty fields cost nothing.
func (m *Error) Size() (n int) {
	var l int
	_ = l
	if m.Type != 0 {
		n += 1 + sovPdpb(uint64(m.Type)) // 1 tag byte + varint payload
	}
	l = len(m.Message)
	if l > 0 {
		// Length-delimited string: tag byte + length varint + bytes.
		n += 1 + l + sovPdpb(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		// Unknown fields are re-emitted verbatim.
		n += len(m.XXX_unrecognized)
	}
	return n
}
func (m *TsoRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Count != 0 {
n += 1 + sovPdpb(uint64(m.Count))
}
l = len(m.DcLocation)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Timestamp) Size() (n int) {
var l int
_ = l
if m.Physical != 0 {
n += 1 + sovPdpb(uint64(m.Physical))
}
if m.Logical != 0 {
n += 1 + sovPdpb(uint64(m.Logical))
}
if m.SuffixBits != 0 {
n += 1 + sovPdpb(uint64(m.SuffixBits))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *TsoResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Count != 0 {
n += 1 + sovPdpb(uint64(m.Count))
}
if m.Timestamp != nil {
l = m.Timestamp.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *BootstrapRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Store != nil {
l = m.Store.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Region != nil {
l = m.Region.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *BootstrapResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ReplicationStatus != nil {
l = m.ReplicationStatus.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *IsBootstrappedRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *IsBootstrappedResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Bootstrapped {
n += 2
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *AllocIDRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *AllocIDResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Id != 0 {
n += 1 + sovPdpb(uint64(m.Id))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetStoreRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.StoreId != 0 {
n += 1 + sovPdpb(uint64(m.StoreId))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetStoreResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Store != nil {
l = m.Store.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Stats != nil {
l = m.Stats.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *PutStoreRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Store != nil {
l = m.Store.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *PutStoreResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ReplicationStatus != nil {
l = m.ReplicationStatus.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetAllStoresRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ExcludeTombstoneStores {
n += 2
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetAllStoresResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.Stores) > 0 {
for _, e := range m.Stores {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetRegionRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.RegionKey)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetRegionResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Region != nil {
l = m.Region.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Leader != nil {
l = m.Leader.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.DownPeers) > 0 {
for _, e := range m.DownPeers {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if len(m.PendingPeers) > 0 {
for _, e := range m.PendingPeers {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetRegionByIDRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.RegionId != 0 {
n += 1 + sovPdpb(uint64(m.RegionId))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ScanRegionsRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.StartKey)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.Limit != 0 {
n += 1 + sovPdpb(uint64(m.Limit))
}
l = len(m.EndKey)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Region) Size() (n int) {
var l int
_ = l
if m.Region != nil {
l = m.Region.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Leader != nil {
l = m.Leader.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.DownPeers) > 0 {
for _, e := range m.DownPeers {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if len(m.PendingPeers) > 0 {
for _, e := range m.PendingPeers {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ScanRegionsResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.RegionMetas) > 0 {
for _, e := range m.RegionMetas {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if len(m.Leaders) > 0 {
for _, e := range m.Leaders {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if len(m.Regions) > 0 {
for _, e := range m.Regions {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetClusterConfigRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetClusterConfigResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Cluster != nil {
l = m.Cluster.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *PutClusterConfigRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Cluster != nil {
l = m.Cluster.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *PutClusterConfigResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Member) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.MemberId != 0 {
n += 1 + sovPdpb(uint64(m.MemberId))
}
if len(m.PeerUrls) > 0 {
for _, s := range m.PeerUrls {
l = len(s)
n += 1 + l + sovPdpb(uint64(l))
}
}
if len(m.ClientUrls) > 0 {
for _, s := range m.ClientUrls {
l = len(s)
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.LeaderPriority != 0 {
n += 1 + sovPdpb(uint64(m.LeaderPriority))
}
l = len(m.DeployPath)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.BinaryVersion)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.GitHash)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.DcLocation)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetMembersRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
// Size returns the number of bytes MarshalTo will write for m. Nested
// messages contribute tag + length varint + payload; repeated fields add
// that once per element.
//
// NOTE: machine-generated by protoc-gen-gogo from pdpb.proto; regenerate
// rather than editing by hand.
func (m *GetMembersResponse) Size() (n int) {
	var l int
	_ = l
	if m.Header != nil {
		l = m.Header.Size()
		n += 1 + l + sovPdpb(uint64(l))
	}
	if len(m.Members) > 0 {
		for _, e := range m.Members {
			l = e.Size()
			n += 1 + l + sovPdpb(uint64(l))
		}
	}
	if m.Leader != nil {
		l = m.Leader.Size()
		n += 1 + l + sovPdpb(uint64(l))
	}
	if m.EtcdLeader != nil {
		l = m.EtcdLeader.Size()
		n += 1 + l + sovPdpb(uint64(l))
	}
	if len(m.TsoAllocatorLeaders) > 0 {
		// Proto maps are encoded as repeated nested entry messages, each
		// holding a string key field and (when non-nil) a message value
		// field; size both the entry payload and its outer tag + length.
		for k, v := range m.TsoAllocatorLeaders {
			_ = k
			_ = v
			l = 0
			if v != nil {
				l = v.Size()
				l += 1 + sovPdpb(uint64(l))
			}
			mapEntrySize := 1 + len(k) + sovPdpb(uint64(len(k))) + l
			n += mapEntrySize + 1 + sovPdpb(uint64(mapEntrySize))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
func (m *PeerStats) Size() (n int) {
var l int
_ = l
if m.Peer != nil {
l = m.Peer.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.DownSeconds != 0 {
n += 1 + sovPdpb(uint64(m.DownSeconds))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *RegionHeartbeatRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Region != nil {
l = m.Region.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Leader != nil {
l = m.Leader.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.DownPeers) > 0 {
for _, e := range m.DownPeers {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if len(m.PendingPeers) > 0 {
for _, e := range m.PendingPeers {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.BytesWritten != 0 {
n += 1 + sovPdpb(uint64(m.BytesWritten))
}
if m.BytesRead != 0 {
n += 1 + sovPdpb(uint64(m.BytesRead))
}
if m.KeysWritten != 0 {
n += 1 + sovPdpb(uint64(m.KeysWritten))
}
if m.KeysRead != 0 {
n += 1 + sovPdpb(uint64(m.KeysRead))
}
if m.ApproximateSize != 0 {
n += 1 + sovPdpb(uint64(m.ApproximateSize))
}
if m.Interval != nil {
l = m.Interval.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ApproximateKeys != 0 {
n += 1 + sovPdpb(uint64(m.ApproximateKeys))
}
if m.Term != 0 {
n += 1 + sovPdpb(uint64(m.Term))
}
if m.ReplicationStatus != nil {
l = m.ReplicationStatus.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.QueryStats != nil {
l = m.QueryStats.Size()
n += 2 + l + sovPdpb(uint64(l))
}
if m.CpuUsage != 0 {
n += 2 + sovPdpb(uint64(m.CpuUsage))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ChangePeer) Size() (n int) {
var l int
_ = l
if m.Peer != nil {
l = m.Peer.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ChangeType != 0 {
n += 1 + sovPdpb(uint64(m.ChangeType))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ChangePeerV2) Size() (n int) {
var l int
_ = l
if len(m.Changes) > 0 {
for _, e := range m.Changes {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *TransferLeader) Size() (n int) {
var l int
_ = l
if m.Peer != nil {
l = m.Peer.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Merge) Size() (n int) {
var l int
_ = l
if m.Target != nil {
l = m.Target.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SplitRegion) Size() (n int) {
var l int
_ = l
if m.Policy != 0 {
n += 1 + sovPdpb(uint64(m.Policy))
}
if len(m.Keys) > 0 {
for _, b := range m.Keys {
l = len(b)
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *RegionHeartbeatResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ChangePeer != nil {
l = m.ChangePeer.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.TransferLeader != nil {
l = m.TransferLeader.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.RegionId != 0 {
n += 1 + sovPdpb(uint64(m.RegionId))
}
if m.RegionEpoch != nil {
l = m.RegionEpoch.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.TargetPeer != nil {
l = m.TargetPeer.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Merge != nil {
l = m.Merge.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.SplitRegion != nil {
l = m.SplitRegion.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ChangePeerV2 != nil {
l = m.ChangePeerV2.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *AskSplitRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Region != nil {
l = m.Region.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *AskSplitResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.NewRegionId != 0 {
n += 1 + sovPdpb(uint64(m.NewRegionId))
}
if len(m.NewPeerIds) > 0 {
l = 0
for _, e := range m.NewPeerIds {
l += sovPdpb(uint64(e))
}
n += 1 + sovPdpb(uint64(l)) + l
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ReportSplitRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Left != nil {
l = m.Left.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Right != nil {
l = m.Right.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ReportSplitResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *AskBatchSplitRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Region != nil {
l = m.Region.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.SplitCount != 0 {
n += 1 + sovPdpb(uint64(m.SplitCount))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SplitID) Size() (n int) {
var l int
_ = l
if m.NewRegionId != 0 {
n += 1 + sovPdpb(uint64(m.NewRegionId))
}
if len(m.NewPeerIds) > 0 {
l = 0
for _, e := range m.NewPeerIds {
l += sovPdpb(uint64(e))
}
n += 1 + sovPdpb(uint64(l)) + l
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *AskBatchSplitResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.Ids) > 0 {
for _, e := range m.Ids {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ReportBatchSplitRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.Regions) > 0 {
for _, e := range m.Regions {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ReportBatchSplitResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *TimeInterval) Size() (n int) {
var l int
_ = l
if m.StartTimestamp != 0 {
n += 1 + sovPdpb(uint64(m.StartTimestamp))
}
if m.EndTimestamp != 0 {
n += 1 + sovPdpb(uint64(m.EndTimestamp))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *RecordPair) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.Value != 0 {
n += 1 + sovPdpb(uint64(m.Value))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *PeerStat) Size() (n int) {
var l int
_ = l
if m.RegionId != 0 {
n += 1 + sovPdpb(uint64(m.RegionId))
}
if m.ReadKeys != 0 {
n += 1 + sovPdpb(uint64(m.ReadKeys))
}
if m.ReadBytes != 0 {
n += 1 + sovPdpb(uint64(m.ReadBytes))
}
if m.QueryStats != nil {
l = m.QueryStats.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *StoreStats) Size() (n int) {
var l int
_ = l
if m.StoreId != 0 {
n += 1 + sovPdpb(uint64(m.StoreId))
}
if m.Capacity != 0 {
n += 1 + sovPdpb(uint64(m.Capacity))
}
if m.Available != 0 {
n += 1 + sovPdpb(uint64(m.Available))
}
if m.RegionCount != 0 {
n += 1 + sovPdpb(uint64(m.RegionCount))
}
if m.SendingSnapCount != 0 {
n += 1 + sovPdpb(uint64(m.SendingSnapCount))
}
if m.ReceivingSnapCount != 0 {
n += 1 + sovPdpb(uint64(m.ReceivingSnapCount))
}
if m.StartTime != 0 {
n += 1 + sovPdpb(uint64(m.StartTime))
}
if m.ApplyingSnapCount != 0 {
n += 1 + sovPdpb(uint64(m.ApplyingSnapCount))
}
if m.IsBusy {
n += 2
}
if m.UsedSize != 0 {
n += 1 + sovPdpb(uint64(m.UsedSize))
}
if m.BytesWritten != 0 {
n += 1 + sovPdpb(uint64(m.BytesWritten))
}
if m.KeysWritten != 0 {
n += 1 + sovPdpb(uint64(m.KeysWritten))
}
if m.BytesRead != 0 {
n += 1 + sovPdpb(uint64(m.BytesRead))
}
if m.KeysRead != 0 {
n += 1 + sovPdpb(uint64(m.KeysRead))
}
if m.Interval != nil {
l = m.Interval.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.CpuUsages) > 0 {
for _, e := range m.CpuUsages {
l = e.Size()
n += 2 + l + sovPdpb(uint64(l))
}
}
if len(m.ReadIoRates) > 0 {
for _, e := range m.ReadIoRates {
l = e.Size()
n += 2 + l + sovPdpb(uint64(l))
}
}
if len(m.WriteIoRates) > 0 {
for _, e := range m.WriteIoRates {
l = e.Size()
n += 2 + l + sovPdpb(uint64(l))
}
}
if len(m.OpLatencies) > 0 {
for _, e := range m.OpLatencies {
l = e.Size()
n += 2 + l + sovPdpb(uint64(l))
}
}
if len(m.PeerStats) > 0 {
for _, e := range m.PeerStats {
l = e.Size()
n += 2 + l + sovPdpb(uint64(l))
}
}
if m.QueryStats != nil {
l = m.QueryStats.Size()
n += 2 + l + sovPdpb(uint64(l))
}
if m.SlowScore != 0 {
n += 2 + sovPdpb(uint64(m.SlowScore))
}
if len(m.DamagedRegionsId) > 0 {
l = 0
for _, e := range m.DamagedRegionsId {
l += sovPdpb(uint64(e))
}
n += 2 + sovPdpb(uint64(l)) + l
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *StoreHeartbeatRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Stats != nil {
l = m.Stats.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *StoreHeartbeatResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.ReplicationStatus != nil {
l = m.ReplicationStatus.Size()
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.ClusterVersion)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ScatterRegionRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.RegionId != 0 {
n += 1 + sovPdpb(uint64(m.RegionId))
}
if m.Region != nil {
l = m.Region.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Leader != nil {
l = m.Leader.Size()
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.Group)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.RegionsId) > 0 {
l = 0
for _, e := range m.RegionsId {
l += sovPdpb(uint64(e))
}
n += 1 + sovPdpb(uint64(l)) + l
}
if m.RetryLimit != 0 {
n += 1 + sovPdpb(uint64(m.RetryLimit))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ScatterRegionResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.FinishedPercentage != 0 {
n += 1 + sovPdpb(uint64(m.FinishedPercentage))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetGCSafePointRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetGCSafePointResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.SafePoint != 0 {
n += 1 + sovPdpb(uint64(m.SafePoint))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *UpdateGCSafePointRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.SafePoint != 0 {
n += 1 + sovPdpb(uint64(m.SafePoint))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *UpdateGCSafePointResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.NewSafePoint != 0 {
n += 1 + sovPdpb(uint64(m.NewSafePoint))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *UpdateServiceGCSafePointRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.ServiceId)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.TTL != 0 {
n += 1 + sovPdpb(uint64(m.TTL))
}
if m.SafePoint != 0 {
n += 1 + sovPdpb(uint64(m.SafePoint))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *UpdateServiceGCSafePointResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.ServiceId)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.TTL != 0 {
n += 1 + sovPdpb(uint64(m.TTL))
}
if m.MinSafePoint != 0 {
n += 1 + sovPdpb(uint64(m.MinSafePoint))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *RegionStat) Size() (n int) {
var l int
_ = l
if m.BytesWritten != 0 {
n += 1 + sovPdpb(uint64(m.BytesWritten))
}
if m.BytesRead != 0 {
n += 1 + sovPdpb(uint64(m.BytesRead))
}
if m.KeysWritten != 0 {
n += 1 + sovPdpb(uint64(m.KeysWritten))
}
if m.KeysRead != 0 {
n += 1 + sovPdpb(uint64(m.KeysRead))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SyncRegionRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Member != nil {
l = m.Member.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.StartIndex != 0 {
n += 1 + sovPdpb(uint64(m.StartIndex))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SyncRegionResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.Regions) > 0 {
for _, e := range m.Regions {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.StartIndex != 0 {
n += 1 + sovPdpb(uint64(m.StartIndex))
}
if len(m.RegionStats) > 0 {
for _, e := range m.RegionStats {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if len(m.RegionLeaders) > 0 {
for _, e := range m.RegionLeaders {
l = e.Size()
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetOperatorRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.RegionId != 0 {
n += 1 + sovPdpb(uint64(m.RegionId))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetOperatorResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.RegionId != 0 {
n += 1 + sovPdpb(uint64(m.RegionId))
}
l = len(m.Desc)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.Status != 0 {
n += 1 + sovPdpb(uint64(m.Status))
}
l = len(m.Kind)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SyncMaxTSRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.MaxTs != nil {
l = m.MaxTs.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.SkipCheck {
n += 2
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SyncMaxTSResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.MaxLocalTs != nil {
l = m.MaxLocalTs.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.SyncedDcs) > 0 {
for _, s := range m.SyncedDcs {
l = len(s)
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SplitRegionsRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if len(m.SplitKeys) > 0 {
for _, b := range m.SplitKeys {
l = len(b)
n += 1 + l + sovPdpb(uint64(l))
}
}
if m.RetryLimit != 0 {
n += 1 + sovPdpb(uint64(m.RetryLimit))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *SplitRegionsResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.FinishedPercentage != 0 {
n += 1 + sovPdpb(uint64(m.FinishedPercentage))
}
if len(m.RegionsId) > 0 {
l = 0
for _, e := range m.RegionsId {
l += sovPdpb(uint64(e))
}
n += 1 + sovPdpb(uint64(l)) + l
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetDCLocationInfoRequest) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
l = len(m.DcLocation)
if l > 0 {
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GetDCLocationInfoResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.Suffix != 0 {
n += 1 + sovPdpb(uint64(m.Suffix))
}
if m.MaxTs != nil {
l = m.MaxTs.Size()
n += 1 + l + sovPdpb(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
// Size returns the number of bytes MarshalTo will write for m: one tag
// byte plus a varint per non-zero counter (fields 1-11, matching the tag
// constants in MarshalTo), plus any retained unrecognized bytes.
//
// NOTE: machine-generated by protoc-gen-gogo from pdpb.proto; regenerate
// rather than editing by hand.
func (m *QueryStats) Size() (n int) {
	var l int
	_ = l
	if m.GC != 0 {
		n += 1 + sovPdpb(uint64(m.GC))
	}
	if m.Get != 0 {
		n += 1 + sovPdpb(uint64(m.Get))
	}
	if m.Scan != 0 {
		n += 1 + sovPdpb(uint64(m.Scan))
	}
	if m.Coprocessor != 0 {
		n += 1 + sovPdpb(uint64(m.Coprocessor))
	}
	if m.Delete != 0 {
		n += 1 + sovPdpb(uint64(m.Delete))
	}
	if m.DeleteRange != 0 {
		n += 1 + sovPdpb(uint64(m.DeleteRange))
	}
	if m.Put != 0 {
		n += 1 + sovPdpb(uint64(m.Put))
	}
	if m.Prewrite != 0 {
		n += 1 + sovPdpb(uint64(m.Prewrite))
	}
	if m.AcquirePessimisticLock != 0 {
		n += 1 + sovPdpb(uint64(m.AcquirePessimisticLock))
	}
	if m.Commit != 0 {
		n += 1 + sovPdpb(uint64(m.Commit))
	}
	if m.Rollback != 0 {
		n += 1 + sovPdpb(uint64(m.Rollback))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// sovPdpb returns the number of bytes (1..10) the base-128 varint
// encoding of x occupies: one byte per started 7-bit group.
//
// Fix: the function name was missing from the declaration
// (`func | (x uint64)`), which does not compile. The name must be
// sovPdpb — it is what every generated Size() method, sozPdpb, and the
// map-entry sizing code in this file call.
func sovPdpb(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}
// sozPdpb returns the varint-encoded size of x after zig-zag encoding,
// i.e. the wire size of a proto sint64/sint32 value.
func sozPdpb(x uint64) (n int) {
	// Zig-zag maps signed values onto unsigned so that small magnitudes
	// (positive or negative) encode into few bytes.
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovPdpb(zigzag)
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// overwriting fields present in the input. Unknown fields are appended to
// m.XXX_unrecognized so they survive a marshal round-trip. Truncated
// input returns io.ErrUnexpectedEOF; a varint wider than 64 bits returns
// ErrIntOverflowPdpb.
//
// NOTE: machine-generated by protoc-gen-gogo from pdpb.proto; regenerate
// rather than editing by hand.
func (m *RequestHeader) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 ends a group; invalid at message top level.
			return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// ClusterId (field 1, varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
			}
			m.ClusterId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ClusterId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// SenderId (field 2, varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SenderId", wireType)
			}
			m.SenderId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SenderId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its payload and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Unknown fields are preserved in m.XXX_unrecognized; truncated input
// returns io.ErrUnexpectedEOF and over-long varints return
// ErrIntOverflowPdpb.
//
// NOTE: machine-generated by protoc-gen-gogo from pdpb.proto; regenerate
// rather than editing by hand.
func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// ClusterId (field 1, varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
			}
			m.ClusterId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ClusterId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Error (field 2, length-delimited nested message): read the
			// length prefix, then recursively unmarshal the sub-slice.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &Error{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Unknown fields are preserved in m.XXX_unrecognized; truncated input
// returns io.ErrUnexpectedEOF and over-long varints return
// ErrIntOverflowPdpb.
//
// NOTE: machine-generated by protoc-gen-gogo from pdpb.proto; regenerate
// rather than editing by hand.
func (m *Error) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Error: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Error: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Type (field 1, varint): accumulated directly as the
			// ErrorType enum.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= (ErrorType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Message (field 2, length-delimited string): read the length
			// prefix, then copy the bytes into a Go string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Message = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a TsoRequest from the protobuf wire format in dAtA,
// merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *TsoRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TsoRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TsoRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Count — varint uint32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
			}
			m.Count = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Count |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: DcLocation — length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DcLocation", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DcLocation = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a Timestamp from the protobuf wire format in dAtA,
// merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *Timestamp) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Physical — varint int64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
			}
			m.Physical = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Physical |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: Logical — varint int64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Logical", wireType)
			}
			m.Logical = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Logical |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: SuffixBits — varint uint32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SuffixBits", wireType)
			}
			m.SuffixBits = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SuffixBits |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a TsoResponse from the protobuf wire format in dAtA,
// merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *TsoResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TsoResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TsoResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Count — varint uint32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
			}
			m.Count = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Count |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: Timestamp — length-delimited embedded Timestamp.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Timestamp == nil {
				m.Timestamp = &Timestamp{}
			}
			if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a BootstrapRequest from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *BootstrapRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: BootstrapRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BootstrapRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Store — length-delimited embedded metapb.Store.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Store == nil {
				m.Store = &metapb.Store{}
			}
			if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Region — length-delimited embedded metapb.Region.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Region == nil {
				m.Region = &metapb.Region{}
			}
			if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a BootstrapResponse from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *BootstrapResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: BootstrapResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BootstrapResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ReplicationStatus — length-delimited embedded
			// replication_modepb.ReplicationStatus.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatus", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ReplicationStatus == nil {
				m.ReplicationStatus = &replication_modepb.ReplicationStatus{}
			}
			if err := m.ReplicationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an IsBootstrappedRequest from the protobuf wire format
// in dAtA, merging decoded fields into m. Unknown fields are kept verbatim
// in m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *IsBootstrappedRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IsBootstrappedRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IsBootstrappedRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an IsBootstrappedResponse from the protobuf wire format
// in dAtA, merging decoded fields into m. Unknown fields are kept verbatim
// in m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *IsBootstrappedResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IsBootstrappedResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IsBootstrappedResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Bootstrapped — varint decoded into a bool
			// (any non-zero value is true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Bootstrapped", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Bootstrapped = bool(v != 0)
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an AllocIDRequest from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *AllocIDRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AllocIDRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AllocIDRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an AllocIDResponse from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *AllocIDResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AllocIDResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AllocIDResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Id — varint uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
			}
			m.Id = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Id |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetStoreRequest from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *GetStoreRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetStoreRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetStoreRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StoreId — varint uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StoreId", wireType)
			}
			m.StoreId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StoreId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetStoreResponse from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *GetStoreResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetStoreResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetStoreResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Store — length-delimited embedded metapb.Store.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Store == nil {
				m.Store = &metapb.Store{}
			}
			if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Stats — length-delimited embedded StoreStats.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Stats == nil {
				m.Stats = &StoreStats{}
			}
			if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a PutStoreRequest from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *PutStoreRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PutStoreRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PutStoreRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Store — length-delimited embedded metapb.Store.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Store == nil {
				m.Store = &metapb.Store{}
			}
			if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a PutStoreResponse from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *PutStoreResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PutStoreResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PutStoreResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ReplicationStatus — length-delimited embedded
			// replication_modepb.ReplicationStatus.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatus", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ReplicationStatus == nil {
				m.ReplicationStatus = &replication_modepb.ReplicationStatus{}
			}
			if err := m.ReplicationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetAllStoresRequest from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *GetAllStoresRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetAllStoresRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetAllStoresRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ExcludeTombstoneStores — varint decoded into a bool
			// (any non-zero value is true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTombstoneStores", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ExcludeTombstoneStores = bool(v != 0)
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetAllStoresResponse from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are kept verbatim in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf-style generated code —
// regenerate from the .proto definition instead of hand-editing.
func (m *GetAllStoresResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag varint (fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetAllStoresResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetAllStoresResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header — length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Stores — repeated metapb.Store; each occurrence on the
			// wire appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Stores", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Stores = append(m.Stores, &metapb.Store{})
			if err := m.Stores[len(m.Stores)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload and preserve the raw bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *GetRegionRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetRegionRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: RegionKey, length-delimited bytes.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy into the existing backing array when possible; normalize nil to empty.
			m.RegionKey = append(m.RegionKey[:0], dAtA[iNdEx:postIndex]...)
			if m.RegionKey == nil {
				m.RegionKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
// Note: field number 4 is intentionally absent (reserved/skipped in the schema);
// the decoder jumps from Leader (3) to DownPeers (5).
func (m *GetRegionResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetRegionResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Region, length-delimited embedded metapb.Region.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Region == nil {
				m.Region = &metapb.Region{}
			}
			if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Leader, length-delimited embedded metapb.Peer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Leader == nil {
				m.Leader = &metapb.Peer{}
			}
			if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: DownPeers, repeated embedded PeerStats.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DownPeers", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DownPeers = append(m.DownPeers, &PeerStats{})
			if err := m.DownPeers[len(m.DownPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// Field 6: PendingPeers, repeated embedded metapb.Peer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PendingPeers", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PendingPeers = append(m.PendingPeers, &metapb.Peer{})
			if err := m.PendingPeers[len(m.PendingPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *GetRegionByIDRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetRegionByIDRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetRegionByIDRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: RegionId, varint uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *ScanRegionsRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScanRegionsRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanRegionsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartKey, length-delimited bytes.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy into the existing backing array when possible; normalize nil to empty.
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Limit, varint int32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: EndKey, length-delimited bytes.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy into the existing backing array when possible; normalize nil to empty.
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *Region) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Region: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Region, length-delimited embedded metapb.Region.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Region == nil {
				m.Region = &metapb.Region{}
			}
			if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Leader, length-delimited embedded metapb.Peer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Leader == nil {
				m.Leader = &metapb.Peer{}
			}
			if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: DownPeers, repeated embedded PeerStats.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DownPeers", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DownPeers = append(m.DownPeers, &PeerStats{})
			if err := m.DownPeers[len(m.DownPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: PendingPeers, repeated embedded metapb.Peer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PendingPeers", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PendingPeers = append(m.PendingPeers, &metapb.Peer{})
			if err := m.PendingPeers[len(m.PendingPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *ScanRegionsResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScanRegionsResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanRegionsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: RegionMetas, repeated embedded metapb.Region.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionMetas", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RegionMetas = append(m.RegionMetas, &metapb.Region{})
			if err := m.RegionMetas[len(m.RegionMetas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Leaders, repeated embedded metapb.Peer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leaders", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Leaders = append(m.Leaders, &metapb.Peer{})
			if err := m.Leaders[len(m.Leaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Regions, repeated embedded pdpb.Region (local message type).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Regions = append(m.Regions, &Region{})
			if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *GetClusterConfigRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetClusterConfigRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetClusterConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *GetClusterConfigResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetClusterConfigResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetClusterConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Cluster, length-delimited embedded metapb.Cluster.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Cluster == nil {
				m.Cluster = &metapb.Cluster{}
			}
			if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *PutClusterConfigRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PutClusterConfigRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PutClusterConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded RequestHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Cluster, length-delimited embedded metapb.Cluster.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Cluster == nil {
				m.Cluster = &metapb.Cluster{}
			}
			if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *PutClusterConfigResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PutClusterConfigResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PutClusterConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header, length-delimited embedded ResponseHeader.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated decoder (protoc-gen-gofast style); do not hand-edit the logic.
func (m *Member) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Member: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Name, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: MemberId, varint uint64.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
			}
			m.MemberId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MemberId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: PeerUrls, repeated string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PeerUrls", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PeerUrls = append(m.PeerUrls, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 4:
			// Field 4: ClientUrls, repeated string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ClientUrls", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ClientUrls = append(m.ClientUrls, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 5:
			// Field 5: LeaderPriority, varint int32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LeaderPriority", wireType)
			}
			m.LeaderPriority = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LeaderPriority |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: DeployPath, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DeployPath", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DeployPath = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 7:
			// Field 7: BinaryVersion, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.BinaryVersion = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 8:
			// Field 8: GitHash, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field GitHash", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.GitHash = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 9:
			// Field 9: DcLocation, length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DcLocation", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DcLocation = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetMembersRequest from the protobuf wire format in dAtA.
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *GetMembersRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetMembersRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetMembersRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetMembersResponse from the protobuf wire format in dAtA.
// Fields: Header, repeated Members, Leader, EtcdLeader, and the
// TsoAllocatorLeaders map (decoded entry-by-entry below).
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *GetMembersResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetMembersResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetMembersResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: repeated Members — each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Members = append(m.Members, &Member{})
			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Leader == nil {
				m.Leader = &Member{}
			}
			if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EtcdLeader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.EtcdLeader == nil {
				m.EtcdLeader = &Member{}
			}
			if err := m.EtcdLeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: TsoAllocatorLeaders map<string, Member>. Each wire
			// occurrence is one synthetic map-entry message with key = field 1
			// (string) and value = field 2 (Member); decode it inline below.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TsoAllocatorLeaders", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TsoAllocatorLeaders == nil {
				m.TsoAllocatorLeaders = make(map[string]*Member)
			}
			var mapkey string
			var mapvalue *Member
			// Walk the fields of this single map entry until its end offset.
			for iNdEx < postIndex {
				entryPreIndex := iNdEx
				var wire uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					wire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				fieldNum := int32(wire >> 3)
				if fieldNum == 1 {
					var stringLenmapkey uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowPdpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapkey |= (uint64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapkey := int(stringLenmapkey)
					if intStringLenmapkey < 0 {
						return ErrInvalidLengthPdpb
					}
					postStringIndexmapkey := iNdEx + intStringLenmapkey
					if postStringIndexmapkey > l {
						return io.ErrUnexpectedEOF
					}
					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
					iNdEx = postStringIndexmapkey
				} else if fieldNum == 2 {
					var mapmsglen int
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowPdpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						mapmsglen |= (int(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					if mapmsglen < 0 {
						return ErrInvalidLengthPdpb
					}
					postmsgIndex := iNdEx + mapmsglen
					if mapmsglen < 0 {
						return ErrInvalidLengthPdpb
					}
					if postmsgIndex > l {
						return io.ErrUnexpectedEOF
					}
					mapvalue = &Member{}
					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
						return err
					}
					iNdEx = postmsgIndex
				} else {
					// Unknown field inside the map entry: skip within the
					// entry's bounds (postIndex), not the whole buffer.
					iNdEx = entryPreIndex
					skippy, err := skipPdpb(dAtA[iNdEx:])
					if err != nil {
						return err
					}
					if skippy < 0 {
						return ErrInvalidLengthPdpb
					}
					if (iNdEx + skippy) > postIndex {
						return io.ErrUnexpectedEOF
					}
					iNdEx += skippy
				}
			}
			m.TsoAllocatorLeaders[mapkey] = mapvalue
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a PeerStats from the protobuf wire format in dAtA.
// Fields: Peer (embedded metapb.Peer) and DownSeconds (varint).
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *PeerStats) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PeerStats: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PeerStats: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Peer == nil {
				m.Peer = &metapb.Peer{}
			}
			if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: DownSeconds (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field DownSeconds", wireType)
			}
			m.DownSeconds = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.DownSeconds |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RegionHeartbeatRequest from the protobuf wire format
// in dAtA. Message fields (Header, Region, Leader, DownPeers, PendingPeers,
// Interval, ReplicationStatus, QueryStats) are length-delimited; the stats
// counters (BytesWritten/BytesRead/KeysWritten/KeysRead/ApproximateSize/
// ApproximateKeys/Term/CpuUsage) are uint64 varints. Field number 11 is
// absent from the dispatch, so it falls through to the unknown-field skip.
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *RegionHeartbeatRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RegionHeartbeatRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RegionHeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Region == nil {
				m.Region = &metapb.Region{}
			}
			if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Leader == nil {
				m.Leader = &metapb.Peer{}
			}
			if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: repeated DownPeers — each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DownPeers", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DownPeers = append(m.DownPeers, &PeerStats{})
			if err := m.DownPeers[len(m.DownPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: repeated PendingPeers — each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PendingPeers", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PendingPeers = append(m.PendingPeers, &metapb.Peer{})
			if err := m.PendingPeers[len(m.PendingPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field BytesWritten", wireType)
			}
			m.BytesWritten = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.BytesWritten |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field BytesRead", wireType)
			}
			m.BytesRead = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.BytesRead |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeysWritten", wireType)
			}
			m.KeysWritten = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.KeysWritten |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 9:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeysRead", wireType)
			}
			m.KeysRead = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.KeysRead |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 10:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ApproximateSize", wireType)
			}
			m.ApproximateSize = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ApproximateSize |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 12:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Interval == nil {
				m.Interval = &TimeInterval{}
			}
			if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 13:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ApproximateKeys", wireType)
			}
			m.ApproximateKeys = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ApproximateKeys |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 14:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
			}
			m.Term = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Term |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 15:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatus", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ReplicationStatus == nil {
				m.ReplicationStatus = &replication_modepb.RegionReplicationStatus{}
			}
			if err := m.ReplicationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 16:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field QueryStats", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.QueryStats == nil {
				m.QueryStats = &QueryStats{}
			}
			if err := m.QueryStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 17:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CpuUsage", wireType)
			}
			m.CpuUsage = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CpuUsage |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ChangePeer from the protobuf wire format in dAtA.
// Fields: Peer (embedded metapb.Peer) and ChangeType
// (eraftpb.ConfChangeType enum, varint-encoded).
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *ChangePeer) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ChangePeer: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ChangePeer: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Peer == nil {
				m.Peer = &metapb.Peer{}
			}
			if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ChangeType (enum decoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ChangeType", wireType)
			}
			m.ChangeType = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ChangeType |= (eraftpb.ConfChangeType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ChangePeerV2 from the protobuf wire format in dAtA.
// Its only known field is the repeated Changes list of ChangePeer messages.
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *ChangePeerV2) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ChangePeerV2: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ChangePeerV2: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: repeated Changes — each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Changes = append(m.Changes, &ChangePeer{})
			if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a TransferLeader from the protobuf wire format in dAtA.
// Its only known field is Peer (embedded metapb.Peer).
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *TransferLeader) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TransferLeader: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TransferLeader: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Peer == nil {
				m.Peer = &metapb.Peer{}
			}
			if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a Merge from the protobuf wire format in dAtA.
// Its only known field is Target (embedded metapb.Region).
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *Merge) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Merge: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Merge: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Target == nil {
				m.Target = &metapb.Region{}
			}
			if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a SplitRegion from the protobuf wire format in dAtA.
// Fields: Policy (CheckPolicy enum, varint) and repeated Keys (bytes); each
// Keys occurrence is copied into a freshly allocated slice so the result
// does not alias dAtA.
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *SplitRegion) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SplitRegion: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SplitRegion: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Policy (enum decoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType)
			}
			m.Policy = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Policy |= (CheckPolicy(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: repeated Keys (length-delimited bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RegionHeartbeatResponse from the protobuf wire format
// in dAtA. All fields except RegionId (uint64 varint) are length-delimited
// embedded messages: Header, ChangePeer, TransferLeader, RegionEpoch,
// TargetPeer, Merge, SplitRegion, and ChangePeerV2.
// NOTE(review): gogo/protobuf generated code — regenerate from the .proto file
// rather than editing by hand.
func (m *RegionHeartbeatResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number + wire type) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RegionHeartbeatResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RegionHeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ChangePeer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ChangePeer == nil {
				m.ChangePeer = &ChangePeer{}
			}
			if err := m.ChangePeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TransferLeader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TransferLeader == nil {
				m.TransferLeader = &TransferLeader{}
			}
			if err := m.TransferLeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: RegionId (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionEpoch == nil {
				m.RegionEpoch = &metapb.RegionEpoch{}
			}
			if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TargetPeer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TargetPeer == nil {
				m.TargetPeer = &metapb.Peer{}
			}
			if err := m.TargetPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Merge", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Merge == nil {
				m.Merge = &Merge{}
			}
			if err := m.Merge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 8:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SplitRegion", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.SplitRegion == nil {
				m.SplitRegion = &SplitRegion{}
			}
			if err := m.SplitRegion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 9:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ChangePeerV2", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ChangePeerV2 == nil {
				m.ChangePeerV2 = &ChangePeerV2{}
			}
			if err := m.ChangePeerV2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes so re-marshaling
			// round-trips data produced by a newer schema.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an AskSplitRequest from its protobuf wire-format bytes in
// dAtA, merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *AskSplitRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AskSplitRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AskSplitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Region (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Region == nil {
				m.Region = &metapb.Region{}
			}
			if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an AskSplitResponse from its protobuf wire-format bytes in
// dAtA, merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *AskSplitResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AskSplitResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AskSplitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: NewRegionId (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NewRegionId", wireType)
			}
			m.NewRegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.NewRegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: NewPeerIds (repeated uint64; accepts both unpacked and
			// packed encodings, per the proto spec for repeated scalars).
			if wireType == 0 {
				// Unpacked: one varint element per key.
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.NewPeerIds = append(m.NewPeerIds, v)
			} else if wireType == 2 {
				// Packed: a length-delimited run of varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthPdpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowPdpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (uint64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.NewPeerIds = append(m.NewPeerIds, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field NewPeerIds", wireType)
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ReportSplitRequest from its protobuf wire-format bytes
// in dAtA, merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *ReportSplitRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReportSplitRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReportSplitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Left (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Left == nil {
				m.Left = &metapb.Region{}
			}
			if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Right (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Right == nil {
				m.Right = &metapb.Region{}
			}
			if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ReportSplitResponse from its protobuf wire-format bytes
// in dAtA, merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *ReportSplitResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReportSplitResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReportSplitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an AskBatchSplitRequest from its protobuf wire-format
// bytes in dAtA, merging into m. Unknown fields are preserved in
// m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *AskBatchSplitRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AskBatchSplitRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AskBatchSplitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Region (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Region == nil {
				m.Region = &metapb.Region{}
			}
			if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: SplitCount (varint, uint32).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SplitCount", wireType)
			}
			m.SplitCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SplitCount |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a SplitID from its protobuf wire-format bytes in dAtA,
// merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *SplitID) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SplitID: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SplitID: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: NewRegionId (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NewRegionId", wireType)
			}
			m.NewRegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.NewRegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: NewPeerIds (repeated uint64; accepts both unpacked and
			// packed encodings, per the proto spec for repeated scalars).
			if wireType == 0 {
				// Unpacked: one varint element per key.
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.NewPeerIds = append(m.NewPeerIds, v)
			} else if wireType == 2 {
				// Packed: a length-delimited run of varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthPdpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowPdpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (uint64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.NewPeerIds = append(m.NewPeerIds, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field NewPeerIds", wireType)
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an AskBatchSplitResponse from its protobuf wire-format
// bytes in dAtA, merging into m. Unknown fields are preserved in
// m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *AskBatchSplitResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AskBatchSplitResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AskBatchSplitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Ids (repeated length-delimited message; each key
			// appends one element).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ids", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ids = append(m.Ids, &SplitID{})
			if err := m.Ids[len(m.Ids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ReportBatchSplitRequest from its protobuf wire-format
// bytes in dAtA, merging into m. Unknown fields are preserved in
// m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *ReportBatchSplitRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReportBatchSplitRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReportBatchSplitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Regions (repeated length-delimited message; each key
			// appends one element).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Regions = append(m.Regions, &metapb.Region{})
			if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ReportBatchSplitResponse from its protobuf wire-format
// bytes in dAtA, merging into m. Unknown fields are preserved in
// m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *ReportBatchSplitResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReportBatchSplitResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReportBatchSplitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a TimeInterval from its protobuf wire-format bytes in
// dAtA, merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *TimeInterval) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TimeInterval: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TimeInterval: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: StartTimestamp (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType)
			}
			m.StartTimestamp = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTimestamp |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: EndTimestamp (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndTimestamp", wireType)
			}
			m.EndTimestamp = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.EndTimestamp |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RecordPair from its protobuf wire-format bytes in dAtA,
// merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *RecordPair) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RecordPair: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RecordPair: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Key (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Value (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			m.Value = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Value |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a PeerStat from its protobuf wire-format bytes in dAtA,
// merging into m. Unknown fields are preserved in m.XXX_unrecognized.
// NOTE(review): appears to be gogo/protobuf-generated code — prefer
// regenerating from the .proto over hand edits.
func (m *PeerStat) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field's key (varint: fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PeerStat: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PeerStat: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionId (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: ReadKeys (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReadKeys", wireType)
			}
			m.ReadKeys = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ReadKeys |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: ReadBytes (varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReadBytes", wireType)
			}
			m.ReadBytes = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ReadBytes |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: QueryStats (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field QueryStats", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.QueryStats == nil {
				m.QueryStats = &QueryStats{}
			}
			if err := m.QueryStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *StoreStats) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: StoreStats: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: StoreStats: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field StoreId", wireType)
}
m.StoreId = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.StoreId |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
}
m.Capacity = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Capacity |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Available", wireType)
}
m.Available = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Available |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RegionCount", wireType)
}
m.RegionCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.RegionCount |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SendingSnapCount", wireType)
}
m.SendingSnapCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.SendingSnapCount |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ReceivingSnapCount", wireType)
}
m.ReceivingSnapCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ReceivingSnapCount |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
}
m.StartTime = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.StartTime |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 8:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ApplyingSnapCount", wireType)
}
m.ApplyingSnapCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ApplyingSnapCount |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field IsBusy", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.IsBusy = bool(v != 0)
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field UsedSize", wireType)
}
m.UsedSize = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.UsedSize |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 11:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field BytesWritten", wireType)
}
m.BytesWritten = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.BytesWritten |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field KeysWritten", wireType)
}
m.KeysWritten = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.KeysWritten |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 13:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field BytesRead", wireType)
}
m.BytesRead = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.BytesRead |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 14:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field KeysRead", wireType)
}
m.KeysRead = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.KeysRead |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 15:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Interval == nil {
m.Interval = &TimeInterval{}
}
if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 16:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CpuUsages", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.CpuUsages = append(m.CpuUsages, &RecordPair{})
if err := m.CpuUsages[len(m.CpuUsages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 17:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ReadIoRates", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ReadIoRates = append(m.ReadIoRates, &RecordPair{})
if err := m.ReadIoRates[len(m.ReadIoRates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 18:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field WriteIoRates", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.WriteIoRates = append(m.WriteIoRates, &RecordPair{})
if err := m.WriteIoRates[len(m.WriteIoRates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 19:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OpLatencies", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.OpLatencies = append(m.OpLatencies, &RecordPair{})
if err := m.OpLatencies[len(m.OpLatencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 20:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PeerStats", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PeerStats = append(m.PeerStats, &PeerStat{})
if err := m.PeerStats[len(m.PeerStats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 21:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field QueryStats", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.QueryStats == nil {
m.QueryStats = &QueryStats{}
}
if err := m.QueryStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 22:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SlowScore", wireType)
}
m.SlowScore = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.SlowScore |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 23:
if wireType == 0 {
var v uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.DamagedRegionsId = append(m.DamagedRegionsId, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthPdpb
}
postIndex := iNdEx + packedLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
for iNdEx < postIndex {
var v uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPdpb
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.DamagedRegionsId = append(m.DamagedRegionsId, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field DamagedRegionsId", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipPdpb(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPdpb
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a StoreHeartbeatRequest from the protobuf wire format in
// dAtA, merging decoded fields into m. Unknown fields are preserved verbatim
// in m.XXX_unrecognized so re-marshalling round-trips them.
// NOTE(review): this looks like gogo/protobuf generated code — regenerate from
// the .proto file rather than editing by hand; confirm against the generator.
func (m *StoreHeartbeatRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				// More than 64 bits of varint — malformed input.
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 (end-group) is invalid at message top level.
			return fmt.Errorf("proto: StoreHeartbeatRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: StoreHeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			// Varint-decode the length prefix of the embedded message.
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the sub-message, then recurse into its payload.
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Stats (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Stats == nil {
				m.Stats = &StoreStats{}
			}
			if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag, skip the whole field, and
			// keep the raw bytes so they survive a re-marshal.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a StoreHeartbeatResponse from the protobuf wire format in
// dAtA, merging decoded fields into m; unknown fields are preserved in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf generated code — regenerate,
// do not edit by hand.
func (m *StoreHeartbeatResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: StoreHeartbeatResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: StoreHeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ReplicationStatus (embedded message from the
			// replication_modepb package, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatus", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ReplicationStatus == nil {
				m.ReplicationStatus = &replication_modepb.ReplicationStatus{}
			}
			if err := m.ReplicationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: ClusterVersion (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersion", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Guard against a length that overflows int when narrowed.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ClusterVersion = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ScatterRegionRequest from the protobuf wire format in
// dAtA, merging decoded fields into m; unknown fields are preserved in
// m.XXX_unrecognized. Field 6 (RegionsId) accepts both packed and unpacked
// encodings of a repeated uint64, per the protobuf spec.
// NOTE(review): gogo/protobuf generated code — regenerate, do not edit by hand.
func (m *ScatterRegionRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScatterRegionRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScatterRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: RegionId (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: Region (embedded metapb.Region, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Region == nil {
				m.Region = &metapb.Region{}
			}
			if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Leader (embedded metapb.Peer, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Leader == nil {
				m.Leader = &metapb.Peer{}
			}
			if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Group (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Group = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// Field 6: RegionsId (repeated uint64). Wire type 0 is a single
			// unpacked value; wire type 2 is a packed, length-delimited run
			// of varints. Both are accepted for compatibility.
			if wireType == 0 {
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.RegionsId = append(m.RegionsId, v)
			} else if wireType == 2 {
				// Packed encoding: length prefix, then back-to-back varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthPdpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowPdpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (uint64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.RegionsId = append(m.RegionsId, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionsId", wireType)
			}
		case 7:
			// Field 7: RetryLimit (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RetryLimit", wireType)
			}
			m.RetryLimit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RetryLimit |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ScatterRegionResponse from the protobuf wire format in
// dAtA, merging decoded fields into m; unknown fields are preserved in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf generated code —
// regenerate, do not edit by hand.
func (m *ScatterRegionResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScatterRegionResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScatterRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: FinishedPercentage (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field FinishedPercentage", wireType)
			}
			m.FinishedPercentage = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.FinishedPercentage |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetGCSafePointRequest from the protobuf wire format in
// dAtA, merging decoded fields into m; unknown fields are preserved in
// m.XXX_unrecognized. Only field 1 (Header) is known.
// NOTE(review): gogo/protobuf generated code — regenerate, do not edit by hand.
func (m *GetGCSafePointRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetGCSafePointRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetGCSafePointRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a GetGCSafePointResponse from the protobuf wire format in
// dAtA, merging decoded fields into m; unknown fields are preserved in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf generated code —
// regenerate, do not edit by hand.
func (m *GetGCSafePointResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetGCSafePointResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetGCSafePointResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: SafePoint (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SafePoint", wireType)
			}
			m.SafePoint = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SafePoint |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UpdateGCSafePointRequest from the protobuf wire format
// in dAtA, merging decoded fields into m; unknown fields are preserved in
// m.XXX_unrecognized. NOTE(review): gogo/protobuf generated code —
// regenerate, do not edit by hand.
func (m *UpdateGCSafePointRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UpdateGCSafePointRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UpdateGCSafePointRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: SafePoint (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SafePoint", wireType)
			}
			m.SafePoint = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SafePoint |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UpdateGCSafePointResponse from the protobuf wire
// format in dAtA, merging decoded fields into m; unknown fields are preserved
// in m.XXX_unrecognized. NOTE(review): gogo/protobuf generated code —
// regenerate, do not edit by hand.
func (m *UpdateGCSafePointResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UpdateGCSafePointResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UpdateGCSafePointResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: NewSafePoint (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NewSafePoint", wireType)
			}
			m.NewSafePoint = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.NewSafePoint |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UpdateServiceGCSafePointRequest from the protobuf wire
// format in dAtA, merging decoded fields into m; unknown fields are preserved
// in m.XXX_unrecognized. NOTE(review): gogo/protobuf generated code —
// regenerate, do not edit by hand.
func (m *UpdateServiceGCSafePointRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UpdateServiceGCSafePointRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UpdateServiceGCSafePointRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ServiceId (bytes, length-delimited). The slice is
			// reused via append(...[:0], ...) to avoid reallocating on merge.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ServiceId = append(m.ServiceId[:0], dAtA[iNdEx:postIndex]...)
			if m.ServiceId == nil {
				// Keep an empty (non-nil) slice so presence is observable.
				m.ServiceId = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: TTL (int64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
			}
			m.TTL = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TTL |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: SafePoint (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SafePoint", wireType)
			}
			m.SafePoint = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SafePoint |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UpdateServiceGCSafePointResponse from the protobuf
// wire format in dAtA, merging decoded fields into m; unknown fields are
// preserved in m.XXX_unrecognized. NOTE(review): gogo/protobuf generated
// code — regenerate, do not edit by hand.
func (m *UpdateServiceGCSafePointResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Field tag varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UpdateServiceGCSafePointResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UpdateServiceGCSafePointResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Header (embedded message, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ServiceId (bytes, length-delimited); reuses the
			// existing backing array via append(...[:0], ...).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ServiceId", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ServiceId = append(m.ServiceId[:0], dAtA[iNdEx:postIndex]...)
			if m.ServiceId == nil {
				// Keep an empty (non-nil) slice so presence is observable.
				m.ServiceId = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: TTL (int64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
			}
			m.TTL = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TTL |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: MinSafePoint (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MinSafePoint", wireType)
			}
			m.MinSafePoint = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MinSafePoint |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving its bytes.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: it reads tag
// varints in a loop, dispatches on field number to decode each known field
// (all four counters here are plain varint uint64s), and preserves unknown
// fields in m.XXX_unrecognized via skipPdpb.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *RegionStat) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RegionStat: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RegionStat: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field BytesWritten", wireType)
			}
			m.BytesWritten = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.BytesWritten |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field BytesRead", wireType)
			}
			m.BytesRead = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.BytesRead |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeysWritten", wireType)
			}
			m.KeysWritten = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.KeysWritten |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeysRead", wireType)
			}
			m.KeysRead = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.KeysRead |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) and
// Member (2) are length-delimited embedded messages, StartIndex (3) is a
// varint uint64. Unknown fields are preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *SyncRegionRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SyncRegionRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SyncRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Member == nil {
				m.Member = &Member{}
			}
			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartIndex", wireType)
			}
			m.StartIndex = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartIndex |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) is an
// embedded message, Regions (2), RegionStats (4), and RegionLeaders (5) are
// repeated embedded messages appended element by element, and StartIndex (3)
// is a varint uint64. Unknown fields are preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *SyncRegionResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SyncRegionResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SyncRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Regions = append(m.Regions, &metapb.Region{})
			if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartIndex", wireType)
			}
			m.StartIndex = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartIndex |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionStats", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RegionStats = append(m.RegionStats, &RegionStat{})
			if err := m.RegionStats[len(m.RegionStats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionLeaders", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RegionLeaders = append(m.RegionLeaders, &metapb.Peer{})
			if err := m.RegionLeaders[len(m.RegionLeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) is an
// embedded message, RegionId (2) is a varint uint64. Unknown fields are
// preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *GetOperatorRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetOperatorRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetOperatorRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) is an
// embedded message, RegionId (2) is a varint uint64, Desc (3) and Kind (5)
// are length-delimited byte slices, and Status (4) is a varint decoded into
// the OperatorStatus enum type. Unknown fields are preserved in
// m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *GetOperatorResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetOperatorResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetOperatorResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Desc = append(m.Desc[:0], dAtA[iNdEx:postIndex]...)
			if m.Desc == nil {
				m.Desc = []byte{}
			}
			iNdEx = postIndex
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
			}
			m.Status = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Status |= (OperatorStatus(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Kind = append(m.Kind[:0], dAtA[iNdEx:postIndex]...)
			if m.Kind == nil {
				m.Kind = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) and
// MaxTs (2) are embedded messages, SkipCheck (3) is a varint decoded as a
// bool (non-zero means true). Unknown fields are preserved in
// m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *SyncMaxTSRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SyncMaxTSRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SyncMaxTSRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.MaxTs == nil {
				m.MaxTs = &Timestamp{}
			}
			if err := m.MaxTs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SkipCheck", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.SkipCheck = bool(v != 0)
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) and
// MaxLocalTs (2) are embedded messages, SyncedDcs (3) is a repeated string
// appended element by element. Unknown fields are preserved in
// m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *SyncMaxTSResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SyncMaxTSResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SyncMaxTSResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxLocalTs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.MaxLocalTs == nil {
				m.MaxLocalTs = &Timestamp{}
			}
			if err := m.MaxLocalTs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SyncedDcs", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SyncedDcs = append(m.SyncedDcs, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) is an
// embedded message, SplitKeys (2) is a repeated bytes field (each element
// copied into a freshly allocated slice), and RetryLimit (3) is a varint
// uint64. Unknown fields are preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *SplitRegionsRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SplitRegionsRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SplitRegionsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SplitKeys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SplitKeys = append(m.SplitKeys, make([]byte, postIndex-iNdEx))
			copy(m.SplitKeys[len(m.SplitKeys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RetryLimit", wireType)
			}
			m.RetryLimit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RetryLimit |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) is an
// embedded message, FinishedPercentage (2) is a varint uint64, and RegionsId
// (3) is a repeated uint64 that accepts both encodings — unpacked (one varint
// per tag, wire type 0) and packed (a single length-delimited run of varints,
// wire type 2). Unknown fields are preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *SplitRegionsResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SplitRegionsResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SplitRegionsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field FinishedPercentage", wireType)
			}
			m.FinishedPercentage = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.FinishedPercentage |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType == 0 {
				// Unpacked element: a single varint.
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.RegionsId = append(m.RegionsId, v)
			} else if wireType == 2 {
				// Packed encoding: a byte length followed by back-to-back varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthPdpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowPdpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (uint64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.RegionsId = append(m.RegionsId, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionsId", wireType)
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) is an
// embedded message, DcLocation (2) is a length-delimited string. Unknown
// fields are preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *GetDCLocationInfoRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetDCLocationInfoRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetDCLocationInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &RequestHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DcLocation", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DcLocation = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA: Header (1) and
// MaxTs (3) are embedded messages, Suffix (2) is a varint int32. Unknown
// fields are preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *GetDCLocationInfoResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetDCLocationInfoResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetDCLocationInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Header == nil {
				m.Header = &ResponseHeader{}
			}
			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Suffix", wireType)
			}
			m.Suffix = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Suffix |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPdpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.MaxTs == nil {
				m.MaxTs = &Timestamp{}
			}
			if err := m.MaxTs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA. All eleven
// fields (GC, Get, Scan, Coprocessor, Delete, DeleteRange, Put, Prewrite,
// AcquirePessimisticLock, Commit, Rollback — field numbers 1-11) are plain
// varint uint64 counters. Unknown fields are preserved in m.XXX_unrecognized.
// Code generated by protoc-gen-gogo — regenerate rather than editing by hand.
func (m *QueryStats) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the next field's tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryStats: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryStats: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field GC", wireType)
			}
			m.GC = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.GC |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Get", wireType)
			}
			m.Get = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Get |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Scan", wireType)
			}
			m.Scan = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Scan |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Coprocessor", wireType)
			}
			m.Coprocessor = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Coprocessor |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType)
			}
			m.Delete = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Delete |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
			}
			m.DeleteRange = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.DeleteRange |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
			}
			m.Put = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Put |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Prewrite", wireType)
			}
			m.Prewrite = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Prewrite |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 9:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field AcquirePessimisticLock", wireType)
			}
			m.AcquirePessimisticLock = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.AcquirePessimisticLock |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 10:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
			}
			m.Commit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Commit |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 11:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Rollback", wireType)
			}
			m.Rollback = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Rollback |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipPdpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPdpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipPdpb scans one complete protobuf field (key plus payload) starting at
// dAtA[0] and returns the total number of bytes it occupies, so callers can
// skip over unknown fields while unmarshaling. (Generated code.)
func skipPdpb(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowPdpb
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		// The low 3 bits of the key hold the wire type.
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Wire type 0 (varint): skip continuation bytes until the high bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			// Wire type 1: fixed 64-bit payload.
			iNdEx += 8
			return iNdEx, nil
		case 2:
			// Wire type 2 (length-delimited): read the varint length, then skip that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowPdpb
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthPdpb
			}
			return iNdEx, nil
		case 3:
			// Wire type 3 (start-group, deprecated): recursively skip nested fields
			// until the matching end-group marker (wire type 4) is found.
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowPdpb
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipPdpb(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			// Wire type 4 (end-group): zero-length payload; key already consumed.
			return iNdEx, nil
		case 5:
			// Wire type 5: fixed 32-bit payload.
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}
// Sentinel errors shared by all generated unmarshal/skip helpers in this file.
var (
	// ErrInvalidLengthPdpb reports a negative length prefix in the wire data.
	ErrInvalidLengthPdpb = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowPdpb reports a varint that does not fit in 64 bits.
	ErrIntOverflowPdpb = fmt.Errorf("proto: integer overflow")
)
// init registers this file's gzipped FileDescriptorProto under the name "pdpb.proto".
func init() { proto.RegisterFile("pdpb.proto", fileDescriptor_pdpb_4500a4488ac1eb3b) }
var fileDescriptor_pdpb_4500a4488ac1eb3b = []byte{
// 3981 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3b, 0x4d, 0x6f, 0x23, 0x57,
0x72, 0x6a, 0x8a, 0xa4, 0xc8, 0xe2, 0x87, 0x5a, 0x4f, 0x1a, 0x89, 0x43, 0x7b, 0x3e, 0xb6, 0x3d,
0x76, 0xec, 0xd9, 0xb5, 0x6c, 0xcb, 0x86, 0x31, 0xd8, 0x60, 0x17, 0x91, 0x28, 0x5a, 0x43, 0x8f,
0x24, 0x32, 0x4d, 0xca, 0x1b, 0x07, 0xc1, 0x76, 0x5a, 0xdd, 0x4f, 0x54, 0x47, 0x64, 0x77, 0x4f,
0xbf, 0xa6, 0x66, 0xb8, 0xc8, 0x21, 0xc8, 0x21, 0xd9, 0x20, 0x1b, 0x20, 0x40, 0x92, 0x4d, 0x4e,
0x41, 0x0e, 0x01, 0x92, 0x4b, 0xae, 0x41, 0xf2, 0x03, 0xb2, 0xc8, 0x71, 0x8f, 0xb9, 0x25, 0x70,
0x7e, 0x44, 0x72, 0xc8, 0x21, 0x78, 0x5f, 0xcd, 0xee, 0x26, 0x39, 0x92, 0x7b, 0xd6, 0x8b, 0x3d,
0x89, 0x5d, 0x55, 0xaf, 0x5e, 0xbd, 0x7a, 0x55, 0xf5, 0xaa, 0xea, 0x3d, 0x01, 0xf8, 0xb6, 0x7f,
0xbe, 0xeb, 0x07, 0x5e, 0xe8, 0xa1, 0x3c, 0xfd, 0xdd, 0xac, 0x8e, 0x71, 0x68, 0x4a, 0x58, 0xb3,
0x86, 0x03, 0xf3, 0x22, 0x8c, 0x3e, 0x1b, 0x01, 0xf6, 0x47, 0x8e, 0x65, 0x86, 0x8e, 0xe7, 0x1a,
0x63, 0xcf, 0xc6, 0x11, 0x66, 0x6b, 0xe8, 0x0d, 0x3d, 0xf6, 0xf3, 0x03, 0xfa, 0x4b, 0x40, 0xd7,
0x83, 0x09, 0x09, 0xd9, 0x4f, 0x0e, 0xd0, 0x9e, 0x41, 0x4d, 0xc7, 0xcf, 0x27, 0x98, 0x84, 0x4f,
0xb1, 0x69, 0xe3, 0x00, 0xdd, 0x03, 0xb0, 0x46, 0x13, 0x12, 0xe2, 0xc0, 0x70, 0xec, 0x86, 0xf2,
0x50, 0x79, 0x37, 0xaf, 0x97, 0x05, 0xa4, 0x63, 0xa3, 0x37, 0xa0, 0x4c, 0xb0, 0x6b, 0x73, 0x6c,
0x8e, 0x61, 0x4b, 0x1c, 0xd0, 0xb1, 0x35, 0x1d, 0xea, 0x3a, 0x26, 0xbe, 0xe7, 0x12, 0x7c, 0x3b,
0x6e, 0xdf, 0x82, 0x02, 0x0e, 0x02, 0x2f, 0x60, 0x9c, 0x2a, 0x7b, 0x95, 0x5d, 0xb6, 0xfa, 0x36,
0x05, 0xe9, 0x1c, 0xa3, 0x7d, 0x06, 0x05, 0xf6, 0x8d, 0xde, 0x82, 0x7c, 0x38, 0xf5, 0x31, 0x63,
0x52, 0xdf, 0x5b, 0x8f, 0x91, 0x0e, 0xa6, 0x3e, 0xd6, 0x19, 0x12, 0x35, 0x60, 0x6d, 0x8c, 0x09,
0x31, 0x87, 0x98, 0xb1, 0x2c, 0xeb, 0xf2, 0x53, 0xf3, 0x01, 0x06, 0xc4, 0x13, 0x6b, 0x45, 0xdf,
0x86, 0xe2, 0x25, 0x93, 0x90, 0xb1, 0xab, 0xec, 0x6d, 0x72, 0x76, 0x09, 0x55, 0xe8, 0x82, 0x04,
0x6d, 0x41, 0xc1, 0xf2, 0x26, 0x6e, 0xc8, 0x58, 0xd6, 0x74, 0xfe, 0x81, 0x1e, 0x40, 0xc5, 0xb6,
0x8c, 0x91, 0xc7, 0x95, 0xdf, 0x58, 0x65, 0xd3, 0x81, 0x6d, 0x1d, 0x0b, 0x88, 0x76, 0x0e, 0xe5,
0x81, 0x33, 0xc6, 0x24, 0x34, 0xc7, 0x3e, 0x6a, 0x42, 0xc9, 0xbf, 0x9c, 0x12, 0xc7, 0x32, 0x47,
0x6c, 0xca, 0x55, 0x3d, 0xfa, 0xa6, 0x42, 0x8f, 0xbc, 0x21, 0x43, 0xe5, 0x18, 0x4a, 0x7e, 0xd2,
0x39, 0xc8, 0xe4, 0xe2, 0xc2, 0x79, 0x69, 0x9c, 0x3b, 0x21, 0x61, 0x73, 0xd4, 0x74, 0xe0, 0xa0,
0x03, 0x27, 0x24, 0xda, 0x1f, 0x28, 0x50, 0x61, 0xcb, 0xe2, 0x5a, 0x47, 0xdf, 0x49, 0xad, 0x6b,
0x4b, 0xae, 0x2b, 0xbe, 0x2b, 0x37, 0x2c, 0xec, 0x7d, 0x28, 0x87, 0x52, 0x6e, 0x36, 0x65, 0x45,
0x6a, 0x3b, 0x5a, 0x8e, 0x3e, 0xa3, 0xd0, 0x7e, 0xa2, 0x80, 0x7a, 0xe0, 0x79, 0x21, 0x09, 0x03,
0xd3, 0xcf, 0xa4, 0xdf, 0xb7, 0xa0, 0x40, 0x42, 0x2f, 0xc0, 0xc2, 0x0a, 0x6a, 0xbb, 0xc2, 0xe2,
0xfb, 0x14, 0xa8, 0x73, 0x1c, 0x7a, 0x07, 0x8a, 0x01, 0x1e, 0x4a, 0x4d, 0x57, 0xf6, 0xea, 0x92,
0x4a, 0x67, 0x50, 0x5d, 0x60, 0xb5, 0xbf, 0x56, 0x60, 0x23, 0x26, 0x4e, 0x26, 0xbd, 0x0c, 0x00,
0xc5, 0xfd, 0x8a, 0x84, 0x66, 0x38, 0x21, 0x42, 0xba, 0xb7, 0x77, 0x17, 0xb8, 0x9c, 0x3e, 0x03,
0xf5, 0x19, 0xb1, 0xbe, 0x11, 0xa4, 0x41, 0xda, 0x21, 0xdc, 0xe9, 0x90, 0x48, 0x34, 0x1f, 0xdb,
0x59, 0x94, 0xa5, 0xfd, 0x1e, 0x6c, 0xa7, 0xb9, 0x64, 0x5a, 0xa3, 0x06, 0xd5, 0xf3, 0x18, 0x17,
0xb6, 0xba, 0x92, 0x9e, 0x80, 0x69, 0xdf, 0x83, 0xfa, 0xfe, 0x68, 0xe4, 0x59, 0x9d, 0xc3, 0x4c,
0xa2, 0x76, 0x61, 0x3d, 0x1a, 0x9e, 0x49, 0xc6, 0x3a, 0xe4, 0xa2, 0x28, 0x93, 0x73, 0x6c, 0xed,
0x4b, 0x58, 0x3f, 0xc2, 0x21, 0x37, 0x8b, 0x2c, 0x86, 0x76, 0x17, 0x4a, 0xcc, 0x98, 0x66, 0xb1,
0x6b, 0x8d, 0x7d, 0x77, 0x6c, 0xed, 0xcf, 0x14, 0x50, 0x67, 0xbc, 0x33, 0x49, 0x7b, 0x4b, 0x33,
0x2e, 0x50, 0x73, 0x22, 0xc2, 0x8a, 0x55, 0xce, 0x91, 0x91, 0x50, 0x33, 0x21, 0x3a, 0x47, 0x6b,
0x16, 0xac, 0xf7, 0x26, 0xaf, 0xb1, 0xd4, 0xdb, 0x08, 0xa3, 0xfd, 0x54, 0x01, 0x75, 0x36, 0xcb,
0xaf, 0x90, 0xab, 0xfc, 0x3e, 0x6c, 0x1e, 0xe1, 0x70, 0x7f, 0x34, 0x62, 0xa2, 0x91, 0x4c, 0x1a,
0x78, 0x02, 0x0d, 0xfc, 0xd2, 0x1a, 0x4d, 0x6c, 0x6c, 0x84, 0xde, 0xf8, 0x9c, 0x84, 0x9e, 0x8b,
0x0d, 0xb6, 0x6e, 0x22, 0x8c, 0x7d, 0x5b, 0xe0, 0x07, 0x12, 0xcd, 0x67, 0xd3, 0xae, 0x60, 0x2b,
0x39, 0x7b, 0x26, 0xcd, 0xbc, 0x0d, 0xc5, 0x68, 0xb6, 0xd5, 0xf9, 0x2d, 0x10, 0x48, 0xed, 0x87,
0xcc, 0xee, 0x44, 0x10, 0xcb, 0xb2, 0xce, 0x7b, 0x00, 0x3c, 0xf4, 0x19, 0x57, 0x78, 0xca, 0x56,
0x56, 0xd5, 0xcb, 0x1c, 0xf2, 0x0c, 0x4f, 0xb5, 0xff, 0x55, 0x60, 0x23, 0x36, 0x41, 0xa6, 0xa5,
0xcc, 0x62, 0x6f, 0xee, 0x55, 0xb1, 0x17, 0x3d, 0x82, 0xe2, 0x88, 0x73, 0xe5, 0xd6, 0x5d, 0x95,
0x74, 0x3d, 0x4c, 0xb9, 0x71, 0x1c, 0xda, 0x05, 0xb0, 0xbd, 0x17, 0xae, 0xe1, 0x63, 0x1c, 0x90,
0x46, 0x81, 0x29, 0x47, 0x1c, 0x30, 0x94, 0x8e, 0xbb, 0x41, 0x99, 0x92, 0xd0, 0x4f, 0x82, 0x3e,
0x82, 0x9a, 0x8f, 0x5d, 0xdb, 0x71, 0x87, 0x62, 0x48, 0x91, 0x0d, 0x49, 0x32, 0xaf, 0x0a, 0x12,
0x36, 0xe4, 0xf3, 0x7c, 0x29, 0xaf, 0x16, 0xb4, 0xdf, 0x65, 0xfb, 0xc8, 0x65, 0x3c, 0x98, 0x66,
0x0b, 0x62, 0x34, 0xe1, 0x11, 0xea, 0x9d, 0x25, 0x3c, 0x1c, 0xc0, 0xa3, 0x06, 0xea, 0x5b, 0xa6,
0xcb, 0xe7, 0x20, 0x59, 0x27, 0x20, 0xa1, 0x19, 0x84, 0xb1, 0xed, 0x2b, 0x31, 0xc0, 0x33, 0x3c,
0xa5, 0x27, 0xf4, 0xc8, 0x19, 0x3b, 0x21, 0x53, 0x68, 0x41, 0xe7, 0x1f, 0x68, 0x07, 0xd6, 0xb0,
0x6b, 0xb3, 0x01, 0x79, 0x36, 0xa0, 0x88, 0x5d, 0x9b, 0x6e, 0xf6, 0xbf, 0x2a, 0x50, 0xe4, 0xb2,
0xc4, 0xf6, 0x4c, 0xb9, 0xe5, 0x9e, 0xe5, 0x6e, 0xbd, 0x67, 0xab, 0x5f, 0x7f, 0xcf, 0xf2, 0x37,
0xed, 0x99, 0xf6, 0x33, 0x05, 0x36, 0x13, 0xba, 0xcc, 0x64, 0xaa, 0x1f, 0x41, 0x55, 0x6c, 0x17,
0x9d, 0x49, 0xfa, 0x5e, 0x7a, 0xf1, 0x15, 0x4e, 0x73, 0x42, 0x49, 0xd0, 0x3b, 0xb0, 0xc6, 0x57,
0x29, 0x17, 0x96, 0x94, 0x52, 0x22, 0x29, 0x1d, 0x1f, 0x36, 0x5b, 0x8d, 0x90, 0x84, 0xf1, 0x94,
0x48, 0xed, 0x33, 0xd8, 0x39, 0xc2, 0x61, 0x8b, 0x27, 0xb9, 0x2d, 0xcf, 0xbd, 0x70, 0x86, 0x99,
0x8e, 0x4f, 0x02, 0x8d, 0x79, 0x3e, 0x99, 0x94, 0xf2, 0x1e, 0xac, 0x89, 0x9c, 0x5b, 0x6c, 0xf2,
0xba, 0x5c, 0xa1, 0xe0, 0xae, 0x4b, 0xbc, 0xf6, 0x1c, 0x76, 0x7a, 0x93, 0xd7, 0x17, 0xfe, 0xeb,
0x4c, 0xf9, 0x14, 0x1a, 0xf3, 0x53, 0x66, 0x59, 0xa7, 0xf6, 0xf7, 0x39, 0x28, 0x9e, 0xe0, 0xf1,
0x39, 0x0e, 0x10, 0x82, 0xbc, 0x6b, 0x8e, 0x79, 0xb5, 0x50, 0xd6, 0xd9, 0x6f, 0xea, 0x69, 0x63,
0x86, 0x8d, 0xb9, 0x32, 0x07, 0xf0, 0xc2, 0x86, 0x5a, 0xaa, 0x31, 0x09, 0x46, 0xdc, 0x0e, 0xca,
0x7a, 0x89, 0x02, 0xce, 0x82, 0x11, 0xa1, 0x79, 0xb8, 0x35, 0x72, 0xb0, 0x1b, 0x72, 0x74, 0x9e,
0xa1, 0x81, 0x83, 0x18, 0xc1, 0xaf, 0xc1, 0x3a, 0x37, 0x13, 0xc3, 0x0f, 0x1c, 0x2f, 0x70, 0xc2,
0x69, 0xa3, 0xc0, 0x3c, 0xb6, 0xce, 0xc1, 0x3d, 0x01, 0x65, 0x55, 0x03, 0xf6, 0x47, 0xde, 0xd4,
0xf0, 0xcd, 0xf0, 0xb2, 0x51, 0x14, 0x55, 0x03, 0x03, 0xf5, 0xcc, 0xf0, 0x12, 0xbd, 0x0d, 0xf5,
0x73, 0xc7, 0x35, 0x83, 0xa9, 0x71, 0x8d, 0x03, 0x42, 0xfd, 0x77, 0x8d, 0xd1, 0xd4, 0x38, 0xf4,
0x0b, 0x0e, 0xa4, 0xa9, 0xcc, 0xd0, 0x09, 0x8d, 0x4b, 0x93, 0x5c, 0x36, 0x4a, 0xbc, 0xd2, 0x19,
0x3a, 0xe1, 0x53, 0x93, 0x5c, 0xa6, 0x0b, 0x93, 0xf2, 0x5c, 0x61, 0xf2, 0x1b, 0xec, 0x44, 0xe0,
0x8a, 0xca, 0x14, 0xb3, 0xb4, 0xff, 0xcb, 0x01, 0x8a, 0xb3, 0xc8, 0x78, 0xaa, 0xac, 0x71, 0xed,
0x4b, 0x2f, 0x15, 0xfe, 0xc4, 0xb9, 0xea, 0x12, 0xb9, 0xe0, 0x54, 0x89, 0x93, 0xc9, 0x08, 0xf5,
0x3e, 0x54, 0x70, 0x68, 0xd9, 0x86, 0x20, 0xcd, 0x2f, 0x20, 0x05, 0x4a, 0x70, 0xcc, 0xc9, 0x31,
0xdc, 0x09, 0x89, 0x67, 0x98, 0x23, 0xa6, 0x27, 0x2f, 0x30, 0x64, 0x08, 0xe0, 0xe7, 0xd1, 0x47,
0x7c, 0xe0, 0xfc, 0x1a, 0x77, 0x07, 0xc4, 0xdb, 0x97, 0x83, 0x38, 0x2f, 0xd2, 0x76, 0xc3, 0x60,
0xaa, 0x6f, 0x86, 0xf3, 0x98, 0xe6, 0x00, 0x1a, 0xcb, 0x06, 0x20, 0x15, 0x56, 0x69, 0x04, 0xe7,
0x16, 0x4a, 0x7f, 0x22, 0x0d, 0x0a, 0xd7, 0xe6, 0x68, 0x82, 0xa3, 0x50, 0x1c, 0x97, 0x9e, 0xa3,
0xbe, 0x9b, 0x7b, 0xa2, 0x68, 0x3d, 0x28, 0x47, 0x51, 0x17, 0x3d, 0x84, 0x3c, 0xb5, 0x53, 0xa1,
0xf2, 0x64, 0xec, 0x62, 0x18, 0xf4, 0x2d, 0xa8, 0xb2, 0xe0, 0x4d, 0xb0, 0xe5, 0xb9, 0x36, 0x11,
0xa6, 0x5f, 0xa1, 0xb0, 0x3e, 0x07, 0x69, 0x3f, 0x2b, 0xc0, 0x36, 0x8f, 0x63, 0x4f, 0xb1, 0x19,
0x84, 0xe7, 0xd8, 0x0c, 0x33, 0xb9, 0xfd, 0x37, 0x99, 0x29, 0xe4, 0xbf, 0xfe, 0xa9, 0x53, 0xb8,
0xe9, 0xd4, 0x41, 0x6f, 0x41, 0xed, 0x7c, 0x1a, 0x62, 0x62, 0xbc, 0x08, 0x9c, 0x30, 0xc4, 0x2e,
0xf3, 0xc8, 0xbc, 0x5e, 0x65, 0xc0, 0x1f, 0x70, 0x18, 0x4d, 0xb1, 0x38, 0x51, 0x80, 0x4d, 0x9b,
0xf9, 0x63, 0x5e, 0x2f, 0x33, 0x88, 0x8e, 0x4d, 0x9b, 0xea, 0xf7, 0x0a, 0x4f, 0x67, 0x2c, 0x4a,
0x5c, 0xbf, 0x14, 0x26, 0x39, 0xbc, 0x01, 0x65, 0x46, 0xc2, 0x18, 0x94, 0x79, 0xe8, 0xa1, 0x00,
0x36, 0xfe, 0x3d, 0x50, 0x4d, 0xdf, 0x0f, 0xbc, 0x97, 0xce, 0xd8, 0x0c, 0xb1, 0x41, 0x9c, 0x1f,
0xe1, 0x06, 0x30, 0x9a, 0xf5, 0x18, 0xbc, 0xef, 0xfc, 0x08, 0xa3, 0x5d, 0x28, 0x39, 0x6e, 0x88,
0x83, 0x6b, 0x73, 0xd4, 0xa8, 0x32, 0xcd, 0xa1, 0x59, 0x69, 0xde, 0x11, 0x18, 0x3d, 0xa2, 0x49,
0xb3, 0xa6, 0x53, 0x36, 0x6a, 0x73, 0xac, 0x9f, 0xe1, 0x29, 0xa1, 0x11, 0x33, 0xc4, 0xc1, 0xb8,
0x51, 0x67, 0x68, 0xf6, 0x1b, 0xfd, 0xf6, 0xc2, 0xec, 0x7e, 0x9d, 0x4d, 0xfc, 0xed, 0xc5, 0xd9,
0x3d, 0x4f, 0x33, 0x6f, 0xce, 0xf1, 0xd1, 0x47, 0x50, 0x79, 0x3e, 0xc1, 0xc1, 0xd4, 0xe0, 0xf5,
0x90, 0x1a, 0xaf, 0x87, 0x7e, 0x93, 0x22, 0xf8, 0xf6, 0xc2, 0xf3, 0xe8, 0x37, 0xd5, 0xa2, 0xe5,
0x4f, 0x8c, 0x09, 0xeb, 0xef, 0x6c, 0x70, 0x2d, 0x5a, 0xfe, 0xe4, 0x8c, 0x7e, 0x7f, 0x9e, 0x2f,
0x55, 0xd4, 0xaa, 0x76, 0x09, 0xd0, 0xba, 0x34, 0xdd, 0x21, 0xa6, 0xdb, 0x7b, 0x0b, 0xdf, 0x78,
0x02, 0x15, 0x8b, 0xd1, 0x1b, 0xac, 0xb9, 0x94, 0x63, 0xcd, 0xa5, 0x9d, 0x5d, 0xd9, 0x65, 0xa3,
0xc7, 0x11, 0xe7, 0xc7, 0x9a, 0x4c, 0x60, 0x45, 0xbf, 0xb5, 0xef, 0x42, 0x75, 0x36, 0xd3, 0x17,
0x7b, 0xe8, 0x31, 0xac, 0x71, 0x2c, 0x69, 0x28, 0xcc, 0xec, 0xc4, 0x5a, 0x66, 0x44, 0xba, 0x24,
0xd0, 0xf6, 0xa0, 0x3e, 0x08, 0x4c, 0x97, 0x5c, 0x60, 0x11, 0x12, 0x6e, 0x96, 0x54, 0xfb, 0x00,
0x0a, 0x27, 0x38, 0x18, 0xb2, 0x4e, 0x48, 0x68, 0x06, 0x43, 0x1c, 0x2e, 0xcb, 0xec, 0x38, 0x56,
0x3b, 0x86, 0x4a, 0xdf, 0x1f, 0x39, 0x22, 0x01, 0x46, 0xef, 0x41, 0xd1, 0xf7, 0x46, 0x8e, 0x35,
0x15, 0x1d, 0xb4, 0x0d, 0x29, 0x1e, 0xb6, 0xae, 0x7a, 0x0c, 0xa1, 0x0b, 0x02, 0x6a, 0x0a, 0xcc,
0x52, 0x68, 0x58, 0xae, 0xea, 0xec, 0xb7, 0xf6, 0xf3, 0x55, 0xd8, 0x99, 0x8b, 0x10, 0x19, 0x53,
0x34, 0xa9, 0x72, 0xb6, 0xe2, 0x5c, 0x7c, 0xe3, 0x63, 0xca, 0x12, 0xba, 0x66, 0xfb, 0xf8, 0x3d,
0x58, 0x0f, 0x85, 0xbe, 0x8c, 0x44, 0xdc, 0x10, 0x33, 0x25, 0x95, 0xa9, 0xd7, 0xc3, 0xa4, 0x72,
0x13, 0x39, 0x7c, 0x3e, 0x99, 0xc3, 0xa3, 0x4f, 0xa3, 0x8c, 0x11, 0xfb, 0x9e, 0x75, 0xc9, 0xce,
0x6d, 0x1a, 0xe5, 0x12, 0x4a, 0x6d, 0x53, 0x94, 0x4c, 0x1b, 0xd9, 0x07, 0x3d, 0x70, 0xb8, 0xa2,
0xf9, 0x32, 0x8a, 0x0b, 0x36, 0x0e, 0x38, 0x41, 0x8f, 0x07, 0xe1, 0xc2, 0x98, 0x6e, 0x1f, 0x0b,
0x1f, 0x51, 0xab, 0x93, 0xed, 0xa8, 0xce, 0x31, 0xe8, 0x13, 0xa8, 0x12, 0xba, 0x61, 0x86, 0x08,
0xa1, 0x25, 0x46, 0x29, 0xf6, 0x29, 0xb6, 0x95, 0x7a, 0x85, 0xc4, 0xf6, 0xf5, 0x09, 0xd4, 0x63,
0xea, 0x34, 0xae, 0xf7, 0x58, 0x7c, 0x89, 0x02, 0x43, 0xdc, 0x46, 0xf5, 0xaa, 0x15, 0xfb, 0xd2,
0x2e, 0x60, 0x7d, 0x9f, 0x5c, 0x09, 0xc6, 0xdf, 0x5c, 0xb0, 0xd7, 0xfe, 0x48, 0x01, 0x75, 0x36,
0x51, 0xc6, 0x6e, 0x55, 0xcd, 0xc5, 0x2f, 0x8c, 0x74, 0x25, 0x56, 0x71, 0xf1, 0x0b, 0x5d, 0x6e,
0xe4, 0x43, 0xa8, 0x52, 0x1a, 0xa6, 0x05, 0xc7, 0xe6, 0x49, 0x5c, 0x5e, 0x07, 0x17, 0xbf, 0xa0,
0xeb, 0xed, 0xd8, 0x44, 0xfb, 0x53, 0x05, 0x90, 0x8e, 0x7d, 0x2f, 0x08, 0xb3, 0x2f, 0x5a, 0x83,
0xfc, 0x08, 0x5f, 0x84, 0x4b, 0x96, 0xcc, 0x70, 0xe8, 0x11, 0x14, 0x02, 0x67, 0x78, 0x19, 0x2e,
0x69, 0x55, 0x72, 0xa4, 0xd6, 0x82, 0xcd, 0x84, 0x30, 0x99, 0x52, 0xde, 0x9f, 0x28, 0xb0, 0xb5,
0x4f, 0xae, 0x0e, 0xcc, 0xd0, 0xba, 0xfc, 0xc6, 0x77, 0x92, 0xf5, 0xa3, 0x99, 0x85, 0xf2, 0xb6,
0xb1, 0xec, 0x47, 0x53, 0x50, 0x8b, 0x42, 0xb4, 0x2e, 0xac, 0x31, 0x29, 0x3a, 0x87, 0xf3, 0x5b,
0xa6, 0xdc, 0xbc, 0x65, 0xb9, 0xb9, 0x2d, 0xbb, 0x80, 0x3b, 0xa9, 0xe5, 0x65, 0xb2, 0x9f, 0x07,
0xb0, 0x2a, 0xf9, 0x57, 0xf6, 0x6a, 0x31, 0x8f, 0xea, 0x1c, 0xea, 0x14, 0xa3, 0xf9, 0x34, 0xba,
0xd1, 0xcd, 0x78, 0x4d, 0x4d, 0xbe, 0x3b, 0x2b, 0x12, 0x17, 0x97, 0x9e, 0x51, 0x99, 0xf8, 0x14,
0x1a, 0xf3, 0x33, 0x66, 0xb2, 0x81, 0xdf, 0x81, 0x6a, 0xfc, 0xf8, 0xa7, 0xc5, 0x08, 0xef, 0x28,
0xcc, 0xda, 0xf8, 0x5c, 0xf7, 0x75, 0x06, 0x9e, 0x5d, 0x4a, 0xbc, 0x05, 0x35, 0xec, 0xda, 0x31,
0x32, 0xee, 0x55, 0x55, 0xec, 0xda, 0x11, 0x91, 0xf6, 0x09, 0x80, 0x8e, 0x2d, 0x2f, 0xb0, 0x7b,
0xa6, 0x13, 0x2c, 0x48, 0x5a, 0xb7, 0xe2, 0x49, 0x6b, 0x5e, 0xa4, 0xa9, 0xda, 0x4f, 0x15, 0x28,
0xc9, 0x1c, 0x2d, 0x19, 0x7f, 0x95, 0x54, 0xfc, 0x65, 0x48, 0xd3, 0x36, 0xc4, 0x89, 0x23, 0x90,
0xa6, 0xcd, 0x92, 0x12, 0xd6, 0xdc, 0x32, 0x6d, 0x83, 0x25, 0x5b, 0xcc, 0xde, 0xf2, 0x3a, 0x23,
0x3f, 0xa0, 0x80, 0x74, 0x0e, 0x91, 0xbf, 0x39, 0x87, 0xd0, 0xfe, 0x6e, 0x0d, 0x60, 0xd6, 0x6e,
0x4d, 0xb4, 0x84, 0x95, 0x44, 0x4b, 0x18, 0x35, 0xa1, 0x64, 0x99, 0xbe, 0x69, 0xd1, 0x62, 0x4e,
0xc8, 0x25, 0xbf, 0xd1, 0x9b, 0x50, 0x36, 0xaf, 0x4d, 0x67, 0x64, 0x9e, 0x8f, 0xb0, 0x14, 0x2b,
0x02, 0xd0, 0x84, 0x50, 0xac, 0x97, 0xfb, 0x49, 0x9e, 0xf9, 0x89, 0x38, 0x3d, 0x98, 0xa3, 0xa0,
0xef, 0x00, 0x22, 0x22, 0x55, 0x25, 0xae, 0xe9, 0x0b, 0xc2, 0x02, 0x23, 0x54, 0x05, 0xa6, 0xef,
0x9a, 0x3e, 0xa7, 0xfe, 0x10, 0xb6, 0x02, 0x6c, 0x61, 0xe7, 0x3a, 0x45, 0x5f, 0x64, 0xf4, 0x28,
0xc2, 0xcd, 0x46, 0xdc, 0x03, 0x98, 0xd9, 0x00, 0x3b, 0x73, 0x6a, 0x7a, 0x39, 0xda, 0x7e, 0xb4,
0x0b, 0x9b, 0xa6, 0xef, 0x8f, 0xa6, 0x29, 0x7e, 0x25, 0x46, 0xb7, 0x21, 0x51, 0x33, 0x76, 0x3b,
0xb0, 0xe6, 0x10, 0xe3, 0x7c, 0x42, 0xa6, 0xec, 0x74, 0x29, 0xe9, 0x45, 0x87, 0x1c, 0x4c, 0xc8,
0x94, 0xee, 0xde, 0x84, 0x60, 0x3b, 0x9e, 0xb4, 0x96, 0x28, 0x80, 0x65, 0xab, 0x73, 0xc9, 0x75,
0x65, 0x41, 0x72, 0x9d, 0xce, 0x9e, 0xab, 0xf3, 0xd9, 0x73, 0x32, 0xff, 0xae, 0xa5, 0xf3, 0xef,
0x44, 0x72, 0x5d, 0x4f, 0x25, 0xd7, 0xf1, 0x8c, 0x79, 0xfd, 0x16, 0x19, 0xf3, 0x07, 0x00, 0x51,
0x8e, 0x49, 0xb3, 0xd2, 0x58, 0x26, 0x37, 0x73, 0x03, 0xbd, 0x2c, 0xd3, 0x4e, 0x82, 0x3e, 0x81,
0x1a, 0x33, 0x51, 0xc7, 0x33, 0x02, 0x93, 0x5a, 0xe9, 0xc6, 0x92, 0x31, 0x15, 0x4a, 0xd6, 0xf1,
0x74, 0x4a, 0x84, 0x3e, 0x85, 0x3a, 0x5d, 0x30, 0x9e, 0x0d, 0x43, 0x4b, 0x86, 0x55, 0x19, 0x9d,
0x1c, 0xf7, 0x31, 0x54, 0x3d, 0xdf, 0x18, 0x99, 0x21, 0x76, 0x2d, 0x07, 0x93, 0xc6, 0xe6, 0xb2,
0xc9, 0x3c, 0xff, 0x58, 0x12, 0xa1, 0xf7, 0x01, 0x58, 0x88, 0xe5, 0x5e, 0xb2, 0x25, 0xe2, 0x52,
0xa2, 0x8e, 0xd2, 0x59, 0xf7, 0x83, 0xfb, 0x44, 0xca, 0xab, 0xee, 0xdc, 0x22, 0x33, 0xa7, 0xe6,
0x36, 0xf2, 0x5e, 0x18, 0xc4, 0xf2, 0x02, 0xdc, 0xd8, 0xe6, 0x3b, 0x44, 0x21, 0x7d, 0x0a, 0xa0,
0xd6, 0x6e, 0x9b, 0x63, 0x73, 0x88, 0x6d, 0x71, 0x1e, 0x10, 0xea, 0x6f, 0x3b, 0x2c, 0xda, 0xab,
0x02, 0x23, 0xfa, 0x7e, 0x1d, 0x5b, 0x1b, 0xc1, 0x1d, 0xe6, 0xa1, 0xaf, 0x5b, 0x8a, 0x8a, 0x9b,
0x96, 0xdc, 0xab, 0x6f, 0x5a, 0xfe, 0x4d, 0x81, 0xed, 0xf4, 0x74, 0xbf, 0x3a, 0x57, 0x21, 0x34,
0x98, 0xcb, 0x1b, 0x74, 0xd9, 0x10, 0xe2, 0x57, 0xcd, 0x75, 0x01, 0x16, 0x1d, 0x21, 0xed, 0xc7,
0x39, 0xd8, 0xea, 0x5b, 0x66, 0x18, 0xe2, 0xe0, 0x35, 0x6e, 0x13, 0x1e, 0xcc, 0xb5, 0xbb, 0x0f,
0x72, 0x0d, 0x25, 0x16, 0xae, 0x6f, 0x79, 0x0f, 0x1b, 0xab, 0xf0, 0xf3, 0xaf, 0xa8, 0xf0, 0xb7,
0xa0, 0x30, 0x0c, 0xbc, 0x89, 0xcf, 0x22, 0x5f, 0x59, 0xe7, 0x1f, 0xb3, 0x2b, 0x0d, 0x66, 0x26,
0x45, 0x66, 0x26, 0x42, 0x2c, 0xd2, 0xb1, 0x69, 0x16, 0x12, 0xe0, 0x30, 0x98, 0x1a, 0xbc, 0x35,
0xce, 0xeb, 0x71, 0x60, 0xa0, 0x63, 0x0a, 0xd1, 0xae, 0xe1, 0x4e, 0x4a, 0x13, 0x99, 0x36, 0xf4,
0x03, 0xd8, 0xbc, 0x70, 0x5c, 0x87, 0x5c, 0x62, 0xdb, 0xf0, 0x71, 0x60, 0x61, 0x37, 0x94, 0x0f,
0x0b, 0xf2, 0x3a, 0x92, 0xa8, 0x5e, 0x84, 0xd1, 0x0e, 0xe1, 0xce, 0x11, 0x0e, 0x8f, 0x5a, 0x7d,
0xf3, 0x02, 0xf7, 0x3c, 0xc7, 0xcd, 0x64, 0xb8, 0x1a, 0x86, 0xed, 0x34, 0x97, 0x4c, 0xe2, 0x53,
0x9f, 0x34, 0x2f, 0xb0, 0xe1, 0x53, 0x1e, 0x42, 0xea, 0x32, 0x91, 0x4c, 0xb5, 0x0b, 0x68, 0x9c,
0xf9, 0xb6, 0x19, 0xe2, 0xd7, 0x94, 0xf7, 0xa6, 0x79, 0x3c, 0xb8, 0xbb, 0x60, 0x9e, 0x4c, 0x2b,
0x7a, 0x04, 0x75, 0x9a, 0x2e, 0xce, 0xcd, 0x46, 0x93, 0xc8, 0x88, 0xb7, 0xf6, 0xb7, 0x0a, 0x3c,
0xe0, 0x33, 0xf6, 0x71, 0x70, 0xed, 0x58, 0xbf, 0x90, 0x05, 0x72, 0x4e, 0xd2, 0x29, 0xaa, 0x7a,
0x59, 0x40, 0x3a, 0x36, 0x4d, 0x89, 0x06, 0x83, 0x63, 0xe6, 0x0e, 0xab, 0x3a, 0xfd, 0x99, 0xd2,
0x48, 0x3e, 0xad, 0x91, 0x7f, 0x50, 0xe0, 0xe1, 0x72, 0x01, 0x33, 0xef, 0xf5, 0xd7, 0x12, 0xf1,
0x11, 0xd4, 0xc7, 0x8e, 0x6b, 0xcc, 0x89, 0x59, 0x1d, 0x3b, 0xee, 0x4c, 0x95, 0x7f, 0xae, 0xd0,
0xe4, 0x6f, 0x28, 0xa2, 0xd1, 0xfc, 0x79, 0xae, 0xdc, 0xd8, 0x2c, 0xcb, 0xdd, 0xd4, 0x2c, 0x5b,
0xbd, 0xa1, 0x59, 0x96, 0x4f, 0x9e, 0xe7, 0xda, 0x1f, 0x2b, 0xb0, 0xd1, 0x9f, 0xba, 0xd6, 0x6b,
0xc4, 0xb8, 0x47, 0x50, 0xe4, 0xbd, 0xe5, 0x85, 0x7d, 0x56, 0x81, 0x63, 0xb5, 0x0e, 0xcb, 0xa0,
0x1c, 0xd7, 0xc6, 0x2f, 0x85, 0x9c, 0x3c, 0xa9, 0xea, 0x50, 0x88, 0xf6, 0x3f, 0x0a, 0xa0, 0xb8,
0x24, 0x99, 0x36, 0xee, 0xd6, 0xf5, 0xc2, 0x8d, 0xf2, 0xd0, 0xd4, 0x40, 0x84, 0x6e, 0x99, 0x0d,
0x27, 0x52, 0x03, 0xb9, 0x8b, 0x32, 0x0f, 0xe5, 0x07, 0xf7, 0xc7, 0x50, 0x17, 0x83, 0x92, 0x0d,
0xf0, 0x64, 0xb8, 0xae, 0x71, 0x1a, 0xd1, 0xbe, 0xd6, 0x7e, 0xc8, 0xba, 0xff, 0x5d, 0x1f, 0x07,
0x66, 0xe8, 0x05, 0xbf, 0xf8, 0x6b, 0xd5, 0x7f, 0x56, 0xd8, 0xfd, 0xff, 0x6c, 0x82, 0x4c, 0xaa,
0x7d, 0xd5, 0x14, 0x08, 0x41, 0xde, 0xc6, 0xc4, 0x62, 0x6a, 0xac, 0xea, 0xec, 0x37, 0x65, 0x2f,
0x0e, 0xed, 0x3c, 0xeb, 0x90, 0x09, 0xf6, 0x52, 0x0c, 0x71, 0x46, 0x0b, 0x1a, 0xd6, 0x24, 0x73,
0x5c, 0x9b, 0x9d, 0x5c, 0x55, 0x9d, 0xfd, 0x66, 0x9d, 0x0e, 0x6a, 0x12, 0x27, 0xe6, 0xcb, 0x41,
0x3f, 0x6b, 0x25, 0x3e, 0x36, 0x5f, 0x1a, 0x51, 0xda, 0x32, 0xf7, 0xf2, 0xaa, 0x30, 0x36, 0x5f,
0x0e, 0x78, 0xc2, 0x75, 0xe5, 0xf8, 0x86, 0x75, 0x89, 0xad, 0x2b, 0xb6, 0x8a, 0x92, 0x5e, 0xa6,
0x10, 0xd6, 0xcf, 0xd3, 0xfe, 0x42, 0x78, 0x89, 0x10, 0x24, 0xeb, 0x55, 0x2a, 0x15, 0x65, 0xe4,
0x59, 0xe6, 0xe8, 0x15, 0x02, 0xc1, 0xd8, 0x7c, 0x79, 0x4c, 0x69, 0x84, 0x54, 0x53, 0xd7, 0xc2,
0xb6, 0x61, 0x5b, 0xf2, 0x16, 0xad, 0xcc, 0x21, 0x87, 0x16, 0xd1, 0xfe, 0x50, 0x81, 0xcd, 0x58,
0x1f, 0x8b, 0x64, 0x8e, 0xc6, 0xac, 0x07, 0x11, 0x6b, 0x51, 0x96, 0x19, 0x84, 0x55, 0x8c, 0xa9,
0xe4, 0x60, 0x75, 0x2e, 0x39, 0xf8, 0x4b, 0x05, 0xb6, 0x92, 0x42, 0xfc, 0x52, 0x92, 0x83, 0x54,
0x52, 0xb3, 0x9a, 0x4a, 0x6a, 0xb4, 0x4b, 0x76, 0xdb, 0x7b, 0xd8, 0x92, 0xb7, 0x74, 0x1d, 0xf7,
0xc2, 0xcb, 0x98, 0xc1, 0x25, 0xae, 0xff, 0x72, 0x73, 0xd7, 0x7f, 0x7f, 0xa2, 0xc0, 0xdd, 0x05,
0x53, 0x65, 0xd2, 0xc2, 0x36, 0x14, 0xf9, 0x6b, 0x44, 0x36, 0x4f, 0x41, 0x17, 0x5f, 0x31, 0x33,
0x5e, 0x7d, 0x95, 0x19, 0x6b, 0xff, 0x92, 0x03, 0x98, 0x95, 0x14, 0xa8, 0x0e, 0xb9, 0xa3, 0x96,
0x38, 0x55, 0x72, 0x47, 0x2d, 0x7a, 0x6e, 0x1d, 0x61, 0x79, 0xca, 0xd3, 0x9f, 0xd4, 0xeb, 0xfa,
0x96, 0x29, 0x8f, 0x0d, 0xf6, 0x1b, 0x3d, 0x84, 0x4a, 0xcb, 0xf3, 0x03, 0xcf, 0xc2, 0x84, 0x78,
0x81, 0x38, 0x31, 0xe2, 0x20, 0x2a, 0xe6, 0x21, 0x1e, 0xe1, 0x10, 0x33, 0x6f, 0xcd, 0xeb, 0xe2,
0x8b, 0x8e, 0xe4, 0xbf, 0x74, 0xd3, 0x1d, 0x62, 0x71, 0xf7, 0x13, 0x07, 0x51, 0x09, 0x7a, 0x13,
0x99, 0x63, 0xd2, 0x9f, 0xa8, 0x09, 0xa5, 0x5e, 0x80, 0x59, 0x51, 0x26, 0x6e, 0x7a, 0xa2, 0x6f,
0xf4, 0x29, 0x6c, 0xef, 0x5b, 0xcf, 0x27, 0x4e, 0x80, 0x7b, 0x98, 0x10, 0x67, 0xec, 0x90, 0xd0,
0xa1, 0x8a, 0xbf, 0x12, 0x77, 0x3e, 0x4b, 0xb0, 0x54, 0xbe, 0x96, 0x37, 0xa6, 0xf6, 0xca, 0x4b,
0x68, 0xf1, 0x45, 0xe7, 0xd2, 0xbd, 0xd1, 0xe8, 0xdc, 0xb4, 0xae, 0x44, 0xed, 0x1c, 0x7d, 0x3f,
0xfe, 0x2b, 0x05, 0xca, 0xd1, 0xf3, 0x57, 0x54, 0x84, 0x5c, 0xf7, 0x99, 0xba, 0x82, 0x2a, 0xb0,
0x76, 0x76, 0xfa, 0xec, 0xb4, 0xfb, 0x83, 0x53, 0x55, 0x41, 0x5b, 0xa0, 0x9e, 0x76, 0x07, 0xc6,
0x41, 0xb7, 0x3b, 0xe8, 0x0f, 0xf4, 0xfd, 0x5e, 0xaf, 0x7d, 0xa8, 0xe6, 0xd0, 0x26, 0xac, 0xf7,
0x07, 0x5d, 0xbd, 0x6d, 0x0c, 0xba, 0x27, 0x07, 0xfd, 0x41, 0xf7, 0xb4, 0xad, 0xae, 0xa2, 0x06,
0x6c, 0xed, 0x1f, 0xeb, 0xed, 0xfd, 0xc3, 0x2f, 0x93, 0xe4, 0x79, 0x8a, 0xe9, 0x9c, 0xb6, 0xba,
0x27, 0xbd, 0xfd, 0x41, 0xe7, 0xe0, 0xb8, 0x6d, 0x7c, 0xd1, 0xd6, 0xfb, 0x9d, 0xee, 0xa9, 0x5a,
0xa0, 0xec, 0xf5, 0xf6, 0x51, 0xa7, 0x7b, 0x6a, 0xd0, 0x59, 0x3e, 0xeb, 0x9e, 0x9d, 0x1e, 0xaa,
0xc5, 0xc7, 0x9f, 0x40, 0x25, 0x76, 0xa7, 0x80, 0x4a, 0x90, 0xef, 0xb7, 0xf6, 0x4f, 0xd5, 0x15,
0xb4, 0x0e, 0x95, 0xfd, 0x5e, 0x4f, 0xef, 0xfe, 0x56, 0xe7, 0x64, 0x7f, 0xd0, 0x56, 0x15, 0x04,
0x50, 0x3c, 0xeb, 0xb7, 0x9f, 0xb5, 0xbf, 0x54, 0x73, 0x8f, 0x7b, 0x50, 0x4f, 0xc6, 0x59, 0xba,
0x92, 0xfe, 0x59, 0xab, 0xd5, 0xee, 0xf7, 0xf9, 0xb2, 0x06, 0x9d, 0x93, 0x76, 0xf7, 0x6c, 0xc0,
0xc7, 0xb5, 0xf6, 0x4f, 0x5b, 0xed, 0x63, 0x35, 0x47, 0x11, 0x7a, 0xbb, 0x77, 0xbc, 0xdf, 0xa2,
0x8b, 0xa0, 0x1f, 0x67, 0xa7, 0xa7, 0x9d, 0xd3, 0x23, 0x35, 0xff, 0xf8, 0x9f, 0x14, 0x28, 0x33,
0xd3, 0x7a, 0xe6, 0xb8, 0x36, 0x1d, 0xd3, 0x0d, 0x2f, 0x71, 0x40, 0xd4, 0x15, 0xaa, 0xab, 0xa3,
0x96, 0xaa, 0xa0, 0x35, 0x66, 0x5d, 0x6a, 0x8e, 0xc9, 0x68, 0x99, 0xae, 0xba, 0x4a, 0x65, 0x8c,
0xd9, 0x8d, 0x9a, 0xa7, 0xe3, 0xb8, 0x39, 0xa8, 0x05, 0x8a, 0x8c, 0x99, 0x86, 0x5a, 0xa4, 0x0c,
0x7a, 0x93, 0x50, 0x5d, 0x43, 0xd5, 0x99, 0x4d, 0xa8, 0x25, 0xd4, 0x5c, 0x66, 0x05, 0x6a, 0x99,
0xc9, 0xce, 0xf6, 0x56, 0x05, 0x3a, 0x4a, 0xee, 0xa6, 0x5a, 0xd9, 0xfb, 0x4f, 0x15, 0x72, 0xbd,
0x43, 0xb4, 0x0f, 0x30, 0xbb, 0x75, 0x46, 0x3b, 0xf3, 0xf7, 0xd0, 0xcc, 0xe7, 0x9b, 0x8d, 0x65,
0x17, 0xd4, 0xda, 0x0a, 0xfa, 0x10, 0x56, 0x07, 0xc4, 0x43, 0xe2, 0xe4, 0x9f, 0xbd, 0x7a, 0x6e,
0x6e, 0xc4, 0x20, 0x92, 0xfa, 0x5d, 0xe5, 0x43, 0x05, 0x7d, 0x1f, 0xca, 0xd1, 0x93, 0x52, 0xb4,
0xcd, 0xa9, 0xd2, 0x6f, 0x7a, 0x9b, 0x3b, 0x73, 0xf0, 0x68, 0xc6, 0x13, 0xa8, 0x27, 0x1f, 0xa5,
0xa2, 0x37, 0x38, 0xf1, 0xc2, 0x07, 0xaf, 0xcd, 0x37, 0x17, 0x23, 0x23, 0x76, 0x4f, 0x60, 0x4d,
0x3c, 0x1c, 0x45, 0x22, 0xfc, 0x24, 0x9f, 0xa1, 0x36, 0xef, 0xa4, 0xa0, 0xd1, 0xc8, 0x5f, 0x87,
0x92, 0x7c, 0xc5, 0x89, 0xee, 0x44, 0x2a, 0x8a, 0x3f, 0xa3, 0x6c, 0x6e, 0xa7, 0xc1, 0xf1, 0xc1,
0xf2, 0x35, 0xa4, 0x1c, 0x9c, 0x7a, 0x83, 0x29, 0x07, 0xa7, 0x1f, 0x4d, 0x6a, 0x2b, 0xe8, 0x08,
0xaa, 0xf1, 0x47, 0x83, 0xe8, 0x6e, 0x34, 0x4d, 0xfa, 0x19, 0x63, 0xb3, 0xb9, 0x08, 0x15, 0xd7,
0x65, 0xb2, 0x1d, 0x21, 0x75, 0xb9, 0xb0, 0x27, 0x22, 0x75, 0xb9, 0xb8, 0x83, 0xa1, 0xad, 0xa0,
0x01, 0xac, 0xa7, 0xae, 0xed, 0xd0, 0x9b, 0xf1, 0x94, 0x70, 0x8e, 0xe1, 0xbd, 0x25, 0xd8, 0xb4,
0xc1, 0x44, 0x4f, 0xeb, 0xd0, 0x4c, 0xa3, 0x89, 0xa4, 0xbc, 0xb9, 0x33, 0x07, 0x8f, 0xa4, 0x3a,
0x80, 0xda, 0x11, 0x0e, 0x7b, 0x01, 0xbe, 0xce, 0xce, 0xe3, 0x33, 0xc6, 0x63, 0xf6, 0xbc, 0x0f,
0x35, 0x53, 0xb4, 0xb1, 0x37, 0x7f, 0xaf, 0xe2, 0x73, 0x08, 0x95, 0xd8, 0xbb, 0x33, 0x24, 0x3c,
0x6b, 0xfe, 0x59, 0x5f, 0xf3, 0xee, 0x02, 0x4c, 0xc4, 0xe5, 0xfb, 0x50, 0x92, 0x77, 0x5c, 0xd2,
0x78, 0x52, 0x97, 0x6b, 0xd2, 0x78, 0xd2, 0x57, 0x61, 0xda, 0xea, 0x8f, 0x73, 0x0a, 0x3a, 0x82,
0x4a, 0xec, 0x36, 0x48, 0x4a, 0x31, 0x7f, 0x5b, 0x25, 0xa5, 0x58, 0x70, 0x75, 0xc4, 0x19, 0x7d,
0x0e, 0xb5, 0xc4, 0x8d, 0x89, 0x54, 0xcb, 0xa2, 0x5b, 0xa2, 0xe6, 0x1b, 0x0b, 0x71, 0xd1, 0xa2,
0xfa, 0xa0, 0xa6, 0xef, 0x28, 0xd0, 0xbd, 0xf8, 0xfc, 0xf3, 0x1c, 0xef, 0x2f, 0x43, 0xc7, 0x99,
0xa6, 0xdf, 0xb5, 0x49, 0xa6, 0x4b, 0xde, 0xcd, 0x49, 0xa6, 0xcb, 0x9e, 0xc3, 0x71, 0xa6, 0xe9,
0x47, 0x64, 0x92, 0xe9, 0x92, 0xf7, 0x6c, 0x92, 0xe9, 0xb2, 0xb7, 0x67, 0xda, 0x0a, 0x55, 0x65,
0xa2, 0x8f, 0x24, 0x55, 0xb9, 0xa8, 0xcd, 0x26, 0x55, 0xb9, 0xb0, 0xf1, 0xc4, 0xdd, 0x3a, 0xd9,
0xd5, 0x91, 0x6e, 0xbd, 0xb0, 0x63, 0x24, 0xdd, 0x7a, 0x71, 0x23, 0x48, 0x5b, 0x41, 0x5f, 0xc0,
0xc6, 0x5c, 0x57, 0x05, 0x89, 0x15, 0x2d, 0x6b, 0xeb, 0x34, 0x1f, 0x2c, 0xc5, 0x47, 0x7c, 0xaf,
0x64, 0x57, 0x68, 0xbe, 0x35, 0x81, 0xde, 0x8e, 0x0f, 0x5f, 0xda, 0x5b, 0x69, 0xbe, 0x73, 0x13,
0x59, 0xcc, 0x83, 0x2b, 0xb3, 0x02, 0x3a, 0x3a, 0xec, 0xe6, 0xaa, 0x7b, 0x79, 0xd8, 0xcd, 0x17,
0xdb, 0x22, 0x1a, 0x1d, 0x42, 0x25, 0x56, 0x2e, 0xa2, 0xd9, 0xd9, 0x98, 0x2a, 0x51, 0x9b, 0x77,
0x17, 0x60, 0x62, 0x1e, 0x5c, 0x8e, 0x4a, 0x26, 0x19, 0x8f, 0xd2, 0xc5, 0x5c, 0x73, 0x67, 0x0e,
0x1e, 0x3f, 0x01, 0xe2, 0x75, 0x85, 0x3c, 0x01, 0x16, 0x14, 0x3c, 0xf2, 0x04, 0x58, 0x54, 0x86,
0xf0, 0xbd, 0x9d, 0xcb, 0xcf, 0xd1, 0xcc, 0x05, 0x16, 0xd6, 0x08, 0x72, 0x6f, 0x97, 0x26, 0xf6,
0xda, 0xca, 0xc1, 0x3b, 0xff, 0xf1, 0x8f, 0x25, 0xe5, 0xdf, 0xbf, 0xba, 0xaf, 0xfc, 0xfc, 0xab,
0xfb, 0xca, 0x7f, 0x7d, 0x75, 0x5f, 0xf9, 0x9b, 0xff, 0xbe, 0xbf, 0x02, 0xaa, 0x17, 0x0c, 0x77,
0x43, 0xe7, 0xea, 0x7a, 0xf7, 0xea, 0x9a, 0xfd, 0x4f, 0xd8, 0x79, 0x91, 0xfd, 0xf9, 0xf8, 0xff,
0x03, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x0d, 0x3b, 0xf4, 0x8c, 0x36, 0x00, 0x00,
}
| sovPdpb |
call.rs | // Copyright (c) 2020 Xu Shaohua <[email protected]>. All rights reserved.
// Use of this source is governed by Apache-2.0 License that can be found
// in the LICENSE file.
extern crate alloc;
use super::sysno::*;
use crate::c_str::CString;
use crate::path::Path;
use crate::syscalls::*;
use crate::types::*;
/// Accept a connection on a socket.
///
/// On success, `addr` is filled with the peer address and `addrlen` with its size.
pub fn accept(sockfd: i32, addr: &mut sockaddr_in_t, addrlen: &mut socklen_t) -> Result<(), Errno> {
    let sockfd = sockfd as usize;
    let addr_ptr = addr as *mut sockaddr_in_t as usize;
    let addrlen_ptr = addrlen as *mut socklen_t as usize;
    syscall3(SYS_ACCEPT, sockfd, addr_ptr, addrlen_ptr).map(drop)
}
/// Accept a connection on a socket, with additional `flags` (e.g. `SOCK_NONBLOCK`).
pub fn accept4(
    sockfd: i32,
    addr: &mut sockaddr_in_t,
    addrlen: &mut socklen_t,
    flags: i32,
) -> Result<(), Errno> {
    syscall4(
        SYS_ACCEPT4,
        sockfd as usize,
        addr as *mut sockaddr_in_t as usize,
        addrlen as *mut socklen_t as usize,
        flags as usize,
    )
    .map(drop)
}
/// Turn process accounting on or off.
/// ```
/// let path = "/tmp/nc-acct";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let ret = nc::acct(path);
/// assert_eq!(ret, Err(nc::EPERM));
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn acct<P: AsRef<Path>>(filename: P) -> Result<(), Errno> {
    // Keep the CString alive in a local so the raw pointer stays valid
    // for the duration of the syscall.
    let path = CString::new(filename.as_ref());
    syscall1(SYS_ACCT, path.as_ptr() as usize).map(drop)
}
/// Add a key to the kernel's key management facility.
///
/// Returns the serial number of the newly created (or updated) key.
pub fn add_key<P: AsRef<Path>>(
    type_: P,
    description: P,
    payload: usize,
    plen: size_t,
    dest_keyring: key_serial_t,
) -> Result<key_serial_t, Errno> {
    // Both CStrings are bound to locals so their buffers outlive the syscall.
    let key_type = CString::new(type_.as_ref());
    let desc = CString::new(description.as_ref());
    syscall5(
        SYS_ADD_KEY,
        key_type.as_ptr() as usize,
        desc.as_ptr() as usize,
        payload,
        plen as usize,
        dest_keyring as usize,
    )
    .map(|ret| ret as key_serial_t)
}
/// Tune the kernel clock; returns the clock state on success.
/// ```
/// let mut tm = nc::timex_t::default();
/// let ret = nc::adjtimex(&mut tm);
/// assert!(ret.is_ok());
/// assert!(tm.time.tv_sec > 1611552896);
/// ```
pub fn adjtimex(buf: &mut timex_t) -> Result<i32, Errno> {
    syscall1(SYS_ADJTIMEX, buf as *mut timex_t as usize).map(|state| state as i32)
}
/// Architecture-specific syscall entry point.
///
/// Not yet implemented in this binding; calling it panics via `unimplemented!`.
pub fn arch_specific_syscall() {
    core::unimplemented!();
    // syscall0(SYS_ARCH_SPECIFIC_SYSCALL);
}
/// Assign a local address to a socket.
pub fn bind(sockfd: i32, addr: &sockaddr_in_t, addrlen: socklen_t) -> Result<(), Errno> {
    syscall3(
        SYS_BIND,
        sockfd as usize,
        addr as *const sockaddr_in_t as usize,
        addrlen as usize,
    )
    .map(drop)
}
/// Perform a command on an extended BPF map or program.
///
/// Returns a command-dependent non-negative value (e.g. a new fd) on success.
pub fn bpf(cmd: i32, attr: &mut bpf_attr_t, size: u32) -> Result<i32, Errno> {
    syscall3(
        SYS_BPF,
        cmd as usize,
        attr as *mut bpf_attr_t as usize,
        size as usize,
    )
    .map(|ret| ret as i32)
}
/// Change the location of the program break (data segment size).
pub fn brk(addr: usize) -> Result<(), Errno> {
    // Discard the kernel's return value; only success/failure matters here.
    let _ = syscall1(SYS_BRK, addr)?;
    Ok(())
}
/// Read the capability sets of a thread into `data`.
pub fn capget(hdrp: &mut cap_user_header_t, data: &mut cap_user_data_t) -> Result<(), Errno> {
    syscall2(
        SYS_CAPGET,
        hdrp as *mut cap_user_header_t as usize,
        data as *mut cap_user_data_t as usize,
    )
    .map(drop)
}
/// Set the capability sets of a thread from `data`.
pub fn capset(hdrp: &mut cap_user_header_t, data: &cap_user_data_t) -> Result<(), Errno> {
    syscall2(
        SYS_CAPSET,
        hdrp as *mut cap_user_header_t as usize,
        data as *const cap_user_data_t as usize,
    )
    .map(drop)
}
/// Change the calling process's current working directory.
/// ```
/// let path = "/tmp";
/// // Open folder directly.
/// let ret = nc::chdir(path);
/// assert!(ret.is_ok());
///
/// let mut buf = [0_u8; nc::PATH_MAX as usize + 1];
/// let ret = nc::getcwd(buf.as_mut_ptr() as usize, buf.len());
/// assert!(ret.is_ok());
/// // Remove null-terminal char.
/// let path_len = ret.unwrap() as usize - 1;
/// let new_cwd = std::str::from_utf8(&buf[..path_len]);
/// assert_eq!(new_cwd, Ok(path));
/// ```
pub fn chdir<P: AsRef<Path>>(filename: P) -> Result<(), Errno> {
    // The CString local keeps the buffer alive across the syscall.
    let path = CString::new(filename.as_ref());
    syscall1(SYS_CHDIR, path.as_ptr() as usize).map(drop)
}
/// Change the root directory of the calling process.
/// ```
/// let ret = nc::chroot("/");
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn chroot<P: AsRef<Path>>(filename: P) -> Result<(), Errno> {
    // The CString local keeps the buffer alive across the syscall.
    let path = CString::new(filename.as_ref());
    syscall1(SYS_CHROOT, path.as_ptr() as usize).map(drop)
}
/// Tune a specific kernel clock; returns the clock state on success.
/// ```
/// let mut tm = nc::timex_t::default();
/// let ret = nc::clock_adjtime(nc::CLOCK_REALTIME, &mut tm);
/// assert!(ret.is_ok());
/// assert!(tm.time.tv_sec > 1611552896);
/// ```
pub fn clock_adjtime(which_clock: clockid_t, tx: &mut timex_t) -> Result<(), Errno> {
    syscall2(
        SYS_CLOCK_ADJTIME,
        which_clock as usize,
        tx as *mut timex_t as usize,
    )
    .map(drop)
}
/// Query the resolution (precision) of the given clock.
/// ```
/// let mut tp = nc::timespec_t::default();
/// let ret = nc::clock_getres(nc::CLOCK_BOOTTIME, &mut tp);
/// assert!(ret.is_ok());
/// assert!(tp.tv_nsec > 0);
/// ```
pub fn clock_getres(which_clock: clockid_t, tp: &mut timespec_t) -> Result<(), Errno> {
    syscall2(
        SYS_CLOCK_GETRES,
        which_clock as usize,
        tp as *mut timespec_t as usize,
    )
    .map(drop)
}
/// Read the current time of the given clock into `tp`.
/// ```
/// let mut tp = nc::timespec_t::default();
/// let ret = nc::clock_gettime(nc::CLOCK_REALTIME_COARSE, &mut tp);
/// assert!(ret.is_ok());
/// assert!(tp.tv_sec > 0);
/// ```
pub fn clock_gettime(which_clock: clockid_t, tp: &mut timespec_t) -> Result<(), Errno> {
    syscall2(
        SYS_CLOCK_GETTIME,
        which_clock as usize,
        tp as *mut timespec_t as usize,
    )
    .map(drop)
}
/// High resolution sleep with a specific clock.
/// ```
/// let t = nc::timespec_t {
/// tv_sec: 1,
/// tv_nsec: 0,
/// };
/// let mut rem = nc::timespec_t::default();
/// assert!(nc::clock_nanosleep(nc::CLOCK_MONOTONIC, 0, &t, &mut rem).is_ok());
/// ```
pub fn clock_nanosleep(
which_clock: clockid_t,
flags: i32,
rqtp: ×pec_t,
rmtp: &mut timespec_t,
) -> Result<(), Errno> {
let which_clock = which_clock as usize;
let flags = flags as usize;
let rqtp_ptr = rqtp as *const timespec_t as usize;
let rmtp_ptr = rmtp as *mut timespec_t as usize;
syscall4(SYS_CLOCK_NANOSLEEP, which_clock, flags, rqtp_ptr, rmtp_ptr).map(drop)
}
/// Set time of specific clock.
/// ```
/// let mut tp = nc::timespec_t::default();
/// let ret = nc::clock_gettime(nc::CLOCK_REALTIME, &mut tp);
/// assert!(ret.is_ok());
/// assert!(tp.tv_sec > 0);
/// let ret = nc::clock_settime(nc::CLOCK_REALTIME, &tp);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn clock_settime(which_clock: clockid_t, tp: ×pec_t) -> Result<(), Errno> {
let which_clock = which_clock as usize;
let tp_ptr = tp as *const timespec_t as usize;
syscall2(SYS_CLOCK_SETTIME, which_clock, tp_ptr).map(drop)
}
/// Create a child process.
pub fn clone(
clone_flags: i32,
newsp: usize,
parent_tid: &mut i32,
child_tid: &mut i32,
tls: usize,
) -> Result<pid_t, Errno> {
let clone_flags = clone_flags as usize;
let parent_tid_ptr = parent_tid as *mut i32 as usize;
let child_tid_ptr = child_tid as *mut i32 as usize;
syscall5(
SYS_CLONE,
clone_flags,
newsp,
parent_tid_ptr,
child_tid_ptr,
tls,
)
.map(|ret| ret as pid_t)
}
pub fn clone3() {
core::unimplemented!();
// syscall0(SYS_CLONE3);
}
/// Close an open file descriptor.
/// ```
/// assert!(nc::close(2).is_ok());
/// ```
pub fn close(fd: i32) -> Result<(), Errno> {
    syscall1(SYS_CLOSE, fd as usize).map(drop)
}
/// Initialize a connection on a socket.
pub fn connect(sockfd: i32, addr: &sockaddr_in_t, addrlen: socklen_t) -> Result<(), Errno> {
let sockfd = sockfd as usize;
// TODO(Shaohua): Use sockaddr_t generic type.
let addr_ptr = addr as *const sockaddr_in_t as usize;
let addrlen = addrlen as usize;
syscall3(SYS_CONNECT, sockfd, addr_ptr, addrlen).map(drop)
}
/// Copy a range of data from one file to another.
/// ```
/// let path_in = "/etc/passwd";
/// let fd_in = nc::open(path_in, nc::O_RDONLY, 0);
/// assert!(fd_in.is_ok());
/// let fd_in = fd_in.unwrap();
/// let path_out = "/tmp/nc-copy-file-range";
/// let fd_out = nc::open(path_out, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(fd_out.is_ok());
/// let fd_out = fd_out.unwrap();
/// let mut off_in = 0;
/// let mut off_out = 0;
/// let copy_len = 64;
/// let ret = nc::copy_file_range(fd_in, &mut off_in, fd_out, &mut off_out, copy_len, 0);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(copy_len as nc::ssize_t));
/// assert!(nc::close(fd_in).is_ok());
/// assert!(nc::close(fd_out).is_ok());
/// assert!(nc::unlink(path_out).is_ok());
/// ```
pub fn copy_file_range(
fd_in: i32,
off_in: &mut loff_t,
fd_out: i32,
off_out: &mut loff_t,
len: size_t,
flags: u32,
) -> Result<ssize_t, Errno> {
let fd_in = fd_in as usize;
let off_in_ptr = off_in as *mut loff_t as usize;
let fd_out = fd_out as usize;
let off_out_ptr = off_out as *mut loff_t as usize;
let len = len as usize;
let flags = flags as usize;
syscall6(
SYS_COPY_FILE_RANGE,
fd_in,
off_in_ptr,
fd_out,
off_out_ptr,
len,
flags,
)
.map(|ret| ret as ssize_t)
}
/// Unlock a kernel module.
pub fn delete_module<P: AsRef<Path>>(name: P, flags: i32) -> Result<(), Errno> {
let name = CString::new(name.as_ref());
let name_ptr = name.as_ptr() as usize;
let flags = flags as usize;
syscall2(SYS_DELETE_MODULE, name_ptr, flags).map(drop)
}
/// Duplicate `oldfd`, returning the lowest-numbered unused file descriptor.
/// ```
/// let path = "/tmp/nc-dup-file";
/// let fd = nc::creat(path, 0o644);
/// assert!(fd.is_ok());
/// let fd = fd.unwrap();
/// let fd_dup = nc::dup(fd);
/// assert!(fd_dup.is_ok());
/// let fd_dup = fd_dup.unwrap();
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::close(fd_dup).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn dup(oldfd: i32) -> Result<i32, Errno> {
    syscall1(SYS_DUP, oldfd as usize).map(|new_fd| new_fd as i32)
}
/// Save as `dup2()`, but can set the close-on-exec flag on `newfd`.
/// ```
/// let path = "/tmp/nc-dup3-file";
/// let fd = nc::creat(path, 0o644);
/// assert!(fd.is_ok());
/// let fd = fd.unwrap();
/// let newfd = 8;
/// assert!(nc::dup3(fd, newfd, nc::O_CLOEXEC).is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::close(newfd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn dup3(oldfd: i32, newfd: i32, flags: i32) -> Result<(), Errno> {
let oldfd = oldfd as usize;
let newfd = newfd as usize;
let flags = flags as usize;
syscall3(SYS_DUP3, oldfd, newfd, flags).map(drop)
}
/// Open an epoll file descriptor.
/// ```
/// let ret = nc::epoll_create1(nc::EPOLL_CLOEXEC);
/// assert!(ret.is_ok());
/// let poll_fd = ret.unwrap();
/// assert!(nc::close(poll_fd).is_ok());
/// ```
pub fn epoll_create1(flags: i32) -> Result<i32, Errno> {
let flags = flags as usize;
syscall1(SYS_EPOLL_CREATE1, flags).map(|ret| ret as i32)
}
/// Control interface for an epoll file descriptor.
/// ```
/// let epfd = nc::epoll_create1(nc::EPOLL_CLOEXEC);
/// assert!(epfd.is_ok());
/// let epfd = epfd.unwrap();
/// let mut fds: [i32; 2] = [0, 0];
/// let ret = nc::pipe(&mut fds);
/// assert!(ret.is_ok());
/// let mut event = nc::epoll_event_t::default();
/// event.events = nc::EPOLLIN | nc::EPOLLET;
/// event.data.fd = fds[0];
/// let ctl_ret = nc::epoll_ctl(epfd, nc::EPOLL_CTL_ADD, fds[0], &mut event);
/// assert!(ctl_ret.is_ok());
/// assert!(nc::close(fds[0]).is_ok());
/// assert!(nc::close(fds[1]).is_ok());
/// assert!(nc::close(epfd).is_ok());
/// ```
pub fn epoll_ctl(epfd: i32, op: i32, fd: i32, event: &mut epoll_event_t) -> Result<(), Errno> {
let epfd = epfd as usize;
let op = op as usize;
let fd = fd as usize;
let event_ptr = event as *mut epoll_event_t as usize;
syscall4(SYS_EPOLL_CTL, epfd, op, fd, event_ptr).map(drop)
}
/// Wait for an I/O event on an epoll file descriptor.
/// ```
/// let epfd = nc::epoll_create1(nc::EPOLL_CLOEXEC);
/// assert!(epfd.is_ok());
/// let epfd = epfd.unwrap();
/// let mut fds: [i32; 2] = [0, 0];
/// let ret = nc::pipe(&mut fds);
/// assert!(ret.is_ok());
/// let mut event = nc::epoll_event_t::default();
/// event.events = nc::EPOLLIN | nc::EPOLLET;
/// event.data.fd = fds[0];
/// let ctl_ret = nc::epoll_ctl(epfd, nc::EPOLL_CTL_ADD, fds[0], &mut event);
/// assert!(ctl_ret.is_ok());
///
/// let msg = "Hello, Rust";
/// let ret = nc::write(fds[1], msg.as_ptr() as usize, msg.len());
/// assert!(ret.is_ok());
///
/// let mut events = vec![nc::epoll_event_t::default(); 4];
/// let events_len = events.len();
/// let timeout = 0;
/// let sigmask = nc::sigset_t::default();
/// let sigmask_size = core::mem::size_of_val(&sigmask);
/// let ret = nc::epoll_pwait(
/// epfd,
/// &mut events,
/// events_len as i32,
/// timeout,
/// &sigmask,
/// sigmask_size,
/// );
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(1));
///
/// for event in &events {
/// // Ready to read
/// if event.events == nc::EPOLLIN {
/// let ready_fd = unsafe { event.data.fd };
/// assert_eq!(ready_fd, fds[0]);
/// let mut buf = vec![0_u8; 64];
/// let buf_len = buf.len();
/// let ret = nc::read(ready_fd, buf.as_mut_ptr() as usize, buf_len);
/// assert!(ret.is_ok());
/// let n_read = ret.unwrap() as usize;
/// assert_eq!(msg.as_bytes(), &buf[..n_read]);
/// }
/// }
///
/// assert!(nc::close(fds[0]).is_ok());
/// assert!(nc::close(fds[1]).is_ok());
/// assert!(nc::close(epfd).is_ok());
/// ```
pub fn epoll_pwait(
epfd: i32,
events: &mut [epoll_event_t],
max_events: i32,
timeout: i32,
sigmask: &sigset_t,
sigset_size: usize,
) -> Result<i32, Errno> {
let epfd = epfd as usize;
let events_ptr = events.as_mut_ptr() as usize;
let max_events = max_events as usize;
let timeout = timeout as usize;
let sigmask_ptr = sigmask as *const sigset_t as usize;
syscall6(
SYS_EPOLL_PWAIT,
epfd,
events_ptr,
max_events,
timeout,
sigmask_ptr,
sigset_size,
)
.map(|ret| ret as i32)
}
/// Create a file descriptor for event notification,
/// initialized with `count` and the given `flags`.
pub fn eventfd2(count: u32, flags: i32) -> Result<i32, Errno> {
    syscall2(SYS_EVENTFD2, count as usize, flags as usize).map(|fd| fd as i32)
}
/// Execute a new program.
/// TODO(Shaohua): type of argv and env will be changed.
/// And return value might be changed too.
///
/// NOTE(review): `argv.as_ptr()`/`env.as_ptr()` pass the address of a slice
/// of Rust `&str` fat pointers, not the NUL-terminated `char *` array that
/// the kernel expects — presumably this only behaves for trivial argument
/// lists; confirm before relying on it (see the TODO above).
/// ```
/// let pid = nc::fork();
/// assert!(pid.is_ok());
/// let pid = pid.unwrap();
/// assert!(pid >= 0);
/// if pid == 0 {
///     // child process
///     let args = [""];
///     let env = [""];
///     let ret = nc::execve("/bin/ls", &args, &env);
///     assert!(ret.is_ok());
/// }
/// ```
pub fn execve<P: AsRef<Path>>(filename: P, argv: &[&str], env: &[&str]) -> Result<(), Errno> {
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    let argv_ptr = argv.as_ptr() as usize;
    let env_ptr = env.as_ptr() as usize;
    syscall3(SYS_EXECVE, filename_ptr, argv_ptr, env_ptr).map(drop)
}
/// Execute a new program relative to a directory file descriptor.
/// TODO(Shaohua): type of argv and env will be changed.
/// And return value might be changed too.
/// ```
/// let pid = nc::fork();
/// assert!(pid.is_ok());
/// let pid = pid.unwrap();
/// assert!(pid >= 0);
/// if pid == 0 {
/// // child process
/// let args = [""];
/// let env = [""];
/// let ret = nc::execveat(nc::AT_FDCWD, "/bin/ls", &args, &env, 0);
/// assert!(ret.is_ok());
/// }
/// ```
pub fn execveat<P: AsRef<Path>>(
fd: i32,
filename: P,
argv: &[&str],
env: &[&str],
flags: i32,
) -> Result<(), Errno> {
// FIXME(Shaohua): Convert into CString first.
let fd = fd as usize;
let filename = CString::new(filename.as_ref());
let filename_ptr = filename.as_ptr() as usize;
let argv_ptr = argv.as_ptr() as usize;
let env_ptr = env.as_ptr() as usize;
let flags = flags as usize;
syscall5(SYS_EXECVEAT, fd, filename_ptr, argv_ptr, env_ptr, flags).map(drop)
}
/// Terminate current process.
///
/// This function never returns.
/// ```
/// nc::exit(0);
/// ```
pub fn exit(status: u8) -> ! {
    let status = status as usize;
    // SYS_EXIT does not return on success, so the result is deliberately unused.
    let _ret = syscall1(SYS_EXIT, status);
    unreachable!();
}
/// Exit all threads in a process's thread group.
/// ```
/// nc::exit_group(0);
/// ```
pub fn exit_group(status: i32) -> ! {
let status = status as usize;
let _ret = syscall1(SYS_EXIT_GROUP, status);
unreachable!();
}
/// Check the calling user's permissions for a file, relative to `dfd`.
/// ```
/// assert!(nc::faccessat(nc::AT_FDCWD, "/etc/passwd", nc::F_OK).is_ok());
/// ```
pub fn faccessat<P: AsRef<Path>>(dfd: i32, filename: P, mode: i32) -> Result<(), Errno> {
    // Bind the converted string so the buffer outlives the syscall.
    let path = CString::new(filename.as_ref());
    syscall3(
        SYS_FACCESSAT,
        dfd as usize,
        path.as_ptr() as usize,
        mode as usize,
    )
    .map(drop)
}
/// Predeclare an access pattern for file data.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::fadvise64(fd, 0, 1024, nc::POSIX_FADV_NORMAL);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn fadvise64(fd: i32, offset: loff_t, len: size_t, advice: i32) -> Result<(), Errno> {
let fd = fd as usize;
let offset = offset as usize;
let len = len as usize;
let advice = advice as usize;
syscall4(SYS_FADVISE64, fd, offset, len, advice).map(drop)
}
/// Manipulate file space.
/// ```
/// let path = "/tmp/nc-fallocate";
/// let fd = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(fd.is_ok());
/// let fd = fd.unwrap();
/// let ret = nc::fallocate(fd, 0, 0, 64 * 1024);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn fallocate(fd: i32, mode: i32, offset: loff_t, len: loff_t) -> Result<(), Errno> {
let fd = fd as usize;
let mode = mode as usize;
let offset = offset as usize;
let len = len as usize;
syscall4(SYS_FALLOCATE, fd, mode, offset, len).map(drop)
}
/// Create and initialize a fanotify group, returning its file descriptor.
pub fn fanotify_init(flags: u32, event_f_flags: u32) -> Result<i32, Errno> {
    syscall2(SYS_FANOTIFY_INIT, flags as usize, event_f_flags as usize).map(|fd| fd as i32)
}
/// Add, remove, or modify an fanotify mark on a filesystem object
pub fn fanotify_mark<P: AsRef<Path>>(
fanotify_fd: i32,
flags: u32,
mask: u64,
fd: i32,
filename: P,
) -> Result<(), Errno> {
let fanotify_fd = fanotify_fd as usize;
let flags = flags as usize;
let mask = mask as usize;
let fd = fd as usize;
let filename = CString::new(filename.as_ref());
let filename_ptr = filename.as_ptr() as usize;
syscall5(
SYS_FANOTIFY_MARK,
fanotify_fd,
flags,
mask,
fd,
filename_ptr,
)
.map(drop)
}
/// Change the working directory to the one referred to by `fd`.
/// ```
/// let path = "/tmp";
/// // Open folder directly.
/// let fd = nc::open(path, nc::O_PATH, 0);
/// assert!(fd.is_ok());
/// let fd = fd.unwrap();
/// let ret = nc::fchdir(fd);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn fchdir(fd: i32) -> Result<(), Errno> {
    syscall1(SYS_FCHDIR, fd as usize).map(drop)
}
/// Change the permission bits of the file referred to by `fd`.
/// ```
/// let filename = "/tmp/nc-fchmod";
/// let ret = nc::creat(filename, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::fchmod(fd, 0o600).is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(filename).is_ok());
/// ```
pub fn fchmod(fd: i32, mode: mode_t) -> Result<(), Errno> {
    syscall2(SYS_FCHMOD, fd as usize, mode as usize).map(drop)
}
/// Change permissions of a file.
/// ```
/// let filename = "/tmp/nc-fchmodat";
/// let ret = nc::creat(filename, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::fchmodat(nc::AT_FDCWD, filename, 0o600).is_ok());
/// assert!(nc::unlink(filename).is_ok());
/// ```
pub fn fchmodat<P: AsRef<Path>>(dirfd: i32, filename: P, mode: mode_t) -> Result<(), Errno> {
let dirfd = dirfd as usize;
let filename = CString::new(filename.as_ref());
let filename_ptr = filename.as_ptr() as usize;
let mode = mode as usize;
syscall3(SYS_FCHMODAT, dirfd, filename_ptr, mode).map(drop)
}
/// Change ownership of a file.
/// ```
/// let filename = "/tmp/nc-fchown";
/// let ret = nc::creat(filename, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::fchown(fd, 0, 0);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(filename).is_ok());
/// ```
pub fn fchown(fd: i32, user: uid_t, group: gid_t) -> Result<(), Errno> {
let fd = fd as usize;
let user = user as usize;
let group = group as usize;
syscall3(SYS_FCHOWN, fd, user, group).map(drop)
}
/// Change ownership of a file.
/// ```
/// let filename = "/tmp/nc-fchown";
/// let ret = nc::creat(filename, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let ret = nc::fchownat(nc::AT_FDCWD, filename, 0, 0, 0);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// assert!(nc::unlink(filename).is_ok());
/// ```
pub fn fchownat<P: AsRef<Path>>(
dirfd: i32,
filename: P,
user: uid_t,
group: gid_t,
flag: i32,
) -> Result<(), Errno> {
let dirfd = dirfd as usize;
let filename = CString::new(filename.as_ref());
let filename_ptr = filename.as_ptr() as usize;
let user = user as usize;
let group = group as usize;
let flag = flag as usize;
syscall5(SYS_FCHOWNAT, dirfd, filename_ptr, user, group, flag).map(drop)
}
/// Manipulate file descriptor.
///
/// `arg` is interpreted according to `cmd`; for commands that take no
/// argument, pass 0.  The meaning of the returned `i32` also depends on
/// `cmd` — e.g. a newly allocated descriptor for `F_DUPFD`, as below.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// let ret = nc::fcntl(fd, nc::F_DUPFD, 0);
/// assert!(ret.is_ok());
/// let fd2 = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::close(fd2).is_ok());
/// ```
pub fn fcntl(fd: i32, cmd: i32, arg: usize) -> Result<i32, Errno> {
    let fd = fd as usize;
    let cmd = cmd as usize;
    syscall3(SYS_FCNTL, fd, cmd, arg).map(|ret| ret as i32)
}
/// Flush all modified in-core data (exclude metadata) refered by `fd` to disk.
/// ```
/// let path = "/tmp/nc-fdatasync";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let msg = b"Hello, Rust";
/// let ret = nc::write(fd, msg.as_ptr() as usize, msg.len());
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(msg.len() as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn fdatasync(fd: i32) -> Result<(), Errno> {
let fd = fd as usize;
syscall1(SYS_FDATASYNC, fd).map(drop)
}
/// Get extended attribute value.
/// ```
/// let path = "/tmp/nc-fgetxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let mut buf = [0_u8; 16];
/// let buf_len = buf.len();
/// let ret = nc::fgetxattr(fd, attr_name, buf.as_mut_ptr() as usize, buf_len);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(attr_value.len() as nc::ssize_t));
/// let attr_len = ret.unwrap() as usize;
/// assert_eq!(attr_value.as_bytes(), &buf[..attr_len]);
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn fgetxattr<P: AsRef<Path>>(
fd: i32,
name: P,
value: usize,
size: size_t,
) -> Result<ssize_t, Errno> {
let fd = fd as usize;
let name = CString::new(name.as_ref());
let name_ptr = name.as_ptr() as usize;
let size = size as usize;
syscall4(SYS_FGETXATTR, fd, name_ptr, value, size).map(|ret| ret as ssize_t)
}
/// Load a kernel module.
pub fn finit_module<P: AsRef<Path>>(fd: i32, param_values: P, flags: i32) -> Result<(), Errno> {
let fd = fd as usize;
let param_values = CString::new(param_values.as_ref());
let param_values_ptr = param_values.as_ptr() as usize;
let flags = flags as usize;
syscall3(SYS_FINIT_MODULE, fd, param_values_ptr, flags).map(drop)
}
/// List extended attribute names.
/// ```
/// let path = "/tmp/nc-flistxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let mut buf = [0_u8; 16];
/// let buf_len = buf.len();
/// let ret = nc::flistxattr(fd, buf.as_mut_ptr() as usize, buf_len);
/// let attr_len = ret.unwrap() as usize;
/// assert_eq!(&buf[..attr_len - 1], attr_name.as_bytes());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn flistxattr(fd: i32, list: usize, size: size_t) -> Result<ssize_t, Errno> {
let fd = fd as usize;
syscall3(SYS_FLISTXATTR, fd, list, size).map(|ret| ret as ssize_t)
}
/// Apply or remove an advisory lock (`LOCK_SH`/`LOCK_EX`/`LOCK_UN`) on an open file.
/// ```
/// let path = "/tmp/nc-flock";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::flock(fd, nc::LOCK_EX);
/// assert!(ret.is_ok());
/// let msg = "Hello, Rust";
/// let ret = nc::write(fd, msg.as_ptr() as usize, msg.len());
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(msg.len() as nc::ssize_t));
/// let ret = nc::flock(fd, nc::LOCK_UN);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn flock(fd: i32, operation: i32) -> Result<(), Errno> {
    syscall2(SYS_FLOCK, fd as usize, operation as usize).map(drop)
}
/// Remove an extended attribute.
/// ```
/// let path = "/tmp/nc-fremovexattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let ret = nc::fremovexattr(fd, attr_name);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn fremovexattr<P: AsRef<Path>>(fd: i32, name: P) -> Result<(), Errno> {
let fd = fd as usize;
let name = CString::new(name.as_ref());
let name_ptr = name.as_ptr() as usize;
syscall2(SYS_FREMOVEXATTR, fd, name_ptr).map(drop)
}
/// Set parameters and trigger actions on a context.
pub fn fsconfig<P: AsRef<Path>>(
fd: i32,
cmd: u32,
key: P,
value: P,
aux: i32,
) -> Result<(), Errno> {
let fd = fd as usize;
let cmd = cmd as usize;
let key = CString::new(key.as_ref());
let key_ptr = key.as_ptr() as usize;
let value = CString::new(value.as_ref());
let value_ptr = value.as_ptr() as usize;
let aux = aux as usize;
syscall5(SYS_FSCONFIG, fd, cmd, key_ptr, value_ptr, aux).map(drop)
}
/// Set extended attribute value.
/// ```
/// let path = "/tmp/nc-fsetxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::fsetxattr(
/// fd,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn fsetxattr<P: AsRef<Path>>(
fd: i32,
name: P,
value: usize,
size: size_t,
flags: i32,
) -> Result<(), Errno> {
let fd = fd as usize;
let name = CString::new(name.as_ref());
let name_ptr = name.as_ptr() as usize;
let size = size as usize;
let flags = flags as usize;
syscall5(SYS_FSETXATTR, fd, name_ptr, value, size, flags).map(drop)
}
/// Create a kernel mount representation for a new, prepared superblock,
/// returning a mount object file descriptor.
pub fn fsmount(fs_fd: i32, flags: u32, attr_flags: u32) -> Result<i32, Errno> {
    syscall3(
        SYS_FSMOUNT,
        fs_fd as usize,
        flags as usize,
        attr_flags as usize,
    )
    .map(|fd| fd as i32)
}
/// Open a filesystem by name so that it can be configured for mounting.
///
/// NOTE(review): on success the kernel returns a new filesystem-context
/// file descriptor, which this wrapper currently discards via `map(drop)` —
/// confirm whether the return type should instead be `Result<i32, Errno>`
/// (a breaking change for existing callers).
pub fn fsopen<P: AsRef<Path>>(fs_name: P, flags: u32) -> Result<(), Errno> {
    let fs_name = CString::new(fs_name.as_ref());
    let fs_name_ptr = fs_name.as_ptr() as usize;
    let flags = flags as usize;
    syscall2(SYS_FSOPEN, fs_name_ptr, flags).map(drop)
}
/// Pick a superblock into a context for reconfiguration.
pub fn fspick<P: AsRef<Path>>(dfd: i32, path: P, flags: i32) -> Result<i32, Errno> {
let dfd = dfd as usize;
let path = CString::new(path.as_ref());
let path_ptr = path.as_ptr() as usize;
let flags = flags as usize;
syscall3(SYS_FSPICK, dfd, path_ptr, flags).map(|ret| ret as i32)
}
/// Get file status about a file descriptor.
/// ```
/// let path = "/tmp";
/// // Open folder directly.
/// let fd = nc::open(path, nc::O_PATH, 0);
/// assert!(fd.is_ok());
/// let fd = fd.unwrap();
/// let mut stat = nc::stat_t::default();
/// let ret = nc::fstat(fd, &mut stat);
/// assert!(ret.is_ok());
/// // Check fd is a directory.
/// assert_eq!((stat.st_mode & nc::S_IFMT), nc::S_IFDIR);
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn fstat(fd: i32, statbuf: &mut stat_t) -> Result<(), Errno> {
let fd = fd as usize;
let statbuf_ptr = statbuf as *mut stat_t as usize;
syscall2(SYS_FSTAT, fd, statbuf_ptr).map(drop)
}
/// Get file status
/// ```
/// let path = "/etc/passwd";
/// let mut stat = nc::stat_t::default();
/// let ret = nc::fstatat(nc::AT_FDCWD, path, &mut stat, nc::AT_SYMLINK_NOFOLLOW);
/// assert!(ret.is_ok());
/// assert_eq!((stat.st_mode & nc::S_IFMT), nc::S_IFREG);
/// ```
pub fn fstatat<P: AsRef<Path>>(
dfd: i32,
filename: P,
statbuf: &mut stat_t,
flag: i32,
) -> Result<(), Errno> {
let dfd = dfd as usize;
let filename = CString::new(filename.as_ref());
let filename_ptr = filename.as_ptr() as usize;
let statbuf_ptr = statbuf as *mut stat_t as usize;
let flag = flag as usize;
syscall4(SYS_FSTATAT, dfd, filename_ptr, statbuf_ptr, flag).map(drop)
}
/// Get filesystem statistics.
/// ```
/// let path = "/usr";
/// // Open folder directly.
/// let fd = nc::open(path, nc::O_PATH, 0);
/// assert!(fd.is_ok());
/// let fd = fd.unwrap();
/// let mut statfs = nc::statfs_t::default();
/// let ret = nc::fstatfs(fd, &mut statfs);
/// assert!(ret.is_ok());
/// assert!(statfs.f_bfree > 0);
/// assert!(statfs.f_bavail > 0);
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn fstatfs(fd: i32, buf: &mut statfs_t) -> Result<(), Errno> {
let fd = fd as usize;
let buf_ptr = buf as *mut statfs_t as usize;
syscall2(SYS_FSTATFS, fd, buf_ptr).map(drop)
}
/// Flush all modified in-core data referred to by `fd` to disk.
/// ```
/// let path = "/tmp/nc-fsync";
/// let ret = nc::open(path, nc::O_CREAT | nc::O_WRONLY, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let buf = b"Hello, Rust";
/// let n_write = nc::write(fd, buf.as_ptr() as usize, buf.len());
/// assert_eq!(n_write, Ok(buf.len() as isize));
/// assert!(nc::fsync(fd).is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn fsync(fd: i32) -> Result<(), Errno> {
    syscall1(SYS_FSYNC, fd as usize).map(drop)
}
/// Truncate an opened file to exactly `length` bytes.
/// ```
/// let path = "/tmp/nc-ftruncate";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::ftruncate(fd, 64 * 1024);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn ftruncate(fd: i32, length: off_t) -> Result<(), Errno> {
    syscall2(SYS_FTRUNCATE, fd as usize, length as usize).map(drop)
}
/// Fast user-space locking.
pub fn futex(
uaddr: &mut i32,
futex_op: i32,
val: u32,
timeout: &mut timespec_t,
uaddr2: &mut i32,
val3: i32,
) -> Result<i32, Errno> {
let uaddr_ptr = uaddr as *mut i32 as usize;
let futex_op = futex_op as usize;
let val = val as usize;
let timeout_ptr = timeout as *mut timespec_t as usize;
let uaddr2_ptr = uaddr2 as *mut i32 as usize;
let val3 = val3 as usize;
syscall6(
SYS_FUTEX,
uaddr_ptr,
futex_op,
val,
timeout_ptr,
uaddr2_ptr,
val3,
)
.map(|ret| ret as i32)
}
/// Determine CPU and NUMA node on which the calling thread is running.
/// ```
/// let mut cpu = 0;
/// let mut node = 0;
/// let mut cache = nc::getcpu_cache_t::default();
/// let ret = nc::getcpu(&mut cpu, &mut node, &mut cache);
/// assert!(ret.is_ok());
/// ```
pub fn getcpu(cpu: &mut u32, node: &mut u32, cache: &mut getcpu_cache_t) -> Result<(), Errno> {
let cpu_ptr = cpu as *mut u32 as usize;
let node_ptr = node as *mut u32 as usize;
let cache_ptr = cache as *mut getcpu_cache_t as usize;
syscall3(SYS_GETCPU, cpu_ptr, node_ptr, cache_ptr).map(drop)
}
/// Get current working directory.
///
/// `buf` is the address of a caller-owned byte buffer of `size` bytes.
/// On success the returned length includes the terminating NUL byte,
/// as demonstrated below.
/// ```
/// let mut buf = [0_u8; nc::PATH_MAX as usize + 1];
/// let ret = nc::getcwd(buf.as_mut_ptr() as usize, buf.len());
/// assert!(ret.is_ok());
/// // Remove null-terminal char.
/// let path_len = ret.unwrap() as usize - 1;
/// let cwd = std::str::from_utf8(&buf[..path_len]);
/// assert!(cwd.is_ok());
/// println!("cwd: {:?}", cwd);
/// ```
pub fn getcwd(buf: usize, size: size_t) -> Result<ssize_t, Errno> {
    syscall2(SYS_GETCWD, buf, size).map(|ret| ret as ssize_t)
}
/// Get directory entries.
/// ```
/// let path = "/etc";
/// let ret = nc::open(path, nc::O_DIRECTORY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// const BUF_SIZE: usize = 4 * 1024;
/// loop {
/// // TODO(Shaohua): Only allocate one buf block.
/// let mut buf: Vec<u8> = vec![0; BUF_SIZE];
/// let ret = nc::getdents64(fd, buf.as_mut_ptr() as usize, BUF_SIZE);
/// assert!(ret.is_ok());
///
/// let buf_box = buf.into_boxed_slice();
/// let buf_box_ptr = Box::into_raw(buf_box) as *mut u8 as usize;
/// let nread = ret.unwrap() as usize;
/// if nread == 0 {
/// break;
/// }
///
/// let mut bpos: usize = 0;
/// while bpos < nread {
/// let d = (buf_box_ptr + bpos) as *mut nc::linux_dirent64_t;
/// let d_ref = unsafe { &(*d) };
/// let mut name_vec: Vec<u8> = vec![];
/// // TODO(Shaohua): Calculate string len of name.
/// for i in 0..nc::PATH_MAX {
/// let c = d_ref.d_name[i as usize];
/// if c == 0 {
/// break;
/// }
/// name_vec.push(c);
/// }
/// let name = String::from_utf8(name_vec).unwrap();
/// println!("name: {}", name);
///
/// bpos += d_ref.d_reclen as usize;
/// }
/// }
///
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn getdents64(fd: i32, dirp: usize, count: size_t) -> Result<ssize_t, Errno> {
let fd = fd as usize;
syscall3(SYS_GETDENTS64, fd, dirp, count).map(|ret| ret as ssize_t)
}
/// Get the effective group ID of the calling process.
/// ```
/// let egid = nc::getegid();
/// assert!(egid > 0);
/// ```
pub fn getegid() -> gid_t {
syscall0(SYS_GETEGID).expect("getegid() failed") as gid_t
}
/// Get the effective user ID of the calling process.
/// ```
/// let euid = nc::geteuid();
/// assert!(euid > 0);
/// ```
pub fn geteuid() -> uid_t {
syscall0(SYS_GETEUID).expect("geteuid() failed") as uid_t
}
/// Get the real group ID of the calling process.
/// ```
/// let gid = nc::getgid();
/// assert!(gid > 0);
/// ```
pub fn getgid() -> gid_t {
syscall0(SYS_GETGID).expect("getgid() failed") as gid_t
}
/// Get list of supplementary group Ids.
/// ```
/// let mut groups = vec![];
/// let ret = nc::getgroups(0, &mut groups);
/// assert!(ret.is_ok());
/// let total_num = ret.unwrap();
/// groups.resize(total_num as usize, 0);
/// let ret = nc::getgroups(total_num, &mut groups);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(total_num));
/// ```
pub fn getgroups(size: i32, group_list: &mut [gid_t]) -> Result<i32, Errno> {
let size = size as usize;
let group_ptr = group_list.as_mut_ptr() as usize;
syscall2(SYS_GETGROUPS, size, group_ptr).map(|ret| ret as i32)
}
/// Get value of an interval timer.
/// ```
/// use core::mem::size_of;
///
/// fn handle_alarm(signum: i32) {
/// assert_eq!(signum, nc::SIGALRM);
/// let msg = "Hello alarm";
/// let _ = nc::write(2, msg.as_ptr() as usize, msg.len());
/// }
///
/// let sa = nc::sigaction_t {
/// sa_handler: handle_alarm as nc::sighandler_t,
/// sa_flags: 0,
/// ..nc::sigaction_t::default()
/// };
/// let mut old_sa = nc::sigaction_t::default();
/// let ret = nc::rt_sigaction(nc::SIGALRM, &sa, &mut old_sa, size_of::<nc::sigset_t>());
/// assert!(ret.is_ok());
///
/// // Single shot timer, actived after 1 second.
/// let itv = nc::itimerval_t {
/// it_value: nc::timeval_t {
/// tv_sec: 1,
/// tv_usec: 0,
/// },
/// it_interval: nc::timeval_t {
/// tv_sec: 0,
/// tv_usec: 0,
/// },
/// };
/// let mut prev_itv = nc::itimerval_t::default();
/// let ret = nc::setitimer(nc::ITIMER_REAL, &itv, &mut prev_itv);
/// assert!(ret.is_ok());
///
/// let ret = nc::getitimer(nc::ITIMER_REAL, &mut prev_itv);
/// assert!(ret.is_ok());
/// assert!(prev_itv.it_value.tv_sec <= itv.it_value.tv_sec);
///
/// let ret = nc::pause();
/// assert_eq!(ret, Err(nc::EINTR));
///
/// let ret = nc::getitimer(nc::ITIMER_REAL, &mut prev_itv);
/// assert!(ret.is_ok());
/// assert_eq!(prev_itv.it_value.tv_sec, 0);
/// assert_eq!(prev_itv.it_value.tv_usec, 0);
/// ```
pub fn getitimer(which: i32, curr_val: &mut itimerval_t) -> Result<(), Errno> {
let which = which as usize;
let curr_val_ptr = curr_val as *mut itimerval_t as usize;
syscall2(SYS_GETITIMER, which, curr_val_ptr).map(drop)
}
/// Get name of connected peer socket.
pub fn getpeername(
    sockfd: i32,
    addr: &mut sockaddr_in_t,
    addrlen: &mut socklen_t,
) -> Result<(), Errno> {
    // On success the kernel fills `addr` with the peer's address and
    // updates `addrlen` with its actual size.
    syscall3(
        SYS_GETPEERNAME,
        sockfd as usize,
        addr as *mut sockaddr_in_t as usize,
        addrlen as *mut socklen_t as usize,
    )
    .map(drop)
}
/// Returns the PGID(process group ID) of the process specified by `pid`.
/// ```
/// let ppid = nc::getppid();
/// let pgid = nc::getpgid(ppid);
/// assert!(pgid.is_ok());
/// ```
pub fn getpgid(pid: pid_t) -> Result<pid_t, Errno> {
    // Returns the process-group ID of the process identified by `pid`.
    syscall1(SYS_GETPGID, pid as usize).map(|pgid| pgid as pid_t)
}
/// Get the process ID (PID) of the calling process.
/// ```
/// let pid = nc::getpid();
/// assert!(pid > 0);
/// ```
pub fn getpid() -> pid_t {
    // getpid() cannot fail, so unwrapping the raw result is safe here.
    let pid = syscall0(SYS_GETPID).expect("getpid() failed");
    pid as pid_t
}
/// Get the process ID of the parent of the calling process.
/// ```
/// let ppid = nc::getppid();
/// assert!(ppid > 0);
/// ```
pub fn getppid() -> pid_t {
    // getppid() cannot fail, so unwrapping the raw result is safe here.
    let ppid = syscall0(SYS_GETPPID).expect("getppid() failed");
    ppid as pid_t
}
/// Get program scheduling priority.
/// ```
/// let ret = nc::getpriority(nc::PRIO_PROCESS, nc::getpid());
/// assert!(ret.is_ok());
/// ```
pub fn getpriority(which: i32, who: i32) -> Result<i32, Errno> {
    let which = which as usize;
    let who = who as usize;
    syscall2(SYS_GETPRIORITY, which, who).map(|ret| {
        // The kernel returns the priority as a non-negative offset so a
        // negative nice value cannot be mistaken for an error code.
        let ret = ret as i32;
        // NOTE(review): only values above PRIO_MAX are decoded back here;
        // values in 0..=PRIO_MAX are returned unchanged — confirm this
        // matches the kernel's encoding for non-negative nice values.
        if ret > PRIO_MAX {
            return PRIO_MAX - ret;
        }
        ret
    })
}
/// Obtain a series of random bytes.
/// ```
/// let mut buf = [0_u8; 32];
/// let buf_len = buf.len();
/// let ret = nc::getrandom(&mut buf, buf_len, 0);
/// assert!(ret.is_ok());
/// let size = ret.unwrap() as usize;
/// assert!(size <= buf_len);
/// ```
pub fn getrandom(buf: &mut [u8], buf_len: usize, flags: u32) -> Result<ssize_t, Errno> {
    // `buf_len` is passed through as-is; callers supply `buf.len()` in the
    // doc example above. Returns the number of random bytes written.
    let buf_ptr = buf.as_mut_ptr() as usize;
    syscall3(SYS_GETRANDOM, buf_ptr, buf_len, flags as usize).map(|n| n as ssize_t)
}
/// Get real, effect and saved group ID.
/// ```
/// let mut rgid = 0;
/// let mut egid = 0;
/// let mut sgid = 0;
/// let ret = nc::getresgid(&mut rgid, &mut egid, &mut sgid);
/// assert!(ret.is_ok());
/// assert!(rgid > 0);
/// assert!(egid > 0);
/// assert!(sgid > 0);
/// ```
pub fn getresgid(rgid: &mut gid_t, egid: &mut gid_t, sgid: &mut gid_t) -> Result<(), Errno> {
    // The kernel stores the real, effective and saved group IDs through
    // these three out-pointers.
    syscall3(
        SYS_GETRESGID,
        rgid as *mut gid_t as usize,
        egid as *mut gid_t as usize,
        sgid as *mut gid_t as usize,
    )
    .map(drop)
}
/// Get real, effect and saved user ID.
/// ```
/// let mut ruid = 0;
/// let mut euid = 0;
/// let mut suid = 0;
/// let ret = nc::getresuid(&mut ruid, &mut euid, &mut suid);
/// assert!(ret.is_ok());
/// assert!(ruid > 0);
/// assert!(euid > 0);
/// assert!(suid > 0);
/// ```
pub fn getresuid(ruid: &mut uid_t, euid: &mut uid_t, suid: &mut uid_t) -> Result<(), Errno> {
    // The kernel stores the real, effective and saved user IDs through
    // these three out-pointers.
    syscall3(
        SYS_GETRESUID,
        ruid as *mut uid_t as usize,
        euid as *mut uid_t as usize,
        suid as *mut uid_t as usize,
    )
    .map(drop)
}
/// Get resource limit.
/// ```
/// let mut rlimit = nc::rlimit_t::default();
/// let ret = nc::getrlimit(nc::RLIMIT_NOFILE, &mut rlimit);
/// assert!(ret.is_ok());
/// assert!(rlimit.rlim_cur > 0);
/// assert!(rlimit.rlim_max > 0);
/// ```
pub fn getrlimit(resource: i32, rlim: &mut rlimit_t) -> Result<(), Errno> {
    // The kernel writes the soft/hard limits for `resource` into `rlim`.
    syscall2(
        SYS_GETRLIMIT,
        resource as usize,
        rlim as *mut rlimit_t as usize,
    )
    .map(drop)
}
/// Get resource usage.
/// ```
/// let mut usage = nc::rusage_t::default();
/// let ret = nc::getrusage(nc::RUSAGE_SELF, &mut usage);
/// assert!(ret.is_ok());
/// assert!(usage.ru_maxrss > 0);
/// assert_eq!(usage.ru_nswap, 0);
/// ```
pub fn getrusage(who: i32, usage: &mut rusage_t) -> Result<(), Errno> {
    // `who` selects the target (e.g. RUSAGE_SELF in the doc example above);
    // the statistics are written into `usage`.
    syscall2(SYS_GETRUSAGE, who as usize, usage as *mut rusage_t as usize).map(drop)
}
/// Get session Id.
/// ```
/// let ppid = nc::getppid();
/// let sid = nc::getsid(ppid);
/// assert!(sid > 0);
/// ```
pub fn getsid(pid: pid_t) -> pid_t {
    // Panics if the kernel reports an error (e.g. no process matches `pid`).
    syscall1(SYS_GETSID, pid as usize).expect("getsid() failed") as pid_t
}
/// Get current address to which the socket `sockfd` is bound.
pub fn getsockname(
    sockfd: i32,
    addr: &mut sockaddr_in_t,
    addrlen: &mut socklen_t,
) -> Result<(), Errno> {
    // On success the kernel fills `addr` and updates `addrlen` with the
    // actual address size.
    syscall3(
        SYS_GETSOCKNAME,
        sockfd as usize,
        addr as *mut sockaddr_in_t as usize,
        addrlen as *mut socklen_t as usize,
    )
    .map(drop)
}
/// Get options on sockets
pub fn getsockopt(
    sockfd: i32,
    level: i32,
    optname: i32,
    optval: &mut usize,
    optlen: &mut socklen_t,
) -> Result<(), Errno> {
    // The option value is written through `optval`; `optlen` is updated
    // to the size of the value actually returned.
    syscall5(
        SYS_GETSOCKOPT,
        sockfd as usize,
        level as usize,
        optname as usize,
        optval as *mut usize as usize,
        optlen as *mut socklen_t as usize,
    )
    .map(drop)
}
/// Get the caller's thread ID (TID).
/// ```
/// let tid = nc::gettid();
/// assert!(tid > 0);
/// ```
pub fn gettid() -> pid_t {
    // gettid() cannot fail, so unwrapping is safe. The panic message
    // previously said "getpid() failed" — corrected to name this syscall.
    syscall0(SYS_GETTID).expect("gettid() failed") as pid_t
}
/// Get time.
/// ```
/// let mut tv = nc::timeval_t::default();
/// let mut tz = nc::timezone_t::default();
/// let ret = nc::gettimeofday(&mut tv, &mut tz);
/// assert!(ret.is_ok());
/// assert!(tv.tv_sec > 1611380386);
/// ```
pub fn gettimeofday(timeval: &mut timeval_t, tz: &mut timezone_t) -> Result<(), Errno> {
    // The kernel writes the current wall-clock time into `timeval` and
    // timezone information into `tz`.
    syscall2(
        SYS_GETTIMEOFDAY,
        timeval as *mut timeval_t as usize,
        tz as *mut timezone_t as usize,
    )
    .map(drop)
}
/// Get the real user ID of the calling process.
/// ```
/// let uid = nc::getuid();
/// assert!(uid > 0);
/// ```
pub fn getuid() -> uid_t {
    // getuid() cannot fail, so unwrapping the raw result is safe here.
    let uid = syscall0(SYS_GETUID).expect("getuid() failed");
    uid as uid_t
}
/// Get extended attribute value.
/// ```
/// let path = "/tmp/nc-getxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let mut buf = [0_u8; 16];
/// let buf_len = buf.len();
/// let ret = nc::getxattr(path, attr_name, buf.as_mut_ptr() as usize, buf_len);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(attr_value.len() as nc::ssize_t));
/// let attr_len = ret.unwrap() as usize;
/// assert_eq!(attr_value.as_bytes(), &buf[..attr_len]);
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn getxattr<P: AsRef<Path>>(
    filename: P,
    name: P,
    value: usize,
    size: size_t,
) -> Result<ssize_t, Errno> {
    // Bind the CStrings to locals so their buffers stay alive (and the raw
    // pointers stay valid) until the syscall returns.
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    let name = CString::new(name.as_ref());
    let name_ptr = name.as_ptr() as usize;
    let size = size as usize;
    // `value` is the raw address of the caller's output buffer of `size` bytes;
    // the return value is the attribute's length in bytes.
    syscall4(SYS_GETXATTR, filename_ptr, name_ptr, value, size).map(|ret| ret as ssize_t)
}
/// Retrieve NUMA memory policy for a thread
pub fn get_mempolicy(
    mode: &mut i32,
    nmask: &mut usize,
    maxnode: usize,
    addr: usize,
    flags: usize,
) -> Result<(), Errno> {
    // `mode` and `nmask` are out-parameters filled by the kernel; `maxnode`
    // bounds the node bitmask, and the meaning of `addr` depends on `flags`.
    // NOTE(review): a single `usize` can only hold one word of node mask —
    // confirm callers never need more than `usize::BITS` nodes.
    let mode_ptr = mode as *mut i32 as usize;
    let nmask_ptr = nmask as *mut usize as usize;
    syscall5(SYS_GET_MEMPOLICY, mode_ptr, nmask_ptr, maxnode, addr, flags).map(drop)
}
/// Get list of robust futexes.
// TODO(Shaohua): Fix argument type.
pub fn get_robust_list(
    pid: pid_t,
    head_ptr: &mut usize,
    len_ptr: &mut size_t,
) -> Result<(), Errno> {
    // `head_ptr` receives the address of the thread's robust-futex list and
    // `len_ptr` its length, as reported by the kernel.
    // NOTE(review): typed pointers would be safer than bare usize here
    // (see the TODO above this function).
    let pid = pid as usize;
    let head_ptr = head_ptr as *mut usize as usize;
    let len_ptr = len_ptr as *mut size_t as usize;
    syscall3(SYS_GET_ROBUST_LIST, pid, head_ptr, len_ptr).map(drop)
}
/// Load a kernel module.
pub fn init_module<P: AsRef<Path>>(
    module_image: usize,
    len: usize,
    param_values: P,
) -> Result<(), Errno> {
    // `module_image` is the raw address of a module image of `len` bytes;
    // `param_values` is the module parameter string passed to the kernel.
    // The CString is bound to a local so the pointer stays valid for the call.
    let param_values = CString::new(param_values.as_ref());
    let param_values_ptr = param_values.as_ptr() as usize;
    syscall3(SYS_INIT_MODULE, module_image, len, param_values_ptr).map(drop)
}
/// Add a watch to an initialized inotify instance.
/// ```
/// let ret = nc::inotify_init1(nc::IN_NONBLOCK | nc::IN_CLOEXEC);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// let path = "/etc/passwd";
/// let ret = nc::inotify_add_watch(fd, path, nc::IN_MODIFY);
/// assert!(ret.is_ok());
/// let _wd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn inotify_add_watch<P: AsRef<Path>>(fd: i32, filename: P, mask: u32) -> Result<i32, Errno> {
    // Keep the CString alive in a local so the raw pointer remains valid
    // for the duration of the syscall; returns the new watch descriptor.
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    syscall3(SYS_INOTIFY_ADD_WATCH, fd as usize, filename_ptr, mask as usize)
        .map(|wd| wd as i32)
}
/// Initialize an inotify instance.
/// ```
/// let ret = nc::inotify_init1(nc::IN_NONBLOCK | nc::IN_CLOEXEC);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn inotify_init1(flags: i32) -> Result<i32, Errno> {
    // `flags` may combine IN_NONBLOCK / IN_CLOEXEC (see the doc example
    // above); returns the new inotify file descriptor.
    syscall1(SYS_INOTIFY_INIT1, flags as usize).map(|fd| fd as i32)
}
/// Remove an existing watch from an inotify instance.
/// ```
/// let ret = nc::inotify_init1(nc::IN_NONBLOCK | nc::IN_CLOEXEC);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// let path = "/etc/passwd";
/// let ret = nc::inotify_add_watch(fd, path, nc::IN_MODIFY);
/// assert!(ret.is_ok());
/// let wd = ret.unwrap();
/// let ret = nc::inotify_rm_watch(fd, wd);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn inotify_rm_watch(fd: i32, wd: i32) -> Result<(), Errno> {
    // `wd` is the watch descriptor previously returned by inotify_add_watch().
    syscall2(SYS_INOTIFY_RM_WATCH, fd as usize, wd as usize).map(drop)
}
/// Control device.
/// ```
/// let path = "/tmp/nc-ioctl";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut attr: i32 = 0;
/// let cmd = -2146933247; // nc::FS_IOC_GETFLAGS
/// let ret = nc::ioctl(fd, cmd, &mut attr as *mut i32 as usize);
/// assert!(ret.is_ok());
/// println!("attr: {}", attr);
///
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn ioctl(fd: i32, cmd: i32, arg: usize) -> Result<(), Errno> {
    // `arg` is an opaque machine word: a value or a pointer depending on `cmd`.
    syscall3(SYS_IOCTL, fd as usize, cmd as usize, arg).map(drop)
}
/// Get I/O scheduling class and priority
/// ```
/// let ret = nc::ioprio_get(nc::IOPRIO_WHO_PROCESS, nc::getpid());
/// assert!(ret.is_ok());
/// let prio = ret.unwrap();
/// let prio_class = nc::ioprio_prio_class(prio);
/// assert_eq!(prio_class, nc::IOPRIO_CLASS_NONE);
/// let _prio_data = nc::ioprio_prio_data(prio);
/// ```
pub fn ioprio_get(which: i32, who: i32) -> Result<i32, Errno> {
    // Returns the encoded (class, data) I/O priority value; decode with
    // ioprio_prio_class()/ioprio_prio_data() as in the doc example above.
    syscall2(SYS_IOPRIO_GET, which as usize, who as usize).map(|prio| prio as i32)
}
/// Set I/O scheduling class and priority
/// ```
/// let ret = nc::ioprio_get(nc::IOPRIO_WHO_PROCESS, 0);
/// assert!(ret.is_ok());
/// let prio = ret.unwrap();
/// let prio_class = nc::ioprio_prio_class(prio);
/// assert_eq!(prio_class, nc::IOPRIO_CLASS_NONE);
/// let prio_data = nc::ioprio_prio_data(prio);
///
/// // Higher priority
/// let new_prio_data = prio_data - 1;
/// let new_prio = nc::ioprio_prio_value(nc::IOPRIO_CLASS_BE, new_prio_data);
/// let ret = nc::ioprio_set(nc::IOPRIO_WHO_PROCESS, 0, new_prio);
/// assert!(ret.is_ok());
/// ```
pub fn ioprio_set(which: i32, who: i32, ioprio: i32) -> Result<(), Errno> {
    // `ioprio` is an encoded (class, data) value; build it with
    // ioprio_prio_value() as in the doc example above.
    syscall3(SYS_IOPRIO_SET, which as usize, who as usize, ioprio as usize).map(drop)
}
/// Attempts to cancel an iocb previously passed to io_submit.
/// Attempts to cancel an iocb previously passed to io_submit. If
/// the operation is successfully cancelled, the resulting event is
/// copied into the memory pointed to by result without being placed
/// into the completion queue and 0 is returned. May fail with
/// -EFAULT if any of the data structures pointed to are invalid.
/// May fail with -EINVAL if aio_context specified by ctx_id is
/// invalid. May fail with -EAGAIN if the iocb specified was not
/// cancelled. Will fail with -ENOSYS if not implemented.
pub fn io_cancel(
    ctx_id: aio_context_t,
    iocb: &mut iocb_t,
    result: &mut io_event_t,
) -> Result<(), Errno> {
    // On successful cancellation the completion event is copied into
    // `result` instead of being queued (see the doc comment above).
    let ctx_id = ctx_id as usize;
    let iocb_ptr = iocb as *mut iocb_t as usize;
    let result_ptr = result as *mut io_event_t as usize;
    syscall3(SYS_IO_CANCEL, ctx_id, iocb_ptr, result_ptr).map(drop)
}
/// Destroy the aio_context specified. May cancel any outstanding
/// AIOs and block on completion. Will fail with -ENOSYS if not
/// implemented. May fail with -EINVAL if the context pointed to is invalid.
pub fn io_destroy(ctx_id: aio_context_t) -> Result<(), Errno> {
    // Tears down the AIO context; per the doc comment above, this may
    // cancel outstanding requests and block until they complete.
    syscall1(SYS_IO_DESTROY, ctx_id as usize).map(drop)
}
/// Attempts to read at least min_nr events and up to nr events from
/// the completion queue for the aio_context specified by ctx_id. If
/// it succeeds, the number of read events is returned. May fail with
/// -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
/// out of range, if timeout is out of range. May fail with -EFAULT
/// if any of the memory specified is invalid. May return 0 or
/// < min_nr if the timeout specified by timeout has elapsed
/// before sufficient events are available, where timeout == NULL
/// specifies an infinite timeout. Note that the timeout pointed to by
/// timeout is relative. Will fail with -ENOSYS if not implemented.
pub fn io_getevents(
    ctx_id: aio_context_t,
    min_nr: isize,
    nr: isize,
    events: &mut io_event_t,
    timeout: &mut timespec_t,
) -> Result<i32, Errno> {
    // `events` points at the first element of an array with room for `nr`
    // entries; the return value is the number of events actually read.
    // NOTE(review): a `&mut [io_event_t]` parameter would express the array
    // contract in the type — confirm callers always pass a buffer of `nr`
    // elements.
    let ctx_id = ctx_id as usize;
    let min_nr = min_nr as usize;
    let nr = nr as usize;
    let events_ptr = events as *mut io_event_t as usize;
    let timeout_ptr = timeout as *mut timespec_t as usize;
    syscall5(
        SYS_IO_GETEVENTS,
        ctx_id,
        min_nr,
        nr,
        events_ptr,
        timeout_ptr,
    )
    .map(|ret| ret as i32)
}
/// read asynchronous I/O events from the completion queue
pub fn io_pgetevents(
    ctx_id: aio_context_t,
    min_nr: isize,
    nr: isize,
    events: &mut io_event_t,
    timeout: &mut timespec_t,
    usig: &aio_sigset_t,
) -> Result<i32, Errno> {
    // Like io_getevents(), with an additional signal-mask argument (`usig`)
    // in effect while waiting. `events` points at the first element of an
    // array with room for `nr` entries; returns the number of events read.
    let ctx_id = ctx_id as usize;
    let min_nr = min_nr as usize;
    let nr = nr as usize;
    let events_ptr = events as *mut io_event_t as usize;
    let timeout_ptr = timeout as *mut timespec_t as usize;
    let usig_ptr = usig as *const aio_sigset_t as usize;
    syscall6(
        SYS_IO_PGETEVENTS,
        ctx_id,
        min_nr,
        nr,
        events_ptr,
        timeout_ptr,
        usig_ptr,
    )
    .map(|ret| ret as i32)
}
/// Create an asynchronous I/O context.
/// Create an aio_context capable of receiving at least nr_events.
/// ctxp must not point to an aio_context that already exists, and
/// must be initialized to 0 prior to the call. On successful
/// creation of the aio_context, *ctxp is filled in with the resulting
/// handle. May fail with -EINVAL if *ctxp is not initialized,
/// if the specified nr_events exceeds internal limits. May fail
/// with -EAGAIN if the specified nr_events exceeds the user's limit
/// of available events. May fail with -ENOMEM if insufficient kernel
/// resources are available. May fail with -EFAULT if an invalid
/// pointer is passed for ctxp. Will fail with -ENOSYS if not implemented.
pub fn io_setup(nr_events: u32, ctx_id: &mut aio_context_t) -> Result<(), Errno> {
    // Per the doc comment above, `*ctx_id` must be zero before the call;
    // on success the kernel writes the new context handle through it.
    let ctx_id_ptr = ctx_id as *mut aio_context_t as usize;
    syscall2(SYS_IO_SETUP, nr_events as usize, ctx_id_ptr).map(drop)
}
/// Queue the nr iocbs pointed to by iocbpp for processing. Returns
/// the number of iocbs queued. May return -EINVAL if the aio_context
/// specified by ctx_id is invalid, if nr is < 0, if the iocb at
/// `*iocbpp[0]` is not properly initialized, if the operation specified
/// is invalid for the file descriptor in the iocb. May fail with
/// -EFAULT if any of the data structures point to invalid data. May
/// fail with -EBADF if the file descriptor specified in the first
/// iocb is invalid. May fail with -EAGAIN if insufficient resources
/// are available to queue any iocbs. Will return 0 if nr is 0. Will
/// fail with -ENOSYS if not implemented.
// TODO(Shaohua): type of iocbpp is struct iocb**
pub fn io_submit(ctx_id: aio_context_t, nr: isize, iocb: &mut iocb_t) -> Result<i32, Errno> {
    // Returns the number of iocbs queued.
    // NOTE(review): per the doc comment above, the kernel expects an array
    // of *pointers* to iocbs (struct iocb**), but this signature passes the
    // address of a single iocb_t directly (see the TODO) — confirm how
    // callers use `nr` > 1 with this interface.
    let ctx_id = ctx_id as usize;
    let nr = nr as usize;
    let iocb_ptr = iocb as *mut iocb_t as usize;
    syscall3(SYS_IO_SUBMIT, ctx_id, nr, iocb_ptr).map(|ret| ret as i32)
}
/// Initiate and/or complete asynchronous I/O on an io_uring instance.
pub fn io_uring_enter(
    fd: i32,
    to_submit: u32,
    min_complete: u32,
    flags: u32,
    sig: &sigset_t,
    sigsetsize: size_t,
) -> Result<i32, Errno> {
    // `fd` is the ring file descriptor; `to_submit` queued entries are
    // submitted and (depending on `flags`) the call waits for at least
    // `min_complete` completions. `sig`/`sigsetsize` describe a signal
    // mask applied while waiting.
    let fd = fd as usize;
    let to_submit = to_submit as usize;
    let min_complete = min_complete as usize;
    let flags = flags as usize;
    let sig_ptr = sig as *const sigset_t as usize;
    let sigsetsize = sigsetsize as usize;
    syscall6(
        SYS_IO_URING_ENTER,
        fd,
        to_submit,
        min_complete,
        flags,
        sig_ptr,
        sigsetsize,
    )
    .map(|ret| ret as i32)
}
/// Register files or user buffers for asynchronous I/O on an io_uring instance.
pub fn io_uring_register(fd: i32, opcode: u32, arg: usize, nr_args: u32) -> Result<i32, Errno> {
    // `arg` is an opaque pointer/value whose meaning depends on `opcode`.
    syscall4(
        SYS_IO_URING_REGISTER,
        fd as usize,
        opcode as usize,
        arg,
        nr_args as usize,
    )
    .map(|val| val as i32)
}
/// Set up an io_uring instance with room for `entries` submission entries.
pub fn io_uring_setup(entries: u32, params: &mut io_uring_params_t) -> Result<i32, Errno> {
    // The kernel fills `params` with ring details; the i32 result is the
    // file descriptor referring to the new ring.
    let params_ptr = params as *mut io_uring_params_t as usize;
    syscall2(SYS_IO_URING_SETUP, entries as usize, params_ptr).map(|ring_fd| ring_fd as i32)
}
/// Compare two processes to determine if they share a kernel resource.
pub fn kcmp(pid1: pid_t, pid2: pid_t, type_: i32, idx1: usize, idx2: usize) -> Result<i32, Errno> {
    // `idx1`/`idx2` are already word-sized; only the first three arguments
    // need widening before the raw syscall.
    syscall5(
        SYS_KCMP,
        pid1 as usize,
        pid2 as usize,
        type_ as usize,
        idx1,
        idx2,
    )
    .map(|cmp| cmp as i32)
}
/// Load a new kernel for later execution.
pub fn kexec_file_load<P: AsRef<Path>>(
    kernel_fd: i32,
    initrd_fd: i32,
    cmdline: P,
    flags: usize,
) -> Result<(), Errno> {
    let kernel_fd = kernel_fd as usize;
    let initrd_fd = initrd_fd as usize;
    // NOTE(review): `cmdline_len` is taken from the path bytes before NUL
    // termination; confirm whether the kernel expects the length to include
    // the trailing NUL byte.
    let cmdline_len = cmdline.as_ref().len();
    // The CString is bound to a local so the pointer stays valid for the call.
    let cmdline = CString::new(cmdline.as_ref());
    let cmdline_ptr = cmdline.as_ptr() as usize;
    syscall5(
        SYS_KEXEC_FILE_LOAD,
        kernel_fd,
        initrd_fd,
        cmdline_ptr,
        cmdline_len,
        flags,
    )
    .map(drop)
}
/// Load a new kernel for later execution.
pub fn kexec_load(
    entry: usize,
    nr_segments: usize,
    segments: &mut kexec_segment_t,
    flags: usize,
) -> Result<(), Errno> {
    // `segments` is the address of the first of `nr_segments` segment
    // descriptors; `entry` is the new kernel's entry point address.
    let segments_ptr = segments as *mut kexec_segment_t as usize;
    syscall4(SYS_KEXEC_LOAD, entry, nr_segments, segments_ptr, flags).map(drop)
}
/// Manipulate the kernel's key management facility.
pub fn keyctl(
    operation: i32,
    arg2: usize,
    arg3: usize,
    arg4: usize,
    arg5: usize,
) -> Result<usize, Errno> {
    // The meaning of `arg2`..`arg5` depends on `operation`; the raw words
    // are passed straight through to the kernel.
    syscall5(SYS_KEYCTL, operation as usize, arg2, arg3, arg4, arg5)
}
/// Send signal to a process.
/// ```
/// let pid = nc::fork();
/// assert!(pid.is_ok());
/// let pid = pid.unwrap();
/// assert!(pid >= 0);
/// if pid == 0 {
/// // child process.
/// let args = [""];
/// let env = [""];
/// let ret = nc::execve("/usr/bin/yes", &args, &env);
/// assert!(ret.is_ok());
/// } else {
/// // parent process.
/// let ret = nc::kill(pid, nc::SIGTERM);
/// assert!(ret.is_ok());
/// }
/// ```
pub fn kill(pid: pid_t, signal: i32) -> Result<(), Errno> {
    // Both arguments are widened to machine words for the raw syscall.
    syscall2(SYS_KILL, pid as usize, signal as usize).map(drop)
}
/// Get extended attribute value.
/// ```
/// let path = "/tmp/nc-lgetxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let mut buf = [0_u8; 16];
/// let buf_len = buf.len();
/// let ret = nc::lgetxattr(path, attr_name, buf.as_mut_ptr() as usize, buf_len);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(attr_value.len() as nc::ssize_t));
/// let attr_len = ret.unwrap() as usize;
/// assert_eq!(attr_value.as_bytes(), &buf[..attr_len]);
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn lgetxattr<P: AsRef<Path>>(
    filename: P,
    name: P,
    value: usize,
    size: size_t,
) -> Result<ssize_t, Errno> {
    // The CStrings are bound to locals so their buffers (and the raw
    // pointers derived from them) stay valid until the syscall returns.
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    let name = CString::new(name.as_ref());
    let name_ptr = name.as_ptr() as usize;
    let size = size as usize;
    // `value` is the raw address of the caller's output buffer of `size`
    // bytes; returns the attribute's length.
    syscall4(SYS_LGETXATTR, filename_ptr, name_ptr, value, size).map(|ret| ret as ssize_t)
}
/// Make a new name for a file.
/// ```
/// let old_filename = "/tmp/nc-linkat-src";
/// let ret = nc::open(old_filename, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let new_filename = "/tmp/nc-linkat-dst";
/// let flags = nc::AT_SYMLINK_FOLLOW;
/// assert!(nc::linkat(nc::AT_FDCWD, old_filename, nc::AT_FDCWD, new_filename, flags).is_ok());
/// assert!(nc::unlink(old_filename).is_ok());
/// assert!(nc::unlink(new_filename).is_ok());
/// ```
pub fn linkat<P: AsRef<Path>>(
    olddfd: i32,
    oldfilename: P,
    newdfd: i32,
    newfilename: P,
    flags: i32,
) -> Result<(), Errno> {
    // `olddfd`/`newdfd` are directory file descriptors used to resolve
    // relative paths (AT_FDCWD in the doc example above).
    let olddfd = olddfd as usize;
    // CStrings are bound to locals so the pointers stay valid for the call.
    let oldfilename = CString::new(oldfilename.as_ref());
    let oldfilename_ptr = oldfilename.as_ptr() as usize;
    let newdfd = newdfd as usize;
    let newfilename = CString::new(newfilename.as_ref());
    let newfilename_ptr = newfilename.as_ptr() as usize;
    let flags = flags as usize;
    syscall5(
        SYS_LINKAT,
        olddfd,
        oldfilename_ptr,
        newdfd,
        newfilename_ptr,
        flags,
    )
    .map(drop)
}
/// Listen for connections on a socket.
pub fn listen(sockfd: i32, backlog: i32) -> Result<(), Errno> {
    // Marks `sockfd` as a passive socket; `backlog` bounds the queue of
    // pending connections.
    syscall2(SYS_LISTEN, sockfd as usize, backlog as usize).map(drop)
}
/// List extended attribute names.
/// ```
/// let path = "/tmp/nc-listxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let mut buf = [0_u8; 16];
/// let buf_len = buf.len();
/// let ret = nc::listxattr(path, buf.as_mut_ptr() as usize, buf_len);
/// let attr_len = ret.unwrap() as usize;
/// assert_eq!(&buf[..attr_len - 1], attr_name.as_bytes());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn listxattr<P: AsRef<Path>>(filename: P, list: usize, size: size_t) -> Result<ssize_t, Errno> {
    // `list` is the raw address of a caller-provided buffer of `size` bytes;
    // keep the CString in a local so its pointer stays valid for the call.
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    syscall3(SYS_LISTXATTR, filename_ptr, list, size).map(|len| len as ssize_t)
}
/// List extended attribute names.
/// ```
/// let path = "/tmp/nc-llistxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let mut buf = [0_u8; 16];
/// let buf_len = buf.len();
/// let ret = nc::llistxattr(path, buf.as_mut_ptr() as usize, buf_len);
/// let attr_len = ret.unwrap() as usize;
/// assert_eq!(&buf[..attr_len - 1], attr_name.as_bytes());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn llistxattr<P: AsRef<Path>>(
    filename: P,
    list: usize,
    size: size_t,
) -> Result<ssize_t, Errno> {
    // `list` is the raw address of a caller-provided buffer of `size` bytes;
    // keep the CString in a local so its pointer stays valid for the call.
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    syscall3(SYS_LLISTXATTR, filename_ptr, list, size).map(|len| len as ssize_t)
}
/// Return a directory entry's path.
// TODO(Shaohua): Returns a string.
pub fn lookup_dcookie(cookie: u64, buf: &mut [u8]) -> Result<i32, Errno> {
    // The buffer is passed as (pointer, length); the kernel copies the
    // entry's path into it.
    let buf_len = buf.len();
    let buf_ptr = buf.as_mut_ptr() as usize;
    syscall3(SYS_LOOKUP_DCOOKIE, cookie as usize, buf_ptr, buf_len).map(|n| n as i32)
}
/// Remove an extended attribute.
/// ```
/// let path = "/tmp/nc-lremovexattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let ret = nc::lremovexattr(path, attr_name);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn lremovexattr<P: AsRef<Path>>(filename: P, name: P) -> Result<(), Errno> {
    // Both CStrings must stay alive until the syscall returns, so bind
    // them before taking raw pointers.
    let filename = CString::new(filename.as_ref());
    let name = CString::new(name.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    let name_ptr = name.as_ptr() as usize;
    syscall2(SYS_LREMOVEXATTR, filename_ptr, name_ptr).map(drop)
}
/// Reposition file offset.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::lseek(fd, 42, nc::SEEK_SET);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn lseek(fd: i32, offset: off_t, whence: i32) -> Result<(), Errno> {
    // NOTE(review): the kernel returns the resulting file offset, but it is
    // discarded here via `map(drop)` — callers cannot observe the new
    // position. Changing the return type would break the public interface,
    // so this is only flagged.
    let fd = fd as usize;
    let offset = offset as usize;
    let whence = whence as usize;
    syscall3(SYS_LSEEK, fd, offset, whence).map(drop)
}
/// Set extended attribute value.
/// ```
/// let path = "/tmp/nc-lsetxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::lsetxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn lsetxattr<P: AsRef<Path>>(
    filename: P,
    name: P,
    value: usize,
    size: size_t,
    flags: i32,
) -> Result<(), Errno> {
    // The CStrings are bound to locals so their buffers outlive the syscall.
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    let name = CString::new(name.as_ref());
    let name_ptr = name.as_ptr() as usize;
    let size = size as usize;
    let flags = flags as usize;
    // `value` is the raw address of the attribute data (`size` bytes);
    // `flags` may be XATTR_CREATE as in the doc example above.
    syscall5(SYS_LSETXATTR, filename_ptr, name_ptr, value, size, flags).map(drop)
}
/// Give advice about use of memory.
/// ```
/// // Initialize an anonymous mapping with 4 pages.
/// let map_length = 4 * nc::PAGE_SIZE;
/// let addr = nc::mmap(
/// 0,
/// map_length,
/// nc::PROT_READ | nc::PROT_WRITE,
/// nc::MAP_PRIVATE | nc::MAP_ANONYMOUS,
/// -1,
/// 0,
/// );
/// assert!(addr.is_ok());
/// let addr = addr.unwrap();
///
/// // Set the third page readonly. And we will run into SIGSEGV when updating it.
/// let ret = nc::madvise(addr + 2 * nc::PAGE_SIZE, nc::PAGE_SIZE, nc::MADV_RANDOM);
/// assert!(ret.is_ok());
///
/// assert!(nc::munmap(addr, map_length).is_ok());
/// ```
pub fn madvise(addr: usize, len: size_t, advice: i32) -> Result<(), Errno> {
    // `advice` is one of the MADV_* constants (MADV_RANDOM in the doc
    // example above).
    syscall3(SYS_MADVISE, addr, len as usize, advice as usize).map(drop)
}
/// Set memory policy for a memory range.
pub fn mbind(
    start: usize,
    len: usize,
    mode: i32,
    nmask: *const usize,
    maxnode: usize,
    flags: i32,
) -> Result<(), Errno> {
    // `nmask` is a raw pointer to a node bitmask of at least `maxnode` bits;
    // the caller is responsible for keeping it valid for the call.
    let mode = mode as usize;
    let nmask = nmask as usize;
    let flags = flags as usize;
    syscall6(SYS_MBIND, start, len, mode, nmask, maxnode, flags).map(drop)
}
/// sys_membarrier - issue memory barriers on a set of threads
/// @cmd: Takes command values defined in enum membarrier_cmd.
/// @flags: Currently needs to be 0. For future extensions.
///
/// If this system call is not implemented, -ENOSYS is returned. If the
/// command specified does not exist, not available on the running
/// kernel, or if the command argument is invalid, this system call
/// returns -EINVAL. For a given command, with flags argument set to 0,
/// this system call is guaranteed to always return the same value until
/// reboot.
///
/// All memory accesses performed in program order from each targeted thread
/// is guaranteed to be ordered with respect to sys_membarrier(). If we use
/// the semantic "barrier()" to represent a compiler barrier forcing memory
/// accesses to be performed in program order across the barrier, and
/// smp_mb() to represent explicit memory barriers forcing full memory
/// ordering across the barrier, we have the following ordering table for
/// each pair of barrier(), sys_membarrier() and smp_mb():
///
/// The pair ordering is detailed as (O: ordered, X: not ordered):
///
/// ```text
/// barrier() smp_mb() sys_membarrier()
/// barrier() X X O
/// smp_mb() X O O
/// sys_membarrier() O O O
/// ```
pub fn membarrier(cmd: i32, flags: i32) -> Result<i32, Errno> {
    // Per the doc comment above, `flags` must currently be 0 (reserved for
    // future extensions).
    syscall2(SYS_MEMBARRIER, cmd as usize, flags as usize).map(|val| val as i32)
}
/// Create an anonymous file.
pub fn memfd_create<P: AsRef<Path>>(name: P, flags: u32) -> Result<i32, Errno> {
    // Bind the CString so its buffer outlives the syscall; returns the
    // file descriptor of the new anonymous file.
    let name = CString::new(name.as_ref());
    let name_ptr = name.as_ptr() as usize;
    syscall2(SYS_MEMFD_CREATE, name_ptr, flags as usize).map(|fd| fd as i32)
}
/// Move all pages in a process to another set of nodes
pub fn migrate_pages(
    pid: pid_t,
    maxnode: usize,
    old_nodes: *const usize,
    new_nodes: *const usize,
) -> Result<isize, Errno> {
    // `old_nodes`/`new_nodes` are raw pointers to node bitmasks of at least
    // `maxnode` bits; the caller must keep them valid for the call.
    let pid = pid as usize;
    let old_nodes = old_nodes as usize;
    let new_nodes = new_nodes as usize;
    syscall4(SYS_MIGRATE_PAGES, pid, maxnode, old_nodes, new_nodes).map(|ret| ret as isize)
}
/// mincore() returns the memory residency status of the pages in the
/// current process's address space specified by [addr, addr + len).
/// The status is returned in a vector of bytes. The least significant
/// bit of each byte is 1 if the referenced page is in memory, otherwise
/// it is zero.
///
/// Because the status of a page can change after mincore() checks it
/// but before it returns to the application, the returned vector may
/// contain stale information. Only locked pages are guaranteed to
/// remain in memory.
///
/// return values:
/// zero - success
/// -EFAULT - vec points to an illegal address
/// -EINVAL - addr is not a multiple of PAGE_SIZE
/// -ENOMEM - Addresses in the range [addr, addr + len] are
/// invalid for the address space of this process, or specify one or
/// more pages which are not currently mapped
/// -EAGAIN - A kernel resource was temporarily unavailable.
pub fn mincore(start: usize, len: size_t, vec: *const u8) -> Result<(), Errno> {
    // Per the doc comment above, the kernel *writes* one status byte per
    // page into `vec`.
    // NOTE(review): a `*mut u8` (or `&mut [u8]`) parameter would express
    // that output contract better than `*const u8`.
    let len = len as usize;
    let vec_ptr = vec as usize;
    syscall3(SYS_MINCORE, start, len, vec_ptr).map(drop)
}
/// Create a directory.
/// ```
/// let path = "/tmp/nc-mkdir";
/// let ret = nc::mkdirat(nc::AT_FDCWD, path, 0o755);
/// assert!(ret.is_ok());
/// assert!(nc::rmdir(path).is_ok());
/// ```
pub fn mkdirat<P: AsRef<Path>>(dirfd: i32, filename: P, mode: mode_t) -> Result<(), Errno> {
    // The CString must outlive the syscall, so bind it before taking the
    // raw pointer; `dirfd` may be AT_FDCWD as in the doc example above.
    let filename = CString::new(filename.as_ref());
    let filename_ptr = filename.as_ptr() as usize;
    syscall3(SYS_MKDIRAT, dirfd as usize, filename_ptr, mode as usize).map(drop)
}
/// Create a special or ordinary file.
/// ```
/// let path = "/tmp/nc-mknodat";
/// // Create a named pipe.
/// let ret = nc::mknodat(nc::AT_FDCWD, path, nc::S_IFIFO | nc::S_IRUSR | nc::S_IWUSR, 0);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn mknodat<P: AsRef<Path>>(
    dirfd: i32,
    filename: P,
    mode: mode_t,
    dev: dev_t,
) -> Result<(), Errno> {
    // Keep the CString alive in a local until the syscall returns.
    let filename = CString::new(filename.as_ref());
    syscall4(
        SYS_MKNODAT,
        dirfd as usize,
        filename.as_ptr() as usize,
        mode as usize,
        dev as usize,
    )
    .map(drop)
}
/// Lock memory.
/// ```
/// let mut passwd_buf = [0_u8; 64];
/// let ret = nc::mlock(passwd_buf.as_ptr() as usize, passwd_buf.len());
/// assert!(ret.is_ok());
/// ```
pub fn mlock(addr: usize, len: size_t) -> Result<(), Errno> {
    syscall2(SYS_MLOCK, addr, len as usize).map(drop)
}
/// Lock memory.
/// ```
/// let mut passwd_buf = [0_u8; 64];
/// let ret = nc::mlock2(passwd_buf.as_ptr() as usize, passwd_buf.len(), nc::MCL_CURRENT);
/// assert!(ret.is_ok());
/// ```
pub fn mlock2(addr: usize, len: size_t, flags: i32) -> Result<(), Errno> {
    syscall3(SYS_MLOCK2, addr, len as usize, flags as usize).map(drop)
}
/// Lock memory.
/// ```
/// let ret = nc::mlockall(nc::MCL_CURRENT);
/// assert!(ret.is_ok());
/// ```
pub fn mlockall(flags: i32) -> Result<(), Errno> {
    syscall1(SYS_MLOCKALL, flags as usize).map(drop)
}
/// Map files or devices into memory.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// let mut sb = nc::stat_t::default();
/// let ret = nc::fstat(fd, &mut sb);
/// assert!(ret.is_ok());
///
/// let offset: usize = 0;
/// let length: usize = sb.st_size as usize - offset;
/// // Offset for mmap must be page aligned.
/// let pa_offset: usize = offset & !(nc::PAGE_SIZE - 1);
/// let map_length = length + offset - pa_offset;
///
/// let addr = nc::mmap(
/// 0, // 0 as NULL
/// map_length,
/// nc::PROT_READ,
/// nc::MAP_PRIVATE,
/// fd,
/// pa_offset as nc::off_t,
/// );
/// assert!(addr.is_ok());
/// let addr = addr.unwrap();
///
/// let n_write = nc::write(1, addr + offset - pa_offset, length);
/// assert!(n_write.is_ok());
/// assert_eq!(n_write, Ok(length as nc::ssize_t));
/// assert!(nc::munmap(addr, map_length).is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn mmap(
    start: usize,
    len: size_t,
    prot: i32,
    flags: i32,
    fd: i32,
    offset: off_t,
) -> Result<usize, Errno> {
    // On success the raw return value is the address of the mapping.
    syscall6(
        SYS_MMAP,
        start,
        len as usize,
        prot as usize,
        flags as usize,
        fd as usize,
        offset as usize,
    )
}
/// Mount filesystem.
/// ```
/// let target_dir = "/tmp/nc-mount";
/// let ret = nc::mkdir(target_dir, 0o755);
/// assert!(ret.is_ok());
///
/// let src_dir = "/etc";
/// let fs_type = "";
/// let mount_flags = nc::MS_BIND | nc::MS_RDONLY;
/// let data = 0;
/// let ret = nc::mount(src_dir, target_dir, fs_type, mount_flags, data);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
///
/// assert!(nc::rmdir(target_dir).is_ok());
/// ```
pub fn mount<P: AsRef<Path>>(
    dev_name: P,
    dir_name: P,
    fs_type: P,
    flags: usize,
    data: usize,
) -> Result<(), Errno> {
    // Bind all three C strings to locals so their buffers outlive the call.
    let dev_name = CString::new(dev_name.as_ref());
    let dir_name = CString::new(dir_name.as_ref());
    let fs_type = CString::new(fs_type.as_ref());
    syscall5(
        SYS_MOUNT,
        dev_name.as_ptr() as usize,
        dir_name.as_ptr() as usize,
        fs_type.as_ptr() as usize,
        flags,
        data,
    )
    .map(drop)
}
/// Move a mount from one place to another. In combination with
/// fsopen()/fsmount() this is used to install a new mount and in combination
/// with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
/// a mount subtree.
///
/// Note the flags value is a combination of MOVE_MOUNT_* flags.
pub fn move_mount<P: AsRef<Path>>(
    from_dfd: i32,
    from_pathname: P,
    to_dfd: i32,
    to_pathname: P,
    flags: u32,
) -> Result<i32, Errno> {
    // Both path buffers must outlive the syscall, hence the local bindings.
    let from_pathname = CString::new(from_pathname.as_ref());
    let to_pathname = CString::new(to_pathname.as_ref());
    syscall5(
        SYS_MOVE_MOUNT,
        from_dfd as usize,
        from_pathname.as_ptr() as usize,
        to_dfd as usize,
        to_pathname.as_ptr() as usize,
        flags as usize,
    )
    .map(|ret| ret as i32)
}
/// Move individual pages of a process to another node
pub fn move_pages(
    pid: pid_t,
    nr_pages: usize,
    pages: usize,
    nodes: *const i32,
    status: &mut i32,
    flags: i32,
) -> Result<(), Errno> {
    syscall6(
        SYS_MOVE_PAGES,
        pid as usize,
        nr_pages,
        pages,
        nodes as usize,
        status as *mut i32 as usize,
        flags as usize,
    )
    .map(drop)
}
/// Set protection on a region of memory.
/// ```
/// // Initialize an anonymous mapping with 4 pages.
/// let map_length = 4 * nc::PAGE_SIZE;
/// let addr = nc::mmap(
/// 0,
/// map_length,
/// nc::PROT_READ | nc::PROT_WRITE,
/// nc::MAP_PRIVATE | nc::MAP_ANONYMOUS,
/// -1,
/// 0,
/// );
/// assert!(addr.is_ok());
/// let addr = addr.unwrap();
///
/// // Set the third page readonly. And we will run into SIGSEGV when updating it.
/// let ret = nc::mprotect(addr + 2 * nc::PAGE_SIZE, nc::PAGE_SIZE, nc::PROT_READ);
/// assert!(ret.is_ok());
///
/// assert!(nc::munmap(addr, map_length).is_ok());
/// ```
pub fn mprotect(addr: usize, len: size_t, prot: i32) -> Result<(), Errno> {
    syscall3(SYS_MPROTECT, addr, len as usize, prot as usize).map(drop)
}
/// Get/set message queue attributes
/// ```
/// let name = "nc-mq-getsetattr";
/// let ret = nc::mq_open(
/// name,
/// nc::O_CREAT | nc::O_RDWR,
/// (nc::S_IRUSR | nc::S_IWUSR) as nc::umode_t,
/// None,
/// );
/// assert!(ret.is_ok());
/// let mq_id = ret.unwrap();
///
/// let mut attr = nc::mq_attr_t::default();
/// let ret = nc::mq_getsetattr(mq_id, None, Some(&mut attr));
/// assert!(ret.is_ok());
/// println!("attr: {:?}", attr);
///
/// assert!(nc::close(mq_id).is_ok());
/// assert!(nc::mq_unlink(name).is_ok());
/// ```
pub fn mq_getsetattr(
    mqdes: mqd_t,
    new_attr: Option<&mut mq_attr_t>,
    old_attr: Option<&mut mq_attr_t>,
) -> Result<mqd_t, Errno> {
    // `None` becomes a null pointer (0) for either direction.
    let new_attr_ptr = new_attr.map_or(0, |attr| attr as *mut mq_attr_t as usize);
    let old_attr_ptr = old_attr.map_or(0, |attr| attr as *mut mq_attr_t as usize);
    syscall3(SYS_MQ_GETSETATTR, mqdes as usize, new_attr_ptr, old_attr_ptr)
        .map(|ret| ret as mqd_t)
}
/// Register for notification when a message is available
pub fn mq_notify(mqdes: mqd_t, notification: Option<&sigevent_t>) -> Result<(), Errno> {
    // Passing `None` hands the kernel a null notification pointer.
    let notification_ptr =
        notification.map_or(0, |event| event as *const sigevent_t as usize);
    syscall2(SYS_MQ_NOTIFY, mqdes as usize, notification_ptr).map(drop)
}
/// Open a message queue.
/// ```
/// let name = "nc-posix-mq";
/// let ret = nc::mq_open(
/// name,
/// nc::O_CREAT | nc::O_RDWR,
/// (nc::S_IRUSR | nc::S_IWUSR) as nc::umode_t,
/// None,
/// );
/// assert!(ret.is_ok());
/// let mq_id = ret.unwrap();
/// assert!(nc::close(mq_id).is_ok());
/// assert!(nc::mq_unlink(name).is_ok());
/// ```
pub fn mq_open<P: AsRef<Path>>(
name: P,
oflag: i32,
mode: umode_t,
attr: Option<&mut mq_attr_t>,
) -> Result<mqd_t, Errno> {
let name = CString::new(name.as_ref());
let name_ptr = name.as_ptr() as usize;
let oflag = oflag as usize;
let mode = mode as usize;
let attr_ptr = if let Some(attr) = attr {
attr as *mut mq_attr_t as usize
} else {
0
};
syscall4(SYS_MQ_OPEN, name_ptr, oflag, mode, attr_ptr).map(|ret| ret as mqd_t)
}
/// Receive a message from a message queue
/// ```
/// let name = "nc-mq-timedreceive";
/// let ret = nc::mq_open(
/// name,
/// nc::O_CREAT | nc::O_RDWR | nc::O_EXCL,
/// (nc::S_IRUSR | nc::S_IWUSR) as nc::umode_t,
/// None,
/// );
/// assert!(ret.is_ok());
/// let mq_id = ret.unwrap();
///
/// let mut attr = nc::mq_attr_t::default();
/// let ret = nc::mq_getsetattr(mq_id, None, Some(&mut attr));
/// assert!(ret.is_ok());
/// println!("attr: {:?}", attr);
///
/// let msg = "Hello, Rust";
/// let prio = 42;
/// let timeout = nc::timespec_t {
/// tv_sec: 1,
/// tv_nsec: 0,
/// };
/// let ret = nc::mq_timedsend(mq_id, msg.as_bytes(), msg.len(), prio, &timeout);
/// assert!(ret.is_ok());
///
/// let ret = nc::mq_getsetattr(mq_id, None, Some(&mut attr));
/// assert!(ret.is_ok());
/// assert_eq!(attr.mq_curmsgs, 1);
///
/// let mut buf = vec![0_u8; attr.mq_msgsize as usize];
/// let buf_len = buf.len();
/// let mut recv_prio = 0;
/// let read_timeout = nc::timespec_t {
/// tv_sec: 1,
/// tv_nsec: 0,
/// };
/// let ret = nc::mq_timedreceive(mq_id, &mut buf, buf_len, &mut recv_prio, &read_timeout);
/// if let Err(errno) = ret {
/// eprintln!("mq_timedreceive() error: {}", nc::strerror(errno));
/// }
/// assert!(ret.is_ok());
/// let n_read = ret.unwrap() as usize;
/// assert_eq!(n_read, msg.len());
///
/// assert!(nc::close(mq_id).is_ok());
/// assert!(nc::mq_unlink(name).is_ok());
/// ```
pub fn mq_timedreceive(
    mqdes: mqd_t,
    msg: &mut [u8],
    msg_len: usize,
    msg_prio: &mut u32,
    abs_timeout: &timespec_t,
) -> Result<ssize_t, Errno> {
    let mqdes = mqdes as usize;
    // Pass the caller's buffer directly so the kernel writes the received
    // message into it. The previous code copied `msg` into a temporary
    // CString and handed the kernel a pointer to that copy, so the caller's
    // buffer was never filled in.
    let msg_ptr = msg.as_mut_ptr() as usize;
    let msg_prio = msg_prio as *mut u32 as usize;
    let abs_timeout_ptr = abs_timeout as *const timespec_t as usize;
    syscall5(
        SYS_MQ_TIMEDRECEIVE,
        mqdes,
        msg_ptr,
        msg_len,
        msg_prio,
        abs_timeout_ptr,
    )
    .map(|ret| ret as ssize_t)
}
/// Send message to a message queue
/// ```
/// let name = "nc-mq-timedsend";
/// let ret = nc::mq_open(
/// name,
/// nc::O_CREAT | nc::O_RDWR,
/// (nc::S_IRUSR | nc::S_IWUSR) as nc::umode_t,
/// None,
/// );
/// assert!(ret.is_ok());
/// let mq_id = ret.unwrap();
///
/// let mut attr = nc::mq_attr_t::default();
/// let ret = nc::mq_getsetattr(mq_id, None, Some(&mut attr));
/// assert!(ret.is_ok());
/// println!("attr: {:?}", attr);
///
/// let msg = "Hello, Rust";
/// let prio = 0;
/// let timeout = nc::timespec_t {
/// tv_sec: 1,
/// tv_nsec: 0,
/// };
/// let ret = nc::mq_timedsend(mq_id, msg.as_bytes(), msg.len(), prio, &timeout);
/// assert!(ret.is_ok());
///
/// let ret = nc::mq_getsetattr(mq_id, None, Some(&mut attr));
/// assert!(ret.is_ok());
/// assert_eq!(attr.mq_curmsgs, 1);
///
/// assert!(nc::close(mq_id).is_ok());
/// assert!(nc::mq_unlink(name).is_ok());
/// ```
pub fn mq_timedsend(
    mqdes: mqd_t,
    msg: &[u8],
    msg_len: usize,
    msg_prio: u32,
    abs_timeout: &timespec_t,
) -> Result<(), Errno> {
    let mqdes = mqdes as usize;
    // The message bytes are only read by the kernel (the length is passed
    // separately), so hand over the slice pointer directly instead of
    // copying the bytes into a temporary CString. This avoids an extra
    // allocation and keeps messages with interior NUL bytes intact.
    let msg_ptr = msg.as_ptr() as usize;
    let msg_prio = msg_prio as usize;
    let abs_timeout_ptr = abs_timeout as *const timespec_t as usize;
    syscall5(
        SYS_MQ_TIMEDSEND,
        mqdes,
        msg_ptr,
        msg_len,
        msg_prio,
        abs_timeout_ptr,
    )
    .map(drop)
}
/// Remove a message queue
/// ```
/// let name = "nc-mq-unlink";
/// let ret = nc::mq_open(
/// name,
/// nc::O_CREAT | nc::O_RDWR,
/// (nc::S_IRUSR | nc::S_IWUSR) as nc::umode_t,
/// None,
/// );
/// assert!(ret.is_ok());
/// let mq_id = ret.unwrap();
/// assert!(nc::close(mq_id).is_ok());
/// assert!(nc::mq_unlink(name).is_ok());
/// ```
pub fn mq_unlink<P: AsRef<Path>>(name: P) -> Result<(), Errno> {
    // Bind the CString to a local so the buffer lives across the call.
    let name = CString::new(name.as_ref());
    syscall1(SYS_MQ_UNLINK, name.as_ptr() as usize).map(drop)
}
/// Remap a virtual memory address
pub fn mremap(
    addr: usize,
    old_len: size_t,
    new_len: size_t,
    flags: usize,
    new_addr: usize,
) -> Result<usize, Errno> {
    // On success the raw return value is the (possibly moved) address.
    syscall5(
        SYS_MREMAP,
        addr,
        old_len as usize,
        new_len as usize,
        flags,
        new_addr,
    )
}
/// System V message control operations.
/// ```
/// let key = nc::IPC_PRIVATE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | (nc::S_IRUSR | nc::S_IWUSR) as i32;
/// let ret = nc::msgget(key, flags);
/// assert!(ret.is_ok());
/// let msq_id = ret.unwrap();
/// let mut buf = nc::msqid_ds_t::default();
/// let ret = nc::msgctl(msq_id, nc::IPC_RMID, &mut buf);
/// assert!(ret.is_ok());
/// ```
pub fn msgctl(msqid: i32, cmd: i32, buf: &mut msqid_ds_t) -> Result<i32, Errno> {
    let buf_ptr = buf as *mut msqid_ds_t as usize;
    syscall3(SYS_MSGCTL, msqid as usize, cmd as usize, buf_ptr).map(|ret| ret as i32)
}
/// Get a System V message queue identifier.
/// ```
/// let key = nc::IPC_PRIVATE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | (nc::S_IRUSR | nc::S_IWUSR) as i32;
/// let ret = nc::msgget(key, flags);
/// assert!(ret.is_ok());
/// let msq_id = ret.unwrap();
/// let mut buf = nc::msqid_ds_t::default();
/// let ret = nc::msgctl(msq_id, nc::IPC_RMID, &mut buf);
/// assert!(ret.is_ok());
/// ```
pub fn msgget(key: key_t, msgflg: i32) -> Result<i32, Errno> {
    syscall2(SYS_MSGGET, key as usize, msgflg as usize).map(|id| id as i32)
}
/// Receive messages from a System V message queue.
/// ```
/// const MAX_MTEXT: usize = 1024;
///
/// const MTYPE_NULL: isize = 0;
/// const MTYPE_CLIENT: isize = 1;
/// const _MTYPE_SERVER: isize = 2;
///
/// #[derive(Debug, Clone, Copy)]
/// struct Message {
/// pub mtype: isize,
/// pub mtext: [u8; MAX_MTEXT],
/// }
///
/// impl Default for Message {
/// fn default() -> Self {
/// Message {
/// mtype: MTYPE_NULL,
/// mtext: [0; MAX_MTEXT],
/// }
/// }
/// }
///
/// fn main() {
/// let key = nc::IPC_PRIVATE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | (nc::S_IRUSR | nc::S_IWUSR) as i32;
/// let ret = nc::msgget(key, flags);
/// assert!(ret.is_ok());
/// let msq_id = ret.unwrap();
///
/// // Write to message queue.
/// let msg = "Hello, Rust";
/// let mut client_msg = Message {
/// mtype: MTYPE_CLIENT,
/// mtext: [0; MAX_MTEXT],
/// };
/// let msg_len = msg.len();
/// unsafe {
/// let src_ptr = msg.as_ptr();
/// let dst_ptr = client_msg.mtext.as_mut_ptr();
/// core::ptr::copy_nonoverlapping(src_ptr, dst_ptr, msg_len);
/// }
///
/// let ret = nc::msgsnd(msq_id, &client_msg as *const Message as usize, msg_len, 0);
/// assert!(ret.is_ok());
///
/// // Read from message queue.
/// let mut recv_msg = Message::default();
/// let ret = nc::msgrcv(
/// msq_id,
/// &mut recv_msg as *mut Message as usize,
/// MAX_MTEXT,
/// MTYPE_CLIENT,
/// 0,
/// );
/// assert!(ret.is_ok());
/// let recv_msg_len = ret.unwrap() as usize;
/// assert_eq!(recv_msg_len, msg_len);
/// let recv_text = core::str::from_utf8(&recv_msg.mtext[..recv_msg_len]);
/// assert!(recv_text.is_ok());
/// let recv_text = recv_text.unwrap();
/// assert_eq!(recv_text, msg);
/// println!("recv text: {}", recv_text);
///
/// let mut buf = nc::msqid_ds_t::default();
/// let ret = nc::msgctl(msq_id, nc::IPC_RMID, &mut buf);
/// assert!(ret.is_ok());
/// }
/// ```
pub fn msgrcv(
    msqid: i32,
    msgq: usize,
    msgsz: size_t,
    msgtyp: isize,
    msgflg: i32,
) -> Result<ssize_t, Errno> {
    // `msgq` is already the raw address of the caller's message struct.
    syscall5(
        SYS_MSGRCV,
        msqid as usize,
        msgq,
        msgsz as usize,
        msgtyp as usize,
        msgflg as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Append the message to a System V message queue.
/// ```
/// const MAX_MTEXT: usize = 1024;
///
/// const MTYPE_NULL: isize = 0;
/// const MTYPE_CLIENT: isize = 1;
/// const _MTYPE_SERVER: isize = 2;
///
/// #[derive(Debug, Clone, Copy)]
/// struct Message {
/// pub mtype: isize,
/// pub mtext: [u8; MAX_MTEXT],
/// }
///
/// impl Default for Message {
/// fn default() -> Self {
/// Message {
/// mtype: MTYPE_NULL,
/// mtext: [0; MAX_MTEXT],
/// }
/// }
/// }
///
/// fn main() {
/// let key = nc::IPC_PRIVATE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | (nc::S_IRUSR | nc::S_IWUSR) as i32;
/// let ret = nc::msgget(key, flags);
/// assert!(ret.is_ok());
/// let msq_id = ret.unwrap();
///
/// // Write to message queue.
/// let msg = "Hello, Rust";
/// let mut client_msg = Message {
/// mtype: MTYPE_CLIENT,
/// mtext: [0; MAX_MTEXT],
/// };
/// let msg_len = msg.len();
/// unsafe {
/// let src_ptr = msg.as_ptr();
/// let dst_ptr = client_msg.mtext.as_mut_ptr();
/// core::ptr::copy_nonoverlapping(src_ptr, dst_ptr, msg_len);
/// }
///
/// let ret = nc::msgsnd(msq_id, &client_msg as *const Message as usize, msg_len, 0);
/// assert!(ret.is_ok());
///
/// // Read from message queue.
/// let mut recv_msg = Message::default();
/// let ret = nc::msgrcv(
/// msq_id,
/// &mut recv_msg as *mut Message as usize,
/// MAX_MTEXT,
/// MTYPE_CLIENT,
/// 0,
/// );
/// assert!(ret.is_ok());
/// let recv_msg_len = ret.unwrap() as usize;
/// assert_eq!(recv_msg_len, msg_len);
/// let recv_text = core::str::from_utf8(&recv_msg.mtext[..recv_msg_len]);
/// assert!(recv_text.is_ok());
/// let recv_text = recv_text.unwrap();
/// assert_eq!(recv_text, msg);
/// println!("recv text: {}", recv_text);
///
/// let mut buf = nc::msqid_ds_t::default();
/// let ret = nc::msgctl(msq_id, nc::IPC_RMID, &mut buf);
/// assert!(ret.is_ok());
/// }
/// ```
pub fn msgsnd(msqid: i32, msgq: usize, msgsz: size_t, msgflg: i32) -> Result<(), Errno> {
    // `msgq` is already the raw address of the caller's message struct.
    syscall4(
        SYS_MSGSND,
        msqid as usize,
        msgq,
        msgsz as usize,
        msgflg as usize,
    )
    .map(drop)
}
/// Synchronize a file with memory map.
pub fn msync(addr: usize, len: size_t, flags: i32) -> Result<(), Errno> {
    syscall3(SYS_MSYNC, addr, len as usize, flags as usize).map(drop)
}
/// Unlock memory.
/// ```
/// let mut passwd_buf = [0_u8; 64];
/// let addr = passwd_buf.as_ptr() as usize;
/// let ret = nc::mlock2(addr, passwd_buf.len(), nc::MCL_CURRENT);
/// for i in 0..passwd_buf.len() {
/// passwd_buf[i] = i as u8;
/// }
/// assert!(ret.is_ok());
/// let ret = nc::munlock(addr, passwd_buf.len());
/// assert!(ret.is_ok());
/// ```
pub fn munlock(addr: usize, len: size_t) -> Result<(), Errno> {
    syscall2(SYS_MUNLOCK, addr, len as usize).map(drop)
}
/// Unlock memory.
/// ```
/// let ret = nc::mlockall(nc::MCL_CURRENT);
/// assert!(ret.is_ok());
/// let ret = nc::munlockall();
/// assert!(ret.is_ok());
/// ```
pub fn munlockall() -> Result<(), Errno> {
    // No arguments; discard the raw return value on success.
    let _ = syscall0(SYS_MUNLOCKALL)?;
    Ok(())
}
/// Unmap files or devices from memory.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// let mut sb = nc::stat_t::default();
/// let ret = nc::fstat(fd, &mut sb);
/// assert!(ret.is_ok());
///
/// let offset: usize = 0;
/// let length: usize = sb.st_size as usize - offset;
/// // Offset for mmap must be page aligned.
/// let pa_offset: usize = offset & !(nc::PAGE_SIZE - 1);
/// let map_length = length + offset - pa_offset;
///
/// let addr = nc::mmap(
/// 0, // 0 as NULL
/// map_length,
/// nc::PROT_READ,
/// nc::MAP_PRIVATE,
/// fd,
/// pa_offset as nc::off_t,
/// );
/// assert!(addr.is_ok());
/// let addr = addr.unwrap();
///
/// let n_write = nc::write(1, addr + offset - pa_offset, length);
/// assert!(n_write.is_ok());
/// assert_eq!(n_write, Ok(length as nc::ssize_t));
/// assert!(nc::munmap(addr, map_length).is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn munmap(addr: usize, len: size_t) -> Result<(), Errno> {
    syscall2(SYS_MUNMAP, addr, len as usize).map(drop)
}
/// Obtain handle for a filename
pub fn name_to_handle_at<P: AsRef<Path>>(
    dfd: i32,
    filename: P,
    handle: &mut file_handle_t,
    mount_id: &mut i32,
    flags: i32,
) -> Result<(), Errno> {
    // Keep the CString alive in a local until the syscall returns.
    let filename = CString::new(filename.as_ref());
    syscall5(
        SYS_NAME_TO_HANDLE_AT,
        dfd as usize,
        filename.as_ptr() as usize,
        handle as *mut file_handle_t as usize,
        mount_id as *mut i32 as usize,
        flags as usize,
    )
    .map(drop)
}
/// High resolution sleep.
/// ```
/// let t = nc::timespec_t {
/// tv_sec: 1,
/// tv_nsec: 0,
/// };
/// assert!(nc::nanosleep(&t, None).is_ok());
/// ```
pub fn nanosleep(req: &timespec_t, rem: Option<&mut timespec_t>) -> Result<(), Errno> {
    // `rem` is optional: `None` becomes a null pointer for callers that do
    // not want the remaining time written back.
    let rem_ptr = rem.map_or(0, |rem| rem as *mut timespec_t as usize);
    syscall2(SYS_NANOSLEEP, req as *const timespec_t as usize, rem_ptr).map(drop)
}
/// Syscall interface to kernel nfs daemon.
/// Deprecated.
pub fn nfsservctl() {
    // Deprecated syscall (see the doc comment above); this binding is
    // intentionally left unimplemented and will panic if called.
    core::unimplemented!();
    // syscall0(SYS_NFSSERVCTL);
}
/// Open and possibly create a file within a directory.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::openat(nc::AT_FDCWD, path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn openat<P: AsRef<Path>>(
    dirfd: i32,
    filename: P,
    flags: i32,
    mode: mode_t,
) -> Result<i32, Errno> {
    // Bind the CString to a local so the buffer outlives the syscall.
    let filename = CString::new(filename.as_ref());
    syscall4(
        SYS_OPENAT,
        dirfd as usize,
        filename.as_ptr() as usize,
        flags as usize,
        mode as usize,
    )
    .map(|fd| fd as i32)
}
/// Obtain handle for an open file
pub fn open_by_handle_at(
    mount_fd: i32,
    handle: &mut file_handle_t,
    flags: i32,
) -> Result<i32, Errno> {
    let handle_ptr = handle as *mut file_handle_t as usize;
    syscall3(
        SYS_OPEN_BY_HANDLE_AT,
        mount_fd as usize,
        handle_ptr,
        flags as usize,
    )
    .map(|fd| fd as i32)
}
/// Pick up a mount object and attach it to a new file descriptor.
pub fn open_tree<P: AsRef<Path>>(dfd: i32, filename: P, flags: u32) -> Result<i32, Errno> {
    // Bind the CString to a local so the buffer outlives the syscall.
    let filename = CString::new(filename.as_ref());
    syscall3(
        SYS_OPEN_TREE,
        dfd as usize,
        filename.as_ptr() as usize,
        flags as usize,
    )
    .map(|fd| fd as i32)
}
/// Set up performance monitoring.
pub fn perf_event_open(
    attr: &mut perf_event_attr_t,
    pid: pid_t,
    cpu: i32,
    group_fd: i32,
    flags: usize,
) -> Result<i32, Errno> {
    syscall5(
        SYS_PERF_EVENT_OPEN,
        attr as *mut perf_event_attr_t as usize,
        pid as usize,
        cpu as usize,
        group_fd as usize,
        flags,
    )
    .map(|fd| fd as i32)
}
/// Set the process execution domain.
pub fn personality(persona: u32) -> Result<u32, Errno> {
    // The raw return value is the previous persona on success.
    let prev = syscall1(SYS_PERSONALITY, persona as usize)?;
    Ok(prev as u32)
}
/// Obtain a file descriptor that refers to a process.
///
/// NOTE(review): this binding is not implemented yet and will panic if called.
pub fn pidfd_open() {
    core::unimplemented!();
    // syscall0(SYS_PIDFD_OPEN);
}
/// sys_pidfd_send_signal - Signal a process through a pidfd
/// @pidfd: file descriptor of the process
/// @sig: signal to send
/// @info: signal info
/// @flags: future flags
///
/// The syscall currently only signals via PIDTYPE_PID which covers
/// kill(<positive-pid>, <signal>. It does not signal threads or process
/// groups.
/// In order to extend the syscall to threads and process groups the @flags
/// argument should be used. In essence, the @flags argument will determine
/// what is signaled and not the file descriptor itself. Put in other words,
/// grouping is a property of the flags argument not a property of the file
/// descriptor.
///
/// Return: 0 on success, negative errno on failure
pub fn pidfd_send_signal(
    pidfd: i32,
    sig: i32,
    info: &mut siginfo_t,
    flags: u32,
) -> Result<(), Errno> {
    syscall4(
        SYS_PIDFD_SEND_SIGNAL,
        pidfd as usize,
        sig as usize,
        info as *mut siginfo_t as usize,
        flags as usize,
    )
    .map(drop)
}
/// Create a pipe.
/// ```
/// let mut fds = [-1_i32, 2];
/// let ret = nc::pipe2(&mut fds, nc::O_CLOEXEC | nc::O_NONBLOCK);
/// assert!(ret.is_ok());
/// assert!(nc::close(fds[0]).is_ok());
/// assert!(nc::close(fds[1]).is_ok());
/// ```
pub fn pipe2(pipefd: &mut [i32; 2], flags: i32) -> Result<(), Errno> {
    // The kernel writes the read/write fds into the two-element array.
    syscall2(SYS_PIPE2, pipefd.as_mut_ptr() as usize, flags as usize).map(drop)
}
/// Change the root filesystem.
pub fn pivot_root<P: AsRef<Path>>(new_root: P, put_old: P) -> Result<(), Errno> {
    // Both path buffers must stay alive as C strings across the call.
    let new_root = CString::new(new_root.as_ref());
    let put_old = CString::new(put_old.as_ref());
    syscall2(
        SYS_PIVOT_ROOT,
        new_root.as_ptr() as usize,
        put_old.as_ptr() as usize,
    )
    .map(drop)
}
/// Create a new protection key.
pub fn pkey_alloc(flags: usize, init_val: usize) -> Result<i32, Errno> {
    // On success the raw return value is the new protection key.
    let pkey = syscall2(SYS_PKEY_ALLOC, flags, init_val)?;
    Ok(pkey as i32)
}
/// Free a protection key.
pub fn pkey_free(pkey: i32) -> Result<(), Errno> {
    syscall1(SYS_PKEY_FREE, pkey as usize).map(drop)
}
/// Set protection on a region of memory.
pub fn pkey_mprotect(start: usize, len: size_t, prot: usize, pkey: i32) -> Result<(), Errno> {
    syscall4(SYS_PKEY_MPROTECT, start, len as usize, prot, pkey as usize).map(drop)
}
/// Wait for some event on a file descriptor.
pub fn ppoll(
    fds: &mut pollfd_t,
    nfds: i32,
    timeout: &timespec_t,
    sigmask: &sigset_t,
    sigsetsize: size_t,
) -> Result<i32, Errno> {
    syscall5(
        SYS_PPOLL,
        fds as *mut pollfd_t as usize,
        nfds as usize,
        timeout as *const timespec_t as usize,
        sigmask as *const sigset_t as usize,
        sigsetsize as usize,
    )
    .map(|ret| ret as i32)
}
/// Operations on a process.
pub fn prctl(
    option: i32,
    arg2: usize,
    arg3: usize,
    arg4: usize,
    arg5: usize,
) -> Result<i32, Errno> {
    // arg2..arg5 are already machine words; only `option` needs widening.
    syscall5(SYS_PRCTL, option as usize, arg2, arg3, arg4, arg5).map(|ret| ret as i32)
}
/// Read from a file descriptor without changing file offset.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [0_u8; 128];
/// let read_count = 64;
/// let ret = nc::pread64(fd, buf.as_mut_ptr() as usize, read_count, 0);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(read_count as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn pread64(fd: i32, buf: usize, count: usize, offset: off_t) -> Result<ssize_t, Errno> {
    syscall4(SYS_PREAD64, fd as usize, buf, count, offset as usize).map(|n| n as ssize_t)
}
/// Read from a file descriptor without changing file offset.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [[0_u8; 64]; 4];
/// let capacity = 4 * 64;
/// let mut iov = Vec::with_capacity(buf.len());
/// for ref mut item in (&mut buf).iter() {
/// iov.push(nc::iovec_t {
/// iov_len: item.len(),
/// iov_base: item.as_ptr() as usize,
/// });
/// }
/// let iov_len = iov.len();
/// let ret = nc::preadv(fd, &mut iov, 0, iov_len - 1);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn preadv(fd: i32, vec: &mut [iovec_t], pos_l: usize, pos_h: usize) -> Result<ssize_t, Errno> {
    // Pass the iovec array as (pointer, element count).
    let vec_ptr = vec.as_mut_ptr() as usize;
    syscall5(SYS_PREADV, fd as usize, vec_ptr, vec.len(), pos_l, pos_h).map(|n| n as ssize_t)
}
/// Read from a file descriptor without changing file offset.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [[0_u8; 64]; 4];
/// let capacity = 4 * 64;
/// let mut iov = Vec::with_capacity(buf.len());
/// for ref mut item in (&mut buf).iter() {
/// iov.push(nc::iovec_t {
/// iov_len: item.len(),
/// iov_base: item.as_ptr() as usize,
/// });
/// }
/// let iov_len = iov.len();
/// let flags = 0;
/// let ret = nc::preadv2(fd, &mut iov, 0, iov_len - 1, flags);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn preadv2(
    fd: i32,
    vec: &mut [iovec_t],
    pos_l: usize,
    pos_h: usize,
    flags: rwf_t,
) -> Result<ssize_t, Errno> {
    // Pass the iovec array as (pointer, element count).
    let vec_ptr = vec.as_mut_ptr() as usize;
    syscall6(
        SYS_PREADV2,
        fd as usize,
        vec_ptr,
        vec.len(),
        pos_l,
        pos_h,
        flags as usize,
    )
    .map(|n| n as ssize_t)
}
/// Get/set the resource limits of an arbitary process.
/// ```
/// let mut old_limit = nc::rlimit64_t::default();
/// let ret = nc::prlimit64(nc::getpid(), nc::RLIMIT_NOFILE, None, Some(&mut old_limit));
/// assert!(ret.is_ok());
/// assert!(old_limit.rlim_cur > 0);
/// assert!(old_limit.rlim_max > 0);
/// ```
pub fn prlimit64(
    pid: pid_t,
    resource: i32,
    new_limit: Option<&rlimit64_t>,
    old_limit: Option<&mut rlimit64_t>,
) -> Result<(), Errno> {
    // `None` becomes a null pointer (0), skipping the respective direction.
    let new_limit_ptr = new_limit.map_or(0, |limit| limit as *const rlimit64_t as usize);
    let old_limit_ptr = old_limit.map_or(0, |limit| limit as *mut rlimit64_t as usize);
    syscall4(
        SYS_PRLIMIT64,
        pid as usize,
        resource as usize,
        new_limit_ptr,
        old_limit_ptr,
    )
    .map(drop)
}
/// Transfer data between process address spaces
pub fn process_vm_readv(
    pid: pid_t,
    lvec: &[iovec_t],
    rvec: &[iovec_t],
    flags: i32,
) -> Result<ssize_t, Errno> {
    // Each iovec array is passed as (pointer, element count).
    syscall6(
        SYS_PROCESS_VM_READV,
        pid as usize,
        lvec.as_ptr() as usize,
        lvec.len(),
        rvec.as_ptr() as usize,
        rvec.len(),
        flags as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Transfer data between process address spaces
pub fn process_vm_writev(
    pid: pid_t,
    lvec: &[iovec_t],
    rvec: &[iovec_t],
    flags: i32,
) -> Result<ssize_t, Errno> {
    // Each iovec array is passed as (pointer, element count).
    syscall6(
        SYS_PROCESS_VM_WRITEV,
        pid as usize,
        lvec.as_ptr() as usize,
        lvec.len(),
        rvec.as_ptr() as usize,
        rvec.len(),
        flags as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Sychronous I/O multiplexing.
/// Most architectures can't handle 7-argument syscalls. So we provide a
/// 6-argument version where the sixth argument is a pointer to a structure
/// which has a pointer to the sigset_t itself followed by a size_t containing
/// the sigset size.
pub fn pselect6(
    nfds: i32,
    readfds: &mut fd_set_t,
    writefds: &mut fd_set_t,
    exceptfds: &mut fd_set_t,
    timeout: &timespec_t,
    sigmask: &sigset_t,
) -> Result<i32, Errno> {
    syscall6(
        SYS_PSELECT6,
        nfds as usize,
        readfds as *mut fd_set_t as usize,
        writefds as *mut fd_set_t as usize,
        exceptfds as *mut fd_set_t as usize,
        timeout as *const timespec_t as usize,
        sigmask as *const sigset_t as usize,
    )
    .map(|ret| ret as i32)
}
/// Process trace.
pub fn ptrace(request: i32, pid: pid_t, addr: usize, data: usize) -> Result<isize, Errno> {
    syscall4(SYS_PTRACE, request as usize, pid as usize, addr, data).map(|ret| ret as isize)
}
/// Write to a file descriptor without changing file offset.
/// ```
/// let path = "/tmp/nc-pwrite64";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let buf = "Hello, Rust";
/// let ret = nc::pwrite64(fd, buf.as_ptr() as usize, buf.len(), 0);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(buf.len() as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn pwrite64(fd: i32, buf: usize, count: size_t, offset: off_t) -> Result<ssize_t, Errno> {
    syscall4(SYS_PWRITE64, fd as usize, buf, count as usize, offset as usize)
        .map(|n| n as ssize_t)
}
/// Write to a file descriptor without changing file offset.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [[0_u8; 64]; 4];
/// let capacity = 4 * 64;
/// let mut iov = Vec::with_capacity(buf.len());
/// for ref mut item in (&mut buf).iter() {
/// iov.push(nc::iovec_t {
/// iov_len: item.len(),
/// iov_base: item.as_ptr() as usize,
/// });
/// }
/// let ret = nc::readv(fd, &mut iov);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
///
/// let path_out = "/tmp/nc-pwritev";
/// let ret = nc::open(path_out, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::pwritev(fd, &iov, 0, iov.len() - 1);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path_out).is_ok());
/// ```
pub fn pwritev(fd: i32, vec: &[iovec_t], pos_l: usize, pos_h: usize) -> Result<ssize_t, Errno> {
    let iov_ptr = vec.as_ptr() as usize;
    syscall5(SYS_PWRITEV, fd as usize, iov_ptr, vec.len(), pos_l, pos_h).map(|ret| ret as ssize_t)
}
/// Write to a file descriptor without changing file offset.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [[0_u8; 64]; 4];
/// let capacity = 4 * 64;
/// let mut iov = Vec::with_capacity(buf.len());
/// for ref mut item in (&mut buf).iter() {
/// iov.push(nc::iovec_t {
/// iov_len: item.len(),
/// iov_base: item.as_ptr() as usize,
/// });
/// }
/// let ret = nc::readv(fd, &mut iov);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
///
/// let path_out = "/tmp/nc-pwritev2";
/// let ret = nc::open(path_out, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let flags = nc::RWF_DSYNC | nc::RWF_APPEND;
/// let ret = nc::pwritev2(fd, &iov, 0, iov.len() - 1, flags);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path_out).is_ok());
/// ```
pub fn pwritev2(
    fd: i32,
    vec: &[iovec_t],
    pos_l: usize,
    pos_h: usize,
    flags: rwf_t,
) -> Result<ssize_t, Errno> {
    let iov_ptr = vec.as_ptr() as usize;
    syscall6(
        SYS_PWRITEV2,
        fd as usize,
        iov_ptr,
        vec.len(),
        pos_l,
        pos_h,
        flags as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Manipulate disk quotes.
pub fn quotactl<P: AsRef<Path>>(cmd: i32, special: P, id: qid_t, addr: usize) -> Result<(), Errno> {
    // Keep the CString in a local binding so its pointer stays valid for the call.
    let special = CString::new(special.as_ref());
    syscall4(
        SYS_QUOTACTL,
        cmd as usize,
        special.as_ptr() as usize,
        id as usize,
        addr,
    )
    .map(drop)
}
/// Read from a file descriptor.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::openat(nc::AT_FDCWD, path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [0_u8; 4 * 1024];
/// let ret = nc::read(fd, buf.as_mut_ptr() as usize, buf.len());
/// assert!(ret.is_ok());
/// let n_read = ret.unwrap();
/// assert!(n_read <= buf.len() as nc::ssize_t);
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn read(fd: i32, buf_ptr: usize, count: size_t) -> Result<ssize_t, Errno> {
    // `buf_ptr` must point at a writable buffer of at least `count` bytes.
    syscall3(SYS_READ, fd as usize, buf_ptr, count).map(|ret| ret as ssize_t)
}
/// Preload the beginning of a file into the page cache.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// let fd = ret.unwrap();
/// let ret = nc::readahead(fd, 0, 64);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn readahead(fd: i32, offset: off_t, count: size_t) -> Result<(), Errno> {
    syscall3(SYS_READAHEAD, fd as usize, offset as usize, count as usize).map(drop)
}
/// Read value of a symbolic link.
/// ```
/// let oldname = "/etc/passwd";
/// let newname = "/tmp/nc-readlinkat";
/// let ret = nc::symlink(oldname, newname);
/// assert!(ret.is_ok());
/// let mut buf = [0_u8; nc::PATH_MAX as usize];
/// let ret = nc::readlinkat(nc::AT_FDCWD, newname, &mut buf);
/// assert!(ret.is_ok());
/// let n_read = ret.unwrap() as usize;
/// assert_eq!(n_read, oldname.len());
/// assert_eq!(oldname.as_bytes(), &buf[0..n_read]);
/// assert!(nc::unlink(newname).is_ok());
/// ```
pub fn readlinkat<P: AsRef<Path>>(
    dirfd: i32,
    filename: P,
    buf: &mut [u8],
) -> Result<ssize_t, Errno> {
    // Named binding keeps the CString alive across the syscall.
    let filename = CString::new(filename.as_ref());
    syscall4(
        SYS_READLINKAT,
        dirfd as usize,
        filename.as_ptr() as usize,
        buf.as_mut_ptr() as usize,
        buf.len(),
    )
    .map(|ret| ret as ssize_t)
}
/// Read from a file descriptor into multiple buffers.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [[0_u8; 64]; 4];
/// let capacity = 4 * 64;
/// let mut iov = Vec::with_capacity(buf.len());
/// for ref mut item in (&mut buf).iter() {
/// // TODO(Shaohua): Replace with as_mut_ptr()
/// iov.push(nc::iovec_t {
/// iov_len: item.len(),
/// iov_base: item.as_ptr() as usize,
/// });
/// }
/// let ret = nc::readv(fd, &mut iov);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn readv(fd: i32, iov: &mut [iovec_t]) -> Result<ssize_t, Errno> {
    syscall3(SYS_READV, fd as usize, iov.as_mut_ptr() as usize, iov.len())
        .map(|ret| ret as ssize_t)
}
/// Reboot or enable/disable Ctrl-Alt-Del.
/// ```
/// let ret = nc::reboot(nc::LINUX_REBOOT_MAGIC1, nc::LINUX_REBOOT_MAGIC2,
/// nc::LINUX_REBOOT_CMD_RESTART, 0);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
/// Both `magic` and `magic2` must match the LINUX_REBOOT_MAGIC* constants or
/// the kernel rejects the call with EINVAL.
pub fn reboot(magic: i32, magic2: i32, cmd: u32, arg: usize) -> Result<(), Errno> {
    // Fixed parameter-name typo (`magci2` -> `magic2`); behavior unchanged.
    let magic = magic as usize;
    let magic2 = magic2 as usize;
    let cmd = cmd as usize;
    syscall4(SYS_REBOOT, magic, magic2, cmd, arg).map(drop)
}
/// Receive a message from a socket.
pub fn recvfrom(
    sockfd: i32,
    buf: &mut [u8],
    flags: i32,
    src_addr: &mut sockaddr_in_t,
    addrlen: &mut socklen_t,
) -> Result<ssize_t, Errno> {
    // The kernel writes the sender's address into `src_addr` and the actual
    // address length into `addrlen`.
    syscall6(
        SYS_RECVFROM,
        sockfd as usize,
        buf.as_mut_ptr() as usize,
        buf.len(),
        flags as usize,
        src_addr as *mut sockaddr_in_t as usize,
        addrlen as *mut socklen_t as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Receive multiple messages on a socket.
pub fn recvmmsg(
    sockfd: i32,
    msgvec: &mut [mmsghdr_t],
    flags: i32,
    timeout: &mut timespec_t,
) -> Result<i32, Errno> {
    // `as_mut_ptr()` yields the same address as the original double cast
    // through `*mut [mmsghdr_t]`.
    syscall5(
        SYS_RECVMMSG,
        sockfd as usize,
        msgvec.as_mut_ptr() as usize,
        msgvec.len(),
        flags as usize,
        timeout as *mut timespec_t as usize,
    )
    .map(|ret| ret as i32)
}
/// Receive a msg from a socket.
pub fn recvmsg(sockfd: i32, msg: &mut msghdr_t, flags: i32) -> Result<ssize_t, Errno> {
    let msg_ptr = msg as *mut msghdr_t as usize;
    syscall3(SYS_RECVMSG, sockfd as usize, msg_ptr, flags as usize).map(|ret| ret as ssize_t)
}
/// Create a nonlinear file mapping.
/// Deprecated.
pub fn remap_file_pages(
    start: usize,
    size: size_t,
    prot: i32,
    pgoff: off_t,
    flags: i32,
) -> Result<(), Errno> {
    syscall5(
        SYS_REMAP_FILE_PAGES,
        start,
        size as usize,
        prot as usize,
        pgoff as usize,
        flags as usize,
    )
    .map(drop)
}
/// Remove an extended attribute.
/// ```
/// let path = "/tmp/nc-removexattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
/// path,
/// &attr_name,
/// attr_value.as_ptr() as usize,
/// attr_value.len(),
/// flags,
/// );
/// assert!(ret.is_ok());
/// let ret = nc::removexattr(path, attr_name);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn removexattr<P: AsRef<Path>>(filename: P, name: P) -> Result<(), Errno> {
    // Both CStrings stay bound to locals so their pointers remain valid.
    let filename = CString::new(filename.as_ref());
    let name = CString::new(name.as_ref());
    syscall2(
        SYS_REMOVEXATTR,
        filename.as_ptr() as usize,
        name.as_ptr() as usize,
    )
    .map(drop)
}
/// Change name or location of a file.
/// ```
/// let path = "/tmp/nc-renameat";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let new_path = "/tmp/nc-renameat-new";
/// let ret = nc::renameat(nc::AT_FDCWD, path, nc::AT_FDCWD, new_path);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(new_path).is_ok());
/// ```
pub fn renameat<P: AsRef<Path>>(
olddfd: i32,
oldfilename: P,
newdfd: i32,
newfilename: P,
) -> Result<(), Errno> |
/// Change name or location of a file.
/// ```
/// let path = "/tmp/nc-renameat2";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let new_path = "/tmp/nc-renameat2-new";
/// let flags = nc::RENAME_NOREPLACE;
/// let ret = nc::renameat2(nc::AT_FDCWD, path, nc::AT_FDCWD, new_path, flags);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(new_path).is_ok());
/// ```
pub fn renameat2<P: AsRef<Path>>(
    olddfd: i32,
    oldfilename: P,
    newdfd: i32,
    newfilename: P,
    flags: i32,
) -> Result<(), Errno> {
    let old_name = CString::new(oldfilename.as_ref());
    let new_name = CString::new(newfilename.as_ref());
    syscall5(
        SYS_RENAMEAT2,
        olddfd as usize,
        old_name.as_ptr() as usize,
        newdfd as usize,
        new_name.as_ptr() as usize,
        flags as usize,
    )
    .map(drop)
}
/// Request a key from kernel's key management facility.
pub fn request_key<P: AsRef<Path>>(
    type_: P,
    description: P,
    callout_info: P,
    dest_keyring: key_serial_t,
) -> Result<key_serial_t, Errno> {
    // All three strings are kept alive in locals until the syscall returns.
    let key_type = CString::new(type_.as_ref());
    let description = CString::new(description.as_ref());
    let callout_info = CString::new(callout_info.as_ref());
    syscall4(
        SYS_REQUEST_KEY,
        key_type.as_ptr() as usize,
        description.as_ptr() as usize,
        callout_info.as_ptr() as usize,
        dest_keyring as usize,
    )
    .map(|ret| ret as key_serial_t)
}
/// Restart a system call after interruption by a stop signal.
pub fn restart_syscall() -> Result<i32, Errno> {
    // No arguments: the kernel resumes the interrupted call's saved state.
    syscall0(SYS_RESTART_SYSCALL).map(|ret| ret as i32)
}
/// Setup restartable sequences for caller thread.
pub fn rseq(rseq: &mut [rseq_t], flags: i32, sig: u32) -> Result<i32, Errno> {
    syscall4(
        SYS_RSEQ,
        rseq.as_mut_ptr() as usize,
        rseq.len(),
        flags as usize,
        sig as usize,
    )
    .map(|ret| ret as i32)
}
/// Examine and change a signal action.
/// ```
/// use std::mem::size_of;
///
/// fn handle_sigterm(sig: i32) {
/// assert_eq!(sig, nc::SIGTERM);
/// }
///
/// let sa = nc::sigaction_t {
/// sa_handler: handle_sigterm as nc::sighandler_t,
/// sa_mask: nc::SA_RESTART | nc::SA_SIGINFO | nc::SA_ONSTACK,
/// ..nc::sigaction_t::default()
/// };
/// let mut old_sa = nc::sigaction_t::default();
/// let ret = nc::rt_sigaction(nc::SIGTERM, &sa, &mut old_sa, size_of::<nc::sigset_t>());
/// let ret = nc::kill(nc::getpid(), nc::SIGTERM);
/// assert!(ret.is_ok());
/// ```
pub fn rt_sigaction(
    sig: i32,
    act: &sigaction_t,
    old_act: &mut sigaction_t,
    sigsetsize: size_t,
) -> Result<(), Errno> {
    // The previous disposition is written back through `old_act`.
    syscall4(
        SYS_RT_SIGACTION,
        sig as usize,
        act as *const sigaction_t as usize,
        old_act as *mut sigaction_t as usize,
        sigsetsize as usize,
    )
    .map(drop)
}
/// Examine pending signals.
pub fn rt_sigpending(set: &mut [sigset_t]) -> Result<(), Errno> {
    syscall1(SYS_RT_SIGPENDING, set.as_mut_ptr() as usize).map(drop)
}
/// Change the list of currently blocked signals.
pub fn rt_sigprocmask(how: i32, set: &sigset_t, oldset: &mut sigset_t) -> Result<(), Errno> {
    syscall3(
        SYS_RT_SIGPROCMASK,
        how as usize,
        set as *const sigset_t as usize,
        oldset as *mut sigset_t as usize,
    )
    .map(drop)
}
/// Queue a signal and data.
pub fn rt_sigqueueinfo(pid: pid_t, sig: i32, uinfo: &mut siginfo_t) -> Result<(), Errno> {
    let uinfo_ptr = uinfo as *mut siginfo_t as usize;
    syscall3(SYS_RT_SIGQUEUEINFO, pid as usize, sig as usize, uinfo_ptr).map(drop)
}
/// Return from signal handler and cleanup stack frame.
/// Never returns.
pub fn rt_sigreturn() {
    // Result intentionally discarded: on success control never returns here.
    let _ = syscall0(SYS_RT_SIGRETURN);
}
/// Wait for a signal.
/// Always returns Errno, normally EINTR.
pub fn rt_sigsuspend(set: &mut sigset_t, sigsetsize: size_t) -> Result<(), Errno> {
    syscall2(
        SYS_RT_SIGSUSPEND,
        set as *mut sigset_t as usize,
        sigsetsize as usize,
    )
    .map(drop)
}
/// Synchronously wait for queued signals.
pub fn rt_sigtimedwait(
    uthese: &sigset_t,
    uinfo: &mut siginfo_t,
    uts: &timespec_t,
    sigsetsize: size_t,
) -> Result<i32, Errno> {
    // On success the return value is the delivered signal number.
    syscall4(
        SYS_RT_SIGTIMEDWAIT,
        uthese as *const sigset_t as usize,
        uinfo as *mut siginfo_t as usize,
        uts as *const timespec_t as usize,
        sigsetsize as usize,
    )
    .map(|sig| sig as i32)
}
/// Queue a signal and data.
pub fn rt_tgsigqueueinfo(
    tgid: pid_t,
    tid: pid_t,
    sig: i32,
    uinfo: &mut siginfo_t,
) -> Result<(), Errno> {
    let uinfo_ptr = uinfo as *mut siginfo_t as usize;
    syscall4(
        SYS_RT_TGSIGQUEUEINFO,
        tgid as usize,
        tid as usize,
        sig as usize,
        uinfo_ptr,
    )
    .map(drop)
}
/// Get a thread's CPU affinity mask.
/// ```
/// use core::mem::size_of;
///
/// const SET_BITS: usize = 16;
/// #[repr(C)]
/// #[derive(Debug, Clone, Copy, PartialEq)]
/// struct CPUSet {
/// pub bits: [usize; SET_BITS],
/// }
///
/// impl Default for CPUSet {
/// fn default() -> Self {
/// CPUSet {
/// bits: [0; SET_BITS],
/// }
/// }
/// }
///
/// impl CPUSet {
/// #[inline]
/// pub const fn size() -> usize {
/// SET_BITS * size_of::<usize>()
/// }
///
/// #[inline]
/// pub const fn bits_size() -> usize {
/// CPUSet::size() * 8
/// }
///
/// pub fn set(&mut self, pos: usize) -> Result<(), nc::Errno> {
/// if pos >= CPUSet::bits_size() {
/// return Err(nc::EINVAL);
/// }
/// let bit_pos = pos / 8 / size_of::<usize>();
/// self.bits[bit_pos] |= 1 << (pos % (8 * size_of::<usize>()));
/// Ok(())
/// }
///
/// pub fn clear(&mut self, pos: usize) -> Result<(), nc::Errno> {
/// if pos >= CPUSet::bits_size() {
/// return Err(nc::EINVAL);
/// }
/// let bit_pos = pos / 8 / size_of::<usize>();
/// self.bits[bit_pos] &= !(1 << (pos % (8 * size_of::<usize>())));
/// Ok(())
/// }
///
/// pub fn is_set(&self, pos: usize) -> Result<bool, nc::Errno> {
/// if pos >= CPUSet::bits_size() {
/// return Err(nc::EINVAL);
/// }
/// let bit_pos = pos / 8 / size_of::<usize>();
/// let ret = self.bits[bit_pos] & (1 << (pos % (8 * size_of::<usize>())));
///
/// Ok(ret != 0)
/// }
///
/// pub fn as_ptr(&self) -> &[usize] {
/// &self.bits
/// }
///
/// pub fn as_mut_ptr(&mut self) -> &mut [usize] {
/// &mut self.bits
/// }
/// }
///
/// fn main() {
/// let mut set = CPUSet::default();
/// assert!(set.set(1).is_ok());
/// println!("set(1): {:?}", set.is_set(1));
/// assert!(set.set(2).is_ok());
/// assert!(set.clear(2).is_ok());
/// println!("set(2): {:?}", set.is_set(2));
///
/// let ret = nc::sched_setaffinity(0, CPUSet::size(), set.as_ptr());
/// assert!(ret.is_ok());
///
/// let mut set2 = CPUSet::default();
/// let ret = nc::sched_getaffinity(0, CPUSet::size(), set2.as_mut_ptr());
/// assert!(ret.is_ok());
/// assert_eq!(set, set2);
/// }
/// ```
pub fn sched_getaffinity(pid: pid_t, len: usize, user_mask: &mut [usize]) -> Result<(), Errno> {
    // `len` is the mask size in bytes as seen by the kernel.
    let mask_ptr = user_mask.as_mut_ptr() as usize;
    syscall3(SYS_SCHED_GETAFFINITY, pid as usize, len, mask_ptr).map(drop)
}
/// Get scheduling policy and attributes
pub fn sched_getattr(
    pid: pid_t,
    attr: &mut sched_attr_t,
    size: u32,
    flags: u32,
) -> Result<(), Errno> {
    let attr_ptr = attr as *mut sched_attr_t as usize;
    syscall4(
        SYS_SCHED_GETATTR,
        pid as usize,
        attr_ptr,
        size as usize,
        flags as usize,
    )
    .map(drop)
}
/// Get scheduling parameters.
/// ```
/// let mut param = nc::sched_param_t::default();
/// let ret = nc::sched_getparam(0, &mut param);
/// assert!(ret.is_ok());
/// assert_eq!(param.sched_priority, 0);
/// ```
pub fn sched_getparam(pid: pid_t, param: &mut sched_param_t) -> Result<(), Errno> {
    let param_ptr = param as *mut sched_param_t as usize;
    syscall2(SYS_SCHED_GETPARAM, pid as usize, param_ptr).map(drop)
}
/// Get scheduling parameter.
/// ```
/// let ret = nc::sched_getscheduler(0);
/// assert_eq!(ret, Ok(nc::SCHED_NORMAL));
/// ```
pub fn sched_getscheduler(pid: pid_t) -> Result<i32, Errno> {
    syscall1(SYS_SCHED_GETSCHEDULER, pid as usize).map(|policy| policy as i32)
}
/// Get static priority max value.
/// ```
/// let ret = nc::sched_get_priority_max(nc::SCHED_RR);
/// assert!(ret.is_ok());
/// let max_prio = ret.unwrap();
/// assert_eq!(max_prio, 99);
/// ```
pub fn sched_get_priority_max(policy: i32) -> Result<i32, Errno> {
    syscall1(SYS_SCHED_GET_PRIORITY_MAX, policy as usize).map(|prio| prio as i32)
}
/// Get static priority min value.
/// ```
/// let ret = nc::sched_get_priority_min(nc::SCHED_RR);
/// assert!(ret.is_ok());
/// let min_prio = ret.unwrap();
/// assert_eq!(min_prio, 1);
/// ```
pub fn sched_get_priority_min(policy: i32) -> Result<i32, Errno> {
    syscall1(SYS_SCHED_GET_PRIORITY_MIN, policy as usize).map(|prio| prio as i32)
}
/// Get the SCHED_RR interval for the named process.
/// ```
/// let mut ts = nc::timespec_t::default();
/// let ret = nc::sched_rr_get_interval(0, &mut ts);
/// assert!(ret.is_ok());
/// ```
pub fn sched_rr_get_interval(pid: pid_t, interval: &mut timespec_t) -> Result<(), Errno> {
    let interval_ptr = interval as *mut timespec_t as usize;
    syscall2(SYS_SCHED_RR_GET_INTERVAL, pid as usize, interval_ptr).map(drop)
}
/// Set a thread's CPU affinity mask.
/// ```
/// use core::mem::size_of;
///
/// const SET_BITS: usize = 16;
/// #[repr(C)]
/// #[derive(Debug, Clone, Copy, PartialEq)]
/// struct CPUSet {
/// pub bits: [usize; SET_BITS],
/// }
///
/// impl Default for CPUSet {
/// fn default() -> Self {
/// CPUSet {
/// bits: [0; SET_BITS],
/// }
/// }
/// }
///
/// impl CPUSet {
/// #[inline]
/// pub const fn size() -> usize {
/// SET_BITS * size_of::<usize>()
/// }
///
/// #[inline]
/// pub const fn bits_size() -> usize {
/// CPUSet::size() * 8
/// }
///
/// pub fn set(&mut self, pos: usize) -> Result<(), nc::Errno> {
/// if pos >= CPUSet::bits_size() {
/// return Err(nc::EINVAL);
/// }
/// let bit_pos = pos / 8 / size_of::<usize>();
/// self.bits[bit_pos] |= 1 << (pos % (8 * size_of::<usize>()));
/// Ok(())
/// }
///
/// pub fn clear(&mut self, pos: usize) -> Result<(), nc::Errno> {
/// if pos >= CPUSet::bits_size() {
/// return Err(nc::EINVAL);
/// }
/// let bit_pos = pos / 8 / size_of::<usize>();
/// self.bits[bit_pos] &= !(1 << (pos % (8 * size_of::<usize>())));
/// Ok(())
/// }
///
/// pub fn is_set(&self, pos: usize) -> Result<bool, nc::Errno> {
/// if pos >= CPUSet::bits_size() {
/// return Err(nc::EINVAL);
/// }
/// let bit_pos = pos / 8 / size_of::<usize>();
/// let ret = self.bits[bit_pos] & (1 << (pos % (8 * size_of::<usize>())));
///
/// Ok(ret != 0)
/// }
///
/// pub fn as_ptr(&self) -> &[usize] {
/// &self.bits
/// }
///
/// pub fn as_mut_ptr(&mut self) -> &mut [usize] {
/// &mut self.bits
/// }
/// }
///
/// fn main() {
/// let mut set = CPUSet::default();
/// assert!(set.set(1).is_ok());
/// println!("set(1): {:?}", set.is_set(1));
/// assert!(set.set(2).is_ok());
/// assert!(set.clear(2).is_ok());
/// println!("set(2): {:?}", set.is_set(2));
///
/// let ret = nc::sched_setaffinity(0, CPUSet::size(), set.as_ptr());
/// assert!(ret.is_ok());
///
/// let mut set2 = CPUSet::default();
/// let ret = nc::sched_getaffinity(0, CPUSet::size(), set2.as_mut_ptr());
/// assert!(ret.is_ok());
/// assert_eq!(set, set2);
/// }
/// ```
pub fn sched_setaffinity(pid: pid_t, len: usize, user_mask: &[usize]) -> Result<(), Errno> {
    // `len` is the mask size in bytes as seen by the kernel.
    let mask_ptr = user_mask.as_ptr() as usize;
    syscall3(SYS_SCHED_SETAFFINITY, pid as usize, len, mask_ptr).map(drop)
}
/// Set the RT priority of a thread.
pub fn sched_setattr(pid: pid_t, attr: &mut sched_attr_t, flags: u32) -> Result<(), Errno> {
    let attr_ptr = attr as *mut sched_attr_t as usize;
    syscall3(SYS_SCHED_SETATTR, pid as usize, attr_ptr, flags as usize).map(drop)
}
/// Set scheduling parameters.
/// ```
/// // This call always returns error because default scheduler is SCHED_NORMAL.
/// // We shall call sched_setscheduler() and change to realtime policy
/// // like SCHED_RR or SCHED_FIFO.
/// let sched_param = nc::sched_param_t { sched_priority: 12 };
/// let ret = nc::sched_setparam(0, &sched_param);
/// assert_eq!(ret, Err(nc::EINVAL));
/// ```
pub fn sched_setparam(pid: pid_t, param: &sched_param_t) -> Result<(), Errno> {
    let param_ptr = param as *const sched_param_t as usize;
    syscall2(SYS_SCHED_SETPARAM, pid as usize, param_ptr).map(drop)
}
/// Set scheduling parameter.
/// ```
/// let sched_param = nc::sched_param_t { sched_priority: 12 };
/// let ret = nc::sched_setscheduler(0, nc::SCHED_RR, &sched_param);
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn sched_setscheduler(pid: pid_t, policy: i32, param: &sched_param_t) -> Result<(), Errno> {
    let param_ptr = param as *const sched_param_t as usize;
    syscall3(SYS_SCHED_SETSCHEDULER, pid as usize, policy as usize, param_ptr).map(drop)
}
/// Yield the processor.
/// ```
/// assert!(nc::sched_yield().is_ok());
/// ```
pub fn sched_yield() -> Result<(), Errno> {
    // Takes no arguments; the Result is kept for interface uniformity.
    syscall0(SYS_SCHED_YIELD).map(drop)
}
/// Operate on Secure Computing state of the process.
pub fn seccomp(operation: u32, flags: u32, args: usize) -> Result<(), Errno> {
    syscall3(SYS_SECCOMP, operation as usize, flags as usize, args).map(drop)
}
/// System V semaphore control operations
pub fn semctl(semid: i32, semnum: i32, cmd: i32, arg: usize) -> Result<i32, Errno> {
    syscall4(SYS_SEMCTL, semid as usize, semnum as usize, cmd as usize, arg).map(|ret| ret as i32)
}
/// Get a System V semaphore set identifier.
pub fn semget(key: key_t, nsems: i32, semflg: i32) -> Result<i32, Errno> {
    syscall3(SYS_SEMGET, key as usize, nsems as usize, semflg as usize).map(|ret| ret as i32)
}
/// System V semaphore operations.
pub fn semop(semid: i32, sops: &mut [sembuf_t]) -> Result<(), Errno> {
    // Same address as the original `as_ptr()`, spelled via the mutable accessor.
    syscall3(SYS_SEMOP, semid as usize, sops.as_mut_ptr() as usize, sops.len()).map(drop)
}
/// System V semaphore operations
pub fn semtimedop(semid: i32, sops: &mut [sembuf_t], timeout: &timespec_t) -> Result<(), Errno> {
    let timeout_ptr = timeout as *const timespec_t as usize;
    syscall4(
        SYS_SEMTIMEDOP,
        semid as usize,
        sops.as_mut_ptr() as usize,
        sops.len(),
        timeout_ptr,
    )
    .map(drop)
}
/// Transfer data between two file descriptors.
pub fn sendfile(
    out_fd: i32,
    in_fd: i32,
    offset: &mut off_t,
    count: size_t,
) -> Result<ssize_t, Errno> {
    // The kernel updates `offset` to the byte after the last one read.
    let offset_ptr = offset as *mut off_t as usize;
    syscall4(
        SYS_SENDFILE,
        out_fd as usize,
        in_fd as usize,
        offset_ptr,
        count as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Send multiple messages on a socket
pub fn sendmmsg(sockfd: i32, msgvec: &mut [mmsghdr_t], flags: i32) -> Result<i32, Errno> {
    syscall4(
        SYS_SENDMMSG,
        sockfd as usize,
        msgvec.as_mut_ptr() as usize,
        msgvec.len(),
        flags as usize,
    )
    .map(|ret| ret as i32)
}
/// Send a message on a socket. Allow sending ancillary data.
pub fn sendmsg(sockfd: i32, msg: &msghdr_t, flags: i32) -> Result<ssize_t, Errno> {
    let msg_ptr = msg as *const msghdr_t as usize;
    syscall3(SYS_SENDMSG, sockfd as usize, msg_ptr, flags as usize).map(|ret| ret as ssize_t)
}
/// Send a message on a socket.
pub fn sendto(
    sockfd: i32,
    buf: &[u8],
    len: size_t,
    flags: i32,
    dest_addr: &sockaddr_in_t,
    addrlen: socklen_t,
) -> Result<ssize_t, Errno> {
    // NOTE(review): `len` is forwarded as-is; presumably callers keep
    // `len <= buf.len()` — confirm at call sites.
    syscall6(
        SYS_SENDTO,
        sockfd as usize,
        buf.as_ptr() as usize,
        len as usize,
        flags as usize,
        dest_addr as *const sockaddr_in_t as usize,
        addrlen as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Set NIS domain name.
/// ```
/// let name = "local-rust-domain";
/// let ret = nc::setdomainname(name);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setdomainname<P: AsRef<Path>>(name: P) -> Result<(), Errno> {
    let name = CString::new(name.as_ref());
    syscall2(SYS_SETDOMAINNAME, name.as_ptr() as usize, name.len()).map(drop)
}
/// Set the group identity used for filesystem checks.
/// ```
/// let ret = nc::setfsgid(0);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(nc::getgid()));
/// ```
pub fn setfsgid(fsgid: gid_t) -> Result<gid_t, Errno> {
    // Returns the previous filesystem GID.
    syscall1(SYS_SETFSGID, fsgid as usize).map(|prev| prev as gid_t)
}
/// Set the user identity used for filesystem checks.
/// ```
/// let ret = nc::setfsuid(0);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(nc::getuid()));
/// ```
pub fn setfsuid(fsuid: uid_t) -> Result<uid_t, Errno> {
    // Returns the previous filesystem UID.
    syscall1(SYS_SETFSUID, fsuid as usize).map(|prev| prev as uid_t)
}
/// Set the group ID of the calling process to `gid`.
/// ```
/// let ret = nc::setgid(0);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setgid(gid: gid_t) -> Result<(), Errno> {
    syscall1(SYS_SETGID, gid as usize).map(drop)
}
/// Set list of supplementary group Ids.
/// ```
/// let list = [0, 1, 2];
/// let ret = nc::setgroups(&list);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setgroups(group_list: &[gid_t]) -> Result<(), Errno> {
    syscall2(SYS_SETGROUPS, group_list.as_ptr() as usize, group_list.len()).map(drop)
}
/// Set hostname
/// ```
/// let name = "rust-machine";
/// let ret = nc::sethostname(name);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn sethostname<P: AsRef<Path>>(name: P) -> Result<(), Errno> {
    let name = CString::new(name.as_ref());
    syscall2(SYS_SETHOSTNAME, name.as_ptr() as usize, name.len()).map(drop)
}
/// Set value of an interval timer.
/// ```
/// use core::mem::size_of;
///
/// fn handle_alarm(signum: i32) {
/// assert_eq!(signum, nc::SIGALRM);
/// let msg = "Hello alarm";
/// let _ = nc::write(2, msg.as_ptr() as usize, msg.len());
/// }
///
/// let sa = nc::sigaction_t {
/// sa_handler: handle_alarm as nc::sighandler_t,
/// sa_flags: 0,
/// ..nc::sigaction_t::default()
/// };
/// let mut old_sa = nc::sigaction_t::default();
/// let ret = nc::rt_sigaction(nc::SIGALRM, &sa, &mut old_sa, size_of::<nc::sigset_t>());
/// assert!(ret.is_ok());
///
/// // Single shot timer, actived after 1 second.
/// let itv = nc::itimerval_t {
/// it_value: nc::timeval_t {
/// tv_sec: 1,
/// tv_usec: 0,
/// },
/// it_interval: nc::timeval_t {
/// tv_sec: 0,
/// tv_usec: 0,
/// },
/// };
/// let mut prev_itv = nc::itimerval_t::default();
/// let ret = nc::setitimer(nc::ITIMER_REAL, &itv, &mut prev_itv);
/// assert!(ret.is_ok());
///
/// let ret = nc::getitimer(nc::ITIMER_REAL, &mut prev_itv);
/// assert!(ret.is_ok());
/// assert!(prev_itv.it_value.tv_sec <= itv.it_value.tv_sec);
///
/// let ret = nc::pause();
/// assert_eq!(ret, Err(nc::EINTR));
///
/// let ret = nc::getitimer(nc::ITIMER_REAL, &mut prev_itv);
/// assert!(ret.is_ok());
/// assert_eq!(prev_itv.it_value.tv_sec, 0);
/// assert_eq!(prev_itv.it_value.tv_usec, 0);
/// ```
pub fn setitimer(
    which: i32,
    new_val: &itimerval_t,
    old_val: &mut itimerval_t,
) -> Result<(), Errno> {
    // The previous timer setting is written back through `old_val`.
    syscall3(
        SYS_SETITIMER,
        which as usize,
        new_val as *const itimerval_t as usize,
        old_val as *mut itimerval_t as usize,
    )
    .map(drop)
}
/// Reassociate thread with a namespace.
pub fn setns(fd: i32, nstype: i32) -> Result<(), Errno> {
    syscall2(SYS_SETNS, fd as usize, nstype as usize).map(drop)
}
/// Set the process group ID (PGID) of the process specified by `pid` to `pgid`.
/// ```
/// let ret = nc::setpgid(nc::getpid(), 1);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setpgid(pid: pid_t, pgid: pid_t) -> Result<(), Errno> {
    syscall2(SYS_SETPGID, pid as usize, pgid as usize).map(drop)
}
/// Set program scheduling priority.
/// ```
/// let ret = nc::setpriority(nc::PRIO_PROCESS, nc::getpid(), -19);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EACCES))
/// ```
pub fn setpriority(which: i32, who: i32, prio: i32) -> Result<(), Errno> {
    // Negative `prio` values round-trip correctly through the usize cast.
    syscall3(SYS_SETPRIORITY, which as usize, who as usize, prio as usize).map(drop)
}
/// Set real and effective group IDs of the calling process.
/// ```
/// let ret = nc::setregid(0, 0);
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setregid(rgid: gid_t, egid: gid_t) -> Result<(), Errno> {
    syscall2(SYS_SETREGID, rgid as usize, egid as usize).map(drop)
}
/// Set real, effective and saved group Ids of the calling process.
/// ```
/// let ret = nc::setresgid(0, 0, 0);
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setresgid(rgid: gid_t, egid: gid_t, sgid: gid_t) -> Result<(), Errno> {
    syscall3(SYS_SETRESGID, rgid as usize, egid as usize, sgid as usize).map(drop)
}
/// Set real, effective and saved user Ids of the calling process.
/// ```
/// let ret = nc::setresuid(0, 0, 0);
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setresuid(ruid: uid_t, euid: uid_t, suid: uid_t) -> Result<(), Errno> {
    syscall3(SYS_SETRESUID, ruid as usize, euid as usize, suid as usize).map(drop)
}
/// Set real and effective user IDs of the calling process.
/// ```
/// let ret = nc::setreuid(0, 0);
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setreuid(ruid: uid_t, euid: uid_t) -> Result<(), Errno> {
    syscall2(SYS_SETREUID, ruid as usize, euid as usize).map(drop)
}
/// Set resource limit
/// ```
/// let rlimit = nc::rlimit_t {
/// rlim_cur: 128,
/// rlim_max: 128,
/// };
/// let ret = nc::setrlimit(nc::RLIMIT_NOFILE, &rlimit);
/// assert!(ret.is_ok());
/// ```
pub fn setrlimit(resource: i32, rlimit: &rlimit_t) -> Result<(), Errno> {
    let rlimit_ptr = rlimit as *const rlimit_t as usize;
    syscall2(SYS_SETRLIMIT, resource as usize, rlimit_ptr).map(drop)
}
/// Create a new session if the calling process is not a process group leader.
/// ```
/// let ret = nc::setsid();
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(nc::getpid()));
/// ```
pub fn setsid() -> Result<pid_t, Errno> {
    // On success the kernel returns the new session ID.
    syscall0(SYS_SETSID).map(|sid| sid as pid_t)
}
/// Set options on sockets.
///
/// `optval` is the address of the option value, `optlen` its size in bytes.
pub fn setsockopt(
    sockfd: i32,
    level: i32,
    optname: i32,
    optval: usize,
    optlen: socklen_t,
) -> Result<(), Errno> {
    syscall5(
        SYS_SETSOCKOPT,
        sockfd as usize,
        level as usize,
        optname as usize,
        optval,
        optlen as usize,
    )
    .map(drop)
}
/// Set system time and timezone.
/// ```
/// let tv = nc::timeval_t {
///     tv_sec: 0,
///     tv_usec: 0,
/// };
/// let tz = nc::timezone_t::default();
/// let ret = nc::settimeofday(&tv, &tz);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn settimeofday(timeval: &timeval_t, tz: &timezone_t) -> Result<(), Errno> {
    // Both structs are passed to the kernel by address.
    syscall2(
        SYS_SETTIMEOFDAY,
        timeval as *const timeval_t as usize,
        tz as *const timezone_t as usize,
    )
    .map(drop)
}
/// Set the effective user ID of the calling process to `uid`.
/// ```
/// let ret = nc::setuid(0);
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn setuid(uid: uid_t) -> Result<(), Errno> {
    // Widen the user ID to register width and discard the zero result.
    syscall1(SYS_SETUID, uid as usize).map(drop)
}
/// Set extended attribute value.
/// ```
/// let path = "/tmp/nc-setxattr";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let attr_name = "user.creator";
/// let attr_value = "nc-0.0.1";
/// //let flags = 0;
/// let flags = nc::XATTR_CREATE;
/// let ret = nc::setxattr(
///     path,
///     &attr_name,
///     attr_value.as_ptr() as usize,
///     attr_value.len(),
///     flags,
/// );
/// assert!(ret.is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn setxattr<P: AsRef<Path>>(
    filename: P,
    name: P,
    value: usize,
    size: size_t,
    flags: i32,
) -> Result<(), Errno> {
    // NUL-terminated copies must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    let attr = CString::new(name.as_ref());
    syscall5(
        SYS_SETXATTR,
        path.as_ptr() as usize,
        attr.as_ptr() as usize,
        value,
        size as usize,
        flags as usize,
    )
    .map(drop)
}
/// Set default NUMA memory policy for a thread and its children
///
/// `nmask` points to a node bitmask of `maxnode` bits; it may be NULL.
pub fn set_mempolicy(mode: i32, nmask: *const usize, maxnode: usize) -> Result<(), Errno> {
    syscall3(SYS_SET_MEMPOLICY, mode as usize, nmask as usize, maxnode).map(drop)
}
/// Set the robust-futex list head of a task.
///
/// The kernel validates the second argument as the size *in bytes* of a
/// `robust_list_head` structure and fails with `EINVAL` otherwise, so the
/// slice length (an element count) is converted to a byte count here.
pub fn set_robust_list(heads: &mut [robust_list_head_t]) -> Result<(), Errno> {
    let heads_ptr = heads.as_mut_ptr() as usize;
    // Byte size, not element count: sys_set_robust_list requires
    // len == sizeof(struct robust_list_head).
    let len = heads.len() * core::mem::size_of::<robust_list_head_t>();
    syscall2(SYS_SET_ROBUST_LIST, heads_ptr, len).map(drop)
}
/// Set pointer to thread ID.
///
/// Returns the caller's thread ID as reported by the kernel.
pub fn set_tid_address(tid: &mut i32) -> Result<isize, Errno> {
    syscall1(SYS_SET_TID_ADDRESS, tid as *mut i32 as usize).map(|ret| ret as isize)
}
/// Attach the System V shared memory segment.
/// ```
/// let size = 4 * nc::PAGE_SIZE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | 0o600;
/// let ret = nc::shmget(nc::IPC_PRIVATE, size, flags);
/// assert!(ret.is_ok());
/// let shmid = ret.unwrap();
///
/// let addr: usize = 0;
/// let ret = nc::shmat(shmid, addr, 0);
/// assert!(ret.is_ok());
/// let addr = ret.unwrap();
///
/// let mut buf = nc::shmid_ds_t::default();
/// let ret = nc::shmctl(shmid, nc::IPC_STAT, &mut buf);
/// assert!(ret.is_ok());
///
/// let ret = nc::shmdt(addr);
/// assert!(ret.is_ok());
///
/// let ret = nc::shmctl(shmid, nc::IPC_RMID, &mut buf);
/// assert!(ret.is_ok());
/// ```
pub fn shmat(shmid: i32, shmaddr: usize, shmflg: i32) -> Result<usize, Errno> {
    // The raw return value is the attach address, so no conversion is needed.
    syscall3(SYS_SHMAT, shmid as usize, shmaddr, shmflg as usize)
}
/// System V shared memory control.
/// ```
/// let size = 4 * nc::PAGE_SIZE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | 0o600;
/// let ret = nc::shmget(nc::IPC_PRIVATE, size, flags);
/// assert!(ret.is_ok());
/// let shmid = ret.unwrap();
/// let mut buf = nc::shmid_ds_t::default();
/// let ret = nc::shmctl(shmid, nc::IPC_RMID, &mut buf);
/// assert!(ret.is_ok());
/// ```
pub fn shmctl(shmid: i32, cmd: i32, buf: &mut shmid_ds_t) -> Result<i32, Errno> {
    let buf_ptr = buf as *mut shmid_ds_t as usize;
    syscall3(SYS_SHMCTL, shmid as usize, cmd as usize, buf_ptr).map(|ret| ret as i32)
}
/// Detach the System V shared memory segment.
/// ```
/// let size = 4 * nc::PAGE_SIZE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | 0o600;
/// let ret = nc::shmget(nc::IPC_PRIVATE, size, flags);
/// assert!(ret.is_ok());
/// let shmid = ret.unwrap();
///
/// let addr: usize = 0;
/// let ret = nc::shmat(shmid, addr, 0);
/// assert!(ret.is_ok());
/// let addr = ret.unwrap();
///
/// let mut buf = nc::shmid_ds_t::default();
/// let ret = nc::shmctl(shmid, nc::IPC_STAT, &mut buf);
/// assert!(ret.is_ok());
///
/// let ret = nc::shmdt(addr);
/// assert!(ret.is_ok());
///
/// let ret = nc::shmctl(shmid, nc::IPC_RMID, &mut buf);
/// assert!(ret.is_ok());
/// ```
pub fn shmdt(shmaddr: usize) -> Result<(), Errno> {
    // `shmaddr` must be an address previously returned by `shmat()`.
    syscall1(SYS_SHMDT, shmaddr).map(drop)
}
/// Allocates a System V shared memory segment.
/// ```
/// let size = 4 * nc::PAGE_SIZE;
/// let flags = nc::IPC_CREAT | nc::IPC_EXCL | 0o600;
/// let ret = nc::shmget(nc::IPC_PRIVATE, size, flags);
/// assert!(ret.is_ok());
/// let _shmid = ret.unwrap();
/// ```
pub fn shmget(key: key_t, size: size_t, shmflg: i32) -> Result<i32, Errno> {
    // Returns the shared memory segment identifier.
    syscall3(SYS_SHMGET, key as usize, size as usize, shmflg as usize).map(|ret| ret as i32)
}
/// Shutdown part of a full-duplex connection.
///
/// `how` selects which direction(s) to shut down.
pub fn shutdown(sockfd: i32, how: i32) -> Result<(), Errno> {
    syscall2(SYS_SHUTDOWN, sockfd as usize, how as usize).map(drop)
}
/// Get/set signal stack context.
///
/// `uss` is the new stack description; the previous one is stored in `uoss`.
pub fn sigaltstack(uss: &sigaltstack_t, uoss: &mut sigaltstack_t) -> Result<(), Errno> {
    syscall2(
        SYS_SIGALTSTACK,
        uss as *const sigaltstack_t as usize,
        uoss as *mut sigaltstack_t as usize,
    )
    .map(drop)
}
/// Create a file descriptor to accept signals.
///
/// The kernel validates the third argument as the size *in bytes* of the
/// signal mask (it must equal `size_of::<sigset_t>()`), so the slice length
/// (an element count) is converted to a byte count here.
pub fn signalfd4(fd: i32, mask: &[sigset_t], flags: i32) -> Result<i32, Errno> {
    let fd = fd as usize;
    let mask_ptr = mask.as_ptr() as usize;
    // Byte size, not element count: sys_signalfd4 rejects any other value.
    let mask_len = mask.len() * core::mem::size_of::<sigset_t>();
    let flags = flags as usize;
    syscall4(SYS_SIGNALFD4, fd, mask_ptr, mask_len, flags).map(|ret| ret as i32)
}
/// Create an endpoint for communication.
///
/// Returns a file descriptor referring to the new socket.
pub fn socket(domain: i32, sock_type: i32, protocol: i32) -> Result<i32, Errno> {
    syscall3(
        SYS_SOCKET,
        domain as usize,
        sock_type as usize,
        protocol as usize,
    )
    .map(|ret| ret as i32)
}
/// Create a pair of connected socket.
///
/// NOTE(review): `sv` is taken *by value*, so the two file descriptors the
/// kernel writes into the array are never visible to the caller. Fixing this
/// requires an interface change (`sv: &mut [i32; 2]`, like `pipe()`), so it
/// is only flagged here.
pub fn socketpair(domain: i32, type_: i32, protocol: i32, sv: [i32; 2]) -> Result<(), Errno> {
    let domain = domain as usize;
    let type_ = type_ as usize;
    let protocol = protocol as usize;
    let sv_ptr = sv.as_ptr() as usize;
    syscall4(SYS_SOCKETPAIR, domain, type_, protocol, sv_ptr).map(drop)
}
/// Splice data to/from pipe.
/// ```
/// let mut fds_left = [0, 0];
/// let ret = nc::pipe(&mut fds_left);
/// assert!(ret.is_ok());
///
/// let mut fds_right = [0, 0];
/// let ret = nc::pipe(&mut fds_right);
/// assert!(ret.is_ok());
///
/// let msg = "Hello, Rust";
/// let ret = nc::write(fds_left[1], msg.as_ptr() as usize, msg.len());
/// assert!(ret.is_ok());
/// let n_write = ret.unwrap() as nc::size_t;
/// assert_eq!(n_write, msg.len());
///
/// let ret = nc::splice(
///     fds_left[0],
///     None,
///     fds_right[1],
///     None,
///     n_write,
///     nc::SPLICE_F_MOVE,
/// );
/// assert!(ret.is_ok());
///
/// let mut buf = [0u8; 64];
/// let buf_len = buf.len();
/// let ret = nc::read(fds_right[0], buf.as_mut_ptr() as usize, buf_len);
/// assert!(ret.is_ok());
/// let n_read = ret.unwrap() as nc::size_t;
/// assert_eq!(n_read, n_write);
/// let read_msg = std::str::from_utf8(&buf[..n_read]);
/// assert!(read_msg.is_ok());
/// assert_eq!(Ok(msg), read_msg);
///
/// assert!(nc::close(fds_left[0]).is_ok());
/// assert!(nc::close(fds_left[1]).is_ok());
/// assert!(nc::close(fds_right[0]).is_ok());
/// assert!(nc::close(fds_right[1]).is_ok());
/// ```
pub fn splice(
    fd_in: i32,
    off_in: Option<&mut loff_t>,
    fd_out: i32,
    off_out: Option<&mut loff_t>,
    len: size_t,
    flags: u32,
) -> Result<ssize_t, Errno> {
    // A missing offset is passed to the kernel as a NULL pointer (0).
    let off_in_ptr = off_in.map_or(0, |off| off as *mut loff_t as usize);
    let off_out_ptr = off_out.map_or(0, |off| off as *mut loff_t as usize);
    syscall6(
        SYS_SPLICE,
        fd_in as usize,
        off_in_ptr,
        fd_out as usize,
        off_out_ptr,
        len as usize,
        flags as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Get filesystem statistics.
/// ```
/// let path = "/usr";
/// let mut statfs = nc::statfs_t::default();
/// let ret = nc::statfs(path, &mut statfs);
/// assert!(ret.is_ok());
/// assert!(statfs.f_bfree > 0);
/// assert!(statfs.f_bavail > 0);
/// ```
pub fn statfs<P: AsRef<Path>>(filename: P, buf: &mut statfs_t) -> Result<(), Errno> {
    // NUL-terminated copy must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    syscall2(
        SYS_STATFS,
        path.as_ptr() as usize,
        buf as *mut statfs_t as usize,
    )
    .map(drop)
}
/// Get file status about a file (extended).
/// ```
/// let path = "/etc/passwd";
/// let mut statx = nc::statx_t::default();
/// let ret = nc::statx(nc::AT_FDCWD, path, nc::AT_SYMLINK_NOFOLLOW, nc::STATX_TYPE, &mut statx);
/// assert!(ret.is_ok());
/// // Check fd is a regular file.
/// assert_eq!((statx.stx_mode as u32 & nc::S_IFMT), nc::S_IFREG);
/// ```
pub fn statx<P: AsRef<Path>>(
    dirfd: i32,
    filename: P,
    flags: i32,
    mask: u32,
    buf: &mut statx_t,
) -> Result<(), Errno> {
    // NUL-terminated copy must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    syscall5(
        SYS_STATX,
        dirfd as usize,
        path.as_ptr() as usize,
        flags as usize,
        mask as usize,
        buf as *mut statx_t as usize,
    )
    .map(drop)
}
/// Stop swapping to file/device.
/// ```
/// let filename = "/dev/sda-no-exist";
/// let ret = nc::swapoff(filename);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn swapoff<P: AsRef<Path>>(filename: P) -> Result<(), Errno> {
    // NUL-terminated copy must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    syscall1(SYS_SWAPOFF, path.as_ptr() as usize).map(drop)
}
/// Start swapping to file/device.
/// ```
/// let filename = "/dev/sda-no-exist";
/// let ret = nc::swapon(filename, nc::SWAP_FLAG_PREFER);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
/// ```
pub fn swapon<P: AsRef<Path>>(filename: P, flags: i32) -> Result<(), Errno> {
    // NUL-terminated copy must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    syscall2(SYS_SWAPON, path.as_ptr() as usize, flags as usize).map(drop)
}
/// Make a new name for a file.
/// ```
/// let oldname = "/etc/passwd";
/// let newname = "/tmp/nc-symlinkat";
/// let ret = nc::symlinkat(oldname, nc::AT_FDCWD, newname);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(newname).is_ok());
/// ```
pub fn symlinkat<P: AsRef<Path>>(oldname: P, newdirfd: i32, newname: P) -> Result<(), Errno> {
    // NUL-terminated copies must stay alive until the syscall returns.
    let old = CString::new(oldname.as_ref());
    let new = CString::new(newname.as_ref());
    syscall3(
        SYS_SYMLINKAT,
        old.as_ptr() as usize,
        newdirfd as usize,
        new.as_ptr() as usize,
    )
    .map(drop)
}
/// Commit filesystem caches to disk.
/// ```
/// assert!(nc::sync().is_ok());
/// ```
pub fn sync() -> Result<(), Errno> {
    // sync(2) takes no arguments; the success value carries no payload.
    syscall0(SYS_SYNC).map(drop)
}
/// Commit filesystem cache related to `fd` to disk.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::syncfs(fd);
/// assert!(ret.is_ok());
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn syncfs(fd: i32) -> Result<(), Errno> {
    let fd = fd as usize;
    syscall1(SYS_SYNCFS, fd).map(drop)
}
/// Sync a file segment to disk
/// ```
/// let path = "/tmp/nc-sync-file-range";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// let msg = "Hello, Rust";
/// let ret = nc::write(fd, msg.as_ptr() as usize, msg.len());
/// assert!(ret.is_ok());
/// let n_write = ret.unwrap();
/// assert_eq!(n_write, msg.len() as nc::ssize_t);
///
/// let ret = nc::sync_file_range(
///     fd,
///     0,
///     n_write,
///     nc::SYNC_FILE_RANGE_WAIT_BEFORE
///         | nc::SYNC_FILE_RANGE_WRITE
///         | nc::SYNC_FILE_RANGE_WAIT_AFTER,
/// );
/// assert!(ret.is_ok());
///
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn sync_file_range(fd: i32, offset: off_t, nbytes: off_t, flags: i32) -> Result<(), Errno> {
    syscall4(
        SYS_SYNC_FILE_RANGE,
        fd as usize,
        offset as usize,
        nbytes as usize,
        flags as usize,
    )
    .map(drop)
}
/// Return system information.
/// ```
/// let mut info = nc::sysinfo_t::default();
/// let ret = nc::sysinfo(&mut info);
/// assert!(ret.is_ok());
/// assert!(info.uptime > 0);
/// assert!(info.freeram > 0);
/// ```
pub fn sysinfo(info: &mut sysinfo_t) -> Result<(), Errno> {
    // The kernel fills the caller-provided struct in place.
    syscall1(SYS_SYSINFO, info as *mut sysinfo_t as usize).map(drop)
}
/// Read and/or clear kernel message ring buffer; set console_loglevel
///
/// Returns the number of bytes read for read-type actions.
pub fn syslog(action: i32, buf: &mut [u8]) -> Result<i32, Errno> {
    let buf_ptr = buf.as_mut_ptr() as usize;
    syscall3(SYS_SYSLOG, action as usize, buf_ptr, buf.len()).map(|ret| ret as i32)
}
/// Duplicate pipe content.
/// ```
/// let mut fds_left = [0, 0];
/// let ret = nc::pipe(&mut fds_left);
/// assert!(ret.is_ok());
///
/// let mut fds_right = [0, 0];
/// let ret = nc::pipe(&mut fds_right);
/// assert!(ret.is_ok());
///
/// let msg = "Hello, Rust";
/// let ret = nc::write(fds_left[1], msg.as_ptr() as usize, msg.len());
/// assert!(ret.is_ok());
/// let n_write = ret.unwrap() as nc::size_t;
/// assert_eq!(n_write, msg.len());
///
/// let ret = nc::tee(fds_left[0], fds_right[1], n_write, nc::SPLICE_F_NONBLOCK);
/// assert!(ret.is_ok());
///
/// let mut buf = [0u8; 64];
/// let buf_len = buf.len();
/// let ret = nc::read(fds_right[0], buf.as_mut_ptr() as usize, buf_len);
/// assert!(ret.is_ok());
/// let n_read = ret.unwrap() as nc::size_t;
/// assert_eq!(n_read, n_write);
/// let read_msg = std::str::from_utf8(&buf[..n_read]);
/// assert!(read_msg.is_ok());
/// assert_eq!(Ok(msg), read_msg);
///
/// assert!(nc::close(fds_left[0]).is_ok());
/// assert!(nc::close(fds_left[1]).is_ok());
/// assert!(nc::close(fds_right[0]).is_ok());
/// assert!(nc::close(fds_right[1]).is_ok());
/// ```
pub fn tee(fd_in: i32, fd_out: i32, len: size_t, flags: u32) -> Result<ssize_t, Errno> {
    syscall4(
        SYS_TEE,
        fd_in as usize,
        fd_out as usize,
        len as usize,
        flags as usize,
    )
    .map(|ret| ret as ssize_t)
}
/// Send a signal to a thread.
/// ```
/// let ret = nc::fork();
/// assert!(ret.is_ok());
/// let pid = ret.unwrap();
/// if pid == 0 {
///     println!("[child] pid: {}", nc::getpid());
///     let _ret = nc::pause();
/// } else {
///     let ret = nc::tgkill(pid, pid, nc::SIGTERM);
///     assert!(ret.is_ok());
/// }
/// ```
pub fn tgkill(tgid: i32, tid: i32, sig: i32) -> Result<(), Errno> {
    syscall3(SYS_TGKILL, tgid as usize, tid as usize, sig as usize).map(drop)
}
/// Create a timer that notifies via a file descriptor.
/// ```
/// let ret = nc::timerfd_create(nc::CLOCK_MONOTONIC, nc::TFD_CLOEXEC);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn timerfd_create(clockid: i32, flags: i32) -> Result<i32, Errno> {
    syscall2(SYS_TIMERFD_CREATE, clockid as usize, flags as usize).map(|ret| ret as i32)
}
/// Get current timer via a file descriptor.
///
/// The current setting is written into `cur_value`.
pub fn timerfd_gettime(ufd: i32, cur_value: &mut itimerspec_t) -> Result<(), Errno> {
    syscall2(
        SYS_TIMERFD_GETTIME,
        ufd as usize,
        cur_value as *mut itimerspec_t as usize,
    )
    .map(drop)
}
/// Set current timer via a file descriptor.
/// ```
/// let ret = nc::timerfd_create(nc::CLOCK_MONOTONIC, nc::TFD_CLOEXEC);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
///
/// let flags = 0;
/// let time = nc::itimerspec_t {
///     it_interval: nc::timespec_t::default(),
///     it_value: nc::timespec_t {
///         tv_sec: 1,
///         tv_nsec: 0,
///     },
/// };
/// let ret = nc::timerfd_settime(fd, flags, &time, None);
/// assert!(ret.is_ok());
///
/// assert!(nc::close(fd).is_ok());
/// ```
pub fn timerfd_settime(
    ufd: i32,
    flags: i32,
    new_value: &itimerspec_t,
    old_value: Option<&mut itimerspec_t>,
) -> Result<(), Errno> {
    // A missing old-value slot is passed to the kernel as a NULL pointer.
    let old_value_ptr = old_value.map_or(0, |old| old as *mut itimerspec_t as usize);
    syscall4(
        SYS_TIMERFD_SETTIME,
        ufd as usize,
        flags as usize,
        new_value as *const itimerspec_t as usize,
        old_value_ptr,
    )
    .map(drop)
}
/// Create a per-process timer
/// ```
/// let mut timerid = nc::timer_t::default();
/// let ret = nc::timer_create(nc::CLOCK_MONOTONIC, None, &mut timerid);
/// assert!(ret.is_ok());
/// ```
pub fn timer_create(
    clock: clockid_t,
    event: Option<&mut sigevent_t>,
    timer_id: &mut timer_t,
) -> Result<(), Errno> {
    let clock = clock as usize;
    // `None` becomes a NULL pointer; `0 as usize` replaced with plain `0`
    // (clippy: unnecessary_cast) and the if-let collapsed into map_or.
    let event_ptr = event.map_or(0, |event| event as *mut sigevent_t as usize);
    let timer_id_ptr = timer_id as *mut timer_t as usize;
    syscall3(SYS_TIMER_CREATE, clock, event_ptr, timer_id_ptr).map(drop)
}
/// Delete a per-process timer
/// ```
/// let mut timer_id = nc::timer_t::default();
/// let ret = nc::timer_create(nc::CLOCK_MONOTONIC, None, &mut timer_id);
/// assert!(ret.is_ok());
/// let ret = nc::timer_delete(timer_id);
/// assert!(ret.is_ok());
/// ```
pub fn timer_delete(timer_id: timer_t) -> Result<(), Errno> {
    syscall1(SYS_TIMER_DELETE, timer_id as usize).map(drop)
}
/// Get overrun count for a per-process timer
/// ```
/// use core::mem::size_of;
///
/// fn handle_alarm(signum: i32) {
///     assert_eq!(signum, nc::SIGALRM);
/// }
///
/// fn main() {
///     const TIMER_SIG: i32 = nc::SIGRTMAX;
///
///     let sa = nc::sigaction_t {
///         sa_flags: nc::SA_SIGINFO,
///         sa_handler: handle_alarm as nc::sighandler_t,
///         ..nc::sigaction_t::default()
///     };
///     let mut old_sa = nc::sigaction_t::default();
///     let ret = nc::rt_sigaction(TIMER_SIG, &sa, &mut old_sa, size_of::<nc::sigset_t>());
///     assert!(ret.is_ok());
///
///     let tid = nc::itimerspec_t {
///         it_interval: nc::timespec_t::default(),
///         it_value: nc::timespec_t {
///             tv_sec: 1,
///             tv_nsec: 0,
///         },
///     };
///     let mut ev = nc::sigevent_t {
///         sigev_value: nc::sigval_t {
///             sival_ptr: &tid as *const nc::itimerspec_t as usize,
///         },
///         sigev_signo: TIMER_SIG,
///         sigev_notify: nc::SIGEV_SIGNAL,
///         sigev_un: nc::sigev_un_t::default(),
///     };
///     let mut timer_id = nc::timer_t::default();
///     let ret = nc::timer_create(nc::CLOCK_MONOTONIC, Some(&mut ev), &mut timer_id);
///     assert!(ret.is_ok());
///     println!("timer id: {:?}", timer_id);
///
///     let flags = 0;
///     let time = nc::itimerspec_t {
///         it_interval: nc::timespec_t::default(),
///         it_value: nc::timespec_t {
///             tv_sec: 1,
///             tv_nsec: 0,
///         },
///     };
///     let ret = nc::timer_settime(timer_id, flags, &time, None);
///     assert!(ret.is_ok());
///
///     let mut cur_time = nc::itimerspec_t::default();
///     let ret = nc::timer_gettime(timer_id, &mut cur_time);
///     assert!(ret.is_ok());
///     println!("cur time: {:?}", cur_time);
///
///     let ret = nc::pause();
///     assert_eq!(ret, Err(nc::EINTR));
///
///     let ret = nc::timer_getoverrun(timer_id);
///     assert!(ret.is_ok());
///     assert_eq!(ret, Ok(0));
///
///     let ret = nc::timer_delete(timer_id);
///     assert!(ret.is_ok());
/// }
/// ```
pub fn timer_getoverrun(timer_id: timer_t) -> Result<i32, Errno> {
    // The raw return value is the overrun count.
    syscall1(SYS_TIMER_GETOVERRUN, timer_id as usize).map(|ret| ret as i32)
}
/// Fetch state of per-process timer
/// ```
/// use core::mem::size_of;
///
/// fn handle_alarm(signum: i32) {
///     assert_eq!(signum, nc::SIGALRM);
/// }
///
/// fn main() {
///     const TIMER_SIG: i32 = nc::SIGRTMAX;
///
///     let sa = nc::sigaction_t {
///         sa_flags: nc::SA_SIGINFO,
///         sa_handler: handle_alarm as nc::sighandler_t,
///         ..nc::sigaction_t::default()
///     };
///     let mut old_sa = nc::sigaction_t::default();
///     let ret = nc::rt_sigaction(TIMER_SIG, &sa, &mut old_sa, size_of::<nc::sigset_t>());
///     assert!(ret.is_ok());
///
///     let tid = nc::itimerspec_t {
///         it_interval: nc::timespec_t::default(),
///         it_value: nc::timespec_t {
///             tv_sec: 1,
///             tv_nsec: 0,
///         },
///     };
///     let mut ev = nc::sigevent_t {
///         sigev_value: nc::sigval_t {
///             sival_ptr: &tid as *const nc::itimerspec_t as usize,
///         },
///         sigev_signo: TIMER_SIG,
///         sigev_notify: nc::SIGEV_SIGNAL,
///         sigev_un: nc::sigev_un_t::default(),
///     };
///     let mut timer_id = nc::timer_t::default();
///     let ret = nc::timer_create(nc::CLOCK_MONOTONIC, Some(&mut ev), &mut timer_id);
///     assert!(ret.is_ok());
///     println!("timer id: {:?}", timer_id);
///
///     let flags = 0;
///     let time = nc::itimerspec_t {
///         it_interval: nc::timespec_t::default(),
///         it_value: nc::timespec_t {
///             tv_sec: 1,
///             tv_nsec: 0,
///         },
///     };
///     let ret = nc::timer_settime(timer_id, flags, &time, None);
///     assert!(ret.is_ok());
///
///     let mut cur_time = nc::itimerspec_t::default();
///     let ret = nc::timer_gettime(timer_id, &mut cur_time);
///     assert!(ret.is_ok());
///     println!("cur time: {:?}", cur_time);
///
///     let ret = nc::pause();
///     assert_eq!(ret, Err(nc::EINTR));
///
///     let ret = nc::timer_delete(timer_id);
///     assert!(ret.is_ok());
/// }
/// ```
pub fn timer_gettime(timer_id: timer_t, curr: &mut itimerspec_t) -> Result<(), Errno> {
    // The kernel writes the remaining/interval times into `curr`.
    syscall2(
        SYS_TIMER_GETTIME,
        timer_id as usize,
        curr as *mut itimerspec_t as usize,
    )
    .map(drop)
}
/// Arm/disarm state of per-process timer
/// ```
/// use core::mem::size_of;
///
/// fn handle_alarm(signum: i32) {
///     assert_eq!(signum, nc::SIGALRM);
/// }
///
/// fn main() {
///     const TIMER_SIG: i32 = nc::SIGRTMAX;
///
///     let sa = nc::sigaction_t {
///         sa_flags: nc::SA_SIGINFO,
///         sa_handler: handle_alarm as nc::sighandler_t,
///         ..nc::sigaction_t::default()
///     };
///     let mut old_sa = nc::sigaction_t::default();
///     let ret = nc::rt_sigaction(TIMER_SIG, &sa, &mut old_sa, size_of::<nc::sigset_t>());
///     assert!(ret.is_ok());
///
///     let tid = nc::itimerspec_t {
///         it_interval: nc::timespec_t::default(),
///         it_value: nc::timespec_t {
///             tv_sec: 1,
///             tv_nsec: 0,
///         },
///     };
///     let mut ev = nc::sigevent_t {
///         sigev_value: nc::sigval_t {
///             sival_ptr: &tid as *const nc::itimerspec_t as usize,
///         },
///         sigev_signo: TIMER_SIG,
///         sigev_notify: nc::SIGEV_SIGNAL,
///         sigev_un: nc::sigev_un_t::default(),
///     };
///     let mut timer_id = nc::timer_t::default();
///     let ret = nc::timer_create(nc::CLOCK_MONOTONIC, Some(&mut ev), &mut timer_id);
///     assert!(ret.is_ok());
///     println!("timer id: {:?}", timer_id);
///
///     let flags = 0;
///     let time = nc::itimerspec_t {
///         it_interval: nc::timespec_t::default(),
///         it_value: nc::timespec_t {
///             tv_sec: 1,
///             tv_nsec: 0,
///         },
///     };
///     let ret = nc::timer_settime(timer_id, flags, &time, None);
///     assert!(ret.is_ok());
///
///     let mut cur_time = nc::itimerspec_t::default();
///     let ret = nc::timer_gettime(timer_id, &mut cur_time);
///     assert!(ret.is_ok());
///     println!("cur time: {:?}", cur_time);
///
///     let ret = nc::pause();
///     assert_eq!(ret, Err(nc::EINTR));
///
///     let ret = nc::timer_delete(timer_id);
///     assert!(ret.is_ok());
/// }
/// ```
pub fn timer_settime(
    timer_id: timer_t,
    flags: i32,
    new_value: &itimerspec_t,
    old_value: Option<&mut itimerspec_t>,
) -> Result<(), Errno> {
    let timer_id = timer_id as usize;
    let flags = flags as usize;
    let new_value_ptr = new_value as *const itimerspec_t as usize;
    // `None` becomes a NULL pointer; `0 as usize` replaced with plain `0`
    // (clippy: unnecessary_cast) and the if-let collapsed into map_or.
    let old_value_ptr = old_value.map_or(0, |old| old as *mut itimerspec_t as usize);
    syscall4(
        SYS_TIMER_SETTIME,
        timer_id,
        flags,
        new_value_ptr,
        old_value_ptr,
    )
    .map(drop)
}
/// Get process times.
/// ```
/// let mut tms = nc::tms_t::default();
/// let ret = nc::times(&mut tms);
/// assert!(ret.is_ok());
/// let clock = ret.unwrap();
/// assert!(clock > 0);
/// ```
pub fn times(buf: &mut tms_t) -> Result<clock_t, Errno> {
    // The raw return value is a clock-tick timestamp.
    syscall1(SYS_TIMES, buf as *mut tms_t as usize).map(|ret| ret as clock_t)
}
/// Send a signal to a thread (obsolete).
/// ```
/// let ret = nc::fork();
/// assert!(ret.is_ok());
/// let pid = ret.unwrap();
/// if pid == 0 {
///     println!("[child] pid: {}", nc::getpid());
///     let _ret = nc::pause();
/// } else {
///     let ret = nc::tkill(pid, nc::SIGTERM);
///     assert!(ret.is_ok());
/// }
/// ```
pub fn tkill(tid: i32, sig: i32) -> Result<(), Errno> {
    syscall2(SYS_TKILL, tid as usize, sig as usize).map(drop)
}
/// Truncate a file to a specified length.
/// ```
/// let path = "/tmp/nc-truncate";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let ret = nc::truncate(path, 64 * 1024);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn truncate<P: AsRef<Path>>(filename: P, length: off_t) -> Result<(), Errno> {
    // NUL-terminated copy must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    syscall2(SYS_TRUNCATE, path.as_ptr() as usize, length as usize).map(drop)
}
/// Set file mode creation mask.
/// ```
/// let new_mask = 0o077;
/// let ret = nc::umask(new_mask);
/// assert!(ret.is_ok());
/// let old_mask = ret.unwrap();
/// let ret = nc::umask(old_mask);
/// assert_eq!(ret, Ok(new_mask));
/// ```
pub fn umask(mode: mode_t) -> Result<mode_t, Errno> {
    // The raw return value is the previous mask.
    syscall1(SYS_UMASK, mode as usize).map(|ret| ret as mode_t)
}
/// Umount filesystem.
/// ```
/// let target_dir = "/tmp/nc-umount2";
/// let ret = nc::mkdir(target_dir, 0o755);
/// assert!(ret.is_ok());
///
/// let src_dir = "/etc";
/// let fs_type = "";
/// let mount_flags = nc::MS_BIND | nc::MS_RDONLY;
/// let data = 0;
/// let ret = nc::mount(src_dir, target_dir, fs_type, mount_flags, data);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
///
/// let flags = 0;
/// let ret = nc::umount2(target_dir, flags);
/// assert!(ret.is_err());
/// assert_eq!(ret, Err(nc::EPERM));
///
/// assert!(nc::rmdir(target_dir).is_ok());
/// ```
pub fn umount2<P: AsRef<Path>>(name: P, flags: i32) -> Result<(), Errno> {
    let name = CString::new(name.as_ref());
    let name_ptr = name.as_ptr() as usize;
    let flags = flags as usize;
    syscall2(SYS_UMOUNT2, name_ptr, flags).map(drop)
}
/// Get name and information about current kernel.
/// ```
/// let mut buf = nc::utsname_t::default();
/// let ret = nc::uname(&mut buf);
/// assert!(ret.is_ok());
/// assert!(!buf.sysname.is_empty());
/// assert!(!buf.machine.is_empty());
/// ```
pub fn uname(buf: &mut utsname_t) -> Result<(), Errno> {
    // The kernel fills the caller-provided struct in place.
    syscall1(SYS_UNAME, buf as *mut utsname_t as usize).map(drop)
}
/// Delete a name and possibly the file it refers to.
/// ```
/// let path = "/tmp/nc-unlinkat";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// // /tmp folder is not empty, so this call always returns error.
/// assert!(nc::unlinkat(nc::AT_FDCWD, path, nc::AT_REMOVEDIR).is_err());
/// assert!(nc::unlinkat(nc::AT_FDCWD, path, 0).is_ok());
/// ```
pub fn unlinkat<P: AsRef<Path>>(dfd: i32, filename: P, flag: i32) -> Result<(), Errno> {
    // NUL-terminated copy must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    syscall3(
        SYS_UNLINKAT,
        dfd as usize,
        path.as_ptr() as usize,
        flag as usize,
    )
    .map(drop)
}
/// Disassociate parts of the process execution context
///
/// `flags` selects which namespaces/resources to unshare.
pub fn unshare(flags: i32) -> Result<(), Errno> {
    syscall1(SYS_UNSHARE, flags as usize).map(drop)
}
/// Create a file descriptor to handle page faults in user space.
///
/// Returns the new userfaultfd file descriptor.
pub fn userfaultfd(flags: i32) -> Result<i32, Errno> {
    syscall1(SYS_USERFAULTFD, flags as usize).map(|ret| ret as i32)
}
/// Change time timestamps with nanosecond precision.
/// ```
/// let path = "/tmp/nc-utimesat";
/// let ret = nc::open(path, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// assert!(nc::close(fd).is_ok());
/// let times = [
///     nc::timespec_t {
///         tv_sec: 100,
///         tv_nsec: 0,
///     },
///     nc::timespec_t {
///         tv_sec: 10,
///         tv_nsec: 0,
///     },
/// ];
/// let flags = nc::AT_SYMLINK_NOFOLLOW;
/// let ret = nc::utimensat(nc::AT_FDCWD, path, &times, flags);
/// assert!(ret.is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn utimensat<P: AsRef<Path>>(
    dirfd: i32,
    filename: P,
    times: &[timespec_t; 2],
    flags: i32,
) -> Result<(), Errno> {
    // NUL-terminated copy must stay alive until the syscall returns.
    let path = CString::new(filename.as_ref());
    syscall4(
        SYS_UTIMENSAT,
        dirfd as usize,
        path.as_ptr() as usize,
        times.as_ptr() as usize,
        flags as usize,
    )
    .map(drop)
}
/// Virtually hang up the current terminal.
pub fn vhangup() -> Result<(), Errno> {
    // vhangup(2) takes no arguments; the success value carries no payload.
    syscall0(SYS_VHANGUP).map(drop)
}
/// Splice user page into a pipe.
///
/// `iov` points to the first of `nr_segs` contiguous iovec entries.
pub fn vmsplice(fd: i32, iov: &iovec_t, nr_segs: usize, flags: u32) -> Result<ssize_t, Errno> {
    let iov_ptr = iov as *const iovec_t as usize;
    syscall4(SYS_VMSPLICE, fd as usize, iov_ptr, nr_segs, flags as usize)
        .map(|ret| ret as ssize_t)
}
/// Wait for process to change state.
/// ```
/// let ret = nc::fork();
/// match ret {
///     Err(errno) => {
///         eprintln!("fork() error: {}", nc::strerror(errno));
///         nc::exit(1);
///     }
///     Ok(0) => println!("[child] pid is: {}", nc::getpid()),
///     Ok(pid) => {
///         let mut status = 0;
///         let mut usage = nc::rusage_t::default();
///         let ret = nc::wait4(-1, &mut status, 0, &mut usage);
///         assert!(ret.is_ok());
///         println!("status: {}", status);
///         let exited_pid = ret.unwrap();
///         assert_eq!(exited_pid, pid);
///     }
/// }
/// ```
pub fn wait4(
    pid: pid_t,
    wstatus: &mut i32,
    options: i32,
    rusage: &mut rusage_t,
) -> Result<pid_t, Errno> {
    // The raw return value is the pid of the child whose state changed.
    syscall4(
        SYS_WAIT4,
        pid as usize,
        wstatus as *mut i32 as usize,
        options as usize,
        rusage as *mut rusage_t as usize,
    )
    .map(|ret| ret as pid_t)
}
/// Wait for process to change state
/// ```
/// let ret = nc::fork();
/// match ret {
///     Err(errno) => {
///         eprintln!("fork() error: {}", nc::strerror(errno));
///         nc::exit(1);
///     }
///     Ok(0) => println!("[child] pid is: {}", nc::getpid()),
///     Ok(pid) => {
///         let mut info = nc::siginfo_t::default();
///         let options = nc::WEXITED;
///         let mut usage = nc::rusage_t::default();
///         let ret = nc::waitid(nc::P_ALL, -1, &mut info, options, &mut usage);
///         match ret {
///             Err(errno) => eprintln!("waitid() error: {}", nc::strerror(errno)),
///             Ok(()) => {
///                 let exited_pid = unsafe { info.siginfo.sifields.sigchld.pid };
///                 assert_eq!(pid, exited_pid);
///             }
///         }
///     }
/// }
/// ```
pub fn waitid(
    which: i32,
    pid: pid_t,
    info: &mut siginfo_t,
    options: i32,
    ru: &mut rusage_t,
) -> Result<(), Errno> {
    syscall5(
        SYS_WAITID,
        which as usize,
        pid as usize,
        info as *mut siginfo_t as usize,
        options as usize,
        ru as *mut rusage_t as usize,
    )
    .map(drop)
}
/// Write to a file descriptor.
/// ```
/// let path = "/tmp/nc-write";
/// let ret = nc::open(path, nc::O_CREAT | nc::O_WRONLY, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let msg = "Hello, Rust!";
/// let ret = nc::write(fd, msg.as_ptr() as usize, msg.len());
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(msg.len() as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path).is_ok());
/// ```
pub fn write(fd: i32, buf_ptr: usize, count: size_t) -> Result<ssize_t, Errno> {
let fd = fd as usize;
syscall3(SYS_WRITE, fd, buf_ptr, count).map(|ret| ret as ssize_t)
}
/// Write to a file descriptor from multiple buffers.
/// ```
/// let path = "/etc/passwd";
/// let ret = nc::open(path, nc::O_RDONLY, 0);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let mut buf = [[0_u8; 64]; 4];
/// let capacity = 4 * 64;
/// let mut iov = Vec::with_capacity(buf.len());
/// for ref mut item in (&mut buf).iter() {
/// iov.push(nc::iovec_t {
/// iov_len: item.len(),
/// iov_base: item.as_ptr() as usize,
/// });
/// }
/// let ret = nc::readv(fd, &mut iov);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
///
/// let path_out = "/tmp/nc-writev";
/// let ret = nc::open(path_out, nc::O_WRONLY | nc::O_CREAT, 0o644);
/// assert!(ret.is_ok());
/// let fd = ret.unwrap();
/// let ret = nc::writev(fd, &iov);
/// assert!(ret.is_ok());
/// assert_eq!(ret, Ok(capacity as nc::ssize_t));
/// assert!(nc::close(fd).is_ok());
/// assert!(nc::unlink(path_out).is_ok());
/// ```
pub fn writev(fd: i32, iov: &[iovec_t]) -> Result<ssize_t, Errno> {
let fd = fd as usize;
let iov_ptr = iov.as_ptr() as usize;
let len = iov.len() as usize;
syscall3(SYS_WRITEV, fd, iov_ptr, len).map(|ret| ret as ssize_t)
}
| {
let olddfd = olddfd as usize;
let oldfilename = CString::new(oldfilename.as_ref());
let oldfilename_ptr = oldfilename.as_ptr() as usize;
let newdfd = newdfd as usize;
let newfilename = CString::new(newfilename.as_ref());
let newfilename_ptr = newfilename.as_ptr() as usize;
syscall4(
SYS_RENAMEAT,
olddfd,
oldfilename_ptr,
newdfd,
newfilename_ptr,
)
.map(drop)
} |
entry_types.py | """
Copyright 2018-present, Facebook, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ..codegen import Codegen
from ..codegen import SIGNED_SOURCE
class CppEntryTypesCodegen(Codegen):
def __init__(self, entries):
super(CppEntryTypesCodegen, self).__init__()
self.entries = entries
if len(self.entries) > 255:
raise ValueError(
"EntryType codegen currently assumes 1-byte entry types.\n"
"See types.EntryTypeEnum for that size."
)
def preferred_filename(self):
return "EntryType.h"
def generate(self):
template = """
// %%SIGNED_SOURCE%%
#pragma once
namespace facebook {
namespace profilo {
namespace entries {
%%ENTRIES_ENUM%%
const char* to_string(EntryType type);
} // namespace entries
} // namespace profilo
} // namespace facebook
""".lstrip()
template = template.replace("%%ENTRIES_ENUM%%", self._generate_entries_enum())
template = template.replace("%%SIGNED_SOURCE%%", SIGNED_SOURCE)
return template
def _generate_entries_enum(self):
template = """
enum class EntryType {
%%NAME_TO_ID_ENTRIES%%
};
""".lstrip()
name_id_entries = ["{0.name} = {0.id},".format(x) for x in self.entries]
name_id_entries = "\n".join(name_id_entries)
name_id_entries = Codegen.indent(name_id_entries)
template = template.replace("%%NAME_TO_ID_ENTRIES%%", name_id_entries)
return template
class CppEntryTypesCppCodegen(Codegen):
def __init__(self, entries):
|
def preferred_filename(self):
return "EntryType.cpp"
def generate(self):
template = """
// %%SIGNED_SOURCE%%
#include <stdexcept>
#include <profilo/entries/EntryType.h>
namespace facebook {
namespace profilo {
namespace entries {
%%TO_STRING%%
} // namespace entries
} // namespace profilo
} // namespace facebook
""".lstrip()
template = template.replace("%%TO_STRING%%", self._generate_to_string())
template = template.replace("%%SIGNED_SOURCE%%", SIGNED_SOURCE)
return template
def _generate_to_string(self):
template = """
const char* to_string(EntryType type) {
switch(type) {
%%CASES%%
default: throw std::invalid_argument("Unknown entry type");
}
}
""".lstrip()
cases = [
'case EntryType::{0.name}: return "{0.name}";'.format(x)
for x in self.entries
]
cases = "\n".join(cases)
cases = Codegen.indent(cases)
cases = Codegen.indent(cases)
template = template.replace("%%CASES%%", cases)
return template
| super(CppEntryTypesCppCodegen, self).__init__()
self.entries = entries |
search_content_favorites_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package content
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
models "github.com/billtrust/looker-go-sdk/models"
)
// SearchContentFavoritesReader is a Reader for the SearchContentFavorites structure.
type SearchContentFavoritesReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *SearchContentFavoritesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewSearchContentFavoritesOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewSearchContentFavoritesBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil |
return nil, result
case 404:
result := NewSearchContentFavoritesNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("unknown error", response, response.Code())
}
}
// NewSearchContentFavoritesOK creates a SearchContentFavoritesOK with default headers values
func NewSearchContentFavoritesOK() *SearchContentFavoritesOK {
return &SearchContentFavoritesOK{}
}
/*SearchContentFavoritesOK handles this case with default header values.
Favorite Content
*/
type SearchContentFavoritesOK struct {
Payload []*models.ContentFavorite
}
func (o *SearchContentFavoritesOK) Error() string {
return fmt.Sprintf("[GET /content_favorite/search][%d] searchContentFavoritesOK %+v", 200, o.Payload)
}
func (o *SearchContentFavoritesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewSearchContentFavoritesBadRequest creates a SearchContentFavoritesBadRequest with default headers values
func NewSearchContentFavoritesBadRequest() *SearchContentFavoritesBadRequest {
return &SearchContentFavoritesBadRequest{}
}
/*SearchContentFavoritesBadRequest handles this case with default header values.
Bad Request
*/
type SearchContentFavoritesBadRequest struct {
Payload *models.Error
}
func (o *SearchContentFavoritesBadRequest) Error() string {
return fmt.Sprintf("[GET /content_favorite/search][%d] searchContentFavoritesBadRequest %+v", 400, o.Payload)
}
func (o *SearchContentFavoritesBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Error)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewSearchContentFavoritesNotFound creates a SearchContentFavoritesNotFound with default headers values
func NewSearchContentFavoritesNotFound() *SearchContentFavoritesNotFound {
return &SearchContentFavoritesNotFound{}
}
/*SearchContentFavoritesNotFound handles this case with default header values.
Not Found
*/
type SearchContentFavoritesNotFound struct {
Payload *models.Error
}
func (o *SearchContentFavoritesNotFound) Error() string {
return fmt.Sprintf("[GET /content_favorite/search][%d] searchContentFavoritesNotFound %+v", 404, o.Payload)
}
func (o *SearchContentFavoritesNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Error)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| {
return nil, err
} |
AlipayCommerceEducateCampusBiztaskFinishRequest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceEducateCampusBiztaskFinishModel import AlipayCommerceEducateCampusBiztaskFinishModel
class AlipayCommerceEducateCampusBiztaskFinishRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceEducateCampusBiztaskFinishModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceEducateCampusBiztaskFinishModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.educate.campus.biztask.finish'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def | (self):
multipart_params = dict()
return multipart_params
| get_multipart_params |
take_all.py | #!/usr/bin/env python3
import logging
import os
import sys
import time
import traceback
from collections import namedtuple
from pathlib import Path
from screenshots import Client, Screenshooter
def env(name, default):
return os.environ.get(name, default)
Spec = namedtuple(
"Spec",
"commands before after geometry delay windows",
defaults=[None, None, None, env("GEOMETRY", "240x135"), env("DELAY", "1x1"), 3],
)
specs = {
"bsp": {
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=5),
"8-windows": Spec(
windows=8,
before=[
"up",
"grow_down",
"left",
"grow_left",
"down",
"right",
"grow_left",
"grow_left",
"toggle_split",
"left",
"left",
"grow_right",
"grow_right",
"grow_up",
"grow_up",
"up",
"toggle_split",
],
),
"toggle_split-from-down-left": Spec(commands=["toggle_split"]),
"toggle_split-from-right": Spec(commands=["toggle_split"], before=["right"]),
# "next": Spec(commands=["next"]), # no effects?
# "previous": Spec(commands=["previous"]), # no effects?
"left": Spec(commands=["left"], before=["right"]),
"right": Spec(commands=["right"]),
"up": Spec(commands=["up"]),
"down": Spec(commands=["down"], before=["up"]),
"shuffle_left": Spec(commands=["shuffle_left"], before=["right"]),
"shuffle_right": Spec(commands=["shuffle_right"]),
"shuffle_up": Spec(commands=["shuffle_up"]),
"shuffle_down": Spec(commands=["shuffle_down"], before=["up"]),
"grow_left": Spec(commands=["grow_left"], before=["right"]),
"grow_right": Spec(commands=["grow_right"]),
"grow_up": Spec(commands=["grow_up"]),
"grow_down": Spec(commands=["grow_down"], before=["up"]),
"flip_left": Spec(commands=["flip_left"], before=["right"]),
"flip_right": Spec(commands=["flip_right"]),
"flip_up": Spec(commands=["flip_up"]),
"flip_down": Spec(commands=["flip_down"], before=["up"]),
"normalize": Spec(
commands=["normalize"],
before=["grow_up", "grow_up", "grow_right", "grow_right"],
),
},
"columns": {
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=4, before=["left", "spawn"]),
"toggle_split": Spec(
commands=[
"toggle_split",
"toggle_split",
"down",
"toggle_split",
"toggle_split",
],
windows=4,
),
"left": Spec(commands=["left"]),
"right": Spec(commands=["right"], before=["left"]),
"up": Spec(commands=["up"], before=["down"]),
"down": Spec(commands=["down"]),
"next": Spec(commands=["next"]),
"previous": Spec(commands=["previous"]),
"shuffle_left": Spec(commands=["shuffle_left"]),
"shuffle_right": Spec(commands=["shuffle_right"], before=["left"]),
"shuffle_up": Spec(commands=["shuffle_up"], before=["down"]),
"shuffle_down": Spec(commands=["shuffle_down"]),
"grow_left": Spec(commands=["grow_left"]),
"grow_right": Spec(commands=["grow_right"], before=["left"]),
"grow_up": Spec(commands=["grow_up"], before=["down"]),
"grow_down": Spec(commands=["grow_down"]),
"normalize": Spec(
commands=["normalize"],
before=["grow_down", "grow_down", "grow_left", "grow_left"],
),
},
"floating": {
# Floating info clients lists clients from all groups,
# breaking our "kill windows" method.
# "2-windows": Spec(windows=2),
# "3-windows": Spec(windows=3),
# "4-windows": Spec(windows=4),
},
"matrix": {
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=5),
"5-windows-add": Spec(windows=5, before=["add"]),
"left": Spec(commands=["left"], windows=4),
"right": Spec(commands=["right"], before=["up", "left"], windows=4),
"up": Spec(commands=["up"], windows=4),
"down": Spec(commands=["down"], before=["up"], windows=4),
"add-delete": Spec(
commands=["add", "add", "delete", "delete", "delete", "add"],
after=["delete"],
windows=5
),
},
"max": {"max": Spec(windows=1)},
"monadtall": {
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=5),
"normalize": Spec(
commands=["normalize"],
windows=4,
before=["maximize", "shrink_main", "shrink_main"],
after=["reset"],
),
"normalize-from-main": Spec(
commands=["normalize"],
windows=4,
before=["maximize", "shrink_main", "shrink_main", "left"],
after=["reset"],
),
"reset": Spec(
commands=["reset"],
windows=4,
before=["maximize", "shrink_main", "shrink_main"],
),
"maximize": Spec(commands=["maximize"], windows=4, after=["reset"]),
"maximize-main": Spec(
commands=["maximize"], windows=4, before=["left"], after=["reset"]
),
"grow": Spec(commands=["grow", "grow", "grow", "grow"], delay="1x2"),
"grow_main": Spec(
commands=["grow_main", "grow_main", "grow_main"],
after=["reset"],
delay="1x2",
),
"shrink_main": Spec(
commands=["shrink_main", "shrink_main", "shrink_main"],
after=["reset"],
delay="1x2",
),
"shrink": Spec(commands=["shrink", "shrink", "shrink", "shrink"], delay="1x2"),
"shuffle_up": Spec(commands=["shuffle_up"]),
"shuffle_down": Spec(commands=["shuffle_down"], before=["up"]),
"flip": Spec(commands=["flip"], after=["flip"]),
# "swap": Spec(commands=["swap"]), # requires 2 args: window1 and window2
"swap_left": Spec(commands=["swap_left"], after=["reset"]),
"swap_right": Spec(commands=["swap_right"], before=["left"], after=["reset"]),
"swap_main": Spec(commands=["swap_main"], after=["reset"]),
"left": Spec(commands=["left"]),
"right": Spec(commands=["right"], before=["left"]),
},
"monadwide": {
# There seems to be a problem with directions. Up cycles through windows
# clock-wise, down cycles through windows counter-clock-wise, left and right
# works normally in the secondary columns, while left from main does nothing
# and right from main moves to the center of the second column. It's like
# the directions are mixed between normal orientation
# and a 90° rotation to the left, like monadtall. Up and down are reversed
# compared to monadtall.
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=5),
"normalize": Spec(
commands=["normalize"],
windows=4,
before=["maximize", "shrink_main", "shrink_main"],
after=["reset"],
),
"normalize-from-main": Spec(
commands=["normalize"],
windows=4,
before=["maximize", "shrink_main", "shrink_main", "down"],
after=["reset"],
),
"reset": Spec(
commands=["reset"],
windows=4,
before=["maximize", "shrink_main", "shrink_main"],
),
"maximize": Spec(commands=["maximize"], windows=4, after=["reset"]),
"maximize-main": Spec(
commands=["maximize"], windows=4, before=["down"], after=["reset"]
),
"grow": Spec(commands=["grow", "grow", "grow", "grow"], delay="1x2"),
"grow_main": Spec(
commands=["grow_main", "grow_main", "grow_main"],
after=["reset"],
delay="1x2",
),
"shrink_main": Spec(
commands=["shrink_main", "shrink_main", "shrink_main"],
after=["reset"],
delay="1x2",
),
"shrink": Spec(commands=["shrink", "shrink", "shrink", "shrink"], delay="1x2"),
"shuffle_up": Spec(commands=["shuffle_up"]),
"shuffle_down": Spec(commands=["shuffle_down"], before=["down"]),
"flip": Spec(commands=["flip"], after=["flip"]),
# "swap": Spec(commands=["swap"]), # requires 2 args: window1 and window2
"swap_left": Spec(commands=["swap_left"], before=["flip"], after=["flip"]),
"swap_right": Spec(commands=["swap_right"], before=["left"]),
"swap_main": Spec(commands=["swap_main"]),
"left": Spec(commands=["left"]),
"right": Spec(commands=["right"], before=["left"]),
},
"ratiotile": {
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=5),
"6-windows": Spec(windows=6),
"7-windows": Spec(windows=7),
"shuffle_down": Spec(
commands=["shuffle_down", "shuffle_down", "shuffle_down"],
windows=5,
delay="1x2",
),
"shuffle_up": Spec(
commands=["shuffle_up", "shuffle_up", "shuffle_up"], windows=5, delay="1x2"
),
# decrease_ratio does not seem to work
# "decrease_ratio": Spec(commands=["decrease_ratio", "decrease_ratio", "decrease_ratio", "decrease_ratio"], windows=5, delay="1x2"),
# increase_ratio does not seem to work
# "increase_ratio": Spec(commands=["increase_ratio", "increase_ratio", "increase_ratio", "increase_ratio"], windows=5, delay="1x2"),
},
"slice": {
# Slice layout freezes the session
# "next": Spec(commands=["next"]),
# "previous": Spec(commands=["previous"]),
},
"stack": {
# There seems to be a confusion between Stack and Columns layouts.
# The Columns layout says: "Extension of the Stack layout"
# and "The screen is split into columns, which can be dynamically added
# or removed", but there are no commands available to add or remove columns.
# Inversely, the Stack layout says: "Unlike the columns layout
# the number of stacks is fixed", yet the two commands
# "cmd_add" and "cmd_delete" allow for a dynamic number of stacks!
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=5),
"toggle_split": Spec(
commands=["toggle_split"],
windows=4,
before=["down", "down"],
after=["toggle_split"],
),
"down": Spec(commands=["down"], windows=4),
"up": Spec(commands=["up"], before=["down"], windows=4),
"shuffle_down": Spec(commands=["shuffle_down"], windows=4),
"shuffle_up": Spec(commands=["shuffle_up"], before=["down"], windows=4),
"add-delete": Spec(
commands=["add", "add", "spawn", "spawn", "spawn", "delete", "delete"]
),
"rotate": Spec(commands=["rotate"]),
"next": Spec(commands=["next"], before=["add", "spawn"], after=["delete"]),
"previous": Spec(
commands=["previous"], before=["add", "spawn"], after=["delete"] | commands=["client_to_next"], before=["add", "spawn"], after=["delete"]
),
"client_to_previous": Spec(
commands=["client_to_previous"], before=["add", "spawn"], after=["delete"]
),
# "client_to_stack": Spec(commands=["client_to_stack"]), # requires 1 argument
},
"tile": {
# Tile: no docstring at all in the code.
"2-windows": Spec(windows=2),
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"5-windows": Spec(windows=5),
"shuffle_down": Spec(
commands=["shuffle_down", "shuffle_down", "shuffle_down"], windows=4
),
"shuffle_up": Spec(
commands=["shuffle_up", "shuffle_up", "shuffle_up"], windows=4
),
"increase-decrease-ratio": Spec(
commands=[
"increase_ratio",
"increase_ratio",
"increase_ratio",
"decrease_ratio",
"decrease_ratio",
"decrease_ratio",
],
before=["down"],
delay="1x3",
),
"increase-decrease-nmaster": Spec(
commands=[
"increase_nmaster",
"increase_nmaster",
"increase_nmaster",
"decrease_nmaster",
"decrease_nmaster",
"decrease_nmaster",
],
delay="1x3",
),
},
"treetab": {
# TreeTab info clients lists clients from all groups,
# breaking our "kill windows" method.
# See https://github.com/qtile/qtile/issues/1459
# "1-window": Spec(windows=1),
# "2-windows": Spec(windows=2),
# "3-windows": Spec(windows=3),
# "4-windows": Spec(windows=4),
# "down": Spec(commands=["down"]),
# "up": Spec(commands=["up"]),
# "move_down": Spec(commands=["move_down"]),
# "move_up": Spec(commands=["move_up"]),
# "move_left": Spec(commands=["move_left"]),
# "move_right": Spec(commands=["move_right"]),
# "add_section": Spec(commands=["add_section"]),
# "del_section": Spec(commands=["del_section"]),
# "section_up": Spec(commands=["section_up"]),
# "section_down": Spec(commands=["section_down"]),
# "sort_windows": Spec(commands=["sort_windows"]),
# "expand_branch": Spec(commands=["expand_branch"]),
# "collapse_branch": Spec(commands=["collapse_branch"]),
# "decrease_ratio": Spec(commands=["decrease_ratio"]),
# "increase_ratio": Spec(commands=["increase_ratio"]),
},
"verticaltile": {
"3-windows": Spec(windows=3),
"4-windows": Spec(before=["up", "maximize"], windows=4),
"shuffle_down": Spec(
commands=["shuffle_down", "shuffle_down"], before=["up", "up"]
),
"shuffle_up": Spec(commands=["shuffle_up", "shuffle_up"]),
"shuffle_down-maximize": Spec(
commands=["shuffle_down", "shuffle_down"], before=["up", "maximize", "up"]
),
"shuffle_up-maximize": Spec(
commands=["shuffle_up", "shuffle_up"], before=["up", "maximize", "down"]
),
"maximize": Spec(commands=["maximize"]),
"normalize": Spec(
commands=["normalize"], before=["up", "maximize", "shrink", "shrink"]
),
"grow-shrink": Spec(
commands=["grow", "grow", "shrink", "shrink"],
before=["maximize", "shrink", "shrink"],
after=["normalize"],
delay="1x2",
),
},
"zoomy": {
"3-windows": Spec(windows=3),
"4-windows": Spec(windows=4),
"next-or-down": Spec(commands=["next", "next"], windows=4),
"previous-or-up": Spec(commands=["previous", "previous"], windows=4),
},
}
client = Client()
output_dir = Path("docs") / "screenshots" / "layout"
def take(name, layout, spec):
"""Take the specified screenshots and optionally animate them."""
# prepare the layout
try:
client.prepare_layout(layout, spec.windows, spec.before or [])
except Exception:
client.kill_group_windows()
return False, "While preparing layout:\n" + traceback.format_exc()
time.sleep(0.5)
# initialize screenshooter, create output directory
layout_dir = output_dir / layout
layout_dir.mkdir(parents=True, exist_ok=True)
commands = spec.commands or []
screen = Screenshooter(layout_dir / name, spec.geometry, spec.delay)
errors = []
# take initial screenshot (without number if it's the only one)
screen.shoot(numbered=bool(commands))
# take screenshots for each command, animate them at the end
if commands:
for command in commands:
try:
client.run_layout_command(command)
except Exception:
errors.append(
"While running command {}:\n{}".format(
command, traceback.format_exc()
)
)
break
time.sleep(0.05)
screen.shoot()
screen.animate(clear=True)
# cleanup the layout
try:
client.clean_layout(spec.after or [])
except Exception:
errors.append("While cleaning layout:\n" + traceback.format_exc())
if errors:
return False, "\n\n".join(errors)
return True, ""
def get_selection(args):
"""Parse args of the form LAYOUT, LAYOUT:NAME or LAYOUT:NAME1,NAME2."""
if not args:
return [
(layout, sorted(specs[layout].keys())) for layout in sorted(specs.keys())
]
errors = []
selection = []
for arg in args:
if ":" in arg:
layout, names = arg.split(":")
if layout not in specs:
errors.append("There is no spec for layout " + layout)
continue
names = names.split(",")
for name in names:
if name not in specs[layout]:
errors.append("There is no spec for {}:{}".format(layout, name))
selection.append((layout, names))
else:
if arg not in specs:
errors.append("There is no spec for layout " + arg)
continue
selection.append((arg, sorted(specs[arg].keys())))
if errors:
raise LookupError("\n".join(errors))
return selection
def main(args=None):
logging.basicConfig(
filename=env("LOG_PATH", "docs/screenshots/take_all.log"),
format="%(asctime)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
# get selection of specs, exit if they don't exist
try:
selection = get_selection(args)
except LookupError as error:
logging.error("Wrong selection:\n" + str(error))
return 1
# switch to group
original_group = client.current_group()
client.switch_to_group("s")
# take screenshots/animations for each selected spec
ok = True
for layout, names in selection:
for name in names:
success, errors = take(name, layout, specs[layout][name])
if success:
logging.info("Shooting {}:{} - OK!".format(layout, name))
else:
ok = False
logging.error(
"Shooting {}:{} - failed:\n{}".format(layout, name, errors)
)
# switch back to original group
client.switch_to_group(original_group)
return 0 if ok else 1
if __name__ == "__main__":
sys.exit(main(sys.argv[1:])) | ),
"client_to_next": Spec( |
interrupt.rs | use std::collections::HashMap;
use std::fmt::Write;
use cast::u64;
use quote::Tokens;
use svd::Peripheral;
use syn::Ident;
use errors::*;
use util::{self, ToSanitizedUpperCase};
use Target;
/// Generates code for `src/interrupt.rs`
pub fn | (
target: &Target,
peripherals: &[Peripheral],
device_x: &mut String,
) -> Result<Vec<Tokens>> {
let interrupts = peripherals
.iter()
.flat_map(|p| p.interrupt.iter())
.map(|i| (i.value, i))
.collect::<HashMap<_, _>>();
let mut interrupts = interrupts.into_iter().map(|(_, v)| v).collect::<Vec<_>>();
interrupts.sort_by_key(|i| i.value);
let mut root = vec![];
let mut arms = vec![];
let mut from_arms = vec![];
let mut elements = vec![];
let mut names = vec![];
let mut variants = vec![];
// Current position in the vector table
let mut pos = 0;
let mut mod_items = vec![];
for interrupt in &interrupts {
while pos < interrupt.value {
elements.push(quote!(Vector { _reserved: 0 }));
pos += 1;
}
pos += 1;
let name_uc = Ident::new(interrupt.name.to_sanitized_upper_case());
let description = format!(
"{} - {}",
interrupt.value,
interrupt
.description
.as_ref()
.map(|s| util::respace(s))
.as_ref()
.map(|s| util::escape_brackets(s))
.unwrap_or_else(|| interrupt.name.clone())
);
let value = util::unsuffixed(u64(interrupt.value));
variants.push(quote! {
#[doc = #description]
#name_uc,
});
arms.push(quote! {
Interrupt::#name_uc => #value,
});
from_arms.push(quote! {
#value => Ok(Interrupt::#name_uc),
});
elements.push(quote!(Vector { _handler: #name_uc }));
names.push(name_uc);
}
let n = util::unsuffixed(u64(pos));
match *target {
Target::CortexM => {
for name in &names {
writeln!(device_x, "PROVIDE({} = DefaultHandler);" ,name).unwrap();
}
root.push(quote! {
#[cfg(feature = "rt")]
extern "C" {
#(fn #names();)*
}
#[doc(hidden)]
pub union Vector {
_handler: unsafe extern "C" fn(),
_reserved: u32,
}
#[cfg(feature = "rt")]
#[doc(hidden)]
#[link_section = ".vector_table.interrupts"]
#[no_mangle]
pub static __INTERRUPTS: [Vector; #n] = [
#(#elements,)*
];
});
}
Target::Msp430 => {
let aliases = names
.iter()
.map(|n| {
format!(
"
.weak {0}
{0} = DH_TRAMPOLINE",
n
)
})
.collect::<Vec<_>>()
.concat();
mod_items.push(quote! {
#[cfg(feature = "rt")]
global_asm!("
DH_TRAMPOLINE:
br #DEFAULT_HANDLER
");
#[cfg(feature = "rt")]
global_asm!(#aliases);
#[cfg(feature = "rt")]
extern "msp430-interrupt" {
#(fn #names();)*
}
#[doc(hidden)]
pub union Vector {
_handler: unsafe extern "msp430-interrupt" fn(),
_reserved: u32,
}
#[allow(renamed_and_removed_lints)]
// This currently breaks on nightly, to be removed with the line above once 1.31 is stable
#[allow(private_no_mangle_statics)]
#[cfg(feature = "rt")]
#[doc(hidden)]
#[link_section = ".vector_table.interrupts"]
#[no_mangle]
#[used]
pub static INTERRUPTS:
[Vector; #n] = [
#(#elements,)*
];
});
}
Target::RISCV => {}
Target::None => {}
}
let interrupt_enum = quote! {
/// Enumeration of all the interrupts
pub enum Interrupt {
#(#variants)*
}
unsafe impl ::bare_metal::Nr for Interrupt {
#[inline]
fn nr(&self) -> u8 {
match *self {
#(#arms)*
}
}
}
};
if *target == Target::CortexM {
root.push(interrupt_enum);
} else {
mod_items.push(quote! {
#interrupt_enum
#[derive(Debug, Copy, Clone)]
pub struct TryFromInterruptError(());
impl Interrupt {
#[inline]
pub fn try_from(value: u8) -> Result<Self, TryFromInterruptError> {
match value {
#(#from_arms)*
_ => Err(TryFromInterruptError(())),
}
}
}
});
}
if *target != Target::None {
let abi = match *target {
Target::Msp430 => "msp430-interrupt",
_ => "C",
};
if *target != Target::CortexM {
mod_items.push(quote! {
#[cfg(feature = "rt")]
#[macro_export]
macro_rules! interrupt {
($NAME:ident, $path:path, locals: {
$($lvar:ident:$lty:ty = $lval:expr;)*
}) => {
#[allow(non_snake_case)]
mod $NAME {
pub struct Locals {
$(
pub $lvar: $lty,
)*
}
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern #abi fn $NAME() {
// check that the handler exists
let _ = $crate::interrupt::Interrupt::$NAME;
static mut LOCALS: self::$NAME::Locals =
self::$NAME::Locals {
$(
$lvar: $lval,
)*
};
// type checking
let f: fn(&mut self::$NAME::Locals) = $path;
f(unsafe { &mut LOCALS });
}
};
($NAME:ident, $path:path) => {
#[allow(non_snake_case)]
#[no_mangle]
pub extern #abi fn $NAME() {
// check that the handler exists
let _ = $crate::interrupt::Interrupt::$NAME;
// type checking
let f: fn() = $path;
f();
}
}
}
});
}
}
if !interrupts.is_empty() {
if *target != Target::CortexM {
root.push(quote! {
#[doc(hidden)]
pub mod interrupt {
#(#mod_items)*
}
});
root.push(quote! {
pub use self::interrupt::Interrupt;
});
}
}
Ok(root)
}
from sympy import (symbols, Symbol, nan, oo, zoo, I, sinh, sin, pi, atan,
acos, Rational, sqrt, asin, acot, coth, E, S, tan, tanh, cos,
cosh, atan2, exp, log, asinh, acoth, atanh, O, cancel, Matrix, re, im,
Float, Pow, gcd, sec, csc, cot, diff, simplify, Heaviside, arg,
conjugate, series, FiniteSet, asec, acsc, Mul, sinc, jn,
AccumBounds, Interval, ImageSet, Lambda, besselj)
from sympy.core.compatibility import range
from sympy.core.expr import unchanged
from sympy.core.function import ArgumentIndexError
from sympy.core.relational import Ne, Eq
from sympy.functions.elementary.piecewise import Piecewise
from sympy.sets.setexpr import SetExpr
from sympy.utilities.pytest import XFAIL, slow, raises
x, y, z = symbols('x y z')
r = Symbol('r', real=True)
k = Symbol('k', integer=True)
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
np = Symbol('p', nonpositive=True)
nn = Symbol('n', nonnegative=True)
nz = Symbol('nz', nonzero=True)
ep = Symbol('ep', extended_positive=True)
en = Symbol('en', extended_negative=True)
enp = Symbol('ep', extended_nonpositive=True)
enn = Symbol('en', extended_nonnegative=True)
enz = Symbol('enz', extended_nonzero=True)
a = Symbol('a', algebraic=True)
na = Symbol('na', nonzero=True, algebraic=True)
def test_sin():
x, y = symbols('x y')
assert sin.nargs == FiniteSet(1)
assert sin(nan) is nan
assert sin(zoo) is nan
assert sin(oo) == AccumBounds(-1, 1)
assert sin(oo) - sin(oo) == AccumBounds(-2, 2)
assert sin(oo*I) == oo*I
assert sin(-oo*I) == -oo*I
assert 0*sin(oo) is S.Zero
assert 0/sin(oo) is S.Zero
assert 0 + sin(oo) == AccumBounds(-1, 1)
assert 5 + sin(oo) == AccumBounds(4, 6)
assert sin(0) == 0
assert sin(asin(x)) == x
assert sin(atan(x)) == x / sqrt(1 + x**2)
assert sin(acos(x)) == sqrt(1 - x**2)
assert sin(acot(x)) == 1 / (sqrt(1 + 1 / x**2) * x)
assert sin(acsc(x)) == 1 / x
assert sin(asec(x)) == sqrt(1 - 1 / x**2)
assert sin(atan2(y, x)) == y / sqrt(x**2 + y**2)
assert sin(pi*I) == sinh(pi)*I
assert sin(-pi*I) == -sinh(pi)*I
assert sin(-2*I) == -sinh(2)*I
assert sin(pi) == 0
assert sin(-pi) == 0
assert sin(2*pi) == 0
assert sin(-2*pi) == 0
assert sin(-3*10**73*pi) == 0
assert sin(7*10**103*pi) == 0
assert sin(pi/2) == 1
assert sin(-pi/2) == -1
assert sin(pi*Rational(5, 2)) == 1
assert sin(pi*Rational(7, 2)) == -1
ne = symbols('ne', integer=True, even=False)
e = symbols('e', even=True)
assert sin(pi*ne/2) == (-1)**(ne/2 - S.Half)
assert sin(pi*k/2).func == sin
assert sin(pi*e/2) == 0
assert sin(pi*k) == 0
assert sin(pi*k).subs(k, 3) == sin(pi*k/2).subs(k, 6) # issue 8298
assert sin(pi/3) == S.Half*sqrt(3)
assert sin(pi*Rational(-2, 3)) == Rational(-1, 2)*sqrt(3)
assert sin(pi/4) == S.Half*sqrt(2)
assert sin(-pi/4) == Rational(-1, 2)*sqrt(2)
assert sin(pi*Rational(17, 4)) == S.Half*sqrt(2)
assert sin(pi*Rational(-3, 4)) == Rational(-1, 2)*sqrt(2)
assert sin(pi/6) == S.Half
assert sin(-pi/6) == Rational(-1, 2)
assert sin(pi*Rational(7, 6)) == Rational(-1, 2)
assert sin(pi*Rational(-5, 6)) == Rational(-1, 2)
assert sin(pi*Rational(1, 5)) == sqrt((5 - sqrt(5)) / 8)
assert sin(pi*Rational(2, 5)) == sqrt((5 + sqrt(5)) / 8)
assert sin(pi*Rational(3, 5)) == sin(pi*Rational(2, 5))
assert sin(pi*Rational(4, 5)) == sin(pi*Rational(1, 5))
assert sin(pi*Rational(6, 5)) == -sin(pi*Rational(1, 5))
assert sin(pi*Rational(8, 5)) == -sin(pi*Rational(2, 5))
assert sin(pi*Rational(-1273, 5)) == -sin(pi*Rational(2, 5))
assert sin(pi/8) == sqrt((2 - sqrt(2))/4)
assert sin(pi/10) == Rational(-1, 4) + sqrt(5)/4
assert sin(pi/12) == -sqrt(2)/4 + sqrt(6)/4
assert sin(pi*Rational(5, 12)) == sqrt(2)/4 + sqrt(6)/4
assert sin(pi*Rational(-7, 12)) == -sqrt(2)/4 - sqrt(6)/4
assert sin(pi*Rational(-11, 12)) == sqrt(2)/4 - sqrt(6)/4
assert sin(pi*Rational(104, 105)) == sin(pi/105)
assert sin(pi*Rational(106, 105)) == -sin(pi/105)
assert sin(pi*Rational(-104, 105)) == -sin(pi/105)
assert sin(pi*Rational(-106, 105)) == sin(pi/105)
assert sin(x*I) == sinh(x)*I
assert sin(k*pi) == 0
assert sin(17*k*pi) == 0
assert sin(k*pi*I) == sinh(k*pi)*I
assert sin(r).is_real is True
assert sin(0, evaluate=False).is_algebraic
assert sin(a).is_algebraic is None
assert sin(na).is_algebraic is False
q = Symbol('q', rational=True)
assert sin(pi*q).is_algebraic
qn = Symbol('qn', rational=True, nonzero=True)
assert sin(qn).is_rational is False
assert sin(q).is_rational is None # issue 8653
assert isinstance(sin( re(x) - im(y)), sin) is True
assert isinstance(sin(-re(x) + im(y)), sin) is False
assert sin(SetExpr(Interval(0, 1))) == SetExpr(ImageSet(Lambda(x, sin(x)),
Interval(0, 1)))
for d in list(range(1, 22)) + [60, 85]:
for n in range(0, d*2 + 1):
x = n*pi/d
e = abs( float(sin(x)) - sin(float(x)) )
assert e < 1e-12
def test_sin_cos():
    """Phase-shift identities sin(x +- pi/2) == +-cos(x) on a grid of
    rational multiples of pi (the denominator list is not exhaustive)."""
    denominators = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 24, 30, 40, 60, 120]
    for den in denominators:
        for num in range(-2*den, den*2):
            angle = num*pi/den
            msg = "fails for %d*pi/%d" % (num, den)
            assert sin(angle + pi/2) == cos(angle), msg
            assert sin(angle - pi/2) == -cos(angle), msg
            assert sin(angle) == cos(angle - pi/2), msg
            assert -sin(angle) == cos(angle + pi/2), msg
def test_sin_series():
assert sin(x).series(x, 0, 9) == \
x - x**3/6 + x**5/120 - x**7/5040 + O(x**9)
def test_sin_rewrite():
assert sin(x).rewrite(exp) == -I*(exp(I*x) - exp(-I*x))/2
assert sin(x).rewrite(tan) == 2*tan(x/2)/(1 + tan(x/2)**2)
assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2)
assert sin(sinh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sinh(3)).n()
assert sin(cosh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cosh(3)).n()
assert sin(tanh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tanh(3)).n()
assert sin(coth(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, coth(3)).n()
assert sin(sin(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sin(3)).n()
assert sin(cos(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cos(3)).n()
assert sin(tan(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tan(3)).n()
assert sin(cot(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cot(3)).n()
assert sin(log(x)).rewrite(Pow) == I*x**-I / 2 - I*x**I /2
assert sin(x).rewrite(csc) == 1/csc(x)
assert sin(x).rewrite(cos) == cos(x - pi / 2, evaluate=False)
assert sin(x).rewrite(sec) == 1 / sec(x - pi / 2, evaluate=False)
assert sin(cos(x)).rewrite(Pow) == sin(cos(x))
def test_sin_expansion():
# Note: these formulas are not unique. The ones here come from the
# Chebyshev formulas.
assert sin(x + y).expand(trig=True) == sin(x)*cos(y) + cos(x)*sin(y)
assert sin(x - y).expand(trig=True) == sin(x)*cos(y) - cos(x)*sin(y)
assert sin(y - x).expand(trig=True) == cos(x)*sin(y) - sin(x)*cos(y)
assert sin(2*x).expand(trig=True) == 2*sin(x)*cos(x)
assert sin(3*x).expand(trig=True) == -4*sin(x)**3 + 3*sin(x)
assert sin(4*x).expand(trig=True) == -8*sin(x)**3*cos(x) + 4*sin(x)*cos(x)
assert sin(2).expand(trig=True) == 2*sin(1)*cos(1)
assert sin(3).expand(trig=True) == -4*sin(1)**3 + 3*sin(1)
def test_sin_AccumBounds():
assert sin(AccumBounds(-oo, oo)) == AccumBounds(-1, 1)
assert sin(AccumBounds(0, oo)) == AccumBounds(-1, 1)
assert sin(AccumBounds(-oo, 0)) == AccumBounds(-1, 1)
assert sin(AccumBounds(0, 2*S.Pi)) == AccumBounds(-1, 1)
assert sin(AccumBounds(0, S.Pi*Rational(3, 4))) == AccumBounds(0, 1)
assert sin(AccumBounds(S.Pi*Rational(3, 4), S.Pi*Rational(7, 4))) == AccumBounds(-1, sin(S.Pi*Rational(3, 4)))
assert sin(AccumBounds(S.Pi/4, S.Pi/3)) == AccumBounds(sin(S.Pi/4), sin(S.Pi/3))
assert sin(AccumBounds(S.Pi*Rational(3, 4), S.Pi*Rational(5, 6))) == AccumBounds(sin(S.Pi*Rational(5, 6)), sin(S.Pi*Rational(3, 4)))
def test_sin_fdiff():
assert sin(x).fdiff() == cos(x)
raises(ArgumentIndexError, lambda: sin(x).fdiff(2))
def test_trig_symmetry():
assert sin(-x) == -sin(x)
assert cos(-x) == cos(x)
assert tan(-x) == -tan(x)
assert cot(-x) == -cot(x)
assert sin(x + pi) == -sin(x)
assert sin(x + 2*pi) == sin(x)
assert sin(x + 3*pi) == -sin(x)
assert sin(x + 4*pi) == sin(x)
assert sin(x - 5*pi) == -sin(x)
assert cos(x + pi) == -cos(x)
assert cos(x + 2*pi) == cos(x)
assert cos(x + 3*pi) == -cos(x)
assert cos(x + 4*pi) == cos(x)
assert cos(x - 5*pi) == -cos(x)
assert tan(x + pi) == tan(x)
assert tan(x - 3*pi) == tan(x)
assert cot(x + pi) == cot(x)
assert cot(x - 3*pi) == cot(x)
assert sin(pi/2 - x) == cos(x)
assert sin(pi*Rational(3, 2) - x) == -cos(x)
assert sin(pi*Rational(5, 2) - x) == cos(x)
assert cos(pi/2 - x) == sin(x)
assert cos(pi*Rational(3, 2) - x) == -sin(x)
assert cos(pi*Rational(5, 2) - x) == sin(x)
assert tan(pi/2 - x) == cot(x)
assert tan(pi*Rational(3, 2) - x) == cot(x)
assert tan(pi*Rational(5, 2) - x) == cot(x)
assert cot(pi/2 - x) == tan(x)
assert cot(pi*Rational(3, 2) - x) == tan(x)
assert cot(pi*Rational(5, 2) - x) == tan(x)
assert sin(pi/2 + x) == cos(x)
assert cos(pi/2 + x) == -sin(x)
assert tan(pi/2 + x) == -cot(x)
assert cot(pi/2 + x) == -tan(x)
def test_cos():
    """Special values, inverse-composition identities, assumption handling
    and numeric accuracy of cos.

    The verbatim duplicate ``cos(pi/2)``/``cos(-pi/2)`` assertions that
    followed the first pair have been removed.
    """
    x, y = symbols('x y')
    assert cos.nargs == FiniteSet(1)
    assert cos(nan) is nan
    # At real infinity cos oscillates, hence an AccumBounds interval.
    assert cos(oo) == AccumBounds(-1, 1)
    assert cos(oo) - cos(oo) == AccumBounds(-2, 2)
    assert cos(oo*I) is oo
    assert cos(-oo*I) is oo
    assert cos(zoo) is nan
    assert cos(0) == 1
    # cos composed with the inverse trig functions.
    assert cos(acos(x)) == x
    assert cos(atan(x)) == 1 / sqrt(1 + x**2)
    assert cos(asin(x)) == sqrt(1 - x**2)
    assert cos(acot(x)) == 1 / sqrt(1 + 1 / x**2)
    assert cos(acsc(x)) == sqrt(1 - 1 / x**2)
    assert cos(asec(x)) == 1 / x
    assert cos(atan2(y, x)) == x / sqrt(x**2 + y**2)
    assert cos(pi*I) == cosh(pi)
    assert cos(-pi*I) == cosh(pi)
    assert cos(-2*I) == cosh(2)
    assert cos(pi/2) == 0
    assert cos(-pi/2) == 0
    assert cos((-3*10**73 + 1)*pi/2) == 0
    assert cos((7*10**103 + 1)*pi/2) == 0
    n = symbols('n', integer=True, even=False)
    e = symbols('e', even=True)
    assert cos(pi*n/2) == 0
    assert cos(pi*e/2) == (-1)**(e/2)
    assert cos(pi) == -1
    assert cos(-pi) == -1
    assert cos(2*pi) == 1
    assert cos(5*pi) == -1
    assert cos(8*pi) == 1
    assert cos(pi/3) == S.Half
    assert cos(pi*Rational(-2, 3)) == Rational(-1, 2)
    assert cos(pi/4) == S.Half*sqrt(2)
    assert cos(-pi/4) == S.Half*sqrt(2)
    assert cos(pi*Rational(11, 4)) == Rational(-1, 2)*sqrt(2)
    assert cos(pi*Rational(-3, 4)) == Rational(-1, 2)*sqrt(2)
    assert cos(pi/6) == S.Half*sqrt(3)
    assert cos(-pi/6) == S.Half*sqrt(3)
    assert cos(pi*Rational(7, 6)) == Rational(-1, 2)*sqrt(3)
    assert cos(pi*Rational(-5, 6)) == Rational(-1, 2)*sqrt(3)
    assert cos(pi*Rational(1, 5)) == (sqrt(5) + 1)/4
    assert cos(pi*Rational(2, 5)) == (sqrt(5) - 1)/4
    assert cos(pi*Rational(3, 5)) == -cos(pi*Rational(2, 5))
    assert cos(pi*Rational(4, 5)) == -cos(pi*Rational(1, 5))
    assert cos(pi*Rational(6, 5)) == -cos(pi*Rational(1, 5))
    assert cos(pi*Rational(8, 5)) == cos(pi*Rational(2, 5))
    assert cos(pi*Rational(-1273, 5)) == -cos(pi*Rational(2, 5))
    assert cos(pi/8) == sqrt((2 + sqrt(2))/4)
    assert cos(pi/12) == sqrt(2)/4 + sqrt(6)/4
    assert cos(pi*Rational(5, 12)) == -sqrt(2)/4 + sqrt(6)/4
    assert cos(pi*Rational(7, 12)) == sqrt(2)/4 - sqrt(6)/4
    assert cos(pi*Rational(11, 12)) == -sqrt(2)/4 - sqrt(6)/4
    assert cos(pi*Rational(104, 105)) == -cos(pi/105)
    assert cos(pi*Rational(106, 105)) == -cos(pi/105)
    assert cos(pi*Rational(-104, 105)) == -cos(pi/105)
    assert cos(pi*Rational(-106, 105)) == -cos(pi/105)
    assert cos(x*I) == cosh(x)
    assert cos(k*pi*I) == cosh(k*pi)
    assert cos(r).is_real is True
    assert cos(0, evaluate=False).is_algebraic
    assert cos(a).is_algebraic is None
    assert cos(na).is_algebraic is False
    q = Symbol('q', rational=True)
    assert cos(pi*q).is_algebraic
    assert cos(pi*Rational(2, 7)).is_algebraic
    assert cos(k*pi) == (-1)**k
    assert cos(2*k*pi) == 1
    # Numeric spot-check: symbolic evaluation agrees with float evaluation.
    for d in list(range(1, 22)) + [60, 85]:
        for n in range(0, 2*d + 1):
            x = n*pi/d
            e = abs( float(cos(x)) - cos(float(x)) )
            assert e < 1e-12
def test_issue_6190():
    """Float multiples of pi with an exact dyadic fractional part must be
    reduced to the equivalent exact angle (issue 6190)."""
    big = Float('123456789012345678901234567890.25', '')
    for func in (sin, cos, tan, cot):
        assert func(big*pi) == func(pi/4)
        assert func(4.125*pi) == func(pi/8)
        assert func(4.7*pi) == func((4.7 % 2)*pi)
def test_cos_series():
assert cos(x).series(x, 0, 9) == \
1 - x**2/2 + x**4/24 - x**6/720 + x**8/40320 + O(x**9)
def test_cos_rewrite():
assert cos(x).rewrite(exp) == exp(I*x)/2 + exp(-I*x)/2
assert cos(x).rewrite(tan) == (1 - tan(x/2)**2)/(1 + tan(x/2)**2)
assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2)
assert cos(sinh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sinh(3)).n()
assert cos(cosh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cosh(3)).n()
assert cos(tanh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tanh(3)).n()
assert cos(coth(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, coth(3)).n()
assert cos(sin(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sin(3)).n()
assert cos(cos(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cos(3)).n()
assert cos(tan(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tan(3)).n()
assert cos(cot(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cot(3)).n()
assert cos(log(x)).rewrite(Pow) == x**I/2 + x**-I/2
assert cos(x).rewrite(sec) == 1/sec(x)
assert cos(x).rewrite(sin) == sin(x + pi/2, evaluate=False)
assert cos(x).rewrite(csc) == 1/csc(-x + pi/2, evaluate=False)
assert cos(sin(x)).rewrite(Pow) == cos(sin(x))
def test_cos_expansion():
assert cos(x + y).expand(trig=True) == cos(x)*cos(y) - sin(x)*sin(y)
assert cos(x - y).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
assert cos(y - x).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
assert cos(2*x).expand(trig=True) == 2*cos(x)**2 - 1
assert cos(3*x).expand(trig=True) == 4*cos(x)**3 - 3*cos(x)
assert cos(4*x).expand(trig=True) == 8*cos(x)**4 - 8*cos(x)**2 + 1
assert cos(2).expand(trig=True) == 2*cos(1)**2 - 1
assert cos(3).expand(trig=True) == 4*cos(1)**3 - 3*cos(1)
def test_cos_AccumBounds():
assert cos(AccumBounds(-oo, oo)) == AccumBounds(-1, 1)
assert cos(AccumBounds(0, oo)) == AccumBounds(-1, 1)
assert cos(AccumBounds(-oo, 0)) == AccumBounds(-1, 1)
assert cos(AccumBounds(0, 2*S.Pi)) == AccumBounds(-1, 1)
assert cos(AccumBounds(-S.Pi/3, S.Pi/4)) == AccumBounds(cos(-S.Pi/3), 1)
assert cos(AccumBounds(S.Pi*Rational(3, 4), S.Pi*Rational(5, 4))) == AccumBounds(-1, cos(S.Pi*Rational(3, 4)))
assert cos(AccumBounds(S.Pi*Rational(5, 4), S.Pi*Rational(4, 3))) == AccumBounds(cos(S.Pi*Rational(5, 4)), cos(S.Pi*Rational(4, 3)))
assert cos(AccumBounds(S.Pi/4, S.Pi/3)) == AccumBounds(cos(S.Pi/3), cos(S.Pi/4))
def test_cos_fdiff():
assert cos(x).fdiff() == -sin(x)
raises(ArgumentIndexError, lambda: cos(x).fdiff(2))
def test_tan():
assert tan(nan) is nan
assert tan(zoo) is nan
assert tan(oo) == AccumBounds(-oo, oo)
assert tan(oo) - tan(oo) == AccumBounds(-oo, oo)
assert tan.nargs == FiniteSet(1)
assert tan(oo*I) == I
assert tan(-oo*I) == -I
assert tan(0) == 0
assert tan(atan(x)) == x
assert tan(asin(x)) == x / sqrt(1 - x**2)
assert tan(acos(x)) == sqrt(1 - x**2) / x
assert tan(acot(x)) == 1 / x
assert tan(acsc(x)) == 1 / (sqrt(1 - 1 / x**2) * x)
assert tan(asec(x)) == sqrt(1 - 1 / x**2) * x
assert tan(atan2(y, x)) == y/x
assert tan(pi*I) == tanh(pi)*I
assert tan(-pi*I) == -tanh(pi)*I
assert tan(-2*I) == -tanh(2)*I
assert tan(pi) == 0
assert tan(-pi) == 0
assert tan(2*pi) == 0
assert tan(-2*pi) == 0
assert tan(-3*10**73*pi) == 0
assert tan(pi/2) is zoo
assert tan(pi*Rational(3, 2)) is zoo
assert tan(pi/3) == sqrt(3)
assert tan(pi*Rational(-2, 3)) == sqrt(3)
assert tan(pi/4) is S.One
assert tan(-pi/4) is S.NegativeOne
assert tan(pi*Rational(17, 4)) is S.One
assert tan(pi*Rational(-3, 4)) is S.One
assert tan(pi/5) == sqrt(5 - 2*sqrt(5))
assert tan(pi*Rational(2, 5)) == sqrt(5 + 2*sqrt(5))
assert tan(pi*Rational(18, 5)) == -sqrt(5 + 2*sqrt(5))
assert tan(pi*Rational(-16, 5)) == -sqrt(5 - 2*sqrt(5))
assert tan(pi/6) == 1/sqrt(3)
assert tan(-pi/6) == -1/sqrt(3)
assert tan(pi*Rational(7, 6)) == 1/sqrt(3)
assert tan(pi*Rational(-5, 6)) == 1/sqrt(3)
assert tan(pi/8) == -1 + sqrt(2)
assert tan(pi*Rational(3, 8)) == 1 + sqrt(2) # issue 15959
assert tan(pi*Rational(5, 8)) == -1 - sqrt(2)
assert tan(pi*Rational(7, 8)) == 1 - sqrt(2)
assert tan(pi/10) == sqrt(1 - 2*sqrt(5)/5)
assert tan(pi*Rational(3, 10)) == sqrt(1 + 2*sqrt(5)/5)
assert tan(pi*Rational(17, 10)) == -sqrt(1 + 2*sqrt(5)/5)
assert tan(pi*Rational(-31, 10)) == -sqrt(1 - 2*sqrt(5)/5)
assert tan(pi/12) == -sqrt(3) + 2
assert tan(pi*Rational(5, 12)) == sqrt(3) + 2
assert tan(pi*Rational(7, 12)) == -sqrt(3) - 2
assert tan(pi*Rational(11, 12)) == sqrt(3) - 2
assert tan(pi/24).radsimp() == -2 - sqrt(3) + sqrt(2) + sqrt(6)
assert tan(pi*Rational(5, 24)).radsimp() == -2 + sqrt(3) - sqrt(2) + sqrt(6)
assert tan(pi*Rational(7, 24)).radsimp() == 2 - sqrt(3) - sqrt(2) + sqrt(6)
assert tan(pi*Rational(11, 24)).radsimp() == 2 + sqrt(3) + sqrt(2) + sqrt(6)
assert tan(pi*Rational(13, 24)).radsimp() == -2 - sqrt(3) - sqrt(2) - sqrt(6)
assert tan(pi*Rational(17, 24)).radsimp() == -2 + sqrt(3) + sqrt(2) - sqrt(6)
assert tan(pi*Rational(19, 24)).radsimp() == 2 - sqrt(3) + sqrt(2) - sqrt(6)
assert tan(pi*Rational(23, 24)).radsimp() == 2 + sqrt(3) - sqrt(2) - sqrt(6)
assert tan(x*I) == tanh(x)*I
assert tan(k*pi) == 0
assert tan(17*k*pi) == 0
assert tan(k*pi*I) == tanh(k*pi)*I
assert tan(r).is_real is None
assert tan(r).is_extended_real is True
assert tan(0, evaluate=False).is_algebraic
assert tan(a).is_algebraic is None
assert tan(na).is_algebraic is False
assert tan(pi*Rational(10, 7)) == tan(pi*Rational(3, 7))
assert tan(pi*Rational(11, 7)) == -tan(pi*Rational(3, 7))
assert tan(pi*Rational(-11, 7)) == tan(pi*Rational(3, 7))
assert tan(pi*Rational(15, 14)) == tan(pi/14)
assert tan(pi*Rational(-15, 14)) == -tan(pi/14)
assert tan(r).is_finite is None
assert tan(I*r).is_finite is True
def test_tan_series():
assert tan(x).series(x, 0, 9) == \
x + x**3/3 + 2*x**5/15 + 17*x**7/315 + O(x**9)
def test_tan_rewrite():
neg_exp, pos_exp = exp(-x*I), exp(x*I)
assert tan(x).rewrite(exp) == I*(neg_exp - pos_exp)/(neg_exp + pos_exp)
assert tan(x).rewrite(sin) == 2*sin(x)**2/sin(2*x)
assert tan(x).rewrite(cos) == cos(x - S.Pi/2, evaluate=False)/cos(x)
assert tan(x).rewrite(cot) == 1/cot(x)
assert tan(sinh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sinh(3)).n()
assert tan(cosh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cosh(3)).n()
assert tan(tanh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tanh(3)).n()
assert tan(coth(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, coth(3)).n()
assert tan(sin(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sin(3)).n()
assert tan(cos(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cos(3)).n()
assert tan(tan(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tan(3)).n()
assert tan(cot(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cot(3)).n()
assert tan(log(x)).rewrite(Pow) == I*(x**-I - x**I)/(x**-I + x**I)
assert 0 == (cos(pi/34)*tan(pi/34) - sin(pi/34)).rewrite(pow)
assert 0 == (cos(pi/17)*tan(pi/17) - sin(pi/17)).rewrite(pow)
assert tan(pi/19).rewrite(pow) == tan(pi/19)
assert tan(pi*Rational(8, 19)).rewrite(sqrt) == tan(pi*Rational(8, 19))
assert tan(x).rewrite(sec) == sec(x)/sec(x - pi/2, evaluate=False)
assert tan(x).rewrite(csc) == csc(-x + pi/2, evaluate=False)/csc(x)
assert tan(sin(x)).rewrite(Pow) == tan(sin(x))
assert tan(pi*Rational(2, 5), evaluate=False).rewrite(sqrt) == sqrt(sqrt(5)/8 +
Rational(5, 8))/(Rational(-1, 4) + sqrt(5)/4)
def test_tan_subs():
assert tan(x).subs(tan(x), y) == y
assert tan(x).subs(x, y) == tan(y)
assert tan(x).subs(x, S.Pi/2) is zoo
assert tan(x).subs(x, S.Pi*Rational(3, 2)) is zoo
def test_tan_expansion():
assert tan(x + y).expand(trig=True) == ((tan(x) + tan(y))/(1 - tan(x)*tan(y))).expand()
assert tan(x - y).expand(trig=True) == ((tan(x) - tan(y))/(1 + tan(x)*tan(y))).expand()
assert tan(x + y + z).expand(trig=True) == (
(tan(x) + tan(y) + tan(z) - tan(x)*tan(y)*tan(z))/
(1 - tan(x)*tan(y) - tan(x)*tan(z) - tan(y)*tan(z))).expand()
assert 0 == tan(2*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 7))])*24 - 7
assert 0 == tan(3*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*55 - 37
assert 0 == tan(4*x - pi/4).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*239 - 1
def test_tan_AccumBounds():
assert tan(AccumBounds(-oo, oo)) == AccumBounds(-oo, oo)
assert tan(AccumBounds(S.Pi/3, S.Pi*Rational(2, 3))) == AccumBounds(-oo, oo)
assert tan(AccumBounds(S.Pi/6, S.Pi/3)) == AccumBounds(tan(S.Pi/6), tan(S.Pi/3))
def test_tan_fdiff():
assert tan(x).fdiff() == tan(x)**2 + 1
raises(ArgumentIndexError, lambda: tan(x).fdiff(2))
def test_cot():
assert cot(nan) is nan
assert cot.nargs == FiniteSet(1)
assert cot(oo*I) == -I
assert cot(-oo*I) == I
assert cot(zoo) is nan
assert cot(0) is zoo
assert cot(2*pi) is zoo
assert cot(acot(x)) == x
assert cot(atan(x)) == 1 / x
assert cot(asin(x)) == sqrt(1 - x**2) / x
assert cot(acos(x)) == x / sqrt(1 - x**2)
assert cot(acsc(x)) == sqrt(1 - 1 / x**2) * x
assert cot(asec(x)) == 1 / (sqrt(1 - 1 / x**2) * x)
assert cot(atan2(y, x)) == x/y
assert cot(pi*I) == -coth(pi)*I
assert cot(-pi*I) == coth(pi)*I
assert cot(-2*I) == coth(2)*I
assert cot(pi) == cot(2*pi) == cot(3*pi)
assert cot(-pi) == cot(-2*pi) == cot(-3*pi)
assert cot(pi/2) == 0
assert cot(-pi/2) == 0
assert cot(pi*Rational(5, 2)) == 0
assert cot(pi*Rational(7, 2)) == 0
assert cot(pi/3) == 1/sqrt(3)
assert cot(pi*Rational(-2, 3)) == 1/sqrt(3)
assert cot(pi/4) is S.One
assert cot(-pi/4) is S.NegativeOne
assert cot(pi*Rational(17, 4)) is S.One
assert cot(pi*Rational(-3, 4)) is S.One
assert cot(pi/6) == sqrt(3)
assert cot(-pi/6) == -sqrt(3)
assert cot(pi*Rational(7, 6)) == sqrt(3)
assert cot(pi*Rational(-5, 6)) == sqrt(3)
assert cot(pi/8) == 1 + sqrt(2)
assert cot(pi*Rational(3, 8)) == -1 + sqrt(2)
assert cot(pi*Rational(5, 8)) == 1 - sqrt(2)
assert cot(pi*Rational(7, 8)) == -1 - sqrt(2)
assert cot(pi/12) == sqrt(3) + 2
assert cot(pi*Rational(5, 12)) == -sqrt(3) + 2
assert cot(pi*Rational(7, 12)) == sqrt(3) - 2
assert cot(pi*Rational(11, 12)) == -sqrt(3) - 2
assert cot(pi/24).radsimp() == sqrt(2) + sqrt(3) + 2 + sqrt(6)
assert cot(pi*Rational(5, 24)).radsimp() == -sqrt(2) - sqrt(3) + 2 + sqrt(6)
assert cot(pi*Rational(7, 24)).radsimp() == -sqrt(2) + sqrt(3) - 2 + sqrt(6)
assert cot(pi*Rational(11, 24)).radsimp() == sqrt(2) - sqrt(3) - 2 + sqrt(6)
assert cot(pi*Rational(13, 24)).radsimp() == -sqrt(2) + sqrt(3) + 2 - sqrt(6)
assert cot(pi*Rational(17, 24)).radsimp() == sqrt(2) - sqrt(3) + 2 - sqrt(6)
assert cot(pi*Rational(19, 24)).radsimp() == sqrt(2) + sqrt(3) - 2 - sqrt(6)
assert cot(pi*Rational(23, 24)).radsimp() == -sqrt(2) - sqrt(3) - 2 - sqrt(6)
assert cot(x*I) == -coth(x)*I
assert cot(k*pi*I) == -coth(k*pi)*I
assert cot(r).is_real is None
assert cot(r).is_extended_real is True
assert cot(a).is_algebraic is None
assert cot(na).is_algebraic is False
assert cot(pi*Rational(10, 7)) == cot(pi*Rational(3, 7))
assert cot(pi*Rational(11, 7)) == -cot(pi*Rational(3, 7))
assert cot(pi*Rational(-11, 7)) == cot(pi*Rational(3, 7))
assert cot(pi*Rational(39, 34)) == cot(pi*Rational(5, 34))
assert cot(pi*Rational(-41, 34)) == -cot(pi*Rational(7, 34))
assert cot(x).is_finite is None
assert cot(r).is_finite is None
i = Symbol('i', imaginary=True)
assert cot(i).is_finite is True
assert cot(x).subs(x, 3*pi) is zoo
def test_tan_cot_sin_cos_evalf():
assert abs((tan(pi*Rational(8, 15))*cos(pi*Rational(8, 15))/sin(pi*Rational(8, 15)) - 1).evalf()) < 1e-14
assert abs((cot(pi*Rational(4, 15))*sin(pi*Rational(4, 15))/cos(pi*Rational(4, 15)) - 1).evalf()) < 1e-14
@XFAIL
def test_tan_cot_sin_cos_ratsimp():
assert 1 == (tan(pi*Rational(8, 15))*cos(pi*Rational(8, 15))/sin(pi*Rational(8, 15))).ratsimp()
assert 1 == (cot(pi*Rational(4, 15))*sin(pi*Rational(4, 15))/cos(pi*Rational(4, 15))).ratsimp()
def test_cot_series():
assert cot(x).series(x, 0, 9) == \
1/x - x/3 - x**3/45 - 2*x**5/945 - x**7/4725 + O(x**9)
# issue 6210
assert cot(x**4 + x**5).series(x, 0, 1) == \
x**(-4) - 1/x**3 + x**(-2) - 1/x + 1 + O(x)
assert cot(pi*(1-x)).series(x, 0, 3) == -1/(pi*x) + pi*x/3 + O(x**3)
assert cot(x).taylor_term(0, x) == 1/x
assert cot(x).taylor_term(2, x) is S.Zero
assert cot(x).taylor_term(3, x) == -x**3/45
def test_cot_rewrite():
neg_exp, pos_exp = exp(-x*I), exp(x*I)
assert cot(x).rewrite(exp) == I*(pos_exp + neg_exp)/(pos_exp - neg_exp)
assert cot(x).rewrite(sin) == sin(2*x)/(2*(sin(x)**2))
assert cot(x).rewrite(cos) == cos(x)/cos(x - pi/2, evaluate=False)
assert cot(x).rewrite(tan) == 1/tan(x)
assert cot(sinh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sinh(3)).n()
assert cot(cosh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, cosh(3)).n()
assert cot(tanh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tanh(3)).n()
assert cot(coth(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, coth(3)).n()
assert cot(sin(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sin(3)).n()
assert cot(tan(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tan(3)).n()
assert cot(log(x)).rewrite(Pow) == -I*(x**-I + x**I)/(x**-I - x**I)
assert cot(pi*Rational(4, 34)).rewrite(pow).ratsimp() == (cos(pi*Rational(4, 34))/sin(pi*Rational(4, 34))).rewrite(pow).ratsimp()
assert cot(pi*Rational(4, 17)).rewrite(pow) == (cos(pi*Rational(4, 17))/sin(pi*Rational(4, 17))).rewrite(pow)
assert cot(pi/19).rewrite(pow) == cot(pi/19)
assert cot(pi/19).rewrite(sqrt) == cot(pi/19)
assert cot(x).rewrite(sec) == sec(x - pi / 2, evaluate=False) / sec(x)
assert cot(x).rewrite(csc) == csc(x) / csc(- x + pi / 2, evaluate=False)
assert cot(sin(x)).rewrite(Pow) == cot(sin(x))
assert cot(pi*Rational(2, 5), evaluate=False).rewrite(sqrt) == (Rational(-1, 4) + sqrt(5)/4)/\
sqrt(sqrt(5)/8 + Rational(5, 8))
def test_cot_subs():
assert cot(x).subs(cot(x), y) == y
assert cot(x).subs(x, y) == cot(y)
assert cot(x).subs(x, 0) is zoo
assert cot(x).subs(x, S.Pi) is zoo
def test_cot_expansion():
assert cot(x + y).expand(trig=True) == ((cot(x)*cot(y) - 1)/(cot(x) + cot(y))).expand()
assert cot(x - y).expand(trig=True) == (-(cot(x)*cot(y) + 1)/(cot(x) - cot(y))).expand()
assert cot(x + y + z).expand(trig=True) == (
(cot(x)*cot(y)*cot(z) - cot(x) - cot(y) - cot(z))/
(-1 + cot(x)*cot(y) + cot(x)*cot(z) + cot(y)*cot(z))).expand()
assert cot(3*x).expand(trig=True) == ((cot(x)**3 - 3*cot(x))/(3*cot(x)**2 - 1)).expand()
assert 0 == cot(2*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 3))])*3 + 4
assert 0 == cot(3*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 5))])*55 - 37
assert 0 == cot(4*x - pi/4).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 7))])*863 + 191
def test_cot_AccumBounds():
assert cot(AccumBounds(-oo, oo)) == AccumBounds(-oo, oo)
assert cot(AccumBounds(-S.Pi/3, S.Pi/3)) == AccumBounds(-oo, oo)
assert cot(AccumBounds(S.Pi/6, S.Pi/3)) == AccumBounds(cot(S.Pi/3), cot(S.Pi/6))
def test_cot_fdiff():
assert cot(x).fdiff() == -cot(x)**2 - 1
raises(ArgumentIndexError, lambda: cot(x).fdiff(2))
def test_sinc():
assert isinstance(sinc(x), sinc)
s = Symbol('s', zero=True)
assert sinc(s) is S.One
assert sinc(S.Infinity) is S.Zero
assert sinc(S.NegativeInfinity) is S.Zero
assert sinc(S.NaN) is S.NaN
assert sinc(S.ComplexInfinity) is S.NaN
n = Symbol('n', integer=True, nonzero=True)
assert sinc(n*pi) is S.Zero
assert sinc(-n*pi) is S.Zero
assert sinc(pi/2) == 2 / pi
assert sinc(-pi/2) == 2 / pi
assert sinc(pi*Rational(5, 2)) == 2 / (5*pi)
assert sinc(pi*Rational(7, 2)) == -2 / (7*pi)
assert sinc(-x) == sinc(x)
assert sinc(x).diff() == Piecewise(((x*cos(x) - sin(x)) / x**2, Ne(x, 0)), (0, True))
assert sinc(x).diff(x).equals(sinc(x).rewrite(sin).diff(x))
assert sinc(x).diff().subs(x, 0) is S.Zero
assert sinc(x).series() == 1 - x**2/6 + x**4/120 + O(x**6)
assert sinc(x).rewrite(jn) == jn(0, x)
assert sinc(x).rewrite(sin) == Piecewise((sin(x)/x, Ne(x, 0)), (1, True))
def test_asin():
assert asin(nan) is nan
assert asin.nargs == FiniteSet(1)
assert asin(oo) == -I*oo
assert asin(-oo) == I*oo
assert asin(zoo) is zoo
# Note: asin(-x) = - asin(x)
assert asin(0) == 0
assert asin(1) == pi/2
assert asin(-1) == -pi/2
assert asin(sqrt(3)/2) == pi/3
assert asin(-sqrt(3)/2) == -pi/3
assert asin(sqrt(2)/2) == pi/4
assert asin(-sqrt(2)/2) == -pi/4
assert asin(sqrt((5 - sqrt(5))/8)) == pi/5
assert asin(-sqrt((5 - sqrt(5))/8)) == -pi/5
assert asin(S.Half) == pi/6
assert asin(Rational(-1, 2)) == -pi/6
assert asin((sqrt(2 - sqrt(2)))/2) == pi/8
assert asin(-(sqrt(2 - sqrt(2)))/2) == -pi/8
assert asin((sqrt(5) - 1)/4) == pi/10
assert asin(-(sqrt(5) - 1)/4) == -pi/10
assert asin((sqrt(3) - 1)/sqrt(2**3)) == pi/12
assert asin(-(sqrt(3) - 1)/sqrt(2**3)) == -pi/12
# check round-trip for exact values:
for d in [5, 6, 8, 10, 12]:
for n in range(-(d//2), d//2 + 1):
if gcd(n, d) == 1:
assert asin(sin(n*pi/d)) == n*pi/d
assert asin(x).diff(x) == 1/sqrt(1 - x**2)
assert asin(0.2).is_real is True
assert asin(-2).is_real is False
assert asin(r).is_real is None
assert asin(-2*I) == -I*asinh(2)
assert asin(Rational(1, 7), evaluate=False).is_positive is True
assert asin(Rational(-1, 7), evaluate=False).is_positive is False
assert asin(p).is_positive is None
assert asin(sin(Rational(7, 2))) == Rational(-7, 2) + pi
assert asin(sin(Rational(-7, 4))) == Rational(7, 4) - pi
assert unchanged(asin, cos(x))
def test_asin_series():
assert asin(x).series(x, 0, 9) == \
x + x**3/6 + 3*x**5/40 + 5*x**7/112 + O(x**9)
t5 = asin(x).taylor_term(5, x)
assert t5 == 3*x**5/40
assert asin(x).taylor_term(7, x, t5, 0) == 5*x**7/112
def test_asin_rewrite():
assert asin(x).rewrite(log) == -I*log(I*x + sqrt(1 - x**2))
assert asin(x).rewrite(atan) == 2*atan(x/(1 + sqrt(1 - x**2)))
assert asin(x).rewrite(acos) == S.Pi/2 - acos(x)
assert asin(x).rewrite(acot) == 2*acot((sqrt(-x**2 + 1) + 1)/x)
assert asin(x).rewrite(asec) == -asec(1/x) + pi/2
assert asin(x).rewrite(acsc) == acsc(1/x)
def test_asin_fdiff():
assert asin(x).fdiff() == 1/sqrt(1 - x**2)
raises(ArgumentIndexError, lambda: asin(x).fdiff(2))
def test_acos():
assert acos(nan) is nan
assert acos(zoo) is zoo
assert acos.nargs == FiniteSet(1)
assert acos(oo) == I*oo
assert acos(-oo) == -I*oo
# Note: acos(-x) = pi - acos(x)
assert acos(0) == pi/2
assert acos(S.Half) == pi/3
assert acos(Rational(-1, 2)) == pi*Rational(2, 3)
assert acos(1) == 0
assert acos(-1) == pi
assert acos(sqrt(2)/2) == pi/4
assert acos(-sqrt(2)/2) == pi*Rational(3, 4)
# check round-trip for exact values:
for d in [5, 6, 8, 10, 12]:
for num in range(d):
if gcd(num, d) == 1:
assert acos(cos(num*pi/d)) == num*pi/d
assert acos(2*I) == pi/2 - asin(2*I)
assert acos(x).diff(x) == -1/sqrt(1 - x**2)
assert acos(0.2).is_real is True
assert acos(-2).is_real is False
assert acos(r).is_real is None
assert acos(Rational(1, 7), evaluate=False).is_positive is True
assert acos(Rational(-1, 7), evaluate=False).is_positive is True
assert acos(Rational(3, 2), evaluate=False).is_positive is False
assert acos(p).is_positive is None
assert acos(2 + p).conjugate() != acos(10 + p)
assert acos(-3 + n).conjugate() != acos(-3 + n)
assert acos(Rational(1, 3)).conjugate() == acos(Rational(1, 3))
assert acos(Rational(-1, 3)).conjugate() == acos(Rational(-1, 3))
assert acos(p + n*I).conjugate() == acos(p - n*I)
assert acos(z).conjugate() != acos(conjugate(z))
def test_acos_series():
assert acos(x).series(x, 0, 8) == \
pi/2 - x - x**3/6 - 3*x**5/40 - 5*x**7/112 + O(x**8)
assert acos(x).series(x, 0, 8) == pi/2 - asin(x).series(x, 0, 8)
t5 = acos(x).taylor_term(5, x)
assert t5 == -3*x**5/40
assert acos(x).taylor_term(7, x, t5, 0) == -5*x**7/112
assert acos(x).taylor_term(0, x) == pi/2
assert acos(x).taylor_term(2, x) is S.Zero
def test_acos_rewrite():
assert acos(x).rewrite(log) == pi/2 + I*log(I*x + sqrt(1 - x**2))
assert acos(x).rewrite(atan) == \
atan(sqrt(1 - x**2)/x) + (pi/2)*(1 - x*sqrt(1/x**2))
assert acos(0).rewrite(atan) == S.Pi/2
assert acos(0.5).rewrite(atan) == acos(0.5).rewrite(log)
assert acos(x).rewrite(asin) == S.Pi/2 - asin(x)
assert acos(x).rewrite(acot) == -2*acot((sqrt(-x**2 + 1) + 1)/x) + pi/2
assert acos(x).rewrite(asec) == asec(1/x)
assert acos(x).rewrite(acsc) == -acsc(1/x) + pi/2
def test_acos_fdiff():
assert acos(x).fdiff() == -1/sqrt(1 - x**2)
raises(ArgumentIndexError, lambda: acos(x).fdiff(2))
def test_atan():
assert atan(nan) is nan
assert atan.nargs == FiniteSet(1)
assert atan(oo) == pi/2
assert atan(-oo) == -pi/2
assert atan(zoo) == AccumBounds(-pi/2, pi/2)
assert atan(0) == 0
assert atan(1) == pi/4
assert atan(sqrt(3)) == pi/3
assert atan(-(1 + sqrt(2))) == pi*Rational(-3, 8)
assert atan(sqrt((5 - 2 * sqrt(5)))) == pi/5
assert atan(-sqrt(1 - 2 * sqrt(5)/ 5)) == -pi/10
assert atan(sqrt(1 + 2 * sqrt(5) / 5)) == pi*Rational(3, 10)
assert atan(-2 + sqrt(3)) == -pi/12
assert atan(2 + sqrt(3)) == pi*Rational(5, 12)
assert atan(-2 - sqrt(3)) == pi*Rational(-5, 12)
# check round-trip for exact values:
for d in [5, 6, 8, 10, 12]:
for num in range(-(d//2), d//2 + 1):
if gcd(num, d) == 1:
assert atan(tan(num*pi/d)) == num*pi/d
assert atan(oo) == pi/2
assert atan(x).diff(x) == 1/(1 + x**2)
assert atan(r).is_real is True
assert atan(-2*I) == -I*atanh(2)
assert unchanged(atan, cot(x))
assert atan(cot(Rational(1, 4))) == Rational(-1, 4) + pi/2
assert acot(Rational(1, 4)).is_rational is False
for s in (x, p, n, np, nn, nz, ep, en, enp, enn, enz):
if s.is_real or s.is_extended_real is None:
assert s.is_nonzero is atan(s).is_nonzero
assert s.is_positive is atan(s).is_positive
assert s.is_negative is atan(s).is_negative
assert s.is_nonpositive is atan(s).is_nonpositive
assert s.is_nonnegative is atan(s).is_nonnegative
else:
|
assert s.is_extended_nonzero is atan(s).is_extended_nonzero
assert s.is_extended_positive is atan(s).is_extended_positive
assert s.is_extended_negative is atan(s).is_extended_negative
assert s.is_extended_nonpositive is atan(s).is_extended_nonpositive
assert s.is_extended_nonnegative is atan(s).is_extended_nonnegative
def test_atan_rewrite():
assert atan(x).rewrite(log) == I*(log(1 - I*x)-log(1 + I*x))/2
assert atan(x).rewrite(asin) == (-asin(1/sqrt(x**2 + 1)) + pi/2)*sqrt(x**2)/x
assert atan(x).rewrite(acos) == sqrt(x**2)*acos(1/sqrt(x**2 + 1))/x
assert atan(x).rewrite(acot) == acot(1/x)
assert atan(x).rewrite(asec) == sqrt(x**2)*asec(sqrt(x**2 + 1))/x
assert atan(x).rewrite(acsc) == (-acsc(sqrt(x**2 + 1)) + pi/2)*sqrt(x**2)/x
assert atan(-5*I).evalf() == atan(x).rewrite(log).evalf(subs={x:-5*I})
assert atan(5*I).evalf() == atan(x).rewrite(log).evalf(subs={x:5*I})
def test_atan_fdiff():
assert atan(x).fdiff() == 1/(x**2 + 1)
raises(ArgumentIndexError, lambda: atan(x).fdiff(2))
def test_atan2():
assert atan2.nargs == FiniteSet(2)
assert atan2(0, 0) is S.NaN
assert atan2(0, 1) == 0
assert atan2(1, 1) == pi/4
assert atan2(1, 0) == pi/2
assert atan2(1, -1) == pi*Rational(3, 4)
assert atan2(0, -1) == pi
assert atan2(-1, -1) == pi*Rational(-3, 4)
assert atan2(-1, 0) == -pi/2
assert atan2(-1, 1) == -pi/4
i = symbols('i', imaginary=True)
r = symbols('r', real=True)
eq = atan2(r, i)
ans = -I*log((i + I*r)/sqrt(i**2 + r**2))
reps = ((r, 2), (i, I))
assert eq.subs(reps) == ans.subs(reps)
x = Symbol('x', negative=True)
y = Symbol('y', negative=True)
assert atan2(y, x) == atan(y/x) - pi
y = Symbol('y', nonnegative=True)
assert atan2(y, x) == atan(y/x) + pi
y = Symbol('y')
assert atan2(y, x) == atan2(y, x, evaluate=False)
u = Symbol("u", positive=True)
assert atan2(0, u) == 0
u = Symbol("u", negative=True)
assert atan2(0, u) == pi
assert atan2(y, oo) == 0
assert atan2(y, -oo)== 2*pi*Heaviside(re(y)) - pi
assert atan2(y, x).rewrite(log) == -I*log((x + I*y)/sqrt(x**2 + y**2))
assert atan2(0, 0) is S.NaN
ex = atan2(y, x) - arg(x + I*y)
assert ex.subs({x:2, y:3}).rewrite(arg) == 0
assert ex.subs({x:2, y:3*I}).rewrite(arg) == -pi - I*log(sqrt(5)*I/5)
assert ex.subs({x:2*I, y:3}).rewrite(arg) == -pi/2 - I*log(sqrt(5)*I)
assert ex.subs({x:2*I, y:3*I}).rewrite(arg) == -pi + atan(Rational(2, 3)) + atan(Rational(3, 2))
i = symbols('i', imaginary=True)
r = symbols('r', real=True)
e = atan2(i, r)
rewrite = e.rewrite(arg)
reps = {i: I, r: -2}
assert rewrite == -I*log(abs(I*i + r)/sqrt(abs(i**2 + r**2))) + arg((I*i + r)/sqrt(i**2 + r**2))
assert (e - rewrite).subs(reps).equals(0)
assert atan2(0, x).rewrite(atan) == Piecewise((pi, re(x) < 0),
(0, Ne(x, 0)),
(nan, True))
assert atan2(0, r).rewrite(atan) == Piecewise((pi, r < 0), (0, Ne(r, 0)), (S.NaN, True))
assert atan2(0, i),rewrite(atan) == 0
assert atan2(0, r + i).rewrite(atan) == Piecewise((pi, r < 0), (0, True))
assert atan2(y, x).rewrite(atan) == Piecewise(
(2*atan(y/(x + sqrt(x**2 + y**2))), Ne(y, 0)),
(pi, re(x) < 0),
(0, (re(x) > 0) | Ne(im(x), 0)),
(nan, True))
assert conjugate(atan2(x, y)) == atan2(conjugate(x), conjugate(y))
assert diff(atan2(y, x), x) == -y/(x**2 + y**2)
assert diff(atan2(y, x), y) == x/(x**2 + y**2)
assert simplify(diff(atan2(y, x).rewrite(log), x)) == -y/(x**2 + y**2)
assert simplify(diff(atan2(y, x).rewrite(log), y)) == x/(x**2 + y**2)
assert str(atan2(1, 2).evalf(5)) == '0.46365'
raises(ArgumentIndexError, lambda: atan2(x, y).fdiff(3))
def test_issue_17461():
class A(Symbol):
is_extended_real = True
def _eval_evalf(self, prec):
return Float(5.0)
x = A('X')
y = A('Y')
assert abs(atan2(x, y).evalf() - 0.785398163397448) <= 1e-10
def test_acot():
assert acot(nan) is nan
assert acot.nargs == FiniteSet(1)
assert acot(-oo) == 0
assert acot(oo) == 0
assert acot(zoo) == 0
assert acot(1) == pi/4
assert acot(0) == pi/2
assert acot(sqrt(3)/3) == pi/3
assert acot(1/sqrt(3)) == pi/3
assert acot(-1/sqrt(3)) == -pi/3
assert acot(x).diff(x) == -1/(1 + x**2)
assert acot(r).is_extended_real is True
assert acot(I*pi) == -I*acoth(pi)
assert acot(-2*I) == I*acoth(2)
assert acot(x).is_positive is None
assert acot(n).is_positive is False
assert acot(p).is_positive is True
assert acot(I).is_positive is False
assert acot(Rational(1, 4)).is_rational is False
assert unchanged(acot, cot(x))
assert unchanged(acot, tan(x))
assert acot(cot(Rational(1, 4))) == Rational(1, 4)
assert acot(tan(Rational(-1, 4))) == Rational(1, 4) - pi/2
def test_acot_rewrite():
assert acot(x).rewrite(log) == I*(log(1 - I/x)-log(1 + I/x))/2
assert acot(x).rewrite(asin) == x*(-asin(sqrt(-x**2)/sqrt(-x**2 - 1)) + pi/2)*sqrt(x**(-2))
assert acot(x).rewrite(acos) == x*sqrt(x**(-2))*acos(sqrt(-x**2)/sqrt(-x**2 - 1))
assert acot(x).rewrite(atan) == atan(1/x)
assert acot(x).rewrite(asec) == x*sqrt(x**(-2))*asec(sqrt((x**2 + 1)/x**2))
assert acot(x).rewrite(acsc) == x*(-acsc(sqrt((x**2 + 1)/x**2)) + pi/2)*sqrt(x**(-2))
assert acot(-I/5).evalf() == acot(x).rewrite(log).evalf(subs={x:-I/5})
assert acot(I/5).evalf() == acot(x).rewrite(log).evalf(subs={x:I/5})
def test_acot_fdiff():
assert acot(x).fdiff() == -1/(x**2 + 1)
raises(ArgumentIndexError, lambda: acot(x).fdiff(2))
def test_attributes():
assert sin(x).args == (x,)
def test_sincos_rewrite():
assert sin(pi/2 - x) == cos(x)
assert sin(pi - x) == sin(x)
assert cos(pi/2 - x) == sin(x)
assert cos(pi - x) == -cos(x)
def _check_even_rewrite(func, arg):
"""Checks that the expr has been rewritten using f(-x) -> f(x)
arg : -x
"""
return func(arg).args[0] == -arg
def _check_odd_rewrite(func, arg):
"""Checks that the expr has been rewritten using f(-x) -> -f(x)
arg : -x
"""
return func(arg).func.is_Mul
def _check_no_rewrite(func, arg):
"""Checks that the expr is not rewritten"""
return func(arg).args[0] == arg
def test_evenodd_rewrite():
a = cos(2) # negative
b = sin(1) # positive
even = [cos]
odd = [sin, tan, cot, asin, atan, acot]
with_minus = [-1, -2**1024 * E, -pi/105, -x*y, -x - y]
for func in even:
for expr in with_minus:
assert _check_even_rewrite(func, expr)
assert _check_no_rewrite(func, a*b)
assert func(
x - y) == func(y - x) # it doesn't matter which form is canonical
for func in odd:
for expr in with_minus:
assert _check_odd_rewrite(func, expr)
assert _check_no_rewrite(func, a*b)
assert func(
x - y) == -func(y - x) # it doesn't matter which form is canonical
def test_issue_4547():
assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2)
assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2)
assert tan(x).rewrite(cot) == 1/cot(x)
assert cot(x).fdiff() == -1 - cot(x)**2
def test_as_leading_term_issue_5272():
assert sin(x).as_leading_term(x) == x
assert cos(x).as_leading_term(x) == 1
assert tan(x).as_leading_term(x) == x
assert cot(x).as_leading_term(x) == 1/x
assert asin(x).as_leading_term(x) == x
assert acos(x).as_leading_term(x) == x
assert atan(x).as_leading_term(x) == x
assert acot(x).as_leading_term(x) == x
def test_leading_terms():
for func in [sin, cos, tan, cot, asin, acos, atan, acot]:
for arg in (1/x, S.Half):
eq = func(arg)
assert eq.as_leading_term(x) == eq
def test_atan2_expansion():
assert cancel(atan2(x**2, x + 1).diff(x) - atan(x**2/(x + 1)).diff(x)) == 0
assert cancel(atan(y/x).series(y, 0, 5) - atan2(y, x).series(y, 0, 5)
+ atan2(0, x) - atan(0)) == O(y**5)
assert cancel(atan(y/x).series(x, 1, 4) - atan2(y, x).series(x, 1, 4)
+ atan2(y, 1) - atan(y)) == O((x - 1)**4, (x, 1))
assert cancel(atan((y + x)/x).series(x, 1, 3) - atan2(y + x, x).series(x, 1, 3)
+ atan2(1 + y, 1) - atan(1 + y)) == O((x - 1)**3, (x, 1))
assert Matrix([atan2(y, x)]).jacobian([y, x]) == \
Matrix([[x/(y**2 + x**2), -y/(y**2 + x**2)]])
def test_aseries():
def t(n, v, d, e):
assert abs(
n(1/v).evalf() - n(1/x).series(x, dir=d).removeO().subs(x, v)) < e
t(atan, 0.1, '+', 1e-5)
t(atan, -0.1, '-', 1e-5)
t(acot, 0.1, '+', 1e-5)
t(acot, -0.1, '-', 1e-5)
def test_issue_4420():
i = Symbol('i', integer=True)
e = Symbol('e', even=True)
o = Symbol('o', odd=True)
# unknown parity for variable
assert cos(4*i*pi) == 1
assert sin(4*i*pi) == 0
assert tan(4*i*pi) == 0
assert cot(4*i*pi) is zoo
assert cos(3*i*pi) == cos(pi*i) # +/-1
assert sin(3*i*pi) == 0
assert tan(3*i*pi) == 0
assert cot(3*i*pi) is zoo
assert cos(4.0*i*pi) == 1
assert sin(4.0*i*pi) == 0
assert tan(4.0*i*pi) == 0
assert cot(4.0*i*pi) is zoo
assert cos(3.0*i*pi) == cos(pi*i) # +/-1
assert sin(3.0*i*pi) == 0
assert tan(3.0*i*pi) == 0
assert cot(3.0*i*pi) is zoo
assert cos(4.5*i*pi) == cos(0.5*pi*i)
assert sin(4.5*i*pi) == sin(0.5*pi*i)
assert tan(4.5*i*pi) == tan(0.5*pi*i)
assert cot(4.5*i*pi) == cot(0.5*pi*i)
# parity of variable is known
assert cos(4*e*pi) == 1
assert sin(4*e*pi) == 0
assert tan(4*e*pi) == 0
assert cot(4*e*pi) is zoo
assert cos(3*e*pi) == 1
assert sin(3*e*pi) == 0
assert tan(3*e*pi) == 0
assert cot(3*e*pi) is zoo
assert cos(4.0*e*pi) == 1
assert sin(4.0*e*pi) == 0
assert tan(4.0*e*pi) == 0
assert cot(4.0*e*pi) is zoo
assert cos(3.0*e*pi) == 1
assert sin(3.0*e*pi) == 0
assert tan(3.0*e*pi) == 0
assert cot(3.0*e*pi) is zoo
assert cos(4.5*e*pi) == cos(0.5*pi*e)
assert sin(4.5*e*pi) == sin(0.5*pi*e)
assert tan(4.5*e*pi) == tan(0.5*pi*e)
assert cot(4.5*e*pi) == cot(0.5*pi*e)
assert cos(4*o*pi) == 1
assert sin(4*o*pi) == 0
assert tan(4*o*pi) == 0
assert cot(4*o*pi) is zoo
assert cos(3*o*pi) == -1
assert sin(3*o*pi) == 0
assert tan(3*o*pi) == 0
assert cot(3*o*pi) is zoo
assert cos(4.0*o*pi) == 1
assert sin(4.0*o*pi) == 0
assert tan(4.0*o*pi) == 0
assert cot(4.0*o*pi) is zoo
assert cos(3.0*o*pi) == -1
assert sin(3.0*o*pi) == 0
assert tan(3.0*o*pi) == 0
assert cot(3.0*o*pi) is zoo
assert cos(4.5*o*pi) == cos(0.5*pi*o)
assert sin(4.5*o*pi) == sin(0.5*pi*o)
assert tan(4.5*o*pi) == tan(0.5*pi*o)
assert cot(4.5*o*pi) == cot(0.5*pi*o)
# x could be imaginary
assert cos(4*x*pi) == cos(4*pi*x)
assert sin(4*x*pi) == sin(4*pi*x)
assert tan(4*x*pi) == tan(4*pi*x)
assert cot(4*x*pi) == cot(4*pi*x)
assert cos(3*x*pi) == cos(3*pi*x)
assert sin(3*x*pi) == sin(3*pi*x)
assert tan(3*x*pi) == tan(3*pi*x)
assert cot(3*x*pi) == cot(3*pi*x)
assert cos(4.0*x*pi) == cos(4.0*pi*x)
assert sin(4.0*x*pi) == sin(4.0*pi*x)
assert tan(4.0*x*pi) == tan(4.0*pi*x)
assert cot(4.0*x*pi) == cot(4.0*pi*x)
assert cos(3.0*x*pi) == cos(3.0*pi*x)
assert sin(3.0*x*pi) == sin(3.0*pi*x)
assert tan(3.0*x*pi) == tan(3.0*pi*x)
assert cot(3.0*x*pi) == cot(3.0*pi*x)
assert cos(4.5*x*pi) == cos(4.5*pi*x)
assert sin(4.5*x*pi) == sin(4.5*pi*x)
assert tan(4.5*x*pi) == tan(4.5*pi*x)
assert cot(4.5*x*pi) == cot(4.5*pi*x)
def test_inverses():
raises(AttributeError, lambda: sin(x).inverse())
raises(AttributeError, lambda: cos(x).inverse())
assert tan(x).inverse() == atan
assert cot(x).inverse() == acot
raises(AttributeError, lambda: csc(x).inverse())
raises(AttributeError, lambda: sec(x).inverse())
assert asin(x).inverse() == sin
assert acos(x).inverse() == cos
assert atan(x).inverse() == tan
assert acot(x).inverse() == cot
def test_real_imag():
a, b = symbols('a b', real=True)
z = a + b*I
for deep in [True, False]:
assert sin(
z).as_real_imag(deep=deep) == (sin(a)*cosh(b), cos(a)*sinh(b))
assert cos(
z).as_real_imag(deep=deep) == (cos(a)*cosh(b), -sin(a)*sinh(b))
assert tan(z).as_real_imag(deep=deep) == (sin(2*a)/(cos(2*a) +
cosh(2*b)), sinh(2*b)/(cos(2*a) + cosh(2*b)))
assert cot(z).as_real_imag(deep=deep) == (-sin(2*a)/(cos(2*a) -
cosh(2*b)), -sinh(2*b)/(cos(2*a) - cosh(2*b)))
assert sin(a).as_real_imag(deep=deep) == (sin(a), 0)
assert cos(a).as_real_imag(deep=deep) == (cos(a), 0)
assert tan(a).as_real_imag(deep=deep) == (tan(a), 0)
assert cot(a).as_real_imag(deep=deep) == (cot(a), 0)
@XFAIL
def test_sin_cos_with_infinity():
# Test for issue 5196
# https://github.com/sympy/sympy/issues/5196
assert sin(oo) is S.NaN
assert cos(oo) is S.NaN
@slow
def test_sincos_rewrite_sqrt():
# equivalent to testing rewrite(pow)
for p in [1, 3, 5, 17]:
for t in [1, 8]:
n = t*p
# The vertices `exp(i*pi/n)` of a regular `n`-gon can
# be expressed by means of nested square roots if and
# only if `n` is a product of Fermat primes, `p`, and
# powers of 2, `t'. The code aims to check all vertices
# not belonging to an `m`-gon for `m < n`(`gcd(i, n) == 1`).
# For large `n` this makes the test too slow, therefore
# the vertices are limited to those of index `i < 10`.
for i in range(1, min((n + 1)//2 + 1, 10)):
if 1 == gcd(i, n):
x = i*pi/n
s1 = sin(x).rewrite(sqrt)
c1 = cos(x).rewrite(sqrt)
assert not s1.has(cos, sin), "fails for %d*pi/%d" % (i, n)
assert not c1.has(cos, sin), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs(sin(x.evalf(5)) - s1.evalf(2)), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs(cos(x.evalf(5)) - c1.evalf(2)), "fails for %d*pi/%d" % (i, n)
assert cos(pi/14).rewrite(sqrt) == sqrt(cos(pi/7)/2 + S.Half)
assert cos(pi/257).rewrite(sqrt).evalf(64) == cos(pi/257).evalf(64)
assert cos(pi*Rational(-15, 2)/11, evaluate=False).rewrite(
sqrt) == -sqrt(-cos(pi*Rational(4, 11))/2 + S.Half)
assert cos(Mul(2, pi, S.Half, evaluate=False), evaluate=False).rewrite(
sqrt) == -1
e = cos(pi/3/17) # don't use pi/15 since that is caught at instantiation
a = (
-3*sqrt(-sqrt(17) + 17)*sqrt(sqrt(17) + 17)/64 -
3*sqrt(34)*sqrt(sqrt(17) + 17)/128 - sqrt(sqrt(17) +
17)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) + 17)
+ sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/64 - sqrt(-sqrt(17)
+ 17)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/128 - Rational(1, 32) +
sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/64 +
3*sqrt(2)*sqrt(sqrt(17) + 17)/128 + sqrt(34)*sqrt(-sqrt(17) + 17)/128
+ 13*sqrt(2)*sqrt(-sqrt(17) + 17)/128 + sqrt(17)*sqrt(-sqrt(17) +
17)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) + 17)
+ sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/128 + 5*sqrt(17)/32
+ sqrt(3)*sqrt(-sqrt(2)*sqrt(sqrt(17) + 17)*sqrt(sqrt(17)/32 +
sqrt(2)*sqrt(-sqrt(17) + 17)/32 +
sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/32 + Rational(15, 32))/8 -
5*sqrt(2)*sqrt(sqrt(17)/32 + sqrt(2)*sqrt(-sqrt(17) + 17)/32 +
sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/32 +
Rational(15, 32))*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/64 -
3*sqrt(2)*sqrt(-sqrt(17) + 17)*sqrt(sqrt(17)/32 +
sqrt(2)*sqrt(-sqrt(17) + 17)/32 +
sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/32 + Rational(15, 32))/32
+ sqrt(34)*sqrt(sqrt(17)/32 + sqrt(2)*sqrt(-sqrt(17) + 17)/32 +
sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/32 +
Rational(15, 32))*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/64 +
sqrt(sqrt(17)/32 + sqrt(2)*sqrt(-sqrt(17) + 17)/32 +
sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/32 + Rational(15, 32))/2 +
S.Half + sqrt(-sqrt(17) + 17)*sqrt(sqrt(17)/32 + sqrt(2)*sqrt(-sqrt(17) +
17)/32 + sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) -
sqrt(2)*sqrt(-sqrt(17) + 17) + sqrt(34)*sqrt(-sqrt(17) + 17) +
6*sqrt(17) + 34)/32 + Rational(15, 32))*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) -
sqrt(2)*sqrt(-sqrt(17) + 17) + sqrt(34)*sqrt(-sqrt(17) + 17) +
6*sqrt(17) + 34)/32 + sqrt(34)*sqrt(-sqrt(17) + 17)*sqrt(sqrt(17)/32 +
sqrt(2)*sqrt(-sqrt(17) + 17)/32 +
sqrt(2)*sqrt(-8*sqrt(2)*sqrt(sqrt(17) + 17) - sqrt(2)*sqrt(-sqrt(17) +
17) + sqrt(34)*sqrt(-sqrt(17) + 17) + 6*sqrt(17) + 34)/32 +
Rational(15, 32))/32)/2)
assert e.rewrite(sqrt) == a
assert e.n() == a.n()
# coverage of fermatCoords: multiplicity > 1; the following could be
# different but that portion of the code should be tested in some way
assert cos(pi/9/17).rewrite(sqrt) == \
sin(pi/9)*sin(pi*Rational(2, 17)) + cos(pi/9)*cos(pi*Rational(2, 17))
@slow
def test_tancot_rewrite_sqrt():
# equivalent to testing rewrite(pow)
for p in [1, 3, 5, 17]:
for t in [1, 8]:
n = t*p
for i in range(1, min((n + 1)//2 + 1, 10)):
if 1 == gcd(i, n):
x = i*pi/n
if 2*i != n and 3*i != 2*n:
t1 = tan(x).rewrite(sqrt)
assert not t1.has(cot, tan), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs( tan(x.evalf(7)) - t1.evalf(4) ), "fails for %d*pi/%d" % (i, n)
if i != 0 and i != n:
c1 = cot(x).rewrite(sqrt)
assert not c1.has(cot, tan), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs( cot(x.evalf(7)) - c1.evalf(4) ), "fails for %d*pi/%d" % (i, n)
def test_sec():
x = symbols('x', real=True)
z = symbols('z')
assert sec.nargs == FiniteSet(1)
assert sec(zoo) is nan
assert sec(0) == 1
assert sec(pi) == -1
assert sec(pi/2) is zoo
assert sec(-pi/2) is zoo
assert sec(pi/6) == 2*sqrt(3)/3
assert sec(pi/3) == 2
assert sec(pi*Rational(5, 2)) is zoo
assert sec(pi*Rational(9, 7)) == -sec(pi*Rational(2, 7))
assert sec(pi*Rational(3, 4)) == -sqrt(2) # issue 8421
assert sec(I) == 1/cosh(1)
assert sec(x*I) == 1/cosh(x)
assert sec(-x) == sec(x)
assert sec(asec(x)) == x
assert sec(z).conjugate() == sec(conjugate(z))
assert (sec(z).as_real_imag() ==
(cos(re(z))*cosh(im(z))/(sin(re(z))**2*sinh(im(z))**2 +
cos(re(z))**2*cosh(im(z))**2),
sin(re(z))*sinh(im(z))/(sin(re(z))**2*sinh(im(z))**2 +
cos(re(z))**2*cosh(im(z))**2)))
assert sec(x).expand(trig=True) == 1/cos(x)
assert sec(2*x).expand(trig=True) == 1/(2*cos(x)**2 - 1)
assert sec(x).is_extended_real == True
assert sec(z).is_real == None
assert sec(a).is_algebraic is None
assert sec(na).is_algebraic is False
assert sec(x).as_leading_term() == sec(x)
assert sec(0).is_finite == True
assert sec(x).is_finite == None
assert sec(pi/2).is_finite == False
assert series(sec(x), x, x0=0, n=6) == 1 + x**2/2 + 5*x**4/24 + O(x**6)
# https://github.com/sympy/sympy/issues/7166
assert series(sqrt(sec(x))) == 1 + x**2/4 + 7*x**4/96 + O(x**6)
# https://github.com/sympy/sympy/issues/7167
assert (series(sqrt(sec(x)), x, x0=pi*3/2, n=4) ==
1/sqrt(x - pi*Rational(3, 2)) + (x - pi*Rational(3, 2))**Rational(3, 2)/12 +
(x - pi*Rational(3, 2))**Rational(7, 2)/160 + O((x - pi*Rational(3, 2))**4, (x, pi*Rational(3, 2))))
assert sec(x).diff(x) == tan(x)*sec(x)
# Taylor Term checks
assert sec(z).taylor_term(4, z) == 5*z**4/24
assert sec(z).taylor_term(6, z) == 61*z**6/720
assert sec(z).taylor_term(5, z) == 0
def test_sec_rewrite():
assert sec(x).rewrite(exp) == 1/(exp(I*x)/2 + exp(-I*x)/2)
assert sec(x).rewrite(cos) == 1/cos(x)
assert sec(x).rewrite(tan) == (tan(x/2)**2 + 1)/(-tan(x/2)**2 + 1)
assert sec(x).rewrite(pow) == sec(x)
assert sec(x).rewrite(sqrt) == sec(x)
assert sec(z).rewrite(cot) == (cot(z/2)**2 + 1)/(cot(z/2)**2 - 1)
assert sec(x).rewrite(sin) == 1 / sin(x + pi / 2, evaluate=False)
assert sec(x).rewrite(tan) == (tan(x / 2)**2 + 1) / (-tan(x / 2)**2 + 1)
assert sec(x).rewrite(csc) == csc(-x + pi/2, evaluate=False)
def test_sec_fdiff():
assert sec(x).fdiff() == tan(x)*sec(x)
raises(ArgumentIndexError, lambda: sec(x).fdiff(2))
def test_csc():
x = symbols('x', real=True)
z = symbols('z')
# https://github.com/sympy/sympy/issues/6707
cosecant = csc('x')
alternate = 1/sin('x')
assert cosecant.equals(alternate) == True
assert alternate.equals(cosecant) == True
assert csc.nargs == FiniteSet(1)
assert csc(0) is zoo
assert csc(pi) is zoo
assert csc(zoo) is nan
assert csc(pi/2) == 1
assert csc(-pi/2) == -1
assert csc(pi/6) == 2
assert csc(pi/3) == 2*sqrt(3)/3
assert csc(pi*Rational(5, 2)) == 1
assert csc(pi*Rational(9, 7)) == -csc(pi*Rational(2, 7))
assert csc(pi*Rational(3, 4)) == sqrt(2) # issue 8421
assert csc(I) == -I/sinh(1)
assert csc(x*I) == -I/sinh(x)
assert csc(-x) == -csc(x)
assert csc(acsc(x)) == x
assert csc(z).conjugate() == csc(conjugate(z))
assert (csc(z).as_real_imag() ==
(sin(re(z))*cosh(im(z))/(sin(re(z))**2*cosh(im(z))**2 +
cos(re(z))**2*sinh(im(z))**2),
-cos(re(z))*sinh(im(z))/(sin(re(z))**2*cosh(im(z))**2 +
cos(re(z))**2*sinh(im(z))**2)))
assert csc(x).expand(trig=True) == 1/sin(x)
assert csc(2*x).expand(trig=True) == 1/(2*sin(x)*cos(x))
assert csc(x).is_extended_real == True
assert csc(z).is_real == None
assert csc(a).is_algebraic is None
assert csc(na).is_algebraic is False
assert csc(x).as_leading_term() == csc(x)
assert csc(0).is_finite == False
assert csc(x).is_finite == None
assert csc(pi/2).is_finite == True
assert series(csc(x), x, x0=pi/2, n=6) == \
1 + (x - pi/2)**2/2 + 5*(x - pi/2)**4/24 + O((x - pi/2)**6, (x, pi/2))
assert series(csc(x), x, x0=0, n=6) == \
1/x + x/6 + 7*x**3/360 + 31*x**5/15120 + O(x**6)
assert csc(x).diff(x) == -cot(x)*csc(x)
assert csc(x).taylor_term(2, x) == 0
assert csc(x).taylor_term(3, x) == 7*x**3/360
assert csc(x).taylor_term(5, x) == 31*x**5/15120
raises(ArgumentIndexError, lambda: csc(x).fdiff(2))
def test_asec():
z = Symbol('z', zero=True)
assert asec(z) is zoo
assert asec(nan) is nan
assert asec(1) == 0
assert asec(-1) == pi
assert asec(oo) == pi/2
assert asec(-oo) == pi/2
assert asec(zoo) == pi/2
assert asec(sec(pi*Rational(13, 4))) == pi*Rational(3, 4)
assert asec(1 + sqrt(5)) == pi*Rational(2, 5)
assert asec(2/sqrt(3)) == pi/6
assert asec(sqrt(4 - 2*sqrt(2))) == pi/8
assert asec(-sqrt(4 + 2*sqrt(2))) == pi*Rational(5, 8)
assert asec(sqrt(2 + 2*sqrt(5)/5)) == pi*Rational(3, 10)
assert asec(-sqrt(2 + 2*sqrt(5)/5)) == pi*Rational(7, 10)
assert asec(sqrt(2) - sqrt(6)) == pi*Rational(11, 12)
assert asec(x).diff(x) == 1/(x**2*sqrt(1 - 1/x**2))
assert asec(x).as_leading_term(x) == log(x)
assert asec(x).rewrite(log) == I*log(sqrt(1 - 1/x**2) + I/x) + pi/2
assert asec(x).rewrite(asin) == -asin(1/x) + pi/2
assert asec(x).rewrite(acos) == acos(1/x)
assert asec(x).rewrite(atan) == (2*atan(x + sqrt(x**2 - 1)) - pi/2)*sqrt(x**2)/x
assert asec(x).rewrite(acot) == (2*acot(x - sqrt(x**2 - 1)) - pi/2)*sqrt(x**2)/x
assert asec(x).rewrite(acsc) == -acsc(x) + pi/2
raises(ArgumentIndexError, lambda: asec(x).fdiff(2))
def test_asec_is_real():
assert asec(S.Half).is_real is False
n = Symbol('n', positive=True, integer=True)
assert asec(n).is_extended_real is True
assert asec(x).is_real is None
assert asec(r).is_real is None
t = Symbol('t', real=False, finite=True)
assert asec(t).is_real is False
def test_acsc():
assert acsc(nan) is nan
assert acsc(1) == pi/2
assert acsc(-1) == -pi/2
assert acsc(oo) == 0
assert acsc(-oo) == 0
assert acsc(zoo) == 0
assert acsc(0) is zoo
assert acsc(csc(3)) == -3 + pi
assert acsc(csc(4)) == -4 + pi
assert acsc(csc(6)) == 6 - 2*pi
assert unchanged(acsc, csc(x))
assert unchanged(acsc, sec(x))
assert acsc(2/sqrt(3)) == pi/3
assert acsc(csc(pi*Rational(13, 4))) == -pi/4
assert acsc(sqrt(2 + 2*sqrt(5)/5)) == pi/5
assert acsc(-sqrt(2 + 2*sqrt(5)/5)) == -pi/5
assert acsc(-2) == -pi/6
assert acsc(-sqrt(4 + 2*sqrt(2))) == -pi/8
assert acsc(sqrt(4 - 2*sqrt(2))) == pi*Rational(3, 8)
assert acsc(1 + sqrt(5)) == pi/10
assert acsc(sqrt(2) - sqrt(6)) == pi*Rational(-5, 12)
assert acsc(x).diff(x) == -1/(x**2*sqrt(1 - 1/x**2))
assert acsc(x).as_leading_term(x) == log(x)
assert acsc(x).rewrite(log) == -I*log(sqrt(1 - 1/x**2) + I/x)
assert acsc(x).rewrite(asin) == asin(1/x)
assert acsc(x).rewrite(acos) == -acos(1/x) + pi/2
assert acsc(x).rewrite(atan) == (-atan(sqrt(x**2 - 1)) + pi/2)*sqrt(x**2)/x
assert acsc(x).rewrite(acot) == (-acot(1/sqrt(x**2 - 1)) + pi/2)*sqrt(x**2)/x
assert acsc(x).rewrite(asec) == -asec(x) + pi/2
raises(ArgumentIndexError, lambda: acsc(x).fdiff(2))
def test_csc_rewrite():
assert csc(x).rewrite(pow) == csc(x)
assert csc(x).rewrite(sqrt) == csc(x)
assert csc(x).rewrite(exp) == 2*I/(exp(I*x) - exp(-I*x))
assert csc(x).rewrite(sin) == 1/sin(x)
assert csc(x).rewrite(tan) == (tan(x/2)**2 + 1)/(2*tan(x/2))
assert csc(x).rewrite(cot) == (cot(x/2)**2 + 1)/(2*cot(x/2))
assert csc(x).rewrite(cos) == 1/cos(x - pi/2, evaluate=False)
assert csc(x).rewrite(sec) == sec(-x + pi/2, evaluate=False)
# issue 17349
assert csc(1 - exp(-besselj(I, I))).rewrite(cos) == \
-1/cos(-pi/2 - 1 + cos(I*besselj(I, I)) +
I*cos(-pi/2 + I*besselj(I, I), evaluate=False), evaluate=False)
def test_issue_8653():
n = Symbol('n', integer=True)
assert sin(n).is_irrational is None
assert cos(n).is_irrational is None
assert tan(n).is_irrational is None
def test_issue_9157():
n = Symbol('n', integer=True, positive=True)
assert atan(n - 1).is_nonnegative is True
def test_trig_period():
x, y = symbols('x, y')
assert sin(x).period() == 2*pi
assert cos(x).period() == 2*pi
assert tan(x).period() == pi
assert cot(x).period() == pi
assert sec(x).period() == 2*pi
assert csc(x).period() == 2*pi
assert sin(2*x).period() == pi
assert cot(4*x - 6).period() == pi/4
assert cos((-3)*x).period() == pi*Rational(2, 3)
assert cos(x*y).period(x) == 2*pi/abs(y)
assert sin(3*x*y + 2*pi).period(y) == 2*pi/abs(3*x)
assert tan(3*x).period(y) is S.Zero
raises(NotImplementedError, lambda: sin(x**2).period(x))
def test_issue_7171():
assert sin(x).rewrite(sqrt) == sin(x)
assert sin(x).rewrite(pow) == sin(x)
def test_issue_11864():
w, k = symbols('w, k', real=True)
F = Piecewise((1, Eq(2*pi*k, 0)), (sin(pi*k)/(pi*k), True))
soln = Piecewise((1, Eq(2*pi*k, 0)), (sinc(pi*k), True))
assert F.rewrite(sinc) == soln
def test_real_assumptions():
z = Symbol('z', real=False, finite=True)
assert sin(z).is_real is None
assert cos(z).is_real is None
assert tan(z).is_real is False
assert sec(z).is_real is None
assert csc(z).is_real is None
assert cot(z).is_real is False
assert asin(p).is_real is None
assert asin(n).is_real is None
assert asec(p).is_real is None
assert asec(n).is_real is None
assert acos(p).is_real is None
assert acos(n).is_real is None
assert acsc(p).is_real is None
assert acsc(n).is_real is None
assert atan(p).is_positive is True
assert atan(n).is_negative is True
assert acot(p).is_positive is True
assert acot(n).is_negative is True
def test_issue_14320():
assert asin(sin(2)) == -2 + pi and (-pi/2 <= -2 + pi <= pi/2) and sin(2) == sin(-2 + pi)
assert asin(cos(2)) == -2 + pi/2 and (-pi/2 <= -2 + pi/2 <= pi/2) and cos(2) == sin(-2 + pi/2)
assert acos(sin(2)) == -pi/2 + 2 and (0 <= -pi/2 + 2 <= pi) and sin(2) == cos(-pi/2 + 2)
assert acos(cos(20)) == -6*pi + 20 and (0 <= -6*pi + 20 <= pi) and cos(20) == cos(-6*pi + 20)
assert acos(cos(30)) == -30 + 10*pi and (0 <= -30 + 10*pi <= pi) and cos(30) == cos(-30 + 10*pi)
assert atan(tan(17)) == -5*pi + 17 and (-pi/2 < -5*pi + 17 < pi/2) and tan(17) == tan(-5*pi + 17)
assert atan(tan(15)) == -5*pi + 15 and (-pi/2 < -5*pi + 15 < pi/2) and tan(15) == tan(-5*pi + 15)
assert atan(cot(12)) == -12 + pi*Rational(7, 2) and (-pi/2 < -12 + pi*Rational(7, 2) < pi/2) and cot(12) == tan(-12 + pi*Rational(7, 2))
assert acot(cot(15)) == -5*pi + 15 and (-pi/2 < -5*pi + 15 <= pi/2) and cot(15) == cot(-5*pi + 15)
assert acot(tan(19)) == -19 + pi*Rational(13, 2) and (-pi/2 < -19 + pi*Rational(13, 2) <= pi/2) and tan(19) == cot(-19 + pi*Rational(13, 2))
assert asec(sec(11)) == -11 + 4*pi and (0 <= -11 + 4*pi <= pi) and cos(11) == cos(-11 + 4*pi)
assert asec(csc(13)) == -13 + pi*Rational(9, 2) and (0 <= -13 + pi*Rational(9, 2) <= pi) and sin(13) == cos(-13 + pi*Rational(9, 2))
assert acsc(csc(14)) == -4*pi + 14 and (-pi/2 <= -4*pi + 14 <= pi/2) and sin(14) == sin(-4*pi + 14)
assert acsc(sec(10)) == pi*Rational(-7, 2) + 10 and (-pi/2 <= pi*Rational(-7, 2) + 10 <= pi/2) and cos(10) == sin(pi*Rational(-7, 2) + 10)
def test_issue_14543():
assert sec(2*pi + 11) == sec(11)
assert sec(2*pi - 11) == sec(11)
assert sec(pi + 11) == -sec(11)
assert sec(pi - 11) == -sec(11)
assert csc(2*pi + 17) == csc(17)
assert csc(2*pi - 17) == -csc(17)
assert csc(pi + 17) == -csc(17)
assert csc(pi - 17) == csc(17)
x = Symbol('x')
assert csc(pi/2 + x) == sec(x)
assert csc(pi/2 - x) == sec(x)
assert csc(pi*Rational(3, 2) + x) == -sec(x)
assert csc(pi*Rational(3, 2) - x) == -sec(x)
assert sec(pi/2 - x) == csc(x)
assert sec(pi/2 + x) == -csc(x)
assert sec(pi*Rational(3, 2) + x) == csc(x)
assert sec(pi*Rational(3, 2) - x) == -csc(x)
| assert s.is_extended_nonzero is atan(s).is_nonzero
assert s.is_extended_positive is atan(s).is_positive
assert s.is_extended_negative is atan(s).is_negative
assert s.is_extended_nonpositive is atan(s).is_nonpositive
assert s.is_extended_nonnegative is atan(s).is_nonnegative |
builder.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{IntPredicate, RealPredicate, False, OperandBundleDef};
use llvm::{self, BasicBlock};
use common::*;
use type_::Type;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::TyCtxt;
use rustc::ty::layout::{Align, Size};
use rustc::session::{config, Session};
use rustc_data_structures::small_c_str::SmallCStr;
use std::borrow::Cow;
use std::ops::Range;
use std::ptr;
// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll: 'a, 'tcx: 'll> {
pub llbuilder: &'ll mut llvm::Builder<'ll>,
pub cx: &'a CodegenCx<'ll, 'tcx>,
}
impl Drop for Builder<'a, 'll, 'tcx> {
fn drop(&mut self) {
unsafe {
llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
}
}
}
// This is a really awful way to get a zero-length c-string, but better (and a
// lot more efficient) than doing str::as_c_str("", ...) every time.
fn noname() -> *const c_char {
static CNULL: c_char = 0;
&CNULL
}
bitflags! {
pub struct MemFlags: u8 {
const VOLATILE = 1 << 0;
const NONTEMPORAL = 1 << 1;
const UNALIGNED = 1 << 2;
}
}
impl Builder<'a, 'll, 'tcx> {
    /// Appends a fresh basic block named `name` to `llfn` and returns a
    /// builder positioned at the end of that new block.
    pub fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self {
        let bx = Builder::with_cx(cx);
        let llbb = unsafe {
            // NUL-terminate the name for the C API.
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(
                cx.llcx,
                llfn,
                name.as_ptr()
            )
        };
        bx.position_at_end(llbb);
        bx
    }
    /// Creates a builder that is not yet positioned in any basic block.
    /// Callers must position it (e.g. via `position_at_end`) before
    /// emitting instructions.
    pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe {
            llvm::LLVMCreateBuilderInContext(cx.llcx)
        };
        Builder {
            llbuilder,
            cx,
        }
    }
pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx> {
Builder::new_block(self.cx, self.llfn(), name)
}
pub fn sess(&self) -> &Session {
self.cx.sess()
}
pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.cx.tcx
}
pub fn llfn(&self) -> &'ll Value {
unsafe {
llvm::LLVMGetBasicBlockParent(self.llbb())
}
}
pub fn llbb(&self) -> &'ll BasicBlock {
unsafe {
llvm::LLVMGetInsertBlock(self.llbuilder)
}
}
    /// Bumps instruction-count statistics when the session has codegen
    /// stats / per-category LLVM instruction counting enabled; otherwise
    /// this is a cheap no-op. Called at the top of every emitting method.
    fn count_insn(&self, category: &str) {
        if self.cx.sess().codegen_stats() {
            self.cx.stats.borrow_mut().n_llvm_insns += 1;
        }
        if self.cx.sess().count_llvm_insns() {
            // Per-category histogram keyed by the instruction name.
            *self.cx.stats
                .borrow_mut()
                .llvm_insns
                .entry(category.to_string())
                .or_insert(0) += 1;
        }
    }
pub fn set_value_name(&self, value: &'ll Value, name: &str) {
let cname = SmallCStr::new(name);
unsafe {
llvm::LLVMSetValueName(value, cname.as_ptr());
}
}
pub fn position_at_end(&self, llbb: &'ll BasicBlock) {
unsafe {
llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
}
}
pub fn position_at_start(&self, llbb: &'ll BasicBlock) {
unsafe {
llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
}
}
pub fn ret_void(&self) {
self.count_insn("retvoid");
unsafe {
llvm::LLVMBuildRetVoid(self.llbuilder);
}
}
pub fn ret(&self, v: &'ll Value) {
self.count_insn("ret");
unsafe {
llvm::LLVMBuildRet(self.llbuilder, v);
}
}
pub fn br(&self, dest: &'ll BasicBlock) {
self.count_insn("br");
unsafe {
llvm::LLVMBuildBr(self.llbuilder, dest);
}
}
pub fn cond_br(
&self,
cond: &'ll Value,
then_llbb: &'ll BasicBlock,
else_llbb: &'ll BasicBlock,
) {
self.count_insn("condbr");
unsafe {
llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
}
}
pub fn switch(
&self,
v: &'ll Value,
else_llbb: &'ll BasicBlock,
num_cases: usize,
) -> &'ll Value {
unsafe {
llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
}
}
pub fn invoke(&self,
llfn: &'ll Value,
args: &[&'ll Value],
then: &'ll BasicBlock,
catch: &'ll BasicBlock,
bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value {
self.count_insn("invoke");
debug!("Invoke {:?} with args ({:?})",
llfn,
args);
let args = self.check_call("invoke", llfn, args);
let bundle = bundle.map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildInvoke(self.llbuilder,
llfn,
args.as_ptr(),
args.len() as c_uint,
then,
catch,
bundle,
noname())
}
}
pub fn unreachable(&self) {
self.count_insn("unreachable");
unsafe {
llvm::LLVMBuildUnreachable(self.llbuilder);
}
}
/* Arithmetic */
pub fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("add");
unsafe {
llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fadd");
unsafe {
llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fadd");
unsafe {
let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("sub");
unsafe {
llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fsub");
unsafe {
llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fsub");
unsafe {
let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("mul");
unsafe {
llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fmul");
unsafe {
llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fmul");
unsafe {
let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("udiv");
unsafe {
llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
}
}
pub fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("exactudiv");
unsafe {
llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
}
}
pub fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("sdiv");
unsafe {
llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
}
}
pub fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("exactsdiv");
unsafe {
llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fdiv");
unsafe {
llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
}
}
pub fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fdiv");
unsafe {
let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("urem");
unsafe {
llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
}
}
pub fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("srem");
unsafe {
llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
}
}
pub fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("frem");
unsafe {
llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
}
}
pub fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("frem");
unsafe {
let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("shl");
unsafe {
llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
}
}
pub fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("lshr");
unsafe {
llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
}
}
pub fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("ashr");
unsafe {
llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
}
}
pub fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("and");
unsafe {
llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
}
}
pub fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("or");
unsafe {
llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
}
}
pub fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("xor");
unsafe {
llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
}
}
pub fn neg(&self, v: &'ll Value) -> &'ll Value {
self.count_insn("neg");
unsafe {
llvm::LLVMBuildNeg(self.llbuilder, v, noname())
}
}
pub fn fneg(&self, v: &'ll Value) -> &'ll Value {
self.count_insn("fneg");
unsafe {
llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
}
}
pub fn not(&self, v: &'ll Value) -> &'ll Value {
self.count_insn("not");
unsafe {
llvm::LLVMBuildNot(self.llbuilder, v, noname())
}
}
    /// Emits an alloca in the function's *first* basic block, regardless of
    /// where `self` is currently positioned (entry-block allocas are the
    /// form LLVM's stack-slot promotion expects).
    pub fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
        // Use a second, separately positioned builder so `self`'s insertion
        // point is left untouched.
        let bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe {
            llvm::LLVMGetFirstBasicBlock(self.llfn())
        });
        bx.dynamic_alloca(ty, name, align)
    }
pub fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
} else {
let name = SmallCStr::new(name);
llvm::LLVMBuildAlloca(self.llbuilder, ty,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
alloca
}
}
pub fn array_alloca(&self,
ty: &'ll Type,
len: &'ll Value,
name: &str,
align: Align) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
} else {
let name = SmallCStr::new(name);
llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
alloca
}
}
pub fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
self.count_insn("load");
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
llvm::LLVMSetAlignment(load, align.abi() as c_uint);
load
}
}
pub fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
self.count_insn("load.volatile");
unsafe {
let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
llvm::LLVMSetVolatile(insn, llvm::True);
insn
}
}
pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, size: Size) -> &'ll Value {
self.count_insn("load.atomic");
unsafe {
let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order);
// LLVM requires the alignment of atomic loads to be at least the size of the type.
llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
load
}
}
pub fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
if self.sess().target.target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks a i64 value is
// split into a v2i32, halving the bitwidth LLVM expects,
// tripping an assertion. So, for now, just disable this
// optimization.
return;
}
unsafe {
let llty = val_ty(load);
let v = [
C_uint_big(llty, range.start),
C_uint_big(llty, range.end)
];
llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx,
v.as_ptr(),
v.len() as c_uint));
}
}
pub fn nonnull_metadata(&self, load: &'ll Value) {
unsafe {
llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
}
}
pub fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
self.store_with_flags(val, ptr, align, MemFlags::empty())
}
    /// Emits a store of `val` to `ptr`, honoring `align` and the given
    /// `MemFlags` (volatile / nontemporal / unaligned).
    pub fn store_with_flags(
        &self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        self.count_insn("store");
        // May bitcast `ptr` so its pointee type matches `val`'s type.
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            // UNALIGNED forces alignment 1; otherwise use the ABI alignment.
            let align = if flags.contains(MemFlags::UNALIGNED) {
                1
            } else {
                align.abi() as c_uint
            };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: http://llvm.org/docs/LangRef.html#store-instruction
                let one = C_i32(self.cx, 1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }
pub fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
order: AtomicOrdering, size: Size) {
debug!("Store {:?} -> {:?}", val, ptr);
self.count_insn("store.atomic");
let ptr = self.check_store(val, ptr);
unsafe {
let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order);
// LLVM requires the alignment of atomic stores to be at least the size of the type.
llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
}
}
pub fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
self.count_insn("gep");
unsafe {
llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
indices.len() as c_uint, noname())
}
}
pub fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
self.count_insn("inboundsgep");
unsafe {
llvm::LLVMBuildInBoundsGEP(
self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
}
}
pub fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
self.count_insn("structgep");
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
}
}
/* Casts */
pub fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("trunc");
unsafe {
llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
}
}
pub fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("zext");
unsafe {
llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
}
}
pub fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("sext");
unsafe {
llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
}
}
pub fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("fptoui");
unsafe {
llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
}
}
pub fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("fptosi");
unsafe {
llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
}
}
pub fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("uitofp");
unsafe {
llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
}
}
pub fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("sitofp");
unsafe {
llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
}
}
pub fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("fptrunc");
unsafe {
llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
}
}
pub fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("fpext");
unsafe {
llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
}
}
pub fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("ptrtoint");
unsafe {
llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
}
}
pub fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("inttoptr");
unsafe {
llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
}
}
pub fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("bitcast");
unsafe {
llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
}
}
pub fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("pointercast");
unsafe {
llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
}
}
pub fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
self.count_insn("intcast");
unsafe {
llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
}
}
/* Comparisons */
pub fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("icmp");
unsafe {
llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
}
}
pub fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("fcmp");
unsafe {
llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
}
}
/* Miscellaneous instructions */
pub fn empty_phi(&self, ty: &'ll Type) -> &'ll Value {
self.count_insn("emptyphi");
unsafe {
llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
}
}
pub fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
assert_eq!(vals.len(), bbs.len());
let phi = self.empty_phi(ty);
self.count_insn("addincoming");
unsafe {
llvm::LLVMAddIncoming(phi, vals.as_ptr(),
bbs.as_ptr(),
vals.len() as c_uint);
phi
}
}
pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
inputs: &[&'ll Value], output: &'ll Type,
volatile: bool, alignstack: bool,
dia: AsmDialect) -> Option<&'ll Value> {
self.count_insn("inlineasm");
let volatile = if volatile { llvm::True }
else { llvm::False };
let alignstack = if alignstack { llvm::True }
else { llvm::False };
let argtys = inputs.iter().map(|v| {
debug!("Asm Input Type: {:?}", *v);
val_ty(*v)
}).collect::<Vec<_>>();
debug!("Asm Output Type: {:?}", output);
let fty = Type::func(&argtys[..], output);
unsafe {
// Ask LLVM to verify that the constraints are well-formed.
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
debug!("Constraint verification result: {:?}", constraints_ok);
if constraints_ok {
let v = llvm::LLVMRustInlineAsm(
fty, asm, cons, volatile, alignstack, dia);
Some(self.call(v, inputs, None))
} else {
// LLVM has detected an issue with our constraints, bail out
None
}
}
}
pub fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value {
self.count_insn("call");
debug!("Call {:?} with args ({:?})",
llfn,
args);
let args = self.check_call("call", llfn, args);
let bundle = bundle.map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(),
args.len() as c_uint, bundle, noname())
}
}
pub fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("minnum");
unsafe {
let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
}
}
pub fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("maxnum");
unsafe {
let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
}
}
pub fn select(
&self, cond: &'ll Value,
then_val: &'ll Value,
else_val: &'ll Value,
) -> &'ll Value {
self.count_insn("select");
unsafe {
llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
}
}
#[allow(dead_code)]
pub fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
self.count_insn("vaarg");
unsafe {
llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
}
}
pub fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
self.count_insn("extractelement");
unsafe {
llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
}
}
pub fn insert_element(
&self, vec: &'ll Value,
elt: &'ll Value,
idx: &'ll Value,
) -> &'ll Value {
self.count_insn("insertelement");
unsafe {
llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
}
}
pub fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
self.count_insn("shufflevector");
unsafe {
llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
}
}
pub fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
unsafe {
let elt_ty = val_ty(elt);
let undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64));
let vec = self.insert_element(undef, elt, C_i32(self.cx, 0));
let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64);
self.shuffle_vector(vec, undef, C_null(vec_i32_ty))
}
}
pub fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fadd_fast");
unsafe {
// FIXME: add a non-fast math version once
// https://bugs.llvm.org/show_bug.cgi?id=36732
// is fixed.
let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src)
.expect("LLVMRustBuildVectorReduceFAdd is not available in LLVM version < 5.0");
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmul_fast");
unsafe {
// FIXME: add a non-fast math version once
// https://bugs.llvm.org/show_bug.cgi?id=36732
// is fixed.
let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src)
.expect("LLVMRustBuildVectorReduceFMul is not available in LLVM version < 5.0");
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.add");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src);
instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.mul");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src);
instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.and");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src);
instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.or");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src);
instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.xor");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src);
instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmin");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false);
instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmax");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false);
instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmin_fast");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true)
.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0");
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmax_fast");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true)
.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0");
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
self.count_insn("vector.reduce.min");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed);
instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0")
}
}
pub fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
self.count_insn("vector.reduce.max");
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed);
instr.expect("LLVMRustBuildVectorReduceMax is not available in LLVM version < 5.0")
}
}
pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
self.count_insn("extractvalue");
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
}
}
pub fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
idx: u64) -> &'ll Value {
self.count_insn("insertvalue");
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
noname())
}
}
pub fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value,
num_clauses: usize) -> &'ll Value {
self.count_insn("landingpad");
unsafe {
llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
num_clauses as c_uint, noname())
}
}
pub fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
unsafe {
llvm::LLVMAddClause(landing_pad, clause);
}
}
pub fn set_cleanup(&self, landing_pad: &'ll Value) {
self.count_insn("setcleanup");
unsafe {
llvm::LLVMSetCleanup(landing_pad, llvm::True);
}
}
pub fn resume(&self, exn: &'ll Value) -> &'ll Value {
self.count_insn("resume");
unsafe {
llvm::LLVMBuildResume(self.llbuilder, exn)
}
}
pub fn cleanup_pad(&self,
parent: Option<&'ll Value>,
args: &[&'ll Value]) -> &'ll Value {
self.count_insn("cleanuppad");
let name = const_cstr!("cleanuppad");
let ret = unsafe {
llvm::LLVMRustBuildCleanupPad(self.llbuilder,
parent,
args.len() as c_uint,
args.as_ptr(),
name.as_ptr())
};
ret.expect("LLVM does not have support for cleanuppad")
}
pub fn cleanup_ret(
&self, cleanup: &'ll Value,
unwind: Option<&'ll BasicBlock>,
) -> &'ll Value {
self.count_insn("cleanupret");
let ret = unsafe {
llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
};
ret.expect("LLVM does not have support for cleanupret")
}
pub fn catch_pad(&self,
parent: &'ll Value,
args: &[&'ll Value]) -> &'ll Value {
self.count_insn("catchpad");
let name = const_cstr!("catchpad");
let ret = unsafe {
llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
args.len() as c_uint, args.as_ptr(),
name.as_ptr())
};
ret.expect("LLVM does not have support for catchpad")
}
pub fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value {
self.count_insn("catchret");
let ret = unsafe {
llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
};
ret.expect("LLVM does not have support for catchret")
}
pub fn catch_switch(
&self,
parent: Option<&'ll Value>,
unwind: Option<&'ll BasicBlock>,
num_handlers: usize,
) -> &'ll Value {
self.count_insn("catchswitch");
let name = const_cstr!("catchswitch");
let ret = unsafe {
llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
num_handlers as c_uint,
name.as_ptr())
};
ret.expect("LLVM does not have support for catchswitch")
}
pub fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
unsafe {
llvm::LLVMRustAddHandler(catch_switch, handler);
}
}
pub fn set_personality_fn(&self, personality: &'ll Value) {
unsafe {
llvm::LLVMSetPersonalityFn(self.llfn(), personality);
}
}
// Atomic Operations
pub fn atomic_cmpxchg(
&self,
dst: &'ll Value,
cmp: &'ll Value,
src: &'ll Value,
order: AtomicOrdering,
failure_order: AtomicOrdering,
weak: llvm::Bool,
) -> &'ll Value {
unsafe {
llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src,
order, failure_order, weak)
}
}
pub fn atomic_rmw(
&self,
op: AtomicRmwBinOp,
dst: &'ll Value,
src: &'ll Value,
order: AtomicOrdering,
) -> &'ll Value {
unsafe {
llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False)
}
}
pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) {
unsafe {
llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope);
}
}
pub fn | (&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
unsafe {
llvm::LLVMAddCase(s, on_val, dest)
}
}
pub fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
self.count_insn("addincoming");
unsafe {
llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
}
}
pub fn set_invariant_load(&self, load: &'ll Value) {
unsafe {
llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
}
}
/// Returns the ptr value that should be used for storing `val`.
fn check_store<'b>(&self,
val: &'ll Value,
ptr: &'ll Value) -> &'ll Value {
let dest_ptr_ty = val_ty(ptr);
let stored_ty = val_ty(val);
let stored_ptr_ty = stored_ty.ptr_to();
assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer);
if dest_ptr_ty == stored_ptr_ty {
ptr
} else {
debug!("Type mismatch in store. \
Expected {:?}, got {:?}; inserting bitcast",
dest_ptr_ty, stored_ptr_ty);
self.bitcast(ptr, stored_ptr_ty)
}
}
    /// Returns the args that should be used for a call to `llfn`.
    ///
    /// If every argument already has the type the callee expects, `args` is
    /// returned as-is (borrowed, no allocation); otherwise each mismatching
    /// argument is bitcast to the expected parameter type and an owned
    /// vector is returned. `typ` is only used in the panic message.
    fn check_call<'b>(&self,
                      typ: &str,
                      llfn: &'ll Value,
                      args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = val_ty(llfn);
        // Strip off pointers
        while fn_ty.kind() == llvm::TypeKind::Pointer {
            fn_ty = fn_ty.element_type();
        }
        assert!(fn_ty.kind() == llvm::TypeKind::Function,
                "builder::{} not passed a function, but {:?}", typ, fn_ty);
        let param_tys = fn_ty.func_params();
        // Fast path: nothing to cast.
        let all_args_match = param_tys.iter()
            .zip(args.iter().map(|&v| val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
        if all_args_match {
            return Cow::Borrowed(args);
        }
        // Slow path: rebuild the argument list, bitcasting where needed.
        let casted_args: Vec<_> = param_tys.into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!("Type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                           llfn, expected_ty, i, actual_ty);
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();
        Cow::Owned(casted_args)
    }
pub fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
}
pub fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
}
/// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
/// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
/// and the intrinsic for `lt` and passes them to `emit`, which is in
/// charge of generating code to call the passed intrinsic on whatever
/// block of generated code is targeted for the intrinsic.
///
/// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
/// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
if self.cx.sess().opts.optimize == config::OptLevel::No {
return;
}
let size = size.bytes();
if size == 0 {
return;
}
let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
let ptr = self.pointercast(ptr, Type::i8p(self.cx));
self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None);
}
}
| add_case |
timeseries.py | from .alphavantage import AlphaVantage as av
class TimeSeries(av):
"""This class implements all the api calls to times series
"""
@av._output_format
@av._call_api_on_func
def get_intraday(self, symbol, interval='15min', outputsize='compact'):
|
@av._output_format
@av._call_api_on_func
def get_intraday_extended(self, symbol, interval='15min', slice='year1month1', adjusted=True):
""" Return extended intraday time series in one csv_reader object.
It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
interval: time interval between two conscutive values,
supported values are '1min', '5min', '15min', '30min', '60min'
(default '15min')
slice: the trailing 2 years of intraday data is evenly divided into
24 "slices" - year1month1, year1month2, ..., year2month12
adjusted: By default, adjusted=true and the output time series is
adjusted by historical split and dividend events.
Set adjusted=false to query raw (as-traded) intraday values.
"""
_FUNCTION_KEY = "TIME_SERIES_INTRADAY_EXTENDED"
return _FUNCTION_KEY, "Time Series ({})".format(interval), 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_daily(self, symbol, outputsize='compact'):
""" Return daily time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
outputsize: The size of the call, supported values are
'compact' and 'full; the first returns the last 100 points in the
data series, and 'full' returns the full-length daily times
series, commonly above 1MB (default 'compact')
"""
_FUNCTION_KEY = "TIME_SERIES_DAILY"
return _FUNCTION_KEY, 'Time Series (Daily)', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_daily_adjusted(self, symbol, outputsize='compact'):
""" Return daily adjusted (date, daily open, daily high, daily low,
daily close, daily split/dividend-adjusted close, daily volume)
time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
outputsize: The size of the call, supported values are
'compact' and 'full; the first returns the last 100 points in the
data series, and 'full' returns the full-length daily times
series, commonly above 1MB (default 'compact')
"""
_FUNCTION_KEY = "TIME_SERIES_DAILY_ADJUSTED"
return _FUNCTION_KEY, 'Time Series (Daily)', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_weekly(self, symbol):
    """Fetch the weekly time series for an equity.
    The data and its meta_data are returned as two json objects;
    ValueError is raised when problems arise.
    Keyword Arguments:
        symbol: the symbol for the equity we want to get its data
    """
    # The decorators perform the actual HTTP call and output formatting.
    return "TIME_SERIES_WEEKLY", 'Weekly Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_weekly_adjusted(self, symbol):
    """ Return weekly adjusted time series (last trading day of each week,
    weekly open, weekly high, weekly low, weekly close, weekly adjusted
    close, weekly volume, weekly dividend) of the equity specified,
    covering up to 20 years of historical data.
    Keyword Arguments:
        symbol: the symbol for the equity we want to get its data
    """
    # The decorators turn this (function key, data key, meta-data key)
    # triple into the actual API request and output formatting.
    _FUNCTION_KEY = "TIME_SERIES_WEEKLY_ADJUSTED"
    return _FUNCTION_KEY, 'Weekly Adjusted Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_monthly(self, symbol):
    """Fetch the monthly time series for an equity.
    The data and its meta_data are returned as two json objects;
    ValueError is raised when problems arise.
    Keyword Arguments:
        symbol: the symbol for the equity we want to get its data
    """
    # The decorators perform the actual HTTP call and output formatting.
    return "TIME_SERIES_MONTHLY", 'Monthly Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_monthly_adjusted(self, symbol):
    """ Return monthly adjusted time series in two json objects as data and
    meta_data. It raises ValueError when problems arise
    Keyword Arguments:
        symbol: the symbol for the equity we want to get its data
    """
    # Unlike get_monthly, this endpoint returns split/dividend-adjusted
    # values (TIME_SERIES_MONTHLY_ADJUSTED).
    _FUNCTION_KEY = "TIME_SERIES_MONTHLY_ADJUSTED"
    return _FUNCTION_KEY, 'Monthly Adjusted Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_quote_endpoint(self, symbol):
    """Fetch the latest price and volume information for a security.
    ValueError is raised when problems arise.
    Keyword Arguments:
        symbol: the symbol for the equity we want to get its data
    """
    # This endpoint has no meta-data section, hence the trailing None.
    return "GLOBAL_QUOTE", 'Global Quote', None
@av._output_format
@av._call_api_on_func
def get_symbol_search(self, keywords):
    """ Return best matching symbols and market information
    based on keywords. It raises ValueError when problems arise
    Keyword Arguments:
        keywords: the keywords to query on
    """
    # No meta-data section exists for this endpoint, hence the trailing None.
    _FUNCTION_KEY = "SYMBOL_SEARCH"
    return _FUNCTION_KEY, 'bestMatches', None
| """ Return intraday time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
interval: time interval between two conscutive values,
supported values are '1min', '5min', '15min', '30min', '60min'
(default '15min')
outputsize: The size of the call, supported values are
'compact' and 'full; the first returns the last 100 points in the
data series, and 'full' returns the full-length intraday times
series, commonly above 1MB (default 'compact')
"""
_FUNCTION_KEY = "TIME_SERIES_INTRADAY"
return _FUNCTION_KEY, "Time Series ({})".format(interval), 'Meta Data' |
_mean_y_projection.py | from .._tier0 import execute
from .._tier0 import plugin_function
from .._tier0 import Image
from .._tier0 import create_2d_zx
@plugin_function(output_creator=create_2d_zx, categories=['projection'])
def mean_y_projection(source : Image, destination : Image = None) -> Image:
| """Determines the mean average intensity projection of an image along Y.
Parameters
----------
source : Image
destination : Image
Returns
-------
destination
Examples
--------
>>> import pyclesperanto_prototype as cle
>>> cle.mean_y_projection(source, destination)
References
----------
.. [1] https://clij.github.io/clij2-docs/reference_meanYProjection
"""
parameters = {
"dst":destination,
"src":source,
}
execute(__file__, '../clij-opencl-kernels/kernels/mean_y_projection_x.cl', 'mean_y_projection', destination.shape, parameters)
return destination |
|
root.go | // Copyright © 2021 Elis Lulja
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"context"
"os"
"os/signal"
"syscall"
"time"
redis "github.com/go-redis/redis/v8"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
)
const (
defaultPubChannel string = "poll-result"
)
var (
log zerolog.Logger
locale, _ = time.LoadLocation("Europe/Rome")
)
func i | ) {
output := zerolog.ConsoleWriter{
Out: os.Stdout,
TimeFormat: "15:04:05",
}
zerolog.TimestampFunc = func() time.Time {
return time.Now().In(locale)
}
log = zerolog.New(output).With().Timestamp().Logger().Level(zerolog.InfoLevel)
}
// NewRootCommand returns the root command of the event handler CLI.
// It declares the redis flags and wires PreRun/Run to the package-level
// logger and the run function.
func NewRootCommand() *cobra.Command {
	opts := &options{
		redis: &redisOptions{},
	}
	cmd := &cobra.Command{
		Use:   "event-handler",
		Short: "handle events coming from the kube-scraper project",
		// BUGFIX: "accirding" -> "according" in the user-facing help text.
		Long: `The event handler subscribes to events published by the Kube Scraper
project and reacts to them according to the channel and the event type.
In order to react to events, the redis address must be provided along with
the channel name.`,
		PreRun: func(_ *cobra.Command, _ []string) {
			if opts.debug {
				// BUGFIX: zerolog's Logger.Level returns a new logger; the
				// original discarded it, so --debug never took effect.
				log = log.Level(zerolog.DebugLevel)
			}
		},
		Run: func(_ *cobra.Command, _ []string) {
			run(opts)
		},
	}
	// Flags
	cmd.Flags().StringVar(&opts.redis.address, "redis-address", "", "the address where to connect to redis")
	cmd.Flags().StringVar(&opts.redis.pubChannel, "redis-pub-channel", defaultPubChannel, "redis channel where to subscribe from")
	cmd.MarkFlagRequired("redis-address")
	return cmd
}
// run connects to redis, subscribes to the configured channel and
// dispatches every received message to handleEvent, until a termination
// signal is received or the context is cancelled.
func run(opts *options) {
	// -- Init
	log.Info().Msg("starting...")
	ctx, canc := context.WithCancel(context.Background())
	exitChan := make(chan struct{})
	signalChan := make(chan os.Signal, 1)
	signal.Notify(
		signalChan,
		syscall.SIGHUP,  // kill -SIGHUP XXXX
		syscall.SIGINT,  // kill -SIGINT XXXX or Ctrl+c
		syscall.SIGQUIT, // kill -SIGQUIT XXXX
	)
	go func() {
		defer close(exitChan)
		// -- Get redis client
		rdb, err := func() (*redis.Client, error) {
			_rdb := redis.NewClient(&redis.Options{Addr: opts.redis.address})
			// Give redis 15 seconds to answer the initial ping.
			rdCtx, rdCanc := context.WithTimeout(ctx, 15*time.Second)
			defer rdCanc()
			if _, err := _rdb.Ping(rdCtx).Result(); err != nil {
				log.Err(err).Msg("could not receive ping from redis, exiting...")
				return nil, err
			}
			return _rdb, nil
		}()
		if err != nil {
			// Fake an interrupt so the main goroutine unblocks and exits.
			signalChan <- os.Interrupt
			return
		}
		log.Info().Msg("connected to redis")
		defer rdb.Close()
		sub := rdb.Subscribe(ctx, opts.redis.pubChannel)
		defer sub.Close()
		iface, err := sub.Receive(ctx)
		if err != nil {
			log.Err(err).Str("channel", opts.redis.pubChannel).Msg("could not subscribe to channel")
			signalChan <- os.Interrupt
			return
		}
		l := log.With().Str("channel", opts.redis.pubChannel).Logger()
		switch iface.(type) {
		case *redis.Subscription:
			l.Info().Msg("subscribed to channel")
		case *redis.Message:
			go handleEvent(iface.(*redis.Message).Payload)
		case *redis.Pong:
			// pong received
		default:
			l.Error().Msg("error while getting subscription")
		}
		l.Info().Str("channel", opts.redis.pubChannel).Msg("listening for events...")
		ch := sub.Channel()
		// BUGFIX: the original select ran exactly once, so the goroutine
		// exited after the first event and no further events were handled.
		// Loop until the context is cancelled.
		for {
			select {
			case msg := <-ch:
				l.Info().Msg("received message")
				go handleEvent(msg.Payload)
			case <-ctx.Done():
				return
			}
		}
	}()
	<-signalChan
	log.Info().Msg("exit requested")
	// -- Close all connections and shut down
	canc()
	<-exitChan
	log.Info().Msg("goodbye!")
}
| nit( |
state.go | package holders
import "os"
// StateConfig holds process-wide runtime state loaded from the
// environment (the NODE_ENV execution mode).
type StateConfig struct {
	mode string // value of NODE_ENV at load time
}
// Mode returns the execution mode read from NODE_ENV.
func (c *StateConfig) Mode() string {
	return c.mode
}
// StateConfigSingleton is the shared StateConfig instance, populated by
// LoadStateConfig; it is nil until that function runs.
var StateConfigSingleton *StateConfig
func LoadStateConfig() | {
StateConfigSingleton = &StateConfig {
mode: os.Getenv("NODE_ENV"),
}
} |
|
mod.rs | use std::time::Duration;
use futures::{
future::{try_select, Either},
pin_mut,
};
use tokio::time::delay_for;
use crate::{
docker::{
bitcoin::{self, BitcoindComitScriptsHttpWalletEndpoint},
delete_container, delete_network,
},
print_progress,
};
mod start;
pub async fn start() | nc fn new_miner(endpoint: BitcoindComitScriptsHttpWalletEndpoint) -> anyhow::Result<()> {
loop {
delay_for(Duration::from_secs(1)).await;
bitcoin::mine_a_block(&endpoint.to_string()).await?;
}
}
/// Tear down every container and the network created by `start-env`,
/// then remove the temporary state directory if it exists.
///
/// All results are deliberately ignored: clean-up is best-effort and
/// must not fail on resources that are already gone.
pub async fn clean_up() {
    for container in ["bitcoin", "ethereum", "cnd_0", "cnd_1"].iter().copied() {
        let _ = delete_container(container).await;
    }
    let _ = delete_network().await;
    if let Ok(path) = crate::temp_fs::dir_path() {
        let _ = tokio::fs::remove_dir_all(path).await;
    }
}
| {
if crate::temp_fs::dir_exist().await {
eprintln!("It seems that `start-env` is already running.\nIf it is not the case, run `yarn comit-scripts force-clean-env` and try again.");
::std::process::exit(1);
}
let ctrl_c = tokio::signal::ctrl_c();
let start_env = self::start::execute();
pin_mut!(start_env);
pin_mut!(ctrl_c);
let result = try_select(start_env, ctrl_c).await;
match result {
Ok(Either::Left((self::start::Environment { bitcoind, .. }, ctrl_c))) => {
tokio::spawn(new_miner(bitcoind.comit_scripts_wallet_endpoint));
let _ = ctrl_c.await;
}
Err(Either::Left((start_env_error, _))) => {
eprintln!("Failed to start environment: {:?}", start_env_error)
}
_ => {}
}
print_progress!("🧹 Cleaning up");
clean_up().await;
println!("✓");
}
asy |
query.go | package main
import (
"context"
"fmt"
"math"
"net/http"
"path"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/oklog/run"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/route"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/tsdb/labels"
"github.com/thanos-io/thanos/pkg/component"
"github.com/thanos-io/thanos/pkg/discovery/cache"
"github.com/thanos-io/thanos/pkg/discovery/dns"
"github.com/thanos-io/thanos/pkg/extprom"
extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http"
"github.com/thanos-io/thanos/pkg/prober"
"github.com/thanos-io/thanos/pkg/query"
v1 "github.com/thanos-io/thanos/pkg/query/api"
"github.com/thanos-io/thanos/pkg/runutil"
grpcserver "github.com/thanos-io/thanos/pkg/server/grpc"
httpserver "github.com/thanos-io/thanos/pkg/server/http"
"github.com/thanos-io/thanos/pkg/store"
"github.com/thanos-io/thanos/pkg/tls"
"github.com/thanos-io/thanos/pkg/tracing"
"github.com/thanos-io/thanos/pkg/ui"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
// registerQuery registers a query command.
// registerQuery wires the "query" sub-command into the command map: it
// declares every CLI flag and registers a setup function that validates
// them and launches runQuery.
func registerQuery(m map[string]setupFunc, app *kingpin.Application) {
	comp := component.Query
	cmd := app.Command(comp.String(), "query node exposing PromQL enabled Query API with data retrieved from multiple store nodes")
	// Server-side listener flags shared with the other commands.
	httpBindAddr, httpGracePeriod := regHTTPFlags(cmd)
	grpcBindAddr, grpcGracePeriod, grpcCert, grpcKey, grpcClientCA := regGRPCFlags(cmd)
	// Client-side TLS flags used when dialing store API servers.
	secure := cmd.Flag("grpc-client-tls-secure", "Use TLS when talking to the gRPC server").Default("false").Bool()
	cert := cmd.Flag("grpc-client-tls-cert", "TLS Certificates to use to identify this client to the server").Default("").String()
	key := cmd.Flag("grpc-client-tls-key", "TLS Key for the client's certificate").Default("").String()
	caCert := cmd.Flag("grpc-client-tls-ca", "TLS CA Certificates to use to verify gRPC servers").Default("").String()
	serverName := cmd.Flag("grpc-client-server-name", "Server name to verify the hostname on the returned gRPC certificates. See https://tools.ietf.org/html/rfc4366#section-3.1").Default("").String()
	// Web UI routing / prefix flags.
	webRoutePrefix := cmd.Flag("web.route-prefix", "Prefix for API and UI endpoints. This allows thanos UI to be served on a sub-path. This option is analogous to --web.route-prefix of Promethus.").Default("").String()
	webExternalPrefix := cmd.Flag("web.external-prefix", "Static prefix for all HTML links and redirect URLs in the UI query web interface. Actual endpoints are still served on / or the web.route-prefix. This allows thanos UI to be served behind a reverse proxy that strips a URL sub-path.").Default("").String()
	webPrefixHeaderName := cmd.Flag("web.prefix-header", "Name of HTTP request header used for dynamic prefixing of UI links and redirects. This option is ignored if web.external-prefix argument is set. Security risk: enable this option only if a reverse proxy in front of thanos is resetting the header. The --web.prefix-header=X-Forwarded-Prefix option can be useful, for example, if Thanos UI is served via Traefik reverse proxy with PathPrefixStrip option enabled, which sends the stripped prefix value in X-Forwarded-Prefix header. This allows thanos UI to be served on a sub-path.").Default("").String()
	// Query execution flags.
	queryTimeout := modelDuration(cmd.Flag("query.timeout", "Maximum time to process query by query node.").
		Default("2m"))
	maxConcurrentQueries := cmd.Flag("query.max-concurrent", "Maximum number of queries processed concurrently by query node.").
		Default("20").Int()
	replicaLabels := cmd.Flag("query.replica-label", "Labels to treat as a replica indicator along which data is deduplicated. Still you will be able to query without deduplication using 'dedup=false' parameter.").
		Strings()
	instantDefaultMaxSourceResolution := modelDuration(cmd.Flag("query.instant.default.max_source_resolution", "default value for max_source_resolution for instant queries. If not set, defaults to 0s only taking raw resolution into account. 1h can be a good value if you use instant queries over time ranges that incorporate times outside of your raw-retention.").Default("0s").Hidden())
	selectorLabels := cmd.Flag("selector-label", "Query selector labels that will be exposed in info endpoint (repeated).").
		PlaceHolder("<name>=\"<value>\"").Strings()
	// Store discovery flags (static addresses, file SD and DNS SD).
	stores := cmd.Flag("store", "Addresses of statically configured store API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect store API servers through respective DNS lookups.").
		PlaceHolder("<store>").Strings()
	fileSDFiles := cmd.Flag("store.sd-files", "Path to files that contain addresses of store API servers. The path can be a glob pattern (repeatable).").
		PlaceHolder("<path>").Strings()
	fileSDInterval := modelDuration(cmd.Flag("store.sd-interval", "Refresh interval to re-read file SD files. It is used as a resync fallback.").
		Default("5m"))
	// TODO(bwplotka): Grab this from TTL at some point.
	dnsSDInterval := modelDuration(cmd.Flag("store.sd-dns-interval", "Interval between DNS resolutions.").
		Default("30s"))
	dnsSDResolver := cmd.Flag("store.sd-dns-resolver", fmt.Sprintf("Resolver to use. Possible options: [%s, %s]", dns.GolangResolverType, dns.MiekgdnsResolverType)).
		Default(string(dns.GolangResolverType)).Hidden().String()
	unhealthyStoreTimeout := modelDuration(cmd.Flag("store.unhealthy-timeout", "Timeout before an unhealthy store is cleaned from the store UI page.").Default("5m"))
	// Query behaviour toggles.
	enableAutodownsampling := cmd.Flag("query.auto-downsampling", "Enable automatic adjustment (step / 5) to what source of data should be used in store gateways if no max_source_resolution param is specified.").
		Default("false").Bool()
	enablePartialResponse := cmd.Flag("query.partial-response", "Enable partial response for queries if no partial_response param is specified. --no-query.partial-response for disabling.").
		Default("true").Bool()
	defaultEvaluationInterval := modelDuration(cmd.Flag("query.default-evaluation-interval", "Set default evaluation interval for sub queries.").Default("1m"))
	storeResponseTimeout := modelDuration(cmd.Flag("store.response-timeout", "If a Store doesn't send any data in this specified duration then a Store will be ignored and partial data will be returned if it's enabled. 0 disables timeout.").Default("0ms"))
	// The setup function parses/validates flag values and starts runQuery.
	m[comp.String()] = func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ bool) error {
		selectorLset, err := parseFlagLabels(*selectorLabels)
		if err != nil {
			return errors.Wrap(err, "parse federation labels")
		}
		// Reject duplicated --store addresses early.
		lookupStores := map[string]struct{}{}
		for _, s := range *stores {
			if _, ok := lookupStores[s]; ok {
				return errors.Errorf("Address %s is duplicated for --store flag.", s)
			}
			lookupStores[s] = struct{}{}
		}
		var fileSD *file.Discovery
		if len(*fileSDFiles) > 0 {
			conf := &file.SDConfig{
				Files: *fileSDFiles,
				RefreshInterval: *fileSDInterval,
			}
			fileSD = file.NewDiscovery(conf, logger)
		}
		promql.SetDefaultEvaluationInterval(time.Duration(*defaultEvaluationInterval))
		return runQuery(
			g,
			logger,
			reg,
			tracer,
			*grpcBindAddr,
			time.Duration(*grpcGracePeriod),
			*grpcCert,
			*grpcKey,
			*grpcClientCA,
			*secure,
			*cert,
			*key,
			*caCert,
			*serverName,
			*httpBindAddr,
			time.Duration(*httpGracePeriod),
			*webRoutePrefix,
			*webExternalPrefix,
			*webPrefixHeaderName,
			*maxConcurrentQueries,
			time.Duration(*queryTimeout),
			time.Duration(*storeResponseTimeout),
			*replicaLabels,
			selectorLset,
			*stores,
			*enableAutodownsampling,
			*enablePartialResponse,
			fileSD,
			time.Duration(*dnsSDInterval),
			*dnsSDResolver,
			time.Duration(*unhealthyStoreTimeout),
			time.Duration(*instantDefaultMaxSourceResolution),
			component.Query,
		)
	}
}
// storeClientGRPCOpts assembles the gRPC dial options used to talk to
// store API servers: client-side Prometheus metrics, tracing
// interceptors and (optionally) TLS transport credentials.
func storeClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, secure bool, cert, key, caCert, serverName string) ([]grpc.DialOption, error) {
	clientMetrics := grpc_prometheus.NewClientMetrics()
	clientMetrics.EnableClientHandlingTimeHistogram(
		grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}),
	)
	if reg != nil {
		reg.MustRegister(clientMetrics)
	}
	// We want to make sure that we can receive huge gRPC messages from storeAPI.
	// On TCP level we can be fine, but the gRPC overhead for huge messages could be significant.
	// Current limit is ~2GB.
	// TODO(bplotka): Split sent chunks on store node per max 4MB chunks if needed.
	dialOpts := []grpc.DialOption{
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)),
		grpc.WithUnaryInterceptor(
			grpc_middleware.ChainUnaryClient(
				clientMetrics.UnaryClientInterceptor(),
				tracing.UnaryClientInterceptor(tracer),
			),
		),
		grpc.WithStreamInterceptor(
			grpc_middleware.ChainStreamClient(
				clientMetrics.StreamClientInterceptor(),
				tracing.StreamClientInterceptor(tracer),
			),
		),
	}
	if !secure {
		return append(dialOpts, grpc.WithInsecure()), nil
	}
	level.Info(logger).Log("msg", "enabling client to server TLS")
	tlsCfg, err := tls.NewClientConfig(logger, cert, key, caCert, serverName)
	if err != nil {
		return nil, err
	}
	return append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg))), nil
}
// runQuery starts a server that exposes PromQL Query API. It is responsible for querying configured
// store nodes, merging and duplicating the data to satisfy user query.
func | (
g *run.Group,
logger log.Logger,
reg *prometheus.Registry,
tracer opentracing.Tracer,
grpcBindAddr string,
grpcGracePeriod time.Duration,
grpcCert string,
grpcKey string,
grpcClientCA string,
secure bool,
cert string,
key string,
caCert string,
serverName string,
httpBindAddr string,
httpGracePeriod time.Duration,
webRoutePrefix string,
webExternalPrefix string,
webPrefixHeaderName string,
maxConcurrentQueries int,
queryTimeout time.Duration,
storeResponseTimeout time.Duration,
replicaLabels []string,
selectorLset labels.Labels,
storeAddrs []string,
enableAutodownsampling bool,
enablePartialResponse bool,
fileSD *file.Discovery,
dnsSDInterval time.Duration,
dnsSDResolver string,
unhealthyStoreTimeout time.Duration,
instantDefaultMaxSourceResolution time.Duration,
comp component.Component,
) error {
// TODO(bplotka in PR #513 review): Move arguments into struct.
duplicatedStores := prometheus.NewCounter(prometheus.CounterOpts{
Name: "thanos_query_duplicated_store_address",
Help: "The number of times a duplicated store addresses is detected from the different configs in query",
})
reg.MustRegister(duplicatedStores)
dialOpts, err := storeClientGRPCOpts(logger, reg, tracer, secure, cert, key, caCert, serverName)
if err != nil {
return errors.Wrap(err, "building gRPC client")
}
fileSDCache := cache.New()
dnsProvider := dns.NewProvider(
logger,
extprom.WrapRegistererWithPrefix("thanos_querier_store_apis_", reg),
dns.ResolverType(dnsSDResolver),
)
var (
stores = query.NewStoreSet(
logger,
reg,
func() (specs []query.StoreSpec) {
// Add DNS resolved addresses from static flags and file SD.
for _, addr := range dnsProvider.Addresses() {
specs = append(specs, query.NewGRPCStoreSpec(addr))
}
specs = removeDuplicateStoreSpecs(logger, duplicatedStores, specs)
return specs
},
dialOpts,
unhealthyStoreTimeout,
)
proxy = store.NewProxyStore(logger, stores.Get, component.Query, selectorLset, storeResponseTimeout)
queryableCreator = query.NewQueryableCreator(logger, proxy)
engine = promql.NewEngine(
promql.EngineOpts{
Logger: logger,
Reg: reg,
MaxConcurrent: maxConcurrentQueries,
// TODO(bwplotka): Expose this as a flag: https://github.com/thanos-io/thanos/issues/703.
MaxSamples: math.MaxInt32,
Timeout: queryTimeout,
},
)
)
// Periodically update the store set with the addresses we see in our cluster.
{
ctx, cancel := context.WithCancel(context.Background())
g.Add(func() error {
return runutil.Repeat(5*time.Second, ctx.Done(), func() error {
stores.Update(ctx)
return nil
})
}, func(error) {
cancel()
stores.Close()
})
}
// Run File Service Discovery and update the store set when the files are modified.
if fileSD != nil {
var fileSDUpdates chan []*targetgroup.Group
ctxRun, cancelRun := context.WithCancel(context.Background())
fileSDUpdates = make(chan []*targetgroup.Group)
g.Add(func() error {
fileSD.Run(ctxRun, fileSDUpdates)
return nil
}, func(error) {
cancelRun()
})
ctxUpdate, cancelUpdate := context.WithCancel(context.Background())
g.Add(func() error {
for {
select {
case update := <-fileSDUpdates:
// Discoverers sometimes send nil updates so need to check for it to avoid panics.
if update == nil {
continue
}
fileSDCache.Update(update)
stores.Update(ctxUpdate)
dnsProvider.Resolve(ctxUpdate, append(fileSDCache.Addresses(), storeAddrs...))
case <-ctxUpdate.Done():
return nil
}
}
}, func(error) {
cancelUpdate()
close(fileSDUpdates)
})
}
// Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary.
{
ctx, cancel := context.WithCancel(context.Background())
g.Add(func() error {
return runutil.Repeat(dnsSDInterval, ctx.Done(), func() error {
dnsProvider.Resolve(ctx, append(fileSDCache.Addresses(), storeAddrs...))
return nil
})
}, func(error) {
cancel()
})
}
// Start query API + UI HTTP server.
statusProber := prober.New(comp, logger, reg)
{
router := route.New()
// Redirect from / to /webRoutePrefix.
if webRoutePrefix != "" {
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, webRoutePrefix, http.StatusFound)
})
}
flagsMap := map[string]string{
// TODO(bplotka in PR #513 review): pass all flags, not only the flags needed by prefix rewriting.
"web.external-prefix": webExternalPrefix,
"web.prefix-header": webPrefixHeaderName,
}
ins := extpromhttp.NewInstrumentationMiddleware(reg)
ui.NewQueryUI(logger, reg, stores, flagsMap).Register(router.WithPrefix(webRoutePrefix), ins)
api := v1.NewAPI(logger, reg, engine, queryableCreator, enableAutodownsampling, enablePartialResponse, replicaLabels, instantDefaultMaxSourceResolution)
api.Register(router.WithPrefix(path.Join(webRoutePrefix, "/api/v1")), tracer, logger, ins)
// Initiate HTTP listener providing metrics endpoint and readiness/liveness probes.
srv := httpserver.New(logger, reg, comp, statusProber,
httpserver.WithListen(httpBindAddr),
httpserver.WithGracePeriod(httpGracePeriod),
)
srv.Handle("/", router)
g.Add(func() error {
statusProber.Healthy()
return srv.ListenAndServe()
}, func(err error) {
statusProber.NotReady(err)
defer statusProber.NotHealthy(err)
srv.Shutdown(err)
})
}
// Start query (proxy) gRPC StoreAPI.
{
tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), grpcCert, grpcKey, grpcClientCA)
if err != nil {
return errors.Wrap(err, "setup gRPC server")
}
s := grpcserver.New(logger, reg, tracer, comp, proxy,
grpcserver.WithListen(grpcBindAddr),
grpcserver.WithGracePeriod(grpcGracePeriod),
grpcserver.WithTLSConfig(tlsCfg),
)
g.Add(func() error {
statusProber.Ready()
return s.ListenAndServe()
}, func(error) {
statusProber.NotReady(err)
s.Shutdown(err)
})
}
level.Info(logger).Log("msg", "starting query node")
return nil
}
// removeDuplicateStoreSpecs deduplicates store specs by address, warning
// (and counting) every address that appears more than once. The order of
// the returned slice is unspecified because it is rebuilt from a map.
func removeDuplicateStoreSpecs(logger log.Logger, duplicatedStores prometheus.Counter, specs []query.StoreSpec) []query.StoreSpec {
	set := make(map[string]query.StoreSpec)
	for _, spec := range specs {
		addr := spec.Addr()
		if _, ok := set[addr]; ok {
			// BUGFIX: go-kit's Log takes key/value pairs, not a printf
			// format — the original logged a literal "%v" and left the
			// address as a dangling key with a missing value.
			level.Warn(logger).Log("msg", "duplicate store address is provided", "addr", addr)
			duplicatedStores.Inc()
		}
		set[addr] = spec
	}
	deduplicated := make([]query.StoreSpec, 0, len(set))
	for _, value := range set {
		deduplicated = append(deduplicated, value)
	}
	return deduplicated
}
| runQuery |
learn-more.tsx | import cn from 'classnames';
import styleUtils from './utils.module.css';
import styles from './contact.module.css'; |
// Renders the "Learn more on GitHub" footer link, fading in with the
// shared appear animation classes.
export default function LearnMore() {
  const containerClass = cn(styleUtils.appear, styleUtils['appear-fifth'], styles.contact);
  return (
    <div className={containerClass}>
      Learn more on{' '}
      <a href={REPO} className={styles['contact-email']} target="_blank" rel="noopener noreferrer">
        GitHub
      </a>
      .
    </div>
  );
}
setup.py | import codecs
import os
import re
from setuptools import find_packages, setup
###############################################################################
# Using setup.py from Attrs as a template for finding components, awesome config.
# Original reference: https://github.com/python-attrs/attrs/blob/master/setup.py
NAME = "mutatest"
PACKAGES = find_packages()
META_PATH = os.path.join("mutatest", "__init__.py")
KEYWORDS = ["mutatest", "mutation", "testing", "test", "mutant", "mutate", "pytest"]
PROJECT_URLS = {
"Documentation": "https://mutatest.readthedocs.io/",
"Bug Tracker": "https://github.com/EvanKepner/mutatest/issues",
"Source Code": "https://github.com/EvanKepner/mutatest",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"Environment :: Console",
"Framework :: Pytest",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Testing :: Unit",
]
# Built to run with pytest, but not an installation requirement for the API
INSTALL_REQUIRES = ["coverage>=4.4"]
EXTRAS_REQUIRE = {
"docs": ["coverage", "ipython", "sphinx"], # kept in docs/requirements.txt for RTD
"tests": [
"pytest >= 4.0.0",
"freezegun",
"coverage",
"pytest-cov",
"pytest-xdist",
"tox",
"virtualenv",
"hypothesis",
],
"qa": ["mypy", "black", "pre-commit", "isort"],
}
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["docs"] + EXTRAS_REQUIRE["qa"]
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """Build an absolute path from *parts* under HERE and return the
    contents of that file, decoded as UTF-8."""
    file_path = os.path.join(HERE, *parts)
    with codecs.open(file_path, "rb", "utf-8") as handle:
        return handle.read()
META_FILE = read(META_PATH)
def find_meta(meta):
|
VERSION = find_meta("version")
URL = find_meta("url")
LONG = "\n\n".join([read("README.rst"), read("CHANGELOG.rst"), read("AUTHORS.rst")])
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=URL,
project_urls=PROJECT_URLS,
version=VERSION,
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
keywords=KEYWORDS,
long_description=LONG,
packages=PACKAGES,
python_requires=">=3.7.0",
zip_safe=False,
entry_points={"console_scripts": ["mutatest=mutatest.cli:cli_main"]},
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
include_package_data=True,
)
| """
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta)) |
type.go | package lark
// ----- message
// MsgType is the IM message type.
type MsgType string
const (
	MsgTypeText MsgType = "text" // plain text
	MsgTypePost MsgType = "post" // rich text
	MsgTypeImage MsgType = "image" // image
	MsgTypeFile MsgType = "file" // file
	MsgTypeAudio MsgType = "audio" // voice audio
	MsgTypeMedia MsgType = "media" // video
	MsgTypeSticker MsgType = "sticker" // sticker
	MsgTypeInteractive MsgType = "interactive" // interactive card
	MsgTypeShareChat MsgType = "share_chat" // group share card
	MsgTypeShareUser MsgType = "share_user" // personal profile card
)
// ----- contact
// ContainerIDType 容器类型
type ContainerIDType string
const (
ContainerIDTypeChat ContainerIDType = "chat"
)
// IDType ID类型
type IDType string
const (
IDTypeUserID IDType = "user_id" // 以 user_id 来识别成员
IDTypeUnionID IDType = "union_id" // 以 union_id 来识别成员
IDTypeOpenID IDType = "open_id" // 以 open_id 来识别成员
IDTypeAppID IDType = "app_id" // 以 app_id 来识别成员
IDTypeChatID IDType = "chat_id" // 以 chat_id 来识别成员
IDTypeEmail IDType = "email" // 以 email 来识别成员
)
// DepartmentIDType ID类型
type DepartmentIDType string
const (
DepartmentIDTypeDepartmentID DepartmentIDType = "department_id" // 以 department_id 来识别
DepartmentIDTypeOpenDepartmentID DepartmentIDType = "open_department_id" // 以 open_department_id 来识别
)
// DepartmentIDTypePtr returns a pointer to the given DepartmentIDType value.
func DepartmentIDTypePtr(v DepartmentIDType) *DepartmentIDType {
	return &v
}
type MailUserType string
const (
MailUserTypeUser MailUserType = "USER" // 内部用户
MailUserTypeDepartment MailUserType = "DEPARTMENT" // 部门
MailUserTypeCompany MailUserType = "COMPANY" // 全员
MailUserTypeExternalUser MailUserType = "EXTERNAL_USER" // 外部用户
MailUserTypeMailGroup MailUserType = "MAIL_GROUP" // 邮件组
MailUserTypeOtherMember MailUserType = "OTHER_MEMBER" // 内部成员
)
// IDTypePtr returns a pointer to the given IDType value.
func IDTypePtr(idType IDType) *IDType {
	return &idType
}
// EmployeeType 用户类型
type EmployeeType string
const (
EmployeeTypeID EmployeeType = "employee_id" // 员工id
EmployeeTypeNo EmployeeType = "employee_no" // 员工工号
)
// ----- chat
type ChatType string
const (
ChatTypePrivate ChatType = "private"
ChatTypePublic ChatType = "public"
)
// ----- file
// ImageType 图片类型
type ImageType string
const (
ImageTypeMessage ImageType = "message" // 用于发送消息
ImageTypeAvatar ImageType = "avatar" // 用于设置头像
)
// FileType 文件类型
type FileType string
const (
FileTypeOpus FileType = "opus" // 上传opus音频文件;其他格式的音频文件,请转为opus格式后上传,转换方式可参考:ffmpeg -i SourceFile.mp3 -acodec libopus -ac 1 -ar 16000 TargetFile.opus
FileTypeMp4 FileType = "mp4" // 上传mp4视频文件
FileTypePdf FileType = "pdf" // 上传pdf格式文件
FileTypeDoc FileType = "doc" // 上传doc格式文件
FileTypeXls FileType = "xls" // 上传xls格式文件
FileTypePpt FileType = "ppt" // 上传ppt格式文件
FileTypeStream FileType = "stream" // 上传stream格式文件
)
// ----- calendar
// CalendarRole 对日历的访问权限
type CalendarRole string
const (
CalendarRoleUnknown CalendarRole = "unknown" // 未知权限
CalendarRoleFreeBusyReader CalendarRole = "free_busy_reader" // 游客,只能看到忙碌/空闲信息
CalendarRoleReader CalendarRole = "reader" // 订阅者,查看所有日程详情
CalendarRoleWriter CalendarRole = "writer" // 编辑者,创建及修改日程
CalendarRoleOwner CalendarRole = "owner" // 管理员,管理日历及共享设置
)
// CalendarEventAttendeeType is the type of a calendar event attendee.
type CalendarEventAttendeeType string
const (
	CalendarEventAttendeeTypeUser CalendarEventAttendeeType = "user" // user
	CalendarEventAttendeeTypeChat CalendarEventAttendeeType = "chat" // group chat
	CalendarEventAttendeeTypeResource CalendarEventAttendeeType = "resource" // meeting room
	// BUGFIX: was "user", which collided with CalendarEventAttendeeTypeUser;
	// the Lark calendar API identifies email attendees as "third_party".
	CalendarEventAttendeeTypeThirdParty CalendarEventAttendeeType = "third_party" // email attendee
)
// CalendarEventAttendeeTypePtr returns a pointer to a copy of v, for use in
// optional request fields.
func CalendarEventAttendeeTypePtr(v CalendarEventAttendeeType) *CalendarEventAttendeeType {
	p := v
	return &p
}
// CalendarType describes the origin/kind of a calendar.
type CalendarType string

const (
	CalendarTypeUnknown  CalendarType = "unknown"  // unknown type
	CalendarTypePrimary  CalendarType = "primary"  // the primary calendar of a user or application
	CalendarTypeShared   CalendarType = "shared"   // a shared calendar created by a user or application
	CalendarTypeGoogle   CalendarType = "google"   // a Google calendar bound by the user
	CalendarTypeResource CalendarType = "resource" // a meeting-room calendar
	CalendarTypeExchange CalendarType = "exchange" // an Exchange calendar bound by the user
)
// CalendarPermission describes the visibility of a calendar to other users.
type CalendarPermission string

// The constants are explicitly typed as CalendarPermission for consistency
// with every other enum in this file (they were previously untyped strings).
const (
	CalendarPermissionPrivate          CalendarPermission = "private"             // private: not visible to others
	CalendarPermissionShowOnlyFreeBusy CalendarPermission = "show_only_free_busy" // only busy/free information is shown
	CalendarPermissionPublic           CalendarPermission = "public"              // others can view event details
)
// I18nNames is a set of localized display names.
type I18nNames struct {
	ZhCn string `json:"zh_cn,omitempty"` // Chinese name, example: "群聊"
	EnUs string `json:"en_us,omitempty"` // English name, example: "group chat"
	JaJp string `json:"ja_jp,omitempty"` // Japanese name, example: "グループチャット"
}

// Sender identifies who sent a message.
type Sender struct {
	ID         string `json:"id,omitempty"`          // the sender's id
	IDType     IDType `json:"id_type,omitempty"`     // the type of the sender's id
	SenderType string `json:"sender_type,omitempty"` // the kind of sender
}

// MessageBody carries the serialized content of a message.
type MessageBody struct {
	Content string `json:"content,omitempty"` // message content as a JSON string
}

// Mention describes an @-mention inside a message.
type Mention struct {
	Key    string `json:"key,omitempty"`     // mention key
	ID     string `json:"id,omitempty"`      // the mentioned user's open id
	IDType IDType `json:"id_type,omitempty"` // id may be an open_id, user_id or union_id
	Name   string `json:"name,omitempty"`    // name of the mentioned user
}
// AddMemberPermission controls who may add users/bots to a group.
type AddMemberPermission string

const (
	AddMemberPermissionAllMembers AddMemberPermission = "all_members" // any group member
	AddMemberPermissionOnlyOwner  AddMemberPermission = "only_owner"  // only the group owner
)
// AddMemberPermissionPtr returns a pointer to a copy of v, for use in
// optional request fields.
func AddMemberPermissionPtr(v AddMemberPermission) *AddMemberPermission {
	p := v
	return &p
}
// MessageVisibility controls who can see group-join notification messages.
type MessageVisibility string

const (
	MessageVisibilityOnlyOwner  MessageVisibility = "only_owner"  // visible only to the group owner
	MessageVisibilityAllMembers MessageVisibility = "all_members" // visible to all members
	MessageVisibilityNotAnyone  MessageVisibility = "not_anyone"  // visible to no one
)
// MembershipApproval controls whether joining the group requires approval.
type MembershipApproval string

const (
	MembershipApprovalNoApprovalRequired MembershipApproval = "no_approval_required" // no approval required
	MembershipApprovalApprovalRequired   MembershipApproval = "approval_required"    // approval required
)
// MembershipApprovalPtr returns a pointer to a copy of v, for use in
// optional request fields.
func MembershipApprovalPtr(v MembershipApproval) *MembershipApproval {
	p := v
	return &p
}
// ModerationPermission controls who may post messages in a group.
type ModerationPermission string

const (
	ModerationPermissionAllMembers    ModerationPermission = "all_members"    // all members may post
	ModerationPermissionOnlyOwner     ModerationPermission = "only_owner"     // only the group owner may post
	ModerationPermissionModeratorList ModerationPermission = "moderator_list" // only users on the moderator list may post
)
// ModerationPermissionPtr returns a pointer to a copy of v, for use in
// optional request fields.
func ModerationPermissionPtr(v ModerationPermission) *ModerationPermission {
	p := v
	return &p
}
// ShareCardPermission controls whether the group's share card may be shared.
type ShareCardPermission string

const (
	ShareCardPermissionAllowed    ShareCardPermission = "allowed"     // sharing allowed
	ShareCardPermissionNotAllowed ShareCardPermission = "not_allowed" // sharing not allowed
)
// ShareCardPermissionPtr returns a pointer to a copy of v, for use in
// optional request fields.
func ShareCardPermissionPtr(v ShareCardPermission) *ShareCardPermission {
	p := v
	return &p
}
// AtAllPermission controls who may @-mention everyone in a group.
type AtAllPermission string

const (
	AtAllPermissionAllMembers AtAllPermission = "all_members" // any member
	AtAllPermissionOnlyOwner  AtAllPermission = "only_owner"  // only the group owner
)
// AtAllPermissionPtr returns a pointer to a copy of v, for use in
// optional request fields.
func AtAllPermissionPtr(v AtAllPermission) *AtAllPermission {
	p := v
	return &p
}
// EditPermission controls who may edit the group's information.
type EditPermission string

const (
	EditPermissionAllMembers EditPermission = "all_members" // any member
	EditPermissionOnlyOwner  EditPermission = "only_owner"  // only the group owner
)
// EditPermissionPtr returns a pointer to a copy of v, for use in
// optional request fields.
func EditPermissionPtr(v EditPermission) *EditPermission {
	p := v
	return &p
}
// ----- helpdesk

// HelpdeskCustomizedField is a custom key/value field attached to a helpdesk ticket.
type HelpdeskCustomizedField struct {
	ID      string `json:"id"`       // id, example: "123"
	Value   string `json:"value"`    // value, example: "value"
	KeyName string `json:"key_name"` // key name, example: "key"
}

// HelpdeskDropdownOption is one selectable option of a dropdown list.
type HelpdeskDropdownOption struct {
	Tag         string                    `json:"tag,omitempty"`          // option ID
	DisplayName string                    `json:"display_name,omitempty"` // display name
	Children    []*HelpdeskDropdownOption `json:"children,omitempty"`     // nested option list; only used for multi-level dropdowns (at most three levels)
}

// HelpdeskCategory is a category in the helpdesk knowledge base.
type HelpdeskCategory struct {
	CategoryID string              `json:"category_id,omitempty"` // knowledge-base category ID
	ID         string              `json:"id,omitempty"`          // knowledge-base category ID (legacy; prefer CategoryID)
	Name       string              `json:"name,omitempty"`        // name
	ParentID   string              `json:"parent_id,omitempty"`   // parent category ID
	HelpdeskID string              `json:"helpdesk_id,omitempty"` // helpdesk ID
	Language   string              `json:"language,omitempty"`    // language
	Children   []*HelpdeskCategory `json:"children,omitempty"`    // child category details
}
// ----- ehr

// EHRAttachment is a file attached to an employee's EHR record.
type EHRAttachment struct {
	ID       string `json:"id,omitempty"`        // token required to download the file
	MimeType string `json:"mime_type,omitempty"` // file type
	Name     string `json:"name,omitempty"`      // name
	Size     int    `json:"size,omitempty"`      // size
}

// EHREducation is one education-history entry.
type EHREducation struct {
	Level  int    `json:"level,omitempty"`  // education level, one of: 1: primary school, 2: junior high school, 3: senior high school, 4: vocational high school, 5: technical secondary school, 6: junior college, 7: bachelor, 8: master, 9: doctor
	School string `json:"school,omitempty"` // graduating school
	Major  string `json:"major,omitempty"`  // major
	Degree int    `json:"degree,omitempty"` // degree, one of: 1: bachelor, 2: master, 3: doctor
	Start  string `json:"start,omitempty"`  // start date
	End    string `json:"end,omitempty"`    // end date
}

// EHREmergencyContact is an employee's emergency contact.
type EHREmergencyContact struct {
	Name         string `json:"name,omitempty"`         // emergency contact name
	Relationship int    `json:"relationship,omitempty"` // relationship to the employee, one of: 1: parent, 2: spouse, 3: child, 4: sibling, 5: friend, 6: other
	Mobile       string `json:"mobile,omitempty"`       // mobile number
}

// EHRWorkExperience is one work-history entry.
type EHRWorkExperience struct {
	Company     string `json:"company,omitempty"`     // company
	Department  string `json:"department,omitempty"`  // department
	Job         string `json:"job,omitempty"`         // position
	Start       string `json:"start,omitempty"`       // start date
	End         string `json:"end,omitempty"`         // end date
	Description string `json:"description,omitempty"` // job description
}
// ----- event

// EventHeaderV2 is the common header of a v2 ("schema 2.0") event callback.
type EventHeaderV2 struct {
	EventID    string    `json:"event_id,omitempty"`    // event ID
	EventType  EventType `json:"event_type,omitempty"`  // event type
	CreateTime string    `json:"create_time,omitempty"` // event creation timestamp (milliseconds)
	Token      string    `json:"token,omitempty"`       // event token
	AppID      string    `json:"app_id,omitempty"`      // application ID
	TenantKey  string    `json:"tenant_key,omitempty"`  // tenant key
}

// EventHeaderV1 is the common header of a v1 ("schema 1.0") event callback.
//
// NOTE(review): the json tags mirror the v2 field names ("event_id",
// "create_time") even though the Go field names (UUID, TS) suggest the v1
// payload keys — confirm against the code that decodes v1 callbacks whether
// the payload is re-mapped before unmarshalling into this struct.
type EventHeaderV1 struct {
	UUID      string    `json:"event_id,omitempty"`    // event ID
	EventType EventType `json:"event_type,omitempty"`  // event type
	TS        string    `json:"create_time,omitempty"` // event creation timestamp (milliseconds)
	Token     string    `json:"token,omitempty"`       // event token
}
func MessageVisibilityPtr(v MessageVisibility) *MessageVisibility {
return &v
} |
multi_plane_base.py | import numpy as np
from lenstronomy.Cosmo.background import Background
from lenstronomy.LensModel.profile_list_base import ProfileListBase
import lenstronomy.Util.constants as const
__all__ = ['MultiPlaneBase']
class MultiPlaneBase(ProfileListBase):
"""
Multi-plane lensing class
The lens model deflection angles are in units of reduced deflections from the specified redshift of the lens to the
source redshift of the class instance.
"""
    def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cosmo=None,
                 numerical_alpha_class=None, cosmo_interp=False, z_interp_stop=None, num_z_interp=100):
        """
        A description of the recursive multi-plane formalism can be found e.g. here: https://arxiv.org/abs/1312.1536

        :param lens_model_list: list of lens model strings
        :param lens_redshift_list: list of floats with redshifts of the lens models indicated in lens_model_list
        :param z_source_convention: float, redshift of a source to define the reduced deflection angles of the lens
            models. If None, 'z_source' is used.
        :param cosmo: instance of astropy.cosmology
        :param numerical_alpha_class: an instance of a custom class for use in NumericalAlpha() lens model
            (see documentation in Profiles/numerical_alpha)
        :param cosmo_interp: bool, forwarded to Background as ``interp`` (interpolated background
            distances — confirm details in the Background class)
        :param z_interp_stop: float, maximum redshift of the distance interpolation grid;
            defaults to z_source_convention
        :param num_z_interp: int, number of redshift interpolation points forwarded to Background
        """
        if z_interp_stop is None:
            z_interp_stop = z_source_convention
        self._cosmo_bkg = Background(cosmo, interp=cosmo_interp, z_stop=z_interp_stop, num_interp=num_z_interp)
        self._z_source_convention = z_source_convention
        if len(lens_redshift_list) > 0:
            z_lens_max = np.max(lens_redshift_list)
            if z_lens_max >= z_source_convention:
                # a deflector at or beyond the convention redshift would make D_ds <= 0
                # and hence flip the sign of the reduced deflection angles
                raise ValueError('deflector redshifts higher or equal the source redshift convention (%s >= %s for the reduced lens'
                                 ' model quantities not allowed (leads to negative reduced deflection angles!'
                                 % (z_lens_max, z_source_convention))
        if not len(lens_model_list) == len(lens_redshift_list):
            raise ValueError("The length of lens_model_list does not correspond to redshift_list")
        self._lens_redshift_list = lens_redshift_list
        super(MultiPlaneBase, self).__init__(lens_model_list, numerical_alpha_class=numerical_alpha_class,
                                             lens_redshift_list=lens_redshift_list,
                                             z_source_convention=z_source_convention)
        # evaluation order of the deflector planes: ascending redshift
        if len(lens_model_list) < 1:
            self._sorted_redshift_index = []
        else:
            self._sorted_redshift_index = self._index_ordering(lens_redshift_list)
        z_before = 0
        T_z = 0
        # _T_ij_list[i]: transverse distance from the previous plane (or the observer) to plane i
        # _T_z_list[i]: transverse distance from the observer to plane i
        self._T_ij_list = []
        self._T_z_list = []
        # Sort redshift for vectorized reduced2physical factor calculation
        if len(lens_model_list)<1:
            self._reduced2physical_factor = []
        else:
            z_sort = np.array(self._lens_redshift_list)[self._sorted_redshift_index]
            z_source_array = np.ones(z_sort.shape)*z_source_convention
            # D_s / D_ds per (sorted) deflector plane
            self._reduced2physical_factor = self._cosmo_bkg.d_xy(0, z_source_convention) / self._cosmo_bkg.d_xy(z_sort, z_source_array)
        for idex in self._sorted_redshift_index:
            z_lens = self._lens_redshift_list[idex]
            if z_before == z_lens:
                # multiple profiles at identical redshift share one physical plane
                delta_T = 0
            else:
                T_z = self._cosmo_bkg.T_xy(0, z_lens)
                delta_T = self._cosmo_bkg.T_xy(z_before, z_lens)
            self._T_ij_list.append(delta_T)
            self._T_z_list.append(T_z)
            z_before = z_lens
    def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens,
                             include_z_start=False, T_ij_start=None, T_ij_end=None):
        """
        ray-tracing through parts of the coin, starting with (x,y) co-moving distances and angles (alpha_x, alpha_y)
        at redshift z_start and then backwards to redshift z_stop

        :param x: co-moving position [Mpc]
        :param y: co-moving position [Mpc]
        :param alpha_x: ray angle at z_start [arcsec]
        :param alpha_y: ray angle at z_start [arcsec]
        :param z_start: redshift of start of computation
        :param z_stop: redshift where output is computed
        :param kwargs_lens: lens model keyword argument list
        :param include_z_start: bool, if True, includes the computation of the deflection angle at the same redshift as
            the start of the ray-tracing. ATTENTION: deflection angles at the same redshift as z_stop will be computed always!
            This can lead to duplications in the computation of deflection angles.
        :param T_ij_start: transverse angular distance between the starting redshift to the first lens plane to follow.
            If not set, will compute the distance each time this function gets executed.
        :param T_ij_end: transverse angular distance between the last lens plane being computed and z_end.
            If not set, will compute the distance each time this function gets executed.
        :return: co-moving position and angles at redshift z_stop
        """
        # local array copies; _ray_step_add below mutates these in place
        x = np.array(x, dtype=float)
        y = np.array(y, dtype=float)
        alpha_x = np.array(alpha_x)
        alpha_y = np.array(alpha_y)
        z_lens_last = z_start
        first_deflector = True
        for i, idex in enumerate(self._sorted_redshift_index):
            z_lens = self._lens_redshift_list[idex]
            if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:
                if first_deflector is True:
                    # distance from the starting redshift to the first deflector plane in range
                    if T_ij_start is None:
                        if z_start == 0:
                            delta_T = self._T_ij_list[0]
                        else:
                            delta_T = self._cosmo_bkg.T_xy(z_start, z_lens)
                    else:
                        delta_T = T_ij_start
                    first_deflector = False
                else:
                    # pre-computed distance between consecutive deflector planes
                    delta_T = self._T_ij_list[i]
                # propagate to the plane, then apply its deflection
                x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T)
                alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
                z_lens_last = z_lens
        # final propagation from the last deflector plane (or z_start) to z_stop
        if T_ij_end is None:
            if z_lens_last == z_stop:
                delta_T = 0
            else:
                delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
        else:
            delta_T = T_ij_end
        x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T)
        return x, y, alpha_x, alpha_y
def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False):
"""
computes the transverse distance (T_ij) that is required by the ray-tracing between the starting redshift and
the first deflector afterwards and the last deflector before the end of the ray-tracing.
:param z_start: redshift of the start of the ray-tracing
:param z_stop: stop of ray-tracing
:param include_z_start: boolean, if True includes the computation of the starting position if the first
deflector is at z_start
:return: T_ij_start, T_ij_end
"""
z_lens_last = z_start
first_deflector = True
T_ij_start = None
for i, idex in enumerate(self._sorted_redshift_index):
z_lens = self._lens_redshift_list[idex]
if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:
if first_deflector is True:
T_ij_start = self._cosmo_bkg.T_xy(z_start, z_lens)
first_deflector = False
z_lens_last = z_lens
T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
return T_ij_start, T_ij_end
    def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None, T_ij_end=None):
        """
        geometric and Shapiro (gravitational) light travel time relative to a straight path through the coordinate (0,0)
        Negative sign means earlier arrival time

        :param theta_x: angle in x-direction on the image
        :param theta_y: angle in y-direction on the image
        :param kwargs_lens: lens model keyword argument list
        :param z_stop: redshift of the source to stop the backwards ray-tracing
        :param T_z_stop: optional, transversal angular distance from z=0 to z_stop
        :param T_ij_end: optional, transversal angular distance between the last lensing plane and the source plane
        :return: dt_geo, dt_shapiro, [days]
        """
        dt_grav = np.zeros_like(theta_x, dtype=float)
        dt_geo = np.zeros_like(theta_x, dtype=float)
        # ray starts at the observer (co-moving origin) with the image-plane angles
        x = np.zeros_like(theta_x, dtype=float)
        y = np.zeros_like(theta_y, dtype=float)
        alpha_x = np.array(theta_x, dtype=float)
        alpha_y = np.array(theta_y, dtype=float)
        # NOTE(review): with an empty deflector list, self._T_z_list[i] after the
        # loop would raise IndexError — confirm this path is never reached without
        # deflectors.
        i = 0
        z_lens_last = 0
        for i, index in enumerate(self._sorted_redshift_index):
            z_lens = self._lens_redshift_list[index]
            if z_lens <= z_stop:
                T_ij = self._T_ij_list[i]
                x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij)
                if i == 0:
                    # no geometric-delay contribution on the first segment
                    pass
                elif T_ij > 0:
                    # angles on the sky at the previous (i-1) and current (i) planes
                    T_j = self._T_z_list[i]
                    T_i = self._T_z_list[i - 1]
                    beta_i_x, beta_i_y = x / T_i, y / T_i
                    beta_j_x, beta_j_y = x_new / T_j, y_new / T_j
                    dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij)
                    dt_geo += dt_geo_new
                x, y = x_new, y_new
                # Shapiro delay from the potential of plane i, then deflect the ray
                dt_grav_new = self._gravitational_delay(x, y, kwargs_lens, i, z_lens)
                alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
                dt_grav += dt_grav_new
                z_lens_last = z_lens
        # final segment: from the last deflector plane to z_stop
        if T_ij_end is None:
            T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
        T_ij = T_ij_end
        x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij)
        if T_z_stop is None:
            T_z_stop = self._cosmo_bkg.T_xy(0, z_stop)
        T_j = T_z_stop
        T_i = self._T_z_list[i]
        beta_i_x, beta_i_y = x / T_i, y / T_i
        beta_j_x, beta_j_y = x_new / T_j, y_new / T_j
        dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij)
        dt_geo += dt_geo_new
        return dt_geo, dt_grav
@staticmethod
def _index_ordering(redshift_list):
"""
:param redshift_list: list of redshifts
:return: indexes in ascending order to be evaluated (from z=0 to z=z_source)
"""
redshift_list = np.array(redshift_list)
#sort_index = np.argsort(redshift_list[redshift_list < z_source])
sort_index = np.argsort(redshift_list)
#if len(sort_index) < 1:
# Warning("There is no lens object between observer at z=0 and source at z=%s" % z_source)
return sort_index
def _reduced2physical_deflection(self, alpha_reduced, index_lens):
"""
alpha_reduced = D_ds/Ds alpha_physical
:param alpha_reduced: reduced deflection angle
:param index_lens: integer, index of the deflector plane
:return: physical deflection angle
"""
factor = self._reduced2physical_factor[index_lens]
return alpha_reduced * factor
def _gravitational_delay(self, x, y, kwargs_lens, index, z_lens):
|
@staticmethod
def _geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij):
"""
:param beta_i_x: angle on the sky at plane i
:param beta_i_y: angle on the sky at plane i
:param beta_j_x: angle on the sky at plane j
:param beta_j_y: angle on the sky at plane j
:param T_i: transverse diameter distance to z_i
:param T_j: transverse diameter distance to z_j
:param T_ij: transverse diameter distance from z_i to z_j
:return: excess delay relative to a straight line
"""
d_beta_x = beta_j_x - beta_i_x
d_beta_y = beta_j_y - beta_i_y
tau_ij = T_i * T_j / T_ij * const.Mpc / const.c / const.day_s * const.arcsec**2
return tau_ij * (d_beta_x ** 2 + d_beta_y ** 2) / 2
def _lensing_potential2time_delay(self, potential, z_lens, z_source):
"""
transforms the lensing potential (in units arcsec^2) to a gravitational time-delay as measured at z=0
:param potential: lensing potential
:param z_lens: redshift of the deflector
:param z_source: redshift of source for the definition of the lensing quantities
:return: gravitational time-delay in units of days
"""
D_dt = self._cosmo_bkg.ddt(z_lens, z_source)
delay_days = const.delay_arcsec2days(potential, D_dt)
return delay_days
def _co_moving2angle(self, x, y, index):
"""
transforms co-moving distances Mpc into angles on the sky (radian)
:param x: co-moving distance
:param y: co-moving distance
:param index: index of plane
:return: angles on the sky
"""
T_z = self._T_z_list[index]
theta_x = x / T_z
theta_y = y / T_z
return theta_x, theta_y
@staticmethod
def _ray_step(x, y, alpha_x, alpha_y, delta_T):
"""
ray propagation with small angle approximation
:param x: co-moving x-position
:param y: co-moving y-position
:param alpha_x: deflection angle in x-direction at (x, y)
:param alpha_y: deflection angle in y-direction at (x, y)
:param delta_T: transverse angular diameter distance to the next step
:return: co-moving position at the next step (backwards)
"""
x_ = x + alpha_x * delta_T
y_ = y + alpha_y * delta_T
return x_, y_
    @staticmethod
    def _ray_step_add(x, y, alpha_x, alpha_y, delta_T):
        """
        ray propagation with small angle approximation

        WARNING: this updates ``x`` and ``y`` IN PLACE (``+=``); when numpy
        arrays are passed in, the caller's arrays are modified. Callers must
        own the arrays they pass here (see ray_shooting_partial, which makes
        local copies first).

        :param x: co-moving x-position
        :param y: co-moving y-position
        :param alpha_x: deflection angle in x-direction at (x, y)
        :param alpha_y: deflection angle in y-direction at (x, y)
        :param delta_T: transverse angular diameter distance to the next step
        :return: co-moving position at the next step (backwards)
        """
        x += alpha_x * delta_T
        y += alpha_y * delta_T
        return x, y
def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, index):
"""
adds the physical deflection angle of a single lens plane to the deflection field
:param x: co-moving distance at the deflector plane
:param y: co-moving distance at the deflector plane
:param alpha_x: physical angle (radian) before the deflector plane
:param alpha_y: physical angle (radian) before the deflector plane
:param kwargs_lens: lens model parameter kwargs
:param index: index of the lens model to be added in sorted redshift list convention
:param idex_lens: redshift of the deflector plane
:return: updated physical deflection after deflector plane (in a backwards ray-tracing perspective)
"""
theta_x, theta_y = self._co_moving2angle(x, y, index)
k = self._sorted_redshift_index[index]
alpha_x_red, alpha_y_red = self.func_list[k].derivatives(theta_x, theta_y, **kwargs_lens[k])
alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, index)
alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, index)
return alpha_x - alpha_x_phys, alpha_y - alpha_y_phys
@staticmethod
def _start_condition(inclusive, z_lens, z_start):
"""
:param inclusive: boolean, if True selects z_lens including z_start, else only selects z_lens > z_start
:param z_lens: deflector redshift
:param z_start: starting redshift (lowest redshift)
:return: boolean of condition
"""
if inclusive:
return z_lens >= z_start
else:
return z_lens > z_start
| """
:param x: co-moving coordinate at the lens plane
:param y: co-moving coordinate at the lens plane
:param kwargs_lens: lens model keyword arguments
:param z_lens: redshift of the deflector
:param index: index of the lens model in sorted redshfit convention
:return: gravitational delay in units of days as seen at z=0
"""
theta_x, theta_y = self._co_moving2angle(x, y, index)
k = self._sorted_redshift_index[index]
potential = self.func_list[k].function(theta_x, theta_y, **kwargs_lens[k])
delay_days = self._lensing_potential2time_delay(potential, z_lens, z_source=self._z_source_convention)
return -delay_days |
GraphicStitchingFour.tsx | /**
* @file GraphicStitchingFour 圆形组合
* @author Auto Generated by IconPark
*/
/* tslint:disable: max-line-length */
/* eslint-disable max-len */
import React from 'react';
import {ISvgIconProps, IconWrapper} from '../runtime';
export default IconWrapper(
'graphic-stitching-four',
true,
(props: ISvgIconProps) => (
<svg
width={props.size}
height={props.size}
viewBox="0 0 48 48"
fill="none"
>
<circle
cx="39"
cy="9"
r="5"
fill={props.colors[1]}
stroke={props.colors[0]}
strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
<circle
cx="9"
cy="39"
r="5"
fill={props.colors[1]}
stroke={props.colors[0]}
strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
<rect
x="4"
y="4"
width="10"
height="10"
fill={props.colors[1]}
stroke={props.colors[0]}
strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
<rect
x="34"
y="34"
width="10"
height="10"
fill={props.colors[1]}
stroke={props.colors[0]}
strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
<path | strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
<path
d="M34 39H14"
stroke={props.colors[0]}
strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
<path
d="M9 34L9 14"
stroke={props.colors[0]}
strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
<path
d="M39 34L39 14"
stroke={props.colors[0]}
strokeWidth={props.strokeWidth}
strokeLinecap={props.strokeLinecap}
strokeLinejoin={props.strokeLinejoin}
/>
</svg>
)
); | d="M34 9H14"
stroke={props.colors[0]} |
test_clickhouse.py | import time
import e2e.clickhouse as clickhouse
import e2e.kubectl as kubectl
import e2e.yaml_manifest as yaml_manifest
import e2e.settings as settings
import e2e.util as util
from testflows.core import *
from testflows.asserts import error
@TestScenario
@Name("test_ch_001. Insert quorum")
def test_ch_001(self):
    """
    Exercises ClickHouse insert-quorum behaviour on a two-replica CHI:
    stops/starts replica fetches and checks that inserts fail or succeed
    according to the quorum settings, including a TTL-dropped partition and
    tables fed through materialized views.
    """
    util.require_keeper(keeper_type=self.context.keeper_type)
    quorum_template = "manifests/chit/tpl-clickhouse-21.8.yaml"
    chit_data = yaml_manifest.get_manifest_data(util.get_full_path(quorum_template))

    # remove a leftover template from a previous run (ignore if absent)
    kubectl.launch(f"delete chit {chit_data['metadata']['name']}", ns=settings.test_namespace, ok_to_fail=True)
    kubectl.create_and_check(
        "manifests/chi/test-ch-001-insert-quorum.yaml",
        {
            "apply_templates": {quorum_template},
            "pod_count": 2,
            "do_not_delete": 1,
        })

    chi = yaml_manifest.get_chi_name(util.get_full_path("manifests/chi/test-ch-001-insert-quorum.yaml"))
    chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi)
    util.wait_clickhouse_cluster_ready(chi_data)

    # pod names of the two replicas of shard 0
    host0 = "chi-test-ch-001-insert-quorum-default-0-0"
    host1 = "chi-test-ch-001-insert-quorum-default-0-1"

    # replicated table with a 5-second TTL so old partitions are dropped quickly
    create_table = """
    create table t1 on cluster default (a Int8, d Date default today())
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by d order by a
    TTL d + interval 5 second
    SETTINGS merge_with_ttl_timeout=5""".replace('\r', '').replace('\n', '')

    create_mv_table2 = """
    create table t2 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv_table3 = """
    create table t3 on cluster default (a Int8)
    Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}')
    partition by tuple() order by a""".replace('\r', '').replace('\n', '')

    create_mv2 = "create materialized view t_mv2 on cluster default to t2 as select a from t1"
    create_mv3 = "create materialized view t_mv3 on cluster default to t3 as select a from t1"

    with Given("Tables t1, t2, t3 and MVs t1->t2, t1-t3 are created"):
        clickhouse.query(chi, create_table)
        clickhouse.query(chi, create_mv_table2)
        clickhouse.query(chi, create_mv_table3)
        clickhouse.query(chi, create_mv2)
        clickhouse.query(chi, create_mv3)

    with When("Add a row to an old partition"):
        clickhouse.query(chi, "insert into t1(a,d) values(6, today()-1)", host=host0)

    with When("Stop fetches for t1 at replica1"):
        clickhouse.query(chi, "system stop fetches default.t1", host=host1)

        with Then("Wait 10 seconds and the data should be dropped by TTL"):
            time.sleep(10)
            out = clickhouse.query(chi, "select count() from t1 where a=6", host=host0)
            assert out == "0", error()

    with When("Resume fetches for t1 at replica1"):
        clickhouse.query(chi, "system start fetches default.t1", host=host1)
        time.sleep(5)

        with Then("Inserts should resume"):
            clickhouse.query(chi, "insert into t1(a) values(7)", host=host0)

    clickhouse.query(chi, "insert into t1(a) values(1)")
    with When("Stop fetches for t2 at replica1"):
        clickhouse.query(chi, "system stop fetches default.t2", host=host1)

        with Then("Insert should fail since it can not reach the quorum"):
            out = clickhouse.query_with_error(chi, "insert into t1(a) values(2)", host=host0)
            assert "Timeout while waiting for quorum" in out, error()

    # kubectl(f"exec {host0}-0 -n test -- cp /var/lib//clickhouse/data/default/t2/all_1_1_0/a.mrk2 /var/lib//clickhouse/data/default/t2/all_1_1_0/a.bin")
    # with Then("Corrupt data part in t2"):
    #     kubectl(f"exec {host0}-0 -n test -- sed -i \"s/b/c/\" /var/lib/clickhouse/data/default/t2/all_1_1_0/a.bin")

    with When("Resume fetches for t2 at replica1"):
        clickhouse.query(chi, "system start fetches default.t2", host=host1)
        # poll until both replicas are active again (up to ~50 seconds)
        i = 0
        while "2" != clickhouse.query(chi, "select active_replicas from system.replicas where database='default' and table='t1'", pod=host0) and i < 10:
            with Then("Not ready, wait 5 seconds"):
                time.sleep(5)
                i += 1

        with Then("Inserts should fail with an error regarding not satisfied quorum"):
            out = clickhouse.query_with_error(chi, "insert into t1(a) values(3)", host=host0)
            assert "Quorum for previous write has not been satisfied yet" in out, error()

        with And("Second insert of the same block should pass"):
            clickhouse.query(chi, "insert into t1(a) values(3)", host=host0)

        with And("Insert of the new block should fail"):
            out = clickhouse.query_with_error(chi, "insert into t1(a) values(4)", host=host0)
            assert "Quorum for previous write has not been satisfied yet" in out, error()

        with And("Second insert of the same block with 'deduplicate_blocks_in_dependent_materialized_views' setting should fail"):
            out = clickhouse.query_with_error(
                chi,
                "set deduplicate_blocks_in_dependent_materialized_views=1; insert into t1(a) values(5)",
                host=host0
            )
            assert "Quorum for previous write has not been satisfied yet" in out, error()

    out = clickhouse.query_with_error(
        chi, "select t1.a t1_a, t2.a t2_a from t1 left outer join t2 using (a) order by t1_a settings join_use_nulls=1"
    )
    note(out)

    # cat /var/log/clickhouse-server/clickhouse-server.log | grep t2 | grep -E "all_1_1_0|START|STOP"
@TestScenario
@Name("test_ch_002. Row-level security")
def test_ch_002(self):
    """
    Checks ClickHouse row-level security on a CHI: four users insert/select
    against a shared table, and each user must only see the single row of
    their own team.
    """
    kubectl.create_and_check(
        "manifests/chi/test-ch-002-row-level.yaml",
        {
            "apply_templates": {"manifests/chit/tpl-clickhouse-21.8.yaml"},
            "do_not_delete": 1,
        })

    chi = "test-ch-002-row-level"
    create_table = """create table test (d Date default today(), team LowCardinality(String), user String) Engine = MergeTree() PARTITION BY d ORDER BY d;"""

    with When("Create test table"):
        clickhouse.query(chi, create_table)

    with And("Insert some data"):
        clickhouse.query(
            chi, "INSERT INTO test(team, user) values('team1', 'user1'),('team2', 'user2'),('team3', 'user3'),('team4', 'user4')"
        )

    with Then("Make another query for different users. It should be restricted to corresponding team by row-level security"):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi, "select user from test", user=user, pwd=user)
            assert out == user, error()

    with Then("Make a count() query for different users. It should be restricted to corresponding team by row-level security"):
        for user in ['user1', 'user2', 'user3', 'user4']:
            out = clickhouse.query(chi, "select count() from test", user=user, pwd=user)
            assert out == "1", error()

    kubectl.delete_chi(chi)
@TestFeature
@Name("e2e.test_clickhouse")
def | (self):
util.clean_namespace(delete_chi=False)
all_tests = [
test_ch_001,
test_ch_002,
]
run_test = all_tests
# placeholder for selective test running
# run_test = [test_ch_002]
for t in run_test:
Scenario(test=t)()
| test |
expected_default_config_ts_simple_app.js | (function (lwc) {
'use strict';
function stylesheet(hostSelector, shadowSelector, nativeShadow) {
return ["\n", (nativeShadow ? (":host {color: var(--lwc-my-color);}") : [hostSelector, " {color: var(--lwc-my-color);}"].join('')), "\n"].join('');
}
var _implicitStylesheets = [stylesheet];
function tmpl($api, $cmp, $slotset, $ctx) {
const {
d: api_dynamic,
h: api_element
} = $api;
return [api_element("div", {
key: 0
}, [api_dynamic($cmp.x)])];
}
var _tmpl = lwc.registerTemplate(tmpl);
tmpl.stylesheets = [];
if (_implicitStylesheets) {
tmpl.stylesheets.push.apply(tmpl.stylesheets, _implicitStylesheets);
}
tmpl.stylesheetTokens = {
hostAttribute: "ts-foo_foo-host",
shadowAttribute: "ts-foo_foo"
};
class Foo extends lwc.LightningElement {
constructor(...args) {
super(...args);
this.x = void 0;
}
}
lwc.registerDecorators(Foo, {
publicProps: {
x: {
config: 0
}
}
});
var _tsFoo = lwc.registerComponent(Foo, {
tmpl: _tmpl
});
function | ($api, $cmp, $slotset, $ctx) {
const {
c: api_custom_element,
h: api_element
} = $api;
return [api_element("div", {
classMap: {
"container": true
},
key: 1
}, [api_custom_element("ts-foo", _tsFoo, {
props: {
"x": "1"
},
key: 0
}, [])])];
}
var _tmpl$1 = lwc.registerTemplate(tmpl$1);
tmpl$1.stylesheets = [];
tmpl$1.stylesheetTokens = {
hostAttribute: "ts-app_app-host",
shadowAttribute: "ts-app_app"
};
class App extends lwc.LightningElement {
constructor() {
super();
}
}
var App$1 = lwc.registerComponent(App, {
tmpl: _tmpl$1
});
function doNothing() {
return;
}
// @ts-ignore
const container = document.getElementById('main');
const element = lwc.createElement('ts-app', {
is: App$1
});
container.appendChild(element); // testing relative import works
console.log('>>', doNothing());
}(LWC)); | tmpl$1 |
mod.rs | //! Boa parser implementation.
mod cursor;
pub mod error;
mod expression;
mod function;
mod statement;
#[cfg(test)]
mod tests;
use self::error::{ParseError, ParseResult};
use crate::syntax::ast::{node::StatementList, Token};
use cursor::Cursor;
/// Trait implemented by parsers.
///
/// This makes it possible to abstract over the underlying implementation of a parser.
trait TokenParser: Sized {
    /// Output type for the parser.
    type Output; // = Node; waiting for https://github.com/rust-lang/rust/issues/29661

    /// Parses the token stream using the current parser.
    ///
    /// This method needs to be provided by the implementor type.
    fn parse(self, cursor: &mut Cursor<'_>) -> Result<Self::Output, ParseError>;

    /// Tries to parse the following tokens with this parser.
    ///
    /// On failure, the cursor is rewound to the position it had before the
    /// attempt, so the caller can try a different parser.
    fn try_parse(self, cursor: &mut Cursor<'_>) -> Option<Self::Output> {
        let checkpoint = cursor.pos();
        match self.parse(cursor) {
            Ok(node) => Some(node),
            Err(_) => {
                cursor.seek(checkpoint);
                None
            }
        }
    }
}
// The `Allow*` flags below are newtype wrappers over `bool`. Using distinct
// types (instead of bare `bool` parameters) keeps the individual parser flags
// from being accidentally swapped at call sites.

/// Boolean representing if the parser should allow a `yield` keyword.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct AllowYield(bool);

impl From<bool> for AllowYield {
    fn from(allow: bool) -> Self {
        Self(allow)
    }
}

/// Boolean representing if the parser should allow a `await` keyword.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct AllowAwait(bool);

impl From<bool> for AllowAwait {
    fn from(allow: bool) -> Self {
        Self(allow)
    }
}

/// Boolean representing if the parser should allow a `in` keyword.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct AllowIn(bool);

impl From<bool> for AllowIn {
    fn from(allow: bool) -> Self {
        Self(allow)
    }
}

/// Boolean representing if the parser should allow a `return` keyword.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct AllowReturn(bool);

impl From<bool> for AllowReturn {
    fn from(allow: bool) -> Self {
        Self(allow)
    }
}

/// Boolean representing if the parser should allow a `default` keyword.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct AllowDefault(bool);

impl From<bool> for AllowDefault {
    fn from(allow: bool) -> Self {
        Self(allow)
    }
}
#[derive(Debug)]
pub struct Parser<'a> {
    /// Cursor in the parser, the internal structure used to read tokens.
    cursor: Cursor<'a>,
}

impl<'a> Parser<'a> {
    /// Create a new parser, using `tokens` as input
    ///
    /// The parser borrows the token slice for lifetime `'a`; it does not
    /// take ownership of the tokens.
    pub fn new(tokens: &'a [Token]) -> Self {
        Self {
            cursor: Cursor::new(tokens),
        }
    }

    /// Parse all expressions in the token array
    ///
    /// Parses the whole token stream as a top-level ECMAScript `Script`,
    /// returning the resulting statement list or the first `ParseError`
    /// encountered.
    pub fn parse_all(&mut self) -> Result<StatementList, ParseError> {
        Script.parse(&mut self.cursor)
    }
}
/// Parses a full script.
///
/// More information:
/// - [ECMAScript specification][spec]
///
/// [spec]: https://tc39.es/ecma262/#prod-Script
#[derive(Debug, Clone, Copy)]
pub struct Script;
impl TokenParser for Script {
type Output = StatementList;
fn parse(self, cursor: &mut Cursor<'_>) -> Result<Self::Output, ParseError> |
}
/// Parses a script body.
///
/// More information:
/// - [ECMAScript specification][spec]
///
/// [spec]: https://tc39.es/ecma262/#prod-ScriptBody
#[derive(Debug, Clone, Copy)]
pub struct ScriptBody;

impl TokenParser for ScriptBody {
    type Output = StatementList;

    fn parse(self, cursor: &mut Cursor<'_>) -> Result<Self::Output, ParseError> {
        // At the top level none of the yield/await/in/return contexts apply,
        // so every flag is disabled.
        let statements = self::statement::StatementList::new(false, false, false, false);
        statements.parse(cursor)
    }
}
| {
if cursor.peek(0).is_some() {
ScriptBody.parse(cursor)
} else {
Ok(StatementList::from(Vec::new()))
}
} |
CXData.py | """
.. module:: CXData2.py
:platform: Unix
:synopsis: A class for coherent X-ray phasing data.
.. moduleauthor:: David Vine <[email protected]>
"""
import scipy as sp
import numpy as np
import scipy.fftpack as spf
import scipy.ndimage as spn
from numpy.random import uniform
from numpy import pad
import os
import pdb
import pylab
import shutil
import sys
import operator
from round_scan import round_roi
import glob
import multiprocessing as mp
import time
from matplotlib import cm
from images2gif import writeGif
from CXFileReader import CXFileReader
from cxparams import CXParams as CXP
debug = True
def fft2(x):
    """Forward 2D FFT dispatching on CXData, CXModal or ndarray input."""
    if isinstance(x, CXData):
        return CXData(data=[spf.fft2(arr) for arr in x.data])
    elif isinstance(x, CXModal):
        # Recurse so each mode (a CXData) is transformed independently.
        return CXModal(modes=[fft2(mode) for mode in x.modes])
    elif isinstance(x, np.ndarray):
        return spf.fft2(x)
    else:
        raise Exception('Unknown data type passed to fft2')
def ifft2(x):
    """Inverse 2D FFT dispatching on CXData, CXModal or ndarray input."""
    if isinstance(x, CXData):
        return CXData(data=[spf.ifft2(arr) for arr in x.data])
    elif isinstance(x, CXModal):
        # Recurse so each mode (a CXData) is transformed independently.
        return CXModal(modes=[ifft2(mode) for mode in x.modes])
    elif isinstance(x, np.ndarray):
        return spf.ifft2(x)
    else:
        raise Exception('Unknown data type passed to ifft2')
def fftshift(x):
    """FFT quadrant shift for CXData or ndarray input."""
    if isinstance(x, CXData):
        return CXData(data=[spf.fftshift(arr) for arr in x.data])
    elif isinstance(x, np.ndarray):
        return spf.fftshift(x)
    else:
        raise Exception('Unknown data type passed to fftshift')
def abs(x):
    """Element-wise modulus for CXData, CXModal or ndarray input.

    NOTE: shadows the builtin abs() within this module.
    """
    if isinstance(x, CXData):
        return CXData(data=[np.abs(arr) for arr in x.data])
    elif isinstance(x, CXModal):
        return CXModal(modes=[abs(mode) for mode in x.modes])
    elif isinstance(x, np.ndarray):
        return np.abs(x)
    else:
        raise Exception('Unknown data type passed to abs')
def angle(x):
    """Element-wise complex phase for CXData or ndarray input."""
    if isinstance(x, CXData):
        return CXData(data=[sp.angle(arr) for arr in x.data])
    elif isinstance(x, np.ndarray):
        return sp.angle(x)
    else:
        raise Exception('Unknown data type passed to angle')
def exp(x):
    """Element-wise exponential for CXData or ndarray input."""
    if isinstance(x, CXData):
        return CXData(data=[sp.exp(arr) for arr in x.data])
    elif isinstance(x, np.ndarray):
        return sp.exp(x)
    else:
        raise Exception('Unknown data type passed to exp')
def log(x):
    """Element-wise natural logarithm for CXData or ndarray input."""
    if isinstance(x, CXData):
        return CXData(data=[sp.log(arr) for arr in x.data])
    elif isinstance(x, np.ndarray):
        return sp.log(x)
    else:
        raise Exception('Unknown data type passed to log')
def conj(x):
    """
    Element-wise complex conjugate for CXData, CXModal or ndarray input.
    """
    if isinstance(x, CXData):
        return CXData(data=[sp.conj(arr) for arr in x.data])
    elif isinstance(x, CXModal):
        return CXModal(modes=[conj(mode) for mode in x.modes])
    elif isinstance(x, np.ndarray):
        return sp.conj(x)
    else:
        raise Exception('Unknown data type passed to conj')
def sqrt(x):
    """
    Element-wise square root for CXData, CXModal, scalar or ndarray input.

    Bug fix: the CXModal branch previously called exp() on each mode instead
    of sqrt(), silently exponentiating modal data.
    """
    if isinstance(x, CXData):
        return CXData(data=[sp.sqrt(arr) for arr in x.data])
    elif isinstance(x, CXModal):
        # Was exp(x.modes[mode]); recurse with sqrt so modes are rooted.
        return CXModal(modes=[sqrt(mode) for mode in x.modes])
    elif isinstance(x, (int, float, complex, np.ndarray)):
        return sp.sqrt(x)
    else:
        raise Exception('Unknown data type passed to sqrt')
def sum(x):
    """
    Sum the arrays of a CXData into a single-array CXData, sum each mode of a
    CXModal, or reduce an ndarray to a scalar.

    NOTE: shadows the builtin sum() within this module.

    Bug fixes relative to the original:
      * the CXModal branch referenced `self`, which does not exist in a free
        function (NameError); it now uses the `x` argument;
      * the CXData branch appended a *reference* to x.data[0] and then used
        in-place `+=`, mutating the caller's first array; the accumulator is
        now an explicit copy.
    """
    if isinstance(x, CXData):
        total = x.data[0].copy()
        for arr in x.data[1:]:
            total = total + arr
        return CXData(data=[total])
    elif isinstance(x, CXModal):
        return CXModal(modes=[sum(mode) for mode in x.modes])
    elif isinstance(x, np.ndarray):
        return sp.sum(x)
    else:
        raise Exception('Unknown data type passed to sum')
def worker(func):
    # Decorator turning `func` into a multiprocessing queue consumer.
    # Direct-call mode is requested by passing a 'no_decorate' keyword.
    def worker2(self=None, *args, **kwargs):
        try:
            # Probe for the 'no_decorate' flag; KeyError means queue mode.
            kwargs['no_decorate']
            return func(self, args[0], args[1], args[2], args[3], args[4], args[5])
        except KeyError:
            # Queue mode: args are the (jobs, results) multiprocessing queues.
            cnt = 0
            jobs, results = args[0], args[1]
            while True:
                job_args = jobs.get()
                if job_args[0]==None: # Deal with Poison Pill
                    print '{}: Exiting. {:d} jobs completed.'.format(mp.current_process().name, cnt)
                    jobs.task_done()
                    break
                # Progress report roughly every 10% of the total job count.
                if job_args[0]%np.floor(job_args[1]/10)==0:
                    print 'Processed {:d} out of {:d} files.'.format(job_args[0], job_args[1])
                res = func(self, *job_args)
                cnt+=1
                jobs.task_done()
                results.put(res)
            # NOTE(review): this inner `return worker2` looks unintended (it
            # returns the wrapper from inside itself after the queue drains);
            # confirm against the original source — indentation was lost in
            # this dump.
            return worker2
    return worker2
class CXData(CXFileReader):
"""
Defines a class for holding and interacting with coherent x-ray data.
...
Attributes
----------
data: list
list of complex arrays that hold all of the phase retrieval data.
name: str
name of instance. Used for logging.
savepath: str
location where this data should be saved.
Methods
-------
"""
def __init__(self, *args, **kwargs):
self.data = None
self.savepath = None
for kw in kwargs:
# Data attribute must be a list of arrays
if kw=='data':
if isinstance(kwargs['data'], list):
self.data = kwargs['data']
elif isinstance(kwargs['data'], np.ndarray):
self.data = [kwargs['data']]
else:
setattr(self, kw, kwargs[kw])
def __repr__(self):
try:
s=repr(self.data[0])
except:
s=''
try:
return '<{} at {}>\n{} arrays ({:d}x{:d}px).\n{}'.format(self.__class__,
hex(id(self)), len(self.data), self.data[0].shape[0], self.data[0].shape[1], s)
except AttributeError:
return '<{} at {}>\nNo data attribute present.'.format(self.__class__, hex(id(self)))
def __add__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]+other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]+other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__add__(other, self)
def __iadd__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]+=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]+=other
return self
elif isinstance(other, CXModal):
raise("The meaning of += is ambiguous for these datatypes")
def __sub__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]-other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]-other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__sub__(other, self)*-1.0
def __isub__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]-=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]-=other.data
return self
elif isinstance(other, CXModal):
raise("The meaning of -= is ambiguous for these datatypes")
def __pow__(self, power):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]**power)
return CXData(data=l)
def __mul__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]*other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]*other)
return CXData(data=l)
elif isinstance(other, CXModal):
return CXModal.__mul__(other, self)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]*=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]*=other
return self
elif isinstance(other, CXModal):
raise("The meaning of *= is ambiguous for these datatypes")
def __div__(self, other):
if isinstance(other, CXData):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]/other.data[i])
return CXData(data=l)
elif isinstance(other, (int, float, complex)):
l=[]
for i in xrange(len(self.data)):
l.append(self.data[i]/other)
return CXData(data=l)
elif isinstance(other, CXModal):
raise("The meaning of / is ambiguous for these datatypes")
def __rdiv__(self, other):
return self.__mul__(other)
def __idiv__(self, other):
if isinstance(other, CXData):
for i in xrange(len(self.data)):
self.data[i]/=other.data[i]
return self
elif isinstance(other, (int, float, complex)):
for i in xrange(len(self.data)):
self.data[i]/=other
return self
elif isinstance(other, CXModal):
raise("The meaning of /= is ambiguous for these datatypes")
def __len__(self):
return len(self.data)
def __del__(self):
# Remove this instance from the CXData __all__ variable
try:
print 'Deleting {}'.format(self.kwargs['itype'])
CXData.__all__.pop(self.kwargs['itype'])
except (AttributeError, KeyError):
pass
def __getitem__(self, s):
"""
Allows extracting a subarray from self.data or a single array from a list of arrays.
Implements subpixel shifting for seamless indexing of a fractional number of pixels.
The returned array must be an integer number of pixels.
E.g a[0:100.6] doesn't make any sense
but a[0.6:100.6] does.
a[0] is equivalent to a.data[0]
"""
if isinstance(s, int):
return CXData(data=self.data[s])
else:
y, x = s
xstart = x.start or 0
xstop = x.stop or self.data[0].shape[0]-1
ystart = y.start or 0
ystop = y.stop or self.data[0].shape[1]-1
dx, dy = -np.mod(xstart, 1), -np.mod(ystart, 1)
l = []
for data in self.data:
l.append(self.shift(data[xstart // 1:xstop // 1, ystart //1: ystop //1], dx, dy))
return CXData(data=l)
def __setitem__(self, s, arr):
"""
Embed a smaller array in a larger array.
a[s] = arr
"""
if isinstance(s, int):
if len(arr)>1:
raise Exception('Cannot set single array with list of arrays.')
self.data[s]=arr.data[0]
else:
y, x = s
xstart = x.start or 0
xstop = x.stop or self.data[0].shape[0]-1
ystart = y.start or 0
ystop = y.stop or self.data[0].shape[1]-1
dx, dy = np.mod(xstart, 1), np.mod(ystart, 1)
l=[]
if isinstance(arr, CXData):
for i, data in enumerate(self.data):
l.append(data.copy())
l[i][xstart // 1:xstop // 1, ystart //1: ystop //1] = self.shift(arr.data[i], dx, dy)
self.data = l
elif isinstance(arr, np.ndarray):
for i, data in enumerate(self.data):
l.append(data.copy())
l[i][xstart // 1:xstop // 1, ystart //1:ystop //1] = self.shift(arr, dx, dy)
self.data = l
elif isinstance(arr, (int, float)):
for i, data in enumerate(self.data):
l.append(data.copy())
l[i][xstart // 1:xstop // 1, ystart //1: ystop //1] = arr
l[i] = self.shift(l[i], dx, dy)
self.data = l
@staticmethod
def inner_product(u, v):
return sp.sum((conj(u)*v).data[0])/(u.data[0].shape[0]*u.data[0].shape[1])
@staticmethod
def proj_u_v(u, v):
return u*(CXData.inner_product(v, u)/CXData.inner_product(u, u))
def max(self):
"""
Return a list of maximum (absolute) value(s) of (complex) array(s).
"""
if len(self.data)==1:
return abs(self.data[0]).max()
else:
return [abs(element).max() for element in self.data]
def min(self):
"""
Return a list of minimum (absolute) value(s) of (complex) array(s).
"""
if len(self.data)==1:
return abs(self.data[0]).min()
else:
return [abs(element).min() for element in self.data]
def normalise(self, val=1.):
"""
Rebase data from 0 to 1.
"""
if CXP.reconstruction.verbose:
CXP.log.info('Rebasing data from 0 to {:3.2f}'.format(val))
for i in xrange(len(self.data)):
self.data[i] -= abs(self.data[i]).min()
self.data[i] /= abs(self.data[i]).max()
self.data[i] *= val
def append(self, other):
if isinstance(other, CXData):
for data in other.data:
self.data.append(data)
elif isinstance(other, np.ndarray):
self.data.append(other)
def square_root(self):
if CXP.reconstruction.verbose:
CXP.log.info('Taking square root.')
for i in xrange(len(self.data)):
self.data[i] = pow(self.data[i], 0.5)
def fft_shift(self):
if CXP.reconstruction.verbose:
CXP.log.info('Performing FFT shift.')
for i in xrange(len(self.data)):
self.data[i] = spf.fftshift(self.data[i])
def len(self):
return len(self.data)
@staticmethod
def shift_inner(arr, nx, ny, window=False, padding='reflect'):
"""
Shifts an array by nx and ny respectively.
"""
if ((nx % 1. == 0.) and (ny % 1. ==0)):
return sp.roll(sp.roll(arr, int(ny), axis=0),
int(nx), axis=1)
else:
atype = arr.dtype
if padding:
x, y = arr.shape
pwx, pwy = int(pow(2., np.ceil(np.log2(1.5*arr.shape[0])))), int(pow(2., np.ceil(np.log2(1.5*arr.shape[1]))))
pwx2, pwy2 = (pwx-x)/2, (pwy-y)/2
if pad=='zero':
arr = pad.with_constant(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)))
else:
arr = pad.with_reflect(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)))
phaseFactor = sp.exp(complex(0., -2.*sp.pi)*(ny*spf.fftfreq(arr.shape[0])[:, np.newaxis]+nx*spf.fftfreq(arr.shape[1])[np.newaxis, :]))
if window:
window = spf.fftshift(CXData._tukeywin(arr.shape[0], alpha=0.35))
arr = spf.ifft2(spf.fft2(arr)*phaseFactor*window)
else:
arr = spf.ifft2(spf.fft2(arr)*phaseFactor)
if padding:
arr = arr[pwx/4:3*pwx/4, pwy/4:3*pwy/4]
if atype == 'complex':
return arr
else:
return np.real(arr)
@staticmethod
def | (x, nx, ny, **kwargs):
if isinstance(x, CXData):
l=[]
for data in x.data:
l.append(CXData.shift_inner(data.copy(), nx, ny, **kwargs))
return CXData(data=l)
elif isinstance(x, np.ndarray):
return CXData.shift_inner(x, nx, ny)
def ishift(self, nx, ny, **kwargs):
# Inplace version of shift
l=[]
for data in self.data:
for data in self.data:
l.append(self.shift_inner(data.copy(), nx, ny, kwargs))
self.data = l
return self
def rot90(self, i):
# Rotate by 90 degrees i times
if CXP.reconstruction.verbose:
CXP.log.info('Rotating data by {:d}'.format(i*90))
for j, data in enumerate(self.data):
self.data[j] = sp.rot90(data, i)
def find_dead_pixels(self):
# Return coordinates of pixels with a standard deviation of zero
dead_pix = sp.where(abs(np.std(self.data, axis=0))<machine_precision)
if CXP.reconstruction.verbose:
CXP.log.info('Found {0:d} dead pixels'.format(len(dead_pix)))
return dead_pix
def zero_dead_pixels(self):
if CXP.reconstruction.verbose:
CXP.log.info('Setting dead pixels to zero')
self.data[self.find_dead_pixels()]=0.
def threshhold(self, threshhold=None):
if not threshhold:
threshhold = CXP.preprocessing.threshhold_raw_data
if CXP.reconstruction.verbose:
CXP.log.info('Applying threshhold to data at {:3.2f} and rebasing to 0.'.format(threshhold))
for i, data in enumerate(self.data):
tdata = sp.where(data<threshhold, threshhold, data)
tdata-=tdata.min()
self.data[i]=tdata
def symmetrize_array_shape(self, qxqy0=None, desired_shape=None):
x0, y0 = self.data[0].shape
if desired_shape is None:
desired_shape = CXP.preprocessing.desired_array_shape
if qxqy0 is None:
qx, qy = CXP.preprocessing.qx0qy0
else:
qx, qy = qxqy0
if CXP.reconstruction.verbose:
CXP.log.info('Symmetrizing array shape.\n\tCurrent shape:\t{}x{}\n\tNew shape:\t{}x{}\n\tCentred on:\t{},{}'.format(
x0, y0, desired_shape, desired_shape, qx, qy))
# Cropping or padding?
qx_lower, qx_upper = qx-desired_shape/2, qx+desired_shape/2
qy_lower, qy_upper = qy-desired_shape/2, qy+desired_shape/2
if qx_lower<0: # Crop
nxl, mxl = np.abs(qx_lower), 0
else: # Pad
nxl, mxl = 0, qx_lower
if qy_lower<0: # Crop
nyl, myl = np.abs(qy_lower), 0
else: # Pad
nyl, myl = 0, qy_lower
if qx_upper<x0: # Crop
nxu, mxu = desired_shape, qx+desired_shape/2
else: # Pad
nxu, mxu = x0-qx_lower, x0
if qy_upper<y0: # Crop
nyu, myu = desired_shape, qy+desired_shape/2
else: # Pad
nyu, myu = y0-qy_lower, y0
for i in range(len(self.data)):
tmp = sp.zeros((desired_shape, desired_shape))
tmp[nxl:nxu, nyl:nyu] = self.data[i][mxl:mxu, myl:myu]
self.data[i] = tmp
CXP.p = CXP.preprocessing.desired_array_shape
def treat_beamstop(self):
factor = CXP.measurement.beam_stop_factor.keys()[0]
x0, y0 = CXP.measurement.beam_stop_factor[factor][0]
x1, y1 = CXP.measurement.beam_stop_factor[factor][1]
for i in range(len(self.data)):
self.data[i][x0:x1, y0:y1]*=factor
def save(self, path=None):
if path:
filepath = path
else:
filepath = self.savepath
try:
CXP.log.info('Saving {} to:\n\t{}'.format(self.name, filepath))
except AttributeError:
CXP.log.info('Saving to:\n\t{}'.format(filepath))
try:
np.savez(filepath, *self.data)
except IOError as e:
CXP.log.error(e)
raise Exception('Could not save {} to {}'.format(self.kwargs['name'], path))
def load(self, path=None):
if path:
filepath = path
else:
filepath = self.filename
CXP.log.info('Loading data from:\n\t{}'.format(filepath))
try:
self.data = self.openup(filepath)
except IOError as e:
CXP.log.error(e)
raise Exception('Could not load file from {}'.format(filepath))
if not isinstance(self.data, list):
self.data = [self.data]
def init_data(self, *args, **kwargs):
if args[0] == 'det_mod':
if CXP.actions.preprocess_data:
self.read_in_data()
else:
self.load()
elif args[0] == 'probe_det_mod':
if CXP.actions.preprocess_data:
# Get list of white files
CXP.log.info('Preprocessing probe detector modulus.')
if CXP.io.whitefield_filename not in [None, '']: # If whitefields were measured
wfilename, wfilerange, wn_acqs = [CXP.io.whitefield_filename, CXP.io.whitefield_filename_range,
CXP.measurement.n_acqs_whitefield]
self.pattern = wfilename.count('{')
if self.pattern == 1:
wf = [wfilename.format(i) for i in range(wfilerange[0], wfilerange[1])]
elif self.pattern == 2:
wf = [wfilename.format(wfilerange[0], i) for i in range(wn_acqs)]
elif self.pattern == 3:
wf = glob.glob(wfilename.split('}')[0]+'}*')
res = self.preprocess_data_stack(0, 1, wf, self.pattern, None, None, no_decorate=True)
self.data = res[1]
else: #Guesstimate the whitefield from the average of the diffraction patterns
pass
else:
self.load(CXData.raw_data_filename_string.format('probe_det_mod'))
try:
probe = self.__class__.__all__['probe']
probe.data[0] = spf.ifft2(self.data[0]*exp(complex(0., 1.)*sp.angle(spf.fft2(probe.data[0]))))
CXP.log.info('Applied probe modulus constraint.')
except (AttributeError, KeyError):
pass
elif args[0] == 'dark':
if CXP.actions.preprocess_data:
# Get list of dark files
CXP.log.info('Preprocessing darkfield.')
dfilename, dfilerange, dn_acqs = [CXP.io.darkfield_filename, CXP.io.darkfield_filename_range,
CXP.measurement.n_acqs_darkfield]
self.pattern = dfilename.count('{')
if self.pattern == 1:
df = [dfilename.format(i) for i in range(dfilerange[0], dfilerange[1])]
elif self.pattern == 2:
df = [dfilename.format(dfilerange[0], i) for i in range(dn_acqs)]
elif self.pattern == 3:
df = glob.glob(dfilename.split('}')[0]+'}*')
res = self.preprocess_data_stack(0, 1, df, self.pattern, None, None, no_decorate=True)
self.data = res[1]
else:
self.load(CXData.raw_data_filename_string.format('probe_det_mod'))
def read_in_data(self):
self.completed_filenames = [] # Keep track of what's been processed already for online analysis
self.job_filenames = [] # Bundle stack of images for preprocessing
self.pattern = None
# Determine which files to read in
CXP.log.info('Reading in & preprocessing raw data...')
#Pattern 1: 'image_{:d}.xxx'
#Pattern 2: 'image_{:d}_{:d}.xxx'
#Pattern 3: 'image_{:d}_{:d}_{val}.xxx'
if self.pattern is None: # Pattern is not yet dertermined
filename, filerange, n_acqs = [CXP.io.data_filename, CXP.io.data_filename_range, CXP.measurement.n_acqs_data]
self.pattern = filename.count('{')
CXP.log.info('Detected filename pattern: {:d}'.format(self.pattern))
if self.pattern == 0:
raise Exception('NamingConventionError:\nPlease read CXParams for more info on file naming conventions.')
try:
n0, n1 = filerange[0], filerange[1]+1
except IndexError:
n0 = n1 = filerange[0]
if CXP.io.darkfield_filename is not '': # dark
try:
dark = self.__class__.__all__['dark']
CXP.log.info('Found darkfield.')
except KeyError:
dark = CXData(itype='dark')
dark.save()
else:
CXP.log.info('Not processing darkfields.')
dark = None
if CXP.io.whitefield_filename is not '': # white
try:
probe_det_mod = self.__class__.__all__['probe_det_mod']
CXP.log.info('Found probe detector modulus.')
except KeyError:
probe_det_mod = CXData(itype='probe_det_mod')
probe_det_mod.save()
else:
CXP.log.info('Not processing whitefields.')
probe_det_mod = None
old_verbosity = CXP.reconstruction.verbose
CXP.reconstruction.verbose = False
jobs = mp.JoinableQueue()
results = mp.Queue()
n_processes = mp.cpu_count()
then = time.time()
cnt=0
missing_frames = False
l=[]
CXP.log.info('Dividing raw data into jobs over {:d} processes.'.format(n_processes))
for i in range(n0, n1):
if self.pattern == 1:
s = [filename.format(i)]
else:
s = glob.glob((filename.split('}')[0]+'}*').format(i))
# Include only files that haven't been processed yet
# s = [fn for fn in s if fn not in self.completed_filenames]
if len(s)==0:
CXP.log.error('Globbed 0 files in CXData@read_in_files')
sys.exit(1)
if self.pattern==1:
try:
s=s[0]
self.completed_filenames.append(s)
if cnt<n_acqs:
l.append(s)
cnt+=1
if cnt>=n_acqs:
self.job_filenames.append(l)
cnt=0
l=[]
except IndexError:
missing_frames = True
CXP.log.error('Missing frame: {:s}'.format(filename.format(i)))
else:
self.completed_filenames+=s
self.job_filenames.append(s)
if missing_frames:
print "There were missing frames. Choose 'c' to continue or 'q' to quit."
pdb.set_trace()
p = [mp.Process(target=self.preprocess_data_stack, args=(jobs, results))
for i in range(n_processes)]
for process in p:
process.start()
n_jobs = len(self.job_filenames)
for i in range(n_jobs):
jobs.put((i, n_jobs, self.job_filenames[i], self.pattern, probe_det_mod, dark))
# Add Poison Pill
for i in range(n_processes):
jobs.put((None, None, None, None, None, None))
CXP.log.info('{:3.2f} seconds elapsed dividing jobs between processes.'.format(time.time()-then))
then = time.time()
cnt = 0
self.data = [None]*n_jobs
while True:
if not results.empty():
i, data = results.get()
self.data[i] = data[0]
cnt+=1
elif cnt==n_jobs:
break
jobs.join()
jobs.close()
results.close()
for process in p:
process.join()
CXP.log.info('{:3.2f} seconds elapsed preprocessing data.'.format(time.time()-then))
CXP.reconstruction.verbose = old_verbosity
#self._sequence_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, 'sequences'])
#self._cur_sequence_dir = self._sequence_dir+'/sequence_{:d}'.format(CXP.reconstruction.sequence)
#self.save(path=self._cur_sequence_dir+'/det_mod.npy')
@worker
def preprocess_data_stack(self, stack_num, n_jobs, file_list, pattern, white, dark):
# Average, merge and preprocess a stack of images
# Typically a stack corresponds to one ptychographic position
l=[]
tmp=None
# First - average according to the pattern
if pattern in [1, 2]:
# Averaging only
for filename in file_list:
if tmp is None:
tmp = self.openup(filename)
else:
tmp += self.openup(filename)
l.append(tmp/len(file_list))
elif pattern == 3:
# Average then merge
d={}
unique_times = list(set([t.split('_')[3] for t in file_list]))
for filename in file_list:
t = filename.split('.')[0].split('_')[-1]
if t not in d.keys():
d[t] = (1, self.openup(filename))
else:
d[t][0] += 1
d[t][1] += self.openup(filename)
for key, (i, val) in d.iteritems():
val /= i
# Check for saturated values and merge variable exposure times
max_time = max(unique_times)
if CXP.preprocessing.saturation_level>0:
for key in d.keys():
wh = sp.where(d[key]>=CXP.preprocessing.saturation_level)
d[key][wh] = 0
if tmp == 0:
tmp = d[key] * max_time/float(key)
else:
tmp += d[key] * max_time/float(key)
l.append(tmp)
else:
raise Exception('NamingConventionError')
# Do preprocessing
data = CXData()
data.data = l
if CXP.measurement.beam_stop:
data.treat_beamstop()
data.symmetrize_array_shape()
# CCD Specific Preprocessing
if CXP.preprocessing.detector_type == 'ccd':
try:
# Dark field correction
if dark is not None:
print('Dark field correcting data')
data-=dark
# Dark correct white field
if white is not None:
print('Dark field correcting whitefield')
white-=dark
except UnboundLocalError:
print('No darkfield subtraction performed.')
# PAD Specific Preprocessing
elif CXP.preprocessing.detector_type == 'pad':
pass
# Threshhold data
if CXP.preprocessing.threshhold_raw_data > 0:
data.threshhold()
if white is not None:
white.threshhold()
# Bin data
if CXP.preprocessing.bin > 1:
data.bin()
if white is not None:
white.bin()
if CXP.preprocessing.rot90!=0:
data.rot90(CXP.preprocessing.rot90)
if white is not None:
white.rot90(CXP.preprocessing.rot90)
# Take square root
data.square_root()
if white is not None:
white.square_root()
# Put in FFT shifted
data.fft_shift()
if white is not None:
white.fft_shift()
return (stack_num, data.data)
def bin(self, n=None):
"""
Bin a square array by grouping nxn pixels.
Array size must be a multiple of n.
"""
if n is None:
n=CXP.preprocessing.bin
# Now the detector pixel size has changed so we should update that
CXP.experiment.dx_d *= n
CXP.log.info('After binning new detector pixel size: {2.2e}'.format(CXP.experiment.dx_d))
nx, ny = self.data[0].shape[0], self.data[0].shape[1]
if not nx==ny:
raise Exception('Array to be binned must be square')
if not sp.mod(nx, n)==0.:
raise Exception('Array size must be a multiple of binning factor')
if n>nx:
raise Exception('Binning factor must be smaller than array size')
nn = nx/n
l = []
for i in xrange(len(self.data)):
tmp = sp.zeros((nn, nn))
for p in xrange(nn):
for q in xrange(nn):
tmp[p, q] = sp.sum(self.data[i][p*n:(p+1)*n, q*n:(q+1)*n])
l.append(tmp)
self.data=l
def show(self, i=0, phase=False, log=False):
if phase:
pylab.matshow(angle(self.data[i]), cmap=cm.hsv)
else:
if log:
pylab.matshow(sp.log10(abs(self.data[i])))
else:
pylab.matshow(abs(self.data[i]))
pylab.colorbar()
pylab.show()
def plot(self, i=0, phase=False):
pylab.figure()
if phase:
pylab.plot(np.angle(self.data[i][:, self.data[i].shape[0]/2]), label='Horizontal')
pylab.plot(np.angle(self.data[i][self.data[i].shape[1]/2, :]), label='Vertical')
else:
pylab.plot(np.abs(self.data[i][:, self.data[i].shape[0]/2]), label='Horizontal')
pylab.plot(np.abs(self.data[i][self.data[i].shape[1]/2, :]), label='Vertical')
pylab.legend()
def copy(self):
return CXData(data=[np.copy(arr) for arr in self.data])
class CXModal(object):
def __init__(self, *args, **kwargs):
self.modes = []
self.savepath = None
for kw in kwargs:
# Data attribute must be a list of arrays
if kw=='modes':
if isinstance(kwargs['modes'], list):
self.modes = kwargs['modes']
elif isinstance(kwargs['modes'], CXData):
self.modes = [kwargs['modes']]
else:
setattr(self, kw, kwargs[kw])
def __repr__(self):
try:
s=repr(self.modes[0].data[0])
except:
s=''
try:
return '<{} at {}>\n{:d} modes containing {:d} arrays ({:d}x{:d}px).\n{}'.format(self.__class__,
hex(id(self)), len(self.modes), len(self.modes[0]), self.modes[0].data[0].shape[0],
self.modes[0].data[0].shape[1], s)
except AttributeError:
return '<{} at {}>\nNo modes attribute present.'.format(self.__class__, hex(id(self)))
def __getitem__(self, s):
return self.modes[s]
def __setitem__(self, s, modes):
self.modes[s] = modes
@staticmethod
def _addsubmuldiv(operation, this, other):
if isinstance(other, CXModal):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other.modes[mode].data[i]) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
elif isinstance(other, CXData):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other.data[i]) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
elif isinstance(other, (int, float, complex)):
l=[]
for mode in xrange(len(this.modes)):
l.append(CXData(data=[operation(this.modes[mode].data[i], other) for i in range(len(this.modes[mode].data))]))
return CXModal(modes=l)
@staticmethod
def _iaddsubmuldiv(operation, this, other):
if isinstance(other, CXModal):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i]=operation(this.modes[mode].data[i], other.modes[mode].data[i])
return this
elif isinstance(other, CXData):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i] = operation(this.modes[mode].data[i], other.data[i])
return this
elif isinstance(other, (int, float, complex)):
for mode in xrange(len(this.modes)):
for i in range(len(this.modes[mode])):
this.modes[mode].data[i] = operation(this.modes[mode].data[i], other)
return this
def __add__(self, other):
return CXModal._addsubmuldiv(operator.add, self, other)
def __radd__(self, other):
return CXModal._addsubmuldiv(operator.add, self, other)
def __iadd__(self, other):
return CXModal._iaddsubmuldiv(operator.iadd, self, other)
def __sub__(self, other):
return CXModal._addsubmuldiv(operator.sub, self, other)
def __rsub__(self, other):
return CXModal._addsubmuldiv(operator.sub, other, self)
def __isub__(self, other):
return CXModal._iaddsubmuldiv(operator.isub, self, other)
def __mul__(self, other):
return CXModal._addsubmuldiv(operator.mul, self, other)
def __rmul__(self, other):
return CXModal._addsubmuldiv(operator.mul, self, other)
def __imul__(self, other):
return CXModal._addsubmuldiv(operator.imul, self, other)
def __div__(self, other):
return CXModal._addsubmuldiv(operator.div, self, other)
def __rdiv__(self, other):
return CXModal._addsubmuldiv(operator.div, self, other)
def __idiv__(self, other):
return CXModal._addsubmuldiv(operator.idiv, self, other)
def __pow__(self, power):
return CXModal(modes=[self.modes[mode]**power for mode in range(len(self.modes))])
def __len__(self):
return len(self.modes)
def copy(self):
return CXModal(modes=[self.modes[mode].copy() for mode in range(len(self))])
@staticmethod
def modal_sum(modal):
return CXData(data=[ reduce(CXData.__add__, [ modal[mode][i] for mode in range(len(modal.modes)) ]).data[0] for i in range(len(modal[0].data))])
def getat(self, i):
"""
.. method::setat(self, i)
return all modes at position i
"""
return CXModal(modes=[self.modes[mode][i] for mode in range(len(self))])
def setat(self, i, modal):
"""
.. method::getat(self, i)
set all modes at position i
"""
for mode in range(len(self)):
self.modes[mode][i] = modal.modes[mode][0]
def normalise(self):
mode_sum_max = CXModal.modal_sum(abs(self)).data[0].max()
for mode in range(len(self)):
self.modes[mode] /= mode_sum_max
    def orthogonalise(self):
        """Gram-Schmidt-style orthogonalisation of the modes.

        Mode 0 is kept as-is; every later mode has its projection onto each
        previously orthogonalised mode subtracted. Returns a new CXModal.
        """
        ortho = CXModal(modes=self[0][0].copy())
        for i in range(1, len(self)):
            tmp = self[i][0].copy()
            # NOTE(review): the projection uses the ORIGINAL mode self[i][0],
            # not the running remainder `tmp` (classical rather than modified
            # Gram-Schmidt) — confirm this is intended.
            for j in range(i-1, -1, -1):
                tmp -= CXData.proj_u_v(ortho[j][0], self[i][0])
            ortho.modes.append(tmp)
        return CXModal(modes=ortho.modes)
tracehelper_test.go | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporterhelper
import (
"context"
"errors"
"sync"
"testing"
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
otlptrace "github.com/open-telemetry/opentelemetry-proto/gen/go/trace/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opencensus.io/trace"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/consumer/consumerdata"
"github.com/open-telemetry/opentelemetry-collector/exporter"
"github.com/open-telemetry/opentelemetry-collector/observability"
"github.com/open-telemetry/opentelemetry-collector/observability/observabilitytest"
"github.com/open-telemetry/opentelemetry-collector/obsreport"
)
const (
fakeTraceReceiverName = "fake_receiver_trace"
fakeTraceExporterType = "fake_trace_exporter"
fakeTraceExporterName = "fake_trace_exporter/with_name"
fakeTraceParentSpanName = "fake_trace_parent_span_name"
)
var (
fakeTraceExporterConfig = &configmodels.ExporterSettings{
TypeVal: fakeTraceExporterType,
NameVal: fakeTraceExporterName,
Disabled: false,
}
)
// TODO https://github.com/open-telemetry/opentelemetry-collector/issues/266
// Migrate tests to use testify/assert instead of t.Fatal pattern.
// TestTraceExporter_InvalidName checks that a nil exporter config is rejected
// with errNilConfig and no exporter is returned.
func TestTraceExporter_InvalidName(t *testing.T) {
	te, err := NewTraceExporter(nil, newPushTraceData(0, nil))
	require.Nil(t, te)
	require.Equal(t, errNilConfig, err)
}

// TestTraceExporter_NilPushTraceData checks that a nil push function is
// rejected with errNilPushTraceData and no exporter is returned.
func TestTraceExporter_NilPushTraceData(t *testing.T) {
	te, err := NewTraceExporter(fakeTraceExporterConfig, nil)
	require.Nil(t, te)
	require.Equal(t, errNilPushTraceData, err)
}

// TestTraceExporter_Default covers the happy path: consuming an empty trace
// batch succeeds and Shutdown reports no error.
func TestTraceExporter_Default(t *testing.T) {
	td := consumerdata.TraceData{}
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, nil))
	assert.NotNil(t, te)
	assert.Nil(t, err)
	assert.Nil(t, te.ConsumeTraceData(context.Background(), td))
	assert.Nil(t, te.Shutdown())
}

// TestTraceExporter_Default_ReturnError checks that an error returned by the
// push function is propagated unchanged by ConsumeTraceData.
func TestTraceExporter_Default_ReturnError(t *testing.T) {
	td := consumerdata.TraceData{}
	want := errors.New("my_error")
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, want))
	require.Nil(t, err)
	require.NotNil(t, te)
	err = te.ConsumeTraceData(context.Background(), td)
	require.Equalf(t, want, err, "ConsumeTraceData returns: Want %v Got %v", want, err)
}
// TestTraceExporter_WithRecordMetrics verifies the recorded exporter metrics
// when nothing is dropped and no error occurs.
func TestTraceExporter_WithRecordMetrics(t *testing.T) {
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkRecordedMetricsForTraceExporter(t, te, nil, 0)
}

// TestTraceExporter_WithRecordMetrics_NonZeroDropped verifies the dropped-span
// metric when the push function reports one dropped span per batch.
func TestTraceExporter_WithRecordMetrics_NonZeroDropped(t *testing.T) {
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(1, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkRecordedMetricsForTraceExporter(t, te, nil, 1)
}

// TestTraceExporter_WithRecordMetrics_ReturnError verifies metrics recording
// when the push function fails with an error.
func TestTraceExporter_WithRecordMetrics_ReturnError(t *testing.T) {
	want := errors.New("my_error")
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, want))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkRecordedMetricsForTraceExporter(t, te, want, 0)
}

// TestTraceExporter_WithSpan verifies the span wrapped around each consume
// call on the success path.
func TestTraceExporter_WithSpan(t *testing.T) {
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkWrapSpanForTraceExporter(t, te, nil, 1)
}

// TestTraceExporter_WithSpan_NonZeroDropped verifies span attributes when a
// span is reported as dropped.
func TestTraceExporter_WithSpan_NonZeroDropped(t *testing.T) {
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(1, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkWrapSpanForTraceExporter(t, te, nil, 1)
}

// TestTraceExporter_WithSpan_ReturnError verifies span status/attributes when
// the push function fails.
func TestTraceExporter_WithSpan_ReturnError(t *testing.T) {
	want := errors.New("my_error")
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, want))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkWrapSpanForTraceExporter(t, te, want, 1)
}

// TestTraceExporter_WithShutdown checks that a shutdown hook registered via
// WithShutdown runs when the exporter shuts down.
func TestTraceExporter_WithShutdown(t *testing.T) {
	shutdownCalled := false
	shutdown := func() error { shutdownCalled = true; return nil }
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, nil), WithShutdown(shutdown))
	assert.NotNil(t, te)
	assert.Nil(t, err)
	assert.Nil(t, te.Shutdown())
	assert.True(t, shutdownCalled)
}

// TestTraceExporter_WithShutdown_ReturnError checks that an error from the
// shutdown hook is propagated by Shutdown.
func TestTraceExporter_WithShutdown_ReturnError(t *testing.T) {
	want := errors.New("my_error")
	shutdownErr := func() error { return want }
	te, err := NewTraceExporter(fakeTraceExporterConfig, newPushTraceData(0, nil), WithShutdown(shutdownErr))
	assert.NotNil(t, te)
	assert.Nil(t, err)
	assert.Equal(t, te.Shutdown(), want)
}
// newPushTraceData builds a traceDataPusher stub that ignores its input and
// always reports the given dropped-span count and error.
func newPushTraceData(droppedSpans int, retError error) traceDataPusher {
	pusher := func(_ context.Context, _ consumerdata.TraceData) (int, error) {
		return droppedSpans, retError
	}
	return pusher
}
// checkRecordedMetricsForTraceExporter pushes numBatches two-span batches and
// asserts the received/dropped span views match the expected totals.
func checkRecordedMetricsForTraceExporter(t *testing.T, te exporter.TraceExporter, wantError error, droppedSpans int) {
	doneFn := observabilitytest.SetupRecordedMetricsTest()
	defer doneFn()
	spans := make([]*tracepb.Span, 2)
	td := consumerdata.TraceData{Spans: spans}
	// Metrics views are tagged per receiver, so the context must carry one.
	ctx := observability.ContextWithReceiverName(context.Background(), fakeTraceReceiverName)
	const numBatches = 7
	for i := 0; i < numBatches; i++ {
		require.Equal(t, wantError, te.ConsumeTraceData(ctx, td))
	}
	err := observabilitytest.CheckValueViewExporterReceivedSpans(fakeTraceReceiverName, fakeTraceExporterName, numBatches*len(spans))
	require.Nilf(t, err, "CheckValueViewExporterReceivedSpans: Want nil Got %v", err)
	err = observabilitytest.CheckValueViewExporterDroppedSpans(fakeTraceReceiverName, fakeTraceExporterName, numBatches*droppedSpans)
	require.Nilf(t, err, "CheckValueViewExporterDroppedSpans: Want nil Got %v", err)
}

// generateTraceTraffic sends numRequests one-span batches inside an
// always-sampled parent span so the exporter's wrapper spans get recorded.
func generateTraceTraffic(t *testing.T, te exporter.TraceExporter, numRequests int, wantError error) {
	td := consumerdata.TraceData{Spans: make([]*tracepb.Span, 1)}
	ctx, span := trace.StartSpan(context.Background(), fakeTraceParentSpanName, trace.WithSampler(trace.AlwaysSample()))
	defer span.End()
	for i := 0; i < numRequests; i++ {
		require.Equal(t, wantError, te.ConsumeTraceData(ctx, td))
	}
}

// checkWrapSpanForTraceExporter captures spans via a registered OC exporter
// and asserts that each consume call produced a child span with the expected
// status and sent/failed attributes.
func checkWrapSpanForTraceExporter(t *testing.T, te exporter.TraceExporter, wantError error, numSpans int64) {
	ocSpansSaver := new(testOCTraceExporter)
	trace.RegisterExporter(ocSpansSaver)
	defer trace.UnregisterExporter(ocSpansSaver)
	const numRequests = 5
	generateTraceTraffic(t, te, numRequests, wantError)
	// Inspection time!
	ocSpansSaver.mu.Lock()
	defer ocSpansSaver.mu.Unlock()
	require.NotEqual(t, 0, len(ocSpansSaver.spanData), "No exported span data.")
	gotSpanData := ocSpansSaver.spanData
	require.Equal(t, numRequests+1, len(gotSpanData))
	// The last exported span is expected to be the parent (it ends last).
	parentSpan := gotSpanData[numRequests]
	require.Equalf(t, fakeTraceParentSpanName, parentSpan.Name, "SpanData %v", parentSpan)
	for _, sd := range gotSpanData[:numRequests] {
		require.Equalf(t, parentSpan.SpanContext.SpanID, sd.ParentSpanID, "Exporter span not a child\nSpanData %v", sd)
		require.Equalf(t, errToStatus(wantError), sd.Status, "SpanData %v", sd)
		// On failure, all spans count as failed-to-send instead of sent.
		sentSpans := numSpans
		var failedToSendSpans int64
		if wantError != nil {
			sentSpans = 0
			failedToSendSpans = numSpans
		}
		require.Equalf(t, sentSpans, sd.Attributes[obsreport.SentSpansKey], "SpanData %v", sd)
		require.Equalf(t, failedToSendSpans, sd.Attributes[obsreport.FailedToSendSpansKey], "SpanData %v", sd)
	}
}

// testOCTraceExporter is a thread-safe OpenCensus trace exporter that simply
// accumulates exported span data for later inspection.
type testOCTraceExporter struct {
	mu       sync.Mutex
	spanData []*trace.SpanData
}

// ExportSpan appends the span data under the mutex.
func (tote *testOCTraceExporter) ExportSpan(sd *trace.SpanData) {
	tote.mu.Lock()
	defer tote.mu.Unlock()
	tote.spanData = append(tote.spanData, sd)
}
// TestOTLPTraceExporter_InvalidName checks that a nil exporter config is
// rejected with errNilConfig and no exporter is returned.
func TestOTLPTraceExporter_InvalidName(t *testing.T) {
	te, err := NewOTLPTraceExporter(nil, newPushOTLPTrace(0, nil))
	require.Nil(t, te)
	require.Equal(t, errNilConfig, err)
}
func TestOTLPTraceExporter_NilPushTraceData(t *testing.T) {
te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, nil)
require.Nil(t, te)
require.Equal(t, errNilPushTraceData, err) | td := consumerdata.OTLPTraceData{}
te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, nil))
assert.NotNil(t, te)
assert.Nil(t, err)
assert.Nil(t, te.ConsumeOTLPTrace(context.Background(), td))
assert.Nil(t, te.Shutdown())
}
func TestOTLPTraceExporter_Default_ReturnError(t *testing.T) {
td := consumerdata.OTLPTraceData{}
want := errors.New("my_error")
te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, want))
require.Nil(t, err)
require.NotNil(t, te)
err = te.ConsumeOTLPTrace(context.Background(), td)
require.Equalf(t, want, err, "ConsumeTraceData returns: Want %v Got %v", want, err)
}
// TestOTLPTraceExporter_WithRecordMetrics verifies recorded metrics on the
// success path (OTLP variant).
func TestOTLPTraceExporter_WithRecordMetrics(t *testing.T) {
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkRecordedMetricsForOTLPTraceExporter(t, te, nil, 0)
}

// TestOTLPTraceExporter_WithRecordMetrics_NonZeroDropped verifies the
// dropped-span metric when one span per batch is dropped.
func TestOTLPTraceExporter_WithRecordMetrics_NonZeroDropped(t *testing.T) {
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(1, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkRecordedMetricsForOTLPTraceExporter(t, te, nil, 1)
}

// TestOTLPTraceExporter_WithRecordMetrics_ReturnError verifies metrics when
// the push function fails.
func TestOTLPTraceExporter_WithRecordMetrics_ReturnError(t *testing.T) {
	want := errors.New("my_error")
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, want))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkRecordedMetricsForOTLPTraceExporter(t, te, want, 0)
}

// TestOTLPTraceExporter_WithSpan verifies the wrapper span on the success path.
func TestOTLPTraceExporter_WithSpan(t *testing.T) {
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkWrapSpanForOTLPTraceExporter(t, te, nil, 1)
}

// TestOTLPTraceExporter_WithSpan_NonZeroDropped verifies span attributes when
// a span is reported as dropped.
func TestOTLPTraceExporter_WithSpan_NonZeroDropped(t *testing.T) {
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(1, nil))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkWrapSpanForOTLPTraceExporter(t, te, nil, 1)
}

// TestOTLPTraceExporter_WithSpan_ReturnError verifies span status/attributes
// when the push function fails.
func TestOTLPTraceExporter_WithSpan_ReturnError(t *testing.T) {
	want := errors.New("my_error")
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, want))
	require.Nil(t, err)
	require.NotNil(t, te)
	checkWrapSpanForOTLPTraceExporter(t, te, want, 1)
}

// TestOTLPTraceExporter_WithShutdown checks that the WithShutdown hook runs on
// Shutdown.
func TestOTLPTraceExporter_WithShutdown(t *testing.T) {
	shutdownCalled := false
	shutdown := func() error { shutdownCalled = true; return nil }
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, nil), WithShutdown(shutdown))
	assert.NotNil(t, te)
	assert.Nil(t, err)
	assert.Nil(t, te.Shutdown())
	assert.True(t, shutdownCalled)
}

// TestOTLPTraceExporter_WithShutdown_ReturnError checks that shutdown-hook
// errors are propagated by Shutdown.
func TestOTLPTraceExporter_WithShutdown_ReturnError(t *testing.T) {
	want := errors.New("my_error")
	shutdownErr := func() error { return want }
	te, err := NewOTLPTraceExporter(fakeTraceExporterConfig, newPushOTLPTrace(0, nil), WithShutdown(shutdownErr))
	assert.NotNil(t, te)
	assert.Nil(t, err)
	assert.Equal(t, te.Shutdown(), want)
}
// newPushOTLPTrace builds an otlpTraceDataPusher stub that ignores its input
// and always reports the given dropped-span count and error.
func newPushOTLPTrace(droppedSpans int, retError error) otlpTraceDataPusher {
	pusher := func(_ context.Context, _ consumerdata.OTLPTraceData) (int, error) {
		return droppedSpans, retError
	}
	return pusher
}
// checkRecordedMetricsForOTLPTraceExporter pushes numBatches two-span OTLP
// batches and asserts the received/dropped span views match expectations.
func checkRecordedMetricsForOTLPTraceExporter(t *testing.T, te exporter.OTLPTraceExporter, wantError error, droppedSpans int) {
	doneFn := observabilitytest.SetupRecordedMetricsTest()
	defer doneFn()
	spans := make([]*otlptrace.Span, 2)
	td := consumerdata.NewOTLPTraceData([]*otlptrace.ResourceSpans{{Spans: spans}})
	// Metrics views are tagged per receiver, so the context must carry one.
	ctx := observability.ContextWithReceiverName(context.Background(), fakeTraceReceiverName)
	const numBatches = 7
	for i := 0; i < numBatches; i++ {
		require.Equal(t, wantError, te.ConsumeOTLPTrace(ctx, td))
	}
	err := observabilitytest.CheckValueViewExporterReceivedSpans(fakeTraceReceiverName, fakeTraceExporterName, numBatches*len(spans))
	require.Nilf(t, err, "CheckValueViewExporterReceivedSpans: Want nil Got %v", err)
	err = observabilitytest.CheckValueViewExporterDroppedSpans(fakeTraceReceiverName, fakeTraceExporterName, numBatches*droppedSpans)
	require.Nilf(t, err, "CheckValueViewExporterDroppedSpans: Want nil Got %v", err)
}

// generateOTLPTraceTraffic sends numRequests one-span OTLP batches inside an
// always-sampled parent span so the exporter's wrapper spans get recorded.
func generateOTLPTraceTraffic(t *testing.T, te exporter.OTLPTraceExporter, numRequests int, wantError error) {
	td := consumerdata.NewOTLPTraceData([]*otlptrace.ResourceSpans{{Spans: []*otlptrace.Span{{}}}})
	ctx, span := trace.StartSpan(context.Background(), fakeTraceParentSpanName, trace.WithSampler(trace.AlwaysSample()))
	defer span.End()
	for i := 0; i < numRequests; i++ {
		require.Equal(t, wantError, te.ConsumeOTLPTrace(ctx, td))
	}
}

// checkWrapSpanForOTLPTraceExporter mirrors checkWrapSpanForTraceExporter for
// the OTLP consume path.
func checkWrapSpanForOTLPTraceExporter(t *testing.T, te exporter.OTLPTraceExporter, wantError error, numSpans int64) {
	ocSpansSaver := new(testOCTraceExporter)
	trace.RegisterExporter(ocSpansSaver)
	defer trace.UnregisterExporter(ocSpansSaver)
	const numRequests = 5
	generateOTLPTraceTraffic(t, te, numRequests, wantError)
	// Inspection time!
	ocSpansSaver.mu.Lock()
	defer ocSpansSaver.mu.Unlock()
	require.NotEqual(t, 0, len(ocSpansSaver.spanData), "No exported span data.")
	gotSpanData := ocSpansSaver.spanData
	require.Equal(t, numRequests+1, len(gotSpanData))
	// The last exported span is expected to be the parent (it ends last).
	parentSpan := gotSpanData[numRequests]
	require.Equalf(t, fakeTraceParentSpanName, parentSpan.Name, "SpanData %v", parentSpan)
	for _, sd := range gotSpanData[:numRequests] {
		require.Equalf(t, parentSpan.SpanContext.SpanID, sd.ParentSpanID, "Exporter span not a child\nSpanData %v", sd)
		require.Equalf(t, errToStatus(wantError), sd.Status, "SpanData %v", sd)
		// On failure, all spans count as failed-to-send instead of sent.
		sentSpans := numSpans
		var failedToSendSpans int64
		if wantError != nil {
			sentSpans = 0
			failedToSendSpans = numSpans
		}
		require.Equalf(t, sentSpans, sd.Attributes[obsreport.SentSpansKey], "SpanData %v", sd)
		require.Equalf(t, failedToSendSpans, sd.Attributes[obsreport.FailedToSendSpansKey], "SpanData %v", sd)
	}
}
func TestOTLPTraceExporter_Default(t *testing.T) { |
lib.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Various data structures used by the Rust compiler. The intention
//! is that code in here should be not be *specific* to rustc, so that
//! it can be easily unit tested and so forth.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(in_band_lifetimes)]
#![feature(impl_header_lifetime_elision)]
#![feature(unboxed_closures)]
#![feature(fn_traits)]
#![feature(unsize)]
#![feature(specialization)]
#![feature(optin_builtin_traits)]
#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![cfg_attr(not(stage0), feature(nll))]
#![cfg_attr(not(stage0), feature(infer_outlives_requirements))]
#![feature(allow_internal_unstable)]
#![feature(vec_resize_with)]
#![cfg_attr(unix, feature(libc))]
#![cfg_attr(test, feature(test))]
extern crate core;
extern crate ena;
#[macro_use]
extern crate log;
extern crate serialize as rustc_serialize; // used by deriving
#[cfg(unix)]
extern crate libc;
extern crate parking_lot;
#[macro_use]
extern crate cfg_if;
extern crate stable_deref_trait;
extern crate rustc_rayon as rayon;
extern crate rustc_rayon_core as rayon_core;
extern crate rustc_hash;
extern crate serialize;
#[cfg_attr(test, macro_use)]
extern crate smallvec;
// See librustc_cratesio_shim/Cargo.toml for a comment explaining this.
#[allow(unused_extern_crates)]
extern crate rustc_cratesio_shim;
pub use rustc_serialize::hex::ToHex;
pub mod svh;
pub mod array_vec;
pub mod base_n;
pub mod bitslice;
pub mod bitvec;
pub mod const_cstr;
pub mod flock;
pub mod fx;
pub mod graph;
pub mod indexed_set;
pub mod indexed_vec;
pub mod obligation_forest;
pub mod owning_ref;
pub mod ptr_key;
pub mod sip128;
pub mod small_c_str;
pub mod small_vec;
pub mod snapshot_map;
pub use ena::snapshot_vec;
pub mod sorted_map;
#[macro_use] pub mod stable_hasher;
pub mod sync;
pub mod tiny_list;
pub mod thin_vec;
pub mod transitive_relation;
pub mod tuple_slice;
pub use ena::unify;
pub mod vec_linked_list;
pub mod work_queue;
pub mod fingerprint;
/// A guard that runs the wrapped closure when dropped, unless it has been
/// disabled first via [`OnDrop::disable`].
pub struct OnDrop<F: Fn()>(pub F);

impl<F: Fn()> OnDrop<F> {
    /// Forgets the function which prevents it from running.
    /// Ensure that the function owns no memory, otherwise it will be leaked.
    pub fn disable(self) {
        // Restored: the body was garbled in the source; `forget` skips the
        // Drop impl below so the closure never executes.
        std::mem::forget(self);
    }
}

impl<F: Fn()> Drop for OnDrop<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}
// See comments in src/librustc/lib.rs
#[doc(hidden)]
pub fn __noop_fix_for_27438() {}
| {
std::mem::forget(self);
} |
quota_summary_list_builder.go | /*
Copyright (c) 2020 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// IMPORTANT: This file has been generated automatically, refrain from modifying it manually as all
// your changes will be lost when the file is generated again.
package v1 // github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1
// QuotaSummaryListBuilder contains the data and logic needed to build
// 'quota_summary' objects.
type QuotaSummaryListBuilder struct {
items []*QuotaSummaryBuilder
}
// NewQuotaSummaryList creates a new builder of 'quota_summary' objects.
func NewQuotaSummaryList() *QuotaSummaryListBuilder |
// Items sets the items of the list, replacing any previous contents with a
// fresh copy of the given builders.
func (b *QuotaSummaryListBuilder) Items(values ...*QuotaSummaryBuilder) *QuotaSummaryListBuilder {
	items := make([]*QuotaSummaryBuilder, len(values))
	for i, value := range values {
		items[i] = value
	}
	b.items = items
	return b
}
// Copy copies the items of the given list into this builder, discarding any previous items.
func (b *QuotaSummaryListBuilder) Copy(list *QuotaSummaryList) *QuotaSummaryListBuilder {
	if list == nil || list.items == nil {
		// Nothing to copy: reset to nil rather than an empty slice.
		b.items = nil
	} else {
		// Deep-copy each item through a fresh builder so later mutations of
		// this builder cannot affect the source list.
		b.items = make([]*QuotaSummaryBuilder, len(list.items))
		for i, v := range list.items {
			b.items[i] = NewQuotaSummary().Copy(v)
		}
	}
	return b
}
// Build creates a list of 'quota_summary' objects using the
// configuration stored in the builder.
func (b *QuotaSummaryListBuilder) Build() (list *QuotaSummaryList, err error) {
	items := make([]*QuotaSummary, len(b.items))
	for i, item := range b.items {
		// Fail fast: return the first item build error (list stays nil).
		items[i], err = item.Build()
		if err != nil {
			return
		}
	}
	list = new(QuotaSummaryList)
	list.items = items
	return
}
| {
return new(QuotaSummaryListBuilder)
} |
test_communication_identity_client_async.py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.communication.administration.aio import CommunicationIdentityClient
from azure_devtools.scenario_tests import RecordingProcessor
from devtools_testutils import ResourceGroupPreparer
from _shared.helper import URIIdentityReplacer
from _shared.asynctestcase import AsyncCommunicationTestCase
from _shared.testcase import BodyReplacerProcessor
from _shared.communication_service_preparer import CommunicationServicePreparer
class CommunicationIdentityClientTestAsync(AsyncCommunicationTestCase):
    """Live-service tests for the asynchronous CommunicationIdentityClient.

    Every test is gated by ``pytest.mark.live_test_only`` and provisions its
    resources via the resource-group/communication-service preparers.
    """

    def setUp(self):
        super(CommunicationIdentityClientTestAsync, self).setUp()
        # Redact identity ids, tokens and identity URLs from the recorded
        # request/response bodies so no secrets end up in test recordings.
        self.recording_processors.extend([
            BodyReplacerProcessor(keys=["id", "token"]),
            URIIdentityReplacer()])

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    @pytest.mark.live_test_only
    @pytest.mark.asyncio
    @AsyncCommunicationTestCase.await_prepared_test
    async def test_create_user(self, connection_string):
        # A newly created user must come back with a populated identifier.
        identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
        async with identity_client:
            user = await identity_client.create_user()
            assert user.identifier is not None

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    @pytest.mark.live_test_only
    @pytest.mark.asyncio
    @AsyncCommunicationTestCase.await_prepared_test
    async def test_issue_token(self, connection_string):
        # Issuing a chat-scoped token for a fresh user must return a token.
        identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
        async with identity_client:
            user = await identity_client.create_user()
            token_response = await identity_client.issue_token(user, scopes=["chat"])
            assert user.identifier is not None
            assert token_response.token is not None

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    @pytest.mark.live_test_only
    @pytest.mark.asyncio
    @AsyncCommunicationTestCase.await_prepared_test
    async def test_revoke_tokens(self, connection_string):
        # Revoking must succeed for a user that holds an issued token.
        identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
        async with identity_client:
            user = await identity_client.create_user()
            token_response = await identity_client.issue_token(user, scopes=["chat"])
            await identity_client.revoke_tokens(user)
            assert user.identifier is not None
            assert token_response.token is not None

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    @pytest.mark.live_test_only
    @pytest.mark.asyncio
    @AsyncCommunicationTestCase.await_prepared_test
    async def test_delete_user(self, connection_string):
        # Deleting a freshly created user must not raise.
        identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
        async with identity_client:
            user = await identity_client.create_user()
            await identity_client.delete_user(user)
            assert user.identifier is not None
probfoil_1.py | """Implementation of the OpenProbFOIL algorithm.
"""
from __future__ import print_function
from problog.program import PrologFile
from problog.logic import term2str, Term, Var
from data import DataFile
from language import TypeModeLanguage
from rule import FOILRule, FOILRuleB
from learn_1 import CandidateBeam, LearnEntail
from scipy.optimize import minimize
from ad import gh
from ad.admath import *
from numpy import seterr, arange, concatenate
from logging import getLogger
import logging
from subprocess import Popen, PIPE
from time import time
import argparse
import sys
import os
import random
import psycopg2
from score import rates, accuracy, m_estimate_relative, precision, recall, m_estimate_future_relative, significance, pvalue2chisquare
import rule
from eval import getLogList
from getSQLQuery import getSQLQuery # Get SQL Query for Numeric SS
from getExpression import getExpression # Get Expression for Symbolic SS
from copy import copy
from itertools import product
import pickle
class ProbFOIL(LearnEntail):
def | (self, data, beam_size=5, logger='probfoil', minhc = 0.00001, minpca = 0.00001, lr1 = 0.001, lr2 = 0.0001, iterations = 10000, maxAmieRules = None, ssh = False, cwLearning = False, quotes = False, m=1, cost=1, l=None, p=None, disableTypeConstraints = False, closed_world=False, global_score = 'cross_entropy', optimization_method = 'incremental', candidate_rules = 'amie', **kwargs):
self.candidate_rules = candidate_rules
self.pad = 33
self.logFile = kwargs['log']
LearnEntail.__init__(self, data, TypeModeLanguage(**kwargs), logger=logger, **kwargs)
read_start = time()
#self.negatives = set()
#self.negativeThreshold = 0.9
if self.candidate_rules != "amie":
self.load(data) # for types and modes
self.totalExamples = len(self._examples)
getLogger('probfoil').info('%-*s: %d' % (self.pad, "Number of examples (M)", self.totalExamples))
getLogger('probfoil').info('%-*s: %.4f' % (self.pad, "Positive probabilistic part (P)", sum(self._scores_correct)))
getLogger('probfoil').info('%-*s: %.4f' % (self.pad, "Negative probabilistic part (N)", self.totalExamples - sum(self._scores_correct)))
else:
self.data = data
self._time_read = time() - read_start
self._beamsize = beam_size
self._m_estimate = m
self._max_length = l
self.open_world = not(closed_world)
self.global_score = global_score
self.optimization_method = optimization_method
self.minpca = minpca
self.minhc = minhc
self.tolerance = 1e-12
#self.maxIncrement = [0.001, 0.0002]
self.maxIncrement = [0.00001, 0.00001]
self.iterations = iterations
self.misclassificationCost = 1
#self.testFile = test
self.learningRate = [lr1, lr2]
self.stepCheck = 500
self.closedWorldNegativesFactor = 1
self.openWorldNegativesFactor = 1
self.loadRule = None
self.learnAllRules = True
self.ssh = ssh
self.replaceDB = True
self.enforceTypeConstraints = not(disableTypeConstraints)
self.allowRecursion = False
self.factsWithQuotes = quotes
self.cwLearning = cwLearning
self.maxAmieRules = maxAmieRules
self.terminateAtFixedPoint = False
if p is None:
self._min_significance = None
else:
self._min_significance = pvalue2chisquare(p)
getLogger('probfoil').info('%-*s: %d' % (self.pad, "Beam Size", self._beamsize))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "m-estimate Parameter", str(self._m_estimate)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Tolerance Parameter", str(self.tolerance)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Max Increments", str(self.maxIncrement)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Learning Rate", str(self.learningRate)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Closed World Negatives' Factor", str(self.closedWorldNegativesFactor)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Open World Negatives' Factor", str(self.openWorldNegativesFactor)))
getLogger('probfoil').info('%-*s: %d' % (self.pad, "#Iterations in SGD", self.iterations))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Misclassification Cost of -ves", str(self.misclassificationCost)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Min Significance", str(self._min_significance)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Max Rule Length", str(self._max_length)))
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Open World Setting", self.open_world))
#getLogger('probfoil').info('%-*s: %s' % (self.pad, "Test File", str(self.testFile)))
self.interrupted = False # Set it as True if you want to force it to learn just 1 rule
self.lastRuleMerged = False
self._stats_evaluations = 0
self._stats_numericSS = 0
self._stats_symbolicSS = 0
self._stats_getSQLQuery = 0
self._stats_getExpression = 0
self._time_numericSS = 0
self._time_symbolicSS = 0
self._time_optimization = 0
self._time_getSQLQuery = 0
self._time_getCanonicalForm = 0
self._time_executeQuery = 0
self._time_executePSQL = 0
self._time_getQueryProbability = 0
self._time_getExpression = 0
self._time_getQueryExpression = 0
self._time_learn = 0
def best_rule(self, current):
"""Find the best rule to extend the current rule set.
:param current:
:return:
"""
timeStartOverall = time()
current_rule = FOILRule(target=self.target, previous=current, correct = self._scores_correct)
current_rule.scores = [1.0] * self.totalExamples
current_rule.score = self._compute_rule_score(current_rule)
c_tp, c_fp, c_tn, c_fn = rates(current_rule)
current_rule.score_cmp = (current_rule.score, c_tp)
current_rule.processed = False
current_rule.probation = False
current_rule.avoid_literals = set()
if current:
prev_tp = rates(current)[0]
else:
prev_tp = 0.0
current_rule.query = [""]*self.totalExamples
current_rule.lossStringAcc = ""
current_rule.lossStringSL = ""
current_rule.lossStringCE = ""
current_rule.expressionList = [""]*self.totalExamples
best_rule = current_rule
self.canonicalRuleList = []
if self.candidate_rules == "amie":
selectedRule = None
if self.scoreList == [None]*len(self.AmieRuleList):
ruleList = []
for i, (headLitral, amieLiteralList) in enumerate(self.AmieRuleList):
if i in self.selectedAmieRules:
ruleList.append(None)
continue
self.canonicalRuleList = []
rule = current_rule
for literal in amieLiteralList:
rule = rule & literal
rule.scores = [1.0] * len(self._scores_correct)
rule.expressionList = [""]*self.totalExamples
rule.lossStringAcc = ""
rule.lossStringSL = ""
rule.lossStringCE = ""
rule.expressionList = [""]*self.totalExamples
#rule.confidence = self.stdConfidenceList[i]
getLogger('probfoil').debug('Evaluating Rule\t\t\t\t\t: %s' % rule)
#rule.scores = self._compute_scores_predict(rule)
rule.scores = [1.0] * len(self._scores_correct)
#getLogger('probfoil').log(9, 'Predicted scores\t\t\t\t\t: %s' % str([0 if round(item,1) == 0 else 1 if round(item,1) == 1 else round(item,1) for item in rule.scores]))
self._stats_evaluations += 1
#rule.score = self._compute_rule_score(rule)
rule.score = 1.0
self.scoreList[i] = rule.score
ruleList.append(rule)
if rule.score > best_rule.score:
best_rule = rule
selectedRule = i
getLogger('probfoil').debug('Candidate Score List\t\t\t: ' + str(self.scoreList))
else:
#maxIndex, maxScore = max(enumerate(self.scoreList), key=lambda v: v[1])
maxIndex = None
maxScore = None
for i, score in enumerate(self.scoreList):
if i in self.selectedAmieRules:
continue
if score > maxScore:
maxScore = score
maxIndex = i
if maxIndex == None or maxIndex >= len(self.AmieRuleList):
self.breakNow = True
return None
selectedRule = maxIndex
headLitral, amieLiteralList = self.AmieRuleList[maxIndex]
self.canonicalRuleList = []
best_rule = current_rule
for literal in amieLiteralList:
best_rule = best_rule & literal
best_rule.scores = [1.0] * len(self._scores_correct)
best_rule.expressionList = [""]*self.totalExamples
best_rule.lossStringAcc = ""
best_rule.lossStringSL = ""
best_rule.lossStringCE = ""
best_rule.expressionList = [""]*self.totalExamples
#rule.confidence = self.stdConfidenceList[i]
getLogger('probfoil').debug('Evaluating Rule\t\t\t\t\t: %s' % best_rule)
#best_rule.scores = self._compute_scores_predict(best_rule)
best_rule.scores = [1.0] * len(self._scores_correct)
#getLogger('probfoil').log(9, 'Predicted scores\t\t\t\t\t: %s' % str([0 if round(item,1) == 0 else 1 if round(item,1) == 1 else round(item,1) for item in best_rule.scores]))
self._stats_evaluations += 1
#best_rule.score = self._compute_rule_score(best_rule)
best_rule.score = 1.0
best_rule.max_x = 1.0
if len(best_rule.get_literals()) == 1:
if not self.trueAdded:
self.trueAdded = True
else: #Select another rule
maxIndex, maxScore = max(enumerate(self.scoreList), key=lambda v: v[1])
best_rule = ruleList[maxIndex]
selectedRule = maxIndex
elif str(best_rule.get_literals()[1]) == 'fail':
if not self.failAdded:
self.failAdded = True
else: #Select another rule
maxIndex, maxScore = max(enumerate(self.scoreList), key=lambda v: v[1])
best_rule = ruleList[maxIndex]
selectedRule = maxIndex
if selectedRule != None:
self.selectedAmieRules.append(selectedRule)
if best_rule == None:
self.breakNow = True
return None
self._select_rule(best_rule)
return best_rule
try:
candidates = CandidateBeam(self._beamsize)
candidates.push(current_rule)
iteration = 1
time_start = time()
while candidates:
next_candidates = CandidateBeam(self._beamsize)
getLogger('probfoil').debug('\n%-*s: %s [%s]' % (self.pad-1, "Best rule so far", best_rule, best_rule.score))
time_total = time() - time_start
getLogger('probfoil').debug('%-*s: %.1fs' % (self.pad-1, "Time - intermediate rule", time_total))
time_start = time()
getLogger('probfoil').debug('%-*s: %s' % (self.pad-1, "Candidates - iteration", str(iteration)))
getLogger('probfoil').debug(candidates)
iteration += 1
while candidates:
current_rule = candidates.pop()
current_rule_literal_avoid = set(current_rule.avoid_literals)
getLogger('probfoil').debug('TO AVOID: %s => %s' % (current_rule, current_rule.avoid_literals))
c_tp, c_fp, c_tn, c_fn = rates(current_rule)
if self._max_length and len(current_rule) >= self._max_length:
pass
else:
for ref in self.language.refine(current_rule):
if ref in current_rule.avoid_literals: # or ref.prototype in current_rule.avoid_literals:
getLogger('probfoil').debug('SKIPPED literal %s for rule %s' % (ref, current_rule))
continue
rule = current_rule & ref
rule.expressionList = [""]*self.totalExamples
rule.lossStringAcc = ""
rule.lossStringSL = ""
rule.lossStringCE = ""
#rule.ruleAscii = self.getRuleAscii(rule)
getLogger('probfoil').debug('%-*s: %s' % (self.pad-1, "Evaluating Rule", str(rule)))
time_start1 = time()
rule.scores = self._compute_scores_predict(rule)
time_total1 = time() - time_start1
getLogger('probfoil').log(8,'%-*s: %.1fs' % (self.pad, "Time - scores prediction", time_total1))
getLogger('probfoil').log(9,'%-*s: %s' % (self.pad, "Predicted scores", str([0 if round(item,1) == 0 else 1 if round(item,1) == 1 else round(item,1) for item in rule.scores])))
self._stats_evaluations += 1
rule.score = self._compute_rule_score(rule)
r_tp, r_fp, r_tn, r_fn = rates(rule)
rule.score_cmp = (rule.score, r_tp)
rule.score_future = self._compute_rule_future_score(rule)
rule.processed = False
rule.avoid_literals = current_rule_literal_avoid
if prev_tp > r_tp - self.tolerance: # new rule has no tp improvement over previous
getLogger('probfoil').debug('%s %s %s %s [REJECT coverage] %s' % (rule, rule.score, rates(rule), rule.score_future, prev_tp))
# remove this literal for all sibling self.rules
current_rule_literal_avoid.add(ref)
current_rule_literal_avoid.add(ref.prototype)
elif rule.score_future <= best_rule.score:
getLogger('probfoil').debug('%s %s %s %s [REJECT potential] %s' % (rule, rule.score, rates(rule), rule.score_future, best_rule.score))
# remove this literal for all sibling self.rules
current_rule_literal_avoid.add(ref)
current_rule_literal_avoid.add(ref.prototype)
elif r_fp > c_fp - self.tolerance: # and not rule.has_new_variables():
# no fp eliminated and no new variables
getLogger('probfoil').debug('%s %s %s %s [REJECT noimprov] %s' % (rule, rule.score, rates(rule), rule.score_future, best_rule.score))
# remove this literal for all sibling self.rules
# current_rule_literal_avoid.add(ref)
# current_rule_literal_avoid.add(ref.prototype)
elif r_fp > c_fp - self.tolerance and current_rule.probation:
getLogger('probfoil').debug('%s %s %s %s [REJECT probation] %s' % (rule, rule.score, rates(rule), rule.score_future, best_rule.score))
elif r_fp < self.tolerance:
# This rule can not be improved by adding a literal.
# We reject it for future exploration,
# but we do consider it for best rule.
getLogger('probfoil').debug('%s %s %s %s [REJECT* fp] %s' % (rule, rule.score, rates(rule), rule.score_future, prev_tp))
if rule.score_cmp > best_rule.score_cmp:
getLogger('probfoil').debug('BETTER RULE %s %s > %s' % (rule, rule.score_cmp, best_rule.score_cmp))
best_rule = rule
#self.queryCurrentRule = rule.query
else:
if r_fp > c_fp - self.tolerance:
rule.probation = True
else:
rule.probation = False
if next_candidates.push(rule):
getLogger('probfoil').debug('%s %s %s %s [ACCEPT]' % (rule, rule.score, rates(rule), rule.score_future))
else:
getLogger('probfoil').debug('%s %s %s %s [REJECT beam]' % (rule, rule.score, rates(rule), rule.score_future))
if rule.score_cmp > best_rule.score_cmp:
getLogger('probfoil').debug('BETTER RULE %s %s > %s' % (rule, rule.score_cmp, best_rule.score_cmp))
best_rule = rule
#self.queryCurrentRule = rule.query
candidates = next_candidates
except KeyboardInterrupt:
self.interrupted = True
getLogger('probfoil').info('LEARNING INTERRUPTED BY USER')
while best_rule.parent and best_rule.parent.score > best_rule.score - self.tolerance:
best_rule = best_rule.parent
#self.queryCurrentRule = best_rule.query
self._select_rule(best_rule)
timeOverall = time() - timeStartOverall
getLogger('probfoil').debug('%-*s: %.1fs' % (self.pad-1, "Time - best_rule", timeOverall))
return best_rule
def regularize(self, a, factor = 1):
if isinstance(a, float) or isinstance(a, int):
if a > 1-factor*self.tolerance:
return 1-factor*self.tolerance
elif a < factor*self.tolerance:
return factor*self.tolerance
else:
return a
elif isinstance(a, str):
a = float(a)
if a > 1-factor*self.tolerance:
return str(eval("1 - " + str(factor*self.tolerance)))
elif a < factor*self.tolerance:
return str(factor*self.tolerance)
else:
return str(a)
def initial_hypothesis(self):
initial = FOILRule(self.target)
initial = initial & Term('fail')
initial.accuracy = 0
initial.scores = [0.0] * self.totalExamples
if self.learnAllRules == False:
initial.correct = self._scores_correct
initial.expressionList = [""]*self.totalExamples
initial.replaceableQuery = ''
initial.lossStringAcc = ''
initial.lossStringSL = ''
initial.lossStringCE = ''
initial.score = self._compute_rule_score(initial)
initial.avoid_literals = set()
trueRule = FOILRule(self.target, previous = initial)
trueRule.accuracy = 0
trueRule.scores = [1.0] * self.totalExamples
if self.learnAllRules == False:
trueRule.correct = self._scores_correct
trueRule.expressionList = [""]*self.totalExamples
trueRule.replaceableQuery = ''
trueRule.lossStringAcc = ''
trueRule.lossStringSL = ''
trueRule.lossStringCE = ''
trueRule.score = self._compute_rule_score(trueRule)
self._select_rule(trueRule)
trueRule.avoid_literals = set()
self.trueAdded = True
return trueRule
def connect_PSQLDB(self, name = None):
if self.ssh:
if name == None:
conn = psycopg2.connect(user = "arcchit", password = "arcchit", host = "localhost")
else:
conn = psycopg2.connect(dbname = name, user = "arcchit", password = "arcchit", host = "localhost")
else:
if name == None:
conn = psycopg2.connect(dbname = 'postgres', user = self.user)
else:
conn = psycopg2.connect(dbname = name, user = self.user)
return conn
    def initialize_PSQLDB(self):
        """Create the PostgreSQL database and install the custom SQL functions.

        Resolves the OS user via `echo $USER`, creates a database named
        self.name (dropping/replacing it when self.replaceDB allows, otherwise
        appending a counter until creation succeeds), stores an autocommit
        connection/cursor on self.conn / self.cursor, and registers three
        families of aggregate/scalar functions used during rule evaluation:
          * ior / l_ior                - symbolic (string-building) noisy-or
          * ior_n / l_ior_n / l1*_n    - numeric noisy-or and log-space helpers
          * ior_ad / l_ior_ad          - code-emitting variants for automatic
                                         differentiation
        NOTE(review): DDL is built by string concatenation from self.name;
        assumes the database name is not attacker-controlled.
        """
        # ----------------------------------- Initialize PSQL Database -----------------------------------
        time_start = time()
        # $USER output ends with a newline, hence the slice.
        outputString = Popen("echo $USER", stdout=PIPE, shell=True).communicate()
        self.user = outputString[0][0:len(outputString[0])-1]
        conn = self.connect_PSQLDB(None)
        conn.autocommit = True
        conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = conn.cursor()
        counter = 0
        if self.replaceDB:
            # Try to drop and re-create the database; on failure fall back to
            # choosing a fresh numbered name below.
            try:
                cursor.execute("DROP DATABASE IF EXISTS " + self.name +";")
                cursor.execute("CREATE DATABASE " + self.name + ";")
            except:
                self.replaceDB = False
        if self.replaceDB == False:
            replaceName = False
            while True:
                try:
                    if counter == 0:
                        cursor.execute("CREATE DATABASE " + self.name + ";")
                    else:
                        cursor.execute("CREATE DATABASE " + self.name + str(counter) + ";")
                        replaceName = True
                    break
                except Exception as e:
                    getLogger('probfoil').error('%-*s: %s' % (self.pad-1, "Exception Occurred", str(e)[:-1]))
                    counter += 1
            if replaceName:
                self.name = self.name + str(counter)
        getLogger('probfoil').debug('%-*s: %s' % (self.pad-1, "Created PSQL Database", self.name))
        cursor.close();
        conn.close();
        # Re-connect, this time directly to the newly created database.
        self.conn = self.connect_PSQLDB(self.name)
        #self.conn = psycopg2.connect(dbname = self.name, user = self.user)
        self.conn.autocommit = True
        self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        self.cursor = self.conn.cursor()
        self.cursor.execute("SET client_min_messages = error;")
        # Aggregate functions for Symbolic Safe Sample
        # ior(x) builds the string "(1 - prod(1 - x_i))" (noisy-or) instead of
        # computing it; l_ior accumulates a textual sum for the log-space form.
        self.cursor.execute("DROP AGGREGATE IF EXISTS ior (double precision);")
        self.cursor.execute("DROP AGGREGATE IF EXISTS ior (text);")
        self.cursor.execute("DROP FUNCTION IF EXISTS ior_sfunc (text, double precision);")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION ior_sfunc (text, double precision) returns text AS $$select concat('max(', $1, '*(1 - ', cast($2 AS text), '), 0.00001)')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_sfunc (text, double precision) returns text AS $$select concat($1, '*(1 - ', cast($2 AS text), ')')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS ior_finalfunc (text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_finalfunc (text) returns text AS $$select concat('(1 - ', $1, ')')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE ior (double precision) (sfunc = ior_sfunc, stype = text, finalfunc = ior_finalfunc, initcond = '1');")
        self.cursor.execute("DROP FUNCTION IF EXISTS ior_sfunc (text, text);")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION ior_sfunc (text, text) returns text AS $$select concat('max(', $1, '*(1 - ', $2, '), 0.00001)')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_sfunc (text, text) returns text AS $$select concat($1, '*(1 - ', $2, ')')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE ior (text) (sfunc = ior_sfunc, stype = text, finalfunc = ior_finalfunc, initcond = '1');")
        self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior (double precision);")
        self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior (text);")
        self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_sfunc (text, double precision);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l_ior_sfunc (text, double precision) returns text AS $$select concat($1, ' + ', cast($2 AS text))$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_finalfunc (text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l_ior_finalfunc (text) returns text AS $$select concat('(', $1, ')')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE l_ior (double precision) (sfunc = l_ior_sfunc, stype = text, finalfunc = l_ior_finalfunc, initcond = '0');")
        self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_sfunc (text, text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l_ior_sfunc (text, text) returns text AS $$select concat($1, ' + ', $2)$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE l_ior (text) (sfunc = l_ior_sfunc, stype = text, finalfunc = l_ior_finalfunc, initcond = '0');")
        # l1prod / l1diff / l1sum build textual log-space expressions for
        # products, differences and sums of probabilities (all four text /
        # double precision argument combinations).
        self.cursor.execute("DROP FUNCTION IF EXISTS l1prod (double precision, double precision);")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (double precision, double precision) returns text AS $$select concat('(max(', cast($1 AS text), ',', cast($2 AS text), ') + log(exp(', cast($1 AS text), ' - max(', cast($1 AS text), ',', cast($2 AS text), ')) + exp(', cast($2 AS text), ' - max(', cast($1 AS text), ',', cast($2 AS text), ')) - exp(', cast($1 AS text), '+', cast($2 AS text), '- max(', cast($1 AS text), ',', cast($2 AS text), '))))')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (double precision, double precision) returns text AS $$select concat('log(exp(', cast($1 AS text), ') + exp(', cast($2 AS text), ') - exp(', cast($1 AS text),'+', cast($2 AS text),'))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1prod (text, double precision);")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (text, double precision) returns text AS $$select concat('(max(', $1, ',', cast($2 AS text), ') + log(exp(', $1, ' - max(', $1, ',', cast($2 AS text), ')) + exp(', cast($2 AS text), ' - max(', $1, ',', cast($2 AS text), ')) - exp(', $1, '+', cast($2 AS text), '- max(', $1, ',', cast($2 AS text), '))))')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (text, double precision) returns text AS $$select concat('log(exp(', $1, ') + exp(', cast($2 AS text), ') - exp(', $1,'+', cast($2 AS text),'))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1prod (double precision, text);")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (double precision, text) returns text AS $$select concat('(max(', cast($1 AS text), ',', $2, ') + log(exp(', cast($1 AS text), ' - max(', cast($1 AS text), ',', $2, ')) + exp(', $2, ' - max(', cast($1 AS text), ',', $2, ')) - exp(', cast($1 AS text), '+', $2, '- max(', cast($1 AS text), ',', $2, '))))')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (double precision, text) returns text AS $$select concat('log(exp(', cast($1 AS text), ') + exp(', $2, ') - exp(', cast($1 AS text),'+', $2,'))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1prod (text, text);")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (text, text) returns text AS $$select concat('(max(',$1, ',', $2, ') + log(exp(', $1, ' - max(', $1, ',', $2, ')) + exp(', $2, ' - max(', $1, ',', $2, ')) - exp(', $1, '+', $2, '- max(', $1, ',', $2, '))))')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod (text, text) returns text AS $$select concat('log(exp(', $1, ') + exp(', $2, ') - exp(', $1,'+', $2,'))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1diff (double precision, double precision);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1diff (double precision, double precision) returns text AS $$select concat('log(1 - exp(', cast($2 AS text), ') + exp(', cast($1 AS text), '))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1diff (text, double precision);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1diff (text, double precision) returns text AS $$select concat('log(1 - exp(', cast($2 AS text), ') + exp(', $1, '))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1diff (double precision, text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1diff (double precision, text) returns text AS $$select concat('log(1 - exp(', $2, ') + exp(', cast($1 AS text), '))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1diff (text, text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1diff (text, text) returns text AS $$select concat('log(1 - exp(', $2, ') + exp(', $1, '))')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1sum (double precision, double precision);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1sum (double precision, double precision) returns text AS $$select concat('log(exp(', cast($1 AS text), ') + exp(', cast($2 AS text), ') - 1)')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1sum (text, double precision);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1sum (text, double precision) returns text AS $$select concat('log(exp(', $1, ') + exp(', cast($2 AS text), ') - 1)')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1sum (double precision, text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1sum (double precision, text) returns text AS $$select concat('log(exp(', cast($1 AS text), ') + exp(', $2, ') - 1)')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l1sum (text, text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1sum (text, text) returns text AS $$select concat('log(exp(', $1, ') + exp(', $2, ') - 1)')$$ LANGUAGE SQL;")
        # Aggregate functions for Numeric Safe Sample
        # Same operations as above but computed numerically (with clamping /
        # underflow guards around exp/ln) rather than emitted as text.
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_sfunc_n (double precision, double precision) RETURNS double precision AS 'select max(val) from (VALUES($1 * (1.0 - $2)), (0.00001)) AS Vals(val)' LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_finalfunc_n (double precision) RETURNS double precision AS 'select 1.0 - $1' LANGUAGE SQL;")
        self.cursor.execute("DROP AGGREGATE IF EXISTS ior_n (double precision);")
        self.cursor.execute("CREATE AGGREGATE ior_n (double precision) (sfunc = ior_sfunc_n, stype = double precision, finalfunc = ior_finalfunc_n, initcond = '1.0');")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l_ior_sfunc_n (double precision, double precision) RETURNS double precision AS 'select $1 + $2' LANGUAGE SQL;")
        self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior_n (double precision);")
        self.cursor.execute("CREATE AGGREGATE l_ior_n (double precision) (sfunc = l_ior_sfunc_n, stype = double precision, initcond = '0.0');")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod_n (double precision, double precision) RETURNS double precision AS 'select m + ln(exp($1-m) + exp($2-m) - exp($1+$2-m)) from(select max(val) as m from (VALUES($1), ($2)) AS Vals(val)) as foo' LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1prod_n (double precision, double precision) RETURNS double precision AS 'select case when $1 > -745 AND $2 > -745 then m + ln(exp($1-m) + exp($2-m) - exp($1+$2-m)) else m end from(select max(val) as m from (VALUES($1), ($2)) AS Vals(val)) as foo' LANGUAGE SQL;")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION l1diff_n (double precision, double precision) RETURNS double precision AS 'select ln(1 - exp($2) + exp($1))' LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1diff_n (double precision, double precision) RETURNS double precision AS 'select case when $1 >= -745 and $2 >= -745 and 1+exp($1)-exp($2) > 0 then ln(1 - exp($2) + exp($1)) when $1 >= -745 and $2 >= -745 and 1+exp($1)-exp($2) <= 0 then NULL when $1 >= -745 and $2 < -745 then ln(1+exp($1)) when $1 < -745 and $2 > 0 then NULL when $1 < -745 and $2 <= 0 and $2 >= -745 then ln(1-exp($2)) else 0 end' LANGUAGE SQL;")
        #self.cursor.execute("CREATE OR REPLACE FUNCTION l1sum_n (double precision, double precision) RETURNS double precision AS 'select ln(exp($1) + exp($2) - 1)' LANGUAGE SQL;")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l1sum_n (double precision, double precision) RETURNS double precision AS 'select case when $1 >= -745 and $2 >= -745 and exp($1)+exp($2)-1 > 0 then ln(exp($1) + exp($2) - 1) when $1 >= -745 and $2 >= -745 and exp($1)+exp($2)-1 <= 0 then NULL when $1 > 0 and $2 < -745 then ln(exp($1)-1) when $1 < -745 and $2 > 0 then ln(exp($2)-1) else NULL end' LANGUAGE SQL;")
        # Aggregate functions for Automatic Differentiation
        # These emit lines of imperative code ("a = a*(1 - p);" etc.) instead
        # of values, to be consumed by the gradient-computation machinery.
        self.cursor.execute("DROP AGGREGATE IF EXISTS ior_ad (double precision);")
        self.cursor.execute("DROP AGGREGATE IF EXISTS ior_ad (text);")
        self.cursor.execute("DROP FUNCTION IF EXISTS ior_sfunc_ad (text, double precision);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_sfunc_ad (text, double precision) returns text AS $$select concat($1, ' a = a*(1 - ', cast($2 AS text), ');')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS ior_finalfunc_ad (text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_finalfunc_ad (text) returns text AS $$select concat($1, ' p = 1 - a;')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE ior_ad (double precision) (sfunc = ior_sfunc_ad, stype = text, finalfunc = ior_finalfunc_ad, initcond = '');")
        self.cursor.execute("DROP FUNCTION IF EXISTS ior_sfunc_ad (text, text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION ior_sfunc_ad (text, text) returns text AS $$select concat($1, ' a = a*(1 - ', $2, ');')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE ior_ad (text) (sfunc = ior_sfunc_ad, stype = text, finalfunc = ior_finalfunc_ad, initcond = '');")
        self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior_ad (double precision);")
        self.cursor.execute("DROP AGGREGATE IF EXISTS l_ior_ad (text);")
        self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_sfunc_ad (text, double precision);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l_ior_sfunc_ad (text, double precision) returns text AS $$select concat($1, ' p = p + ', cast($2 AS text), ';')$$ LANGUAGE SQL;")
        self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_finalfunc_ad (text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l_ior_finalfunc_ad (text) returns text AS $$select $1 $$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE l_ior_ad (double precision) (sfunc = l_ior_sfunc_ad, stype = text, finalfunc = l_ior_finalfunc_ad, initcond = '');")
        self.cursor.execute("DROP FUNCTION IF EXISTS l_ior_sfunc_ad (text, text);")
        self.cursor.execute("CREATE OR REPLACE FUNCTION l_ior_sfunc_ad (text, text) returns text AS $$select concat($1, ' p = p + ', $2, ';')$$ LANGUAGE SQL;")
        self.cursor.execute("CREATE AGGREGATE l_ior_ad (text) (sfunc = l_ior_sfunc_ad, stype = text, finalfunc = l_ior_finalfunc_ad, initcond = '');")
        time_total = time() - time_start
        getLogger('probfoil').debug('%-*s: %.1fs' % (self.pad-1, "Time - initialize PSQLDB", time_total))
    def learn_readFile(self, inputFile = None, initializePSQLDB = True):
        """Read the input data and populate the per-predicate PostgreSQL tables.

        Two paths exist depending on how candidate rules are generated:
          * candidate_rules != "amie": data comes from the already-parsed
            ProbLog database (self._data); tables are created from the
            declared types and facts are inserted from the clause database.
          * candidate_rules == "amie": the raw ProbLog file(s) are parsed
            line-by-line by the nested read() helper, which also collects the
            target examples (self._examples / self._scores_correct).

        Side effects: (re)initializes the PSQL database unless
        initializePSQLDB is False, and fills predicateDict, constantDict,
        closedWorldTotal, lams, universalConstantId and related bookkeeping.

        :param inputFile: explicit input path; when None, the configured
            data source(s) are used
        :param initializePSQLDB: when False, reuse an existing database
            (falls back to initializing if the connection fails)
        """
        # ------------------------------------- Read the input file --------------------------------------
        time_start = time()
        self.predicateDict = {}
        self.constantDict = {}
        self.closedWorldTotal = {}
        self.canonicalRuleList = []
        self.queryDict = {}
        self.symbolicQueryDict = {}
        self.previousInstantiatedTableSet = set()
        self.lams = {}
        self.negativeWeight = 1 #Remove later
        self.totalPositiveExamples = 0
        self.universalConstantId = {}
        self.universalConstantCount = 0
        if self.candidate_rules != "amie":
            # ---- Path 1: data already parsed by ProbLog (self._data) ----
            if inputFile == None:
                self.InputFile = str(self._data._source_files[0])
            else:
                self.InputFile = inputFile
            # Database name is derived from the input file's base name.
            self.name = self.InputFile[self.InputFile.rfind("/")+1:self.InputFile.rfind(".")].replace(".","_").lower()
            if initializePSQLDB:
                self.initialize_PSQLDB()
            else:
                # Reuse an existing database: just reconnect.
                outputString = Popen("echo $USER", stdout=PIPE, shell=True).communicate()
                self.user = outputString[0][0:len(outputString[0])-1]
                #self.conn = psycopg2.connect(dbname = self.name, user = self.user)
                self.conn = self.connect_PSQLDB(self.name)
                self.conn.autocommit = True
                self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                self.cursor = self.conn.cursor()
                self.cursor.execute("SET client_min_messages = error;")
            self.modeSet = set()
            self.targetArity = self._target._Term__arity
            self.targetPredicate = self._target._Term__functor
            #self.hypothesisAscii = 64 + self.targetArity
            self.hypothesisFreeVars = 0
            for predicate, modes in self._language._modes:
                self.modeSet.add(predicate)
            # One table per non-target predicate: integer columns v0..vk plus
            # a probability column p.
            for predicate, types in self._language._types.items():
                if predicate[0] != self.targetPredicate:
                    sql_query = "CREATE TABLE IF NOT EXISTS " + predicate[0] + " ("
                    i = 0
                    while i < predicate[1]:
                        sql_query = sql_query + "v" + str(i) + " integer, "
                        i = i + 1
                    sql_query = sql_query + "p double precision);"
                    self.cursor.execute(sql_query)
                #else:
                    #self.targetBaseList = types
                if predicate[0] not in self.predicateDict:
                    self.predicateDict[predicate[0]] = types
                    self.closedWorldTotal[predicate[0]] = 0
                    if predicate[0] != self.targetPredicate:
                        self.lams[predicate[0]] = 0
                for type in types:
                    if type not in self.constantDict:
                        #TODO: Old
                        self.constantDict[type] = self.language.get_type_values(type)
            # Insert every probabilistic fact of a mode predicate; constants
            # are encoded by their index in the per-type constant list.
            for item in self._data._database._ClauseDB__nodes:
                if hasattr(item, 'probability') and item.functor in self.modeSet:
                    self.closedWorldTotal[item.functor] += 1
                    factString = ""
                    for i, arg in enumerate(item.args):
                        if factString == "":
                            factString = str(self.constantDict[self.predicateDict[item.functor][i]].index(arg))
                        else:
                            factString = factString + ", " + str(self.constantDict[self.predicateDict[item.functor][i]].index(arg))
                    # Probabilities are clamped below 1 by self.tolerance.
                    if item.probability is None:
                        prob = str(eval("1 - " + str(self.tolerance)))
                    elif item.probability._Term__functor >= 1 - self.tolerance:
                        prob = str(eval("1 - " + str(self.tolerance)))
                    else:
                        prob = str(item.probability._Term__functor)
                    self.cursor.execute("INSERT INTO " + item.functor + " VALUES (" + factString + ", " + prob + ");")
        else:
            # ---- Path 2: parse the raw ProbLog file(s) for AMIE candidates ----
            self._scores_correct = []
            self._examples = []
            if inputFile == None:
                self.InputFile = self.data[0]
            else:
                self.InputFile = inputFile
            self.name = self.InputFile[self.InputFile.rfind("/")+1:self.InputFile.rfind(".")].replace(".","_").lower()
            try:
                outputString = Popen("echo $USER", stdout=PIPE, shell=True).communicate()
                self.user = outputString[0][0:len(outputString[0])-1]
                if not initializePSQLDB:
                    # Probe that the existing database is reachable.
                    #conn = psycopg2.connect(dbname = self.name, user = self.user)
                    conn = self.connect_PSQLDB(self.name)
            except Exception as e:
                getLogger('probfoil').warning("The database " + self.name + " is not initialized before.")
                getLogger('probfoil').warning(e)
                initializePSQLDB = True
            if initializePSQLDB:
                self.initialize_PSQLDB()
            else:
                #self.conn = psycopg2.connect(dbname = self.name, user = self.user)
                self.conn = self.connect_PSQLDB(self.name)
                self.conn.autocommit = True
                self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                self.cursor = self.conn.cursor()
                self.cursor.execute("SET client_min_messages = error;")
            self.targetPredicate = ""
            def read(file):
                # Parse one ProbLog file: base/learn/mode declarations and
                # (probabilistic) facts. Only binary predicates are supported.
                inputf = open(file, 'r')
                for line in inputf:
                    #Pre-processing
                    line = line.replace(" ","")
                    if line == "\n":
                        continue
                    elif line[0] == "%":
                        continue
                    #Reading Lines
                    if line[:5] == "base(":
                        # base(pred(type1,type2)). -> declare predicate + table
                        predicate = line[5:].split("(")[0]
                        types = line[5:].split("(")[1].split(")")[-3].split(",")
                        arity = len(types)
                        if arity != 2:
                            getLogger('probfoil').error("Arity of Predicate (" + predicate + ") is " + str(arity) + " instead of 2.")
                            return
                        for type in types:
                            if type not in self.constantDict:
                                self.constantDict[type] = {}
                        self.predicateDict[predicate] = types
                        self.closedWorldTotal[predicate] = 0
                        self.lams[predicate] = 0
                        if initializePSQLDB:
                            sql_query = "CREATE TABLE IF NOT EXISTS " + predicate + " ("
                            i = 0
                            while i < arity:
                                sql_query = sql_query + "v" + str(i) + " integer, "
                                i += 1
                            sql_query = sql_query + "p double precision);"
                            self.cursor.execute(sql_query)
                    elif line[:6] == "learn(":
                        # learn(pred/arity). -> set the target, unless already set
                        if self.target is not None:
                            continue
                        self.targetPredicate = line.split("(")[1].split("/")[0]
                        self.targetArity = int(line.split("/")[1].split(")")[0])
                        arguments = [Var("A"), Var("B")]
                        self._target = Term(str(self.targetPredicate), *arguments)
                        #self.hypothesisAscii = 64 + self.targetArity
                        self.hypothesisFreeVars = 0
                        if self.targetArity != 2:
                            getLogger('probfoil').error("Arity of Target Predicate (" + self.targetPredicate + ") is " + str(self.targetArity) + " instead of 2.")
                            return
                    elif line[:5] == "mode(":
                        #Mode is not required when generating candidates from AMIE
                        continue
                    else:
                        #Read Probabilistic Fact
                        prob = "0"
                        predicate = ""
                        if "::" in line.split('"')[0]:
                            predicate = line.split("::")[1].split("(")[0]
                            prob = line.split("::")[0]
                            if float(prob) > 1 - self.tolerance:
                                prob = str(eval("1 - " + str(self.tolerance)))
                        else:
                            # Deterministic fact: clamp probability just below 1.
                            predicate = line.split("(")[0]
                            prob = str(eval("1 - " + str(self.tolerance)))
                        self.closedWorldTotal[predicate] += 1
                        if self.factsWithQuotes:
                            # Arguments are quoted strings that may themselves
                            # contain '(' or ','.
                            subject = line.split('(')[1].split('","')[0] +'"'
                            object = '"' + '('.join(line.split('(')[1:]).split('","')[1][:-3]
                        else:
                            subject = line.split('(')[1].split(",")[0]
                            object = line.split(')')[-2].split(",")[1]
                        # Assign a global integer id to each new constant.
                        if subject not in self.universalConstantId:
                            self.universalConstantId[subject] = self.universalConstantCount
                            self.constantDict[self.predicateDict[predicate][0]][subject] = self.universalConstantCount
                            self.universalConstantCount += 1
                        if object not in self.universalConstantId:
                            self.universalConstantId[object] = self.universalConstantCount
                            self.constantDict[self.predicateDict[predicate][1]][object] = self.universalConstantCount
                            self.universalConstantCount += 1
                        if initializePSQLDB:
                            #subjectIndex = self.constantDict[self.predicateDict[predicate][0]][subject]
                            #objectIndex = self.constantDict[self.predicateDict[predicate][1]][object]
                            subjectIndex = self.universalConstantId[subject]
                            objectIndex = self.universalConstantId[object]
                            self.cursor.execute("INSERT INTO " + predicate + " VALUES (" + str(subjectIndex) + ", " + str(objectIndex) + ", " + prob + ");")
                        if predicate == self.targetPredicate:
                            # Duplicate target facts are combined by noisy-or.
                            args = [subject,object]
                            prob = float(prob)
                            if args in self.examples:
                                oldProb = self._scores_correct[self.examples.index(args)]
                                newProb = prob + oldProb - prob*oldProb
                                self._scores_correct[self.examples.index(args)] = newProb
                                #if oldProb < self.negativeThreshold and newProb >= self.negativeThreshold:
                                #    self.negatives.remove(self.examples.index(args))
                            else:
                                self._examples.append(args)
                                self._scores_correct.append(prob)
                                #if prob < self.negativeThreshold:
                                #    self.negatives.add(len(self.examples)-1)
                inputf.close()
            if self.target is not None:
                self.targetArity = self._target._Term__arity
                self.targetPredicate = self._target._Term__functor
                #self.hypothesisAscii = 64 + self.targetArity
                self.hypothesisFreeVars = 0
                if self.targetArity != 2:
                    getLogger('probfoil').error("Arity of Target Predicate (" + self.targetPredicate + ") is " + str(self.targetArity) + " instead of 2.")
                    return
            if inputFile == None:
                for file in self.data:
                    read(file)
            else:
                read(inputFile)
        self.totalExamples = len(self.examples)
        getLogger('probfoil').info('%-*s: %d' % (self.pad, "Number of examples (M)", self.totalExamples))
        getLogger('probfoil').info('%-*s: %.4f' % (self.pad, "Positive probabilistic part (P)", sum(self._scores_correct)))
        getLogger('probfoil').info('%-*s: %.4f' % (self.pad, "Negative probabilistic part (N)", self.totalExamples - sum(self._scores_correct)))
        # NOTE: relies on Python 2 semantics — dict.keys() returns a list here.
        self.predicateList = self.predicateDict.keys()
        self.predicateList.remove(self.targetPredicate)
        self.lams.pop(self.targetPredicate, None)
        time_total = time() - time_start
        self._time_read = self._time_read + time_total
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Target Base List", str(self.predicateDict[self.targetPredicate])))
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Predicate Dict", str(self.predicateDict)))
        getLogger('probfoil').log(8, '%-*s: %s' % (self.pad, "Universal Constant Dict", str(self.universalConstantId)))
        getLogger('probfoil').debug('%-*s: %.1fs' % (self.pad-1, "Time - readFile", time_total))
def convertProblogToAmie(self):
if not(os.path.exists(self.TSVFile)):
inputf = open(self.InputFile, 'r')
outputf = open(self.TSVFile, 'w+')
for line in inputf:
line = line.replace(" ","")
if line == "\n" or line[0] == "%" or line[:4] == "mode" or line[:5] == "learn":
continue
elif line[:4] == "base":
predicate = line.split('(')[1]
attributes = line.split('(')[2].split(')')[0].split(',')
outputf.write("<" + predicate + ">\t<http://www.w3.org/2000/01/rdf-schema#domain>\t<" + attributes[0] + ">\n")
outputf.write("<" + predicate + ">\t<http://www.w3.org/2000/01/rdf-schema#range>\t<" + attributes[1] + ">\n")
else:
#Read Probabilistic Fact
if self.factsWithQuotes:
if "::" in line.split('"')[0]:
predicate = line.split("::")[1].split("(")[0]
else:
predicate = line.split("(")[0]
subject = line.split('(')[1].split('","')[0] +'"'
object = '"' + '('.join(line.split('(')[1:]).split('","')[1][:-3]
attributes = [subject, object]
else:
predicate = line.split(':')[2].split('(')[0]
attributes = line.split(':')[2].split('(')[1].split(')')[0].split(',')
outputf.write("<" + attributes[0] + ">\t<" + predicate + ">\t<" + attributes[1] + ">\n")
inputf.close()
outputf.close()
    def getAmieRules(self):
        """Run AMIE+ on the TSV export and parse its output into candidate rules.

        Builds the java command line (optionally through ssh, optionally
        allowing recursion via -bexr, optionally bounding rule length via
        -maxad), executes it, and converts each mined rule into a
        ``(head_literal, [body_literals])`` pair with canonicalised variable
        names (head variables forced to A/B, body-only variables renamed from
        C upward).  Rules violating the argument types of the target
        predicate are dropped when ``self.enforceTypeConstraints`` is set.

        Returns ``(ruleList, coverageList, stdConfidenceList,
        pcaConfidenceList)`` sorted by decreasing standard confidence and
        truncated to ``self.maxAmieRules`` when that limit is set.  Sets
        ``self.breakNow`` and returns empty lists when nothing survives.
        """
        minhc = self.minhc
        minpca = self.minpca
        if self.ssh:
            path = "Documents/OpenProbFOIL/"
            if self.allowRecursion:
                if self._max_length != None:
                    amieQuery = "ssh himec04 " + '"' + "java -jar " + path + "amie_plus.jar -maxad " + str(self._max_length) + " -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -oute " + path + self.TSVFile + '"'
                else:
                    amieQuery = "ssh himec04 " + '"' + "java -jar " + path + "amie_plus.jar -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -oute " + path+ self.TSVFile + '"'
            else:
                if self._max_length != None:
                    amieQuery = "ssh himec04 " + '"' + "java -jar " + path + "amie_plus.jar -maxad " + str(self._max_length) + " -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -bexr '<" + self.targetPredicate + ">' -oute " + path + self.TSVFile + '"'
                else:
                    amieQuery = "ssh himec04 " + '"' + "java -jar " + path + "amie_plus.jar -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -bexr '<" + self.targetPredicate + ">' -oute " + path+ self.TSVFile + '"'
        else:
            if self.allowRecursion:
                if self._max_length != None:
                    amieQuery = "java -jar amie_plus.jar -maxad " + str(self._max_length) + " -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -oute " + self.TSVFile
                else:
                    amieQuery = "java -jar amie_plus.jar -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -oute " + self.TSVFile
            else:
                if self._max_length != None:
                    amieQuery = "java -jar amie_plus.jar -maxad " + str(self._max_length) + " -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -bexr '<" + self.targetPredicate + ">' -oute " + self.TSVFile
                else:
                    amieQuery = "java -jar amie_plus.jar -minhc " + str(minhc) + " -minpca " + str(minpca) + " -htr '<" + self.targetPredicate + ">' -bexr '<" + self.targetPredicate + ">' -oute " + self.TSVFile
        getLogger('probfoil').debug('Running AMIE+ : %s' % amieQuery)
        # NOTE(review): communicate() returns bytes on Python 3; this
        # str-based split assumes a Python-2 interpreter -- confirm.
        # The slice [13:-4] drops AMIE's fixed header/footer lines.
        outputString = Popen(amieQuery, stdout=PIPE, shell=True).communicate()
        outputList = outputString[0].split('\n')[13:-4]
        ruleList = []
        coverageList = []
        stdConfidenceList = []
        pcaConfidenceList = []
        for row in outputList:
            # Each row: rule TAB headCoverage TAB stdConfidence TAB pcaConfidence ...
            # (commas are decimal separators in AMIE's locale, hence the replace)
            line = row.split("\t")[0]
            confidence = row.split("\t")[1].replace(",", ".")
            stdConfidence = row.split("\t")[2].replace(",", ".")
            pcaConfidence = row.split("\t")[3].replace(",", ".")
            head = line.split("=>")[1].split("<")[1].split(">")[0]
            body = line.split("=>")[0]
            # Tokenise the body into alternating var, relation, var items:
            # '?x' -> single uppercase variable letter, '<rel>' -> relation name.
            i = 0
            bodyItems = []
            while i < len(body):
                if body[i] == "?":
                    bodyItems.append(body[i+1].upper())
                    i += 2
                    continue
                elif body[i] == "<":
                    start = i+1
                    while body[i] != ">":
                        i += 1
                    bodyItems.append(body[start:i])
                i += 1
            headVar1 = line.split("=>")[1].split("?")[1][0].upper()
            headVar2 = line.split("=>")[1].split("?")[2][0].upper()
            replaceVariable = False
            headDict = {}
            bodyDict = {}
            maxAscii = 65
            # If the head does not already use A/B, build a renaming that maps
            # the head variables onto A/B and displaces any clashing names.
            if headVar1 != "A" or headVar2 != "B":
                i = 0
                while i < len(bodyItems):
                    if i % 3 == 0:
                        ascii = ord(bodyItems[i])
                        maxAscii = max(maxAscii, ascii)
                        i += 2
                    elif i % 3 == 2:
                        ascii = ord(bodyItems[i])
                        maxAscii = max(maxAscii, ascii)
                        i += 1
                if headVar1 != "A":
                    headDict[headVar1] = "A"
                    headDict["A"] = chr(maxAscii + 1)
                    maxAscii += 1
                # NOTE(review): this branch tests and maps headVar1 again --
                # it looks like headVar2 was intended; verify against AMIE
                # output whose head variables are not ?a/?b.
                if headVar1 != "B":
                    headDict[headVar1] = "B"
                    headDict["B"] = chr(maxAscii + 1)
                    maxAscii += 1
                replaceVariable = True
            # Second pass: rebuild body literals with canonical variable names
            # (A/B reserved for the head, fresh names from C upward).
            i = 0
            bodyList = []
            bodyDict = {}
            bodyDict["A"] = "A"
            bodyDict["B"] = "B"
            maxAscii = 66
            while i < len(bodyItems):
                if i % 3 == 0:
                    var1 = bodyItems[i]
                    if replaceVariable == True and bodyItems[i] in headDict:
                        var1 = headDict[bodyItems[i]]
                    if var1 in bodyDict:
                        var1 = bodyDict[var1]
                    else:
                        bodyDict[var1] = chr(maxAscii + 1)
                        var1 = chr(maxAscii + 1)
                        maxAscii += 1
                elif i % 3 == 1:
                    relation = bodyItems[i]
                elif i % 3 == 2:
                    var2 = bodyItems[i]
                    if replaceVariable == True and bodyItems[i] in headDict:
                        var2 = headDict[bodyItems[i]]
                    if var2 in bodyDict:
                        var2 = bodyDict[var2]
                    else:
                        bodyDict[var2] = chr(maxAscii + 1)
                        var2 = chr(maxAscii + 1)
                        maxAscii += 1
                    arguments = [Var(var1), Var(var2)]
                    literal = Term(str(relation), *arguments)
                    bodyList.append(literal)
                i += 1
            headArguments = [Var(headVar1), Var(headVar2)]
            headLiteral = Term(str(head), *headArguments)
            rule = (headLiteral, bodyList)
            addRule = True
            if self.enforceTypeConstraints:
                # Every variable must be used with a single argument type,
                # and head variables must match the target predicate's types.
                varDict = {}
                for literal in bodyList:
                    for i, arg in enumerate(literal.args):
                        type = self.predicateDict[literal.functor][i]
                        ascii = ord(str(arg))
                        if ascii < 65 + self.targetArity:
                            if type != self.predicateDict[self.targetPredicate][ascii-65]:
                                #Type Mismatch in the Rule
                                getLogger('probfoil').info('%-*s: %s' % (self.pad, "Removing Rule from AMIE List", str(rule)))
                                addRule = False
                        if arg in varDict:
                            if type != varDict[arg]:
                                #Type Mismatch in the Rule
                                getLogger('probfoil').info('%-*s: %s' % (self.pad, "Removing Rule from AMIE List", str(rule)))
                                addRule = False
                        else:
                            varDict[arg] = type
            if addRule:
                ruleList.append(rule)
                coverageList.append(confidence)
                stdConfidenceList.append(stdConfidence)
                pcaConfidenceList.append(pcaConfidence)
        if len(ruleList) == 0:
            getLogger('probfoil').error('%-*s' % (self.pad, "No significant and type consistent rules returned by AMIE"))
            self.breakNow = True
            return (ruleList, coverageList, stdConfidenceList, pcaConfidenceList)
        else:
            # Sort all four parallel lists by descending standard confidence.
            a = zip(stdConfidenceList, ruleList, coverageList, pcaConfidenceList)
            b = sorted(a, reverse=True)
            stdConfidenceList, ruleList, coverageList, pcaConfidenceList = zip(*b)
            if self.maxAmieRules != None:
                i = int(self.maxAmieRules)
                return (ruleList[:i], coverageList[:i], stdConfidenceList[:i], pcaConfidenceList[:i])
            else:
                return (ruleList, coverageList, stdConfidenceList, pcaConfidenceList)
def getAmieHypothesis(self):
oldRule = FOILRule(self.target)
oldRule = oldRule & Term('fail')
oldRule = FOILRule(self.target, previous = oldRule)
for (headLiteral, amieLiteralList) in self.AmieRuleList:
newRule = FOILRule(target=self.target, previous=oldRule)
for literal in amieLiteralList:
newRule = newRule & literal
oldRule = newRule
return oldRule
def getPRCurves(self, cscores, pscores, deterministic = True):
a = zip(pscores, cscores)
b = sorted(a, reverse=True)
pscores, cscores = zip(*b)
thresholdList = sorted(list(set(pscores)), reverse = True)
#Incremental Deterministic Precision
precisionList = []
recallList = []
tplist = []
tnlist = []
fplist = []
fnlist = []
tp = 0.0
fp = 0.0
#tn = sum(weights) - len(self.old_examples)
fn = float(sum([1 if item > 0 else 0 for item in cscores]))
counter = 0
for threshold in thresholdList:
for predicted, correct in zip(pscores[counter:], cscores[counter:]):
if predicted >= threshold:
#This is a predicted positive example
if correct > 0:
tp += 1
fn -= 1
else:
#tn -= 1
fp += 1
counter += 1
else:
break
tplist.append(tp)
#tnlist.append(tn)
fplist.append(fp)
fnlist.append(fn)
if tp + fp == 0:
precision = 0.0
else:
precision = tp / (tp + fp)
if tp + fn == 0:
recall = 0.0
else:
recall = tp / (tp + fn)
precisionList.append(precision)
recallList.append(recall)
getLogger('probfoil').log(9, "tpList : " + str(tplist))
getLogger('probfoil').log(9, "fpList : " + str(fplist))
getLogger('probfoil').log(9, "fnList : " + str(fnlist))
#getLogger('probfoil').log(9, "tnList : " + str(tnlist) + "\n")
getLogger('probfoil').log(9, "recallList : " + str(recallList))
getLogger('probfoil').log(9, "precisionList : " + str(precisionList) + "\n")
return (recallList, precisionList)
    def learn_parseRules(self, hypothesis, merge = True):
        """Walk the hypothesis rule chain and populate its per-rule lists.

        Fills ``hypothesis.probabilityList``, ``hypothesis.bodyList`` and
        ``hypothesis.predicateList`` by following the ``previous`` links.
        When ``merge`` is set, rules with identical literal sets are fused
        (noisy-or of their probabilities) and the duplicate is unlinked from
        the chain.  Finally creates one probabilistic fact table ``p_i`` per
        surviving rule and seeds ``self.lams`` with its probability.
        """
        time_start = time()
        clauses = hypothesis.to_clauses()
        ruleList = []
        hypothesis.probabilityList = []
        hypothesis.confidenceList = []
        hypothesis.bodyList = []
        literalSetList = []
        hypothesis.predicateList = []
        rule = hypothesis
        # Traverse the chain from the newest rule back to the oldest;
        # a missing rule probability is treated as (1 - tolerance).
        while rule.previous != None:
            ruleList.append(rule)
            prob = rule.get_rule_probability()
            if prob == None:
                prob = 1 - self.tolerance
            hypothesis.probabilityList.append(prob)
            #hypothesis.confidenceList.append(rule.confidence)
            body = rule.get_literals()[1:]
            hypothesis.bodyList.append(body)
            literalSet = set()
            for literal in body:
                literalSet.add(literal)
                predicate = literal.functor
                if predicate not in hypothesis.predicateList and predicate not in ["true", "fail", "false"]:
                    hypothesis.predicateList.append(predicate)
            literalSetList.append(literalSet)
            rule = rule.previous
        if merge :
            # Fuse rules whose bodies contain exactly the same literals.
            i = 0
            iRule = hypothesis
            while i < len(hypothesis.bodyList):
                j = i + 1
                previousjRule = iRule
                jRule = iRule.previous
                while j < len(hypothesis.bodyList):
                    if literalSetList[i] == literalSetList[j]:
                        # Merge rules i and j
                        # Update Prob of first rule
                        # noisy-or combination: p = p1 + p2 - p1*p2
                        p1 = hypothesis.probabilityList[i]
                        p2 = hypothesis.probabilityList[j]
                        p = p1 + p2 - p1*p2
                        hypothesis.probabilityList[i] = p
                        if p > 1-self.tolerance:
                            iRule.set_rule_probability(None)
                        else:
                            iRule.set_rule_probability(p)
                        # Delete second rule
                        # NOTE(review): after unlinking, jRule still points at
                        # the removed rule; if the very next list entry also
                        # merges with rule i, the unlink below acts on a stale
                        # pointer -- verify with two consecutive duplicates.
                        previousjRule.previous = jRule.previous
                        if j == len(hypothesis.bodyList) - 1:
                            self.lastRuleMerged = True
                        del hypothesis.bodyList[j]
                        del hypothesis.probabilityList[j]
                        del literalSetList[j]
                        continue
                    j += 1
                    previousjRule = jRule
                    jRule = jRule.previous
                iRule = iRule.previous
                i += 1
        # Lists were built newest-first; flip them into rule order.
        hypothesis.probabilityList.reverse()
        hypothesis.bodyList.reverse()
        hypothesis.predicateList.reverse()
        for i, prob in enumerate(hypothesis.probabilityList):
            # One auxiliary fact table p_i per rule; its lambda is capped
            # just below 1 to keep the log-based losses finite.
            hypothesis.predicateList.append("p_" + str(i))
            tableName = "p_" + str(i)
            self.cursor.execute("DROP TABLE IF EXISTS " + tableName + ";")
            self.cursor.execute("CREATE TABLE " + tableName + " (v0 integer, p double precision);")
            if tableName not in self.lams:
                if prob < 1 - self.tolerance:
                    self.lams[tableName] = prob
                else:
                    self.lams[tableName] = 1 - self.tolerance
        time_total = time() - time_start
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Probability List", str(hypothesis.probabilityList)))
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Body List", str(hypothesis.bodyList)))
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Predicate List", str(hypothesis.predicateList)))
        getLogger('probfoil').info('%-*s: %.1fs' % (self.pad, "Time - parseRules", time_total))
    def learn_getQueryString(self, hypothesis):
        """Turn the hypothesis bodies into one disjunctive query string.

        Variables beyond the target's head variables (A, B, ...) are renamed
        to fresh ``V<n>`` names, each rule body gets its ``p_i`` auxiliary
        fact appended, and the bodies are joined with ' v ' into
        ``hypothesis.queryString``.  Also records the number of fresh
        variables used in ``hypothesis.totalFreeVars``.
        """
        time_start = time()
        # --------------------------- Make a single query out of the hypothesis ---------------------------
        #ruleAscii = 64 + self.targetArity
        freeVarId = 0
        for i, body in enumerate(hypothesis.bodyList):
            replaceDict = {}
            for j, literal in enumerate(body):
                varList = []
                for arg in literal.args:
                    # Head variables are single letters A..; anything past
                    # 64 + targetArity is a rule-local variable to rename.
                    if ord(str(arg)) > 64 + self.targetArity:
                        if str(arg) in replaceDict:
                            varList.append(Var(replaceDict[str(arg)]))
                        else:
                            #replaceDict[str(arg)] = chr(ruleAscii + 1)
                            #varList.append(Var(chr(ruleAscii + 1)))
                            #ruleAscii += 1
                            replaceDict[str(arg)] = "V" + str(freeVarId)
                            varList.append(Var("V" + str(freeVarId)))
                            freeVarId += 1
                    else:
                        varList.append(arg)
                body[j] = Term(literal.functor, *varList)
            #p = Term("p_" + str(i), *[Var(chr(ruleAscii+1))])
            #ruleAscii += 1
            # Attach the rule's probabilistic fact p_i with its own fresh var.
            p = Term("p_" + str(i), *[Var("V" + str(freeVarId))])
            freeVarId += 1
            body.append(p)
        #hypothesis.maxAscii = ruleAscii
        hypothesis.totalFreeVars = freeVarId
        # str(body) looks like '[lit1, lit2]'; strip brackets and spaces.
        hypothesis.queryString = " v ".join([str(item)[1:-1].replace(" ","") for item in hypothesis.bodyList])
        time_total = time() - time_start
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Body List", str(hypothesis.bodyList)))
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Query String", hypothesis.queryString))
        getLogger('probfoil').info('%-*s: %.1fs' % (self.pad, "Time - getQueryString", time_total))
def instantiateTables(self, instantiatedTableSet):
# ---------------------------------- Create Instantiated Tables ----------------------------------
getLogger('probfoil').log(8, 'Instantiated Table Set\t\t\t: %s' % str(instantiatedTableSet))
getLogger('probfoil').log(8, 'Previous Instantiated Table Set\t: %s' % str(self.previousInstantiatedTableSet))
for tableItem in instantiatedTableSet - self.previousInstantiatedTableSet:
tableNames = tableItem.split("_")
table = tableNames[0]
argList = tableNames[1:]
selectString = ""
whereString = ""
count = 0
for i,arg in enumerate(argList):
if arg != "all":
if whereString == "":
whereString = "v" + str(i) + " = " + str(arg)
else:
whereString = whereString + " AND v" + str(i) + " = " + str(arg)
else:
if selectString == "":
selectString = "v" + str(i) + " as v" + str(count)
else:
selectString = selectString + ", v" + str(i) + " as v" + str(count)
count += 1
if selectString == "":
self.cursor.execute("Select ior(p) from " + table + " where " + whereString +";")
prob = self.cursor.fetchone()[0]
# Create a table by the name 'newTable' which will have exactly 1 free variable
self.cursor.execute("DROP TABLE IF EXISTS " + tableItem + ";")
self.cursor.execute("CREATE TABLE " + tableItem + " (v0 integer, p double precision);")
if prob != "(1 - 1)":
prob = eval(prob)
if prob > 1 - self.tolerance:
prob = 1 - self.tolerance
self.cursor.execute("INSERT INTO " + tableItem + " VALUES (0, "+str(prob)+");")
elif whereString == "":
getLogger('probfoil').error('Exception Occurred: Empty selectString or whereString in ' % tableItem)
return
else:
selectString = selectString + ", p"
getLogger('probfoil').log(8, 'Probfoil: CREATE TABLE IF NOT EXISTS %s AS (SELECT %s FROM %s WHERE %s);' % (tableItem, selectString, table, whereString))
self.cursor.execute("CREATE TABLE IF NOT EXISTS " + tableItem + " AS (SELECT " + selectString + " FROM " + table + " WHERE " + whereString + ");")
self.previousInstantiatedTableSet.add(tableItem)
    def getQueryForExample(self, hypothesis, example):
        """Instantiate the hypothesis query string for one training example.

        On the first call per hypothesis, scans ``hypothesis.queryString``
        character by character, rewriting every literal that mentions a head
        variable (A..) into a placeholder table name such as
        ``pred_A_all`` and caching the result in
        ``hypothesis.replaceableQuery`` / ``hypothesis.replaceableTables``.
        Subsequent calls only substitute the example's constant ids for the
        head-variable letters, create the needed instantiated tables, and
        return the ground query string.
        """
        instantiatedTableSet = set()
        if hypothesis.replaceableQuery == '':
            hypothesis.replaceableTables = set()
            #ruleAscii = hypothesis.maxAscii
            freeVarId = hypothesis.totalFreeVars
            k = 1
            start = 0
            self.replaceBody = False
            instantiatedQueryString = ""
            # Character-level scan of the query string; 'start' marks the
            # beginning of the current literal, 'left' its '(' position.
            while k < len(hypothesis.queryString):
                if hypothesis.queryString[k-1]=="(":
                    # First argument of a literal.
                    replacePredicate = False
                    table = hypothesis.queryString[start:k-1]
                    newTable = table
                    left = k-1
                    l = 0
                    if ord(hypothesis.queryString[k]) <= 64 + self.targetArity:
                        #baseNumber = ord(hypothesis.queryString[k]) - 65
                        #base = self.predicateDict[self.targetPredicate][baseNumber]
                        #value = example[baseNumber]
                        #newTable = newTable + "_" + str(self.universalConstantId[value])
                        newTable += "_" + hypothesis.queryString[k]
                        self.replaceBody = True
                        replacePredicate = True
                    else:
                        newTable = newTable + "_all"
                elif hypothesis.queryString[k-1]=="," and hypothesis.queryString[k-2]!=")":
                    # Subsequent argument inside the same literal.
                    l = l + 1
                    if ord(hypothesis.queryString[k]) <= 64 + self.targetArity:
                        #baseNumber = ord(hypothesis.queryString[k]) - 65
                        #base = self.predicateDict[self.targetPredicate][baseNumber]
                        #value = example[baseNumber]
                        #newTable = newTable + "_" + str(self.universalConstantId[value])
                        newTable += "_" + hypothesis.queryString[k]
                        self.replaceBody = True
                        replacePredicate = True
                    else:
                        newTable = newTable + "_all"
                elif hypothesis.queryString[k]==")":
                    if replacePredicate == True: #This means that the literal contains at least 1 fixed variable that needs instantiation
                        #Replace 'hypothesis.queryString[left:k+1]' by only free variables
                        # If variableString = '(A,B,C)' ==> '(C)'
                        # If variableString = '(A,B)' or '(A)' ==> then don't add the newTable. Instead execute a query to get the probability of a tuple when A and B are instantiated.
                        tableNames = newTable.split("_")
                        table = tableNames[0]
                        argList = tableNames[1:]
                        varList = hypothesis.queryString[left+1:k].split(",")
                        reallyReplacePredicate = False
                        varString = ""
                        for j, arg in enumerate(argList):
                            if arg == "all":
                                reallyReplacePredicate = True
                                if varString == "":
                                    varString = varList[j]
                                else:
                                    varString = varString + "," + varList[j]
                        if reallyReplacePredicate == True: #Eg: '(A,B,C)' ==> '(C)'
                            hypothesis.replaceableTables.add(newTable)
                            instantiatedQueryString = instantiatedQueryString + newTable + "(" + varString + ")"
                            #instantiatedQueryString = instantiatedQueryString + newTable + hypothesis.queryString[left:k+1]
                        else: #This means that the literal contains only fixed variables which needs instantiation, Eg: (A,B) or (A)
                            #Calculate the probability of a tuple from a fully instantiated table name
                            #'author_0_1' ==> Create and execute a psql query which subsets 'author' on v0 = 0 and v1 = 1 and then aggregate by ior
                            hypothesis.replaceableTables.add(newTable)
                            '''
                            whereString = ""
                            for j, arg in enumerate(argList):
                                if whereString == "":
                                    whereString = "v" + str(j) + " = " + str(arg)
                                else:
                                    whereString = whereString + " AND v" + str(j) + " = " + str(arg)
                            self.cursor.execute("Select ior(p) from " + table + " where " + whereString +";")
                            prob = self.cursor.fetchone()[0]
                            # Create a table by the name 'newTable' which will have exactly 1 free variable
                            self.cursor.execute("DROP TABLE IF EXISTS " + newTable + ";")
                            self.cursor.execute("CREATE TABLE " + newTable + " (v0 integer, p double precision);")
                            if prob != "(1 - 1)":
                                prob = eval(prob)
                                if prob > 1 - self.tolerance:
                                    prob = 1 - self.tolerance
                                self.cursor.execute("INSERT INTO " + newTable + " VALUES (0, "+str(prob)+");")
                            '''
                            #instantiatedQueryString = instantiatedQueryString + newTable + "(" + chr(ruleAscii + 1) +")"
                            #ruleAscii += 1
                            instantiatedQueryString = instantiatedQueryString + newTable + "(V" + str(freeVarId) +")"
                            freeVarId += 1
                    else:
                        '''
                        if table in self.deterministicFactDict and len(self.deterministicFactDict[table]) != 0:
                            getLogger('probfoil').log(8, table + " has deterministic tuples.")
                        else:
                        '''
                        instantiatedQueryString = instantiatedQueryString + hypothesis.queryString[start:k+1]
                    start = k+1
                elif (hypothesis.queryString[k]=="," and hypothesis.queryString[k-1]==")") or hypothesis.queryString[k]=="~":
                    # Literal separator (or negation marker): copy verbatim.
                    instantiatedQueryString = instantiatedQueryString + hypothesis.queryString[k]
                    start = k+1
                elif hypothesis.queryString[k-3:k] == " v ":
                    # Add a dummy variable wrt to current prob. Later reset the prob to 1
                    instantiatedQueryString = instantiatedQueryString + " v "
                    start = k
                k = k + 1
            # Clean up stray commas per clause; an empty clause invalidates
            # the whole cached query.
            clauseList = instantiatedQueryString.split(' v ')
            if '' not in clauseList:
                instantiatedQueryString = ''
                for clause in clauseList:
                    clauseSplit = clause.split(',')
                    clauseSplit[:] = (value for value in clauseSplit if value != '')
                    if instantiatedQueryString == '':
                        instantiatedQueryString = ','.join(clauseSplit)
                    else:
                        instantiatedQueryString = instantiatedQueryString + ' v ' + ','.join(clauseSplit)
                hypothesis.replaceableQuery = instantiatedQueryString
            else:
                hypothesis.replaceableQuery = ''
        # Substitute this example's constant ids for the head-variable letters.
        query = copy(hypothesis.replaceableQuery)
        for i, value in enumerate(example):
            query = query.replace(chr(65+i), str(self.universalConstantId[value]))
        instantiatedTables = set()
        for element in hypothesis.replaceableTables:
            table = copy(element)
            for i, value in enumerate(example):
                table = table.replace(chr(65+i), str(self.universalConstantId[value]))
            instantiatedTables.add(table)
        self.instantiateTables(instantiatedTables)
        return query
def learn_getQueryList(self, hypothesis):
time_start = time()
# ------------------------ Query for each of the examples using SafeSapmle -----------------------
i = 0
while i < self.totalExamples:
example = self.examples[i]
self.querySS[i] = self.getQueryForExample(hypothesis, example)
i = i + 1
#hypothesis.maxAscii = ruleAscii
hypothesis.totalFreeVars = freeVarId
getLogger('probfoil').log(9, 'Query List\t\t\t\t\t\t: %s' % str(self.querySS[0]))
hypothesis.querySS = copy(self.querySS) # TO DO: Need to Speed up
time_total = time() - time_start
getLogger('probfoil').debug('Time - getQueryList\t\t\t\t: %.1fs' % time_total)
    def executeCanonicalExpression(self, SQLQuery, tableList, variableMapping):
        """Rewrite a canonical symbolic SQL expression to concrete names and run it.

        Placeholders between '<' and '>' are expanded: ``table<n>`` becomes
        the n-th concrete table name from ``tableList``; inside other
        placeholders, ``z_table<n>`` lambda references are mapped to the
        underlying base-table lambda, and ``y_<var>`` domain references are
        replaced by the size of the variable's constant domain (1 for the
        auxiliary 'p' tables and 'all').  The rewritten query is executed on
        ``self.cursor``; returns the result wrapped as '(1 - exp(...))', or
        None on parse failure, unsafe query, or SQL error.
        """
        if SQLQuery in ["Failed to parse", None, "Query is unsafe"]:
            return None
        outputString = None
        trueSQLQuery = ""
        i = 0
        while i < len(SQLQuery):
            if SQLQuery[i] == "<":
                # Collect the placeholder body between '<' and '>'.
                start = i + 1
                while SQLQuery[i] != ">":
                    i += 1
                expression = SQLQuery[start:i]
                trueExpression = ""
                if expression[0:5] == "table":
                    tableNumber = int(expression[5:])
                    trueExpression = tableList[tableNumber]
                else:
                    # Replace Domains and Lambdas appropriately
                    # Eg:z_table2 >> z_author_0_0 >> z_author
                    # Eg:y_A >> y_researcher; y_B >> y_paper >> Actual value of number of different constants as 'papers'
                    j = 0
                    lastEnd = 0
                    while j < len(expression)-1:
                        if expression[j:j+2] == "z_":
                            # Lambda reference: resolve table number to the
                            # base table name (auxiliary p_* tables kept as-is).
                            trueExpression = trueExpression + expression[lastEnd:j]
                            start = j
                            j += 2
                            while (expression[j].isalpha() or expression[j].isdigit()) and j < len(expression):
                                j += 1
                            tableString = expression[start+2:j]
                            tableNumber = int(tableString[5:])
                            table = tableList[tableNumber]
                            actualTable = table.split('_')[0]
                            if actualTable == "p":
                                #trueExpression = trueExpression + "0"
                                trueExpression = trueExpression + "z_" + str(table)
                            else:
                                trueExpression = trueExpression + "z_" + str(actualTable)
                            lastEnd = j
                            continue
                        elif expression[j:j+2] == "y_":
                            # Domain-size reference: substitute the number of
                            # constants for the variable's mapped type.
                            trueExpression = trueExpression + expression[lastEnd:j]
                            start = j
                            variableString = expression[start+2]
                            k = 3
                            while expression[start+k].isdigit():
                                variableString = variableString + expression[start+k]
                                k += 1
                            if start + k == len(expression):
                                break
                            j += k
                            if variableMapping[variableString] == "p":
                                domain = 1
                            elif variableMapping[variableString] == "all":
                                domain = 1
                            else:
                                domain = len(self.constantDict[variableMapping[variableString]])
                            trueExpression = trueExpression + str(domain)
                            lastEnd = j
                            continue
                        j += 1
                    trueExpression = trueExpression + expression[lastEnd:]
                trueSQLQuery = trueSQLQuery + trueExpression
            else:
                trueSQLQuery = trueSQLQuery + SQLQuery[i]
            i += 1
        try:
            self.cursor.execute(trueSQLQuery)
            output = self.cursor.fetchall()
            if output[0][0] not in ["Failed to parse", None, "Query is unsafe"]:
                outputString = "(1 - exp(" + output[0][0] + "))"
        except psycopg2.Error as e:
            getLogger('probfoil').error('Exception Occurred\t\t\t\t: %s' % str(e))
            getLogger('probfoil').warning("Execute Expression >> SQL error \t: " + e.pgerror[:-1])
            getLogger('probfoil').warning("Execute Expression >> Query \t: " + trueSQLQuery)
        return outputString
    def simpleEval(self, l):
        """Partially evaluate an arithmetic expression string in place.

        First normalises trivial subexpressions via textual rewrites
        (identity multiplications, +/- 0 terms, log(-0 + 1)); then scans the
        string, eagerly eval()-ing every fully numeric parenthesised group
        or function argument (exp/log/max/Max), and folding the numeric part
        of mixed symbolic+numeric sums into a single constant.  Returns the
        simplified string; symbolic variables (e.g. y[0]) are left intact.
        """
        l = l.replace("log(-0 + 1)", "0")
        l = l.replace("*1 ", " ")
        l = l.replace(" 1*", " ")
        l = l.replace("(0 + ", "(")
        l = l.replace("(0 - ", "(-")
        l = l.replace(" - 0)", ")")
        l = l.replace(" + 0)", ")")
        l = l.replace(",0 + ", ",")
        l = l.replace(",0 - ", ",-")
        l = l.replace(" - 0,", ",")
        l = l.replace(" + 0,", ",")
        l = l.replace(")*1)", "))")
        i = 0
        # consecutiveBracket: True while scanning the innermost bracketed span;
        # operatorStack tracks '(' and 'o' (a function name preceding a '(').
        consecutiveBracket = False
        multiplicativeIndicator = False
        operatorStack = []
        while i < len(l):
            if i + 3 < len(l) and l[i:i+3] in ['exp', 'log', 'max', 'Max']:
                operatorStack.append('o')
                i += 3
                continue
            if l[i] == "(":
                operatorStack.append('(')
                left = i+1
                consecutiveBracket = True
                if l[i-1] == "*":
                    multiplicativeIndicator = True
            elif l[i] == ",":
                if consecutiveBracket == False:
                    consecutiveBracket = True
                    left = i+1
                elif consecutiveBracket == True:
                    #print("Old i \t\t\t= " + str(i))
                    # End of a function argument: try to evaluate it fully.
                    right = i
                    expression = l[left:right]
                    #print("Expression \t\t= " + expression)
                    try:
                        ans = eval(expression)
                        l = l[:left] + str(ans) + l[right:]
                        i = left + len(str(ans)) + 1
                        left = i
                        continue
                    except:
                        # Mixed symbolic/numeric sum: fold all numeric terms
                        # into one constant, keep symbolic terms verbatim.
                        expression = expression.replace("- ","-")
                        expression = expression.replace("+ ","+")
                        expression = expression.replace("+-","-")
                        exprList = expression.split(" ")
                        numericExprList = []
                        symbolicExpr = ""
                        for j, expr in enumerate(exprList):
                            try:
                                number = eval(expr)
                                numericExprList.append(number)
                            except:
                                if symbolicExpr == "":
                                    if expr[0] == '+':
                                        symbolicExpr = expr[1:]
                                    else:
                                        symbolicExpr = expr
                                else:
                                    if expr[0] == '+':
                                        symbolicExpr = symbolicExpr + " + " + expr[1:]
                                    elif expr[0] == '-':
                                        symbolicExpr = symbolicExpr + " - " + expr[1:]
                                    else:
                                        symbolicExpr = symbolicExpr + " + " + expr
                        if symbolicExpr != "" and numericExprList != []:
                            newExpression = symbolicExpr + " + " + str(sum(numericExprList))
                        elif symbolicExpr == "":
                            newExpression = str(sum(numericExprList))
                        elif numericExprList == []:
                            newExpression = symbolicExpr
                        #print("New Expression \t\t\t= " + newExpression)
                        l = l[:left] + newExpression + l[right:]
                        i = left + len(newExpression) + 1
                        left = i
                        continue
            elif l[i] == ")":
                if i + 1 < len(l) and l[i+1] == "*":
                    multiplicativeIndicator = True
                if consecutiveBracket == True:
                    right = i
                    expression = l[left:right]
                    try:
                        ans = eval(expression)
                        # Keep surrounding parens when they belong to a
                        # function call or a multiplication; otherwise drop them.
                        if len(operatorStack) > 1 and operatorStack[-1] == "(" and operatorStack[-2] == "o":
                            l = l[:left] + str(ans) + l[right:]
                            i = left + len(str(ans))
                            operatorStack.pop()
                            operatorStack.pop()
                        elif multiplicativeIndicator == True:
                            l = l[:left] + str(ans) + l[right:]
                            i = left + len(str(ans))
                            multiplicativeIndicator = False
                        else:
                            l = l[:left-1] + str(ans) + l[right+1:]
                            i = left - 1 + len(str(ans))
                            operatorStack.pop()
                        consecutiveBracket = False
                        continue
                    except:
                        # Same numeric-folding fallback as in the ',' branch.
                        expression = expression.replace("- ","-")
                        expression = expression.replace("+ ","+")
                        expression = expression.replace("+-","-")
                        exprList = expression.split(" ")
                        numericExprList = []
                        symbolicExpr = ""
                        for j, expr in enumerate(exprList):
                            try:
                                number = eval(expr)
                                numericExprList.append(number)
                            except:
                                if symbolicExpr == "":
                                    if expr[0] == '+':
                                        symbolicExpr = expr[1:]
                                    else:
                                        symbolicExpr = expr
                                else:
                                    if expr[0] == '+':
                                        symbolicExpr = symbolicExpr + " + " + expr[1:]
                                    elif expr[0] == '-':
                                        symbolicExpr = symbolicExpr + " - " + expr[1:]
                                    else:
                                        symbolicExpr = symbolicExpr + " + " + expr
                        if symbolicExpr != "" and numericExprList != []:
                            newExpression = symbolicExpr + " + " + str(sum(numericExprList))
                        elif symbolicExpr == "":
                            newExpression = str(sum(numericExprList))
                        elif numericExprList == []:
                            newExpression = symbolicExpr
                        if len(operatorStack) >= 2 and operatorStack[-1] == "(" and operatorStack[-2] == "o":
                            l = l[:left] + newExpression + l[right:]
                            i = left + len(newExpression) + 1
                            operatorStack.pop()
                            operatorStack.pop()
                        elif multiplicativeIndicator == True:
                            l = l[:left] + newExpression + l[right:]
                            i = left + len(newExpression) + 1
                            multiplicativeIndicator = False
                        else:
                            l = l[:left-1] + newExpression + l[right+1:]
                            i = left + len(newExpression)
                            operatorStack.pop()
                        consecutiveBracket = False
                        continue
            i += 1
        return l
def getQueryExpression(self, query):
#query = "r1(A) v r2(B),r3(C) v r4(D),r5(E),r3(F) v r6(G),r1(H),r7(I)" #Test
if query in ['true', '']:
return '1'
conjunctList = query.split(' v ')
if len(conjunctList) > 1:
newConjunctList = self.partitionUCQ(query)
mainExpression = ''
for conjunct in newConjunctList:
expression = self.getConjunctExpression(conjunct)
if expression != None:
if mainExpression == '':
mainExpression = '(1 - ' + expression + ')'
else:
mainExpression = mainExpression + '*(1 - ' + expression + ')'
if mainExpression != '':
mainExpression = '(1 - ' + mainExpression + ')'
else:
mainExpression = None
else:
mainExpression = self.getConjunctExpression(query)
return mainExpression
def getConjunctExpression(self, query):
# query = subpartof_10_14([),p_11(N) #Test
canonicalQuery, tableList, variableMapping = self.getCanonicalForm(query)
canonicalExpression = ""
if canonicalQuery in self.symbolicQueryDict:
canonicalExpression = self.symbolicQueryDict[canonicalQuery]
else:
time_start = time()
canonicalExpression = getExpression(canonicalQuery, self.open_world)
self._time_getExpression = self._time_getExpression + time() - time_start
self._stats_getExpression += 1
self.symbolicQueryDict[canonicalQuery] = canonicalExpression
outputString = self.executeCanonicalExpression(canonicalExpression, tableList, variableMapping)
return outputString
    def getLossForExample(self, hypothesis, i):
        """Build the loss-term string for example ``i`` under the global score.

        The example's probability expression is computed (or taken from the
        ``hypothesis.expressionList`` cache) with rule/fact probabilities
        rewritten as ``y[j]`` indices.  Depending on ``self.global_score``
        the returned string encodes an absolute-error, squared-error or
        cross-entropy term; closed/open-world negatives get their respective
        weights.  Returns "0" when the expression cannot be computed.
        """
        if hypothesis.expressionList[i] == '':
            example = self.examples[i]
            query = self.getQueryForExample(hypothesis, example)
            outputString = self.getQueryExpression(query)
            if outputString in ["Failed to parse", None, "Query is unsafe"]:
                return "0"
            if outputString != "1":
                term = "(" + outputString + ")"
            else:
                term = "1"
            # Map each predicate's lambda symbol z_<pred> to a y[j] slot;
            # reverse sort prevents a shorter name clobbering a longer one.
            for j, predicate in enumerate(sorted(hypothesis.predicateList, reverse=True)):
                term = term.replace("z_"+predicate,"y["+str(j)+"]")
            hypothesis.expressionList[i] = term
        else:
            term = hypothesis.expressionList[i]
        loss = '0'
        correct = self._scores_correct[i]
        # The leading '0' plus separator is sliced off before returning
        # (loss[3:] for "0 +", loss[2:] for "0 " before '-').
        if self.global_score == "accuracy":
            if i in self.CWNegatives:
                loss = loss + " +" + str(self.CWNegativeWeight) +"*" + term + ""
            elif i in self.OWNegatives:
                loss = loss + " +" + str(self.OWNegativeWeight) +"*" + term + ""
            else:
                loss = loss + " +abs(" + str(correct) + " -" + term + ")"
            return loss[3:]
        elif self.global_score == "squared_loss":
            if i in self.CWNegatives:
                loss = loss + " +" + str(self.CWNegativeWeight) +"*(" + term + ")**2"
            elif i in self.OWNegatives:
                loss = loss + " +" + str(self.OWNegativeWeight) +"*(" + term + ")**2"
            else:
                loss = loss + " + (" + str(correct) + " -" + term + ")**2"
            return loss[3:]
        elif self.global_score == "cross_entropy":
            if i in self.CWNegatives:
                loss = loss + " -" + str(self.CWNegativeWeight) + "*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
            elif i in self.OWNegatives:
                loss = loss + " -" + str(self.OWNegativeWeight) + "*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
            else:
                loss = loss + " -" + str(correct) + "*log(max(" + term + "," + str(self.tolerance) + ")) -(1-" + str(correct) + ")*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
            return loss[2:]
    def learn_getLossString(self, hypothesis, scoreType = None, withTolerance = False):
        """Assemble the full loss expression string over all examples.

        For the requested ``scoreType`` (defaulting to ``self.global_score``)
        the per-example probability expressions are computed from
        ``hypothesis.querySS``, rewritten to reference probabilities as
        ``y[j]``, summed into one loss string and cached on the hypothesis
        (``lossStringAcc`` / ``lossStringSL`` / ``lossStringCE``).  Examples
        whose query cannot be evaluated are skipped.  ``withTolerance`` is
        currently unused here.
        """
        time_start = time()
        getLogger('loss').log(8, 'Hypothesis\t\t\t\t\t\t: %s \n' % str(hypothesis))
        loss = "0"
        # Reverse sort so a shorter predicate name never clobbers a longer one
        # during the z_<pred> -> y[j] substitution below.
        predicateList = sorted(hypothesis.predicateList, reverse=True)
        if scoreType == "accuracy" or (scoreType == None and self.global_score == "accuracy"):
            if hypothesis.lossStringAcc != "":
                getLogger('probfoil').log(9, 'Returning old loss string for accuracy')
                return hypothesis.lossStringAcc
            #getLogger('probfoil').log(9, 'Query List\t\t\t\t\t\t: %s' % str(hypothesis.querySS[:10]))
            for i, correct in enumerate(self._scores_correct):
                query = hypothesis.querySS[i]
                outputString = self.getQueryExpression(query)
                if outputString not in ["Failed to parse", None, "Query is unsafe"]:
                    if outputString != "1":
                        term = "(" + outputString + ")"
                    else:
                        term = "1"
                    for j, predicate in enumerate(predicateList):
                        term = term.replace("z_"+predicate,"y["+str(j)+"]")
                    hypothesis.expressionList[i] = term
                    # Negatives are weighted; positives use absolute error.
                    if i in self.CWNegatives:
                        loss = loss + " +" + str(self.CWNegativeWeight) +"*" + term + ""
                    elif i in self.OWNegatives:
                        loss = loss + " +" + str(self.OWNegativeWeight) +"*" + term + ""
                    else:
                        loss = loss + " +abs(" + str(correct) + " -" + term + ")"
                else:
                    continue
            # Strip the leading '0 +' seed.
            hypothesis.lossStringAcc = loss[3:]
            getLogger('loss').log(8, 'Loss String\t\t\t\t\t\t: %s \n' % hypothesis.lossStringAcc)
            return hypothesis.lossStringAcc
        if scoreType == "squared_loss" or (scoreType == None and self.global_score == "squared_loss"):
            if hypothesis.lossStringSL != "":
                getLogger('probfoil').log(9, 'Returning old loss string for squared_loss')
                return hypothesis.lossStringSL
            #getLogger('probfoil').log(9, 'Query List\t\t\t\t\t\t: %s' % str(hypothesis.querySS[:10]))
            for i, correct in enumerate(self._scores_correct):
                query = hypothesis.querySS[i]
                outputString = self.getQueryExpression(query)
                if outputString not in ["Failed to parse", None, "Query is unsafe"]:
                    if outputString != "1":
                        term = "(" + outputString + ")"
                    else:
                        term = "1"
                    for j, predicate in enumerate(predicateList):
                        term = term.replace("z_"+predicate,"y["+str(j)+"]")
                    hypothesis.expressionList[i] = term
                    if i in self.CWNegatives:
                        loss = loss + " +" + str(self.CWNegativeWeight) +"*(" + term + ")**2"
                    elif i in self.OWNegatives:
                        loss = loss + " +" + str(self.OWNegativeWeight) +"*(" + term + ")**2"
                    else:
                        loss = loss + " + (" + str(correct) + " -" + term + ")**2"
                else:
                    continue
            hypothesis.lossStringSL = loss[3:]
            getLogger('loss').log(8, 'Loss String\t\t\t\t\t\t: %s \n' % hypothesis.lossStringSL)
            return hypothesis.lossStringSL
        elif scoreType == "cross_entropy" or (scoreType == None and self.global_score == "cross_entropy"):
            if hypothesis.lossStringCE != "":
                getLogger('probfoil').log(9, 'Returning old loss string for cross entropy')
                return hypothesis.lossStringCE
            #getLogger('probfoil').log(9, 'Query List\t\t\t\t\t\t: %s' % str(hypothesis.querySS[:10]))
            for i, correct in enumerate(self._scores_correct):
                query = hypothesis.querySS[i]
                outputString = self.getQueryExpression(query)
                if outputString not in ["Failed to parse", None, "Query is unsafe"]:
                    # Note: terms that evaluate to the constant "1" are
                    # skipped entirely in the cross-entropy variant.
                    if outputString != "1":
                        term = outputString
                        for j, predicate in enumerate(predicateList):
                            term = term.replace("z_"+predicate,"y["+str(j)+"]")
                        if i in self.CWNegatives:
                            loss = loss + " -" + str(self.CWNegativeWeight) + "*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
                        elif i in self.OWNegatives:
                            loss = loss + " -" + str(self.OWNegativeWeight) + "*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
                        else:
                            loss = loss + " -" + str(correct) + "*log(max(" + term + "," + str(self.tolerance) + ")) -(1-" + str(correct) + ")*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
                        hypothesis.expressionList[i] = term
                    else:
                        continue
                else:
                    continue
            hypothesis.lossStringCE = loss[2:]
            getLogger('loss').log(8, 'Loss String\t\t\t\t\t\t: %s \n' % hypothesis.lossStringCE)
            return hypothesis.lossStringCE
def getGlobalScoreCE(self, hypothesis, expression, lam):
    """Evaluate the (negated) cross-entropy loss expression for a hypothesis.

    ``expression`` is a Python expression string. When it refers to the
    variable ``y``, it is evaluated with ``y`` bound to the weights taken
    from ``lam`` in the order of ``hypothesis.predicateList``.
    Returns the negated value, or ``None`` when evaluation raises.
    """
    # Constant expression: no weight variables involved, evaluate directly.
    if 'y' not in expression:
        return -1 * eval(expression)
    # Bind y[j] to the current weight of the j-th predicate; the name `y`
    # must stay as-is because the expression string references it.
    y = [lam[predicate] for predicate in hypothesis.predicateList]
    try:
        entropy = -1 * eval(expression)
    except Exception as e:
        getLogger('probfoil').error('Exception Occurred\t\t\t\t: %s' % str(e)[:-1])
        entropy = None
        getLogger('probfoil').log(8, 'Exception in Global Score for entropy\t: %s' % expression)
        getLogger('probfoil').warning('y = %s' % str(y))
    return entropy
def getGlobalScore(self, hypothesis, lam, scoreType = None):
    """Evaluate the global score of `hypothesis` for the rule weights `lam`.

    The loss string produced by learn_getLossString() is evaluated with the
    weight vector y taken from `lam` (ordered by hypothesis.predicateList).
    Returns:
        accuracy      -> 1 - loss / totalWeightedExamples
        squared_loss  -> -loss
        cross_entropy -> negated entropy via getGlobalScoreCE()
    or None when the loss string could not be evaluated.
    """
    lossString = self.learn_getLossString(hypothesis, scoreType)
    gScore = 0
    lambdaString = lossString
    def evalFunc(y):
        # Evaluate lambdaString with y as the weight vector; on evaluation
        # errors (typically log() of a non-positive argument) substitute
        # each log(...) term numerically (or with -inf) and retry.
        if not isinstance(y, list):
            y = [y]
        if len(y) != len(hypothesis.predicateList):
            getLogger('probfoil').warning("Length of 'y' = %d isn't same as length of Clause List = %d" % (len(y), len(hypothesis.predicateList)))
            return 0
        ans = 0
        predicateList = sorted(hypothesis.predicateList, reverse=True)
        try:
            expression = lambdaString
            ans = eval(expression)
        except:
            # Replace every "log(-z_<pred> + 1)" with its numeric value
            # (or -inf when the weight is 1), then patch the remaining
            # log(...) occurrences found by getLogList().
            expression1 = lambdaString
            for i, predicate in enumerate(predicateList):
                if y[i] < 1:
                    logString = str(float(log(1-y[i])))
                else:
                    logString = "-float('inf')"
                expression1 = expression1.replace("log(-z_"+ predicate + " + 1)",logString)
            logList, logLocation = getLogList(expression1)
            logOutput = []
            for item in logList:
                try:
                    for i, predicate in enumerate(predicateList):
                        item = item.replace("z_" + predicate,str(y[i]))
                    output = eval(item)
                except:
                    getLogger('probfoil').warning("Exception occurred in logOutput")
                    getLogger('probfoil').warning("item\t\t\t\t:" + item)
                    getLogger('probfoil').warning("Lambda Values\t\t\t\t:" + str(y))
                    output = 0.0
                logOutput.append(output)
            # At each logLocation, replace the log with either the output or with -Inf
            start = 0
            expression2 = ""
            for i, (j, k) in enumerate(logLocation):
                expression2 = expression2 + expression1[start:j]
                if logOutput[i] > 0:
                    expression2 = expression2 + "log(" + str(logOutput[i]) + ")"
                else:
                    expression2 = expression2 + "-float('inf')"
                start = k
            expression2 = expression2 + expression1[start:]
            try:
                ans = eval(expression2)
            except Exception as e:
                getLogger('probfoil').error('Exception Occurred\t\t\t\t: %s' % str(e)[:-1])
                getLogger('probfoil').warning('Exception\t\t\t\t\t: %s' % expression2)
        return ans
    if scoreType == "accuracy" or (scoreType == None and self.global_score == "accuracy"):
        lamList = []
        for predicate in hypothesis.predicateList:
            lamList.append(lam[predicate])
        try:
            loss = evalFunc(lamList)
        except Exception as e:
            getLogger('probfoil').error('Exception Occurred\t\t\t\t: %s' % str(e)[:-1])
            loss = None
            getLogger('probfoil').log(8, 'Exception in Global Score for accuracy\t: %s' % lossString)
            # BUG FIX: the original logged `y` here, which is undefined in
            # this scope (it is a parameter of evalFunc) and raised NameError.
            getLogger('probfoil').warning('y = %s' % str(lamList))
        if loss is None:
            # Align failure behavior with getGlobalScoreCE(): return None
            # instead of crashing on arithmetic with a None loss.
            return None
        gScore = 1 - (loss/(self.totalWeightedExamples))
        getLogger('probfoil').debug('GScore - Accuracy\t\t\t\t: %s' % str(gScore))
    if scoreType == "squared_loss" or (scoreType == None and self.global_score == "squared_loss"):
        # NOTE: the original squared-loss evaluator (evalFunc1) was a
        # verbatim copy of the accuracy one; the shared evalFunc above
        # replaces both.
        lamList = []
        for predicate in hypothesis.predicateList:
            lamList.append(lam[predicate])
        try:
            loss = evalFunc(lamList)
        except Exception as e:
            getLogger('probfoil').error('Exception Occurred\t\t\t\t: %s' % str(e)[:-1])
            loss = None
            getLogger('probfoil').log(8, 'Exception in Global Score for squared_loss\t: %s' % lossString)
            # BUG FIX: same undefined-`y` logging as in the accuracy branch.
            getLogger('probfoil').warning('y = %s' % str(lamList))
        if loss is None:
            return None
        gScore = -1*loss
        getLogger('probfoil').debug('GScore - Squared Loss\t\t\t: %s' % str(gScore))
    elif scoreType == "cross_entropy" or (scoreType == None and self.global_score == "cross_entropy"):
        gScore = self.getGlobalScoreCE(hypothesis, lossString, lam)
        getLogger('probfoil').debug('GScore - Cross Entropy\t\t\t: %s' % str(gScore))
    return gScore
def getGlobalScore_again(self, hypothesis, lam, scoreType = None):
    """Recompute the global score numerically from the cached example scores.

    Unlike getGlobalScore(), no loss-string evaluation is involved: the loss
    is accumulated from hypothesis.scores (predicted values) against
    self._scores_correct (target values), with closed-world and open-world
    negatives weighted by CWNegativeWeight / OWNegativeWeight.

    `lam` is unused and kept for interface compatibility (the original
    built an unused weight vector from it).

    BUG FIX: the original zipped (hypothesis.scores, self._scores_correct)
    but unpacked as (correct, term), swapping predictions and targets, so
    `correct` was actually the prediction and the positive-example terms of
    the accuracy/squared losses degenerated to abs(score - score) == 0.
    """
    getLogger('loss').log(8, 'Hypothesis\t\t\t\t\t\t: %s \n' % str(hypothesis))
    loss = 0
    gScore = 0
    if scoreType == "accuracy" or (scoreType == None and self.global_score == "accuracy"):
        # Weighted absolute error.
        for i, (correct, term) in enumerate(zip(self._scores_correct, hypothesis.scores)):
            if i in self.CWNegatives:
                loss = loss + self.CWNegativeWeight*term
            elif i in self.OWNegatives:
                loss = loss + self.OWNegativeWeight*term
            else:
                loss = loss + abs(correct-term)
        gScore = 1 - (loss/(self.totalWeightedExamples))
        getLogger('probfoil').debug('Loss - Absolute Error\t\t\t: %s' % str(loss))
        getLogger('probfoil').debug('GScore - totalWeightedExamples\t: %s' % str(self.totalWeightedExamples))
        getLogger('probfoil').debug('GScore - Weighted Accuracy\t\t: %s' % str(gScore))
    elif scoreType == "squared_loss" or (scoreType == None and self.global_score == "squared_loss"):
        # Weighted squared error; higher loss -> lower (negated) score.
        for i, (correct, term) in enumerate(zip(self._scores_correct, hypothesis.scores)):
            if i in self.CWNegatives:
                loss = loss + self.CWNegativeWeight*(term**2)
            elif i in self.OWNegatives:
                loss = loss + self.OWNegativeWeight*(term**2)
            else:
                loss = loss + abs(correct-term)**2
        gScore = -1*loss
        getLogger('probfoil').debug('Loss - squared_loss\t\t\t: %s' % str(loss))
        getLogger('probfoil').debug('GScore - squared_loss\t\t\t: %s' % str(gScore))
    elif scoreType == "cross_entropy" or (scoreType == None and self.global_score == "cross_entropy"):
        # Cross entropy with probabilities clamped away from 0/1 by tolerance.
        for i, (correct, term) in enumerate(zip(self._scores_correct, hypothesis.scores)):
            if i in self.CWNegatives:
                loss = loss - self.CWNegativeWeight*log(max(1-(term),self.tolerance))
            elif i in self.OWNegatives:
                loss = loss - self.OWNegativeWeight*log(max(1-(term),self.tolerance))
            else:
                loss = loss - correct*log(max(term,self.tolerance)) - (1-correct)*log(max(1-term,self.tolerance))
        gScore = -1*loss
        getLogger('probfoil').debug('Loss - cross_entropy\t\t\t: %s' % str(loss))
        getLogger('probfoil').debug('GScore - cross_entropy\t\t\t: %s' % str(gScore))
    getLogger('loss').log(8, 'Loss Value\t\t\t\t\t\t: %s \n' % str(loss))
    return gScore
def learn_getGradient(self, lossString):
    """Build a callable loss function from its string form and return its gradient.

    `lossString` must be a Python expression over a weight vector `y`.
    The (gradient, hessian) pair comes from gh(); only the gradient is
    returned.

    BUG FIX: the original used exec("lossFunc = lambda y : " + lossString)
    and then read `lossFunc`; under Python 3 names bound by exec() inside a
    function do not become local variables, so that lookup raised
    NameError. eval() of the lambda expression is equivalent and works on
    both Python 2 and 3.
    """
    lossFunc = eval("lambda y : " + lossString)
    gradient, hessian = gh(lossFunc)
    return gradient
def learn_initializeLambdas(self, hypothesis, learnAllRules = False):
    """Build the initial lambda (weight) vector for SGD, one entry per
    predicate in hypothesis.predicateList.

    Three cases:
      * learnAllRules: "p_0" starts from its stored lambda, every other
        rule weight "p_<i>" from the conditional probability of rule i,
        and non-rule predicates from their closed-world density
        (or 0.0 when self.cwLearning).
      * First SGD run (hypothesis has exactly 3 clauses: Fail, True and
        the 1st rule): "p_0" from the target's closed-world density,
        "p_1" from the AMIE confidence of the newly selected rule.
      * Subsequent runs: the newest rule starts from its AMIE confidence,
        all earlier weights are carried over from self.lams.
    Returns the list of (regularized) initial weights.
    """
    getLogger('probfoil').log(9, str(hypothesis.to_clauses()))
    if learnAllRules:
        y = []
        for j, predicate in enumerate(hypothesis.predicateList):
            if len(predicate) > 2 and predicate[:3] == "p_0":
                y.append(self.regularize(self.lams[predicate], 5))
            elif len(predicate) > 2 and predicate[:2] == "p_":
                # Extract the rule index from the "p_<digits>" name.
                i = 2
                while i < len(predicate) and predicate[i].isdigit():
                    i += 1
                index = int(predicate[2:i])
                confidence = float(self.stdConfidenceList[index-1])
                prob = self.rule_getConditionalProbability(index-1)
                if confidence != prob:
                    getLogger('probfoil').log(9, 'Amie Confidence Value for %s is %s' % (str(hypothesis.to_clauses()[index+1]), str(confidence)))
                    getLogger('probfoil').log(9, 'Conditional Probability for %s is %s' % (str(hypothesis.to_clauses()[index+1]), str(prob)))
                else:
                    getLogger('probfoil').log(9, 'Conditional Probability for %s is %s' % (str(hypothesis.to_clauses()[index+1]), str(prob)))
                y.append(self.regularize(prob, 5))
            else:
                # Non-rule predicate: closed-world learning pins it to 0,
                # otherwise start from fact count / domain size.
                if self.cwLearning:
                    y.append(0.0)
                    continue
                k = 1
                for base in self.predicateDict[predicate]:
                    k = k*len(self.constantDict[base])
                y.append(self.regularize(float(self.closedWorldTotal[predicate])/k, 5))
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Lambdas initialized to", str(y)))
        return y
    if len(hypothesis.to_clauses()) == 3:
        # Hypothesis has 'Fail', 'True' and the 1st rule: first SGD run.
        y = []
        for j, predicate in enumerate(hypothesis.predicateList):
            if len(predicate) > 2 and predicate[:3] == "p_0":
                k = 1
                for base in self.predicateDict[self.targetPredicate]:
                    k = k*len(self.constantDict[base])
                y.append(self.regularize(float(self.closedWorldTotal[self.targetPredicate])/k, 5))
            elif len(predicate) > 2 and predicate[:3] == "p_1":
                prob = float(self.stdConfidenceList[self.selectedAmieRules[-1]])
                getLogger('probfoil').log(9, 'Conditional Probability for %s is %s' % (str(hypothesis.to_clauses()[2]), str(prob)))
                y.append(self.regularize(prob, 5))
            else:
                if self.cwLearning:
                    y.append(0.0)
                    continue
                k = 1
                for base in self.predicateDict[predicate]:
                    k = k*len(self.constantDict[base])
                y.append(self.regularize(float(self.closedWorldTotal[predicate])/k, 5))
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Lambdas initialized to", str(y)))
        return y
    else:
        y = []
        for j, predicate in enumerate(hypothesis.predicateList):
            if len(predicate) > 2 and predicate[:2] == "p_":
                index = int(predicate[2:])
                if index == len(hypothesis.to_clauses()) - 2:
                    # Newest rule: seed from its AMIE confidence.
                    prob = float(self.stdConfidenceList[self.selectedAmieRules[-1]])
                    getLogger('probfoil').log(9, 'Conditional Probability for %s is %s' % (str(hypothesis.to_clauses()[index+1]), str(prob)))
                    y.append(self.regularize(prob, 5))
                else:
                    # Earlier rules carry over their learned weight.
                    y.append(self.regularize(self.lams[predicate], 5))
            else:
                if self.cwLearning:
                    y.append(0.0)
                    continue
                if self.lams[predicate] == 0:
                    k = 1
                    for base in self.predicateDict[predicate]:
                        k = k*len(self.constantDict[base])
                    y.append(self.regularize(float(self.closedWorldTotal[predicate])/k, 5))
                else:
                    y.append(self.regularize(self.lams[predicate], 5))
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Lambdas initialized to", str(y)))
        return y
def learn_stochasticGradientDescent(self, hypothesis):
    """Optimize the lambda weights of `hypothesis` by per-example SGD.

    Each iteration samples one example, builds the stringified loss term
    for it according to self.global_score, obtains the gradient via gh(),
    and takes a clipped step: first on the rule weights ("p_*" entries of
    hypothesis.predicateList), then — unless self.cwLearning — on the
    remaining predicate weights. All weights are kept strictly inside
    (0, 1) by a 5*tolerance margin.
    Returns a copy of self.lams updated with the final weights.
    """
    time_start = time()
    oldLamList = self.learn_initializeLambdas(hypothesis, self.learnAllRules)
    newLamList = oldLamList
    iterations = self.iterations
    fixedPointReached = False
    sameCount = 0
    superOldLamList = copy(oldLamList)
    errorCount = 0
    updateCount = 0
    for k in range(0, iterations):
        # Pick a random example and build its per-example loss expression.
        i = random.randint(0, self.totalExamples - 1)
        term = self.getLossForExample(hypothesis, i)
        if self.global_score == "cross_entropy":
            if term not in ["Failed to parse", None, "Query is unsafe"]:
                if i in self.CWNegatives:
                    loss = " -" + str(self.CWNegativeWeight) + "*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
                elif i in self.OWNegatives:
                    loss = " -" + str(self.OWNegativeWeight) + "*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
                else:
                    correct = self._scores_correct[i]
                    loss = " -" + str(correct) + "*log(max(" + term + "," + str(self.tolerance) + ")) -(1-" + str(correct) + ")*log(max(1-(" + term + ")," + str(self.tolerance) + "))"
            else:
                continue
        elif self.global_score == "accuracy":
            if term not in ["Failed to parse", None, "Query is unsafe"]:
                if i in self.CWNegatives:
                    loss = str(self.CWNegativeWeight) +"*" + term + ""
                elif i in self.OWNegatives:
                    loss = str(self.OWNegativeWeight) +"*" + term + ""
                else:
                    loss = "abs(" + str(self._scores_correct[i]) + " -" + term + ")"
            else:
                continue
        elif self.global_score == "squared_loss":
            if term not in ["Failed to parse", None, "Query is unsafe"]:
                if i in self.CWNegatives:
                    loss = str(self.CWNegativeWeight) +"*(" + term + ")**2"
                elif i in self.OWNegatives:
                    loss = str(self.OWNegativeWeight) +"*(" + term + ")**2"
                else:
                    loss = "(" + str(self._scores_correct[i]) + " -" + term + ")**2"
            else:
                continue
        expression = loss
        # BUG FIX: was exec("evalFunc = lambda y : " + expression). Under
        # Python 3 names bound by exec() inside a function do not become
        # local variables, so the following gh(evalFunc) raised NameError.
        evalFunc = eval("lambda y : " + expression)
        gradient, hessian = gh(evalFunc)
        try:
            # Step 1: update the rule-weight lambdas ("p_*"). The step is
            # rescaled so no component exceeds maxIncrement[0].
            grad = gradient(oldLamList)
            grad = [self.learningRate[0]*component for component in grad]
            maxRatio = 1
            for j, predicate in enumerate(hypothesis.predicateList):
                if len(predicate) > 2 and predicate[:2] == "p_":
                    if maxRatio < abs(grad[j]/self.maxIncrement[0]):
                        maxRatio = abs(grad[j]/self.maxIncrement[0])
            for j, predicate in enumerate(hypothesis.predicateList):
                if len(predicate) > 2 and predicate[:2] == "p_":
                    newLamList[j] = oldLamList[j] - grad[j]/maxRatio
                    # Clamp strictly inside (0, 1).
                    if newLamList[j] < 5*self.tolerance:
                        newLamList[j] = 5*self.tolerance
                    elif newLamList[j] > 1 - 5*self.tolerance:
                        newLamList[j] = 1-5*self.tolerance
                elif self.cwLearning == False:
                    if newLamList[j] < 5*self.tolerance:
                        newLamList[j] = 5*self.tolerance
                    elif newLamList[j] > 1 - 5*self.tolerance:
                        newLamList[j] = 1-5*self.tolerance
            oldLamList = copy(newLamList)
            if self.cwLearning == False:
                # Step 2: update the non-rule predicate lambdas with the
                # second learning rate / increment cap.
                grad = gradient(oldLamList)
                grad = [self.learningRate[1]*component for component in grad]
                maxRatio = 1
                for j, predicate in enumerate(hypothesis.predicateList):
                    if len(predicate) <= 2 or predicate[:2] != "p_":
                        if maxRatio < abs(grad[j]/self.maxIncrement[1]):
                            maxRatio = abs(grad[j]/self.maxIncrement[1])
                for j, predicate in enumerate(hypothesis.predicateList):
                    if len(predicate) <= 2 or predicate[:2] != "p_":
                        newLamList[j] = oldLamList[j] - grad[j]/maxRatio
                        if newLamList[j] < 5*self.tolerance:
                            newLamList[j] = 5*self.tolerance
                        elif newLamList[j] > 1 - 5*self.tolerance:
                            newLamList[j] = 1-5*self.tolerance
                # Fixed-point detection: stop after 100 consecutive
                # iterations with no change in the weight vector.
                if self.terminateAtFixedPoint and newLamList == superOldLamList:
                    if sameCount == 100:
                        fixedPointReached = True
                    else:
                        sameCount += 1
                else:
                    sameCount = 0
                oldLamList = copy(newLamList)
                superOldLamList = copy(newLamList)
            if k % self.stepCheck == 0:
                getLogger('probfoil').debug(str(time()) + ' : ' + str(k) + ' iterations completed out of ' + str(iterations))
            if self.terminateAtFixedPoint and fixedPointReached:
                getLogger('probfoil').debug('Fixed point reach at iteration: ' + str(k))
                break
            updateCount += 1
        except Exception:
            # Gradient evaluation failed for this example; keep the latest
            # weights and move on to the next sample.
            errorCount += 1
            oldLamList = newLamList
    selectedLamList = newLamList
    # Fold the optimized weights back into a copy of self.lams.
    newLam = copy(self.lams)
    for predicate, lam in zip(hypothesis.predicateList, selectedLamList):
        newLam[predicate] = lam
    getLogger('probfoil').debug('Updated Lambda\t\t\t\t\t: ' + str(newLam))
    time_total = time() - time_start
    self._time_optimization += time_total
    getLogger('probfoil').debug('Time - SGD\t\t\t\t\t\t: %.1fs' % time_total)
    return newLam
def learn_updateScores(self, hypothesis, newLam):
    """Push the optimized lambdas in `newLam` back into the rule chain.

    When self.learnAllRules is set, only each rule's weight (max_x) is
    updated, walking the chain from the last rule backwards; rule i takes
    newLam["p_<i>"]. Otherwise the rules are re-scored recursively from
    the first rule to the last, either by recomputing predictions or by
    re-evaluating each example's cached score expression with the new
    weights. Returns `hypothesis` (modified in place).
    """
    if self.learnAllRules:
        rule = hypothesis
        ruleCount = len(hypothesis.to_clauses()) - 2
        while rule.previous != None:
            rule.max_x = newLam["p_"+str(ruleCount)]
            # NOTE(review): a probability of None appears to denote a
            # deterministic rule (see set_rule_probability) — confirm.
            if rule.max_x > 1 - self.tolerance:
                rule.set_rule_probability(None)
            else:
                rule.set_rule_probability(rule.max_x)
            ruleCount -= 1
            rule = rule.previous
    else:
        def getUpdatedScores(rule, ruleCount):
            # Recurse to the root first so rules are updated oldest-first;
            # ruleCount is the rule's index in the chain (root yields -1).
            if rule.previous is None:
                ruleCount = -1
                return ruleCount, rule.scores
            else:
                ruleCount, updatedScores = getUpdatedScores(rule.previous, ruleCount)
                ruleCount += 1
                self.canonicalRuleList = []
                if self.learnAllRules == False:
                    # Remember the previous weight/scores, then install the
                    # new weight and re-run rule selection.
                    rule.oldProb = rule.max_x
                    rule.oldScores = rule.scores
                    rule.max_x = newLam["p_"+str(ruleCount)]
                    self._select_rule(rule)
                if (self.global_score == "accuracy" and rule.lossStringAcc == "") or (self.global_score == "cross_entropy" and rule.lossStringCE == ""):
                    # No cached loss string yet: recompute predictions from scratch.
                    rule.scores = self._compute_scores_predict_again(rule)
                else:
                    # Re-evaluate each example's cached expression with `y`
                    # bound to the new lambda vector (the expressions
                    # reference y[j] by index).
                    y =[]
                    for predicate in hypothesis.predicateList:
                        y.append(newLam[predicate])
                    for i, expression in enumerate(rule.expressionList):
                        try:
                            if expression != '':
                                rule.scores[i] = eval(expression)
                        except Exception as e:
                            getLogger('probfoil').error('Exception Occurred\t\t\t\t: %s' % str(e))
                            getLogger('probfoil').warning('Exception occurred in self.learn_updateScores with %dth expression: (y = %s) %s' %(i, str(y), expression))
                return ruleCount, rule.scores
        ruleCount, hypothesis.scores = getUpdatedScores(hypothesis, -1)
    return hypothesis
def learn_pruneHypothesis(self, hypothesis):
    """Snap near-certain rule weights to deterministic and drop near-zero rules.

    Walks the rule chain (hypothesis -> ... -> root): a rule probability
    within 6*tolerance of 1 is replaced by None, and a rule whose
    probability is within 6*tolerance of 0 is unlinked from the chain.
    Returns (hypothesis, pruneIndicator) where pruneIndicator says whether
    any rule was removed.
    """
    getLogger('probfoil').info('%-*s: %s' % (self.pad, "Semi - Final Hypothesis", str(hypothesis.to_clauses())))
    # Edit the weights of the rules to 1 from 1-self.tolerance and remove those rules whose weights are <= self.tolerance
    rule = hypothesis
    previousRule = rule
    pruneIndicator = False
    while rule.previous != None:
        prob = rule.get_rule_probability()
        # A stored probability of None is treated as 1 (deterministic rule).
        if prob == None:
            prob = 1
        if prob >= 1 - 6*self.tolerance:
            rule.set_rule_probability(None)
        elif prob <= 6*self.tolerance:
            #No need to update weighted accuracy when the rule is dropped. The dropped rule was inconsequential.
            # Unlink `rule` from the chain; previousRule keeps pointing at
            # the last retained rule so consecutive drops work.
            previousRule.previous = rule.previous
            rule = rule.previous
            pruneIndicator = True
            continue
        previousRule = rule
        rule = rule.previous
    # Drop the first rule if its probability is insignificant. The in-place
    # unlinking above cannot remove the head node (previousRule starts as
    # the head itself), so the head is handled separately here.
    prob = hypothesis.get_rule_probability()
    if prob == None:
        prob = 1
    if hypothesis.previous.previous != None and prob <= 6*self.tolerance:
        hypothesis = hypothesis.previous
        pruneIndicator = True
    getLogger('probfoil').info('%-*s: %s' % (self.pad, "Final Hypothesis", str(hypothesis.to_clauses())))
    return hypothesis, pruneIndicator
def learn_closePSQLDB(self, drop = False):
    """Close the learner's PSQL session and optionally drop its database.

    Closes self.cursor / self.conn, then opens a fresh autocommit
    connection through which the DROP DATABASE statement is issued when
    `drop` is True.
    """
    # Shut down the connection that was used during learning.
    self.cursor.close()
    self.conn.close()
    # Open a separate administrative connection in autocommit mode for the
    # DROP DATABASE statement.
    adminConn = self.connect_PSQLDB(None)
    adminConn.autocommit = True
    adminConn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    adminCursor = adminConn.cursor()
    if drop:
        adminCursor.execute("DROP DATABASE " + self.name + ";")
        getLogger('probfoil').info('DROP DATABASE\t\t\t\t\t: %s' % self.name)
    adminCursor.close()
    adminConn.close()
def learn(self):
    """Top-level learning loop.

    As implemented below: read the input file, convert it for AMIE+ and
    collect candidate rules with their coverage/confidence scores, gather
    negative examples for the AMIE hypothesis, then either optimize all
    candidate rules at once (self.learnAllRules) or greedily add one best
    rule per iteration — optimizing the lambda weights after each step
    when self.open_world is set. Finally prune insignificant rules, close
    (and drop) the PSQL database, and return the learned hypothesis.
    """
    self.learn_readFile()
    startLearn = time()
    # ------------------------------ Get Negative Examples using AMIE+ -------------------------------
    self.TSVFile = self.InputFile[:self.InputFile.rfind(".")].replace(".","_") + "_amie.tsv"
    self.breakNow = False
    self.convertProblogToAmie()
    self.AmieRuleList, self.coverageList, self.stdConfidenceList, self.pcaConfidenceList = self.getAmieRules()
    # Log the candidate-rule table (rule, coverage, std conf, PCA conf).
    s = '================ Candidate Rules obtained from AMIE+ ================\n'
    for candidate, coverage, stdConfidence, pcaConfidence in zip(self.AmieRuleList, self.coverageList, self.stdConfidenceList, self.pcaConfidenceList):
        s += str(candidate) + '\t' + str(coverage) + '\t' + str(stdConfidence) + '\t' + str(pcaConfidence) + '\n'
    s += '===================================================================='
    getLogger('probfoil').debug(s)
    self.selectedAmieRules = []
    self.trueAdded = False
    self.failAdded = True
    failedAttempts = 0
    self.scoreList = self.stdConfidenceList
    amieHypothesis = self.getAmieHypothesis()
    if self.breakNow:
        return amieHypothesis
    self.train_examples = copy(self._examples)
    self.rule_getNegativeExamples(amieHypothesis)
    # ---------------------------------------- Start Learning ----------------------------------------
    if self.learnAllRules:
        # Optimize the whole AMIE hypothesis in one pass: p_0 starts from
        # the target's closed-world density, p_i from rule i's AMIE
        # standard confidence.
        k = 1
        for base in self.predicateDict[self.targetPredicate]:
            k = k*len(self.constantDict[base])
        self.lams['p_0'] = self.regularize(float(self.closedWorldTotal[self.targetPredicate])/k, 1)
        for i, confidence in enumerate(self.stdConfidenceList):
            self.lams['p_'+str(i+1)] = self.regularize(float(confidence), 1)
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Self.lams", str(self.lams)))
        next_hypothesis = amieHypothesis
        next_hypothesis.accuracy = 0
        next_hypothesis.scores = [1.0] * self.totalExamples
        #next_hypothesis.correct = self._scores_correct
        next_hypothesis.expressionList = [""]*self.totalExamples
        next_hypothesis.replaceableQuery = ''
        next_hypothesis.lossStringAcc = ''
        next_hypothesis.lossStringSL = ''
        next_hypothesis.lossStringCE = ''
        next_hypothesis.score = 0.0
        self.trueAdded = True
        hypothesis = next_hypothesis
    else:
        hypothesis = self.initial_hypothesis()
    counter = self.loadRule
    name = self.InputFile[self.InputFile.rfind("/")+1:self.InputFile.rfind(".")].replace(".","_").lower()
    current_score = None
    while True:
        if self.learnAllRules == False:
            if self.loadRule != None:
                # Checkpointing: restore the learner state for this rule
                # index from a pickle if one exists; otherwise write the
                # current state to that file.
                try:
                    filename = 'Logs/' + name + "_" + str(counter)+'.pckl'
                    f = open(filename, 'rb')
                    obj = pickle.load(f)
                    self._examples = obj[0]
                    self._scores_correct = obj[1]
                    self.constantDict = obj[2]
                    self.closedWorldTotal = obj[3]
                    self.targetPredicate = obj[4]
                    self.targetArity = obj[5]
                    #self.hypothesisAscii = obj[6]
                    self.hypothesisFreeVars = obj[6]
                    self._target = obj[7]
                    self.predicateDict = obj[8]
                    self.lams = obj[9]
                    f.close()
                except:
                    filename = 'Logs/' + name + "_" + str(counter)+'.pckl'
                    f = open(filename, 'wb')
                    obj = []
                    obj.append(self._examples)
                    obj.append(self._scores_correct)
                    obj.append(self.constantDict)
                    obj.append(self.closedWorldTotal)
                    obj.append(self.targetPredicate)
                    obj.append(self.targetArity)
                    #obj.append(self.hypothesisAscii)
                    obj.append(self.hypothesisFreeVars)
                    obj.append(self._target)
                    obj.append(self.predicateDict)
                    obj.append(self.lams)
                    obj.append(hypothesis)
                    pickle.dump(obj, f)
                    f.close()
                counter += 1
            # Greedy step: extend the current hypothesis with the best rule.
            next_hypothesis = self.best_rule(hypothesis)
            if self.candidate_rules == "amie" and self.breakNow:
                break
        getLogger('probfoil').info('%-*s: %s' % (self.pad, "Hypothesis", str(next_hypothesis.to_clauses())))
        self.learn_parseRules(next_hypothesis)
        self.learn_getQueryString(next_hypothesis)
        if self.open_world:
            #self.learn_getQueryList(next_hypothesis)
            #start = time()
            #next_hypothesis.gscore = self.getGlobalScore(next_hypothesis, self.lams)
            #end = time() - start
            #getLogger('probfoil').info('GScore before optimization\t\t: %s' % str(next_hypothesis.gscore))
            #getLogger('probfoil').info('Got GScore in\t\t\t\t\t: %ss' % str(end))
            newLam = self.lams
            if len(next_hypothesis.predicateList) > 0:
                # Optimize the lambda weights, then propagate them into the
                # rule scores.
                if self.optimization_method == "incremental":
                    #newLam, loss = self.learn_stochasticGradientDescent(next_hypothesis)
                    newLam = self.learn_stochasticGradientDescent(next_hypothesis)
                    #next_hypothesis.gscore = -1*loss
                elif self.optimization_method == "batch":
                    newLam = self.learn_optimizeLambda(next_hypothesis)
                self.learn_updateScores(next_hypothesis, newLam)
                # Update the rule scores one by one based on the updated probabilities of rules
                # Why? To select the next rule properly from the candidate rules
                # Should I update the rule.scores with new Lambdas too? Yes
            else:
                newLam = copy(self.lams)
            #next_hypothesis.gscore = self.getGlobalScore(next_hypothesis, newLam)
            # next_hypothesis.accuracy = accuracy(next_hypothesis)
            #getLogger('probfoil').info('GScore after optimization\t\t: %s' % str(next_hypothesis.gscore))
        # --------------------------------------- Continue Learning --------------------------------------
        time_start = time()
        if self.learnAllRules == False:
            getLogger('probfoil').info('Rule Learned\t\t\t\t\t\t: %s' % next_hypothesis)
            #s = significance(next_hypothesis)
            #if self._min_significance is not None and s < self._min_significance:
            #    getLogger('probfoil').warning('Significance of %s < Minimum Significance Threshold of %s' % (s, self._min_significance))
            #    break
            #getLogger('probfoil').debug('Current Score\t\t\t\t\t: ' + str(current_score))
            #getLogger('probfoil').debug('New Score\t\t\t\t\t\t: ' + str(next_hypothesis.gscore))
        hypothesis = next_hypothesis
        if self.open_world:
            self.lams = newLam
        time_total = time() - time_start
        getLogger('probfoil').debug('Time - deciding on hypothesis\t: %.1fs\n' % time_total)
        # A single pass suffices when learning all rules at once.
        if self.interrupted or self.learnAllRules:
            break
        if hypothesis.get_literal() and hypothesis.get_literal().functor == '_recursive':
            break # can't extend after recursive
    '''
    if hasattr(hypothesis, 'gscore'):
        gscore = hypothesis.gscore
    else:
        gscore = None
    '''
    hypothesis, pruneIndicator = self.learn_pruneHypothesis(hypothesis)
    '''
    if pruneIndicator:
        #if gscore != None:
        #    hypothesis.gscore = gscore
        self.learn_parseRules(hypothesis)
        self.learn_getQueryString(hypothesis)
        hypothesis.replaceableQuery = ''
        self.learn_getQueryList(hypothesis)
        if hypothesis.previous is not None:
            hypothesis.previous.scores = [0.0]*self.totalExamples
        if hypothesis.parent is not None:
            hypothesis.parent.scores = [1.0]*self.totalExamples
        hypothesis.scores = self._compute_scores_predict_again(hypothesis)
    if hasattr(hypothesis, 'gscore'):
        if self.global_score == "accuracy" and hypothesis.gscore != None:
            hypothesis.weightedAccuracy = hypothesis.gscore
        elif self.global_score == "cross_entropy" and hypothesis.gscore != None:
            hypothesis.crossEntropy = hypothesis.gscore
        elif self.global_score == "squared_loss" and hypothesis.gscore != None:
            hypothesis.squaredLoss = hypothesis.gscore
    if not(hasattr(hypothesis, 'weightedAccuracy') and hypothesis.weightedAccuracy != ""):
        hypothesis.weightedAccuracy = self.getGlobalScore_again(hypothesis, self.lams, scoreType = "accuracy")
    if not(hasattr(hypothesis, 'crossEntropy') and hypothesis.crossEntropy != ""):
        hypothesis.crossEntropy = self.getGlobalScore_again(hypothesis, self.lams, scoreType = "cross_entropy")
    if not(hasattr(hypothesis, 'squaredLoss') and hypothesis.squaredLoss != ""):
        hypothesis.squaredLoss = self.getGlobalScore_again(hypothesis, self.lams, scoreType = "squared_loss")
    hypothesis.correct = self._scores_correct
    hypothesis.tp, hypothesis.fp, hypothesis.tn, hypothesis.fn = rates(hypothesis)
    hypothesis.precision = precision(hypothesis)
    hypothesis.recall = recall(hypothesis)
    '''
    self.learn_closePSQLDB(drop = True)
    self._time_learn = time() - startLearn
    return hypothesis
def rule_intersect2Tables(self, mainTable, mainVarList, newTable, newVarList):
    """Join two literal tables on their shared logic variables.

    Creates a new PSQL table dummy<N> holding the join of `mainTable` and
    `newTable`: columns are re-aliased v0, v1, ... in the order of the
    combined variable list; variables occurring in both tables become
    equality conditions of an inner join, and when no variable is shared a
    cross join is taken instead.
    Returns (unifiedTableName, unifiedVarList).
    """
    unifiedTableName = "dummy" + str(self.dummyCount)
    self.dummyCount += 1
    # NOTE(review): unifiedVarList aliases mainVarList, so the appends
    # below mutate the caller's list and also affect the
    # `var not in mainVarList` test for later variables of newVarList —
    # confirm this aliasing is intended.
    unifiedVarList = mainVarList
    # A self-join needs two distinct aliases for the same table.
    if mainTable != newTable:
        firstTableIdentifier = mainTable
        secondTableIdentifier = newTable
    else:
        firstTableIdentifier = 'table0'
        secondTableIdentifier = 'table1'
    whereList = []
    selectList = []
    # All columns of the main table are carried over.
    for i, var in enumerate(mainVarList):
        selectList.append(firstTableIdentifier + '.v' + str(i))
    for i, var in enumerate(newVarList):
        if var not in mainVarList:
            # New variable: carry its column through to the joined table.
            unifiedVarList.append(newVarList[i])
            selectList.append(secondTableIdentifier + '.v' + str(i))
        else:
            # Shared variable: equality condition on the matching columns.
            whereList.append(firstTableIdentifier + '.v' + str(mainVarList.index(var)) + ' = ' + secondTableIdentifier + '.v' + str(i))
    # Re-alias the selected columns as v0, v1, ... in combined order.
    selectList = [ item + ' as v' + str(i) for i, item in enumerate(selectList)]
    selectString = ', '.join(selectList)
    whereString = ' and '.join(whereList)
    if whereString == '':
        #Take Cross join of both tables
        self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
        sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct ' + selectString + ' from ' + mainTable + ' as ' + firstTableIdentifier + ' cross join ' + newTable + ' as ' + secondTableIdentifier + ');'
        getLogger('probfoil').log(9, sqlQuery)
        self.cursor.execute(sqlQuery)
    else:
        #Take Inner join with respect to whereString
        self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
        sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct ' + selectString + ' from ' + mainTable + ' as ' + firstTableIdentifier + ' inner join ' + newTable + ' as ' + secondTableIdentifier + ' on ' + whereString + ');'
        getLogger('probfoil').log(9, sqlQuery)
        self.cursor.execute(sqlQuery)
    return unifiedTableName, unifiedVarList
def rule_unify2Tables(self, firstTable, firstVarList, secondTable, secondVarList):
    """Create a new table holding the union of two binary prediction tables.

    The active query references the tables' v0/v1 columns directly (both
    input tables already expose the target arguments as v0/v1), so the
    variable lists are not needed to build the SQL; the parameters are kept
    for interface compatibility with existing callers.
    Returns the name of the freshly created table (dummy<N>).

    CLEANUP: the original also built select strings from firstVarList /
    secondVarList that were only used by a commented-out variant of the
    query; that dead computation has been removed.
    """
    unifiedTableName = "dummy" + str(self.dummyCount)
    self.dummyCount += 1
    # Union of both tables, de-duplicated, into the new table.
    self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
    sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct ' + firstTable + '.v0 as v0, ' + firstTable + '.v1 as v1 from ' + firstTable + ' union select distinct ' + secondTable + '.v0 as v0, ' + secondTable + '.v1 as v1 from ' + secondTable + ');'
    getLogger('probfoil').log(9, sqlQuery)
    self.cursor.execute(sqlQuery)
    return unifiedTableName
def rule_predict1rule(self, rule):
# r(A,B):-r1(A,C),r2(B,C),r3(C,D),r4(E).
# Assuming target arity = 2
# varDict = {'A':[(r1,0)], 'B':[(r2,0)], 'C':[(r1,1),(r2,1),(r3,0)], 'D':[(r3,1)], 'E':[(r4,0)]}
# varList = [['A','C'],['B','C'],['C','D'],['E']]
# tableList = ['r1','r2','r3',r4']
# Get prediction set for this rule by running a nested inner join SQL query
literalList = rule.get_literals()[1:]
count = 0
table1 = literalList[0].functor
varList = []
tableList = []
for i, literal in enumerate(literalList):
tableList.append(literal.functor)
#tableList.append(literal._Term__functor)
argList = literal.args
varList.append([])
for j, arg in enumerate(argList):
variable = term2str(arg)
varList[i].append(variable)
unifiedVarSet = set()
for vars in varList:
unifiedVarSet = unifiedVarSet.union(set(vars))
if 'A' not in unifiedVarSet and 'B' not in unifiedVarSet:
unifiedTableName = "dummy" + str(self.dummyCount)
self.dummyCount += 1
self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct dummyA.v0 as v0, dummyB.v0 as v1 from dummyA cross join dummyB);'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
return unifiedTableName
newTable = tableList[0]
unifiedVarList = varList[0]
for (table, vars) in zip(tableList[1:], varList[1:]):
newTable, unifiedVarList = self.rule_intersect2Tables(newTable, unifiedVarList, table, vars)
if 'A' not in unifiedVarSet:
unifiedTableName = "dummy" + str(self.dummyCount)
self.dummyCount += 1
self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
AIndex = len(unifiedVarList)
unifiedVarList.append('A')
sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct dummyA.v0 as v0, ' + newTable + '.v' + str(unifiedVarList.index('B')) + ' as v1 from ' + newTable + ' cross join dummyA);'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
return unifiedTableName
elif 'B' not in unifiedVarSet:
unifiedTableName = "dummy" + str(self.dummyCount)
self.dummyCount += 1
self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
BIndex = len(unifiedVarList)
unifiedVarList.append('B')
sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct ' + newTable + '.v' + str(unifiedVarList.index('A')) + ' as v0, dummyB.v0 as v1 from ' + newTable + ' cross join dummyB);'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
return unifiedTableName
else:
#Prune newTable to keep only A and B columns
unifiedTableName = "dummy" + str(self.dummyCount)
self.dummyCount += 1
self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct v' + str(unifiedVarList.index('A')) + ' as v0, v' + str(unifiedVarList.index('B')) + ' as v1 from ' + newTable + ');'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
return unifiedTableName
def rule_predictAllRules(self, rules):
self.dummyCount = 0
#Creating DummyA and DummyB
for i in range(0,self.targetArity):
sqlQuery = 'select distinct ' + self.targetPredicate + '.v' + str(i) + ' as v0 from ' + self.targetPredicate
entity = self.predicateDict[self.targetPredicate][i]
for pred in self.predicateDict:
if pred == self.targetPredicate:
continue
entityList = self.predicateDict[pred]
for j, predEntity in enumerate(entityList):
if predEntity == entity:
sqlQuery = sqlQuery + ' union select distinct ' + pred + '.v' + str(j) + ' as v0 from ' + pred
self.cursor.execute('DROP TABLE IF EXISTS dummy' + chr(65+i) + ';')
sqlQuery = 'CREATE TABLE dummy' + chr(65+i) + ' AS (select distinct * from (' + sqlQuery + ') as a);'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
#negativeExamples = set()
while len(rules.get_literals()) <= 1 or Term('fail') in rules.get_literals():
rules = rules.previous
if rules == None:
emptyTable = "dummy" + str(self.dummyCount)
self.dummyCount += 1
self.cursor.execute('DROP TABLE IF EXISTS ' + emptyTable + ';')
sqlQuery = 'CREATE TABLE ' + emptyTable + ' (v0 integer, v1 interger, p double precision);'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
return emptyTable
table = self.rule_predict1rule(rules)
if rules.previous != None:
rule = rules.previous
while rule != None:
if len(rule.get_literals()) > 1 and Term('fail') not in rule.get_literals():
newTable = self.rule_predict1rule(rule)
table = self.rule_unify2Tables(table, ['A','B'], newTable, ['A','B'])
rule = rule.previous
unifiedTableName = "dummy" + str(self.dummyCount)
self.dummyCount += 1
self.cursor.execute('DROP TABLE IF EXISTS ' + unifiedTableName + ';')
sqlQuery = 'CREATE TABLE ' + unifiedTableName + ' AS (select distinct v0, v1 from ' + table + ');'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
return unifiedTableName
def rule_getNegativeExamples(self, rules):
startNegative = time()
subjectConstantList = {v: k for k, v in self.constantDict[self.predicateDict[self.targetPredicate][0]].iteritems()}
objectConstantList = {v: k for k, v in self.constantDict[self.predicateDict[self.targetPredicate][1]].iteritems()}
universalConstantList = {v: k for k, v in self.universalConstantId.iteritems()}
#subjectConstantList = self.constantDict[self.predicateDict[self.targetPredicate][0]]
#objectConstantList = self.constantDict[self.predicateDict[self.targetPredicate][1]]
self.totalPositiveExamples = len(self._examples)
getLogger('probfoil').info('%-*s: %d' % (self.pad, "Total positive examples (#P)", self.totalPositiveExamples))
#------------------------------------ Get Closed World Negatives ------------------------------------
table = self.rule_predictAllRules(rules)
self.cursor.execute('select count(*) from ' + table + ';')
totalPredictions = str(self.cursor.fetchone()[0])
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Total CW Predictions", totalPredictions))
CWPrediction = "dummy" + str(self.dummyCount)
self.dummyCount += 1
self.cursor.execute('DROP TABLE IF EXISTS ' + CWPrediction + ';')
sqlQuery = 'CREATE TABLE ' + CWPrediction + ' AS (select distinct ' + table + '.v0, ' + table + '.v1 from ' + table + ' where not exists (select 1 from ' + self.targetPredicate + ' where ' + self.targetPredicate + '.v0 = ' + table + '.v0 and ' + self.targetPredicate + '.v1 = ' + table + '.v1));'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
self.cursor.execute('select count(*) from ' + CWPrediction + ';')
totalPredictions = str(self.cursor.fetchone()[0])
getLogger('probfoil').info('%-*s: %s' % (self.pad, "Total CW Negative Predictions", totalPredictions))
sqlQuery = 'select * from ' + CWPrediction + ' order by random() limit ' + str(self.closedWorldNegativesFactor*self.totalPositiveExamples*2) + ';'
getLogger('probfoil').log(9, sqlQuery)
self.cursor.execute(sqlQuery)
predictionList = self.cursor.fetchall()
start = time()
CWNegativeExamples = []
counter = 0
#random.shuffle(predictionList)
for (a,b) in predictionList:
if counter == self.closedWorldNegativesFactor*self.totalPositiveExamples:
break
example = [universalConstantList[a], universalConstantList[b]]
if example not in self.examples:
CWNegativeExamples.append(example)
counter += 1
self.CWNegatives = set(range(self.totalPositiveExamples,self.totalPositiveExamples+counter))
self.totalCWNegativeExamples = len(CWNegativeExamples)
self.CWNegatives = set(range(self.totalPositiveExamples,self.totalPositiveExamples + self.totalCWNegativeExamples))
getLogger('probfoil').info('%-*s: %d' % (self.pad, "Total CW negative examples", self.totalCWNegativeExamples))
self.cursor.execute('select count(*) from ' + CWPrediction + ';')
totalCWNegativeTuples = self.cursor.fetchone()[0]
if self.totalCWNegativeExamples != 0:
self.CWNegativeWeight = float(totalCWNegativeTuples)*self.misclassificationCost/self.totalCWNegativeExamples
else:
self.CWNegativeWeight = 1
getLogger('probfoil').log(9, '%-*s: %s' % (self.pad, "CW Negative Weight", str(self.CWNegativeWeight)))
getLogger('probfoil').log(9, '%-*s: %s' % (self.pad, "#CW Negative Examples", str(self.totalCWNegativeExamples)))
#------------------------------------- Get Open World Negatives --------------------------------------
table = self.rule_unify2Tables(table, ['A','B'], self.targetPredicate, ['A','B'])
self.cursor.execute('select count(*) from ' + table + ';')
totalCWTuples = self.cursor.fetchone()[0]
# totalCWTuples contains both positives and negatives
self.cursor.execute('select * from ' + table + ';')
totalCWList = self.cursor.fetchall()
numberOfSubjects = len(subjectConstantList)
numberOfObjects = len(objectConstantList)
OWNegativeExamples = []
sample = 0
sampleCap = self.openWorldNegativesFactor*self.totalPositiveExamples
iteration = 0
iterationCap = 2*numberOfSubjects*numberOfObjects
while True:
if sample == sampleCap or iteration == iterationCap:
break
j = random.randint(0, numberOfSubjects - 1)
k = random.randint(0, numberOfObjects - 1)
example = [subjectConstantList[subjectConstantList.keys()[j]], objectConstantList[objectConstantList.keys()[k]]]
if (j,k) not in totalCWList:
OWNegativeExamples.append(example)
sample += 1
iteration += 1
self.totalOWNegativeExamples = len(OWNegativeExamples)
self.OWNegatives = set(range(self.totalPositiveExamples + self.totalCWNegativeExamples, self.totalPositiveExamples + self.totalCWNegativeExamples + self.totalOWNegativeExamples))
k = 1
for base in self.predicateDict[self.targetPredicate]:
k = k*len(self.constantDict[base])
totalOWNegativeExamples = k - totalCWTuples
getLogger('probfoil').info('%-*s: %d' % (self.pad, "Total OW negative examples", totalOWNegativeExamples))
if self.totalOWNegativeExamples != 0:
self.OWNegativeWeight = float(totalOWNegativeExamples)*self.misclassificationCost/self.totalOWNegativeExamples
else:
self.OWNegativeWeight = 1
getLogger('probfoil').log(9, '%-*s: %s' % (self.pad, "OW Negative Weight", str(self.OWNegativeWeight)))
getLogger('probfoil').log(9, '%-*s: %s' % (self.pad, "#OW Negative Examples", str(self.totalOWNegativeExamples)))
self._scores_correct = self._scores_correct + [0]*self.totalCWNegativeExamples + [0]*self.totalOWNegativeExamples
getLogger('loss').log(8, '%-*s: %s' % (self.pad, "self._examples", str(self._examples)))
getLogger('loss').log(8, '%-*s: %s' % (self.pad, "CWNegativeExamples", str(CWNegativeExamples)))
getLogger('loss').log(8, '%-*s: %s' % (self.pad, "OWNegativeExamples", str(OWNegativeExamples)))
self._examples = self._examples + CWNegativeExamples + OWNegativeExamples
self.totalExamples = self.totalPositiveExamples + self.totalCWNegativeExamples + self.totalOWNegativeExamples
self.totalWeightedExamples = (self.totalPositiveExamples + self.CWNegativeWeight*self.totalCWNegativeExamples + self.OWNegativeWeight*self.totalOWNegativeExamples)
self.querySS = [""]*self.totalExamples
totalNegative = time() - startNegative
getLogger('probfoil').log(9, '%-*s: %ss' % (self.pad, "Total time in getting negatives", str(totalNegative)))
iteration = int(table[5:])
while iteration != -1:
self.cursor.execute('drop table dummy' + str(iteration) + ';')
iteration -= 1
def rule_getConditionalProbability(self, ruleIndex):
# Numerator = |Prediction of Rule (intersection) Positive Examples|
# Denominator = |Prediction of Rule|
#table, varList = self.rulewisePredictions[self.selectedAmieRules[-1]]
(headLiteral, amieLiteralList) = self.AmieRuleList[ruleIndex]
rule = FOILRule(headLiteral)
for literal in amieLiteralList:
rule = rule & literal
table = self.rule_predict1rule(rule)
targetTable = self.targetPredicate
joinedTable, joinedVarList = self.rule_intersect2Tables(targetTable, ['A','B'], table, ['A','B'])
self.cursor.execute('select count(*) from ' + joinedTable + ';')
numerator = float(str(self.cursor.fetchone()[0]))
self.cursor.execute('select count(*) from ' + table + ';')
denominator = float(str(self.cursor.fetchone()[0]))
if denominator == 0:
# Bogus Rule
return 1-self.tolerance
else:
prob = numerator/denominator
getLogger('probfoil').log(9, '%-*s: %s' % (self.pad, "# Predictions of Rule" + str(ruleIndex) + " intersected with examples", str(numerator)))
getLogger('probfoil').log(9, '%-*s: %s' % (self.pad, "# Predictions of Rule" + str(ruleIndex), str(denominator)))
#return self.regularize(prob, 5)
return prob
def _compute_rule_score(self, rule):
return m_estimate_relative(rule, self._m_estimate)
def _compute_rule_future_score(self, rule):
return m_estimate_future_relative(rule, self._m_estimate)
def _select_rule(self, rule):
pass
def statistics(self):
statList = []
if self.learnAllRules == False:
statList.append(('Rule evaluations', self._stats_evaluations))
#statList.append(('Numeric SS calls', self._stats_numericSS))
#statList.append(('Symbolic SS calls', self._stats_symbolicSS))
statList.append(('Get SQL Query calls', self._stats_getSQLQuery))
if self.open_world:
statList.append(('Get Expression calls', self._stats_getExpression))
statList.append(('Read Time', str(round(self._time_read,2)) + "s"))
#statList.append(('Numeric SS', str(round(self._time_numericSS,2)) + "s"))
#statList.append(('Symbolic SS', str(round(self._time_symbolicSS,2)) + "s"))
#statList.append(('Get SQL Query', str(round(self._time_getSQLQuery,2)) + "s"))
#statList.append(('Get Canonical Form', str(round(self._time_getCanonicalForm,2)) + "s"))
#statList.append(('Execute Query', str(round(self._time_executeQuery,2)) + "s"))
#statList.append(('Execute PSQL', str(round(self._time_executePSQL,2)) + "s"))
#statList.append(('Probability - Total', str(round(self._time_getQueryProbability,2)) + "s"))
if self.open_world:
statList.append(('Get Expression', str(round(self._time_getExpression,2)) + "s"))
#statList.append(('Expression - Total', str(round(self._time_getQueryExpression,2)) + "s"))
statList.append(('Optimization', str(round(self._time_optimization,2)) + "s"))
statList.append(('Learn time', str(round(self._time_learn,2)) + "s"))
return statList
def print_output(self, hypothesis):
printList = []
if self.interrupted:
printList.append('================ PARTIAL THEORY ================')
else:
printList.append('================= FINAL THEORY =================')
if self.open_world:
lamDict = {}
for predicate in self.lams:
if len(predicate) > 2 and predicate[:2] == "p_":
continue
elif self.lams[predicate] == 0:
continue
else:
lamDict[predicate] = self.lams[predicate]
if len(lamDict) == 1:
printList.append('Open World Probability = ' + str(lamDict))
elif len(lamDict) > 1:
printList.append('Open World Probabilities = ' + str(lamDict))
rule = hypothesis
rules = rule.to_clauses(rule.target.functor)
# First rule is failing rule: don't print it if there are other rules.
if len(rules) > 1:
for rule in rules[1:]:
printList.append(str(rule))
else:
printList.append(str(rules[0]))
'''
printList.append('==================== SCORES ====================')
printList.append(' Weighted Accuracy:\t%s' % str(hypothesis.weightedAccuracy))
printList.append(' Cross Entropy:\t%s' % str(hypothesis.crossEntropy))
printList.append(' Squared Loss:\t%s' % str(hypothesis.squaredLoss))
printList.append(' Precision:\t%s' % str(hypothesis.precision))
printList.append(' Recall:\t%s' % str(hypothesis.recall))
printList.append(' True Positives:\t%s' % str(hypothesis.tp))
printList.append(' True Negatives:\t%s' % str(hypothesis.tn))
printList.append(' False Positives:\t%s' % str(hypothesis.fp))
printList.append(' False Negatives:\t%s' % str(hypothesis.fn))
'''
for line in printList:
getLogger('probfoil').info(line)
print(line)
class ProbFOIL2(ProbFOIL):
def __init__(self, *args, **kwargs):
ProbFOIL.__init__(self, *args, **kwargs)
def _select_rule(self, rule):
# set rule probability and update scores
if hasattr(rule, 'max_x'):
#x = round(rule.max_x, 8)
x = rule.max_x
else:
x = 1.0
if x > 1 - self.tolerance:
rule.set_rule_probability(None)
else:
rule.set_rule_probability(x)
if rule.previous is None:
scores_previous = [0.0] * len(rule.scores)
else:
scores_previous = rule.previous.scores
for i, lu in enumerate(zip(scores_previous, rule.scores)):
l, u = lu
s = u - l
rule.scores[i] = l + x * s
def _compute_rule_future_score(self, rule):
return self._compute_rule_score(rule, future=True)
def _compute_rule_score(self, rule, future=False):
return self._compute_rule_score_slow(rule, future)
def _compute_rule_score_slow(self, rule, future=False):
if rule.previous is None:
scores_previous = [0.0] * len(rule.scores)
else:
scores_previous = rule.previous.scores
data = list(zip(self._scores_correct, scores_previous, rule.scores))
max_x = 0.0
max_score = 0.0
max_tp = 0.0
max_fp = 0.0
def eval_x(x, data, future=False):
pos = 0.0
all = 0.0
tp = 0.0
fp = 0.0
tp_p = 0.0
fp_p = 0.0
for p, l, u in data:
pr = l + x * (u - l)
tp += min(p, pr)
fp += max(0, pr - p)
tp_p += min(p, l)
fp_p += max(0, l - p)
pos += p
all += 1
if future:
fp = fp_p
m = self._m_estimate
if pos - tp_p == 0 and all - tp_p - fp_p == 0:
mpnp = 1
else:
mpnp = m * ((pos - tp_p) / (all - tp_p - fp_p))
score = (tp - tp_p + mpnp) / (tp + fp - tp_p - fp_p + m)
return tp, fp, round(score, 12) # Rounding to 12 decimal places to avoid float precision error
tp_x, fp_x, score_x = eval_x(1.0, data, future)
if score_x > max_score:
max_x = 1.0
max_tp = tp_x
max_fp = fp_x
max_score = score_x
if not future:
getLogger('probfoil').log(7, '%s: x=%s (%s %s) -> %s' % (rule, 1.0, tp_x, fp_x, score_x))
xSet = set()
for p, l, u in data:
if u - l < self.tolerance:
continue
x = (p - l) / (u - l)
if x > 1.0 or x < 0.0 or x in xSet:
# Don't check for absurd probabilities
# Don't check for those possible probabilities which have already been checked
continue
xSet.add(x)
tp_x, fp_x, score_x = eval_x(x, data, future)
if not future:
getLogger('probfoil').log(7, '%s: x=%s (%s %s %s) (%s %s) -> %s' % (rule, x, p, l, u, tp_x, fp_x, score_x))
if score_x > max_score:
max_x = x
max_tp = tp_x
max_fp = fp_x
max_score = score_x
'''
xCandidates = [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
for x in xCandidates:
if x in xSet:
continue
xSet.add(x)
tp_x, fp_x, score_x = eval_x(x, data, future)
if not future:
getLogger('probfoil').log(9, '%s: x=%s (%s %s) -> %s' % (rule, x, tp_x, fp_x, score_x))
if score_x > max_score:
max_x = x
max_tp = tp_x
max_fp = fp_x
max_score = score_x
'''
if not future:
getLogger('probfoil').log(9, '%s\t: [BEST] x=%s (%s %s) -> %s' % (rule, max_x, max_tp, max_fp, max_score))
rule.max_x = max_x
rule.max_tp = max_tp
rule.max_fp = max_fp
#if max_x < self.tolerance:
# return 0.0
return max_score
def _compute_rule_score_fast(self, rule, future=False):
if rule.previous is None:
scores_previous = [0.0] * len(rule.scores)
else:
scores_previous = rule.previous.scores
pos = 0.0
all = 0.0
tp_prev = 0.0
fp_prev = 0.0
fp_base = 0.0
tp_base = 0.0
ds_total = 0.0
pl_total = 0.0
if not future:
getLogger('probfoil').log(5, '%s: %s' % (rule, list(zip(self._scores_correct, scores_previous, rule.scores))))
values = []
for p, l, u in zip(self._scores_correct, scores_previous, rule.scores):
pos += p
all += 1.0
tp_prev += min(l, p)
fp_prev += max(0, l - p)
ds = u - l # improvement on previous prediction (note: ds >= 0)
if ds == 0: # no improvement
pass
elif p < l: # lower is overestimate
fp_base += ds
elif p > u: # upper is underestimate
tp_base += ds
else: # correct value still achievable
ds_total += ds
pl_total += p - l
y = (p - l) / (u - l) # for x equal to this value, prediction == correct
values.append((y, p, l, u))
neg = all - pos
mpnp = self._m_estimate * (pos / all)
def comp_m_estimate(tp, fp):
score = (tp + mpnp) / (tp + fp + self._m_estimate)
# print (self._m_estimate, mpnp, tp, fp, score)
return score
max_x = 1.0
tp_x = pl_total + tp_base + tp_prev
if future:
fp_x = fp_prev + fp_base
else:
fp_x = ds_total - pl_total + fp_base + fp_prev
score_x = comp_m_estimate(tp_x, fp_x)
max_score = score_x
max_tp = tp_x
max_fp = fp_x
if values:
values = sorted(values)
if not future:
getLogger('probfoil').log(5, '%s: %s' % (rule, [map(lambda vv: round(vv, 3), vvv) for vvv in values]))
tp_x, fp_x, tn_x, fn_x = 0.0, 0.0, 0.0, 0.0
ds_running = 0.0
pl_running = 0.0
prev_y = None
for y, p, l, u in values + [(None, 0.0, 0.0, 0.0)]: # extra element forces compute at end
if y is None or prev_y is not None and y > prev_y:
# There is a change in y-value.
x = prev_y # set current value of x
tp_x = pl_running + x * (ds_total - ds_running) + x * tp_base + tp_prev
if future:
fp_x = fp_prev
else:
fp_x = x * ds_running - pl_running + x * fp_base + fp_prev
score_x = comp_m_estimate(tp_x, fp_x)
if not future:
getLogger('probfoil').log(6, '%s: x=%s (%s %s) -> %s' % (rule, x, tp_x, fp_x, score_x))
if max_score is None or score_x > max_score:
max_score = score_x
max_x = x
max_tp = tp_x
max_fp = fp_x
# if not future:
# rts = rates(rule)
# est = m_estimate(rule)
# print(x, tp_x, fp_x, rts, score_x, est)
# # assert abs(tp_x - rts[0]) < self.tolerance
# # assert abs(fp_x - rts[1]) < self.tolerance
# # assert abs(est - score_x) < self.tolerance
prev_y = y
pl_running += p - l
ds_running += u - l
assert abs(ds_running - ds_total) < self.tolerance
assert abs(pl_running - pl_total) < self.tolerance
if not future:
getLogger('probfoil').log(6, '%s: [BEST] x=%s (%s %s) -> %s' % (rule, max_x, tp_x, fp_x, score_x))
rule.max_x = max_x
rule.max_tp = max_tp
rule.max_fp = max_fp
return max_score
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
parser.add_argument('-1', '--det-rules', action='store_true', dest='probfoil1', help='learn deterministic rules')
parser.add_argument('-m', help='parameter m for m-estimate', type=float, default=argparse.SUPPRESS)
parser.add_argument('-b', '--beam-size', type=int, default=5, help='size of beam for beam search')
parser.add_argument('-p', '--significance', type=float, default=None, help='rule significance threshold', dest='p')
parser.add_argument('-l', '--length', dest='l', type=int, default=None, help='maximum rule length')
parser.add_argument('-v', action='count', dest='verbose', default=None, help='increase verbosity (repeat for more)')
parser.add_argument('--symmetry-breaking', action='store_true', help='avoid symmetries in refinement operator')
parser.add_argument('-t', '--target', type=str, help='specify predicate/arity to learn (overrides settings file)')
parser.add_argument('--log', help='write log to file', default=None)
parser.add_argument('-c', '--closed-world', action='store_true', help='Closed World Indicator (Input -c to learn on closed world setting)')
parser.add_argument('-g', '--global-score', type=str, default = 'cross_entropy', help="specify global scoring function as either 'accuracy' or 'cross_entropy' (Default is 'cross_entropy')")
parser.add_argument('-o', '--optimization-method', type=str, default = 'incremental', help="specify optimization method of lambda as either 'batch' or 'incremental' (Default is 'incremental')")
parser.add_argument('-r', '--candidate-rules', type=str, default = 'amie', help="specify generation method of candidate rules as either 'probfoil' or 'amie' (Default is 'amie')")
parser.add_argument('-w', '--cost', type=float, default = 1.0, help="Misclassification Cost for negative examples")
#parser.add_argument('--test', type = str, help='Test Dataset File', default=None)
parser.add_argument('--minpca', type=float, default=0.00001, help='Minimum PCA Confidence Threshold for Amie', dest='minpca')
parser.add_argument('--minhc', type=float, default=0.00001, help='Minimum Standard Confidence Threshold for Amie', dest='minhc')
parser.add_argument('-q', '--quotes', action='store_true', help='Input -q to denote an input file with facts enclosed in double quotes')
parser.add_argument('--ssh', action='store_true', help='Input --ssh if the code is running on PINACS/HIMECS')
parser.add_argument('--cwLearning', action='store_true', help='Input --cwLearning for learning rule weights with SGD in Closed World')
parser.add_argument('-i', '--iterations', type=int, default=10000, help='Number of iterations of SGD', dest='iterations')
parser.add_argument('-a', '--maxAmieRules', type=int, default=None, help='Maximum number of candidate rules to be learned from AMIE', dest='maxAmieRules')
parser.add_argument('-d','--disableTypeConstraints', action='store_true', help='Input -d to ignore type constraints for learned rules')
parser.add_argument('--lr1', type=float, default=0.001, help='Learning Rate for Rule Weights', dest='lr1')
parser.add_argument('--lr2', type=float, default=0.0001, help='Learning Rate for Lambdas', dest='lr2')
return parser
class ProbLogLogFormatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self)
def format(self, message):
msg = str(message.msg) % message.args
lines = msg.split('\n')
if message.levelno < 10:
linestart = '[LVL%s] ' % message.levelno
else:
linestart = '[%s] ' % message.levelname
return linestart + ('\n' + linestart).join(lines)
def init_logger(verbose=None, name='problog', out=None):
"""Initialize default logger.
:param verbose: verbosity level (0: WARNING, 1: INFO, 2: DEBUG)
:type verbose: int
:param name: name of the logger (default: problog)
:type name: str
:return: result of ``logging.getLogger(name)``
:rtype: logging.Logger
"""
if out is None:
out = sys.stdout
logger = logging.getLogger(name)
ch = logging.StreamHandler(out)
# formatter = logging.Formatter('[%(levelname)s] %(message)s')
formatter = ProbLogLogFormatter()
ch.setFormatter(formatter)
logger.addHandler(ch)
if not verbose:
logger.setLevel(logging.WARNING)
elif verbose == 1:
logger.setLevel(logging.INFO)
logger.info('Output level\t\t\t\t\t\t: INFO')
elif verbose == 2:
logger.setLevel(logging.DEBUG)
logger.debug('Output level\t\t\t\t\t: DEBUG')
else:
level = max(1, 12 - verbose) # between 9 and 1
logger.setLevel(level)
logger.log(level, 'Output level\t\t\t\t\t\t: %s' % level)
return logger
def main(argv=sys.argv[1:]):
args = argparser().parse_args(argv)
if args.log is None:
logfile = None
lossfile = None
else:
logfile = open(args.log, 'w')
if args.verbose > 3:
lossfile = open(args.log[:-4] + ".loss", 'w')
log = init_logger(verbose=args.verbose, name='probfoil', out=logfile)
if args.verbose > 3:
log_loss = init_logger(verbose=args.verbose, name='loss', out=lossfile)
log.info('Arguments\t\t\t\t\t\t: %s' % ' '.join(argv))
# Load input files
if args.candidate_rules != "amie":
data = DataFile(*(PrologFile(source) for source in args.files))
else:
data = args.files
if args.probfoil1:
learn_class = ProbFOIL
else:
learn_class = ProbFOIL2
time_start = time()
learn = learn_class(data, logger='probfoil', **vars(args))
hypothesis = learn.learn()
time_total = time() - time_start
log.info('\n==================== OUTPUT ====================')
print ('\n=================== SETTINGS ===================')
log.info('\n=================== SETTINGS ===================')
for kv in vars(args).items():
print('%20s:\t%s' % kv)
log.info('%20s:\t%s' % kv)
learn.print_output(hypothesis)
printList = []
printList.append('================== STATISTICS ==================')
for name, value in learn.statistics():
printList.append('%20s:\t%s' % (name, value))
printList.append(' Total time:\t%.4fs' % time_total)
for line in printList:
log.info(line)
print(line)
if logfile:
logfile.close()
if __name__ == '__main__':
main()
# try:
# main()
# os.system('say "Your Program has Finished"')
# except Exception as e:
# print(e)
# os.system('say "Your Program has encountered an error."')
| __init__ |
ucb0stat.rs | #[doc = "Reader of register UCB0STAT"]
pub type R = crate::R<u8, super::UCB0STAT>;
#[doc = "Writer for register UCB0STAT"]
pub type W = crate::W<u8, super::UCB0STAT>;
#[doc = "Register UCB0STAT `reset()`'s with value 0"]
impl crate::ResetValue for super::UCB0STAT {
type Type = u8;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `UCALIFG`"]
pub type UCALIFG_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCALIFG`"]
pub struct UCALIFG_W<'a> {
w: &'a mut W,
}
impl<'a> UCALIFG_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u8) & 0x01);
self.w
}
}
#[doc = "Reader of field `UCSTTIFG`"]
pub type UCSTTIFG_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCSTTIFG`"]
pub struct UCSTTIFG_W<'a> {
w: &'a mut W,
}
impl<'a> UCSTTIFG_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits =
(self.w.bits & !(0x01 << 1)) | (((value as u8) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `UCSTPIFG`"]
pub type UCSTPIFG_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCSTPIFG`"]
pub struct UCSTPIFG_W<'a> {
w: &'a mut W,
}
impl<'a> UCSTPIFG_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits =
(self.w.bits & !(0x01 << 2)) | (((value as u8) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `UCNACKIFG`"]
pub type UCNACKIFG_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCNACKIFG`"]
pub struct UCNACKIFG_W<'a> {
w: &'a mut W,
}
impl<'a> UCNACKIFG_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits =
(self.w.bits & !(0x01 << 3)) | (((value as u8) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `UCBBUSY`"]
pub type UCBBUSY_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCBBUSY`"]
pub struct UCBBUSY_W<'a> {
w: &'a mut W,
}
impl<'a> UCBBUSY_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits =
(self.w.bits & !(0x01 << 4)) | (((value as u8) & 0x01) << 4);
self.w
}
}
#[doc = "Reader of field `UCGC`"]
pub type UCGC_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCGC`"]
pub struct UCGC_W<'a> {
w: &'a mut W,
}
impl<'a> UCGC_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits =
(self.w.bits & !(0x01 << 5)) | (((value as u8) & 0x01) << 5);
self.w
}
}
#[doc = "Reader of field `UCSCLLOW`"]
pub type UCSCLLOW_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCSCLLOW`"]
pub struct UCSCLLOW_W<'a> {
w: &'a mut W,
}
impl<'a> UCSCLLOW_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits =
(self.w.bits & !(0x01 << 6)) | (((value as u8) & 0x01) << 6);
self.w
}
}
#[doc = "Reader of field `UCLISTEN`"]
pub type UCLISTEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `UCLISTEN`"]
pub struct UCLISTEN_W<'a> {
w: &'a mut W,
}
impl<'a> UCLISTEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits =
(self.w.bits & !(0x01 << 7)) | (((value as u8) & 0x01) << 7);
self.w
}
}
impl R {
#[doc = "Bit 0 - Arbitration Lost interrupt Flag"]
#[inline(always)]
pub fn ucalifg(&self) -> UCALIFG_R {
UCALIFG_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - START Condition interrupt Flag"]
#[inline(always)]
pub fn ucsttifg(&self) -> UCSTTIFG_R {
UCSTTIFG_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - STOP Condition interrupt Flag"]
#[inline(always)]
pub fn ucstpifg(&self) -> UCSTPIFG_R {
UCSTPIFG_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - NAK Condition interrupt Flag"]
#[inline(always)]
pub fn ucnackifg(&self) -> UCNACKIFG_R {
UCNACKIFG_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Bus Busy Flag"]
#[inline(always)]
pub fn ucbbusy(&self) -> UCBBUSY_R {
UCBBUSY_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - General Call address received Flag"]
#[inline(always)]
pub fn | (&self) -> UCGC_R {
UCGC_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - SCL low"]
#[inline(always)]
pub fn ucscllow(&self) -> UCSCLLOW_R {
UCSCLLOW_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 7 - USCI Listen mode"]
#[inline(always)]
pub fn uclisten(&self) -> UCLISTEN_R {
UCLISTEN_R::new(((self.bits >> 7) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Arbitration Lost interrupt Flag"]
#[inline(always)]
pub fn ucalifg(&mut self) -> UCALIFG_W {
UCALIFG_W { w: self }
}
#[doc = "Bit 1 - START Condition interrupt Flag"]
#[inline(always)]
pub fn ucsttifg(&mut self) -> UCSTTIFG_W {
UCSTTIFG_W { w: self }
}
#[doc = "Bit 2 - STOP Condition interrupt Flag"]
#[inline(always)]
pub fn ucstpifg(&mut self) -> UCSTPIFG_W {
UCSTPIFG_W { w: self }
}
#[doc = "Bit 3 - NAK Condition interrupt Flag"]
#[inline(always)]
pub fn ucnackifg(&mut self) -> UCNACKIFG_W {
UCNACKIFG_W { w: self }
}
#[doc = "Bit 4 - Bus Busy Flag"]
#[inline(always)]
pub fn ucbbusy(&mut self) -> UCBBUSY_W {
UCBBUSY_W { w: self }
}
#[doc = "Bit 5 - General Call address received Flag"]
#[inline(always)]
pub fn ucgc(&mut self) -> UCGC_W {
UCGC_W { w: self }
}
#[doc = "Bit 6 - SCL low"]
#[inline(always)]
pub fn ucscllow(&mut self) -> UCSCLLOW_W {
UCSCLLOW_W { w: self }
}
#[doc = "Bit 7 - USCI Listen mode"]
#[inline(always)]
pub fn uclisten(&mut self) -> UCLISTEN_W {
UCLISTEN_W { w: self }
}
}
| ucgc |
platform_versioner_test.go | package platform
import (
"testing"
openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/rest"
)
type FakeDiscoverer struct {
info PlatformInfo
serverInfo *version.Info
groupList *v1.APIGroupList
doc *openapi_v2.Document
client rest.Interface
ServerVersionError error
ServerGroupsError error
OpenAPISchemaError error
}
func (d FakeDiscoverer) ServerVersion() (*version.Info, error) {
if d.ServerVersionError != nil {
return nil, d.ServerVersionError
}
return d.serverInfo, nil
}
func (d FakeDiscoverer) ServerGroups() (*v1.APIGroupList, error) {
if d.ServerGroupsError != nil {
return nil, d.ServerGroupsError
}
return d.groupList, nil
}
func (d FakeDiscoverer) OpenAPISchema() (*openapi_v2.Document, error) {
if d.OpenAPISchemaError != nil {
return nil, d.OpenAPISchemaError
}
return d.doc, nil
}
func (d FakeDiscoverer) RESTClient() rest.Interface {
return d.client
}
type FakePlatformVersioner struct {
Info PlatformInfo
Err error
}
func (pv FakePlatformVersioner) GetPlatformInfo(d Discoverer, cfg *rest.Config) (PlatformInfo, error) {
if pv.Err != nil {
return pv.Info, pv.Err
}
return pv.Info, nil
}
func TestK8SBasedPlatformVersioner_GetPlatformInfo(t *testing.T) {
pv := K8SBasedPlatformVersioner{}
fakeErr := errors.New("uh oh")
cases := []struct {
label string
discoverer Discoverer
config *rest.Config
expectedInfo PlatformInfo
expectedErr bool
}{
{
label: "case 1", // trigger error in client.ServerVersion(), only Name present on Info
discoverer: FakeDiscoverer{
ServerVersionError: fakeErr,
},
config: &rest.Config{},
expectedInfo: PlatformInfo{Name: Kubernetes},
expectedErr: true,
},
{
label: "case 2", // trigger error in client.ServerGroups(), K8S major/minor now present
discoverer: FakeDiscoverer{
ServerGroupsError: fakeErr,
serverInfo: &version.Info{
Major: "1",
Minor: "2",
},
},
config: &rest.Config{},
expectedInfo: PlatformInfo{Name: Kubernetes, K8SVersion: "1.2"},
expectedErr: true,
},
{
label: "case 3", // trigger no errors, simulate K8S platform (no OCP route present)
discoverer: FakeDiscoverer{
serverInfo: &version.Info{
Major: "1",
Minor: "2",
},
groupList: &v1.APIGroupList{
TypeMeta: v1.TypeMeta{},
Groups: []v1.APIGroup{},
},
},
config: &rest.Config{},
expectedInfo: PlatformInfo{Name: Kubernetes, K8SVersion: "1.2"},
expectedErr: false,
},
{
label: "case 4", // trigger no errors, simulate OCP route present
discoverer: FakeDiscoverer{
serverInfo: &version.Info{
Major: "1",
Minor: "2",
},
groupList: &v1.APIGroupList{
TypeMeta: v1.TypeMeta{},
Groups: []v1.APIGroup{{Name: "route.openshift.io"}},
},
},
config: &rest.Config{},
expectedInfo: PlatformInfo{Name: OpenShift, K8SVersion: "1.2"},
expectedErr: false,
},
}
for _, c := range cases {
info, err := pv.GetPlatformInfo(c.discoverer, c.config)
assert.Equal(t, c.expectedInfo, info, c.label+": mismatch in returned PlatformInfo")
if c.expectedErr {
assert.Error(t, err, c.label+": expected error, but none occurred")
}
}
}
func TestClientCallVersionComparsion(t *testing.T) {
pv := K8SBasedPlatformVersioner{}
testcases := []struct {
label string
discoverer Discoverer
config *rest.Config
expectedInfo int
expectedErr bool
}{
{
label: "case 1",
discoverer: FakeDiscoverer{
serverInfo: &version.Info{
Major: "1",
Minor: "16",
},
groupList: &v1.APIGroupList{
TypeMeta: v1.TypeMeta{},
Groups: []v1.APIGroup{{Name: "route.openshift.io"}},
},
},
config: &rest.Config{},
expectedInfo: 0,
expectedErr: false,
},
{
label: "case 2",
discoverer: FakeDiscoverer{
serverInfo: &version.Info{
Major: "1",
Minor: "16+",
},
groupList: &v1.APIGroupList{
TypeMeta: v1.TypeMeta{},
Groups: []v1.APIGroup{{Name: "route.openshift.io"}},
},
},
config: &rest.Config{}, | {
label: "case 3",
discoverer: FakeDiscoverer{
serverInfo: &version.Info{
Major: "1",
Minor: "14+",
},
groupList: &v1.APIGroupList{
TypeMeta: v1.TypeMeta{},
Groups: []v1.APIGroup{{Name: "route.openshift.io"}},
},
},
config: &rest.Config{},
expectedInfo: -1,
expectedErr: false,
},
{
label: "case 4",
discoverer: FakeDiscoverer{
serverInfo: &version.Info{
Major: "1",
Minor: "14not",
},
groupList: &v1.APIGroupList{
TypeMeta: v1.TypeMeta{},
Groups: []v1.APIGroup{{Name: "route.openshift.io"}},
},
},
config: &rest.Config{},
expectedInfo: -1,
expectedErr: true,
},
}
versionToTest := "4.3"
for _, tc := range testcases {
res, err := pv.CompareOpenShiftVersion(tc.discoverer, tc.config, versionToTest)
if tc.expectedErr {
assert.Error(t, err, "expected error")
} else {
assert.NoError(t, err, "unexpected error")
}
assert.Equal(t, tc.expectedInfo, res, "The expected and actual versions should be the same.")
}
} | expectedInfo: 0,
expectedErr: false,
}, |
formatConfirmedCases.ts | type DataType = {
attr: '検査実施人数'
value: number
children: [
{
attr: '陽性患者数'
value: number
children: [
{
attr: '入院中'
value: number
children: [
{
attr: '軽症・中等症'
value: number
},
{
attr: '重症'
value: number
},
{
attr: 'その他'
value: number
}
]
}, | value: number
},
{
attr: '死亡'
value: number
},
{
attr: '不明'
value: number
}
]
}
]
}
type ConfirmedCasesType = {
検査実施人数: number
陽性者数: number
入院中: number
軽症中等症: number
重症: number
死亡: number
退院: number
不明: number
その他: number
}
/**
* Format for *Chart component
*
* @param data - Raw data
*/
export default (data: DataType) => {
const formattedData: ConfirmedCasesType = {
検査実施人数: data.value,
陽性者数: data.children[0].value,
入院中: data.children[0].children[0].value,
軽症中等症: data.children[0].children[0].children[0].value,
重症: data.children[0].children[0].children[1].value,
死亡: data.children[0].children[2].value,
退院: data.children[0].children[1].value,
不明: data.children[0].children[3].value,
その他: data.children[0].children[0].children[2].value
}
return formattedData
} | {
attr: '退院' |
flow.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as td
class Flow(nn.Module):
"""
Building both normalizing flows and neural flows.
Example:
| >>> torch.manual_seed(123)
>>> dim = 2
>>> flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])
>>> x = torch.rand(1, dim)
>>> y, ljd = flow(x)
>>> y_inv, ljd_inv = flow.inverse(y)
Args:
base_dist (Type[torch.distributions]): Base distribution
transforms (List[st.flows]): List of invertible transformations
"""
def __init__(self, base_dist=None, transforms=[]):
super().__init__()
self.base_dist = base_dist
self.transforms = nn.ModuleList(transforms)
def forward(self, x, latent=None, mask=None, t=None, reverse=False, **kwargs):
"""
Args:
x (tensor): Input sampled from base density with shape (..., dim)
latent (tensor, optional): Conditional vector with shape (..., latent_dim)
Default: None
mask (tensor): Masking tensor with shape (..., 1)
Default: None
t (tensor, optional): Flow time end point. Default: None
reverse (bool, optional): Whether to perform an inverse. Default: False
Returns:
y (tensor): Output that follows target density (..., dim)
log_jac_diag (tensor): Log-Jacobian diagonal (..., dim)
"""
transforms = self.transforms[::-1] if reverse else self.transforms
_mask = 1 if mask is None else mask
log_jac_diag = torch.zeros_like(x).to(x)
for f in transforms:
if reverse:
x, ld = f.inverse(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
else:
x, ld = f.forward(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
log_jac_diag += ld * _mask
return x, log_jac_diag
def inverse(self, y, latent=None, mask=None, t=None, **kwargs):
""" Inverse of forward function with the same arguments. """
return self.forward(y, latent=latent, mask=mask, t=t, reverse=True, **kwargs)
def log_prob(self, x, **kwargs):
"""
Calculates log-probability of a sample.
Args:
x (tensor): Input with shape (..., dim)
Returns:
log_prob (tensor): Log-probability of the input with shape (..., 1)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need log-probability')
x, log_jac_diag = self.inverse(x, **kwargs)
log_prob = self.base_dist.log_prob(x) + log_jac_diag.sum(-1)
return log_prob.unsqueeze(-1)
def sample(self, num_samples, latent=None, mask=None, **kwargs):
"""
Transforms samples from the base to the target distribution.
Uses reparametrization trick.
Args:
num_samples (tuple or int): Shape of samples
latent (tensor): Latent conditioning vector with shape (..., latent_dim)
Returns:
x (tensor): Samples from target distribution with shape (*num_samples, dim)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need sampling')
if isinstance(num_samples, int):
num_samples = (num_samples,)
x = self.base_dist.rsample(num_samples)
x, log_jac_diag = self.forward(x, **kwargs)
return x | >>> import stribor as st
|
base_train.py | import tensorflow as tf
class BaseTrain:
"""Standard base_train-class for easy multiple-inheritance.
It is responsible for defining the functions to be implemented with any child.
Attributes:
sess: Tensorflow session to use.
model: Model to be trained.
data: Data_loader object to interact with dataset.
config: Config object to store data related to training, testing and validation.
logger: Logger object to use tensorboard.
"""
def __init__(self, sess, model, data, config, logger):
self.model = model
self.config = config
self.sess = sess
self.data = data
self.logger = logger
if not self.config.pretrain: # If not pretrain then initialize variables.
self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
self.sess.run(self.init)
def train(self):
"""Train the model for the number of epochs in config.num_epochs.
Calls validate_epoch if config.use_val is set to true and per config.val_per_epoch.
Returns:
"""
for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):
self.data.prepare_new_epoch_data()
self.train_epoch()
if self.config.use_val and (
cur_epoch % self.config.val_per_epoch == 0 or cur_epoch == self.config.num_epochs):
self.validate_epoch()
self.sess.run(self.model.increment_cur_epoch_tensor)
def train_epoch(self):
"""Implements the logic of training_epoch:
-Loop over the batches of the training data and call the train step for each.
-Add any summaries you want using the summary
"""
raise NotImplemented
def | (self):
"""Implements the logic of the train step:
-Run the tensorflow session
-Returns:
Any of the metrics needs to be summarized.
"""
raise NotImplementedError
def validate_epoch(self):
"""Implements the logic of validation_epoch:
-Loop over the batches of the validation data and call the validate step for each.
-Add any summaries you want using the summary
"""
raise NotImplemented
def validate_step(self):
"""Implements the logic of the validate step:
-Run the tensorflow session
-Returns:
Any of the metrics needs to be summarized.
"""
raise NotImplemented
| train_step |
bindata.go | // Code generated by go-bindata.
// sources:
// config/mysql.yml
// config/storage.yml
// DO NOT EDIT!
package assets
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _configMysqlYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\xce\x41\xae\xc2\x30\x0c\x84\xe1\xbd\x4f\x31\x27\xa8\x92\xd7\x27\x84\x7c\x19\xe4\x26\x2e\x20\xa5\x38\x8a\x5d\xb8\x3e\xea\x86\x35\x1b\x76\xb3\x99\x5f\x5f\xb3\x22\x8d\x09\x90\x5a\x87\xba\x33\x72\x9a\xd2\x94\xa7\x9c\x13\xcf\x73\x3a\x11\xb0\xbb\x0e\x46\x68\xc4\xdf\x4c\x40\x17\xf7\x97\x8d\xca\x9f\x45\x40\x95\x90\x45\x5c\x19\xeb\xb0\xeb\x72\x8f\x4b\xd5\xa7\x36\xeb\x9b\x3e\xe2\x38\x0d\x0b\x2b\xd6\x18\x51\x3a\x01\xe5\x26\xc3\x35\x18\x7b\xac\xe7\x6d\xf9\x27\x0a\xf5\xf8\x09\xe4\x08\x7f\x23\x78\x07\x00\x00\xff\xff\x8c\x67\x33\x28\x0b\x01\x00\x00")
func | () ([]byte, error) {
return bindataRead(
_configMysqlYml,
"config/mysql.yml",
)
}
func configMysqlYml() (*asset, error) {
bytes, err := configMysqlYmlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/mysql.yml", size: 267, mode: os.FileMode(420), modTime: time.Unix(1508780974, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _configStorageYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\xcf\x41\xaa\x83\x40\x0c\xc6\xf1\xfd\x9c\x22\x17\x78\x2f\xa8\x3b\x2f\xe0\x09\xda\xad\xc4\x9a\xaa\xd4\x4c\x64\x92\xf6\xfc\x45\x29\xb4\x83\xa5\xe0\x6e\xf8\xf8\xfd\x03\xe3\x6c\x5e\x07\x00\x73\x4d\x34\xb0\xad\x6f\x80\x3f\x88\x24\x5c\x43\x3f\xd9\xad\xd8\x16\x80\x85\x7c\xac\xe1\x1f\x5d\x16\x7c\xe9\xb6\xa3\x99\xe2\x85\x53\xbb\x9e\xc1\xb7\xfe\xec\xcb\x43\x7d\xb9\xeb\xab\x43\x7d\x15\x42\xcf\x0f\x9e\x75\x11\x8e\xbf\x7e\x96\xb4\xd3\xec\x32\x4a\x74\xdc\x66\xbc\x26\x1d\xba\xc9\xb3\x20\x92\x15\x7b\xbf\xae\x5f\xf9\xa8\xc2\x39\x3f\x19\x27\x43\x67\xf7\xb2\xca\x68\x93\xbb\xb3\xce\x77\x61\xc3\x26\x3c\x03\x00\x00\xff\xff\xbc\x98\xce\xf9\x9c\x01\x00\x00")
func configStorageYmlBytes() ([]byte, error) {
return bindataRead(
_configStorageYml,
"config/storage.yml",
)
}
func configStorageYml() (*asset, error) {
bytes, err := configStorageYmlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "config/storage.yml", size: 412, mode: os.FileMode(420), modTime: time.Unix(1508792066, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"config/mysql.yml": configMysqlYml,
"config/storage.yml": configStorageYml,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"config": &bintree{nil, map[string]*bintree{
"mysql.yml": &bintree{configMysqlYml, map[string]*bintree{}},
"storage.yml": &bintree{configStorageYml, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
| configMysqlYmlBytes |
magefile.go | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
//go:build mage
// +build mage
package main
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
"github.com/pkg/errors"
devtools "github.com/elastic/beats/v7/dev-tools/mage"
// mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/common"
// mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
// mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests"
// mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/test"
)
const (
hubID = "elastic"
logDriverName = "elastic-logging-plugin"
dockerPluginName = hubID + "/" + logDriverName
packageStagingDir = "build/package/"
packageEndDir = "build/distributions/"
rootImageName = "rootfsimage"
dockerfileTmpl = "Dockerfile.tmpl"
)
var (
buildDir = filepath.Join(packageStagingDir, logDriverName)
dockerExportPath = filepath.Join(packageStagingDir, "temproot.tar")
platformMap = map[string]map[string]interface{}{
"amd64": map[string]interface{}{
"from": "alpine:3.10",
},
"arm64": map[string]interface{}{
"from": "arm64v8/alpine:3.10",
},
}
)
func init() {
devtools.BeatLicense = "Elastic License"
devtools.BeatDescription = "The Docker Logging Driver is a docker plugin for the Elastic Stack."
devtools.Platforms = devtools.Platforms.Filter("linux/amd64 linux/arm64")
}
// getPluginName returns the fully qualified name:version string.
func getPluginName() (string, error) {
version, err := devtools.BeatQualifiedVersion()
if err != nil {
return "", errors.Wrap(err, "error getting beats version")
}
return dockerPluginName + ":" + version, nil
}
// createContainer builds the plugin and creates the container that will later become the rootfs used by the plugin
func createContainer(ctx context.Context, cli *client.Client, arch string) error {
dockerLogBeatDir, err := os.Getwd()
if err != nil {
return errors.Wrap(err, "error getting work dir")
}
if !strings.Contains(dockerLogBeatDir, "dockerlogbeat") {
return errors.Errorf("not in dockerlogbeat directory: %s", dockerLogBeatDir)
}
dockerfile := filepath.Join(packageStagingDir, "Dockerfile")
err = devtools.ExpandFile(dockerfileTmpl, dockerfile, platformMap[arch])
if err != nil {
return errors.Wrap(err, "error while expanding Dockerfile template")
}
// start to build the root container that'll be used to build the plugin
tmpDir, err := ioutil.TempDir("", "dockerBuildTar")
if err != nil {
return errors.Wrap(err, "error locating temp dir")
}
defer sh.Rm(tmpDir)
tarPath := filepath.Join(tmpDir, "tarRoot.tar")
err = sh.RunV("tar", "cf", tarPath, "./")
if err != nil {
return errors.Wrap(err, "error creating tar")
}
buildContext, err := os.Open(tarPath)
if err != nil {
return errors.Wrap(err, "error opening temp dur")
}
defer buildContext.Close()
buildOpts := types.ImageBuildOptions{
Tags: []string{rootImageName},
Dockerfile: dockerfile,
}
// build, wait for output
buildResp, err := cli.ImageBuild(ctx, buildContext, buildOpts)
if err != nil {
return errors.Wrap(err, "error building final container image")
}
defer buildResp.Body.Close()
// This blocks until the build operation completes
buildStr, errBufRead := ioutil.ReadAll(buildResp.Body)
if errBufRead != nil {
return errors.Wrap(err, "error reading from docker output")
}
fmt.Printf("%s\n", string(buildStr))
return nil
}
// BuildContainer builds docker rootfs container root
// There's a somewhat complicated process for this:
// * Create a container to build the plugin itself
// * Copy that to a bare-bones container that will become the runc container used by docker
// * Export that container
// * Unpack the tar from the exported container
// * send this to the plugin create API endpoint
func BuildContainer(ctx context.Context) error {
// setup
cli, err := newDockerClient(ctx)
if err != nil {
return errors.Wrap(err, "error creating docker client")
}
devtools.CreateDir(packageStagingDir)
devtools.CreateDir(packageEndDir)
err = os.MkdirAll(filepath.Join(buildDir, "rootfs"), 0755)
if err != nil {
return errors.Wrap(err, "error creating build dir")
}
for _, plat := range devtools.Platforms {
arch := plat.GOARCH()
if runtime.GOARCH != arch {
fmt.Println("Skippping building for", arch, "as runtime is different")
continue
}
err = createContainer(ctx, cli, arch)
if err != nil {
return errors.Wrap(err, "error creating base container")
}
// create the container that will become our rootfs
CreatedContainerBody, err := cli.ContainerCreate(ctx, &container.Config{Image: rootImageName}, nil, nil, nil, "")
if err != nil {
return errors.Wrap(err, "error creating container")
}
defer func() {
// cleanup
if _, noClean := os.LookupEnv("DOCKERLOGBEAT_NO_CLEANUP"); !noClean {
err = cleanDockerArtifacts(ctx, CreatedContainerBody.ID, cli)
if err != nil {
fmt.Fprintf(os.Stderr, "Error cleaning up docker: %s", err)
}
}
}()
fmt.Printf("Got image: %#v\n", CreatedContainerBody.ID)
file, err := os.Create(dockerExportPath)
if err != nil {
return errors.Wrap(err, "error creating tar archive")
}
// export the container to a tar file
exportReader, err := cli.ContainerExport(ctx, CreatedContainerBody.ID)
if err != nil {
return errors.Wrap(err, "error exporting container")
}
_, err = io.Copy(file, exportReader)
if err != nil {
return errors.Wrap(err, "error writing exported container")
}
// misc prepare operations
err = devtools.Copy("config.json", filepath.Join(buildDir, "config.json"))
if err != nil {
return errors.Wrap(err, "error copying config.json")
}
// unpack the tar file into a root directory, which is the format needed for the docker plugin create tool
err = sh.RunV("tar", "-xf", dockerExportPath, "-C", filepath.Join(buildDir, "rootfs"))
if err != nil {
return errors.Wrap(err, "error unpacking exported container")
}
}
return nil
}
func cleanDockerArtifacts(ctx context.Context, containerID string, cli *client.Client) error {
fmt.Printf("Removing container %s\n", containerID)
err := cli.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
if err != nil {
return errors.Wrap(err, "error removing container")
}
resp, err := cli.ImageRemove(ctx, rootImageName, types.ImageRemoveOptions{Force: true})
if err != nil {
return errors.Wrap(err, "error removing image")
}
fmt.Printf("Removed image: %#v\n", resp)
return nil
}
// Uninstall removes working objects and containers
func Uninstall(ctx context.Context) error {
cli, err := newDockerClient(ctx)
if err != nil {
return errors.Wrap(err, "error creating docker client")
}
// check to see if we have a plugin we need to remove
plugins, err := cli.PluginList(ctx, filters.Args{})
if err != nil {
return errors.Wrap(err, "error getting list of plugins")
}
toRemoveName := ""
for _, plugin := range plugins {
if strings.Contains(plugin.Name, logDriverName) {
toRemoveName = plugin.Name
break
}
}
if toRemoveName == "" {
return nil
}
err = cli.PluginDisable(ctx, toRemoveName, types.PluginDisableOptions{Force: true})
if err != nil {
return errors.Wrap(err, "error disabling plugin")
}
err = cli.PluginRemove(ctx, toRemoveName, types.PluginRemoveOptions{Force: true})
if err != nil {
return errors.Wrap(err, "error removing plugin")
}
return nil
}
// Install installs the plugin
func Install(ctx context.Context) error {
mg.Deps(Uninstall)
if _, err := os.Stat(filepath.Join(packageStagingDir, "rootfs")); os.IsNotExist(err) {
mg.Deps(Build)
}
name, err := getPluginName()
if err != nil {
return err
}
cli, err := newDockerClient(ctx)
if err != nil |
archive, err := tar(buildDir, "rootfs", "config.json")
if err != nil {
return errors.Wrap(err, "error creating archive of work dir")
}
err = cli.PluginCreate(ctx, archive, types.PluginCreateOptions{RepoName: name})
if err != nil {
return errors.Wrap(err, "error creating plugin")
}
err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{})
if err != nil {
return errors.Wrap(err, "error enabling plugin")
}
return nil
}
func tar(dir string, files ...string) (io.Reader, error) {
var archive bytes.Buffer
var stdErr bytes.Buffer
args := append([]string{"-C", dir, "-cf", "-"}, files...)
_, err := sh.Exec(nil, &archive, &stdErr, "tar", args...)
if err != nil {
return nil, errors.Wrap(err, stdErr.String())
}
return &archive, nil
}
// Export exports a "ready" root filesystem and config.json into a tarball
func Export() error {
version, err := devtools.BeatQualifiedVersion()
if err != nil {
return errors.Wrap(err, "error getting beats version")
}
if devtools.Snapshot {
version = version + "-SNAPSHOT"
}
for _, plat := range devtools.Platforms {
arch := plat.GOARCH()
tarballName := fmt.Sprintf("%s-%s-%s-%s.tar.gz", logDriverName, version, "docker-plugin", arch)
outpath := filepath.Join("../..", packageEndDir, tarballName)
err = os.Chdir(packageStagingDir)
if err != nil {
return errors.Wrap(err, "error changing directory")
}
err = sh.RunV("tar", "zcf", outpath,
filepath.Join(logDriverName, "rootfs"),
filepath.Join(logDriverName, "config.json"))
if err != nil {
return errors.Wrap(err, "error creating release tarball")
}
return errors.Wrap(devtools.CreateSHA512File(outpath), "failed to create .sha512 file")
}
return nil
}
// CrossBuild cross-builds the beat for all target platforms.
func CrossBuild() error {
return devtools.CrossBuild()
}
// Build builds the base container used by the docker plugin
func Build() {
mg.SerialDeps(CrossBuild, BuildContainer)
}
// GolangCrossBuild build the Beat binary inside the golang-builder.
// Do not use directly, use crossBuild instead.
func GolangCrossBuild() error {
buildArgs := devtools.DefaultBuildArgs()
buildArgs.CGO = false
buildArgs.Static = true
buildArgs.OutputDir = "build/plugin"
return devtools.GolangCrossBuild(buildArgs)
}
// Package builds a "release" tarball that can be used later with `docker plugin create`
func Package() {
start := time.Now()
defer func() { fmt.Println("package ran for", time.Since(start)) }()
if !isSupportedPlatform() {
fmt.Println(">> package: skipping because no supported platform is enabled")
return
}
mg.SerialDeps(Build, Export)
}
func isSupportedPlatform() bool {
_, isAMD64Selected := devtools.Platforms.Get("linux/amd64")
_, isARM64Selected := devtools.Platforms.Get("linux/arm64")
arch := runtime.GOARCH
if arch == "amd64" && isARM64Selected {
devtools.Platforms = devtools.Platforms.Remove("linux/arm64")
} else if arch == "arm64" && isAMD64Selected {
devtools.Platforms = devtools.Platforms.Remove("linux/amd64")
}
return len(devtools.Platforms) > 0
}
// BuildAndInstall builds and installs the plugin
func BuildAndInstall() {
mg.SerialDeps(Build, Install)
}
// Update is currently a dummy test for the `testsuite` target
func Update() {
fmt.Println(">> update: There is no Update for The Elastic Log Plugin")
}
func newDockerClient(ctx context.Context) (*client.Client, error) {
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return nil, err
}
cli.NegotiateAPIVersion(ctx)
return cli, nil
}
| {
return errors.Wrap(err, "error creating docker client")
} |
resize.min.js | "use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.default=void 0;var resizeTimeout,_xeUtils=_interopRequireDefault(require("xe-utils/methods/xe-utils")),_conf=_interopRequireDefault(require("../../conf")),_dom=_interopRequireDefault(require("../../tools/src/dom"));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function _classCallCheck(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function _defineProperties(e,t){for(var r=0;r<t.length;r++){var i=t[r];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(e,i.key,i)}}function _createClass(e,t,r){return t&&_defineProperties(e.prototype,t),r&&_defineProperties(e,r),e}var eventStore=[],defaultInterval=250,ResizeObserverPolyfill=function(){function | (e){_classCallCheck(this,t),this.tarList=[],this.callback=e}return _createClass(t,[{key:"observe",value:function(e){var t=this;e&&(this.tarList.includes(e)||this.tarList.push({target:e,width:e.clientWidth,heighe:e.clientHeight}),eventStore.length||eventListener(),eventStore.some(function(e){return e===t})||eventStore.push(this))}},{key:"unobserve",value:function(t){_xeUtils.default.remove(eventStore,function(e){return e.tarList.includes(t)})}},{key:"disconnect",value:function(){var t=this;_xeUtils.default.remove(eventStore,function(e){return e===t})}}]),t}(),Resize=_dom.default.browse.isDoc&&window.ResizeObserver||ResizeObserverPolyfill;function eventListener(){clearTimeout(resizeTimeout),resizeTimeout=setTimeout(eventHandle,_conf.default.resizeInterval||defaultInterval)}function eventHandle(){eventStore.length&&(eventStore.forEach(function(s){s.tarList.forEach(function(e){var t=e.target,r=e.width,i=e.heighe,n=t.clientWidth,o=t.clientHeight;(n&&r!==n||o&&i!==o)&&(e.width=n,e.heighe=o,requestAnimationFrame(s.callback))})}),eventListener())}var _default=Resize;exports.default=_default; | t |
check_attr.rs | //! This module implements some validity checks for attributes.
//! In particular it verifies that `#[inline]` and `#[repr]` attributes are
//! attached to items that actually support them and if there are
//! conflicts between multiple such attributes attached to the same
//! item.
use rustc_middle::hir::map::Map;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_ast::{ast, AttrStyle, Attribute, Lit, LitKind, NestedMetaItem};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{pluralize, struct_span_err, Applicability};
use rustc_feature::{AttributeDuplicates, AttributeType, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_hir::{self, FnSig, ForeignItem, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID};
use rustc_hir::{MethodKind, Target};
use rustc_session::lint::builtin::{
CONFLICTING_REPR_HINTS, INVALID_DOC_ATTRIBUTES, UNUSED_ATTRIBUTES,
};
use rustc_session::parse::feature_err;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{MultiSpan, Span, DUMMY_SP};
use std::collections::hash_map::Entry;
/// Maps an `impl` item to the attribute-checking `Target` it corresponds to.
///
/// Constants and type aliases map directly; for methods we must look at the
/// enclosing `impl` to decide whether this is a trait-impl method (which has
/// a body) or an inherent method.
pub(crate) fn target_from_impl_item<'tcx>(
    tcx: TyCtxt<'tcx>,
    impl_item: &hir::ImplItem<'_>,
) -> Target {
    match impl_item.kind {
        hir::ImplItemKind::Const(..) => Target::AssocConst,
        hir::ImplItemKind::TyAlias(..) => Target::AssocTy,
        hir::ImplItemKind::Fn(..) => {
            // The item enclosing a method must be an `impl`; anything else is
            // a compiler invariant violation.
            let parent = tcx.hir().expect_item(tcx.hir().get_parent_item(impl_item.hir_id()));
            let is_trait_impl = match &parent.kind {
                hir::ItemKind::Impl(impl_) => impl_.of_trait.is_some(),
                _ => bug!("parent of an ImplItem must be an Impl"),
            };
            if is_trait_impl {
                Target::Method(MethodKind::Trait { body: true })
            } else {
                Target::Method(MethodKind::Inherent)
            }
        }
    }
}
/// An item-like HIR node that the attribute checks may need to inspect:
/// either a regular item or an item inside a foreign (`extern`) block.
#[derive(Clone, Copy)]
enum ItemLike<'tcx> {
    Item(&'tcx Item<'tcx>),
    ForeignItem(&'tcx ForeignItem<'tcx>),
}
/// HIR visitor that validates the placement and arguments of built-in
/// attributes, emitting hard errors or lints as appropriate.
struct CheckAttrVisitor<'tcx> {
    tcx: TyCtxt<'tcx>,
}
impl CheckAttrVisitor<'tcx> {
/// Checks any attribute.
fn check_attributes(
&self,
hir_id: HirId,
span: &Span,
target: Target,
item: Option<ItemLike<'_>>,
) {
let mut doc_aliases = FxHashMap::default();
let mut is_valid = true;
let mut specified_inline = None;
let mut seen = FxHashMap::default();
let attrs = self.tcx.hir().attrs(hir_id);
for attr in attrs {
let attr_is_valid = match attr.name_or_empty() {
sym::inline => self.check_inline(hir_id, attr, span, target),
sym::non_exhaustive => self.check_non_exhaustive(hir_id, attr, span, target),
sym::marker => self.check_marker(hir_id, attr, span, target),
sym::target_feature => self.check_target_feature(hir_id, attr, span, target),
sym::track_caller => {
self.check_track_caller(hir_id, &attr.span, attrs, span, target)
}
sym::doc => self.check_doc_attrs(
attr,
hir_id,
target,
&mut specified_inline,
&mut doc_aliases,
),
sym::no_link => self.check_no_link(hir_id, &attr, span, target),
sym::export_name => self.check_export_name(hir_id, &attr, span, target),
sym::rustc_layout_scalar_valid_range_start
| sym::rustc_layout_scalar_valid_range_end => {
self.check_rustc_layout_scalar_valid_range(&attr, span, target)
}
sym::allow_internal_unstable => {
self.check_allow_internal_unstable(hir_id, &attr, span, target, &attrs)
}
sym::rustc_allow_const_fn_unstable => {
self.check_rustc_allow_const_fn_unstable(hir_id, &attr, span, target)
}
sym::naked => self.check_naked(hir_id, attr, span, target),
sym::rustc_legacy_const_generics => {
self.check_rustc_legacy_const_generics(&attr, span, target, item)
}
sym::rustc_clean
| sym::rustc_dirty
| sym::rustc_if_this_changed
| sym::rustc_then_this_would_need => self.check_rustc_dirty_clean(&attr),
sym::cmse_nonsecure_entry => self.check_cmse_nonsecure_entry(attr, span, target),
sym::default_method_body_is_const => {
self.check_default_method_body_is_const(attr, span, target)
}
sym::must_not_suspend => self.check_must_not_suspend(&attr, span, target),
sym::must_use => self.check_must_use(hir_id, &attr, span, target),
sym::rustc_const_unstable
| sym::rustc_const_stable
| sym::unstable
| sym::stable
| sym::rustc_promotable => self.check_stability_promotable(&attr, span, target),
_ => true,
};
is_valid &= attr_is_valid;
// lint-only checks
match attr.name_or_empty() {
sym::cold => self.check_cold(hir_id, attr, span, target),
sym::link_name => self.check_link_name(hir_id, attr, span, target),
sym::link_section => self.check_link_section(hir_id, attr, span, target),
sym::no_mangle => self.check_no_mangle(hir_id, attr, span, target),
sym::deprecated | sym::rustc_deprecated => {
self.check_deprecated(hir_id, attr, span, target)
}
sym::macro_use | sym::macro_escape => self.check_macro_use(hir_id, attr, target),
sym::path => self.check_generic_attr(hir_id, attr, target, &[Target::Mod]),
sym::cfg_attr => self.check_cfg_attr(hir_id, attr),
sym::plugin_registrar => self.check_plugin_registrar(hir_id, attr, target),
sym::macro_export => self.check_macro_export(hir_id, attr, target),
sym::ignore | sym::should_panic | sym::proc_macro_derive => {
self.check_generic_attr(hir_id, attr, target, &[Target::Fn])
}
sym::automatically_derived => {
self.check_generic_attr(hir_id, attr, target, &[Target::Impl])
}
sym::no_implicit_prelude => {
self.check_generic_attr(hir_id, attr, target, &[Target::Mod])
}
_ => {}
}
let builtin = attr.ident().and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name));
if hir_id != CRATE_HIR_ID {
if let Some(BuiltinAttribute { type_: AttributeType::CrateLevel, .. }) =
attr.ident().and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name))
{
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
let msg = match attr.style {
ast::AttrStyle::Outer => {
"crate-level attribute should be an inner attribute: add an exclamation \
mark: `#![foo]`"
}
ast::AttrStyle::Inner => "crate-level attribute should be in the root module",
};
lint.build(msg).emit()
});
}
}
if let Some(BuiltinAttribute { duplicates, .. }) = builtin {
check_duplicates(self.tcx, attr, hir_id, *duplicates, &mut seen);
}
// Warn on useless empty attributes.
if matches!(
attr.name_or_empty(),
sym::macro_use
| sym::allow
| sym::warn
| sym::deny
| sym::forbid
| sym::feature
| sym::repr
| sym::target_feature
) && attr.meta_item_list().map_or(false, |list| list.is_empty())
{
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build("unused attribute")
.span_suggestion(
attr.span,
"remove this attribute",
String::new(),
Applicability::MachineApplicable,
)
.note(&format!(
"attribute `{}` with an empty list has no effect",
attr.name_or_empty()
))
.emit();
});
}
}
if !is_valid {
return;
}
if matches!(target, Target::Closure | Target::Fn | Target::Method(_) | Target::ForeignFn) {
self.tcx.ensure().codegen_fn_attrs(self.tcx.hir().local_def_id(hir_id));
}
self.check_repr(attrs, span, target, item, hir_id);
self.check_used(attrs, target);
}
fn inline_attr_str_error_with_macro_def(&self, hir_id: HirId, attr: &Attribute, sym: &str) {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build(&format!(
"`#[{}]` is ignored on struct fields, match arms and macro defs",
sym,
))
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
)
.note(
"see issue #80564 <https://github.com/rust-lang/rust/issues/80564> \
for more information",
)
.emit();
});
}
fn inline_attr_str_error_without_macro_def(&self, hir_id: HirId, attr: &Attribute, sym: &str) {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build(&format!("`#[{}]` is ignored on struct fields and match arms", sym))
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
)
.note(
"see issue #80564 <https://github.com/rust-lang/rust/issues/80564> \
for more information",
)
.emit();
});
}
    /// Checks if an `#[inline]` is applied to a function or a closure. Returns `true` if valid.
    fn check_inline(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
        match target {
            Target::Fn
            | Target::Closure
            | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
            // Bodiless items cannot be inlined, but this is only worth a lint.
            Target::Method(MethodKind::Trait { body: false }) | Target::ForeignFn => {
                self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
                    lint.build("`#[inline]` is ignored on function prototypes").emit()
                });
                true
            }
            // FIXME(#65833): We permit associated consts to have an `#[inline]` attribute with
            // just a lint, because we previously erroneously allowed it and some crates used it
            // accidentally, so to be compatible with crates depending on them, we can't throw an
            // error here.
            Target::AssocConst => {
                self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
                    lint.build("`#[inline]` is ignored on constants")
                        .warn(
                            "this was previously accepted by the compiler but is \
                             being phased out; it will become a hard error in \
                             a future release!",
                        )
                        .note(
                            "see issue #65833 <https://github.com/rust-lang/rust/issues/65833> \
                             for more information",
                        )
                        .emit();
                });
                true
            }
            // FIXME(#80564): Same for fields, arms, and macro defs
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "inline");
                true
            }
            _ => {
                struct_span_err!(
                    self.tcx.sess,
                    attr.span,
                    E0518,
                    "attribute should be applied to function or closure",
                )
                .span_label(*span, "not a function or closure")
                .emit();
                false
            }
        }
    }
    /// Lints when an attribute is applied to a target outside `allowed_targets`,
    /// listing the supported targets in prose ("as", "as and bs", "as, bs, and cs").
    fn check_generic_attr(
        &self,
        hir_id: HirId,
        attr: &Attribute,
        target: Target,
        allowed_targets: &[Target],
    ) {
        if !allowed_targets.iter().any(|t| t == &target) {
            let name = attr.name_or_empty();
            let mut i = allowed_targets.iter();
            // Pluralize the first target; it seeds the accumulator for the fold.
            let b = i.next().map_or_else(String::new, |t| t.to_string() + "s");
            // Join the remaining targets: `i` here indexes the *remaining*
            // iterator, so `i == allowed_targets.len() - 2` marks the final
            // element — Oxford comma for 3+, plain " and " for exactly 2.
            let supported_names = i.enumerate().fold(b, |mut b, (i, allowed_target)| {
                if allowed_targets.len() > 2 && i == allowed_targets.len() - 2 {
                    b.push_str(", and ");
                } else if allowed_targets.len() == 2 && i == allowed_targets.len() - 2 {
                    b.push_str(" and ");
                } else {
                    b.push_str(", ");
                }
                // Pluralize
                b.push_str(&(allowed_target.to_string() + "s"));
                b
            });
            self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
                lint.build(&format!("`#[{name}]` only has an effect on {}", supported_names))
                    .emit();
            });
        }
    }
    /// Checks if `#[naked]` is applied to a function definition. Returns `true` if valid.
    fn check_naked(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
        match target {
            Target::Fn
            | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
            // `#[naked]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "naked");
                true
            }
            _ => {
                self.tcx
                    .sess
                    .struct_span_err(
                        attr.span,
                        "attribute should be applied to a function definition",
                    )
                    .span_label(*span, "not a function definition")
                    .emit();
                false
            }
        }
    }
/// Checks if `#[cmse_nonsecure_entry]` is applied to a function definition.
fn check_cmse_nonsecure_entry(&self, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::Fn
| Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
_ => {
self.tcx
.sess
.struct_span_err(
attr.span,
"attribute should be applied to a function definition",
)
.span_label(*span, "not a function definition")
.emit();
false
}
}
}
    /// Checks if a `#[track_caller]` is applied to a non-naked function. Returns `true` if valid.
    fn check_track_caller(
        &self,
        hir_id: HirId,
        attr_span: &Span,
        attrs: &'hir [Attribute],
        span: &Span,
        target: Target,
    ) -> bool {
        match target {
            // `#[track_caller]` combined with `#[naked]` is rejected outright,
            // regardless of the target kind (checked first via the guard).
            _ if attrs.iter().any(|attr| attr.has_name(sym::naked)) => {
                struct_span_err!(
                    self.tcx.sess,
                    *attr_span,
                    E0736,
                    "cannot use `#[track_caller]` with `#[naked]`",
                )
                .emit();
                false
            }
            Target::Fn | Target::Method(..) | Target::ForeignFn | Target::Closure => true,
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
            // `#[track_caller]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                for attr in attrs {
                    self.inline_attr_str_error_with_macro_def(hir_id, attr, "track_caller");
                }
                true
            }
            _ => {
                struct_span_err!(
                    self.tcx.sess,
                    *attr_span,
                    E0739,
                    "attribute should be applied to function"
                )
                .span_label(*span, "not a function")
                .emit();
                false
            }
        }
    }
    /// Checks if the `#[non_exhaustive]` attribute on an `item` is valid. Returns `true` if valid.
    fn check_non_exhaustive(
        &self,
        hir_id: HirId,
        attr: &Attribute,
        span: &Span,
        target: Target,
    ) -> bool {
        match target {
            Target::Struct | Target::Enum | Target::Variant => true,
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
            // `#[non_exhaustive]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "non_exhaustive");
                true
            }
            _ => {
                struct_span_err!(
                    self.tcx.sess,
                    attr.span,
                    E0701,
                    "attribute can only be applied to a struct or enum"
                )
                .span_label(*span, "not a struct or enum")
                .emit();
                false
            }
        }
    }
    /// Checks if the `#[marker]` attribute on an `item` is valid. Returns `true` if valid.
    fn check_marker(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
        match target {
            Target::Trait => true,
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
            // `#[marker]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "marker");
                true
            }
            _ => {
                self.tcx
                    .sess
                    .struct_span_err(attr.span, "attribute can only be applied to a trait")
                    .span_label(*span, "not a trait")
                    .emit();
                false
            }
        }
    }
    /// Checks if the `#[target_feature]` attribute on `item` is valid. Returns `true` if valid.
    fn check_target_feature(
        &self,
        hir_id: HirId,
        attr: &Attribute,
        span: &Span,
        target: Target,
    ) -> bool {
        match target {
            Target::Fn
            | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
            // FIXME: #[target_feature] was previously erroneously allowed on statements and some
            // crates used this, so only emit a warning.
            Target::Statement => {
                self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
                    lint.build("attribute should be applied to a function")
                        .warn(
                            "this was previously accepted by the compiler but is \
                             being phased out; it will become a hard error in \
                             a future release!",
                        )
                        .span_label(*span, "not a function")
                        .emit();
                });
                true
            }
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
            // `#[target_feature]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "target_feature");
                true
            }
            _ => {
                self.tcx
                    .sess
                    .struct_span_err(attr.span, "attribute should be applied to a function")
                    .span_label(*span, "not a function")
                    .emit();
                false
            }
        }
    }
fn doc_attr_str_error(&self, meta: &NestedMetaItem, attr_name: &str) {
self.tcx
.sess
.struct_span_err(
meta.span(),
&format!("doc {0} attribute expects a string: #[doc({0} = \"a\")]", attr_name),
)
.emit();
}
    /// Validates one `#[doc(alias)]` value, coming either from the list form
    /// (`is_list == true`, i.e. `#[doc(alias("..."))]`) or from the key/value
    /// form (`#[doc(alias = "...")]`).
    ///
    /// The alias must be non-empty, contain no quote characters or non-space
    /// whitespace, not start or end with a space, appear on a target that can
    /// be linked to, and differ from the item's own name. A duplicated alias
    /// only triggers a lint. Returns `true` if the alias is valid.
    fn check_doc_alias_value(
        &self,
        meta: &NestedMetaItem,
        doc_alias: &str,
        hir_id: HirId,
        target: Target,
        is_list: bool,
        aliases: &mut FxHashMap<String, Span>,
    ) -> bool {
        let tcx = self.tcx;
        // Shared error reporter: prefixes `msg` with the exact syntax form used,
        // so the diagnostic mirrors what the user wrote. Always returns `false`.
        let err_fn = move |span: Span, msg: &str| {
            tcx.sess.span_err(
                span,
                &format!(
                    "`#[doc(alias{})]` {}",
                    if is_list { "(\"...\")" } else { " = \"...\"" },
                    msg,
                ),
            );
            false
        };
        if doc_alias.is_empty() {
            return err_fn(
                meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
                "attribute cannot have empty value",
            );
        }
        // Quotes and non-space whitespace are rejected; a plain space is fine
        // as long as it is not leading/trailing (checked below).
        if let Some(c) =
            doc_alias.chars().find(|&c| c == '"' || c == '\'' || (c.is_whitespace() && c != ' '))
        {
            self.tcx.sess.span_err(
                meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
                &format!(
                    "{:?} character isn't allowed in `#[doc(alias{})]`",
                    c,
                    if is_list { "(\"...\")" } else { " = \"...\"" },
                ),
            );
            return false;
        }
        if doc_alias.starts_with(' ') || doc_alias.ends_with(' ') {
            return err_fn(
                meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
                "cannot start or end with ' '",
            );
        }
        // Reject targets that an alias cannot usefully link to.
        if let Some(err) = match target {
            Target::Impl => Some("implementation block"),
            Target::ForeignMod => Some("extern block"),
            Target::AssocTy => {
                let parent_hir_id = self.tcx.hir().get_parent_item(hir_id);
                let containing_item = self.tcx.hir().expect_item(parent_hir_id);
                if Target::from_item(containing_item) == Target::Impl {
                    Some("type alias in implementation block")
                } else {
                    None
                }
            }
            Target::AssocConst => {
                let parent_hir_id = self.tcx.hir().get_parent_item(hir_id);
                let containing_item = self.tcx.hir().expect_item(parent_hir_id);
                // We can't link to trait impl's consts.
                let err = "associated constant in trait implementation block";
                match containing_item.kind {
                    ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) => Some(err),
                    _ => None,
                }
            }
            // we check the validity of params elsewhere
            Target::Param => return false,
            _ => None,
        } {
            return err_fn(meta.span(), &format!("isn't allowed on {}", err));
        }
        let item_name = self.tcx.hir().name(hir_id);
        if &*item_name.as_str() == doc_alias {
            return err_fn(meta.span(), "is the same as the item's name");
        }
        let span = meta.span();
        // A duplicated alias is accepted (first occurrence wins) but linted.
        if let Err(entry) = aliases.try_insert(doc_alias.to_owned(), span) {
            self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, span, |lint| {
                lint.build("doc alias is duplicated")
                    .span_label(*entry.entry.get(), "first defined here")
                    .emit();
            });
        }
        true
    }
fn check_doc_alias(
&self,
meta: &NestedMetaItem,
hir_id: HirId,
target: Target,
aliases: &mut FxHashMap<String, Span>,
) -> bool {
if let Some(values) = meta.meta_item_list() {
let mut errors = 0;
for v in values {
match v.literal() {
Some(l) => match l.kind {
LitKind::Str(s, _) => {
if !self.check_doc_alias_value(
v,
&s.as_str(),
hir_id,
target,
true,
aliases,
) {
errors += 1;
}
}
_ => {
self.tcx
.sess
.struct_span_err(
v.span(),
"`#[doc(alias(\"a\"))]` expects string literals",
)
.emit();
errors += 1;
}
},
None => {
self.tcx
.sess
.struct_span_err(
v.span(),
"`#[doc(alias(\"a\"))]` expects string literals",
)
.emit();
errors += 1;
}
}
}
errors == 0
} else if let Some(doc_alias) = meta.value_str().map(|s| s.to_string()) {
self.check_doc_alias_value(meta, &doc_alias, hir_id, target, false, aliases)
} else {
self.tcx
.sess
.struct_span_err(
meta.span(),
"doc alias attribute expects a string `#[doc(alias = \"a\")]` or a list of \
strings `#[doc(alias(\"a\", \"b\"))]`",
)
.emit();
false
}
}
    /// Validates `#[doc(keyword = "...")]`: the value must be a non-empty valid
    /// identifier and the attribute may only appear on an empty module.
    /// Returns `true` if valid.
    fn check_doc_keyword(&self, meta: &NestedMetaItem, hir_id: HirId) -> bool {
        let doc_keyword = meta.value_str().map(|s| s.to_string()).unwrap_or_else(String::new);
        if doc_keyword.is_empty() {
            self.doc_attr_str_error(meta, "keyword");
            return false;
        }
        // The attribute is only meaningful on a module item; anything else
        // (including non-item nodes) is rejected.
        match self.tcx.hir().find(hir_id).and_then(|node| match node {
            hir::Node::Item(item) => Some(&item.kind),
            _ => None,
        }) {
            Some(ItemKind::Mod(ref module)) => {
                if !module.item_ids.is_empty() {
                    self.tcx
                        .sess
                        .struct_span_err(
                            meta.span(),
                            "`#[doc(keyword = \"...\")]` can only be used on empty modules",
                        )
                        .emit();
                    return false;
                }
            }
            _ => {
                self.tcx
                    .sess
                    .struct_span_err(
                        meta.span(),
                        "`#[doc(keyword = \"...\")]` can only be used on modules",
                    )
                    .emit();
                return false;
            }
        }
        if !rustc_lexer::is_ident(&doc_keyword) {
            self.tcx
                .sess
                .struct_span_err(
                    meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
                    &format!("`{}` is not a valid identifier", doc_keyword),
                )
                .emit();
            return false;
        }
        true
    }
    /// Checks `#[doc(inline)]`/`#[doc(no_inline)]` attributes. Returns `true` if valid.
    ///
    /// A doc inlining attribute is invalid if it is applied to a non-`use` item, or
    /// if there are conflicting attributes for one item.
    ///
    /// `specified_inline` is used to keep track of whether we have
    /// already seen an inlining attribute for this item.
    /// If so, `specified_inline` holds the value and the span of
    /// the first `inline`/`no_inline` attribute.
    fn check_doc_inline(
        &self,
        attr: &Attribute,
        meta: &NestedMetaItem,
        hir_id: HirId,
        target: Target,
        specified_inline: &mut Option<(bool, Span)>,
    ) -> bool {
        if target == Target::Use || target == Target::ExternCrate {
            let do_inline = meta.name_or_empty() == sym::inline;
            if let Some((prev_inline, prev_span)) = *specified_inline {
                // Mixing `inline` with `no_inline` on one item is a hard
                // error; repeating the same choice is accepted silently.
                if do_inline != prev_inline {
                    let mut spans = MultiSpan::from_spans(vec![prev_span, meta.span()]);
                    spans.push_span_label(prev_span, String::from("this attribute..."));
                    spans.push_span_label(
                        meta.span(),
                        String::from("...conflicts with this attribute"),
                    );
                    self.tcx
                        .sess
                        .struct_span_err(spans, "conflicting doc inlining attributes")
                        .help("remove one of the conflicting attributes")
                        .emit();
                    return false;
                }
                true
            } else {
                *specified_inline = Some((do_inline, meta.span()));
                true
            }
        } else {
            // On any other target the attribute has no effect, so lint.
            self.tcx.struct_span_lint_hir(
                INVALID_DOC_ATTRIBUTES,
                hir_id,
                meta.span(),
                |lint| {
                    let mut err = lint.build(
                        "this attribute can only be applied to a `use` item",
                    );
                    err.span_label(meta.span(), "only applicable on `use` items");
                    if attr.style == AttrStyle::Outer {
                        err.span_label(
                            self.tcx.hir().span(hir_id),
                            "not a `use` item",
                        );
                    }
                    err.note("read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline for more information")
                        .emit();
                },
            );
            false
        }
    }
/// Checks that an attribute is *not* used at the crate level. Returns `true` if valid.
fn check_attr_not_crate_level(
&self,
meta: &NestedMetaItem,
hir_id: HirId,
attr_name: &str,
) -> bool {
if CRATE_HIR_ID == hir_id {
self.tcx
.sess
.struct_span_err(
meta.span(),
&format!(
"`#![doc({} = \"...\")]` isn't allowed as a crate-level attribute",
attr_name,
),
)
.emit();
return false;
}
true
}
    /// Checks that an attribute is used at the crate level. Returns `true` if valid.
    fn check_attr_crate_level(
        &self,
        attr: &Attribute,
        meta: &NestedMetaItem,
        hir_id: HirId,
    ) -> bool {
        if hir_id != CRATE_HIR_ID {
            self.tcx.struct_span_lint_hir(
                INVALID_DOC_ATTRIBUTES,
                hir_id,
                meta.span(),
                |lint| {
                    let mut err = lint.build(
                        "this attribute can only be applied at the crate level",
                    );
                    // An outer attribute on a top-level item can be fixed by
                    // turning `#[...]` into the inner form `#![...]`, which is
                    // done by inserting `!` right after the leading `#`.
                    if attr.style == AttrStyle::Outer && self.tcx.hir().get_parent_item(hir_id) == CRATE_HIR_ID {
                        if let Ok(mut src) =
                            self.tcx.sess.source_map().span_to_snippet(attr.span)
                        {
                            src.insert(1, '!');
                            err.span_suggestion_verbose(
                                attr.span,
                                "to apply to the crate, use an inner attribute",
                                src,
                                Applicability::MaybeIncorrect,
                            );
                        } else {
                            // Snippet unavailable (e.g. attribute from expansion):
                            // fall back to a plain help message.
                            err.span_help(
                                attr.span,
                                "to apply to the crate, use an inner attribute",
                            );
                        }
                    }
                    err.note("read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information")
                        .emit();
                },
            );
            return false;
        }
        true
    }
    /// Checks that `doc(test(...))` attribute contains only valid attributes. Returns `true` if
    /// valid.
    fn check_test_attr(&self, meta: &NestedMetaItem, hir_id: HirId) -> bool {
        let mut is_valid = true;
        if let Some(metas) = meta.meta_item_list() {
            for i_meta in metas {
                match i_meta.name_or_empty() {
                    // The only recognized `doc(test(...))` sub-attributes.
                    sym::attr | sym::no_crate_inject => {}
                    _ => {
                        self.tcx.struct_span_lint_hir(
                            INVALID_DOC_ATTRIBUTES,
                            hir_id,
                            i_meta.span(),
                            |lint| {
                                lint.build(&format!(
                                    "unknown `doc(test)` attribute `{}`",
                                    rustc_ast_pretty::pprust::path_to_string(
                                        &i_meta.meta_item().unwrap().path
                                    ),
                                ))
                                .emit();
                            },
                        );
                        is_valid = false;
                    }
                }
            }
        } else {
            // `doc(test)` without a parenthesized list is also invalid.
            self.tcx.struct_span_lint_hir(INVALID_DOC_ATTRIBUTES, hir_id, meta.span(), |lint| {
                lint.build("`#[doc(test(...)]` takes a list of attributes").emit();
            });
            is_valid = false;
        }
        is_valid
    }
    /// Runs various checks on `#[doc]` attributes. Returns `true` if valid.
    ///
    /// `specified_inline` should be initialized to `None` and kept for the scope
    /// of one item. Read the documentation of [`check_doc_inline`] for more information.
    ///
    /// [`check_doc_inline`]: Self::check_doc_inline
    fn check_doc_attrs(
        &self,
        attr: &Attribute,
        hir_id: HirId,
        target: Target,
        specified_inline: &mut Option<(bool, Span)>,
        aliases: &mut FxHashMap<String, Span>,
    ) -> bool {
        let mut is_valid = true;
        if let Some(list) = attr.meta().and_then(|mi| mi.meta_item_list().map(|l| l.to_vec())) {
            for meta in &list {
                if let Some(i_meta) = meta.meta_item() {
                    // Guarded arms run the specific validators; the big
                    // list arm below accepts everything known that needs no
                    // further validation here.
                    match i_meta.name_or_empty() {
                        sym::alias
                            if !self.check_attr_not_crate_level(&meta, hir_id, "alias")
                                || !self.check_doc_alias(&meta, hir_id, target, aliases) =>
                        {
                            is_valid = false
                        }
                        sym::keyword
                            if !self.check_attr_not_crate_level(&meta, hir_id, "keyword")
                                || !self.check_doc_keyword(&meta, hir_id) =>
                        {
                            is_valid = false
                        }
                        // These forms are only meaningful at the crate level.
                        sym::html_favicon_url
                        | sym::html_logo_url
                        | sym::html_playground_url
                        | sym::issue_tracker_base_url
                        | sym::html_root_url
                        | sym::html_no_source
                        | sym::test
                            if !self.check_attr_crate_level(&attr, &meta, hir_id) =>
                        {
                            is_valid = false;
                        }
                        sym::inline | sym::no_inline
                            if !self.check_doc_inline(
                                &attr,
                                &meta,
                                hir_id,
                                target,
                                specified_inline,
                            ) =>
                        {
                            is_valid = false;
                        }
                        // no_default_passes: deprecated
                        // passes: deprecated
                        // plugins: removed, but rustdoc warns about it itself
                        sym::alias
                        | sym::cfg
                        | sym::cfg_hide
                        | sym::hidden
                        | sym::html_favicon_url
                        | sym::html_logo_url
                        | sym::html_no_source
                        | sym::html_playground_url
                        | sym::html_root_url
                        | sym::inline
                        | sym::issue_tracker_base_url
                        | sym::keyword
                        | sym::masked
                        | sym::no_default_passes
                        | sym::no_inline
                        | sym::notable_trait
                        | sym::passes
                        | sym::plugins => {}
                        sym::test => {
                            if !self.check_test_attr(&meta, hir_id) {
                                is_valid = false;
                            }
                        }
                        sym::primitive => {
                            if !self.tcx.features().rustdoc_internals {
                                self.tcx.struct_span_lint_hir(
                                    INVALID_DOC_ATTRIBUTES,
                                    hir_id,
                                    i_meta.span,
                                    |lint| {
                                        let mut diag = lint.build(
                                            "`doc(primitive)` should never have been stable",
                                        );
                                        diag.emit();
                                    },
                                );
                            }
                        }
                        _ => {
                            // Unknown sub-attribute: lint, with targeted
                            // migration hints for removed `spotlight`/`include`.
                            self.tcx.struct_span_lint_hir(
                                INVALID_DOC_ATTRIBUTES,
                                hir_id,
                                i_meta.span,
                                |lint| {
                                    let mut diag = lint.build(&format!(
                                        "unknown `doc` attribute `{}`",
                                        rustc_ast_pretty::pprust::path_to_string(&i_meta.path),
                                    ));
                                    if i_meta.has_name(sym::spotlight) {
                                        diag.note(
                                            "`doc(spotlight)` was renamed to `doc(notable_trait)`",
                                        );
                                        diag.span_suggestion_short(
                                            i_meta.span,
                                            "use `notable_trait` instead",
                                            String::from("notable_trait"),
                                            Applicability::MachineApplicable,
                                        );
                                        diag.note("`doc(spotlight)` is now a no-op");
                                    }
                                    if i_meta.has_name(sym::include) {
                                        if let Some(value) = i_meta.value_str() {
                                            // if there are multiple attributes, the suggestion would suggest deleting all of them, which is incorrect
                                            let applicability = if list.len() == 1 {
                                                Applicability::MachineApplicable
                                            } else {
                                                Applicability::MaybeIncorrect
                                            };
                                            let inner = if attr.style == AttrStyle::Inner {
                                                "!"
                                            } else {
                                                ""
                                            };
                                            diag.span_suggestion(
                                                attr.meta().unwrap().span,
                                                "use `doc = include_str!` instead",
                                                format!(
                                                    "#{}[doc = include_str!(\"{}\")]",
                                                    inner, value
                                                ),
                                                applicability,
                                            );
                                        }
                                    }
                                    diag.emit();
                                },
                            );
                            is_valid = false;
                        }
                    }
                } else {
                    // Entries of `#[doc(...)]` must be meta items (not bare literals).
                    self.tcx.struct_span_lint_hir(
                        INVALID_DOC_ATTRIBUTES,
                        hir_id,
                        meta.span(),
                        |lint| {
                            lint.build(&"invalid `doc` attribute").emit();
                        },
                    );
                    is_valid = false;
                }
            }
        }
        is_valid
    }
    /// Warns against some misuses of `#[must_use]`.
    fn check_must_use(
        &self,
        hir_id: HirId,
        attr: &Attribute,
        span: &Span,
        _target: Target,
    ) -> bool {
        let node = self.tcx.hir().get(hir_id);
        if let Some(fn_node) = node.fn_kind() {
            // On `async fn`, `#[must_use]` attaches to the returned `Future`
            // (which is already `must_use`), not the awaited value, so the
            // attribute has no effect — lint about it.
            if let rustc_hir::IsAsync::Async = fn_node.asyncness() {
                self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
                    lint.build(
                        "`must_use` attribute on `async` functions \
                        applies to the anonymous `Future` returned by the \
                        function, not the value within",
                    )
                    .span_label(
                        *span,
                        "this attribute does nothing, the `Future`s \
                        returned by async functions are already `must_use`",
                    )
                    .emit();
                });
            }
        }
        // For now, it's always valid: this check lints but never rejects.
        true
    }
/// Checks if `#[must_not_suspend]` is applied to a function. Returns `true` if valid.
fn check_must_not_suspend(&self, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::Struct | Target::Enum | Target::Union | Target::Trait => true,
_ => {
self.tcx
.sess
.struct_span_err(attr.span, "`must_not_suspend` attribute should be applied to a struct, enum, or trait")
.span_label(*span, "is not a struct, enum, or trait")
.emit();
false
}
}
}
    /// Checks if `#[cold]` is applied to a non-function; this check only
    /// lints, it never rejects the item.
    fn check_cold(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
        match target {
            Target::Fn | Target::Method(..) | Target::ForeignFn | Target::Closure => {}
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
            // `#[cold]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "cold");
            }
            _ => {
                // FIXME: #[cold] was previously allowed on non-functions and some crates used
                // this, so only emit a warning.
                self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
                    lint.build("attribute should be applied to a function")
                        .warn(
                            "this was previously accepted by the compiler but is \
                             being phased out; it will become a hard error in \
                             a future release!",
                        )
                        .span_label(*span, "not a function")
                        .emit();
                });
            }
        }
    }
    /// Checks if `#[link_name]` is applied to an item other than a foreign
    /// function or static; this check only lints, it never rejects the item.
    fn check_link_name(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
        match target {
            Target::ForeignFn | Target::ForeignStatic => {}
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
            // `#[link_name]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "link_name");
            }
            _ => {
                // FIXME: #[link_name] was previously allowed on non-functions/statics and some
                // crates used this, so only emit a warning.
                self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
                    let mut diag =
                        lint.build("attribute should be applied to a foreign function or static");
                    diag.warn(
                        "this was previously accepted by the compiler but is \
                         being phased out; it will become a hard error in \
                         a future release!",
                    );
                    // See issue #47725
                    if let Target::ForeignMod = target {
                        if let Some(value) = attr.value_str() {
                            diag.span_help(
                                attr.span,
                                &format!(r#"try `#[link(name = "{}")]` instead"#, value),
                            );
                        } else {
                            diag.span_help(attr.span, r#"try `#[link(name = "...")]` instead"#);
                        }
                    }
                    diag.span_label(*span, "not a foreign function or static");
                    diag.emit();
                });
            }
        }
    }
    /// Checks if `#[no_link]` is applied to an `extern crate`. Returns `true` if valid.
    fn check_no_link(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
        match target {
            Target::ExternCrate => true,
            // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
            // `#[no_link]` attribute with just a lint, because we previously
            // erroneously allowed it and some crates used it accidentally, so to be compatible
            // with crates depending on them, we can't throw an error here.
            Target::Field | Target::Arm | Target::MacroDef => {
                self.inline_attr_str_error_with_macro_def(hir_id, attr, "no_link");
                true
            }
            _ => {
                self.tcx
                    .sess
                    .struct_span_err(
                        attr.span,
                        "attribute should be applied to an `extern crate` item",
                    )
                    .span_label(*span, "not an `extern crate` item")
                    .emit();
                false
            }
        }
    }
fn is_impl_item(&self, hir_id: HirId) -> bool {
matches!(self.tcx.hir().get(hir_id), hir::Node::ImplItem(..))
}
/// Checks if `#[export_name]` is applied to a function or static. Returns `true` if valid.
fn check_export_name(
&self,
hir_id: HirId,
attr: &Attribute,
span: &Span,
target: Target,
) -> bool {
match target {
Target::Static | Target::Fn => true,
Target::Method(..) if self.is_impl_item(hir_id) => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[export_name]` attribute with just a lint, because we previously
// erroneously allowed it and some crates used it accidentally, to to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "export_name");
true
}
_ => {
self.tcx
.sess
.struct_span_err(
attr.span,
"attribute should be applied to a free function, impl method or static",
)
.span_label(*span, "not a free function, impl method or static")
.emit();
false
}
}
}
fn check_rustc_layout_scalar_valid_range(
&self,
attr: &Attribute,
span: &Span,
target: Target,
) -> bool {
if target != Target::Struct {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to a struct")
.span_label(*span, "not a struct")
.emit();
return false;
}
let list = match attr.meta_item_list() {
None => return false,
Some(it) => it,
};
if matches!(&list[..], &[NestedMetaItem::Literal(Lit { kind: LitKind::Int(..), .. })]) {
true
} else {
self.tcx
.sess
.struct_span_err(attr.span, "expected exactly one integer literal argument")
.emit();
false
}
}
/// Checks if `#[rustc_legacy_const_generics]` is applied to a function and has a valid argument.
fn check_rustc_legacy_const_generics(
&self,
attr: &Attribute,
span: &Span,
target: Target,
item: Option<ItemLike<'_>>,
) -> bool {
let is_function = matches!(target, Target::Fn | Target::Method(..));
if !is_function {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to a function")
.span_label(*span, "not a function")
.emit();
return false;
}
let list = match attr.meta_item_list() {
// The attribute form is validated on AST.
None => return false,
Some(it) => it,
};
let (decl, generics) = match item {
Some(ItemLike::Item(Item {
kind: ItemKind::Fn(FnSig { decl, .. }, generics, _),
..
})) => (decl, generics),
_ => bug!("should be a function item"),
};
for param in generics.params {
match param.kind {
hir::GenericParamKind::Const { .. } => {}
_ => {
self.tcx
.sess
.struct_span_err(
attr.span,
"#[rustc_legacy_const_generics] functions must \
only have const generics",
)
.span_label(param.span, "non-const generic parameter")
.emit();
return false;
}
}
}
if list.len() != generics.params.len() {
self.tcx
.sess
.struct_span_err(
attr.span,
"#[rustc_legacy_const_generics] must have one index for each generic parameter",
)
.span_label(generics.span, "generic parameters")
.emit();
return false;
}
let arg_count = decl.inputs.len() as u128 + generics.params.len() as u128;
let mut invalid_args = vec![];
for meta in list {
if let Some(LitKind::Int(val, _)) = meta.literal().map(|lit| &lit.kind) {
if *val >= arg_count {
let span = meta.span();
self.tcx
.sess
.struct_span_err(span, "index exceeds number of arguments")
.span_label(
span,
format!(
"there {} only {} argument{}",
if arg_count != 1 { "are" } else { "is" },
arg_count,
pluralize!(arg_count)
),
)
.emit();
return false;
}
} else {
invalid_args.push(meta.span());
}
}
if !invalid_args.is_empty() {
self.tcx
.sess
.struct_span_err(invalid_args, "arguments should be non-negative integers")
.emit();
false
} else {
true
}
}
/// Checks that the dep-graph debugging attributes are only present when the query-dep-graph
/// option is passed to the compiler.
fn check_rustc_dirty_clean(&self, attr: &Attribute) -> bool {
if self.tcx.sess.opts.debugging_opts.query_dep_graph {
true
} else {
self.tcx
.sess
.struct_span_err(attr.span, "attribute requires -Z query-dep-graph to be enabled")
.emit();
false
}
}
/// Checks if `#[link_section]` is applied to a function or static.
fn check_link_section(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
match target {
Target::Static | Target::Fn | Target::Method(..) => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[link_section]` attribute with just a lint, because we previously
// erroneously allowed it and some crates used it accidentally, to to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "link_section");
}
_ => {
// FIXME: #[link_section] was previously allowed on non-functions/statics and some
// crates used this, so only emit a warning.
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build("attribute should be applied to a function or static")
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
)
.span_label(*span, "not a function or static")
.emit();
});
}
}
}
/// Checks if `#[no_mangle]` is applied to a function or static.
fn check_no_mangle(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
match target {
Target::Static | Target::Fn => {}
Target::Method(..) if self.is_impl_item(hir_id) => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[no_mangle]` attribute with just a lint, because we previously
// erroneously allowed it and some crates used it accidentally, to to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "no_mangle");
}
// FIXME: #[no_mangle] was previously allowed on non-functions/statics, this should be an error
// The error should specify that the item that is wrong is specifically a *foreign* fn/static
// otherwise the error seems odd
Target::ForeignFn | Target::ForeignStatic => {
let foreign_item_kind = match target {
Target::ForeignFn => "function",
Target::ForeignStatic => "static",
_ => unreachable!(),
};
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build(&format!(
"`#[no_mangle]` has no effect on a foreign {}",
foreign_item_kind
))
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
)
.span_label(*span, format!("foreign {}", foreign_item_kind))
.note("symbol names in extern blocks are not mangled")
.span_suggestion(
attr.span,
"remove this attribute",
String::new(),
Applicability::MachineApplicable,
)
.emit();
});
}
_ => {
// FIXME: #[no_mangle] was previously allowed on non-functions/statics and some
// crates used this, so only emit a warning.
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build(
"attribute should be applied to a free function, impl method or static",
)
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
)
.span_label(*span, "not a free function, impl method or static")
.emit();
});
}
}
}
/// Checks if the `#[repr]` attributes on `item` are valid.
fn check_repr(
&self,
attrs: &'hir [Attribute],
span: &Span,
target: Target,
item: Option<ItemLike<'_>>,
hir_id: HirId,
) {
// Extract the names of all repr hints, e.g., [foo, bar, align] for:
// ```
// #[repr(foo)]
// #[repr(bar, align(8))]
// ```
let hints: Vec<_> = attrs
.iter()
.filter(|attr| attr.has_name(sym::repr))
.filter_map(|attr| attr.meta_item_list())
.flatten()
.collect();
let mut int_reprs = 0;
let mut is_c = false;
let mut is_simd = false;
let mut is_transparent = false;
for hint in &hints {
if !hint.is_meta_item() {
struct_span_err!(
self.tcx.sess,
hint.span(),
E0565,
"meta item in `repr` must be an identifier"
)
.emit();
continue;
}
let (article, allowed_targets) = match hint.name_or_empty() {
sym::C => {
is_c = true;
match target {
Target::Struct | Target::Union | Target::Enum => continue,
_ => ("a", "struct, enum, or union"),
}
}
sym::align => {
if let (Target::Fn, true) = (target, !self.tcx.features().fn_align) {
feature_err(
&self.tcx.sess.parse_sess,
sym::fn_align,
hint.span(),
"`repr(align)` attributes on functions are unstable",
)
.emit();
}
match target {
Target::Struct | Target::Union | Target::Enum | Target::Fn => continue,
_ => ("a", "struct, enum, function, or union"),
}
}
sym::packed => {
if target != Target::Struct && target != Target::Union {
("a", "struct or union")
} else {
continue;
}
}
sym::simd => {
is_simd = true;
if target != Target::Struct {
("a", "struct")
} else {
continue;
}
}
sym::transparent => {
is_transparent = true;
match target {
Target::Struct | Target::Union | Target::Enum => continue,
_ => ("a", "struct, enum, or union"),
}
}
sym::no_niche => {
if !self.tcx.features().enabled(sym::no_niche) {
feature_err(
&self.tcx.sess.parse_sess,
sym::no_niche,
hint.span(),
"the attribute `repr(no_niche)` is currently unstable",
)
.emit();
}
match target {
Target::Struct | Target::Enum => continue,
_ => ("a", "struct or enum"),
}
}
sym::i8
| sym::u8
| sym::i16
| sym::u16
| sym::i32
| sym::u32
| sym::i64
| sym::u64
| sym::i128
| sym::u128
| sym::isize
| sym::usize => {
int_reprs += 1;
if target != Target::Enum {
("an", "enum")
} else {
continue;
}
}
_ => {
struct_span_err!(
self.tcx.sess,
hint.span(),
E0552,
"unrecognized representation hint"
)
.emit();
continue;
}
};
struct_span_err!(
self.tcx.sess,
hint.span(),
E0517,
"{}",
&format!("attribute should be applied to {} {}", article, allowed_targets)
)
.span_label(*span, &format!("not {} {}", article, allowed_targets))
.emit();
}
// Just point at all repr hints if there are any incompatibilities.
// This is not ideal, but tracking precisely which ones are at fault is a huge hassle.
let hint_spans = hints.iter().map(|hint| hint.span());
// Error on repr(transparent, <anything else apart from no_niche>).
let non_no_niche = |hint: &&NestedMetaItem| hint.name_or_empty() != sym::no_niche;
let non_no_niche_count = hints.iter().filter(non_no_niche).count();
if is_transparent && non_no_niche_count > 1 {
let hint_spans: Vec<_> = hint_spans.clone().collect();
struct_span_err!(
self.tcx.sess,
hint_spans,
E0692,
"transparent {} cannot have other repr hints",
target
)
.emit();
}
// Warn on repr(u8, u16), repr(C, simd), and c-like-enum-repr(C, u8)
if (int_reprs > 1)
|| (is_simd && is_c)
|| (int_reprs == 1
&& is_c
&& item.map_or(false, |item| {
if let ItemLike::Item(item) = item {
return is_c_like_enum(item);
}
return false;
}))
{
self.tcx.struct_span_lint_hir(
CONFLICTING_REPR_HINTS,
hir_id,
hint_spans.collect::<Vec<Span>>(),
|lint| {
lint.build("conflicting representation hints")
.code(rustc_errors::error_code!(E0566))
.emit();
},
);
}
}
fn check_used(&self, attrs: &'hir [Attribute], target: Target) {
for attr in attrs {
if attr.has_name(sym::used) && target != Target::Static {
self.tcx
.sess
.span_err(attr.span, "attribute must be applied to a `static` variable");
}
}
}
/// Outputs an error for `#[allow_internal_unstable]` which can only be applied to macros.
/// (Allows proc_macro functions)
fn check_allow_internal_unstable(
&self,
hir_id: HirId,
attr: &Attribute,
span: &Span,
target: Target,
attrs: &[Attribute],
) -> bool {
debug!("Checking target: {:?}", target);
match target {
Target::Fn => {
for attr in attrs {
if self.tcx.sess.is_proc_macro_attr(attr) {
debug!("Is proc macro attr");
return true;
}
}
debug!("Is not proc macro attr");
false
}
Target::MacroDef => true,
// FIXME(#80564): We permit struct fields and match arms to have an
// `#[allow_internal_unstable]` attribute with just a lint, because we previously
// erroneously allowed it and some crates used it accidentally, to to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm => {
self.inline_attr_str_error_without_macro_def(
hir_id,
attr,
"allow_internal_unstable",
);
true
}
_ => {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to a macro")
.span_label(*span, "not a macro")
.emit();
false
}
}
}
/// Outputs an error for `#[allow_internal_unstable]` which can only be applied to macros.
/// (Allows proc_macro functions)
fn check_rustc_allow_const_fn_unstable(
&self,
hir_id: HirId,
attr: &Attribute,
span: &Span,
target: Target,
) -> bool {
match target {
Target::Fn | Target::Method(_)
if self.tcx.is_const_fn_raw(self.tcx.hir().local_def_id(hir_id)) =>
{
true
}
// FIXME(#80564): We permit struct fields and match arms to have an
// `#[allow_internal_unstable]` attribute with just a lint, because we previously
// erroneously allowed it and some crates used it accidentally, to to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "allow_internal_unstable");
true
}
_ => {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to `const fn`")
.span_label(*span, "not a `const fn`")
.emit();
false
}
}
}
/// default_method_body_is_const should only be applied to trait methods with default bodies.
fn check_default_method_body_is_const(
&self,
attr: &Attribute,
span: &Span,
target: Target,
) -> bool {
match target {
Target::Method(MethodKind::Trait { body: true }) => true,
_ => {
self.tcx
.sess
.struct_span_err(
attr.span,
"attribute should be applied to a trait method with body",
)
.span_label(*span, "not a trait method or missing a body")
.emit();
false
}
}
}
fn check_stability_promotable(&self, attr: &Attribute, _span: &Span, target: Target) -> bool {
match target {
Target::Expression => {
self.tcx
.sess
.struct_span_err(attr.span, "attribute cannot be applied to an expression")
.emit();
false
}
_ => true,
}
}
fn check_deprecated(&self, hir_id: HirId, attr: &Attribute, _span: &Span, target: Target) {
match target {
Target::Closure | Target::Expression | Target::Statement | Target::Arm => {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build("attribute is ignored here").emit();
});
}
_ => {}
}
}
fn check_macro_use(&self, hir_id: HirId, attr: &Attribute, target: Target) {
let name = attr.name_or_empty();
match target {
Target::ExternCrate | Target::Mod => {}
_ => {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build(&format!(
"`#[{name}]` only has an effect on `extern crate` and modules"
))
.emit();
});
}
}
}
fn check_macro_export(&self, hir_id: HirId, attr: &Attribute, target: Target) {
if target != Target::MacroDef {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build("`#[macro_export]` only has an effect on macro definitions").emit();
});
}
}
fn check_cfg_attr(&self, hir_id: HirId, attr: &Attribute) {
if let Some((_, attrs)) = rustc_parse::parse_cfg_attr(&attr, &self.tcx.sess.parse_sess) {
if attrs.is_empty() {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build("`#[cfg_attr]` does not expand to any attributes").emit();
});
}
}
}
fn check_plugin_registrar(&self, hir_id: HirId, attr: &Attribute, target: Target) {
if target != Target::Fn {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build("`#[plugin_registrar]` only has an effect on functions").emit();
});
}
}
}
impl Visitor<'tcx> for CheckAttrVisitor<'tcx> {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.tcx.hir())
}
fn | (&mut self, item: &'tcx Item<'tcx>) {
// Historically we've run more checks on non-exported than exported macros,
// so this lets us continue to run them while maintaining backwards compatibility.
// In the long run, the checks should be harmonized.
if let ItemKind::Macro(ref macro_def) = item.kind {
let def_id = item.def_id.to_def_id();
if macro_def.macro_rules && !self.tcx.has_attr(def_id, sym::macro_export) {
check_non_exported_macro_for_invalid_attrs(self.tcx, item);
}
}
let target = Target::from_item(item);
self.check_attributes(item.hir_id(), &item.span, target, Some(ItemLike::Item(item)));
intravisit::walk_item(self, item)
}
fn visit_generic_param(&mut self, generic_param: &'tcx hir::GenericParam<'tcx>) {
let target = Target::from_generic_param(generic_param);
self.check_attributes(generic_param.hir_id, &generic_param.span, target, None);
intravisit::walk_generic_param(self, generic_param)
}
fn visit_trait_item(&mut self, trait_item: &'tcx TraitItem<'tcx>) {
let target = Target::from_trait_item(trait_item);
self.check_attributes(trait_item.hir_id(), &trait_item.span, target, None);
intravisit::walk_trait_item(self, trait_item)
}
fn visit_field_def(&mut self, struct_field: &'tcx hir::FieldDef<'tcx>) {
self.check_attributes(struct_field.hir_id, &struct_field.span, Target::Field, None);
intravisit::walk_field_def(self, struct_field);
}
fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
self.check_attributes(arm.hir_id, &arm.span, Target::Arm, None);
intravisit::walk_arm(self, arm);
}
fn visit_foreign_item(&mut self, f_item: &'tcx ForeignItem<'tcx>) {
let target = Target::from_foreign_item(f_item);
self.check_attributes(
f_item.hir_id(),
&f_item.span,
target,
Some(ItemLike::ForeignItem(f_item)),
);
intravisit::walk_foreign_item(self, f_item)
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
let target = target_from_impl_item(self.tcx, impl_item);
self.check_attributes(impl_item.hir_id(), &impl_item.span, target, None);
intravisit::walk_impl_item(self, impl_item)
}
fn visit_stmt(&mut self, stmt: &'tcx hir::Stmt<'tcx>) {
// When checking statements ignore expressions, they will be checked later.
if let hir::StmtKind::Local(ref l) = stmt.kind {
self.check_attributes(l.hir_id, &stmt.span, Target::Statement, None);
}
intravisit::walk_stmt(self, stmt)
}
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
let target = match expr.kind {
hir::ExprKind::Closure(..) => Target::Closure,
_ => Target::Expression,
};
self.check_attributes(expr.hir_id, &expr.span, target, None);
intravisit::walk_expr(self, expr)
}
fn visit_variant(
&mut self,
variant: &'tcx hir::Variant<'tcx>,
generics: &'tcx hir::Generics<'tcx>,
item_id: HirId,
) {
self.check_attributes(variant.id, &variant.span, Target::Variant, None);
intravisit::walk_variant(self, variant, generics, item_id)
}
fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
self.check_attributes(param.hir_id, ¶m.span, Target::Param, None);
intravisit::walk_param(self, param);
}
}
fn is_c_like_enum(item: &Item<'_>) -> bool {
if let ItemKind::Enum(ref def, _) = item.kind {
for variant in def.variants {
match variant.data {
hir::VariantData::Unit(..) => { /* continue */ }
_ => return false,
}
}
true
} else {
false
}
}
fn check_invalid_crate_level_attr(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
const ATTRS_TO_CHECK: &[Symbol] = &[
sym::macro_export,
sym::repr,
sym::path,
sym::automatically_derived,
sym::start,
sym::rustc_main,
];
for attr in attrs {
for attr_to_check in ATTRS_TO_CHECK {
if attr.has_name(*attr_to_check) {
tcx.sess
.struct_span_err(
attr.span,
&format!(
"`{}` attribute cannot be used at crate level",
attr_to_check.to_ident_string()
),
)
.emit();
}
}
}
}
fn check_non_exported_macro_for_invalid_attrs(tcx: TyCtxt<'_>, item: &Item<'_>) {
let attrs = tcx.hir().attrs(item.hir_id());
for attr in attrs {
if attr.has_name(sym::inline) {
struct_span_err!(
tcx.sess,
attr.span,
E0518,
"attribute should be applied to function or closure",
)
.span_label(attr.span, "not a function or closure")
.emit();
}
}
}
fn check_mod_attrs(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
let check_attr_visitor = &mut CheckAttrVisitor { tcx };
tcx.hir().visit_item_likes_in_module(module_def_id, &mut check_attr_visitor.as_deep_visitor());
if module_def_id.is_top_level_module() {
check_attr_visitor.check_attributes(CRATE_HIR_ID, &DUMMY_SP, Target::Mod, None);
check_invalid_crate_level_attr(tcx, tcx.hir().krate_attrs());
}
}
pub(crate) fn provide(providers: &mut Providers) {
*providers = Providers { check_mod_attrs, ..*providers };
}
fn check_duplicates(
tcx: TyCtxt<'_>,
attr: &Attribute,
hir_id: HirId,
duplicates: AttributeDuplicates,
seen: &mut FxHashMap<Symbol, Span>,
) {
use AttributeDuplicates::*;
if matches!(duplicates, WarnFollowingWordOnly) && !attr.is_word() {
return;
}
match duplicates {
DuplicatesOk => {}
WarnFollowing | FutureWarnFollowing | WarnFollowingWordOnly | FutureWarnPreceding => {
match seen.entry(attr.name_or_empty()) {
Entry::Occupied(mut entry) => {
let (this, other) = if matches!(duplicates, FutureWarnPreceding) {
let to_remove = entry.insert(attr.span);
(to_remove, attr.span)
} else {
(attr.span, *entry.get())
};
tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, this, |lint| {
let mut db = lint.build("unused attribute");
db.span_note(other, "attribute also specified here").span_suggestion(
this,
"remove this attribute",
String::new(),
Applicability::MachineApplicable,
);
if matches!(duplicates, FutureWarnFollowing | FutureWarnPreceding) {
db.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
);
}
db.emit();
});
}
Entry::Vacant(entry) => {
entry.insert(attr.span);
}
}
}
ErrorFollowing | ErrorPreceding => match seen.entry(attr.name_or_empty()) {
Entry::Occupied(mut entry) => {
let (this, other) = if matches!(duplicates, ErrorPreceding) {
let to_remove = entry.insert(attr.span);
(to_remove, attr.span)
} else {
(attr.span, *entry.get())
};
tcx.sess
.struct_span_err(
this,
&format!("multiple `{}` attributes", attr.name_or_empty()),
)
.span_note(other, "attribute also specified here")
.span_suggestion(
this,
"remove this attribute",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
Entry::Vacant(entry) => {
entry.insert(attr.span);
}
},
}
}
| visit_item |
mod.rs | pub mod gate;
pub use gate::Gate;
use noir_field::FieldElement;
use crate::native_types::Witness;
#[derive(Clone, Debug)]
pub struct Circuit {
pub current_witness_index: u32,
pub gates: Vec<Gate>,
pub public_inputs: PublicInputs,
}
#[derive(Clone, Debug)]
pub struct PublicInputs(pub Vec<Witness>);
impl PublicInputs {
/// Returns the witness index of each public input
pub fn indices(&self) -> Vec<u32> {
self.0
.iter()
.map(|witness| witness.witness_index() as u32)
.collect()
}
}
#[derive(Clone, Debug)]
pub struct Selector(pub String, pub FieldElement);
impl Default for Selector {
fn default() -> Selector |
}
| {
Selector("zero".to_string(), FieldElement::zero())
} |
array.go | // Copyright 2021 FerretDB Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import "fmt"
// Array represents BSON array.
//
// Zero value is a valid empty array.
type Array struct {
s []any
}
// MakeArray creates an empty array with set capacity.
func MakeArray(capacity int) *Array {
if capacity == 0 {
return new(Array)
}
return &Array{s: make([]any, 0, capacity)}
}
// NewArray creates an array with the given values.
func NewArray(values ...any) (*Array, error) {
for i, value := range values {
if err := validateValue(value); err != nil {
return nil, fmt.Errorf("types.NewArray: index %d: %w", i, err)
}
}
return &Array{s: values}, nil
}
// MustNewArray is a NewArray that panics in case of error.
//
// Deprecated: use `must.NotFail(NewArray(...))` instead.
func | (values ...any) *Array {
a, err := NewArray(values...)
if err != nil {
panic(err)
}
return a
}
func (a *Array) compositeType() {}
// DeepCopy returns a deep copy of this Array.
func (a *Array) DeepCopy() *Array {
if a == nil {
panic("types.Array.DeepCopy: nil array")
}
return deepCopy(a).(*Array)
}
// Len returns the number of elements in the array.
//
// It returns 0 for nil Array.
func (a *Array) Len() int {
if a == nil {
return 0
}
return len(a.s)
}
// Get returns a value at the given index.
func (a *Array) Get(index int) (any, error) {
if l := a.Len(); index < 0 || index >= l {
return nil, fmt.Errorf("types.Array.Get: index %d is out of bounds [0-%d)", index, l)
}
return a.s[index], nil
}
// GetByPath returns a value by path - a sequence of indexes and keys.
func (a *Array) GetByPath(path ...string) (any, error) {
return getByPath(a, path...)
}
// Set sets the value at the given index.
func (a *Array) Set(index int, value any) error {
if l := a.Len(); index < 0 || index >= l {
return fmt.Errorf("types.Array.Set: index %d is out of bounds [0-%d)", index, l)
}
if err := validateValue(value); err != nil {
return fmt.Errorf("types.Array.Set: %w", err)
}
a.s[index] = value
return nil
}
// Append appends given values to the array.
func (a *Array) Append(values ...any) error {
for _, value := range values {
if err := validateValue(value); err != nil {
return fmt.Errorf("types.Array.Append: %w", err)
}
}
if a.s == nil {
a.s = values
return nil
}
a.s = append(a.s, values...)
return nil
}
// RemoveByPath removes document by path, doing nothing if the key does not exist.
func (a *Array) RemoveByPath(keys ...string) {
removeByPath(a, keys...)
}
| MustNewArray |
jsonpath.go | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"bytes"
"fmt"
"io"
"reflect"
"strconv"
"k8s.io/kubernetes/third_party/golang/template"
)
type JSONPath struct {
name string
parser *Parser
stack [][]reflect.Value //push and pop values in different scopes
cur []reflect.Value //current scope values
beginRange int
inRange int
endRange int
}
func New(name string) *JSONPath {
return &JSONPath{
name: name,
beginRange: 0,
inRange: 0,
endRange: 0,
}
}
// Parse parse the given template, return error
func (j *JSONPath) Parse(text string) (err error) {
j.parser, err = Parse(j.name, text)
return
}
// Execute bounds data into template and write the result
func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
fullResults, err := j.FindResults(data)
if err != nil {
return err
}
for ix := range fullResults {
if err := j.PrintResults(wr, fullResults[ix]); err != nil {
return err
}
}
return nil
}
func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
if j.parser == nil {
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
}
j.cur = []reflect.Value{reflect.ValueOf(data)}
nodes := j.parser.Root.Nodes
fullResult := [][]reflect.Value{}
for i := 0; i < len(nodes); i++ {
node := nodes[i]
results, err := j.walk(j.cur, node)
if err != nil {
return nil, err
}
//encounter an end node, break the current block
if j.endRange > 0 && j.endRange <= j.inRange {
j.endRange -= 1
break
}
//encounter a range node, start a range loop
if j.beginRange > 0 {
j.beginRange -= 1
j.inRange += 1
for k, value := range results {
j.parser.Root.Nodes = nodes[i+1:]
if k == len(results)-1 {
j.inRange -= 1
}
nextResults, err := j.FindResults(value.Interface())
if err != nil {
return nil, err
}
fullResult = append(fullResult, nextResults...)
}
break
}
fullResult = append(fullResult, results)
}
return fullResult, nil
}
// PrintResults write the results into writer
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
for i, r := range results {
text, err := j.evalToText(r)
if err != nil {
return err
}
if i != len(results)-1 {
text = append(text, ' ')
}
if _, err = wr.Write(text); err != nil {
return err
}
}
return nil
}
// walk visits tree rooted at the given node in DFS order
func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
switch node := node.(type) {
case *ListNode:
return j.evalList(value, node)
case *TextNode:
return []reflect.Value{reflect.ValueOf(string(node.Text))}, nil
case *FieldNode:
return j.evalField(value, node)
case *ArrayNode:
return j.evalArray(value, node)
case *FilterNode:
return j.evalFilter(value, node)
case *IntNode:
return j.evalInt(value, node)
case *FloatNode:
return j.evalFloat(value, node)
case *WildcardNode:
return j.evalWildcard(value, node)
case *RecursiveNode:
return j.evalRecursive(value, node)
case *UnionNode:
return j.evalUnion(value, node)
case *IdentifierNode:
return j.evalIdentifier(value, node)
default:
return value, fmt.Errorf("unexpected Node %v", node)
}
}
// evalInt evaluates IntNode | func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalFloat evaluates FloatNode
func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalList evaluates ListNode
func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
var err error
curValue := value
for _, node := range node.Nodes {
curValue, err = j.walk(curValue, node)
if err != nil {
return curValue, err
}
}
return curValue, nil
}
// evalIdentifier evaluates IdentifierNode
func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
results := []reflect.Value{}
switch node.Name {
case "range":
j.stack = append(j.stack, j.cur)
j.beginRange += 1
results = input
case "end":
if j.endRange < j.inRange { //inside a loop, break the current block
j.endRange += 1
break
}
// the loop is about to end, pop value and continue the following execution
if len(j.stack) > 0 {
j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
} else {
return results, fmt.Errorf("not in range, nothing to end")
}
default:
return input, fmt.Errorf("unrecongnized identifier %v", node.Name)
}
return results, nil
}
// evalArray evaluates ArrayNode
func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
if value.Kind() == reflect.Interface {
value = reflect.ValueOf(value.Interface())
}
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice", value)
}
params := node.Params
if !params[0].Known {
params[0].Value = 0
}
if params[0].Value < 0 {
params[0].Value += value.Len()
}
if !params[1].Known {
params[1].Value = value.Len()
}
if params[1].Value < 0 {
params[1].Value += value.Len()
}
if !params[2].Known {
value = value.Slice(params[0].Value, params[1].Value)
} else {
value = value.Slice3(params[0].Value, params[1].Value, params[2].Value)
}
for i := 0; i < value.Len(); i++ {
result = append(result, value.Index(i))
}
}
return result, nil
}
// evalUnion evaluates UnionNode
func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, listNode := range node.Nodes {
temp, err := j.evalList(input, listNode)
if err != nil {
return input, err
}
result = append(result, temp...)
}
return result, nil
}
// evalField evaluates filed of struct or key of map.
func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
var result reflect.Value
if value.Kind() == reflect.Interface {
value = reflect.ValueOf(value.Interface())
}
if value.Kind() == reflect.Struct {
result = value.FieldByName(node.Value)
} else if value.Kind() == reflect.Map {
result = value.MapIndex(reflect.ValueOf(node.Value))
}
if result.IsValid() {
results = append(results, result)
}
}
if len(results) == 0 {
return results, fmt.Errorf("%s is not found", node.Value)
}
return results, nil
}
// evalWildcard extract all contents of the given value
func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalRecursive visit the given value recursively and push all of them to result
func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
results := []reflect.Value{}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
if len(results) != 0 {
result = append(result, value)
output, err := j.evalRecursive(results, node)
if err != nil {
return result, err
}
result = append(result, output...)
}
}
return result, nil
}
// evalFilter filter array according to FilterNode
func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
if value.Kind() == reflect.Interface {
value = reflect.ValueOf(value.Interface())
}
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice", value)
}
for i := 0; i < value.Len(); i++ {
temp := []reflect.Value{value.Index(i)}
lefts, err := j.evalList(temp, node.Left)
//case exists
if node.Operator == "exists" {
if len(lefts) > 0 {
results = append(results, value.Index(i))
}
continue
}
if err != nil {
return input, err
}
var left, right interface{}
if len(lefts) != 1 {
return input, fmt.Errorf("can only compare one element at a time")
}
left = lefts[0].Interface()
rights, err := j.evalList(temp, node.Right)
if err != nil {
return input, err
}
if len(rights) != 1 {
return input, fmt.Errorf("can only compare one element at a time")
}
right = rights[0].Interface()
pass := false
switch node.Operator {
case "<":
pass, err = template.Less(left, right)
case ">":
pass, err = template.Greater(left, right)
case "==":
pass, err = template.Equal(left, right)
case "!=":
pass, err = template.NotEqual(left, right)
case "<=":
pass, err = template.LessEqual(left, right)
case ">=":
pass, err = template.GreaterEqual(left, right)
default:
return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
}
if err != nil {
return results, err
}
if pass {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalToText translates reflect value to corresponding text
func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
if v.Kind() == reflect.Interface {
v = reflect.ValueOf(v.Interface())
}
var buffer bytes.Buffer
switch v.Kind() {
case reflect.Invalid:
//pass
case reflect.Ptr:
text, err := j.evalToText(reflect.Indirect(v))
if err != nil {
return nil, err
}
buffer.Write(text)
case reflect.Bool:
if variable := v.Bool(); variable {
buffer.WriteString("True")
} else {
buffer.WriteString("False")
}
case reflect.Float32:
buffer.WriteString(strconv.FormatFloat(v.Float(), 'f', -1, 32))
case reflect.Float64:
buffer.WriteString(strconv.FormatFloat(v.Float(), 'f', -1, 64))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
buffer.WriteString(strconv.FormatInt(v.Int(), 10))
case reflect.String:
buffer.WriteString(v.String())
case reflect.Array, reflect.Slice:
buffer.WriteString("[")
for i := 0; i < v.Len(); i++ {
text, err := j.evalToText(v.Index(i))
if err != nil {
return nil, err
}
buffer.Write(text)
if i != v.Len()-1 {
buffer.WriteString(", ")
}
}
buffer.WriteString("]")
case reflect.Struct:
buffer.WriteString("{")
for i := 0; i < v.NumField(); i++ {
text, err := j.evalToText(v.Field(i))
if err != nil {
return nil, err
}
pair := fmt.Sprintf("%s: %s", v.Type().Field(i).Name, text)
buffer.WriteString(pair)
if i != v.NumField()-1 {
buffer.WriteString(", ")
}
}
buffer.WriteString("}")
case reflect.Map:
buffer.WriteString("{")
for i, key := range v.MapKeys() {
text, err := j.evalToText(v.MapIndex(key))
if err != nil {
return nil, err
}
pair := fmt.Sprintf("%s: %s", key, text)
buffer.WriteString(pair)
if i != len(v.MapKeys())-1 {
buffer.WriteString(", ")
}
}
buffer.WriteString("}")
default:
return nil, fmt.Errorf("%v is not printable", v.Kind())
}
return buffer.Bytes(), nil
} | |
manage_index.py | from modules import index
import argparse
commands = ["cleanup", "re-index"]
parser = argparse.ArgumentParser(description='Manager for the Inverted Index.')
parser.add_argument('command', choices=commands, help='Command to perform on index.')
parser.add_argument('--in_s3', action='store_true', help='If passed, the index will be loaded from the S3 bucket')
parser.add_argument('--file_path', nargs='?', const='index.json', help='The file path for the index.')
| inv_index = index.InvertedIndex(from_file=True, in_s3=args.in_s3, file_path=args.file_path or 'index.json')
if args.command == "cleanup":
inv_index.cleanup() | args = parser.parse_args() |
DataStreamy.ts | import { byteCount, ByteCount, byteCountToNumber } from "../kacheryTypes"
export interface DataStreamyProgress {
bytesLoaded: ByteCount
bytesTotal: ByteCount
}
class DataStreamyProducer {
#cancelled = false
#onCancelledCallbacks: (() => void)[] = []
#lastUnorderedDataIndex: number = -1
#unorderedDataChunksByIndex = new Map<number, Buffer>()
#unorderedEndNumDataChunks: number | null = null
constructor(private dataStream: DataStreamy) {
}
onCancelled(cb: () => void) {
if (this.#cancelled) {
cb()
}
this.#onCancelledCallbacks.push(cb)
}
isCancelled() {
return this.#cancelled
}
error(err: Error) {
if (this.#cancelled) return
this.dataStream._producer_error(err)
}
start(size: ByteCount | null) {
if (this.#cancelled) return
this.dataStream._producer_start(size)
}
end() {
if (this.#cancelled) return
this.dataStream._producer_end()
}
data(buf: Buffer) {
if (this.#cancelled) return
// memoryLeakTest.push(buf)
this.dataStream._producer_data(buf)
}
unorderedData(index: number, buf: Buffer) {
this.#unorderedDataChunksByIndex.set(index, buf)
while (this.#unorderedDataChunksByIndex.has(this.#lastUnorderedDataIndex + 1)) {
this.#lastUnorderedDataIndex ++
const buf = this.#unorderedDataChunksByIndex.get(this.#lastUnorderedDataIndex)
/* istanbul ignore next */
if (!buf) throw Error('Unexpected no buf in unorderedData')
this.#unorderedDataChunksByIndex.delete(this.#lastUnorderedDataIndex)
this.data(buf)
if (this.#unorderedEndNumDataChunks !== null) {
if (this.#lastUnorderedDataIndex === this.#unorderedEndNumDataChunks - 1) {
this.end()
}
else if (this.#lastUnorderedDataIndex > this.#unorderedEndNumDataChunks - 1) {
throw Error('Unexpected lastUnorderedDataIndex')
}
}
}
}
unorderedEnd(numDataChunks: number) {
if (this.#lastUnorderedDataIndex >= numDataChunks - 1) {
this.end()
}
else {
this.#unorderedEndNumDataChunks = numDataChunks
}
}
incrementBytes(numBytes: ByteCount) {
if (this.#cancelled) return
this.dataStream._producer_incrementBytes(numBytes)
}
reportBytesLoaded(numBytes: ByteCount) {
if (this.#cancelled) return
this.dataStream._producer_reportBytesLoaded(numBytes)
}
setProgress(progress: DataStreamyProgress) {
this.dataStream._producer_setProgress(progress)
}
_cancel() {
if (this.#cancelled) return
this.#cancelled = true
this.#onCancelledCallbacks.forEach(cb => {cb()})
this.dataStream._producer_error(Error('Cancelled'))
}
}
export default class | {
#producer: DataStreamyProducer
// state
#completed = false
#finished = false
#started = false
#size: ByteCount | null = null
#bytesLoaded: ByteCount = byteCount(0)
#error: Error | null = null
#pendingDataChunks: Buffer[] = []
// callbacks
#onStartedCallbacks: ((size: ByteCount | null) => void)[] = []
#onDataCallbacks: ((data: Buffer) => void)[] = []
#onFinishedCallbacks: (() => void)[] = []
#onCompleteCallbacks: (() => void)[] = []
#onErrorCallbacks: ((err: Error) => void)[] = []
#onProgressCallbacks: ((progress: DataStreamyProgress) => void)[] = []
constructor() {
this.#producer = new DataStreamyProducer(this)
}
onStarted(callback: ((size: ByteCount | null) => void)) {
if (this.#started) {
callback(this.#size)
}
this.#onStartedCallbacks.push(callback)
}
onData(callback: ((data: Buffer) => void)) {
if ((this.#onDataCallbacks.length > 0) && (byteCountToNumber(this.#bytesLoaded) > 0)) {
throw Error('onData already called in DataStreamy, and we have already received data')
}
this.#pendingDataChunks.forEach((ch: Buffer) => {
callback(ch)
})
this.#pendingDataChunks = []
this.#onDataCallbacks.push(callback)
}
async allData(): Promise<Buffer> {
return new Promise<Buffer>((resolve, reject) => {
const buffers: Buffer[] = []
this.onData(buf => buffers.push(buf))
this.onFinished(() => {
resolve(Buffer.concat(buffers))
})
this.onError((err) => {
reject(err)
})
})
}
onFinished(callback: (() => void)) {
if (this.#finished) {
// important to use setTimeout here because we want to get data before finished (if both are already available)
setTimeout(() => {
callback()
}, 0)
}
this.#onFinishedCallbacks.push(callback)
}
onError(callback: ((err: Error) => void)) {
if (this.#error) {
// I think it is important to use setTimeout here
setTimeout(() => {
if (!this.#error) throw Error('error')
callback(this.#error)
}, 0)
}
this.#onErrorCallbacks.push(callback)
}
onProgress(callback: (progress: DataStreamyProgress) => void) {
if ((byteCountToNumber(this.#bytesLoaded) > 0) && (this.#size)) {
callback({bytesLoaded: this.#bytesLoaded, bytesTotal: this.#size})
}
this.#onProgressCallbacks.push(callback)
}
bytesLoaded(): ByteCount {
return this.#bytesLoaded
}
bytesTotal(): ByteCount | null {
return this.#size
}
cancel() {
if (this.#completed) return
this.#producer._cancel()
}
isComplete() {
return this.#completed
}
producer() {
return this.#producer
}
_producer_error(err: Error) {
if (this.#completed) return
this._handle_complete()
this.#error = err
this.#onErrorCallbacks.forEach(cb => {cb(err)})
}
_producer_start(size: ByteCount | null) {
if (this.#completed) return
if (this.#started) return
this.#started = true
this.#size = size
this.#onStartedCallbacks.forEach(cb => {
cb(size)
})
}
_producer_end() {
if (this.#completed) return
this._handle_complete()
this.#finished = true
this.#onFinishedCallbacks.forEach(cb => {cb()})
}
_handle_complete() {
this.#completed = true
this.#onCompleteCallbacks.forEach(cb => {cb()})
if (this.#pendingDataChunks.length > 0) {
setTimeout(() => {
this.#pendingDataChunks = []
}, 1000)
}
}
_producer_data(buf: Buffer) {
if (this.#completed) return
if (!this.#started) {
this.#started = true
this.#onStartedCallbacks.forEach(cb => {
cb(null)
})
}
this.#onDataCallbacks.forEach(cb => {
cb(buf)
})
this._producer_incrementBytes(byteCount(buf.length))
if (this.#onDataCallbacks.length === 0) {
this.#pendingDataChunks.push(buf)
}
}
_producer_incrementBytes(numBytes: ByteCount) {
this._producer_reportBytesLoaded(byteCount(byteCountToNumber(this.#bytesLoaded) + byteCountToNumber(numBytes)))
}
_producer_reportBytesLoaded(numBytes: ByteCount) {
this.#bytesLoaded = numBytes
const s = this.#size
if (s !== null) {
this.#onProgressCallbacks.forEach(cb => {
cb({bytesLoaded: this.#bytesLoaded, bytesTotal: s})
})
}
}
_producer_setProgress(progress: DataStreamyProgress) {
this.#bytesLoaded = progress.bytesLoaded
if (progress.bytesTotal) {
this.#size = progress.bytesTotal
}
const s = this.#size
if (s !== null) {
this.#onProgressCallbacks.forEach(cb => {
cb({bytesLoaded: this.#bytesLoaded, bytesTotal: s})
})
}
}
} | DataStreamy |
play.rs | use crate::arg::{Endianness, Arguments};
use crate::error::Error;
use cpal::StreamConfig;
use cpal::traits::{DeviceTrait, StreamTrait};
use std::io::Read;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
/** When no sample rate is specified, the playback will try to select the value
* that gets the closest to this number and that is still supported. */
pub const PREFERRED_SAMPLE_RATE: u32 = 48000;
/** When no channel count is specified, the playback will try to select the
* value that gets the closest to this number and that is still supported. */
pub const PREFERRED_CHANNELS: u16 = 2;
/** When no sample format is specified, the playback will try to select the
* value that gets the closest to this number and that is still supported. */
pub const PREFERRED_SAMPLE_FORMAT: cpal::SampleFormat = cpal::SampleFormat::I16;
/** When no sample endian is specified, the playback will try to select the
* value that gets the closest to this number and that is still supported. */
pub const PREFERRED_SAMPLE_ENDIAN: Endianness = Endianness::Little;
/** Plays audio from a given source. */
pub fn play<R>(args: &Arguments, mut source: R)
where R: Read + Send + 'static {
eprint!("playing <file> ");
if let Some((index, name)) = args.device_pick() {
eprint!("to device {} ({}) ", index, name);
} else {
eprint!("to the default device ");
}
if let Some((index, name)) = args.host_pick() {
eprintln!("within host {} ({})", index, name);
} else {
eprintln!("within the default host");
}
let format = args.config(
PREFERRED_SAMPLE_RATE,
PREFERRED_CHANNELS,
PREFERRED_SAMPLE_FORMAT);
let format = match format {
Ok(format) => format,
Err(what) => {
eprintln!("error: {}", what);
std::process::exit(1)
}
};
let endian = args.endianness().unwrap_or(PREFERRED_SAMPLE_ENDIAN);
eprint!("playing as: {:?}{}, ",
format.sample_format(),
match endian {
Endianness::Little => "LE",
Endianness::Big => "BE",
Endianness::Native => "",
});
eprint!("{} channels, ", format.channels());
eprintln!("{}Hz", format.sample_rate().0);
/* Create the output stream. */
let end0 = Arc::new(AtomicBool::new(false));
let end1 = end0.clone();
let device = args.device();
let output = device.build_output_stream_raw(
&format.config(),
format.sample_format(),
move |data, info| {
let result = source.read_exact(data.bytes_mut());
match result {
/*Ok(result) =>
eprintln!("{:?}: fed {} bytes with {} bytes",
info.timestamp().playback,
data.bytes().len(),
result),*/
Err(what) => if what.kind() == std::io::ErrorKind::UnexpectedEof {
eprintln!("e o f");
end1.store(true, Ordering::Relaxed);
} else {
eprintln!("error: data read failed: {}", what);
std::process::exit(1);
},
_ => {}
}
let samples = data.bytes().len() / data.sample_format().sample_size();
let per_sec = format.sample_rate().0 * format.channels() as u32;
let projected = samples as f64 / per_sec as f64;
},
|what| {
eprintln!("error: output stream failed: {}", what);
std::process::exit(1); | Err(what) => {
eprintln!("error: could not initialize output stream: {}", what);
std::process::exit(1);
}
};
output.play();
while !end0.load(Ordering::Relaxed) { }
output.pause();
} | });
let output = match output {
Ok(output) => output, |
login-page.component.ts | import { Component, OnInit } from '@angular/core';
import { FormControl, FormGroup, Validators } from '@angular/forms';
import { Router } from '@angular/router';
import { IUser } from 'src/interface';
import { AuthService } from '../shared/components/services/auth.service';
@Component({
selector: 'app-login-page',
templateUrl: './login-page.component.html',
styleUrls: ['./login-page.component.scss'],
})
export class LoginPageComponent implements OnInit {
public myForm!: FormGroup;
public submited: boolean = false;
private message: string;
constructor(public auth: AuthService, private router: Router) {}
ngOnInit(): void {
this.myForm = new FormGroup({
email: new FormControl(null, [Validators.required, Validators.email]),
password: new FormControl(null, [
Validators.required,
Validators.minLength(4),
]),
});
}
public submit() {
if (this.myForm.invalid) {
return;
}
this.submited = true;
const user: IUser = {
email: this.myForm.value.email, | this.auth.login(user).subscribe(
() => {
this.myForm.reset();
this.router.navigate(['/admin', 'dashboard']);
this.submited = false;
},
() => {
this.submited = false;
}
);
}
public email() {
return this.myForm.controls['email'];
}
public password() {
return this.myForm.controls['password'];
}
} | password: this.myForm.value.password,
};
|
oetest.py | # Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# Main unittest module used by testimage.bbclass
# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime.
# It also has some helper functions and it's responsible for actually starting the tests
import os, re, mmap, sys
import unittest
import inspect
import subprocess
import signal
import shutil
import functools
try:
import bb
except ImportError:
pass
import logging
import oeqa.runtime
# Exported test doesn't require sdkext
try:
import oeqa.sdkext
except ImportError:
pass
from oeqa.utils.decorators import LogResults, gettag, getResults
logger = logging.getLogger("BitBake")
def getVar(obj):
#extend form dict, if a variable didn't exists, need find it in testcase
class VarDict(dict):
def __getitem__(self, key):
return gettag(obj, key)
return VarDict()
def checkTags(tc, tagexp):
return eval(tagexp, None, getVar(tc))
def filterByTagExp(testsuite, tagexp):
if not tagexp:
return testsuite
caseList = []
for each in testsuite:
if not isinstance(each, unittest.BaseTestSuite):
if checkTags(each, tagexp):
caseList.append(each)
else:
caseList.append(filterByTagExp(each, tagexp))
return testsuite.__class__(caseList)
@LogResults
class oeTest(unittest.TestCase):
pscmd = "ps"
longMessage = True
@classmethod
def hasPackage(self, pkg):
"""
True if the full package name exists in the manifest, False otherwise.
"""
return pkg in oeTest.tc.pkgmanifest
@classmethod
def | (self, match):
"""
True if match exists in the manifest as a regular expression substring,
False otherwise.
"""
for s in oeTest.tc.pkgmanifest:
if re.match(match, s):
return True
return False
@classmethod
def hasFeature(self,feature):
if feature in oeTest.tc.imagefeatures or \
feature in oeTest.tc.distrofeatures:
return True
else:
return False
class oeRuntimeTest(oeTest):
def __init__(self, methodName='runTest'):
self.target = oeRuntimeTest.tc.target
super(oeRuntimeTest, self).__init__(methodName)
def setUp(self):
# Install packages in the DUT
self.tc.install_uninstall_packages(self.id())
# Check if test needs to run
if self.tc.sigterm:
self.fail("Got SIGTERM")
elif (type(self.target).__name__ == "QemuTarget"):
self.assertTrue(self.target.check(), msg = "Qemu not running?")
self.setUpLocal()
# a setup method before tests but after the class instantiation
def setUpLocal(self):
pass
def tearDown(self):
# Uninstall packages in the DUT
self.tc.install_uninstall_packages(self.id(), False)
res = getResults()
# If a test fails or there is an exception dump
# for QemuTarget only
if (type(self.target).__name__ == "QemuTarget" and
(self.id() in res.getErrorList() or
self.id() in res.getFailList())):
self.tc.host_dumper.create_dir(self._testMethodName)
self.tc.host_dumper.dump_host()
self.target.target_dumper.dump_target(
self.tc.host_dumper.dump_dir)
print ("%s dump data stored in %s" % (self._testMethodName,
self.tc.host_dumper.dump_dir))
self.tearDownLocal()
# Method to be run after tearDown and implemented by child classes
def tearDownLocal(self):
pass
def getmodule(pos=2):
# stack returns a list of tuples containg frame information
# First element of the list the is current frame, caller is 1
frameinfo = inspect.stack()[pos]
modname = inspect.getmodulename(frameinfo[1])
#modname = inspect.getmodule(frameinfo[0]).__name__
return modname
def skipModule(reason, pos=2):
modname = getmodule(pos)
if modname not in oeTest.tc.testsrequired:
raise unittest.SkipTest("%s: %s" % (modname, reason))
else:
raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
"\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
"\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
def skipModuleIf(cond, reason):
if cond:
skipModule(reason, 3)
def skipModuleUnless(cond, reason):
if not cond:
skipModule(reason, 3)
_buffer_logger = ""
def custom_verbose(msg, *args, **kwargs):
global _buffer_logger
if msg[-1] != "\n":
_buffer_logger += msg
else:
_buffer_logger += msg
try:
bb.plain(_buffer_logger.rstrip("\n"), *args, **kwargs)
except NameError:
logger.info(_buffer_logger.rstrip("\n"), *args, **kwargs)
_buffer_logger = ""
class TestContext(object):
def __init__(self, d, exported=False):
self.d = d
self.testsuites = self._get_test_suites()
if exported:
path = [os.path.dirname(os.path.abspath(__file__))]
extrapath = ""
else:
path = d.getVar("BBPATH").split(':')
extrapath = "lib/oeqa"
self.testslist = self._get_tests_list(path, extrapath)
self.testsrequired = self._get_test_suites_required()
self.filesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runtime/files")
self.corefilesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
self.imagefeatures = d.getVar("IMAGE_FEATURES").split()
self.distrofeatures = d.getVar("DISTRO_FEATURES").split()
# get testcase list from specified file
# if path is a relative path, then relative to build/conf/
def _read_testlist(self, fpath, builddir):
if not os.path.isabs(fpath):
fpath = os.path.join(builddir, "conf", fpath)
if not os.path.exists(fpath):
bb.fatal("No such manifest file: ", fpath)
tcs = []
for line in open(fpath).readlines():
line = line.strip()
if line and not line.startswith("#"):
tcs.append(line)
return " ".join(tcs)
# return test list by type also filter if TEST_SUITES is specified
def _get_tests_list(self, bbpath, extrapath):
testslist = []
type = self._get_test_namespace()
# This relies on lib/ under each directory in BBPATH being added to sys.path
# (as done by default in base.bbclass)
for testname in self.testsuites:
if testname != "auto":
if testname.startswith("oeqa."):
testslist.append(testname)
continue
found = False
for p in bbpath:
if os.path.exists(os.path.join(p, extrapath, type, testname + ".py")):
testslist.append("oeqa." + type + "." + testname)
found = True
break
elif os.path.exists(os.path.join(p, extrapath, type, testname.split(".")[0] + ".py")):
testslist.append("oeqa." + type + "." + testname)
found = True
break
if not found:
bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
if "auto" in self.testsuites:
def add_auto_list(path):
files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
for f in files:
module = 'oeqa.' + type + '.' + f[:-3]
if module not in testslist:
testslist.append(module)
for p in bbpath:
testpath = os.path.join(p, 'lib', 'oeqa', type)
bb.debug(2, 'Searching for tests in %s' % testpath)
if os.path.exists(testpath):
add_auto_list(testpath)
return testslist
def getTestModules(self):
"""
Returns all the test modules in the testlist.
"""
import pkgutil
modules = []
for test in self.testslist:
if re.search("\w+\.\w+\.test_\S+", test):
test = '.'.join(t.split('.')[:3])
module = pkgutil.get_loader(test)
modules.append(module)
return modules
def getModulefromID(self, test_id):
"""
Returns the test module based on a test id.
"""
module_name = ".".join(test_id.split(".")[:3])
modules = self.getTestModules()
for module in modules:
if module.name == module_name:
return module
return None
def getTests(self, test):
'''Return all individual tests executed when running the suite.'''
# Unfortunately unittest does not have an API for this, so we have
# to rely on implementation details. This only needs to work
# for TestSuite containing TestCase.
method = getattr(test, '_testMethodName', None)
if method:
# leaf case: a TestCase
yield test
else:
# Look into TestSuite.
tests = getattr(test, '_tests', [])
for t1 in tests:
for t2 in self.getTests(t1):
yield t2
def loadTests(self):
setattr(oeTest, "tc", self)
testloader = unittest.TestLoader()
testloader.sortTestMethodsUsing = None
suites = [testloader.loadTestsFromName(name) for name in self.testslist]
suites = filterByTagExp(suites, getattr(self, "tagexp", None))
# Determine dependencies between suites by looking for @skipUnlessPassed
# method annotations. Suite A depends on suite B if any method in A
# depends on a method on B.
for suite in suites:
suite.dependencies = []
suite.depth = 0
for test in self.getTests(suite):
methodname = getattr(test, '_testMethodName', None)
if methodname:
method = getattr(test, methodname)
depends_on = getattr(method, '_depends_on', None)
if depends_on:
for dep_suite in suites:
if depends_on in [getattr(t, '_testMethodName', None) for t in self.getTests(dep_suite)]:
if dep_suite not in suite.dependencies and \
dep_suite is not suite:
suite.dependencies.append(dep_suite)
break
else:
logger.warning("Test %s was declared as @skipUnlessPassed('%s') but that test is either not defined or not active. Will run the test anyway." %
(test, depends_on))
# Use brute-force topological sort to determine ordering. Sort by
# depth (higher depth = must run later), with original ordering to
# break ties.
def set_suite_depth(suite):
for dep in suite.dependencies:
new_depth = set_suite_depth(dep) + 1
if new_depth > suite.depth:
suite.depth = new_depth
return suite.depth
for index, suite in enumerate(suites):
set_suite_depth(suite)
suite.index = index
def cmp(a, b):
return (a > b) - (a < b)
def cmpfunc(a, b):
return cmp((a.depth, a.index), (b.depth, b.index))
suites.sort(key=functools.cmp_to_key(cmpfunc))
self.suite = testloader.suiteClass(suites)
return self.suite
def runTests(self):
logger.info("Test modules %s" % self.testslist)
if hasattr(self, "tagexp") and self.tagexp:
logger.info("Filter test cases by tags: %s" % self.tagexp)
logger.info("Found %s tests" % self.suite.countTestCases())
runner = unittest.TextTestRunner(verbosity=2)
if 'bb' in sys.modules:
runner.stream.write = custom_verbose
return runner.run(self.suite)
class RuntimeTestContext(TestContext):
def __init__(self, d, target, exported=False):
super(RuntimeTestContext, self).__init__(d, exported)
self.target = target
self.pkgmanifest = {}
manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"),
d.getVar("IMAGE_LINK_NAME") + ".manifest")
nomanifest = d.getVar("IMAGE_NO_MANIFEST")
if nomanifest is None or nomanifest != "1":
try:
with open(manifest) as f:
for line in f:
(pkg, arch, version) = line.strip().split()
self.pkgmanifest[pkg] = (version, arch)
except IOError as e:
bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
def _get_test_namespace(self):
return "runtime"
def _get_test_suites(self):
testsuites = []
manifests = (self.d.getVar("TEST_SUITES_MANIFEST") or '').split()
if manifests:
for manifest in manifests:
testsuites.extend(self._read_testlist(manifest,
self.d.getVar("TOPDIR")).split())
else:
testsuites = self.d.getVar("TEST_SUITES").split()
return testsuites
def _get_test_suites_required(self):
return [t for t in self.d.getVar("TEST_SUITES").split() if t != "auto"]
def loadTests(self):
super(RuntimeTestContext, self).loadTests()
if oeTest.hasPackage("procps"):
oeRuntimeTest.pscmd = "ps -ef"
def extract_packages(self):
"""
Find packages that will be needed during runtime.
"""
modules = self.getTestModules()
bbpaths = self.d.getVar("BBPATH").split(":")
shutil.rmtree(self.d.getVar("TEST_EXTRACTED_DIR"))
shutil.rmtree(self.d.getVar("TEST_PACKAGED_DIR"))
for module in modules:
json_file = self._getJsonFile(module)
if json_file:
needed_packages = self._getNeededPackages(json_file)
self._perform_package_extraction(needed_packages)
def _perform_package_extraction(self, needed_packages):
"""
Extract packages that will be needed during runtime.
"""
import oe.path
extracted_path = self.d.getVar("TEST_EXTRACTED_DIR")
packaged_path = self.d.getVar("TEST_PACKAGED_DIR")
for key,value in needed_packages.items():
packages = ()
if isinstance(value, dict):
packages = (value, )
elif isinstance(value, list):
packages = value
else:
bb.fatal("Failed to process needed packages for %s; "
"Value must be a dict or list" % key)
for package in packages:
pkg = package["pkg"]
rm = package.get("rm", False)
extract = package.get("extract", True)
if extract:
dst_dir = os.path.join(extracted_path, pkg)
else:
dst_dir = os.path.join(packaged_path)
# Extract package and copy it to TEST_EXTRACTED_DIR
pkg_dir = self._extract_in_tmpdir(pkg)
if extract:
# Same package used for more than one test,
# don't need to extract again.
if os.path.exists(dst_dir):
continue
oe.path.copytree(pkg_dir, dst_dir)
shutil.rmtree(pkg_dir)
# Copy package to TEST_PACKAGED_DIR
else:
self._copy_package(pkg)
def _getJsonFile(self, module):
"""
Returns the path of the JSON file for a module, empty if doesn't exitst.
"""
module_file = module.path
json_file = "%s.json" % module_file.rsplit(".", 1)[0]
if os.path.isfile(module_file) and os.path.isfile(json_file):
return json_file
else:
return ""
def _getNeededPackages(self, json_file, test=None):
"""
Returns a dict with needed packages based on a JSON file.
If a test is specified it will return the dict just for that test.
"""
import json
needed_packages = {}
with open(json_file) as f:
test_packages = json.load(f)
for key,value in test_packages.items():
needed_packages[key] = value
if test:
if test in needed_packages:
needed_packages = needed_packages[test]
else:
needed_packages = {}
return needed_packages
def _extract_in_tmpdir(self, pkg):
""""
Returns path to a temp directory where the package was
extracted without dependencies.
"""
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
pm = get_package_manager(self.d, pkg_path)
extract_dir = pm.extract(pkg)
shutil.rmtree(pkg_path)
return extract_dir
def _copy_package(self, pkg):
"""
Copy the RPM, DEB or IPK package to dst_dir
"""
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
dst_dir = self.d.getVar("TEST_PACKAGED_DIR")
pm = get_package_manager(self.d, pkg_path)
pkg_info = pm.package_info(pkg)
file_path = pkg_info[pkg]["filepath"]
shutil.copy2(file_path, dst_dir)
shutil.rmtree(pkg_path)
def install_uninstall_packages(self, test_id, pkg_dir, install):
"""
Check if the test requires a package and Install/Uninstall it in the DUT
"""
test = test_id.split(".")[4]
module = self.getModulefromID(test_id)
json = self._getJsonFile(module)
if json:
needed_packages = self._getNeededPackages(json, test)
if needed_packages:
self._install_uninstall_packages(needed_packages, pkg_dir, install)
def _install_uninstall_packages(self, needed_packages, pkg_dir, install=True):
"""
Install/Uninstall packages in the DUT without using a package manager
"""
if isinstance(needed_packages, dict):
packages = [needed_packages]
elif isinstance(needed_packages, list):
packages = needed_packages
for package in packages:
pkg = package["pkg"]
rm = package.get("rm", False)
extract = package.get("extract", True)
src_dir = os.path.join(pkg_dir, pkg)
# Install package
if install and extract:
self.target.connection.copy_dir_to(src_dir, "/")
# Uninstall package
elif not install and rm:
self.target.connection.delete_dir_structure(src_dir, "/")
class ImageTestContext(RuntimeTestContext):
    """Runtime test context for tests executed against a locally built image."""

    def __init__(self, d, target, host_dumper):
        super(ImageTestContext, self).__init__(d, target)
        self.tagexp = d.getVar("TEST_SUITES_TAGS")
        self.host_dumper = host_dumper
        # Cooperate with bitbake shutdown: remember the original handler and
        # translate SIGTERM into a stop of the target under test.
        self.sigterm = False
        self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
        signal.signal(signal.SIGTERM, self._sigterm_exception)

    def _sigterm_exception(self, signum, stackframe):
        # Signal handler installed in __init__: flag the shutdown request
        # and stop the device under test.
        bb.warn("TestImage received SIGTERM, shutting down...")
        self.sigterm = True
        self.target.stop()

    def install_uninstall_packages(self, test_id, install=True):
        """
        Check if the test requires a package and Install/Uninstall it in the DUT
        """
        # Image runs take packages from the extracted-packages directory.
        pkg_dir = self.d.getVar("TEST_EXTRACTED_DIR")
        super(ImageTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
class ExportTestContext(RuntimeTestContext):
    def __init__(self, d, target, exported=False, parsedArgs={}):
        """
        This class is used when exporting tests and when are executed outside OE environment.

        parsedArgs can contain the following:
        - tag: Filter test by tag.
        """
        # NOTE(review): the mutable default for parsedArgs is safe here
        # because it is only read, never mutated.
        super(ExportTestContext, self).__init__(d, target, exported)

        # An explicit tag argument overrides the TEST_SUITES_TAGS variable.
        tag = parsedArgs.get("tag", None)
        self.tagexp = tag if tag != None else d.getVar("TEST_SUITES_TAGS")

        self.sigterm = None

    def install_uninstall_packages(self, test_id, install=True):
        """
        Check if the test requires a package and Install/Uninstall it in the DUT
        """
        # Extracted packages are shipped relative to the exported test
        # directory (two levels above this file).
        export_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        extracted_dir = self.d.getVar("TEST_EXPORT_EXTRACTED_DIR")
        pkg_dir = os.path.join(export_dir, extracted_dir)
        super(ExportTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
| hasPackageMatch |
production.py | from kombu.utils.url import safequote
from .base import *
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = '/var/www/edumate/static/'
STATIC_URL = '/static/'
MEDIA_ROOT = '/var/www/edumate/media/'
MEDIA_URL = '/media/'
# Email | # https://docs.djangoproject.com/en/2.2/topics/email/#email-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('SENDGRID_API_KEY')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
AZURE_STORAGE_KEY = config('AZURE_STORAGE_KEY')
AZURE_STORAGE_ACCOUNT = config('AZURE_STORAGE_ACCOUNT')
INSTALLED_APPS += [
'storages',
]
AZURE_ACCOUNT_KEY = AZURE_STORAGE_KEY
AZURE_ACCOUNT_NAME = AZURE_STORAGE_ACCOUNT
DEFAULT_FILE_STORAGE = 'edumate.azure.AzureMediaStorage'
STATICFILES_STORAGE = 'edumate.azure.AzureStaticStorage'
STATIC_LOCATION = 'static'
MEDIA_LOCATION = 'media'
AZURE_CUSTOM_DOMAIN = f'{AZURE_ACCOUNT_NAME}.blob.core.windows.net'
STATIC_URL = f'https://{AZURE_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
MEDIA_URL = f'https://{AZURE_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/'
BROKER_URL = config('CELERY_REDIS_LOCATION')
BROKER_TRANSPORT_OPTIONS = {
'polling_interval': 10,
'visibility_timeout': 3600
} | |
router_test.go | package routing
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"github.com/kiali/kiali/config"
)
func TestDrawPathProperly(t *testing.T) |
// testRoute asserts that a route with the given name is registered on the
// router and that it is bound to exactly the expected HTTP method.
func testRoute(router *mux.Router, name string, method string, t *testing.T) {
	path := router.Get(name)
	if path == nil {
		t.Error("path is not registered into router")
		// Without this return the nil route would be dereferenced below.
		return
	}
	methods, err := path.GetMethods()
	if err != nil {
		t.Error(err)
		return
	}
	// The original check used &&, which never fired when exactly one wrong
	// method was registered and could panic on an empty method list; with
	// || either a wrong count or a wrong method reports a failure, and
	// short-circuiting avoids the out-of-range access.
	if len(methods) != 1 || methods[0] != method {
		t.Errorf("route %q is not registered with method %s", name, method)
	}
}
// TestWebRootRedirect checks that a request for "/" is answered with a 302
// redirect to the configured webroot ("/test/").
func TestWebRootRedirect(t *testing.T) {
	// Restore the global configuration once the test is done.
	oldConfig := config.Get()
	defer config.Set(oldConfig)

	conf := new(config.Config)
	conf.Server.WebRoot = "/test"
	config.Set(conf)

	router := NewRouter()
	ts := httptest.NewServer(router)
	defer ts.Close()

	// Client that surfaces the redirect instead of following it.
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	resp, err := client.Get(ts.URL + "/")
	if err != nil {
		t.Fatal(err)
	}
	// The response body must be closed to release the connection.
	defer resp.Body.Close()
	assert.Equal(t, 302, resp.StatusCode, "Response should redirect to the webroot")
	assert.Equal(t, "/test/", resp.Header.Get("Location"), "Response should redirect to the webroot")
}
// TestSimpleRoute checks that the health endpoint answers 200 with an
// empty body.
func TestSimpleRoute(t *testing.T) {
	conf := new(config.Config)
	config.Set(conf)

	router := NewRouter()

	ts := httptest.NewServer(router)
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/healthz")
	if err != nil {
		t.Fatal(err)
	}
	// The response body must be closed to release the connection.
	defer resp.Body.Close()

	assert.Equal(t, 200, resp.StatusCode, "Response should be ok")
	body, _ := ioutil.ReadAll(resp.Body)
	assert.Equal(t, "", string(body), "Response should be empty")
}
// TestRedirectWithSetWebRootKeepsParams checks the webroot endpoint with and
// without a trailing slash: neither request may redirect, and both must
// return the same body.
// NOTE(review): despite the name, no query parameters are exercised here —
// confirm whether a params case is missing.
func TestRedirectWithSetWebRootKeepsParams(t *testing.T) {
	// Restore the global configuration once the test is done.
	oldConfig := config.Get()
	defer config.Set(oldConfig)

	conf := new(config.Config)
	conf.Server.WebRoot = "/test"
	config.Set(conf)

	router := NewRouter()

	ts := httptest.NewServer(router)
	defer ts.Close()

	// Client that surfaces redirects instead of following them.
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	resp, err := client.Get(ts.URL + "/test")
	if err != nil {
		t.Fatal(err)
	}
	body, _ := ioutil.ReadAll(resp.Body)

	assert.Equal(t, 200, resp.StatusCode, "Response should not redirect")

	resp, err = client.Get(ts.URL + "/test/")
	if err != nil {
		t.Fatal(err)
	}
	body2, _ := ioutil.ReadAll(resp.Body)

	assert.Equal(t, 200, resp.StatusCode, string(body2))
	assert.Equal(t, string(body), string(body2), "Response with and without the trailing slash on the webroot are not the same")
}
| {
conf := new(config.Config)
config.Set(conf)
router := NewRouter()
testRoute(router, "Root", "GET", t)
} |
info.py | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from rally.cli import cliutils
from rally.cli.commands import plugin
class InfoCommands(object):
"""[Deprecated since 0.1.1] Allows you to get quick doc of rally entities.
"""
@cliutils.args("--query", dest="query", type=str, help="Search query.")
def find(self, query):
|
def list(self):
    """List main entities in Rally for which rally info find works.

    Deprecated: prints a notice and delegates to ``rally plugin list``.
    """
    # Emit the deprecation notice, then hand over to the plugin command.
    print("This command was deprecated, and will be removed in 0.2.0 use:")
    print("rally plugin list")
    commands = plugin.PluginCommands()
    commands.list()
    return 1
| """Search for an entity that matches the query and print info about it.
:param query: search query.
"""
print("This command was deprecated, and will be removed in 0.2.0 use:")
print("rally plugin show %s" % query)
plugin.PluginCommands().show(query)
return 1 |
landmark.py | import sys
from pymunk import Body, Circle, ShapeFilter
from configsingleton import ConfigSingleton
from common import *
from common.drawing import draw_circle
class Landmark(object):
def __init__(self, mask, radius):
    """
    Static circular landmark placed in the pymunk space.

    :param mask: category bitmask (ARC_/POLE_/BLAST_LANDMARK_MASK); also
        selects the debug draw colour. Any other value aborts the program.
    :param radius: radius of the landmark's circle shape.
    """
    self.body = Body(0, 0, Body.STATIC)
    self.body.position = 0, 0
    self.body.angle = 0
    self.body.velocity = 0, 0
    self.body.angular_velocity = 0
    self.shape = Circle(self.body, radius)
    self.mask = mask
    self.shape.filter = ShapeFilter(categories = mask)
    # Colour-code by landmark kind: green=arc, blue=pole, red=blast.
    if mask == ARC_LANDMARK_MASK:
        self.shape.color = 0, 255, 0
    elif mask == POLE_LANDMARK_MASK:
        self.shape.color = 0, 0, 255
    elif mask == BLAST_LANDMARK_MASK:
        self.shape.color = 255, 0, 0
    else:
        sys.exit("Unknown landmark mask: " + str(mask))

    # The following is just to set the appropriate params to visualize below
    # (radii are inflated by the landmark radius so circles hug the edge).
    config = ConfigSingleton.get_instance()
    self.vis_range_max = \
        config.getfloat("RangeScan:landmarks", "range_max") \
        + radius
    self.vis_inside_radius = \
        config.getfloat("LandmarkCircleController", "inside_radius") \
        + radius
    self.vis_outside_radius = \
        config.getfloat("LandmarkCircleController", "outside_radius") \
        + radius
def visualize_params(self):
    """Draw debug circles for the radii associated with this landmark."""
    # Reconstructed: stray record-separator characters had corrupted this
    # method's body.
    centre = (self.body.position.x, self.body.position.y)
    # Sensor range is always shown; the inside/outside control radii only
    # apply to arc landmarks.
    draw_circle(centre, self.vis_range_max, (255, 255, 255))
    if self.mask == ARC_LANDMARK_MASK:
        draw_circle(centre, self.vis_inside_radius, (0, 255, 0))
        draw_circle(centre, self.vis_outside_radius, (255, 0, 0))
importation.py | from typing import List, Callable
from autumn.curve import scale_up_function


def get_importation_rate_func_as_birth_rates(
    importation_times: List[float],
    importation_n_cases: List[float],
    detect_prop_func,
    starting_pops: list,
):
    """
    When imported cases are explicitly simulated as part of the modelled
    population, they enter the late_infectious compartment through a birth
    process. Returns the per-capita recruitment (birth) rate function.
    """
    # Inflate importation numbers to account for undetected cases (assumed
    # to be asymptomatic or symptomatic non-hospitalised). Note this
    # modifies importation_n_cases in place, as the original did.
    for i, time in enumerate(importation_times):
        importation_n_cases[i] /= detect_prop_func(time)

    # Scale-up curve interpolating the (time, cases) points.
    importation_numbers_scale_up = scale_up_function(
        importation_times, importation_n_cases, method=4, smoothness=5.0, bound_low=0.0
    )

    def recruitment_rate(t):
        # Convert absolute case numbers into a per-capita birth rate.
        return importation_numbers_scale_up(t) / sum(starting_pops)

    return recruitment_rate


# dummy proportions for now:
# FIXME: These are parameters!
IMPORTATION_PROPS_BY_AGE = {
    "0": 0.04,
    "5": 0.04,
    "10": 0.04,
    "15": 0.04,
    "20": 0.08,
    "25": 0.09,
    "30": 0.09,
    "35": 0.09,
    "40": 0.09,
    "45": 0.08,
    "50": 0.08,
    "55": 0.08,
    "60": 0.04,
    "65": 0.04,
    "70": 0.04,
    "75": 0.04,
}
identifier_test.go | package work | import "testing"
func TestMakeIdentifier(t *testing.T) {
t.Parallel()
id := makeIdentifier()
if len(id) < 10 {
t.Errorf("expected a string of length 10 at least")
}
} | |
const MEM_SIZE: usize = 4096;

/// Flat 4 KiB byte-addressable RAM bank.
pub struct Memory {
    memory: [u8; MEM_SIZE], // 4kb RAM
}

impl Memory {
    /// Create a zero-initialised memory bank.
    pub fn new() -> Memory {
        Memory {
            memory: [0; MEM_SIZE],
        }
    }

    /// Raw pointer to the start of RAM (restored name: the identifier had
    /// been detached from its definition by record corruption).
    pub fn as_ptr(&self) -> *const u8 {
        self.memory.as_ptr()
    }

    /// Fetch the big-endian 16-bit instruction at `pc`.
    pub fn get_instr(&self, pc: u16) -> u16 {
        (self.memory[pc as usize] as u16) << 8 | self.memory[pc as usize + 1] as u16
    }

    /// Copy `target` into memory at `start..start + size`.
    pub fn map_range(&mut self, start: usize, size: usize, target: &[u8]) {
        self.memory[start..start + size].copy_from_slice(target);
    }

    /// Borrow the slice `start..start + size`.
    pub fn get_range(&self, start: usize, size: usize) -> &[u8] {
        &self.memory[start..start + size]
    }

    /// Set a single byte at a specific address.
    pub fn set(&mut self, index: u16, data: u8) {
        self.memory[index as usize] = data;
    }

    /// Get a single byte at a specific address.
    pub fn get(&self, index: u16) -> u8 {
        self.memory[index as usize]
    }
}
# Generated by Django 2.0.2 on 2018-02-25 14:51
# (Repaired: stray record-separator characters removed; content unchanged.)

from django.db import migrations
import lists.models


class Migration(migrations.Migration):

    dependencies = [
        ('lists', '0002_auto_20180225_1540'),
    ]

    operations = [
        migrations.AlterField(
            model_name='todo',
            name='due_date',
            field=lists.models.DueDateField(blank=True),
        ),
    ]
hotkeys.js | var myKeyQueue = [];
// Queue each raw keydown (normalised key code + shift state) and process
// the queue immediately, FIFO order.
document.addEventListener("keydown", function(e) {
    var code = {key:(e.charCode !== 0 ? e.charCode : e.keyCode), shift:e.shiftKey};
    myKeyQueue.push(code);
    processKeyQueue();
});
// Pop and dispatch the oldest queued keypress. Most branches are empty
// placeholders for future bindings; only 'b' is currently wired up.
function processKeyQueue() {
    var key = myKeyQueue[0].key;
    var shift = myKeyQueue[0].shift; // currently unused by any binding
    myKeyQueue.splice(0, 1);
    if(key === 27) { //escape
    } else if(key === 13 || key === 32) { //enter / space
    } else if(key === 38 || key === 87) { //w
    } else if(key === 40 || key === 83) { //s
    } else if(key === 37 || key === 65) { //a
    } else if(key === 39 || key === 68) { //d
    } else if(key === 66) { //b  — buy ice
        game.buyIce()
    } else if(key === 81) { //q
    } else if(key === 69) { //e
    } else if(key === 82) { //r
    } else if(key === 76) { //l
    } else if(key === 72) { //h
    } else if(key === 73) { //i
    } else if(key === 85) { //u
    } else if(key === 49) { //1
    } else if(key === 50) { //2
    } else if(key === 51) { //3
    } else if(key === 52) { //4
    } else if(key === 53) { //5
    } else if(key === 54) { //6
    } else if(key === 55) { //7
    }
}
var keys = {32: 1, 37: 1, 38: 1, 39: 1, 40: 1};
// Cancel an event's default action, covering both the standard DOM API
// and the legacy IE model (window.event / returnValue).
function preventDefault(e) {
    var evt = e || window.event;
    if (evt.preventDefault) {
        evt.preventDefault();
    }
    evt.returnValue = false;
}
// Swallow key events for the codes listed in `keys` (space and arrows) so
// the page does not scroll while the game is handling them.
function preventDefaultForScrollKeys(e) {
    if (keys[e.keyCode]) {
        preventDefault(e);
        return false;
    }
}

// Install the scroll-key suppressor globally, and do so at load time.
function disableScroll() {
    document.onkeydown = preventDefaultForScrollKeys;
}
disableScroll();
var backgroundGrid = document.getElementById('mainContainer');
var rclickStartingPoint;
| rclickStartingPoint = {x:e.pageX, y:e.pageY};
}
};
backgroundGrid.onmousemove = function(e) {
if((e.which && e.which === 3) || (e.buttons && e.buttons === 2)) {
var dragToPoint = {x:e.pageX, y:e.pageY};
var offsetx = Math.ceil((dragToPoint.x - rclickStartingPoint.x)/1.5);
var offsety = Math.ceil((dragToPoint.y - rclickStartingPoint.y)/1.5);
window.scrollBy(offsetx, offsety);
rclickStartingPoint = dragToPoint;
}
};
backgroundGrid.onmouseup = function(e) {
if((e.which && e.which === 3) || (e.buttons && e.buttons === 2)) {
return;
}
};
document.getElementById('shipSpawnSlider1').oninput = function() {
game.hangars[0].y = (100 - this.value) * 3.5;
}; | backgroundGrid.onmousedown = function(e) {
if((e.which && e.which === 3) || (e.buttons && e.buttons === 2)) { //Right click |
main.go | package main
import (
"fmt"
"github.com/ChaosXu/nerv/cmd/agent-cli/cmd"
"os"
)
var (
Version = "main.min.build"
)
func main() {
if err := cmd.RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(-1)
} | } |
|
hydrogen.py | from sfepy.linalg import norm_l2_along_axis
from quantum_common import common
def fun_v(ts, coor, mode=None, region=None, ig=None):
    """
    Material function: Coulomb-like potential ``V(r) = -C / r``.

    Evaluated only in quadrature-point mode (``mode == 'qp'``); any other
    mode returns None. ``coor`` holds one coordinate per row.

    Returns a dict with key 'V' holding an array shaped (n_point, 1, 1).
    """
    # (Removed an unused ``from numpy import sqrt``.)
    if mode != 'qp':
        return

    C = 0.5
    # Distance of each quadrature point from the origin.
    r = norm_l2_along_axis(coor, axis=1)

    V = - C * 1.0 / r
    # Reshape to (n_point, 1, 1): a scalar material value per point.
    V.shape = (V.shape[0], 1, 1)
    return {'V': V}
def define():
    """Problem definition entry point: delegate to the shared quantum setup.

    (Restored name ``define``: the identifier had been detached from its
    definition by record corruption.)
    """
    l = common(fun_v, n_eigs=5, tau=-1.0)
    return l
prova1.py | import math
class Robo:
def __init__(self,nome):
self.__nome = nome
self.__posicao = [0.0,0.0]
self.__em_op = False
@property
def nome(self):
return self.__nome
@nome.setter
def nome(self, alterar_nome):
self.__nome = alterar_nome
@property
def posicao(self):
return self.__posicao
def __str__(self):
return(f'Robô: {self.__nome}, {self.__em_op} em {self.__posicao}')
def distancia(self,nposicao):
self.nposicao = nposicao | self.__posicao = nposicao
class SistemaMultiRobos():
def __init__(self,quantidade):
self.__robos= []
for i in range(quantidade):
self.__robos.append(Robo(i))
def _acha_robo_ocioso(self):
for i in self.__robos:
if i.__em_op== False:
return (f'Robô: {i} livre')
def imprime_robos(self):
for i in self.__robos:
print(i)
def despacha(self, coordenadas):
pass
if __name__ == '__main__':
smr = SistemaMultiRobos(3) # sistema com 3 robôs
smr.imprime_robos()
smr.despacha((5.0, 5.0))
smr.imprime_robos()
smr.despacha((-5.0, -5.0))
smr.imprime_robos()
smr.despacha((0.0, -10.0))
smr.imprime_robos()
smr.despacha((15.0, 15.0))
smr.imprime_robos() |
print(math.sqrt(((self.__posicao[0]-self.nposicao[0])**2)+((self.__posicao[1]-self.nposicao[1])**2)))
def move(self,nposicao): |
tlslistener.go | package net
import (
"context"
"crypto/tls"
"fmt"
"net"
"sync/atomic"
"time"
)
// TLSListener is a TLS listener that provides accept with context.
type TLSListener struct {
	tcp       *net.TCPListener // underlying TCP listener, used for deadlines
	listener  net.Listener     // TLS wrapper connections are accepted from
	heartBeat time.Duration    // poll period for cancellation in AcceptWithContext
	closed    uint32           // set to 1 (atomically) once Close has run
}

// Defaults applied before user-supplied options.
var defaultTLSListenerOptions = tlsListenerOptions{
	heartBeat: time.Millisecond * 200,
}

// tlsListenerOptions is the resolved option set for a TLSListener.
type tlsListenerOptions struct {
	heartBeat time.Duration
}

// A TLSListenerOption sets options such as heartBeat parameters, etc.
type TLSListenerOption interface {
	applyTLSListener(*tlsListenerOptions)
}
// NewTLSListener creates tcp listener.
// Known networks are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only).
// Accepted connections are wrapped with TLS using tlsCfg; the heartBeat
// option (default 200ms) bounds how often AcceptWithContext polls for
// cancellation.
func NewTLSListener(network string, addr string, tlsCfg *tls.Config, opts ...TLSListenerOption) (*TLSListener, error) {
	// Start from the defaults and apply any functional options.
	cfg := defaultTLSListenerOptions
	for _, o := range opts {
		o.applyTLSListener(&cfg)
	}
	tcp, err := newNetTCPListen(network, addr)
	if err != nil {
		return nil, fmt.Errorf("cannot create new tls listener: %w", err)
	}
	tls := tls.NewListener(tcp, tlsCfg)
	return &TLSListener{
		tcp:       tcp,
		listener:  tls,
		heartBeat: cfg.heartBeat,
	}, nil
}
// AcceptWithContext waits with context for a generic Conn.
func (l *TLSListener) AcceptWithContext(ctx context.Context) (net.Conn, error) {
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
if atomic.LoadUint32(&l.closed) == 1 {
return nil, ErrListenerIsClosed
}
err := l.SetDeadline(time.Now().Add(l.heartBeat))
if err != nil {
return nil, fmt.Errorf("cannot set deadline to accept connection: %w", err)
}
rw, err := l.listener.Accept()
if err != nil {
if isTemporary(err) {
continue
}
return nil, fmt.Errorf("cannot accept connection: %w", err)
}
return rw, nil |
// SetDeadline sets deadline for accept operation.
// It applies to the underlying TCP listener.
func (l *TLSListener) SetDeadline(t time.Time) error {
	return l.tcp.SetDeadline(t)
}

// Accept waits for a generic Conn.
// Equivalent to AcceptWithContext with a background (non-cancellable) context.
func (l *TLSListener) Accept() (net.Conn, error) {
	return l.AcceptWithContext(context.Background())
}
// Close shuts the listener down; calling it more than once is a no-op.
func (l *TLSListener) Close() error {
	// Flip the closed flag exactly once; later callers see it already set
	// and return nil without touching the listener again.
	if atomic.CompareAndSwapUint32(&l.closed, 0, 1) {
		return l.listener.Close()
	}
	return nil
}
// Addr represents a network end point address.
func (l *TLSListener) Addr() net.Addr {
return l.listener.Addr()
} | }
} |
views.py | from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Requirement#, CreateRequirement
from django.forms.models import model_to_dict
# Create your views here.
class RequirementIndex(generic.ListView):
model = Requirement
template_name = 'requirements/index.html'
context_object_name = 'requirement_list'
paginate_by = 10
| def get_queryset(self):
return Requirement.objects.all()
class RequirementDetail(generic.DetailView):
model = Requirement
template_name = 'requirements/detail.html'
# Add a dictionary containing the model information to the context when
# rendering the view.
#def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# requirement_object_dictionary = Requirement.objects.filter(id=context['requirement'].id).values()[0]
# context['requirement_object'] = requirement_object_dictionary
# return context
class RequirementUpdate(generic.UpdateView):
    """Edit an existing requirement; all user-editable fields are exposed."""
    model = Requirement
    template_name = 'requirements/edit.html'
    # Keep this list in sync with RequirementCreate.fields.
    fields = [
        'description',
        'parent',
        'is_constraint',
        'min_measure_of_effectiveness',
        'target_measure_of_effectiveness',
        'rationale',
        'remarks',
        'acceptance_criteria_type',
        'priority',
        'status'
    ]
class RequirementCreate(generic.CreateView):
    """Create a new requirement; all user-editable fields are exposed."""
    model = Requirement
    template_name = 'requirements/create.html'
    # Keep this list in sync with RequirementUpdate.fields.
    fields = [
        'description',
        'parent',
        'is_constraint',
        'min_measure_of_effectiveness',
        'target_measure_of_effectiveness',
        'rationale',
        'remarks',
        'acceptance_criteria_type',
        'priority',
        'status'
    ]
test_compynent.py | #!/usr/bin/env python
"""Tests for `compynent` package."""
from contextlib import AbstractContextManager, contextmanager
from compynent import System
class InitCounter(AbstractContextManager):
    """Tracks initialisation order; holds -1 while outside the managed scope."""

    def __init__(self):
        # Sentinel value meaning "not started".
        self.cnt = -1

    def incr(self):
        """Advance the counter and return its new value."""
        self.cnt = self.cnt + 1
        return self.cnt

    def __enter__(self):
        # Entering the scope resets the ordering counter.
        self.cnt = 0
        return self

    def __exit__(self, *args):
        # Leaving the scope restores the sentinel.
        self.cnt = -1
class Config(AbstractContextManager):
    """Fake configuration component; records when it was initialised."""

    def __init__(self, init_counter):
        self._counter = init_counter

    def __enter__(self):
        # Config values exist only while the component is "up".
        self.incr = 10
        self.bar = 1
        # Remember this component's position in the start-up order.
        self._when = self._counter.incr()
        return self

    def __exit__(self, *args):
        self.bar = None
        self.incr = None
class Counter(AbstractContextManager):
def __init__(self, counter, config: Config):
self._config = config
self._counter = counter
def increment(self):
self.counter += self._config.incr
def __enter__(self):
self.counter = self._config.bar
self._when = self._counter.incr()
return self
def __exit__(self, *args):
self.counter = None
class App(AbstractContextManager):
def __init__(self, cfg: Config, counter: Counter, init_counter):
self._config = cfg
self._counter = counter
self._init_counter = init_counter
def get_counter(self):
return self._counter.counter
def incr_counter(self):
return self._counter.increment()
def __enter__(self):
self._when = self._init_counter.incr()
return self
def __exit__(self, *args):
pass
def sys_config():
return {'app': (App, ['counter', 'cfg', 'init_counter']),
'init_counter': (InitCounter, []),
'cfg': (Config, ['init_counter']),
'counter': (Counter, {'cfg': 'config',
'init_counter': 'counter'})}
def test_dag():
sys = System(sys_config())
assert sys.order == ['init_counter', 'cfg', 'counter', 'app']
pass
def test_system_map():
sys = System(sys_config())
# assert top level
with sys.start() as ctx:
assert isinstance(ctx['app'], App)
assert isinstance(ctx['cfg'], Config)
assert isinstance(ctx['counter'], Counter)
# assert dependencies
assert ctx['app']._config is ctx['cfg']
assert ctx['app']._counter is ctx['counter']
assert ctx['counter']._config is ctx['cfg']
def test_initialization_order():
with System(sys_config()).start() as ctx:
pass
assert ctx['cfg']._when == 1
assert ctx['counter']._when == 2
assert ctx['app']._when == 3
def | ():
with System(sys_config()).start() as ctx:
assert ctx['app'].get_counter() == 1
ctx['app'].incr_counter()
assert ctx['app'].get_counter() == 11
assert ctx['app'].get_counter() is None
def test_using_generators():
@contextmanager
def make_counter():
counter = [0]
try:
yield counter
finally:
counter[0] -= 1
@contextmanager
def make_outer(counter):
yield counter[0] + 1
system = System({'cnt': (make_counter, []),
'outer': (make_outer, {'cnt': 'counter'})})
with system.start() as ctx:
assert ctx['cnt'] == [0]
ctx['cnt'][0] = 123
assert ctx['cnt'] == [122]
| test_context_management |
_x11_common.py | from __future__ import division, absolute_import, print_function
from ooxcb.protocol import (
xtest,
)
from ooxcb.constant import (
ButtonPress,
ButtonRelease,
KeyPress,
KeyRelease,
MotionNotify
)
import ooxcb
from ooxcb.keysymdef import keysyms
import subprocess
import os
from ._common import BackendActionBuilder
xtest.mixin()
class _ActionsTransaction(object):
    """
    Context manager that batches input actions and replays them on exit.

    Entering yields a BackendActionBuilder; all actions queued on it are
    executed when the with-block ends.
    """

    def __init__(self, backend):
        self._conn = backend._conn
        self._actions_builder = BackendActionBuilder(backend)

    def __enter__(self):
        return self._actions_builder

    def __exit__(self, *args):
        #with self._conn.bunch():
        self._actions_builder.execute()
        # Never suppress exceptions raised inside the with-block.
        return False
class GeistXBase(object):
KEY_NAME_TO_CODE = keysyms
KEY_NAME_TO_CODE_IGNORE_CASE = {name.lower(): value
for name, value in keysyms.iteritems()}
def __init__(self, **kwargs):
display = kwargs.get('display', ':0')
self._display = display
self._conn = ooxcb.connect(display)
self._root = self._conn.setup.roots[self._conn.pref_screen].root
@property
def display(self):
|
def create_process(self, command, shell=True, stdout=None, stderr=None,
env=None):
"""
Execute a process using subprocess.Popen, setting the backend's DISPLAY
"""
env = env if env is not None else dict(os.environ)
env['DISPLAY'] = self.display
return subprocess.Popen(command, shell=shell,
stdout=stdout, stderr=stderr,
env=env)
def actions_transaction(self):
return _ActionsTransaction(self)
def _get_key_code_from_name(self, name):
    """
    Map a key name to an X key code.

    Lookup order: special-case 'shift' (mapped to left shift), then an
    exact keysym name match, then a case-insensitive match.

    :raises ValueError: if the name matches no known keysym.
    """
    if name == 'shift':
        symb = GeistXBase.KEY_NAME_TO_CODE['Shift_L']
    elif name in GeistXBase.KEY_NAME_TO_CODE:
        symb = GeistXBase.KEY_NAME_TO_CODE[name]
    elif name.lower() in GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE:
        symb = GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE[name.lower()]
    else:
        raise ValueError('unhandled key %r' % (name,))
    return self._conn.keysyms.get_keycode(symb)
def key_down(self, name):
key_code = self._get_key_code_from_name(name)
with self._conn.bunch():
self._conn.xtest.fake_input_checked(
KeyPress,
detail=key_code
)
def key_up(self, name):
key_code = self._get_key_code_from_name(name)
with self._conn.bunch():
self._conn.xtest.fake_input_checked(
KeyRelease,
detail=key_code
)
def button_down(self, button_num):
with self._conn.bunch():
self._conn.xtest.fake_input_checked(
ButtonPress,
detail=button_num
)
def button_up(self, button_num):
with self._conn.bunch():
self._conn.xtest.fake_input_checked(
ButtonRelease,
detail=button_num
)
def move(self, point):
x, y = point
with self._conn.bunch():
self._conn.xtest.fake_input_checked(
MotionNotify,
rootX=x,
rootY=y,
)
def cursor_position(self):
reply = self._root.query_pointer().reply()
return reply.root_x, reply.root_y
def close(self):
if hasattr(self, '_conn'):
self._conn.disconnect()
del self._conn
def __del__(self):
self.close()
| return self._display |
SelectionManager.js | "use strict";
var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
Object.defineProperty(exports, "__esModule", { value: true });
var MouseHelper_1 = require("./utils/MouseHelper");
var Browser = require("./utils/Browser");
var EventEmitter_1 = require("./EventEmitter");
var SelectionModel_1 = require("./SelectionModel");
var Buffer_1 = require("./Buffer");
var DRAG_SCROLL_MAX_THRESHOLD = 50;
var DRAG_SCROLL_MAX_SPEED = 15;
var DRAG_SCROLL_INTERVAL = 50;
var WORD_SEPARATORS = ' ()[]{}\'"';
var NON_BREAKING_SPACE_CHAR = String.fromCharCode(160);
var ALL_NON_BREAKING_SPACE_REGEX = new RegExp(NON_BREAKING_SPACE_CHAR, 'g');
var SelectionMode;
(function (SelectionMode) {
SelectionMode[SelectionMode["NORMAL"] = 0] = "NORMAL";
SelectionMode[SelectionMode["WORD"] = 1] = "WORD";
SelectionMode[SelectionMode["LINE"] = 2] = "LINE";
})(SelectionMode || (SelectionMode = {}));
var SelectionManager = (function (_super) {
__extends(SelectionManager, _super);
function SelectionManager(_terminal, _buffer, _charMeasure) {
var _this = _super.call(this) || this;
_this._terminal = _terminal;
_this._buffer = _buffer;
_this._charMeasure = _charMeasure;
_this._enabled = true;
_this._initListeners();
_this.enable();
_this._model = new SelectionModel_1.SelectionModel(_terminal);
_this._activeSelectionMode = SelectionMode.NORMAL;
return _this;
}
SelectionManager.prototype._initListeners = function () {
var _this = this;
this._mouseMoveListener = function (event) { return _this._onMouseMove(event); };
this._mouseUpListener = function (event) { return _this._onMouseUp(event); };
this._buffer.lines.on('trim', function (amount) { return _this._onTrim(amount); });
};
SelectionManager.prototype.disable = function () {
this.clearSelection();
this._enabled = false;
};
SelectionManager.prototype.enable = function () {
this._enabled = true;
};
SelectionManager.prototype.setBuffer = function (buffer) {
this._buffer = buffer;
this.clearSelection();
};
Object.defineProperty(SelectionManager.prototype, "selectionStart", {
get: function () { return this._model.finalSelectionStart; },
enumerable: true,
configurable: true
});
Object.defineProperty(SelectionManager.prototype, "selectionEnd", {
get: function () { return this._model.finalSelectionEnd; },
enumerable: true,
configurable: true
});
Object.defineProperty(SelectionManager.prototype, "hasSelection", {
get: function () {
var start = this._model.finalSelectionStart;
var end = this._model.finalSelectionEnd;
if (!start || !end) {
return false;
}
return start[0] !== end[0] || start[1] !== end[1];
},
enumerable: true,
configurable: true
});
Object.defineProperty(SelectionManager.prototype, "selectionText", { | if (!start || !end) {
return '';
}
var startRowEndCol = start[1] === end[1] ? end[0] : null;
var result = [];
result.push(this._buffer.translateBufferLineToString(start[1], true, start[0], startRowEndCol));
for (var i = start[1] + 1; i <= end[1] - 1; i++) {
var bufferLine = this._buffer.lines.get(i);
var lineText = this._buffer.translateBufferLineToString(i, true);
if (bufferLine.isWrapped) {
result[result.length - 1] += lineText;
}
else {
result.push(lineText);
}
}
if (start[1] !== end[1]) {
var bufferLine = this._buffer.lines.get(end[1]);
var lineText = this._buffer.translateBufferLineToString(end[1], true, 0, end[0]);
if (bufferLine.isWrapped) {
result[result.length - 1] += lineText;
}
else {
result.push(lineText);
}
}
var formattedResult = result.map(function (line) {
return line.replace(ALL_NON_BREAKING_SPACE_REGEX, ' ');
}).join(Browser.isMSWindows ? '\r\n' : '\n');
return formattedResult;
},
enumerable: true,
configurable: true
});
SelectionManager.prototype.clearSelection = function () {
this._model.clearSelection();
this._removeMouseDownListeners();
this.refresh();
};
SelectionManager.prototype.refresh = function (isNewSelection) {
var _this = this;
if (!this._refreshAnimationFrame) {
this._refreshAnimationFrame = window.requestAnimationFrame(function () { return _this._refresh(); });
}
if (Browser.isLinux && isNewSelection) {
var selectionText = this.selectionText;
if (selectionText.length) {
this.emit('newselection', this.selectionText);
}
}
};
SelectionManager.prototype._refresh = function () {
this._refreshAnimationFrame = null;
this.emit('refresh', { start: this._model.finalSelectionStart, end: this._model.finalSelectionEnd });
};
SelectionManager.prototype.selectAll = function () {
this._model.isSelectAllActive = true;
this.refresh();
};
SelectionManager.prototype._onTrim = function (amount) {
var needsRefresh = this._model.onTrim(amount);
if (needsRefresh) {
this.refresh();
}
};
SelectionManager.prototype._getMouseBufferCoords = function (event) {
var coords = this._terminal.mouseHelper.getCoords(event, this._terminal.element, this._charMeasure, this._terminal.options.lineHeight, this._terminal.cols, this._terminal.rows, true);
if (!coords) {
return null;
}
coords[0]--;
coords[1]--;
coords[1] += this._terminal.buffer.ydisp;
return coords;
};
// Compute the drag-scroll speed for a mouse position outside the viewport.
// Returns 0 while the pointer is inside the terminal; otherwise a signed
// value whose magnitude grows with the distance from the edge, capped at
// DRAG_SCROLL_MAX_SPEED (distance capped at DRAG_SCROLL_MAX_THRESHOLD px).
SelectionManager.prototype._getMouseEventScrollAmount = function (event) {
    var offset = MouseHelper_1.MouseHelper.getCoordsRelativeToElement(event, this._terminal.element)[1];
    var terminalHeight = this._terminal.rows * Math.ceil(this._charMeasure.height * this._terminal.options.lineHeight);
    if (offset >= 0 && offset <= terminalHeight) {
        return 0;
    }
    // Below the viewport: measure the distance from the bottom edge.
    if (offset > terminalHeight) {
        offset -= terminalHeight;
    }
    // Clamp, normalise to [-1, 1], then scale to the speed range while
    // keeping the sign (minimum magnitude of 1).
    offset = Math.min(Math.max(offset, -DRAG_SCROLL_MAX_THRESHOLD), DRAG_SCROLL_MAX_THRESHOLD);
    offset /= DRAG_SCROLL_MAX_THRESHOLD;
    return (offset / Math.abs(offset)) + Math.round(offset * (DRAG_SCROLL_MAX_SPEED - 1));
};
// Handles mousedown: dispatches to incremental (shift), single, double or
// triple click selection, then attaches temporary document-level listeners
// to track the drag.
SelectionManager.prototype.onMouseDown = function (event) {
    // Leave right-click alone when a selection exists (context menu use).
    if (event.button === 2 && this.hasSelection) {
        return;
    }
    // Only the primary button starts a selection.
    if (event.button !== 0) {
        return;
    }
    if (!this._enabled) {
        // Selection is disabled; a platform modifier key can still force it
        // (Alt on macOS, Shift elsewhere).
        var shouldForceSelection = Browser.isMac ? event.altKey : event.shiftKey;
        if (!shouldForceSelection) {
            return;
        }
        // Keep other handlers from also processing this forced selection.
        event.stopPropagation();
    }
    event.preventDefault();
    this._dragScrollAmount = 0;
    if (this._enabled && event.shiftKey) {
        this._onIncrementalClick(event);
    }
    else {
        // event.detail counts consecutive clicks.
        if (event.detail === 1) {
            this._onSingleClick(event);
        }
        else if (event.detail === 2) {
            this._onDoubleClick(event);
        }
        else if (event.detail === 3) {
            this._onTripleClick(event);
        }
    }
    this._addMouseDownListeners();
    this.refresh(true);
};
// Attaches document-level mousemove/mouseup listeners and starts the
// drag-scroll timer; active only for the duration of a drag.
SelectionManager.prototype._addMouseDownListeners = function () {
    var _this = this;
    this._terminal.element.ownerDocument.addEventListener('mousemove', this._mouseMoveListener);
    this._terminal.element.ownerDocument.addEventListener('mouseup', this._mouseUpListener);
    this._dragScrollIntervalTimer = setInterval(function () { return _this._dragScroll(); }, DRAG_SCROLL_INTERVAL);
};
// Detaches the temporary listeners and stops the drag-scroll timer.
SelectionManager.prototype._removeMouseDownListeners = function () {
    this._terminal.element.ownerDocument.removeEventListener('mousemove', this._mouseMoveListener);
    this._terminal.element.ownerDocument.removeEventListener('mouseup', this._mouseUpListener);
    clearInterval(this._dragScrollIntervalTimer);
    this._dragScrollIntervalTimer = null;
};
// Shift+click: extend an existing selection to the clicked cell.
SelectionManager.prototype._onIncrementalClick = function (event) {
    if (this._model.selectionStart) {
        this._model.selectionEnd = this._getMouseBufferCoords(event);
    }
};
// Begins a normal selection at the clicked cell, clearing select-all and
// any previous selection state. If the click lands on the zero-width
// trailing cell of a wide character, the start column is advanced so the
// whole character is covered.
SelectionManager.prototype._onSingleClick = function (event) {
    this._model.selectionStartLength = 0;
    this._model.isSelectAllActive = false;
    this._activeSelectionMode = SelectionMode.NORMAL;
    this._model.selectionStart = this._getMouseBufferCoords(event);
    if (!this._model.selectionStart) {
        return;
    }
    this._model.selectionEnd = null;
    var line = this._buffer.lines.get(this._model.selectionStart[1]);
    if (!line) {
        return;
    }
    // BUG FIX: the guard previously read `line.length >= selectionStart[0]`,
    // which returns for every in-range click — making the wide-character
    // adjustment below unreachable — and would fall through to an
    // out-of-bounds index otherwise. Bail out only when the start column is
    // past the end of the line.
    if (this._model.selectionStart[0] >= line.length) {
        return;
    }
    var char = line[this._model.selectionStart[0]];
    if (char[Buffer_1.CHAR_DATA_WIDTH_INDEX] === 0) {
        this._model.selectionStart[0]++;
    }
};
// Double click: switch to word mode and select the word under the pointer.
SelectionManager.prototype._onDoubleClick = function (event) {
    var coords = this._getMouseBufferCoords(event);
    if (!coords) {
        return;
    }
    this._activeSelectionMode = SelectionMode.WORD;
    this._selectWordAt(coords);
};
// Triple click: switch to line mode and select the whole line.
SelectionManager.prototype._onTripleClick = function (event) {
    var coords = this._getMouseBufferCoords(event);
    if (!coords) {
        return;
    }
    this._activeSelectionMode = SelectionMode.LINE;
    this._selectLineAt(coords[1]);
};
// Tracks pointer movement during a drag: updates the selection end,
// expands it per the active selection mode, computes the auto-scroll
// speed, and repaints only when the end actually moved.
SelectionManager.prototype._onMouseMove = function (event) {
    // Prevent any other mousemove handlers from also reacting.
    event.stopImmediatePropagation();
    // Snapshot the previous end so redundant refreshes can be skipped.
    var previousSelectionEnd = this._model.selectionEnd ? [this._model.selectionEnd[0], this._model.selectionEnd[1]] : null;
    this._model.selectionEnd = this._getMouseBufferCoords(event);
    if (!this._model.selectionEnd) {
        this.refresh(true);
        return;
    }
    if (this._activeSelectionMode === SelectionMode.LINE) {
        // Line mode: snap the end column to the row edge based on the
        // drag direction relative to the start row.
        if (this._model.selectionEnd[1] < this._model.selectionStart[1]) {
            this._model.selectionEnd[0] = 0;
        }
        else {
            this._model.selectionEnd[0] = this._terminal.cols;
        }
    }
    else if (this._activeSelectionMode === SelectionMode.WORD) {
        // Word mode: extend the end to cover the whole word under it.
        this._selectToWordAt(this._model.selectionEnd);
    }
    this._dragScrollAmount = this._getMouseEventScrollAmount(event);
    // While auto-scrolling, pin the end column to the outer edge so rows
    // are fully selected as they scroll into view.
    if (this._dragScrollAmount > 0) {
        this._model.selectionEnd[0] = this._terminal.cols - 1;
    }
    else if (this._dragScrollAmount < 0) {
        this._model.selectionEnd[0] = 0;
    }
    if (this._model.selectionEnd[1] < this._buffer.lines.length) {
        // A zero-width cell marks the trailing half of a wide character;
        // include the whole character in the selection.
        var char = this._buffer.lines.get(this._model.selectionEnd[1])[this._model.selectionEnd[0]];
        if (char && char[Buffer_1.CHAR_DATA_WIDTH_INDEX] === 0) {
            this._model.selectionEnd[0]++;
        }
    }
    // Refresh only when the end changed, to limit draw work.
    if (!previousSelectionEnd ||
        previousSelectionEnd[0] !== this._model.selectionEnd[0] ||
        previousSelectionEnd[1] !== this._model.selectionEnd[1]) {
        this.refresh(true);
    }
};
// Interval callback during a drag: scrolls the viewport by the current
// drag-scroll amount and drags the selection end along with it.
SelectionManager.prototype._dragScroll = function () {
    if (this._dragScrollAmount) {
        this._terminal.scrollDisp(this._dragScrollAmount, false);
        // Pin the selection end to the first/last visible cell in the
        // direction of the scroll.
        if (this._dragScrollAmount > 0) {
            this._model.selectionEnd = [this._terminal.cols - 1, this._terminal.buffer.ydisp + this._terminal.rows];
        }
        else {
            this._model.selectionEnd = [0, this._terminal.buffer.ydisp];
        }
        this.refresh();
    }
};
// Ends the drag interaction by detaching the temporary listeners.
SelectionManager.prototype._onMouseUp = function (event) {
    this._removeMouseDownListeners();
};
// Maps a viewport column to an index into the line's string form,
// compensating for zero-width cells (trailing halves of wide characters)
// and characters stored as more than one string code unit.
SelectionManager.prototype._convertViewportColToCharacterIndex = function (bufferLine, coords) {
    var charIndex = coords[0];
    for (var i = 0; coords[0] >= i; i++) {
        var char = bufferLine[i];
        if (char[Buffer_1.CHAR_DATA_WIDTH_INDEX] === 0) {
            // Zero-width cell: occupies a column but adds no character.
            charIndex--;
        }
        else if (char[Buffer_1.CHAR_DATA_CHAR_INDEX].length > 1 && coords[0] !== i) {
            // Multi-code-unit character before the target column.
            charIndex += char[Buffer_1.CHAR_DATA_CHAR_INDEX].length - 1;
        }
    }
    return charIndex;
};
// Programmatically sets the selection to `length` cells starting at
// (col, row), cancelling any in-progress mouse selection first.
SelectionManager.prototype.setSelection = function (col, row, length) {
    this._model.clearSelection();
    this._removeMouseDownListeners();
    this._model.selectionStart = [col, row];
    this._model.selectionStartLength = length;
    this.refresh();
};
// Returns { start, length } (in viewport columns) for the word at the
// given buffer coordinates, or null when the row is not in the buffer.
// A run of spaces counts as a word. The bookkeeping below reconciles
// viewport columns with string indices: wide characters occupy two
// columns but one character, while some characters occupy one column
// but several string code units.
SelectionManager.prototype._getWordAt = function (coords) {
    var bufferLine = this._buffer.lines.get(coords[1]);
    if (!bufferLine) {
        return null;
    }
    var line = this._buffer.translateBufferLineToString(coords[1], false);
    // Position within the string, plus correction counters accumulated
    // while expanding left/right.
    var startIndex = this._convertViewportColToCharacterIndex(bufferLine, coords);
    var endIndex = startIndex;
    var charOffset = coords[0] - startIndex;
    var leftWideCharCount = 0;
    var rightWideCharCount = 0;
    var leftLongCharOffset = 0;
    var rightLongCharOffset = 0;
    if (line.charAt(startIndex) === ' ') {
        // Clicked on whitespace: expand over the contiguous run of spaces.
        while (startIndex > 0 && line.charAt(startIndex - 1) === ' ') {
            startIndex--;
        }
        while (endIndex < line.length && line.charAt(endIndex + 1) === ' ') {
            endIndex++;
        }
    }
    else {
        var startCol = coords[0];
        var endCol = coords[0];
        // Starting on a zero-width cell: step back onto the wide char.
        if (bufferLine[startCol][Buffer_1.CHAR_DATA_WIDTH_INDEX] === 0) {
            leftWideCharCount++;
            startCol--;
        }
        // Starting on a width-2 cell: step forward past its trailing cell.
        if (bufferLine[endCol][Buffer_1.CHAR_DATA_WIDTH_INDEX] === 2) {
            rightWideCharCount++;
            endCol++;
        }
        // Account for a multi-code-unit character at the end position.
        if (bufferLine[endCol][Buffer_1.CHAR_DATA_CHAR_INDEX].length > 1) {
            rightLongCharOffset += bufferLine[endCol][Buffer_1.CHAR_DATA_CHAR_INDEX].length - 1;
            endIndex += bufferLine[endCol][Buffer_1.CHAR_DATA_CHAR_INDEX].length - 1;
        }
        // Expand left until a word separator, tallying wide/long chars.
        while (startCol > 0 && startIndex > 0 && !this._isCharWordSeparator(bufferLine[startCol - 1])) {
            var char = bufferLine[startCol - 1];
            if (char[Buffer_1.CHAR_DATA_WIDTH_INDEX] === 0) {
                leftWideCharCount++;
                startCol--;
            }
            else if (char[Buffer_1.CHAR_DATA_CHAR_INDEX].length > 1) {
                leftLongCharOffset += char[Buffer_1.CHAR_DATA_CHAR_INDEX].length - 1;
                startIndex -= char[Buffer_1.CHAR_DATA_CHAR_INDEX].length - 1;
            }
            startIndex--;
            startCol--;
        }
        // Expand right symmetrically.
        while (endCol < bufferLine.length && endIndex + 1 < line.length && !this._isCharWordSeparator(bufferLine[endCol + 1])) {
            var char = bufferLine[endCol + 1];
            if (char[Buffer_1.CHAR_DATA_WIDTH_INDEX] === 2) {
                rightWideCharCount++;
                endCol++;
            }
            else if (char[Buffer_1.CHAR_DATA_CHAR_INDEX].length > 1) {
                rightLongCharOffset += char[Buffer_1.CHAR_DATA_CHAR_INDEX].length - 1;
                endIndex += char[Buffer_1.CHAR_DATA_CHAR_INDEX].length - 1;
            }
            endIndex++;
            endCol++;
        }
    }
    // Make the end index exclusive.
    endIndex++;
    // Translate the string range back into viewport columns, capping the
    // length at the terminal width.
    var start = startIndex
        + charOffset
        - leftWideCharCount
        + leftLongCharOffset;
    var length = Math.min(this._terminal.cols, endIndex
        - startIndex
        + leftWideCharCount
        + rightWideCharCount
        - leftLongCharOffset
        - rightLongCharOffset);
    return { start: start, length: length };
};
// Sets the selection to the whole word under the given buffer coordinates.
SelectionManager.prototype._selectWordAt = function (coords) {
    var word = this._getWordAt(coords);
    if (!word) {
        return;
    }
    this._model.selectionStart = [word.start, coords[1]];
    this._model.selectionStartLength = word.length;
};
// Moves the selection end so it covers the word under the coordinates,
// choosing the near or far edge depending on the drag direction.
SelectionManager.prototype._selectToWordAt = function (coords) {
    var word = this._getWordAt(coords);
    if (!word) {
        return;
    }
    var endCol = this._model.areSelectionValuesReversed()
        ? word.start
        : word.start + word.length;
    this._model.selectionEnd = [endCol, coords[1]];
};
// A cell separates words when it holds a visible character listed in
// WORD_SEPARATORS; zero-width cells never separate words.
SelectionManager.prototype._isCharWordSeparator = function (charData) {
    return charData[Buffer_1.CHAR_DATA_WIDTH_INDEX] !== 0 &&
        WORD_SEPARATORS.indexOf(charData[Buffer_1.CHAR_DATA_CHAR_INDEX]) >= 0;
};
// Selects the entire given buffer line.
SelectionManager.prototype._selectLineAt = function (line) {
    this._model.selectionStart = [0, line];
    this._model.selectionStartLength = this._terminal.cols;
};
return SelectionManager;
}(EventEmitter_1.EventEmitter));
exports.SelectionManager = SelectionManager;
//# sourceMappingURL=SelectionManager.js.map | get: function () {
var start = this._model.finalSelectionStart;
var end = this._model.finalSelectionEnd; |
GenericFunctions.ts | import { OptionsWithUri } from 'request';
import {
IExecuteFunctions,
IExecuteSingleFunctions,
ILoadOptionsFunctions,
} from 'n8n-core';
import {
IDataObject
} from 'n8n-workflow';
/**
 * Performs a single authenticated request against the Zoho CRM v2 API.
 * The payload is wrapped in Zoho's `{ data: [...] }` envelope; API error
 * responses are re-thrown with a readable message.
 */
export async function zohoApiRequest(this: IExecuteFunctions | IExecuteSingleFunctions | ILoadOptionsFunctions, method: string, resource: string, body: any = {}, qs: IDataObject = {}, uri?: string, option: IDataObject = {}): Promise<any> { // tslint:disable-line:no-any
	const requestOptions: OptionsWithUri = {
		headers: { 'Content-Type': 'application/json' },
		method,
		body: { data: [body] },
		qs,
		uri: uri || `https://www.zohoapis.com/crm/v2${resource}`,
		json: true,
	};
	try {
		//@ts-ignore
		return await this.helpers.requestOAuth2.call(this, 'zohoOAuth2Api', requestOptions);
	} catch (error) {
		const message = error.response && error.response.body && error.response.body.message;
		if (message) {
			// Surface the Zoho-provided message instead of the raw HTTP error.
			throw new Error(`Zoho error response [${error.statusCode}]: ${message}`);
		}
		throw error;
	}
}
export async function | (this: IExecuteFunctions | ILoadOptionsFunctions, propertyName: string ,method: string, endpoint: string, body: any = {}, query: IDataObject = {}): Promise<any> { // tslint:disable-line:no-any
const returnData: IDataObject[] = [];
let responseData;
let uri: string | undefined;
query.per_page = 200;
query.page = 0;
do {
responseData = await zohoApiRequest.call(this, method, endpoint, body, query, uri);
uri = responseData.info.more_records;
returnData.push.apply(returnData, responseData[propertyName]);
query.page++;
} while (
responseData.info.more_records !== undefined &&
responseData.info.more_records === true
);
return returnData;
}
| zohoApiRequestAllItems |
validation.pipe.ts | import {
PipeTransform,
Injectable,
ArgumentMetadata,
BadRequestException,
} from '@nestjs/common';
import { validate, ValidationError } from 'class-validator';
import { plainToClass } from 'class-transformer';
import { ListErrors } from '../types';
@Injectable()
export class | implements PipeTransform<any> {
async transform(value: any, { metatype }: ArgumentMetadata) {
if (!metatype || !this.toValidate(metatype)) return value;
const object = plainToClass(metatype, value);
const validationErrors = await validate(object);
if (validationErrors.length > 0)
throw new BadRequestException(this.parseErrors(validationErrors));
return value;
}
private toValidate(metatype: Function): boolean {
const types: Function[] = [String, Boolean, Number, Array, Object];
return !types.includes(metatype);
}
private parseErrors(
validationErrors: ValidationError[],
response: ListErrors = { errors: [] },
) {
validationErrors.forEach(error => {
const title = 'Validation failed';
const { dataType } = error.target as any;
for (const constraint in error.constraints) {
const source = {};
if (dataType === 'query') source['parameter'] = `${error.property}`;
else source['pointer'] = `/data/attributes/${error.property}`;
return response.errors.push({
source,
title,
detail: error.constraints[constraint],
});
}
let actualErrorChildren = error.children;
const properties = [error.property];
while (!actualErrorChildren[0].constraints) {
properties.push(actualErrorChildren[0].property);
actualErrorChildren = actualErrorChildren[0].children;
}
for (const actualError of actualErrorChildren) {
const source = {};
if (dataType === 'query')
source['parameter'] = `${properties.join('/')}/${
actualError.property
}`;
else
source['pointer'] = `/data/attributes/${properties.join('/')}/${
actualError.property
}`;
response.errors.push({
title,
source,
detail:
actualError.constraints[Object.keys(actualError.constraints)[0]],
});
}
});
return response;
}
}
| ValidationPipe |
part1.go | package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// _ValidateNumber reports whether number is expressible as the sum of two
// differently-valued entries among the last 25 recorded numbers. While
// fewer than 25 numbers have been seen (the preamble), every number is
// considered valid.
func _ValidateNumber(number int, numbers []int) bool {
	if len(numbers) < 25 {
		return true
	}
	window := numbers[len(numbers)-25:]
	for _, a := range window {
		complement := number - a
		// A partner with the same value as a never counts as a pair.
		if complement == a {
			continue
		}
		for _, b := range window {
			if b == complement {
				return true
			}
		}
	}
	return false
}
func main() | {
scanner := bufio.NewScanner(os.Stdin)
numbers := make([]int, 0)
for scanner.Scan() {
text := scanner.Text()
number, _ := strconv.Atoi(text)
if !_ValidateNumber(number, numbers) {
fmt.Println(number)
return
}
numbers = append(numbers, number)
}
} |
|
mnemonic.rs | use crate::address::EthereumAddress;
use crate::extended_private_key::EthereumExtendedPrivateKey;
use crate::extended_public_key::EthereumExtendedPublicKey;
use crate::format::EthereumFormat;
use crate::network::EthereumNetwork;
use crate::private_key::EthereumPrivateKey;
use crate::public_key::EthereumPublicKey;
use crate::wordlist::EthereumWordlist;
use wagyu_model::{ExtendedPrivateKey, Mnemonic, MnemonicCount, MnemonicError, MnemonicExtended};
use bitvec::prelude::*;
use hmac::Hmac;
use pbkdf2::pbkdf2;
use rand::Rng;
use sha2::{Digest, Sha256, Sha512};
use std::{fmt, marker::PhantomData, ops::Div, str, str::FromStr};
// PBKDF2 parameters used by `to_seed` when stretching the mnemonic phrase
// into a 64-byte wallet seed (2048 rounds of HMAC-SHA512 — the BIP-39
// parameters).
const PBKDF2_ROUNDS: usize = 2048;
const PBKDF2_BYTES: usize = 64;

#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// Represents an Ethereum mnemonic
pub struct EthereumMnemonic<N: EthereumNetwork, W: EthereumWordlist> {
    /// Initial entropy in multiples of 32 bits
    entropy: Vec<u8>,
    /// PhantomData
    _network: PhantomData<N>,
    /// PhantomData
    _wordlist: PhantomData<W>,
}
impl<N: EthereumNetwork, W: EthereumWordlist> MnemonicCount for EthereumMnemonic<N, W> {
    /// Returns a new mnemonic given the word count.
    fn new_with_count<R: Rng>(rng: &mut R, word_count: u8) -> Result<Self, MnemonicError> {
        // Word count -> entropy length in bytes (12 words = 128 bits, ...,
        // 24 words = 256 bits).
        let length: usize = match word_count {
            12 => 16,
            15 => 20,
            18 => 24,
            21 => 28,
            24 => 32,
            wc => return Err(MnemonicError::InvalidWordCount(wc)),
        };
        // Draw the maximum 32 bytes of entropy once, keep only the prefix
        // required for the requested word count.
        let entropy: [u8; 32] = rng.gen();
        Ok(Self {
            entropy: entropy[0..length].to_vec(),
            _network: PhantomData,
            _wordlist: PhantomData,
        })
    }
}
impl<N: EthereumNetwork, W: EthereumWordlist> Mnemonic for EthereumMnemonic<N, W> {
    type Address = EthereumAddress;
    type Format = EthereumFormat;
    type PrivateKey = EthereumPrivateKey;
    type PublicKey = EthereumPublicKey;

    /// Returns a new mnemonic.
    fn new<R: Rng>(rng: &mut R) -> Result<Self, MnemonicError> {
        // Default to 128 bits of entropy (a 12-word mnemonic).
        let entropy: [u8; 16] = rng.gen();
        Ok(Self {
            entropy: entropy.to_vec(),
            _network: PhantomData,
            _wordlist: PhantomData,
        })
    }

    /// Returns the mnemonic for the given phrase.
    fn from_phrase(phrase: &str) -> Result<Self, MnemonicError> {
        let mnemonic = phrase.split(" ").collect::<Vec<&str>>();
        // Word count -> entropy length in bits (BIP-39).
        let length = match mnemonic.len() {
            12 => 128,
            15 => 160,
            18 => 192,
            21 => 224,
            24 => 256,
            wc => return Err(MnemonicError::InvalidWordCount(wc as u8)),
        };
        let mut entropy: BitVec<Msb0, u8> = BitVec::new();
        for word in mnemonic {
            let index = W::get_index(word)?;
            let index_u8: [u8; 2] = (index as u16).to_be_bytes();
            // Each word encodes an 11-bit index; drop the 5 leading pad bits.
            let index_slice = &BitVec::from_slice(&index_u8)[5..];
            entropy.append(&mut BitVec::<Msb0, u8>::from_bitslice(index_slice));
        }
        let mnemonic = Self {
            entropy: entropy[..length].as_slice().to_vec(),
            _network: PhantomData,
            _wordlist: PhantomData,
        };
        // Ensures the checksum word matches the checksum word in the given phrase.
        match phrase == mnemonic.to_phrase()? {
            true => Ok(mnemonic),
            false => Err(MnemonicError::InvalidPhrase(phrase.into())),
        }
    }

    /// Returns the phrase of the corresponding mnemonic.
    fn to_phrase(&self) -> Result<String, MnemonicError> {
        // Entropy length in bytes -> word count.
        //
        // FIX: the 24-, 28- and 32-byte arms had been cut out of this match,
        // so 18/21/24-word mnemonics produced by `new_with_count` would fail
        // to render. Restored per the BIP-39 table (mirroring the inverse
        // mapping used in `new_with_count` and `from_phrase`).
        let length: i32 = match self.entropy.len() {
            16 => 12,
            20 => 15,
            24 => 18,
            28 => 21,
            32 => 24,
            entropy_len => return Err(MnemonicError::InvalidEntropyLength(entropy_len)),
        };
        // Compute the checksum by taking the first ENT / 32 bits of the SHA256 hash
        let mut sha256 = Sha256::new();
        sha256.input(self.entropy.as_slice());
        let hash = sha256.result();
        let hash_0 = BitVec::<Msb0, u8>::from_element(hash[0]);
        let (checksum, _) = hash_0.split_at(length.div(3) as usize);
        // Convert the entropy bytes into bits and append the checksum
        let mut encoding = BitVec::<Msb0, u8>::from_vec(self.entropy.clone());
        encoding.append(&mut checksum.to_vec());
        // Compute the phrase in 11 bit chunks which encode an index into the word list
        let wordlist = W::get_all();
        let phrase = encoding
            .chunks(11)
            .map(|index| {
                // Convert a vector of 11 bits into a u11 number.
                let index = index
                    .iter()
                    .enumerate()
                    .map(|(i, &bit)| (bit as u16) * 2u16.pow(10 - i as u32))
                    .sum::<u16>();
                wordlist[index as usize]
            })
            .collect::<Vec<&str>>();
        Ok(phrase.join(" "))
    }

    /// Returns the private key of the corresponding mnemonic.
    fn to_private_key(&self, password: Option<&str>) -> Result<Self::PrivateKey, MnemonicError> {
        Ok(self.to_extended_private_key(password)?.to_private_key())
    }

    /// Returns the public key of the corresponding mnemonic.
    fn to_public_key(&self, password: Option<&str>) -> Result<Self::PublicKey, MnemonicError> {
        Ok(self.to_extended_private_key(password)?.to_public_key())
    }

    /// Returns the address of the corresponding mnemonic.
    fn to_address(&self, password: Option<&str>, format: &Self::Format) -> Result<Self::Address, MnemonicError> {
        Ok(self.to_extended_private_key(password)?.to_address(format)?)
    }
}
impl<N: EthereumNetwork, W: EthereumWordlist> MnemonicExtended for EthereumMnemonic<N, W> {
    type ExtendedPrivateKey = EthereumExtendedPrivateKey<N>;
    type ExtendedPublicKey = EthereumExtendedPublicKey<N>;

    /// Returns the extended private key of the corresponding mnemonic.
    ///
    /// Derives the master extended key (via `new_master`) from the PBKDF2 seed.
    fn to_extended_private_key(&self, password: Option<&str>) -> Result<Self::ExtendedPrivateKey, MnemonicError> {
        Ok(Self::ExtendedPrivateKey::new_master(
            self.to_seed(password)?.as_slice(),
            &EthereumFormat::Standard,
        )?)
    }

    /// Returns the extended public key of the corresponding mnemonic.
    fn to_extended_public_key(&self, password: Option<&str>) -> Result<Self::ExtendedPublicKey, MnemonicError> {
        Ok(self.to_extended_private_key(password)?.to_extended_public_key())
    }
}
impl<N: EthereumNetwork, W: EthereumWordlist> EthereumMnemonic<N, W> {
    /// Compares the given phrase against the phrase extracted from its entropy.
    pub fn verify_phrase(phrase: &str) -> bool {
        Self::from_phrase(phrase).is_ok()
    }

    /// Returns a seed using the given password and mnemonic.
    fn to_seed(&self, password: Option<&str>) -> Result<Vec<u8>, MnemonicError> {
        let mut seed = vec![0u8; PBKDF2_BYTES];
        // Salt is the literal "mnemonic" concatenated with the (possibly
        // empty) passphrase, then PBKDF2-HMAC-SHA512 over the phrase bytes.
        let salt = format!("mnemonic{}", password.unwrap_or(""));
        pbkdf2::<Hmac<Sha512>>(&self.to_phrase()?.as_bytes(), salt.as_bytes(), PBKDF2_ROUNDS, &mut seed);
        Ok(seed)
    }
}
impl<N: EthereumNetwork, W: EthereumWordlist> FromStr for EthereumMnemonic<N, W> {
    type Err = MnemonicError;

    /// Parses a mnemonic from its space-separated phrase (delegates to `from_phrase`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::from_phrase(s)
    }
}

impl<N: EthereumNetwork, W: EthereumWordlist> fmt::Display for EthereumMnemonic<N, W> {
    /// Formats the mnemonic as its phrase; entropy of an unsupported
    /// length cannot be rendered and surfaces as a formatting error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{}",
            match self.to_phrase() {
                Ok(phrase) => phrase,
                _ => return Err(fmt::Error),
            }
        )
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::network::*;
use crate::wordlist::*;
use hex;
use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
    // Generates a mnemonic of `word_count` words from a fixed-seed RNG and
    // round-trips it through `from_phrase`.
    fn test_new_with_count<N: EthereumNetwork, W: EthereumWordlist>(word_count: u8) {
        let rng = &mut XorShiftRng::seed_from_u64(1231275789u64);
        let mnemonic = EthereumMnemonic::<N, W>::new_with_count(rng, word_count).unwrap();
        test_from_phrase::<N, W>(&mnemonic.entropy, &mnemonic.to_phrase().unwrap());
    }
    // Asserts that parsing `phrase` recovers `expected_entropy` and that the
    // phrase survives a full round trip.
    fn test_from_phrase<N: EthereumNetwork, W: EthereumWordlist>(expected_entropy: &Vec<u8>, phrase: &str) {
        let mnemonic = EthereumMnemonic::<N, W>::from_phrase(phrase).unwrap();
        assert_eq!(&expected_entropy[..], &mnemonic.entropy[..]);
        assert_eq!(phrase, mnemonic.to_phrase().unwrap());
    }
    // Asserts that the given raw entropy renders as `expected_phrase`.
    fn test_to_phrase<N: EthereumNetwork, W: EthereumWordlist>(expected_phrase: &str, entropy: &Vec<u8>) {
        let mnemonic = EthereumMnemonic::<N, W> {
            entropy: entropy.clone(),
            _network: PhantomData,
            _wordlist: PhantomData,
        };
        assert_eq!(&entropy[..], &mnemonic.entropy[..]);
        assert_eq!(expected_phrase, mnemonic.to_phrase().unwrap());
    }
    // Asserts that `phrase` passes checksum/wordlist verification.
    fn test_verify_phrase<N: EthereumNetwork, W: EthereumWordlist>(phrase: &str) {
        assert!(EthereumMnemonic::<N, W>::verify_phrase(phrase));
    }
    // Asserts the PBKDF2 seed derived with `password` matches `expected_seed` (hex).
    fn test_to_seed<N: EthereumNetwork, W: EthereumWordlist>(
        expected_seed: &str,
        password: Option<&str>,
        mnemonic: EthereumMnemonic<N, W>,
    ) {
        assert_eq!(expected_seed, &hex::encode(mnemonic.to_seed(password).unwrap()))
    }
    // Asserts the BIP-32 master xprv derived from `phrase` + `password`
    // matches `expected_extended_private_key`.
    fn test_to_extended_private_key<N: EthereumNetwork, W: EthereumWordlist>(
        expected_extended_private_key: &str,
        password: Option<&str>,
        phrase: &str,
    ) {
        let mnemonic = EthereumMnemonic::<N, W>::from_phrase(phrase).unwrap();
        let extended_private_key = mnemonic.to_extended_private_key(password).unwrap();
        assert_eq!(expected_extended_private_key, extended_private_key.to_string());
    }
/// Test vectors from https://github.com/trezor/python-mnemonic/blob/master/vectors.json
mod english {
use super::*;
type N = Mainnet;
type W = English;
const PASSWORD: &str = "TREZOR";
const NO_PASSWORD_STR: &str = "5eb00bbddcf069084889a8ab9155568165f5c453ccb85e70811aaed6f6da5fc19a5ac40b389cd370d086206dec8aa6c43daea6690f20ad3d8d48b2d2ce9e38e4";
// (entropy, phrase, seed, extended_private_key)
const KEYPAIRS: [(&str, &str, &str, &str); 26] = [
(
"00000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
"c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04",
"xprv9s21ZrQH143K3h3fDYiay8mocZ3afhfULfb5GX8kCBdno77K4HiA15Tg23wpbeF1pLfs1c5SPmYHrEpTuuRhxMwvKDwqdKiGJS9XFKzUsAF"
),
(
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank yellow",
"2e8905819b8723fe2c1d161860e5ee1830318dbf49a83bd451cfb8440c28bd6fa457fe1296106559a3c80937a1c1069be3a3a5bd381ee6260e8d9739fce1f607",
"xprv9s21ZrQH143K2gA81bYFHqU68xz1cX2APaSq5tt6MFSLeXnCKV1RVUJt9FWNTbrrryem4ZckN8k4Ls1H6nwdvDTvnV7zEXs2HgPezuVccsq"
),
(
"80808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage above",
"d71de856f81a8acc65e6fc851a38d4d7ec216fd0796d0a6827a3ad6ed5511a30fa280f12eb2e47ed2ac03b5c462a0358d18d69fe4f985ec81778c1b370b652a8",
"xprv9s21ZrQH143K2shfP28KM3nr5Ap1SXjz8gc2rAqqMEynmjt6o1qboCDpxckqXavCwdnYds6yBHZGKHv7ef2eTXy461PXUjBFQg6PrwY4Gzq"
),
(
"ffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong",
"ac27495480225222079d7be181583751e86f571027b0497b5b5d11218e0a8a13332572917f0f8e5a589620c6f15b11c61dee327651a14c34e18231052e48c069",
"xprv9s21ZrQH143K2V4oox4M8Zmhi2Fjx5XK4Lf7GKRvPSgydU3mjZuKGCTg7UPiBUD7ydVPvSLtg9hjp7MQTYsW67rZHAXeccqYqrsx8LcXnyd"
),
(
"000000000000000000000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent",
"035895f2f481b1b0f01fcf8c289c794660b289981a78f8106447707fdd9666ca06da5a9a565181599b79f53b844d8a71dd9f439c52a3d7b3e8a79c906ac845fa",
"xprv9s21ZrQH143K3mEDrypcZ2usWqFgzKB6jBBx9B6GfC7fu26X6hPRzVjzkqkPvDqp6g5eypdk6cyhGnBngbjeHTe4LsuLG1cCmKJka5SMkmU"
),
(
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will",
"f2b94508732bcbacbcc020faefecfc89feafa6649a5491b8c952cede496c214a0c7b3c392d168748f2d4a612bada0753b52a1c7ac53c1e93abd5c6320b9e95dd",
"xprv9s21ZrQH143K3Lv9MZLj16np5GzLe7tDKQfVusBni7toqJGcnKRtHSxUwbKUyUWiwpK55g1DUSsw76TF1T93VT4gz4wt5RM23pkaQLnvBh7"
),
(
"808080808080808080808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always",
"107d7c02a5aa6f38c58083ff74f04c607c2d2c0ecc55501dadd72d025b751bc27fe913ffb796f841c49b1d33b610cf0e91d3aa239027f5e99fe4ce9e5088cd65",
"xprv9s21ZrQH143K3VPCbxbUtpkh9pRG371UCLDz3BjceqP1jz7XZsQ5EnNkYAEkfeZp62cDNj13ZTEVG1TEro9sZ9grfRmcYWLBhCocViKEJae"
),
(
"ffffffffffffffffffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when",
"0cd6e5d827bb62eb8fc1e262254223817fd068a74b5b449cc2f667c3f1f985a76379b43348d952e2265b4cd129090758b3e3c2c49103b5051aac2eaeb890a528",
"xprv9s21ZrQH143K36Ao5jHRVhFGDbLP6FCx8BEEmpru77ef3bmA928BxsqvVM27WnvvyfWywiFN8K6yToqMaGYfzS6Db1EHAXT5TuyCLBXUfdm"
),
(
"0000000000000000000000000000000000000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art",
"bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8",
"xprv9s21ZrQH143K32qBagUJAMU2LsHg3ka7jqMcV98Y7gVeVyNStwYS3U7yVVoDZ4btbRNf4h6ibWpY22iRmXq35qgLs79f312g2kj5539ebPM"
),
(
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title",
"bc09fca1804f7e69da93c2f2028eb238c227f2e9dda30cd63699232578480a4021b146ad717fbb7e451ce9eb835f43620bf5c514db0f8add49f5d121449d3e87",
"xprv9s21ZrQH143K3Y1sd2XVu9wtqxJRvybCfAetjUrMMco6r3v9qZTBeXiBZkS8JxWbcGJZyio8TrZtm6pkbzG8SYt1sxwNLh3Wx7to5pgiVFU"
),
(
"8080808080808080808080808080808080808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless",
"c0c519bd0e91a2ed54357d9d1ebef6f5af218a153624cf4f2da911a0ed8f7a09e2ef61af0aca007096df430022f7a2b6fb91661a9589097069720d015e4e982f",
"xprv9s21ZrQH143K3CSnQNYC3MqAAqHwxeTLhDbhF43A4ss4ciWNmCY9zQGvAKUSqVUf2vPHBTSE1rB2pg4avopqSiLVzXEU8KziNnVPauTqLRo"
),
(
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote",
"dd48c104698c30cfe2b6142103248622fb7bb0ff692eebb00089b32d22484e1613912f0a5b694407be899ffd31ed3992c456cdf60f5d4564b8ba3f05a69890ad",
"xprv9s21ZrQH143K2WFF16X85T2QCpndrGwx6GueB72Zf3AHwHJaknRXNF37ZmDrtHrrLSHvbuRejXcnYxoZKvRquTPyp2JiNG3XcjQyzSEgqCB"
),
(
"9e885d952ad362caeb4efe34a8e91bd2",
"ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic",
"274ddc525802f7c828d8ef7ddbcdc5304e87ac3535913611fbbfa986d0c9e5476c91689f9c8a54fd55bd38606aa6a8595ad213d4c9c9f9aca3fb217069a41028",
"xprv9s21ZrQH143K2oZ9stBYpoaZ2ktHj7jLz7iMqpgg1En8kKFTXJHsjxry1JbKH19YrDTicVwKPehFKTbmaxgVEc5TpHdS1aYhB2s9aFJBeJH"
),
(
"6610b25967cdcca9d59875f5cb50b0ea75433311869e930b",
"gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog",
"628c3827a8823298ee685db84f55caa34b5cc195a778e52d45f59bcf75aba68e4d7590e101dc414bc1bbd5737666fbbef35d1f1903953b66624f910feef245ac",
"xprv9s21ZrQH143K3uT8eQowUjsxrmsA9YUuQQK1RLqFufzybxD6DH6gPY7NjJ5G3EPHjsWDrs9iivSbmvjc9DQJbJGatfa9pv4MZ3wjr8qWPAK"
),
(
"68a79eaca2324873eacc50cb9c6eca8cc68ea5d936f98787c60c7ebc74e6ce7c",
"hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length",
"64c87cde7e12ecf6704ab95bb1408bef047c22db4cc7491c4271d170a1b213d20b385bc1588d9c7b38f1b39d415665b8a9030c9ec653d75e65f847d8fc1fc440",
"xprv9s21ZrQH143K2XTAhys3pMNcGn261Fi5Ta2Pw8PwaVPhg3D8DWkzWQwjTJfskj8ofb81i9NP2cUNKxwjueJHHMQAnxtivTA75uUFqPFeWzk"
),
(
"c0ba5a8e914111210f2bd131f3d5e08d",
"scheme spot photo card baby mountain device kick cradle pact join borrow",
"ea725895aaae8d4c1cf682c1bfd2d358d52ed9f0f0591131b559e2724bb234fca05aa9c02c57407e04ee9dc3b454aa63fbff483a8b11de949624b9f1831a9612",
"xprv9s21ZrQH143K3FperxDp8vFsFycKCRcJGAFmcV7umQmcnMZaLtZRt13QJDsoS5F6oYT6BB4sS6zmTmyQAEkJKxJ7yByDNtRe5asP2jFGhT6"
),
(
"6d9be1ee6ebd27a258115aad99b7317b9c8d28b6d76431c3",
"horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver nephew swap uncle crack brave",
"fd579828af3da1d32544ce4db5c73d53fc8acc4ddb1e3b251a31179cdb71e853c56d2fcb11aed39898ce6c34b10b5382772db8796e52837b54468aeb312cfc3d",
"xprv9s21ZrQH143K3R1SfVZZLtVbXEB9ryVxmVtVMsMwmEyEvgXN6Q84LKkLRmf4ST6QrLeBm3jQsb9gx1uo23TS7vo3vAkZGZz71uuLCcywUkt"
),
(
"9f6a2878b2520799a44ef18bc7df394e7061a224d2c33cd015b157d746869863",
"panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside",
"72be8e052fc4919d2adf28d5306b5474b0069df35b02303de8c1729c9538dbb6fc2d731d5f832193cd9fb6aeecbc469594a70e3dd50811b5067f3b88b28c3e8d",
"xprv9s21ZrQH143K2WNnKmssvZYM96VAr47iHUQUTUyUXH3sAGNjhJANddnhw3i3y3pBbRAVk5M5qUGFr4rHbEWwXgX4qrvrceifCYQJbbFDems"
),
(
"23db8160a31d3e0dca3688ed941adbf3",
"cat swing flag economy stadium alone churn speed unique patch report train",
"deb5f45449e615feff5640f2e49f933ff51895de3b4381832b3139941c57b59205a42480c52175b6efcffaa58a2503887c1e8b363a707256bdd2b587b46541f5",
"xprv9s21ZrQH143K4G28omGMogEoYgDQuigBo8AFHAGDaJdqQ99QKMQ5J6fYTMfANTJy6xBmhvsNZ1CJzRZ64PWbnTFUn6CDV2FxoMDLXdk95DQ"
),
(
"8197a4a47f0425faeaa69deebc05ca29c0a5b5cc76ceacc0",
"light rule cinnamon wrap drastic word pride squirrel upgrade then income fatal apart sustain crack supply proud access",
"4cbdff1ca2db800fd61cae72a57475fdc6bab03e441fd63f96dabd1f183ef5b782925f00105f318309a7e9c3ea6967c7801e46c8a58082674c860a37b93eda02",
"xprv9s21ZrQH143K3wtsvY8L2aZyxkiWULZH4vyQE5XkHTXkmx8gHo6RUEfH3Jyr6NwkJhvano7Xb2o6UqFKWHVo5scE31SGDCAUsgVhiUuUDyh"
),
(
"066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad",
"all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform",
"26e975ec644423f4a4c4f4215ef09b4bd7ef924e85d1d17c4cf3f136c2863cf6df0a475045652c57eb5fb41513ca2a2d67722b77e954b4b3fc11f7590449191d",
"xprv9s21ZrQH143K3rEfqSM4QZRVmiMuSWY9wugscmaCjYja3SbUD3KPEB1a7QXJoajyR2T1SiXU7rFVRXMV9XdYVSZe7JoUXdP4SRHTxsT1nzm"
),
(
"f30f8c1da665478f49b001d94c5fc452",
"vessel ladder alter error federal sibling chat ability sun glass valve picture",
"2aaa9242daafcee6aa9d7269f17d4efe271e1b9a529178d7dc139cd18747090bf9d60295d0ce74309a78852a9caadf0af48aae1c6253839624076224374bc63f",
"xprv9s21ZrQH143K2QWV9Wn8Vvs6jbqfF1YbTCdURQW9dLFKDovpKaKrqS3SEWsXCu6ZNky9PSAENg6c9AQYHcg4PjopRGGKmdD313ZHszymnps"
),
(
"c10ec20dc3cd9f652c7fac2f1230f7a3c828389a14392f05",
"scissors invite lock maple supreme raw rapid void congress muscle digital elegant little brisk hair mango congress clump",
"7b4a10be9d98e6cba265566db7f136718e1398c71cb581e1b2f464cac1ceedf4f3e274dc270003c670ad8d02c4558b2f8e39edea2775c9e232c7cb798b069e88",
"xprv9s21ZrQH143K4aERa2bq7559eMCCEs2QmmqVjUuzfy5eAeDX4mqZffkYwpzGQRE2YEEeLVRoH4CSHxianrFaVnMN2RYaPUZJhJx8S5j6puX"
),
(
"f585c11aec520db57dd353c69554b21a89b20fb0650966fa0a9d6f74fd989d8f",
"void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold",
"01f5bced59dec48e362f2c45b5de68b9fd6c92c6634f44d6d40aab69056506f0e35524a518034ddc1192e1dacd32c1ed3eaa3c3b131c88ed8e7e54c49a5d0998",
"xprv9s21ZrQH143K39rnQJknpH1WEPFJrzmAqqasiDcVrNuk926oizzJDDQkdiTvNPr2FYDYzWgiMiC63YmfPAa2oPyNB23r2g7d1yiK6WpqaQS"
),
(
"d292b36884b647974ff2167649e8255c8226a942",
"spoon night surface annual good slight divert drift iron exercise announce ribbon carbon feed answer",
"1c662e030a65b8e943a7f7fb304a1ecf415dcd1c99bfd587efae245ca9270058e853df0070abe61af152756c63a0b67ed74bf6e916b112289499e6052ccacc19",
"xprv9s21ZrQH143K3pskpuVw5DMEBZ1hWZnVxwTpPc4QqjCPHbinjx5dyosHqPubQbGRoKdPci6hYRdr2QNDc2GwhCpSEAtKMrsjiBbYJJLfFj9"
),
(
"608945c274e181d9376c651255db6481ccb525532554eaea611cbbd1",
"gauge enforce identify truth blossom uncle tank million banner put summer adjust slender naive erode pride turtle fantasy elbow jeans bar",
"79da8e9aaeea7b28f9045fb0e4763fef5a7aae300b34c9f32aa8bb9a4aacd99896943beb22bbf9b50646658fd72cdf993b16a7cb5b7a77d1b443cf41f5183067",
"xprv9s21ZrQH143K2Cy1ePyrB2tRcm97F6YFMzDZkhy9QS6PeCDtiDuZLrtt9WBfWhXEz8W5KbSnF7nWBKFzStfs8UPeyzbrCPPbHLC25HB8aFe"
)
];
#[test]
fn new() {
let word_counts: [u8; 5] = [12, 15, 18, 21, 24];
word_counts.iter().for_each(|word_count| {
test_new_with_count::<N, W>(*word_count);
})
}
#[test]
fn from_phrase() {
KEYPAIRS.iter().for_each(|(entropy_str, phrase, _, _)| {
let entropy: Vec<u8> = Vec::from(hex::decode(entropy_str).unwrap());
test_from_phrase::<N, W>(&entropy, phrase);
})
}
#[test]
fn to_phrase() {
KEYPAIRS.iter().for_each(|(entropy_str, phrase, _, _)| {
let entropy: Vec<u8> = Vec::from(hex::decode(entropy_str).unwrap());
test_to_phrase::<N, W>(phrase, &entropy);
})
}
#[test]
fn verify_phrase() {
KEYPAIRS.iter().for_each(|(_, phrase, _, _)| {
test_verify_phrase::<N, W>(phrase);
});
}
#[test]
fn to_seed() {
KEYPAIRS.iter().for_each(|(entropy_str, _, expected_seed, _)| {
let entropy: Vec<u8> = Vec::from(hex::decode(entropy_str).unwrap());
let mnemonic = EthereumMnemonic::<N, W> {
entropy,
_network: PhantomData,
_wordlist: PhantomData,
};
test_to_seed::<N, W>(expected_seed, Some(PASSWORD), mnemonic);
});
}
#[test]
fn to_seed_no_password() {
let (entropy_str, _, _, _) = KEYPAIRS[0];
let entropy: Vec<u8> = Vec::from(hex::decode(entropy_str).unwrap());
let mnemonic = EthereumMnemonic::<N, W> {
entropy,
_network: PhantomData,
_wordlist: PhantomData,
};
test_to_seed::<N, W>(NO_PASSWORD_STR, None, mnemonic);
}
#[test]
fn to_extended_private_key() {
KEYPAIRS
.iter()
.for_each(|(_, phrase, _, expected_extended_private_key)| {
test_to_extended_private_key::<N, W>(expected_extended_private_key, Some(PASSWORD), phrase);
});
}
}
mod test_invalid {
use super::*;
type N = Mainnet;
type W = English;
const INVALID_WORD_COUNT: u8 = 11;
const INVALID_PHRASE_LENGTH: &str =
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
const INVALID_PHRASE_WORD: &str =
"abandoz abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
const INVALID_PHRASE_CHECKSUM: &str =
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon";
#[test]
#[should_panic(expected = "InvalidWordCount(11)")]
fn new_invalid_word_count() {
let rng = &mut XorShiftRng::seed_from_u64(1231275789u64);
let _mnemonic = EthereumMnemonic::<N, W>::new_with_count(rng, INVALID_WORD_COUNT).unwrap();
}
#[test]
#[should_panic(expected = "InvalidWord(\"abandoz\")")]
fn from_phrase_invalid_word() {
let _mnemonic = EthereumMnemonic::<N, W>::from_phrase(INVALID_PHRASE_WORD).unwrap();
}
#[test]
#[should_panic(expected = "InvalidWordCount(13)")]
fn from_phrase_invalid_length() {
let _mnemonic = EthereumMnemonic::<N, W>::from_phrase(INVALID_PHRASE_LENGTH).unwrap();
}
#[test]
#[should_panic(
expected = "InvalidPhrase(\"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon\")"
)]
fn from_phrase_invalid_checksum() {
let _mnemonic = EthereumMnemonic::<N, W>::from_phrase(INVALID_PHRASE_CHECKSUM).unwrap();
}
#[test]
fn verify_invalid_phrase() {
assert!(!EthereumMnemonic::<N, W>::verify_phrase(INVALID_PHRASE_LENGTH));
}
}
} | 24 => 18,
28 => 21,
32 => 24, |
avro.pb.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.22.0
// protoc v3.12.3
// source: google/cloud/bigquery/storage/v1beta1/avro.proto
package storage
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Avro schema.
type AvroSchema struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Json serialized schema, as described at
// https://avro.apache.org/docs/1.8.1/spec.html
Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
}
func (x *AvroSchema) Reset() {
*x = AvroSchema{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AvroSchema) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AvroSchema) ProtoMessage() {}
func (x *AvroSchema) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.
func (*AvroSchema) Descriptor() ([]byte, []int) {
return file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescGZIP(), []int{0}
}
func (x *AvroSchema) GetSchema() string {
if x != nil {
return x.Schema
}
return ""
}
// Avro rows.
type AvroRows struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Binary serialized rows in a block.
SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
// The count of rows in the returning block.
RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
}
func (x *AvroRows) Reset() {
*x = AvroRows{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AvroRows) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AvroRows) ProtoMessage() {}
func (x *AvroRows) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.
func (*AvroRows) Descriptor() ([]byte, []int) {
return file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescGZIP(), []int{1}
}
func (x *AvroRows) GetSerializedBinaryRows() []byte {
if x != nil |
return nil
}
func (x *AvroRows) GetRowCount() int64 {
if x != nil {
return x.RowCount
}
return 0
}
var File_google_cloud_bigquery_storage_v1beta1_avro_proto protoreflect.FileDescriptor
var file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDesc = []byte{
0x0a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x62,
0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f,
0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x61, 0x76, 0x72, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x22, 0x24, 0x0a, 0x0a, 0x41, 0x76, 0x72,
0x6f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22,
0x5d, 0x0a, 0x08, 0x41, 0x76, 0x72, 0x6f, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x73,
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x73, 0x65, 0x72,
0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x52, 0x6f, 0x77,
0x73, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02,
0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x84,
0x01, 0x0a, 0x29, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x73, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x09, 0x41, 0x76,
0x72, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x4c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, 0x73, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x73, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescOnce sync.Once
file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescData = file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDesc
)
func file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescGZIP() []byte {
file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescOnce.Do(func() {
file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescData)
})
return file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDescData
}
var file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_google_cloud_bigquery_storage_v1beta1_avro_proto_goTypes = []interface{}{
(*AvroSchema)(nil), // 0: google.cloud.bigquery.storage.v1beta1.AvroSchema
(*AvroRows)(nil), // 1: google.cloud.bigquery.storage.v1beta1.AvroRows
}
var file_google_cloud_bigquery_storage_v1beta1_avro_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_google_cloud_bigquery_storage_v1beta1_avro_proto_init() }
func file_google_cloud_bigquery_storage_v1beta1_avro_proto_init() {
if File_google_cloud_bigquery_storage_v1beta1_avro_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AvroSchema); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AvroRows); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_cloud_bigquery_storage_v1beta1_avro_proto_goTypes,
DependencyIndexes: file_google_cloud_bigquery_storage_v1beta1_avro_proto_depIdxs,
MessageInfos: file_google_cloud_bigquery_storage_v1beta1_avro_proto_msgTypes,
}.Build()
File_google_cloud_bigquery_storage_v1beta1_avro_proto = out.File
file_google_cloud_bigquery_storage_v1beta1_avro_proto_rawDesc = nil
file_google_cloud_bigquery_storage_v1beta1_avro_proto_goTypes = nil
file_google_cloud_bigquery_storage_v1beta1_avro_proto_depIdxs = nil
}
| {
return x.SerializedBinaryRows
} |
ch5_sort_stack_1.py | import educative.course1.stacks_queues.stack as s
input_data = [23, 60, 12, 42, 4, 97, 2]
expected_output_data = [2, 4, 12, 23, 42, 60, 97]
# This solution uses a second stack
# 1. until input stack is not empty, we pop the top value and compare it
# with the top value of the second stack
# 2. if value > top of stack 2, we insert the popped value in stack 2
# 3. else while popped value < top of stack 2, we keep pushing top of stack 2 to stack 1
# 4. finally when stack 2 is empty we push the popped value and start over again
# 5. The output will be a sorted stack
# ---------------------------------------------
# NOTE - This can also be done by recursion ---
# ---------------------------------------------
def sort_stack_1(stack):
result = s.Stack(stack.capacity, True) # suppress_printing = True
while not stack.is_empty():
value = stack.pop()
if not result.is_empty() and value >= int(result.peek()):
|
else:
while not result.is_empty() and value < int(result.peek()):
stack.push(result.pop())
result.push(value)
return result.prettify()
def main():
input_stack = s.Stack(len(input_data), True) # suppress_printing = True
[input_stack.push(x) for x in input_data]
expected_output_stack = s.Stack(len(expected_output_data), True) # suppress_printing = True
[expected_output_stack.push(x) for x in expected_output_data]
print("Input: \n" + str(input_stack.prettify()))
print("Expected: \n" + str(expected_output_stack.prettify()))
print("Output: \n" + str(sort_stack_1(input_stack)))
if __name__ == '__main__':
main()
| result.push(value) |
source_manager.go | package adapter
import (
"io/ioutil"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
type SourceConfig struct {
Sources map[string]SourceInfo `json:"sources"`
}
type SourceInfo struct {
URI string `json:"uri"`
}
type SourceManager struct {
adapter *Adapter
sources map[string]*Source
}
func NewSourceManager(adapter *Adapter) *SourceManager {
return &SourceManager{
adapter: adapter,
sources: make(map[string]*Source),
}
}
func (sm *SourceManager) Initialize() error {
config, err := sm.LoadSourceConfig(viper.GetString("source.config"))
if err != nil {
return err
}
// Initializing sources
for name, info := range config.Sources {
log.WithFields(log.Fields{
"name": name,
"uri": info.URI,
"method": "POST",
}).Info("Initializing source")
source := NewSource(sm.adapter, name, &info)
err := source.Init()
if err != nil {
log.Error(err)
return err
}
sm.sources[name] = source
}
return nil
}
func (sm *SourceManager) LoadSourceConfig(filename string) (*SourceConfig, error) {
| // Open configuration file
jsonFile, err := os.Open(filename)
if err != nil {
return nil, err
}
defer jsonFile.Close()
// Read
byteValue, _ := ioutil.ReadAll(jsonFile)
var config SourceConfig
json.Unmarshal(byteValue, &config)
return &config, nil
} | |
condition_operator_name.go | // Copyright (c) 2016, 2018, 2022, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// Cloud Guard APIs
//
// A description of the Cloud Guard APIs
//
package cloudguard
import (
"strings"
)
// ConditionOperatorNameEnum Enum with underlying type: string
type ConditionOperatorNameEnum string
// Set of constants representing the allowable values for ConditionOperatorNameEnum
const (
ConditionOperatorNameAnd ConditionOperatorNameEnum = "AND"
ConditionOperatorNameOr ConditionOperatorNameEnum = "OR"
ConditionOperatorNameIn ConditionOperatorNameEnum = "IN"
ConditionOperatorNameNotIn ConditionOperatorNameEnum = "NOT_IN"
ConditionOperatorNameEquals ConditionOperatorNameEnum = "EQUALS"
ConditionOperatorNameNotEquals ConditionOperatorNameEnum = "NOT_EQUALS"
ConditionOperatorNameLessThan ConditionOperatorNameEnum = "LESS_THAN"
ConditionOperatorNameGreaterThan ConditionOperatorNameEnum = "GREATER_THAN"
ConditionOperatorNameRange ConditionOperatorNameEnum = "RANGE"
)
var mappingConditionOperatorNameEnum = map[string]ConditionOperatorNameEnum{
"AND": ConditionOperatorNameAnd,
"OR": ConditionOperatorNameOr,
"IN": ConditionOperatorNameIn,
"NOT_IN": ConditionOperatorNameNotIn,
"EQUALS": ConditionOperatorNameEquals,
"NOT_EQUALS": ConditionOperatorNameNotEquals,
"LESS_THAN": ConditionOperatorNameLessThan,
"GREATER_THAN": ConditionOperatorNameGreaterThan,
"RANGE": ConditionOperatorNameRange,
}
// GetConditionOperatorNameEnumValues Enumerates the set of values for ConditionOperatorNameEnum
func GetConditionOperatorNameEnumValues() []ConditionOperatorNameEnum {
values := make([]ConditionOperatorNameEnum, 0)
for _, v := range mappingConditionOperatorNameEnum {
values = append(values, v)
}
return values
}
// GetConditionOperatorNameEnumStringValues Enumerates the set of values in String for ConditionOperatorNameEnum
func GetConditionOperatorNameEnumStringValues() []string {
return []string{
"AND",
"OR",
"IN",
"NOT_IN",
"EQUALS",
"NOT_EQUALS",
"LESS_THAN",
"GREATER_THAN",
"RANGE",
}
}
// GetMappingConditionOperatorNameEnum performs case Insensitive comparison on enum value and return the desired enum
func | (val string) (ConditionOperatorNameEnum, bool) {
mappingConditionOperatorNameEnumIgnoreCase := make(map[string]ConditionOperatorNameEnum)
for k, v := range mappingConditionOperatorNameEnum {
mappingConditionOperatorNameEnumIgnoreCase[strings.ToLower(k)] = v
}
enum, ok := mappingConditionOperatorNameEnumIgnoreCase[strings.ToLower(val)]
return enum, ok
}
| GetMappingConditionOperatorNameEnum |
navigation.js | (function() {
var DateView = kendo.DateView,
keys = kendo.keys,
dateview,
anchor,
input;
module("kendo.ui.DatePicker API", {
setup: function() {
kendo.effects.disable();
kendo.ns = "kendo-";
input = $("<input />").appendTo(QUnit.fixture);
anchor = $("<input />").appendTo(QUnit.fixture);
},
teardown: function() {
kendo.effects.enable();
if (dateview) {
dateview.destroy();
}
kendo.destroy(QUnit.fixture);
kendo.ns = "";
}
});
test("click enter should raise change event if dateview is closed", function() {
var datepicker = input.kendoDatePicker().data("kendoDatePicker");
datepicker.close();
stub(datepicker, { _change: datepicker._change });
input.focus().val("10/10/2000");
datepicker._keydown({
currentTarget: document.createElement("input"),
keyCode: keys.ENTER,
preventDefault: $.noop
});
equal(datepicker.calls("_change"), 1);
});
test("navigate down should persist current viewedateviewalue", function() {
var value = new Date(2000, 10, 10, 22, 22, 22),
upEvent = { keyCode: keys.UP, ctrlKey: true, preventDefault: $.noop },
downEvent = { keyCode: keys.DOWN, ctrlKey: true, preventDefault: $.noop };
dateview = new DateView({
value: value,
min: new Date(1999, 10, 10),
max: new Date(2111, 10, 10),
start: "month",
depth: "month"
});
dateview.open();
dateview.move(upEvent);
dateview.move(upEvent);
dateview.move(downEvent);
dateview.move(downEvent);
equal(+dateview._current, +value);
});
//MONTH View
test("navigate should not move selection if value is bigger than max", function() {
var event = { keyCode: keys.RIGHT, preventDefault: $.noop },
date = new Date(2000, 11, 1);
dateview = new DateView({
depth: "month",
start: "month",
min: new Date(1900, 10, 10),
value: date,
max: date
});
dateview.open();
dateview.move(event);
equal(+dateview._current, +date);
equal(dateview.calendar._table.find(".k-state-focused").text(), date.getDate() + "");
});
test("navigate should not move selection if value is less than min", function() {
var event = { keyCode: keys.LEFT, preventDefault: $.noop },
date = new Date(2000, 11, 1);
dateview = new DateView({start: "month", depth: "month", value: date, min: date, max: new Date(2100, 10, 10)});
dateview.open();
dateview.move(event);
equal(+dateview._current, +date);
equal(dateview.calendar._table.find(".k-state-focused").text(), date.getDate() + "");
});
test("navigate should focus next day in month view", function() {
dateview = new DateView({
start: "month",
depth: "month",
min: new Date(1999, 10, 10),
max: new Date(2111, 10, 10)
});
var event = { keyCode: keys.RIGHT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
kendo.calendar.views[0].setDate(focusedDate, 1);
dateview.open();
dateview.move(event);
equal(dateview.calendar._table.find(".k-state-focused").text(), focusedDate.getDate() + "");
});
test("navigate should focus previous day in month view", function() {
dateview = new DateView({start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.LEFT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
focusedDate.setDate(focusedDate.getDate() - 1);
dateview.open();
dateview.move(event);
equal(dateview.calendar._table.find(".k-state-focused").text(), focusedDate.getDate() + "");
});
test("navigate should focus day on previous row in month view", function() {
dateview = new DateView({start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.UP, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
focusedDate.setDate(focusedDate.getDate() - 7);
dateview.open();
dateview.move(event);
equal(dateview.calendar._table.find(".k-state-focused").text(), focusedDate.getDate() + "");
});
test("navigate should focus day on next row in month view", function() {
dateview = new DateView({start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.DOWN, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
focusedDate.setDate(focusedDate.getDate() + 7);
dateview.open();
dateview.move(event);
equal(dateview.calendar._table.find(".k-state-focused").text(), focusedDate.getDate() + "");
});
//YEAR VIEW
test("navigate should focus next month in year view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.RIGHT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigateUp();
focusedDate.setMonth(focusedDate.getMonth() + 1);
dateview.move(event);
equal(dateview.calendar._table.find(".k-state-focused").text(), "Dec");
});
test("navigate should focus previous month in year view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.LEFT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigateUp();
focusedDate.setMonth(focusedDate.getMonth() - 1);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "Oct");
});
test("navigate should focus month on previous row in year view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.UP, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigateUp();
focusedDate.setMonth(focusedDate.getMonth() - 4);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "Jul");
});
test("navigate should focus month on next row in year view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.DOWN, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigateUp();
focusedDate.setMonth(focusedDate.getMonth() + 4);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "Mar");
});
//DECADE VIEW
test("navigate should focus next year in decade view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.RIGHT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "decade");
focusedDate.setFullYear(focusedDate.getFullYear() + 1);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "2001");
});
test("navigate should focus previous year in decade view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1999, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.LEFT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "decade");
focusedDate.setFullYear(focusedDate.getFullYear() - 1);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "1999");
ok(!dateview.calendar._table.find(".k-state-focused").hasClass("k-other-month"));
});
test("navigate should focus year on previous row in decade view", function() {
dateview = new DateView({
depth: "month",
start: "month",
value: new Date(2000, 10, 10),
min: new Date(1900, 10, 10),
max: new Date(2111, 10, 10)
});
var event = { keyCode: keys.UP, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "decade");
focusedDate.setFullYear(focusedDate.getFullYear() - 4);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "1996");
});
test("navigate should focus year on next row in decade view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.DOWN, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "decade");
focusedDate.setFullYear(focusedDate.getFullYear() + 4);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "2004");
});
//CENTURY VIEW
test("navigate should focus next decade in century view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.RIGHT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "century");
focusedDate.setFullYear(focusedDate.getFullYear() + 10);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "2010 - 2019");
});
test("navigate should focus previous decade in century view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.LEFT, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "century");
focusedDate.setFullYear(focusedDate.getFullYear() - 10);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "1990 - 1999");
});
test("navigate should focus decade on previous row in century view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.UP, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "century");
focusedDate.setFullYear(focusedDate.getFullYear() - 40);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "1960 - 1969");
});
test("navigate should focus decade on next row in century view", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.DOWN, preventDefault: $.noop },
focusedDate = new Date(dateview._current);
dateview.open();
dateview.calendar.navigate(null, "century");
focusedDate.setFullYear(focusedDate.getFullYear() + 40);
dateview.move(event);
equal(+dateview._current, +focusedDate);
equal(dateview.calendar._table.find(".k-state-focused").text(), "2040 - 2049");
});
//Navigate through views
test("navigate down", function() {
var event = { keyCode: keys.DOWN, ctrlKey: true, preventDefault: $.noop };
dateview = new DateView({
value: new Date(2000, 10, 10),
start: "month",
depth: "month",
min: new Date(1900, 10, 10),
max: new Date(2111, 10, 10)
});
dateview.open();
stub(dateview.calendar, { navigateDown: dateview.calendar.navigateDown });
dateview.calendar.navigateUp();
dateview.calendar._focus(dateview._current);
dateview.move(event);
equal(dateview.calendar.calls("navigateDown"), 1);
});
test("navigate up", function() {
var event = { keyCode: keys.UP, ctrlKey: true, preventDefault: $.noop };
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
dateview.open();
stub(dateview.calendar, "navigateUp");
dateview.move(event);
equal(dateview.calendar.calls("navigateUp"), 1);
});
test("navigate down selects date", function() {
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
var event = { keyCode: keys.DOWN, ctrlKey: true, preventDefault: $.noop },
selectedDate = new Date(2000, 10, 15);
dateview.open();
dateview.calendar._focus(selectedDate);
dateview.move(event);
equal(+dateview.calendar.value(), +selectedDate);
});
test("navigate left", function() {
var event = { keyCode: keys.LEFT, ctrlKey: true, preventDefault: $.noop };
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
dateview.open();
stub(dateview.calendar, "navigateToPast");
dateview.move(event);
equal(dateview.calendar.calls("navigateToPast"), 1);
});
test("navigate right", function() {
var event = { keyCode: keys.RIGHT, ctrlKey: true, preventDefault: $.noop };
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
dateview.open();
stub(dateview.calendar, "navigateToFuture");
dateview.move(event);
equal(dateview.calendar.calls("navigateToFuture"), 1);
});
test("Home should focus first day of current month", function() {
var event = { keyCode: keys.HOME, preventDefault: $.noop };
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
dateview.open();
dateview.move(event);
var value = dateview.calendar.element.find(".k-state-focused").children(":first").attr("data-kendo-value");
equal(value, "2000/10/1");
});
test("End should focus last day of current month", function() {
var event = { keyCode: keys.END, preventDefault: $.noop };
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
dateview.open();
dateview.move(event);
var value = dateview.calendar.element.find(".k-state-focused").children(":first").attr("data-kendo-value");
equal(value, "2000/10/30");
}); | var event = { keyCode: keys.PAGEUP, preventDefault: $.noop };
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
dateview.open();
stub(dateview.calendar, {navigateToPast: dateview.calendar.navigateToPast});
dateview.move(event);
equal(dateview.calendar.calls("navigateToPast"), 1);
});
test("PageDown should focus same day in next month", function() {
var event = { keyCode: keys.PAGEDOWN, preventDefault: $.noop };
dateview = new DateView({value: new Date(2000, 10, 10), start: "month", depth: "month", min: new Date(1900, 10, 10), max: new Date(2111, 10, 10)});
dateview.open();
stub(dateview.calendar, {navigateToFuture: dateview.calendar.navigateToFuture});
dateview.move(event);
equal(dateview.calendar.calls("navigateToFuture"), 1);
});
test("Enter should close date if select date", function() {
var event = { keyCode: keys.ENTER, preventDefault: $.noop };
dateview = new DateView({
anchor: anchor,
value: new Date(2000, 10, 10),
min: new Date(1900, 10, 10),
max: new Date(2100, 10, 10),
start: "month",
depth: "month"
});
dateview.open();
dateview.calendar._focus(dateview._current);
dateview.move(event);
ok(!dateview.popup.visible());
});
test("Enter should focus viewedDate", function() {
var event = { keyCode: keys.ENTER, preventDefault: $.noop };
dateview = new DateView({
anchor: anchor,
value: new Date(2000, 10, 10),
min: new Date(1900, 10, 10),
max: new Date(2100, 10, 10),
start: "month",
depth: "month"
});
dateview.open();
dateview.calendar.navigate(new Date(2000, 10, 10), "year");
dateview.calendar._focus(dateview._current);
dateview.move(event);
ok(dateview.popup.visible());
equal(dateview.calendar._table.find(".k-state-focused").length, 1);
});
test("Enter should select date", function() {
dateview = new DateView({
anchor: anchor,
value: new Date(2000, 10, 10),
min: new Date(1900, 10, 10),
max: new Date(2100, 10, 10),
start: "month",
depth: "month"
});
var called, event = { keyCode: keys.ENTER, preventDefault: $.noop },
focused = new Date(2000, 10, 11);
dateview.open();
stub(dateview.calendar, {navigateDown: dateview.calendar.navigateDown});
dateview.calendar._focus(focused);
dateview.move(event);
equal(+dateview.calendar.args("navigateDown")[0], +focused);
});
test("Enter should navigate down", function() {
var event = { keyCode: keys.ENTER, preventDefault: $.noop };
dateview = new DateView({
anchor: anchor,
value: new Date(2010, 10, 10),
min: new Date(1900, 10, 10),
max: new Date(2100, 10, 10),
start: "month",
depth: "month"
});
dateview.open();
stub(dateview.calendar, {navigateDown: dateview.calendar.navigateDown});
dateview.calendar.navigateUp();
dateview.calendar._focus(dateview._current);
dateview.move(event);
equal(dateview.calendar.calls("navigateDown"), 1);
});
test("Esc should close dateView", function() {
var event = { keyCode: keys.ESC, preventDefault: $.noop };
dateview = new DateView({
anchor: anchor,
value: new Date(2000, 10, 10),
min: new Date(1900, 10, 10),
max: new Date(2100, 10, 10),
start: "month",
depth: "month"
});
dateview.open();
stub(dateview.popup, "close");
dateview.move(event);
equal(dateview.popup.calls("close"), 1);
});
test("DateView prevents default action when ESC is pressed and popup is opened", 1, function() {
var event = {
keyCode: keys.ESC,
preventDefault: function() {
ok(true);
}
};
dateview = new DateView({
anchor: anchor,
value: new Date(2000, 10, 10),
min: new Date(1900, 10, 10),
max: new Date(2100, 10, 10),
start: "month",
depth: "month"
});
dateview.open();
dateview.move(event);
});
test("type invalide date does not clear input", function() {
datepicker = input.kendoDatePicker({value: new Date()}).data("kendoDatePicker");
var value = "invalid date";
input.focus().val(value).blur();
equal(input.val(), value);
equal(datepicker.value(), null);
});
test("click on selected date should close the dateView", 1, function() {
dateview = new DateView({
min: new Date(1800, 1, 1),
max: new Date(2800, 1, 1),
start: "month",
depth: "month",
anchor: anchor,
clearBlurTimeout: $.noop,
close: function() {
ok(true);
}
});
dateview.value(new Date());
dateview.open();
dateview.calendar
.element
.find(".k-state-selected")
.click();
});
test("Alt + Down should open the calendar", function() {
var event = { type: "keydown", keyCode: keys.DOWN, altKey: true, preventDefault: $.noop };
datepicker = input.kendoDatePicker().data("kendoDatePicker");
stub(datepicker.dateView, "open");
input.trigger(event);
equal(datepicker.dateView.calls("open"), 1);
});
test("Alt + UP should close the calendar", function() {
var event = { type: "keydown", keyCode: keys.UP, altKey: true, preventDefault: $.noop };
datepicker = input.kendoDatePicker().data("kendoDatePicker");
stub(datepicker.dateView, "close");
input.trigger(event);
equal(datepicker.dateView.calls("close"), 1);
});
test("DatePicker does not update the input if the entered value is the same but in diff format", function() {
datepicker = input.kendoDatePicker({
format: "dd MMM yyyy",
parseFormats: ["yyyy/MM/dd"],
value: kendo.toString(today, "dd MMM yyyy")
}).data("kendoDatePicker");
var today = new Date(),
todayDiffFormat = kendo.toString(today, "yyyy/MM/dd");
input.val(todayDiffFormat);
//simulate change
datepicker._change(input.val());
equal(input.val(), kendo.toString(today, "dd MMM yyyy"));
});
test("DatePicker does not call change on blur if no text change", function() {
var date = new Date(1919, 0, 1);
datepicker = input.kendoDatePicker({
format: "MM/dd/yy",
value: new Date(date)
}).data("kendoDatePicker");
datepicker.options.parseFormats = ["MM/dd/yyyy", "MM/dd/yy"];
//simulate change
input.focus().blur();
equal(+datepicker.value(), +date);
});
test("DatePicker does not call change on ENTER if no text change", function() {
var date = new Date(1919, 0, 1);
datepicker = input.kendoDatePicker({
format: "MM/dd/yy",
value: new Date(date)
}).data("kendoDatePicker");
datepicker.options.parseFormats = ["MM/dd/yyyy", "MM/dd/yy"];
//simulate change
input.focus().trigger({
type: "keydown",
keyCode: kendo.keys.ENTER
});
equal(+datepicker.value(), +date);
});
test("DatePicker does set focused date of calendar if no text change", function() {
var date = new Date(1919, 0, 1);
datepicker = input.kendoDatePicker({
format: "MM/dd/yy",
value: new Date(date)
}).data("kendoDatePicker");
datepicker.options.parseFormats = ["MM/dd/yyyy", "MM/dd/yy"];
input.focus();
datepicker.open();
equal(+datepicker.dateView._current, +date);
});
test("click enter should raise change event if dateview is closed", function() {
var datepicker = input.kendoDatePicker().data("kendoDatePicker");
input.focus();
datepicker.open();
datepicker.dateView.calendar.element.find(".k-nav-fast").click();
ok(datepicker.dateView.popup.visible());
});
test("navigate should focus min month in year view", function() {
var date = new Date(),
openEvent = { type: "keydown", keyCode: keys.DOWN, altKey: true, preventDefault: $.noop },
downEvent = { type: "keydown", keyCode: keys.DOWN, altKey: false, preventDefault: $.noop },
upEvent = { type: "keydown", keyCode: keys.UP, altKey: false, preventDefault: $.noop };
var datepicker = input.kendoDatePicker({
min: date,
start:"year"
}).data("kendoDatePicker");
input.trigger(openEvent);
input.trigger(downEvent);
input.trigger(upEvent);
equal(datepicker.dateView.calendar._table.find(".k-state-focused").text(), kendo.toString(date, "MMM"));
});
})(); |
test("PageUp should focus same day in previous month", function() { |
User.js | const mongoose = require("mongoose");
const Schema = mongoose.Schema;
// Create Schema | const UserSchema = new Schema({
name:{
type:String,
required:true
},
email:{
type:String,
required:true
},
password:{
type:String,
required:true
},
avatar:{
type:String
},
date:{
type:Date,
default:Date.now
}
})
module.exports = User = mongoose.model("users",UserSchema); | |
main.rs | extern crate sgx_types;
extern crate sgx_urts;
extern crate base64;
extern crate reqwest;
extern crate dirs;
// networking apt install libzmq3-dev
extern crate zmq;
extern crate serde_json;
// errors
#[macro_use]
extern crate failure;
extern crate rustc_hex as hex;
//enigma utils
extern crate enigma_tools_u;
#[macro_use]
extern crate serde_derive;
extern crate serde;
//use sgx_types::*;
use std::thread;
// enigma modules
mod esgx;
mod evm_u;
mod networking;
mod common_u;
pub use esgx::general::ocall_get_home;
use networking::{surface_server, constants};
#[allow(unused_variables, unused_mut)]
fn main() {
/* this is an example of initiating an enclave */
let enclave = match esgx::general::init_enclave_wrapper() {
Ok(r) => {
println!("[+] Init Enclave Successful {}!", r.geteid());
r
},
Err(x) => {
println!("[-] Init Enclave Failed {}!", x.as_str());
return;
},
};
let eid = enclave.geteid();
let child = thread::spawn(move || {
let mut server = surface_server::Server::new(constants::CONNECTION_STR, eid);
server.run();
});
child.join().unwrap();
enclave.destroy();
}
#[cfg(test)]
mod tests {
use esgx::general::init_enclave_wrapper;
use sgx_types::*;
extern { fn ecall_run_tests(eid: sgx_enclave_id_t) -> sgx_status_t; }
#[test]
pub fn | () {
// initiate the enclave
let enclave = match init_enclave_wrapper() {
Ok(r) => {
println!("[+] Init Enclave Successful {}!", r.geteid());
r
},
Err(x) => {
println!("[-] Init Enclave Failed {}!", x.as_str());
assert_eq!(0,1);
return;
},
};
let ret = unsafe { ecall_run_tests(enclave.geteid()) };
assert_eq!(ret,sgx_status_t::SGX_SUCCESS);
enclave.destroy();
}
}
| test_enclave_internal |
main.js | $(document).ready(function(){
var items = $('#stage li'),
itemsByTags = {};
| items.each(function(i){
var elem = $(this),
tags = elem.data('tags').split(',');
// Adding a data-id attribute. Required by the Quicksand plugin:
elem.attr('data-id',i);
$.each(tags,function(key,value){
// Removing extra whitespace:
value = $.trim(value);
if(!(value in itemsByTags)){
// Create an empty array to hold this item:
itemsByTags[value] = [];
}
// Each item is added to one array per tag:
itemsByTags[value].push(elem);
});
});
// Creating the "Everything" option in the menu:
createList('All',items);
// Looping though the arrays in itemsByTags:
$.each(itemsByTags,function(k,v){
createList(k,v);
});
$('#filter a').live('click',function(e){
var link = $(this);
link.addClass('active').siblings().removeClass('active');
// Using the Quicksand plugin to animate the li items.
// It uses data('list') defined by our createList function:
$('#stage').quicksand(link.data('list').find('li'),
function() {
$("a.fsand").fancybox({
'overlayShow' : true,
});
$(".play").click(function() {
$.fancybox({
'padding' : 0,
'autoScale' : false,
'transitionIn' : 'none',
'transitionOut' : 'none',
'title' : this.title,
'width' : 680,
'height' : 495,
'href' : this.href.replace(new RegExp("watch\\?v=", "i"), 'v/'),
'type' : 'swf',
'swf' : {
'wmode' : 'transparent',
'allowfullscreen' : 'true'
}
});
return false;
});
});
e.preventDefault();
});
$('#filter a:first').click();
function createList(text,items){
// This is a helper function that takes the
// text of a menu button and array of li items
// Creating an empty unordered list:
var ul = $('<ul>',{'class':'hidden'});
$.each(items,function(){
// Creating a copy of each li item
// and adding it to the list:
$(this).clone().appendTo(ul);
});
ul.appendTo('#gal-container');
// Creating a menu item. The unordered list is added
// as a data parameter (available via .data('list'):
var a = $('<a>',{
html: text,
href:'#',
data: {list:ul}
}).appendTo('#filter');
}
$("a#example1").fancybox({
'overlayShow' : false,
'transitionIn' : 'elastic',
'transitionOut' : 'elastic'
});
});
//]]> | // Looping though all the li items:
|
als.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.22.0
// protoc v3.10.1
// source: envoy/service/accesslog/v2/als.proto
package envoy_service_accesslog_v2
import (
context "context"
_ "github.com/cncf/udpa/go/udpa/annotations"
core "github.com/datawire/ambassador/pkg/api/envoy/api/v2/core"
v2 "github.com/datawire/ambassador/pkg/api/envoy/data/accesslog/v2"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type StreamAccessLogsResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *StreamAccessLogsResponse) Reset() {
*x = StreamAccessLogsResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StreamAccessLogsResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StreamAccessLogsResponse) ProtoMessage() {}
func (x *StreamAccessLogsResponse) ProtoReflect() protoreflect.Message {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StreamAccessLogsResponse.ProtoReflect.Descriptor instead.
func (*StreamAccessLogsResponse) Descriptor() ([]byte, []int) {
return file_envoy_service_accesslog_v2_als_proto_rawDescGZIP(), []int{0}
}
type StreamAccessLogsMessage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Identifier *StreamAccessLogsMessage_Identifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
// Types that are assignable to LogEntries:
// *StreamAccessLogsMessage_HttpLogs
// *StreamAccessLogsMessage_TcpLogs
LogEntries isStreamAccessLogsMessage_LogEntries `protobuf_oneof:"log_entries"`
}
func (x *StreamAccessLogsMessage) Reset() {
*x = StreamAccessLogsMessage{}
if protoimpl.UnsafeEnabled {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StreamAccessLogsMessage) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StreamAccessLogsMessage) ProtoMessage() {}
func (x *StreamAccessLogsMessage) ProtoReflect() protoreflect.Message {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StreamAccessLogsMessage.ProtoReflect.Descriptor instead.
func (*StreamAccessLogsMessage) Descriptor() ([]byte, []int) {
return file_envoy_service_accesslog_v2_als_proto_rawDescGZIP(), []int{1}
}
func (x *StreamAccessLogsMessage) GetIdentifier() *StreamAccessLogsMessage_Identifier {
if x != nil {
return x.Identifier
}
return nil
}
func (m *StreamAccessLogsMessage) GetLogEntries() isStreamAccessLogsMessage_LogEntries {
if m != nil {
return m.LogEntries
}
return nil
}
func (x *StreamAccessLogsMessage) GetHttpLogs() *StreamAccessLogsMessage_HTTPAccessLogEntries {
if x, ok := x.GetLogEntries().(*StreamAccessLogsMessage_HttpLogs); ok {
return x.HttpLogs
}
return nil
}
func (x *StreamAccessLogsMessage) GetTcpLogs() *StreamAccessLogsMessage_TCPAccessLogEntries {
if x, ok := x.GetLogEntries().(*StreamAccessLogsMessage_TcpLogs); ok {
return x.TcpLogs
}
return nil
}
type isStreamAccessLogsMessage_LogEntries interface {
isStreamAccessLogsMessage_LogEntries()
}
type StreamAccessLogsMessage_HttpLogs struct {
HttpLogs *StreamAccessLogsMessage_HTTPAccessLogEntries `protobuf:"bytes,2,opt,name=http_logs,json=httpLogs,proto3,oneof"`
}
type StreamAccessLogsMessage_TcpLogs struct {
TcpLogs *StreamAccessLogsMessage_TCPAccessLogEntries `protobuf:"bytes,3,opt,name=tcp_logs,json=tcpLogs,proto3,oneof"`
}
func (*StreamAccessLogsMessage_HttpLogs) isStreamAccessLogsMessage_LogEntries() {}
func (*StreamAccessLogsMessage_TcpLogs) isStreamAccessLogsMessage_LogEntries() {}
type StreamAccessLogsMessage_Identifier struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Node *core.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
LogName string `protobuf:"bytes,2,opt,name=log_name,json=logName,proto3" json:"log_name,omitempty"`
}
func (x *StreamAccessLogsMessage_Identifier) Reset() {
*x = StreamAccessLogsMessage_Identifier{}
if protoimpl.UnsafeEnabled {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StreamAccessLogsMessage_Identifier) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StreamAccessLogsMessage_Identifier) ProtoMessage() {}
func (x *StreamAccessLogsMessage_Identifier) ProtoReflect() protoreflect.Message {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StreamAccessLogsMessage_Identifier.ProtoReflect.Descriptor instead.
func (*StreamAccessLogsMessage_Identifier) Descriptor() ([]byte, []int) {
return file_envoy_service_accesslog_v2_als_proto_rawDescGZIP(), []int{1, 0}
}
func (x *StreamAccessLogsMessage_Identifier) GetNode() *core.Node {
if x != nil {
return x.Node
}
return nil
}
func (x *StreamAccessLogsMessage_Identifier) GetLogName() string {
if x != nil {
return x.LogName
}
return ""
}
type StreamAccessLogsMessage_HTTPAccessLogEntries struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
LogEntry []*v2.HTTPAccessLogEntry `protobuf:"bytes,1,rep,name=log_entry,json=logEntry,proto3" json:"log_entry,omitempty"`
}
func (x *StreamAccessLogsMessage_HTTPAccessLogEntries) Reset() {
*x = StreamAccessLogsMessage_HTTPAccessLogEntries{}
if protoimpl.UnsafeEnabled {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StreamAccessLogsMessage_HTTPAccessLogEntries) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StreamAccessLogsMessage_HTTPAccessLogEntries) ProtoMessage() {}
func (x *StreamAccessLogsMessage_HTTPAccessLogEntries) ProtoReflect() protoreflect.Message {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StreamAccessLogsMessage_HTTPAccessLogEntries.ProtoReflect.Descriptor instead.
func (*StreamAccessLogsMessage_HTTPAccessLogEntries) Descriptor() ([]byte, []int) {
return file_envoy_service_accesslog_v2_als_proto_rawDescGZIP(), []int{1, 1}
}
func (x *StreamAccessLogsMessage_HTTPAccessLogEntries) GetLogEntry() []*v2.HTTPAccessLogEntry {
if x != nil {
return x.LogEntry
}
return nil
}
type StreamAccessLogsMessage_TCPAccessLogEntries struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
LogEntry []*v2.TCPAccessLogEntry `protobuf:"bytes,1,rep,name=log_entry,json=logEntry,proto3" json:"log_entry,omitempty"`
}
func (x *StreamAccessLogsMessage_TCPAccessLogEntries) Reset() {
*x = StreamAccessLogsMessage_TCPAccessLogEntries{}
if protoimpl.UnsafeEnabled {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StreamAccessLogsMessage_TCPAccessLogEntries) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StreamAccessLogsMessage_TCPAccessLogEntries) ProtoMessage() {}
func (x *StreamAccessLogsMessage_TCPAccessLogEntries) ProtoReflect() protoreflect.Message {
mi := &file_envoy_service_accesslog_v2_als_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StreamAccessLogsMessage_TCPAccessLogEntries.ProtoReflect.Descriptor instead.
func (*StreamAccessLogsMessage_TCPAccessLogEntries) Descriptor() ([]byte, []int) {
return file_envoy_service_accesslog_v2_als_proto_rawDescGZIP(), []int{1, 2}
}
func (x *StreamAccessLogsMessage_TCPAccessLogEntries) GetLogEntry() []*v2.TCPAccessLogEntry {
if x != nil {
return x.LogEntry
}
return nil
}
var File_envoy_service_accesslog_v2_als_proto protoreflect.FileDescriptor
var file_envoy_service_accesslog_v2_als_proto_rawDesc = []byte{
0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f,
0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x6c, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
0x76, 0x32, 0x1a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32,
0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x61, 0x63, 0x63,
0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f,
0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74,
0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73,
0x73, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x05,
0x0a, 0x17, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f,
0x67, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x5e, 0x0a, 0x0a, 0x69, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e,
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x61, 0x63,
0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x69,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x09, 0x68, 0x74, 0x74,
0x70, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65,
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x61, 0x63, 0x63,
0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45,
0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x4c, 0x6f,
0x67, 0x73, 0x12, 0x64, 0x0a, 0x08, 0x74, 0x63, 0x70, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f,
0x67, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63,
0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52,
0x07, 0x74, 0x63, 0x70, 0x4c, 0x6f, 0x67, 0x73, 0x1a, 0x67, 0x0a, 0x0a, 0x49, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa,
0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a,
0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x4e, 0x61, 0x6d,
0x65, 0x1a, 0x6a, 0x0a, 0x14, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c,
0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x6c, 0x6f, 0x67,
0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65,
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73,
0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01,
0x02, 0x08, 0x01, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x1a, 0x68, 0x0a,
0x13, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74,
0x72, 0x69, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72,
0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
0x32, 0x2e, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6c,
0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x12, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x65,
0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x32, 0x96, 0x01, 0x0a, 0x10,
0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x12, 0x81, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73,
0x73, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c,
0x6f, 0x67, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x34, 0x2e, 0x65, 0x6e, 0x76,
0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73,
0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63,
0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x28, 0x01, 0x42, 0x41, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32,
0x42, 0x08, 0x41, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x88, 0x01, 0x01, 0xba,
0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_envoy_service_accesslog_v2_als_proto_rawDescOnce sync.Once
file_envoy_service_accesslog_v2_als_proto_rawDescData = file_envoy_service_accesslog_v2_als_proto_rawDesc
)
func file_envoy_service_accesslog_v2_als_proto_rawDescGZIP() []byte {
file_envoy_service_accesslog_v2_als_proto_rawDescOnce.Do(func() {
file_envoy_service_accesslog_v2_als_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_accesslog_v2_als_proto_rawDescData)
})
return file_envoy_service_accesslog_v2_als_proto_rawDescData
}
var file_envoy_service_accesslog_v2_als_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_envoy_service_accesslog_v2_als_proto_goTypes = []interface{}{
(*StreamAccessLogsResponse)(nil), // 0: envoy.service.accesslog.v2.StreamAccessLogsResponse
(*StreamAccessLogsMessage)(nil), // 1: envoy.service.accesslog.v2.StreamAccessLogsMessage
(*StreamAccessLogsMessage_Identifier)(nil), // 2: envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier
(*StreamAccessLogsMessage_HTTPAccessLogEntries)(nil), // 3: envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries
(*StreamAccessLogsMessage_TCPAccessLogEntries)(nil), // 4: envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries
(*core.Node)(nil), // 5: envoy.api.v2.core.Node
(*v2.HTTPAccessLogEntry)(nil), // 6: envoy.data.accesslog.v2.HTTPAccessLogEntry
(*v2.TCPAccessLogEntry)(nil), // 7: envoy.data.accesslog.v2.TCPAccessLogEntry
}
var file_envoy_service_accesslog_v2_als_proto_depIdxs = []int32{
2, // 0: envoy.service.accesslog.v2.StreamAccessLogsMessage.identifier:type_name -> envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier
3, // 1: envoy.service.accesslog.v2.StreamAccessLogsMessage.http_logs:type_name -> envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries
4, // 2: envoy.service.accesslog.v2.StreamAccessLogsMessage.tcp_logs:type_name -> envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries
5, // 3: envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier.node:type_name -> envoy.api.v2.core.Node
6, // 4: envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries.log_entry:type_name -> envoy.data.accesslog.v2.HTTPAccessLogEntry
7, // 5: envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries.log_entry:type_name -> envoy.data.accesslog.v2.TCPAccessLogEntry
1, // 6: envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs:input_type -> envoy.service.accesslog.v2.StreamAccessLogsMessage
0, // 7: envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs:output_type -> envoy.service.accesslog.v2.StreamAccessLogsResponse
7, // [7:8] is the sub-list for method output_type
6, // [6:7] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_envoy_service_accesslog_v2_als_proto_init() }
func file_envoy_service_accesslog_v2_als_proto_init() {
if File_envoy_service_accesslog_v2_als_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_envoy_service_accesslog_v2_als_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamAccessLogsResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_envoy_service_accesslog_v2_als_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamAccessLogsMessage); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_envoy_service_accesslog_v2_als_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamAccessLogsMessage_Identifier); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_envoy_service_accesslog_v2_als_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamAccessLogsMessage_HTTPAccessLogEntries); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_envoy_service_accesslog_v2_als_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamAccessLogsMessage_TCPAccessLogEntries); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_envoy_service_accesslog_v2_als_proto_msgTypes[1].OneofWrappers = []interface{}{
(*StreamAccessLogsMessage_HttpLogs)(nil),
(*StreamAccessLogsMessage_TcpLogs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_envoy_service_accesslog_v2_als_proto_rawDesc,
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_envoy_service_accesslog_v2_als_proto_goTypes,
DependencyIndexes: file_envoy_service_accesslog_v2_als_proto_depIdxs,
MessageInfos: file_envoy_service_accesslog_v2_als_proto_msgTypes,
}.Build()
File_envoy_service_accesslog_v2_als_proto = out.File
file_envoy_service_accesslog_v2_als_proto_rawDesc = nil
file_envoy_service_accesslog_v2_als_proto_goTypes = nil
file_envoy_service_accesslog_v2_als_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// AccessLogServiceClient is the client API for AccessLogService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AccessLogServiceClient interface {
StreamAccessLogs(ctx context.Context, opts ...grpc.CallOption) (AccessLogService_StreamAccessLogsClient, error)
}
type accessLogServiceClient struct {
cc grpc.ClientConnInterface
}
func NewAccessLogServiceClient(cc grpc.ClientConnInterface) AccessLogServiceClient |
func (c *accessLogServiceClient) StreamAccessLogs(ctx context.Context, opts ...grpc.CallOption) (AccessLogService_StreamAccessLogsClient, error) {
stream, err := c.cc.NewStream(ctx, &_AccessLogService_serviceDesc.Streams[0], "/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", opts...)
if err != nil {
return nil, err
}
x := &accessLogServiceStreamAccessLogsClient{stream}
return x, nil
}
type AccessLogService_StreamAccessLogsClient interface {
Send(*StreamAccessLogsMessage) error
CloseAndRecv() (*StreamAccessLogsResponse, error)
grpc.ClientStream
}
type accessLogServiceStreamAccessLogsClient struct {
grpc.ClientStream
}
func (x *accessLogServiceStreamAccessLogsClient) Send(m *StreamAccessLogsMessage) error {
return x.ClientStream.SendMsg(m)
}
func (x *accessLogServiceStreamAccessLogsClient) CloseAndRecv() (*StreamAccessLogsResponse, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(StreamAccessLogsResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// AccessLogServiceServer is the server API for AccessLogService service.
type AccessLogServiceServer interface {
StreamAccessLogs(AccessLogService_StreamAccessLogsServer) error
}
// UnimplementedAccessLogServiceServer can be embedded to have forward compatible implementations.
type UnimplementedAccessLogServiceServer struct {
}
func (*UnimplementedAccessLogServiceServer) StreamAccessLogs(AccessLogService_StreamAccessLogsServer) error {
return status.Errorf(codes.Unimplemented, "method StreamAccessLogs not implemented")
}
func RegisterAccessLogServiceServer(s *grpc.Server, srv AccessLogServiceServer) {
s.RegisterService(&_AccessLogService_serviceDesc, srv)
}
func _AccessLogService_StreamAccessLogs_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(AccessLogServiceServer).StreamAccessLogs(&accessLogServiceStreamAccessLogsServer{stream})
}
type AccessLogService_StreamAccessLogsServer interface {
SendAndClose(*StreamAccessLogsResponse) error
Recv() (*StreamAccessLogsMessage, error)
grpc.ServerStream
}
type accessLogServiceStreamAccessLogsServer struct {
grpc.ServerStream
}
func (x *accessLogServiceStreamAccessLogsServer) SendAndClose(m *StreamAccessLogsResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *accessLogServiceStreamAccessLogsServer) Recv() (*StreamAccessLogsMessage, error) {
m := new(StreamAccessLogsMessage)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _AccessLogService_serviceDesc = grpc.ServiceDesc{
ServiceName: "envoy.service.accesslog.v2.AccessLogService",
HandlerType: (*AccessLogServiceServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "StreamAccessLogs",
Handler: _AccessLogService_StreamAccessLogs_Handler,
ClientStreams: true,
},
},
Metadata: "envoy/service/accesslog/v2/als.proto",
}
| {
return &accessLogServiceClient{cc}
} |
index.ts | /*
* Copyright (c) 2016-2020 Martin Donath <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* ----------------------------------------------------------------------------
* Functions
* ------------------------------------------------------------------------- */
/**
* Set focusable property
*
* @param el - Element
* @param value - Tabindex value
*/
export function | (
el: HTMLElement, value = 0
): void {
el.setAttribute("tabindex", value.toString())
}
/**
* Reset focusable property
*
* @param el - Element
*/
export function resetFocusable(
el: HTMLElement
): void {
el.removeAttribute("tabindex")
}
/**
* Set scroll lock
*
* @param el - Scrollable element
* @param value - Vertical offset
*/
export function setScrollLock(
el: HTMLElement, value: number
): void {
el.setAttribute("data-md-state", "lock")
el.style.top = `-${value}px`
}
/**
* Reset scroll lock
*
* @param el - Scrollable element
*/
export function resetScrollLock(
el: HTMLElement
): void {
const value = -1 * parseInt(el.style.top, 10)
el.removeAttribute("data-md-state")
el.style.top = ""
if (value)
window.scrollTo(0, value)
}
| setFocusable |
admin-components.js | Vue.component('submission-row',{
props: ['name','address','logo','description','date'],
template: `<tr>
<td><div v-text="name"></div></td>
<td><div v-text="address"></div></td>
<td><div v-text="logo"></div></td>
<td><div v-text="description"></div></td>
<td><div v-text="date"></div></td>
</tr>`
});
Vue.component("submissions-list",{
props:['data'], | <thead><tr>
<th>Company Name</th>
<th>Address</th>
<th>Logo</th>
<th>Description</th>
<th>Submission Date</th>
</tr></thead>
<tr is="submission-row" v-for="(obj, index) in data" :key="index" v-bind:name="data[index].cmpName" v-bind:address="data[index].cmpAddress" v-bind:logo="data[index].cmpLogo" v-bind:description="data[index].cmpDescription" v-bind:date="data[index].date"></tr>
</table>
</ul>`
}); | template: `
<ul>
<table class="table table-striped table-sm"> |
load_net.py | """
Utility file to select GraphNN model as
selected by the user
"""
from nets.molecules_graph_regression.gated_gcn_net import GatedGCNNet
from nets.molecules_graph_regression.gcn_net import GCNNet
from nets.molecules_graph_regression.gat_net import GATNet
from nets.molecules_graph_regression.graphsage_net import GraphSageNet
from nets.molecules_graph_regression.mo_net import MoNet as MoNet_
from nets.molecules_graph_regression.mlp_net import MLPNet
def GatedGCN(net_params):
return GatedGCNNet(net_params)
def GCN(net_params):
|
def GAT(net_params):
return GATNet(net_params)
def GraphSage(net_params):
return GraphSageNet(net_params)
def MoNet(net_params):
return MoNet_(net_params)
def MLP(net_params):
return MLPNet(net_params)
def gnn_model(MODEL_NAME, net_params):
models = {
'GatedGCN': GatedGCN,
'GCN': GCN,
'GAT': GAT,
'GraphSage': GraphSage,
'MoNet': MoNet,
'MLP': MLP
}
return models[MODEL_NAME](net_params)
| return GCNNet(net_params) |
identity_test.go | package peer
import (
"bytes"
"encoding/binary"
"testing"
"github.com/perlin-network/myNoise/crypto/blake2b"
)
var (
publicKey1 = []byte("12345678901234567890123456789012")
publicKey2 = []byte("12345678901234567890123456789011")
publicKey3 = []byte("12345678901234567890123456789013")
address = "localhost:12345"
id1 = CreateID(address, publicKey1)
id2 = CreateID(address, publicKey2)
id3 = CreateID(address, publicKey3)
)
func TestCreateID(t *testing.T) {
t.Parallel()
if !bytes.Equal(id1.Id, blake2b.New().HashBytes(publicKey1)) {
t.Errorf("PublicKey = %s, want %s", id1.Id, publicKey1)
}
if id1.Address != address {
t.Errorf("Address = %s, want %s", id1.Address, address)
}
}
func TestString(t *testing.T) {
t.Parallel()
want := "ID{Address: localhost:12345, Id: [73 44 127 92 143 18 83 102 101 246 108 105 60 227 86 107 128 15 61 7 191 108 178 184 1 152 19 41 78 16 131 58]}"
if id1.String() != want {
t.Errorf("String() = %s, want %s", id1.String(), want)
}
}
func TestEquals(t *testing.T) {
t.Parallel()
if !id1.Equals(CreateID(address, publicKey1)) {
t.Errorf("Equals() = %s, want %s", id1.PublicKeyHex(), id2.PublicKeyHex())
}
}
func TestLess(t *testing.T) |
func TestPublicKeyHex(t *testing.T) {
t.Parallel()
want := "3132333435363738393031323334353637383930313233343536373839303132"
if id1.PublicKeyHex() != want {
t.Errorf("PublicKeyHex() = %s, want %s", id1.PublicKeyHex(), want)
}
}
func TestXorId(t *testing.T) {
t.Parallel()
publicKey1Hash := blake2b.New().HashBytes(publicKey1)
publicKey3Hash := blake2b.New().HashBytes(publicKey3)
newId := make([]byte, len(publicKey3Hash))
for i, b := range publicKey1Hash {
newId[i] = b ^ publicKey3Hash[i]
}
xor := ID{
Address: address,
Id: newId,
}
result := id1.XorID(id3)
if !xor.Equals(result) {
t.Errorf("Xor() = %v, want %v", xor, result)
}
}
func TestXor(t *testing.T) {
t.Parallel()
xor := ID{
Address: address,
PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
}
result := id1.Xor(id3)
if !xor.Equals(result) {
t.Errorf("Xor() = %v, want %v", xor, result)
}
}
func TestPrefixLen(t *testing.T) {
t.Parallel()
testCases := []struct {
publicKeyHash uint32
expected int
}{
{1, 7},
{2, 6},
{4, 5},
{8, 4},
{16, 3},
{32, 2},
{64, 1},
}
for _, tt := range testCases {
publicKey := make([]byte, 4)
binary.LittleEndian.PutUint32(publicKey, tt.publicKeyHash)
id := ID{Address: address, Id: publicKey}
if id.PrefixLen() != tt.expected {
t.Errorf("PrefixLen() expected: %d, value: %d", tt.expected, id.PrefixLen())
}
}
}
| {
t.Parallel()
if id2.Less(id1) {
t.Errorf("'%s'.Less(%s) should be true", id2.PublicKeyHex(), id1.PublicKeyHex())
}
if !id1.Less(id2) {
t.Errorf("'%s'.Less(%s) should be false", id1.PublicKeyHex(), id2.PublicKeyHex())
}
if !id1.Less(id3) {
t.Errorf("'%s'.Less(%s) should be false", id1.PublicKeyHex(), id3.PublicKeyHex())
}
} |
config.go | package cloudstack
import (
"errors"
"fmt"
"os"
"time"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
)
// Config holds all the details needed to configure the builder.
type Config struct {
common.PackerConfig `mapstructure:",squash"`
common.HTTPConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
APIURL string `mapstructure:"api_url"`
APIKey string `mapstructure:"api_key"`
SecretKey string `mapstructure:"secret_key"`
AsyncTimeout time.Duration `mapstructure:"async_timeout"`
HTTPGetOnly bool `mapstructure:"http_get_only"`
SSLNoVerify bool `mapstructure:"ssl_no_verify"`
CIDRList []string `mapstructure:"cidr_list"`
CreateSecurityGroup bool `mapstructure:"create_security_group"`
DiskOffering string `mapstructure:"disk_offering"`
DiskSize int64 `mapstructure:"disk_size"`
Expunge bool `mapstructure:"expunge"`
Hypervisor string `mapstructure:"hypervisor"`
InstanceName string `mapstructure:"instance_name"`
Network string `mapstructure:"network"`
Project string `mapstructure:"project"`
PublicIPAddress string `mapstructure:"public_ip_address"`
PublicPort int `mapstructure:"public_port"`
SecurityGroups []string `mapstructure:"security_groups"`
ServiceOffering string `mapstructure:"service_offering"`
PreventFirewallChanges bool `mapstructure:"prevent_firewall_changes"`
SourceISO string `mapstructure:"source_iso"`
SourceTemplate string `mapstructure:"source_template"`
TemporaryKeypairName string `mapstructure:"temporary_keypair_name"`
UseLocalIPAddress bool `mapstructure:"use_local_ip_address"`
UserData string `mapstructure:"user_data"`
UserDataFile string `mapstructure:"user_data_file"`
Zone string `mapstructure:"zone"`
TemplateName string `mapstructure:"template_name"`
TemplateDisplayText string `mapstructure:"template_display_text"`
TemplateOS string `mapstructure:"template_os"`
TemplateFeatured bool `mapstructure:"template_featured"`
TemplatePublic bool `mapstructure:"template_public"`
TemplatePasswordEnabled bool `mapstructure:"template_password_enabled"`
TemplateRequiresHVM bool `mapstructure:"template_requires_hvm"`
TemplateScalable bool `mapstructure:"template_scalable"`
TemplateTag string `mapstructure:"template_tag"`
ctx interpolate.Context
}
// NewConfig parses and validates the given config.
func NewConfig(raws ...interface{}) (*Config, error) {
c := new(Config)
err := config.Decode(c, &config.DecodeOpts{
Interpolate: true,
InterpolateContext: &c.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"user_data",
},
},
}, raws...)
if err != nil {
return nil, err
}
var errs *packer.MultiError
// Set some defaults.
if c.APIURL == "" {
// Default to environment variable for api_url, if it exists
c.APIURL = os.Getenv("CLOUDSTACK_API_URL")
}
if c.APIKey == "" {
// Default to environment variable for api_key, if it exists
c.APIKey = os.Getenv("CLOUDSTACK_API_KEY")
}
if c.SecretKey == "" {
// Default to environment variable for secret_key, if it exists | if c.AsyncTimeout == 0 {
c.AsyncTimeout = 30 * time.Minute
}
if len(c.CIDRList) == 0 {
c.CIDRList = []string{"0.0.0.0/0"}
}
if c.InstanceName == "" {
c.InstanceName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
}
if c.TemplateName == "" {
name, err := interpolate.Render("packer-{{timestamp}}", nil)
if err != nil {
errs = packer.MultiErrorAppend(errs,
fmt.Errorf("Unable to parse template name: %s ", err))
}
c.TemplateName = name
}
if c.TemplateDisplayText == "" {
c.TemplateDisplayText = c.TemplateName
}
// If we are not given an explicit keypair, ssh_password or ssh_private_key_file,
// then create a temporary one, but only if the temporary_keypair_name has not
// been provided.
if c.Comm.SSHKeyPairName == "" && c.Comm.SSHTemporaryKeyPairName == "" &&
c.Comm.SSHPrivateKeyFile == "" && c.Comm.SSHPassword == "" {
c.Comm.SSHTemporaryKeyPairName = fmt.Sprintf("packer_%s", uuid.TimeOrderedUUID())
}
// Process required parameters.
if c.APIURL == "" {
errs = packer.MultiErrorAppend(errs, errors.New("a api_url must be specified"))
}
if c.APIKey == "" {
errs = packer.MultiErrorAppend(errs, errors.New("a api_key must be specified"))
}
if c.SecretKey == "" {
errs = packer.MultiErrorAppend(errs, errors.New("a secret_key must be specified"))
}
if c.Network == "" {
errs = packer.MultiErrorAppend(errs, errors.New("a network must be specified"))
}
if c.CreateSecurityGroup && !c.Expunge {
errs = packer.MultiErrorAppend(errs, errors.New("auto creating a temporary security group requires expunge"))
}
if c.ServiceOffering == "" {
errs = packer.MultiErrorAppend(errs, errors.New("a service_offering must be specified"))
}
if c.SourceISO == "" && c.SourceTemplate == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("either source_iso or source_template must be specified"))
}
if c.SourceISO != "" && c.SourceTemplate != "" {
errs = packer.MultiErrorAppend(
errs, errors.New("only one of source_iso or source_template can be specified"))
}
if c.SourceISO != "" && c.DiskOffering == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a disk_offering must be specified when using source_iso"))
}
if c.SourceISO != "" && c.Hypervisor == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("a hypervisor must be specified when using source_iso"))
}
if c.TemplateOS == "" {
errs = packer.MultiErrorAppend(errs, errors.New("a template_os must be specified"))
}
if c.UserData != "" && c.UserDataFile != "" {
errs = packer.MultiErrorAppend(
errs, errors.New("only one of user_data or user_data_file can be specified"))
}
if c.UserDataFile != "" {
if _, err := os.Stat(c.UserDataFile); err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("user_data_file not found: %s", c.UserDataFile))
}
}
if c.Zone == "" {
errs = packer.MultiErrorAppend(errs, errors.New("a zone must be specified"))
}
if es := c.Comm.Prepare(&c.ctx); len(es) > 0 {
errs = packer.MultiErrorAppend(errs, es...)
}
// Check for errors and return if we have any.
if errs != nil && len(errs.Errors) > 0 {
return nil, errs
}
return c, nil
} | c.SecretKey = os.Getenv("CLOUDSTACK_SECRET_KEY")
}
|
setup.py | import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
|
setup(name='docassemble.graphletter',
version='0.0.1',
description=('A docassemble extension.'),
long_description='# docassemble.graphletter\n\nA docassemble extension.\n\n## Author\n\nSystem Administrator, [email protected]\n\n',
long_description_content_type='text/markdown',
author='System Administrator',
author_email='[email protected]',
license='The MIT License (MIT)',
url='https://docassemble.org',
packages=find_packages(),
namespace_packages=['docassemble'],
install_requires=[],
zip_safe=False,
package_data=find_package_data(where='docassemble/graphletter/', package='docassemble.graphletter'),
)
| out = {}
stack = [(convert_path(where), '', package)]
while stack:
where, prefix, package = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package))
else:
stack.append((fn, prefix + name + '/', package))
else:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out |
pizza-description.js | module.exports = function () { | }
}
} | return function (formData) {
return {
description: `${formData.name}: ${formData.base}, ${formData.toppings.length > 0 ? formData.toppings.join(', ') : 'No Toppings'}` |
mod.rs | mod args;
mod output;
mod test_dir;
mod test_file;
mod test_link; | pub use args::with_default_args;
pub use output::*;
pub use test_dir::TestDir; | |
opts.go | package opts
import (
"fmt"
"math/big"
"net"
"path"
"regexp"
"strings"
"github.com/docker/docker/api/types/filters"
units "github.com/docker/go-units"
)
var (
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)
// ListOpts holds a list of values and a validation function.
type ListOpts struct {
values *[]string
validator ValidatorFctType
}
// NewListOpts creates a new ListOpts with the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
var values []string
return *NewListOptsRef(&values, validator)
}
// NewListOptsRef creates a new ListOpts with the specified values and validator.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
return &ListOpts{
values: values,
validator: validator,
}
}
func (opts *ListOpts) String() string {
if len(*opts.values) == 0 {
return ""
}
return fmt.Sprintf("%v", *opts.values)
}
// Set validates if needed the input value and adds it to the
// internal slice.
func (opts *ListOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
(*opts.values) = append((*opts.values), value)
return nil
}
// Delete removes the specified element from the slice.
func (opts *ListOpts) Delete(key string) {
for i, k := range *opts.values {
if k == key {
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
return
}
}
}
// GetMap returns the content of values in a map in order to avoid
// duplicates.
func (opts *ListOpts) GetMap() map[string]struct{} {
ret := make(map[string]struct{})
for _, k := range *opts.values {
ret[k] = struct{}{}
}
return ret
}
// GetAll returns the values of slice.
func (opts *ListOpts) GetAll() []string {
return (*opts.values)
}
// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
v := *opts.values
if v == nil {
return make([]string, 0)
}
return v
}
// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
for _, k := range *opts.values {
if k == key {
return true
}
}
return false
}
// Len returns the amount of element in the slice.
func (opts *ListOpts) Len() int {
return len((*opts.values))
}
// Type returns a string name for this Option type
func (opts *ListOpts) Type() string {
return "list"
}
// WithValidator returns the ListOpts with validator set.
func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
opts.validator = validator
return opts
}
// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
Name() string
}
// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
name string
ListOpts
}
var _ NamedOption = &NamedListOpts{}
// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
return &NamedListOpts{
name: name,
ListOpts: *NewListOptsRef(values, validator),
}
}
// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
return o.name
}
// MapOpts holds a map of values and a validation function.
type MapOpts struct {
values map[string]string
validator ValidatorFctType
}
// Set validates if needed the input value and add it to the
// internal map, by splitting on '='.
func (opts *MapOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil |
value = v
}
vals := strings.SplitN(value, "=", 2)
if len(vals) == 1 {
(opts.values)[vals[0]] = ""
} else {
(opts.values)[vals[0]] = vals[1]
}
return nil
}
// GetAll returns the values of MapOpts as a map.
func (opts *MapOpts) GetAll() map[string]string {
return opts.values
}
func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values)))
}
// Type returns a string name for this Option type
func (opts *MapOpts) Type() string {
return "map"
}
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
if values == nil {
values = make(map[string]string)
}
return &MapOpts{
values: values,
validator: validator,
}
}
// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
name string
MapOpts
}
var _ NamedOption = &NamedMapOpts{}
// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
return &NamedMapOpts{
name: name,
MapOpts: *NewMapOpts(values, validator),
}
}
// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
return o.name
}
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)
// ValidateIPAddress validates an Ip address.
func ValidateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
if ip != nil {
return ip.String(), nil
}
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
_, err := net.ParseMAC(strings.TrimSpace(val))
if err != nil {
return "", err
}
return val, nil
}
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
if val = strings.Trim(val, " "); val == "." {
return val, nil
}
return validateDomain(val)
}
func validateDomain(val string) (string, error) {
if alphaRegexp.FindString(val) == "" {
return "", fmt.Errorf("%s is not a valid domain", val)
}
ns := domainRegexp.FindSubmatch([]byte(val))
if len(ns) > 0 && len(ns[1]) < 255 {
return string(ns[1]), nil
}
return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form on key=value.
func ValidateLabel(val string) (string, error) {
if strings.Count(val, "=") < 1 {
return "", fmt.Errorf("bad attribute format: %s", val)
}
return val, nil
}
// ValidateSysctl validates a sysctl and returns it.
func ValidateSysctl(val string) (string, error) {
validSysctlMap := map[string]bool{
"kernel.msgmax": true,
"kernel.msgmnb": true,
"kernel.msgmni": true,
"kernel.sem": true,
"kernel.shmall": true,
"kernel.shmmax": true,
"kernel.shmmni": true,
"kernel.shm_rmid_forced": true,
}
validSysctlPrefixes := []string{
"net.",
"fs.mqueue.",
}
arr := strings.Split(val, "=")
if len(arr) < 2 {
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
if validSysctlMap[arr[0]] {
return val, nil
}
for _, vp := range validSysctlPrefixes {
if strings.HasPrefix(arr[0], vp) {
return val, nil
}
}
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
}
// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
return FilterOpt{filter: filters.NewArgs()}
}
func (o *FilterOpt) String() string {
repr, err := filters.ToParam(o.filter)
if err != nil {
return "invalid filters"
}
return repr
}
// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
var err error
o.filter, err = filters.ParseFlag(value, o.filter)
return err
}
// Type returns the option type
func (o *FilterOpt) Type() string {
return "filter"
}
// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
return o.filter
}
// NanoCPUs is a type for fixed point fractional number.
type NanoCPUs int64
// String returns the string format of the number
func (c *NanoCPUs) String() string {
if *c == 0 {
return ""
}
return big.NewRat(c.Value(), 1e9).FloatString(3)
}
// Set sets the value of the NanoCPU by passing a string
func (c *NanoCPUs) Set(value string) error {
cpus, err := ParseCPUs(value)
*c = NanoCPUs(cpus)
return err
}
// Type returns the type
func (c *NanoCPUs) Type() string {
return "decimal"
}
// Value returns the value in int64
func (c *NanoCPUs) Value() int64 {
return int64(*c)
}
// ParseCPUs takes a string ratio and returns an integer value of nano cpus
func ParseCPUs(value string) (int64, error) {
cpu, ok := new(big.Rat).SetString(value)
if !ok {
return 0, fmt.Errorf("failed to parse %v as a rational number", value)
}
nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
if !nano.IsInt() {
return 0, fmt.Errorf("value is too precise")
}
return nano.Num().Int64(), nil
}
// ParseLink parses and validates the specified string as a link format (name:alias)
func ParseLink(val string) (string, string, error) {
if val == "" {
return "", "", fmt.Errorf("empty string specified for links")
}
arr := strings.Split(val, ":")
if len(arr) > 2 {
return "", "", fmt.Errorf("bad format for links: %s", val)
}
if len(arr) == 1 {
return val, val, nil
}
// This is kept because we can actually get a HostConfig with links
// from an already created container and the format is not `foo:bar`
// but `/foo:/c1/bar`
if strings.HasPrefix(arr[0], "/") {
_, alias := path.Split(arr[1])
return arr[0][1:], alias, nil
}
return arr[0], arr[1], nil
}
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
func ValidateLink(val string) (string, error) {
_, _, err := ParseLink(val)
return val, err
}
// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
type MemBytes int64

// String renders the value as a human-readable size, or "0" for zero.
func (m *MemBytes) String() string {
	// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
	// We return "0" in case value is 0 here so that the default value is hidden.
	// (Sometimes "default 0 B" is actually misleading)
	if m.Value() != 0 {
		return units.BytesSize(float64(m.Value()))
	}
	return "0"
}
// Set parses a human-readable size string (e.g. "128M", "2g") and stores it.
func (m *MemBytes) Set(value string) error {
	val, err := units.RAMInBytes(value)
	*m = MemBytes(val)
	return err
}

// Type returns the type name reported to the flag framework for this value.
func (m *MemBytes) Type() string {
	return "bytes"
}

// Value returns the stored byte count as an int64.
func (m *MemBytes) Value() int64 {
	return int64(*m)
}
// UnmarshalJSON is the customized unmarshaler for MemBytes: it accepts only
// a non-empty JSON string literal and parses it as a human-readable size.
func (m *MemBytes) UnmarshalJSON(s []byte) error {
	isQuoted := len(s) > 2 && s[0] == '"' && s[len(s)-1] == '"'
	if !isQuoted {
		return fmt.Errorf("invalid size: %q", s)
	}
	parsed, err := units.RAMInBytes(string(s[1 : len(s)-1]))
	*m = MemBytes(parsed)
	return err
}
// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc).
// It differs from MemBytes in that -1 is valid and the default.
type MemSwapBytes int64

// Set parses a human-readable size string and stores it; "-1" is accepted
// as a special sentinel (it is not a parsable byte size).
func (m *MemSwapBytes) Set(value string) error {
	if value == "-1" {
		*m = MemSwapBytes(-1)
		return nil
	}
	val, err := units.RAMInBytes(value)
	*m = MemSwapBytes(val)
	return err
}

// Type returns the type name reported to the flag framework for this value.
func (m *MemSwapBytes) Type() string {
	return "bytes"
}

// Value returns the stored byte count as an int64.
func (m *MemSwapBytes) Value() int64 {
	return int64(*m)
}

// String delegates to MemBytes.String, so 0 renders as "0" and other values
// as a human-readable size.
func (m *MemSwapBytes) String() string {
	b := MemBytes(*m)
	return b.String()
}
// UnmarshalJSON is the customized unmarshaler for MemSwapBytes.
//
// Bug fix: the previous version unmarshaled into a local MemBytes copy and
// discarded the result, so *m was never updated. Parse via MemBytes, then
// write the parsed value back on success.
func (m *MemSwapBytes) UnmarshalJSON(s []byte) error {
	var b MemBytes
	if err := b.UnmarshalJSON(s); err != nil {
		return err
	}
	*m = MemSwapBytes(b)
	return nil
}
| {
return err
} |
006500710075.js | NDSearch.OnPrefixDataLoaded("equ",["Constant"],[["EQUAL",,[["ssGUI::Enums",,,,0,"File:ssGUI/Enums/SymbolKey.hpp:ssGUI.Enums.EQUAL","CClass:ssGUI.Enums:EQUAL"]]]]); |
||
creator.rs | use rg_lib::* ;
use model::* ;
use res::* ;
use cmd::{ CmdDesp } ;
use std::collections::HashMap ;
/// A resource that can be constructed from a string map and is identified
/// by a unique key string.
pub trait ResLoader<T> {
    fn load(data: &StrMap) -> T;
    fn key() -> String;
}

/// A command that can be constructed without arguments and is identified
/// by a unique key string.
pub trait CmdLoader<T> {
    fn load() -> T;
    fn key() -> String;
}

/// Function-pointer type that builds a boxed resource for a matching key.
pub type ResCreator = fn(key: &String, data: &StrMap) -> Option<Box<Res>>;
/// Function-pointer type that builds a boxed command for a matching key.
pub type CmdCreator = fn(key: &String) -> Option<Box<Cmd>>;
// Internal registries mapping keys to their creator functions.
type ResCreatorMap = HashMap<String, ResCreator>;
type CmdCreatorMap = HashMap<String, CmdCreator>;
pub struct ResFatory
{
creators : ResCreatorMap,
}
impl ResFatory
{
pub fn new() -> ResFatory
{
ResFatory{ creators : ResCreatorMap::new() }
}
pub fn | (&mut self ,key :String,creator : ResCreator )
{
self.creators.insert(key,creator) ;
}
pub fn create(&self,key :&String, data : &StrMap) -> Option<Box<Res>>
{
self.creators.get(key).and_then( | v | v(key,data) )
}
}
/// `ResCreator` implementation generated for a concrete resource type `T`:
/// when `key` matches `T::key()`, loads a `T` from `data` and boxes it.
pub fn res_createor_impl<T>(key: &String, data: &StrMap) -> Option<Box<Res>>
where
    T: ResLoader<T> + ResDesp + InvokeHook + InvokeStart + InvokeStop + 'static,
{
    if *key != T::key() {
        return None;
    }
    let loaded: ResBox = Box::new(T::load(data));
    Some(loaded)
}

/// Registers the creator for resource type `T` under `T::key()`.
pub fn regist_res_creator<T>(f: &mut ResFatory)
where
    T: ResLoader<T> + ResDesp + InvokeHook + InvokeStart + InvokeStop + 'static,
{
    let creator: ResCreator = res_createor_impl::<T>;
    f.regist(T::key(), creator);
}
/// Maps command keys to the creator functions able to build them.
pub struct CmdFatory {
    creators: CmdCreatorMap,
}

impl CmdFatory {
    /// Builds an empty command factory.
    pub fn new() -> CmdFatory {
        CmdFatory {
            creators: CmdCreatorMap::new(),
        }
    }

    /// Registers `creator` under `key`, replacing any previous entry.
    pub fn regist(&mut self, key: String, creator: CmdCreator) {
        self.creators.insert(key, creator);
    }

    /// Looks up the creator registered for `key` and invokes it.
    pub fn create(&self, key: &String) -> Option<Box<Cmd>> {
        match self.creators.get(key) {
            Some(creator) => creator(key),
            None => None,
        }
    }
}
pub fn cmd_createor_impl<T>(key :&String ) -> Option<Box<Cmd>>
where T: Cmd + CmdLoader<T> + CmdDesp + 'static
{
if *key == T::key()
{
let obj: CmdBox = Box::new(T::load());
return Some(obj) ;
}
return None
}
pub fn regist_cmd_creator<T>(f : &mut CmdFatory)
where T: Cmd + CmdLoader<T> + CmdDesp + 'static
{
let fnobj : CmdCreator = cmd_createor_impl::<T> ;
f.regist(T::key(),fnobj) ;
}
| regist |
test.rs | use std::fmt;
use dim::Dimensioned;
use approx::ApproxEq;
pub trait CmpConsts<B> {
fn test_eq(self, b: B);
}
#[cfg(feature = "spec")]
impl<A, B> CmpConsts<B> for A {
default fn test_eq(self, _: B) {
} | assert_ulps_eq!(self, b.into(), epsilon = A::new(0.0), max_ulps = 2);
}
} | }
impl<A, B> CmpConsts<B> for A where A: From<B> + fmt::Debug + Dimensioned<Value=f64> + ApproxEq<Epsilon=Self> {
fn test_eq(self, b: B) { |
game_thread.rs | //! Hooks and other code that is running on the game/main thread (As opposed to async threads).
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use byteorder::{ByteOrder, LittleEndian};
use fxhash::FxHashSet;
use lazy_static::lazy_static;
use libc::c_void;
use once_cell::sync::OnceCell;
use bw_dat::dialog::Dialog;
use bw_dat::{Unit, UnitId};
use crate::app_messages::{GameSetupInfo};
use crate::bw::{self, Bw, get_bw, StormPlayerId};
use crate::forge;
use crate::replay;
use crate::snp;
lazy_static! {
pub static ref SEND_FROM_GAME_THREAD: Mutex<Option<tokio::sync::mpsc::UnboundedSender<GameThreadMessage>>> =
Mutex::new(None);
pub static ref GAME_RECEIVE_REQUESTS: Mutex<Option<Receiver<GameThreadRequest>>> =
Mutex::new(None);
}
/// Global for accessing game type/slots/etc from hooks.
static SETUP_INFO: OnceCell<Arc<GameSetupInfo>> = OnceCell::new();
/// Global for shieldbattery-specific replay data.
/// Will not be initialized outside replays. (Or if the replay doesn't have that data)
static SBAT_REPLAY_DATA: OnceCell<replay::SbatReplayData> = OnceCell::new();
/// Contains game id, shieldbattery user id pairs after the slots have been randomized,
/// human player slots / obeservers only.
/// Once this is set it is expected to be valid for the entire game.
/// Could also be easily extended to have storm ids if mapping between them is needed.
static PLAYER_ID_MAPPING: OnceCell<Vec<PlayerIdMapping>> = OnceCell::new();
pub struct PlayerIdMapping {
/// None at least for observers
pub game_id: Option<u8>,
pub sb_user_id: u32,
}
/// Stores the shieldbattery-specific replay data; warns if called twice
/// (the `OnceCell` keeps the first value).
pub fn set_sbat_replay_data(data: replay::SbatReplayData) {
    if SBAT_REPLAY_DATA.set(data).is_err() {
        warn!("Tried to set shieldbattery replay data twice");
    }
}

/// Returns the replay data if it has been set.
fn sbat_replay_data() -> Option<&'static replay::SbatReplayData> {
    SBAT_REPLAY_DATA.get()
}
// Async tasks request game thread to do some work
pub struct GameThreadRequest {
request_type: GameThreadRequestType,
// These requests probably won't have any reason to return values on success.
// If a single one does, it can send a GameThreadMessage.
done: tokio::sync::oneshot::Sender<()>,
}
impl GameThreadRequest {
pub fn new(
request_type: GameThreadRequestType,
) -> (GameThreadRequest, tokio::sync::oneshot::Receiver<()>) {
let (done, wait_done) = tokio::sync::oneshot::channel();
(GameThreadRequest { request_type, done }, wait_done)
}
}
pub enum GameThreadRequestType {
Initialize,
RunWndProc,
StartGame,
ExitCleanup,
SetupInfo(Arc<GameSetupInfo>),
}
// Game thread sends something to async tasks
pub enum GameThreadMessage {
WindowMove(i32, i32),
Snp(snp::SnpMessage),
/// Storm player id (which stays stable) -> game player id mapping.
/// Once this message is sent, any game player ids used so far should be
/// considered invalid and updated to match this mapping.
PlayersRandomized([Option<u8>; bw::MAX_STORM_PLAYERS]),
Results(GameThreadResults),
}
/// Sends a message from game thread to the async system.
pub fn send_game_msg_to_async(message: GameThreadMessage) {
let send_global = SEND_FROM_GAME_THREAD.lock().unwrap();
if let Some(ref send) = *send_global {
let _ = send.send(message);
} else {
debug!("Game thread messaging not active");
}
}
pub fn run_event_loop() -> ! {
debug!("Main thread reached event loop");
let mut receive_requests = GAME_RECEIVE_REQUESTS.lock().unwrap();
let receive_requests = receive_requests
.take()
.expect("Channel to receive requests not set?");
while let Ok(msg) = receive_requests.recv() {
unsafe {
handle_game_request(msg.request_type);
}
let _ = msg.done.send(());
}
// We can't return from here, as it would put us back in middle of BW's initialization code
crate::wait_async_exit();
}
unsafe fn handle_game_request(request: GameThreadRequestType) {
use self::GameThreadRequestType::*;
match request {
Initialize => init_bw(),
RunWndProc => forge::run_wnd_proc(),
StartGame => {
forge::game_started();
get_bw().run_game_loop();
debug!("Game loop ended");
let results = game_results();
send_game_msg_to_async(GameThreadMessage::Results(results));
forge::hide_window();
}
// Saves registry settings etc.
ExitCleanup => {
get_bw().clean_up_for_exit();
}
SetupInfo(info) => {
if let Err(_) = SETUP_INFO.set(info) {
warn!("Received second SetupInfo");
}
}
}
}
/// Stores the game-id / shieldbattery-user mapping; warns if set twice
/// (the `OnceCell` keeps the first value).
pub fn set_player_id_mapping(mapping: Vec<PlayerIdMapping>) {
    if PLAYER_ID_MAPPING.set(mapping).is_err() {
        warn!("Player id mapping set twice");
    }
}

/// Returns the mapping set above, or an empty slice (with a warning) if it
/// has not been initialized yet.
pub fn player_id_mapping() -> &'static [PlayerIdMapping] {
    match PLAYER_ID_MAPPING.get() {
        Some(mapping) => mapping.as_slice(),
        None => {
            warn!("Tried to access player id mapping before it was set");
            &[]
        }
    }
}
#[derive(Eq, PartialEq, Copy, Clone)]
pub enum PlayerLoseType {
UnknownChecksumMismatch,
UnknownDisconnect,
}
pub struct GameThreadResults {
// Index by ingame player id
pub victory_state: [u8; 8],
pub race: [u8; 8],
// Index by storm id
pub player_has_left: [bool; 8],
pub player_lose_type: Option<PlayerLoseType>,
pub time_ms: u32,
}
unsafe fn game_results() -> GameThreadResults {
let bw = get_bw();
let game = bw.game();
let players = bw.players();
GameThreadResults {
victory_state: (*game).victory_state,
race: {
let mut arr = [bw::RACE_ZERG; 8];
for i in 0..8 {
arr[i] = (*players.add(i as usize)).race;
}
arr
},
player_has_left: {
let mut arr = [false; 8];
for i in 0..8 {
arr[i] = (*game).player_has_left[i] != 0;
}
arr
},
player_lose_type: match (*game).player_lose_type {
1 => Some(PlayerLoseType::UnknownChecksumMismatch),
2 => Some(PlayerLoseType::UnknownDisconnect),
_ => None,
},
// Assuming fastest speed
time_ms: (*game).frame_count.saturating_mul(42),
}
}
// Does the rest of initialization that is being done in main thread before running forge's
// window proc.
unsafe fn init_bw() {
let bw = get_bw();
bw.init_sprites();
(*bw.game()).is_bw = 1;
debug!("Process initialized");
}
/// Bw impl is expected to hook the point after init_game_data and call this.
pub unsafe fn after_init_game_data() {
// Let async thread know about player randomization.
// The function that bw_1161/bw_scr refer to as init_game_data mainly initializes global
// data structures used in a game. Player randomization seems to have been done before that,
// so if it ever in future ends up being the case that the async thread has a point where it
// uses wrong game player ids, a more exact point for this hook should be decided.
//
// But for now it should be fine, and this should also be late enough in initialization that
// any possible alternate branches for save/replay/ums randomization should have been executed
// as well.
let bw = get_bw();
let mut mapping = [None; bw::MAX_STORM_PLAYERS];
let players = bw.players();
for i in 0..8 {
let storm_id = (*players.add(i)).storm_id;
if let Some(out) = mapping.get_mut(storm_id as usize) {
*out = Some(i as u8);
}
}
send_game_msg_to_async(GameThreadMessage::PlayersRandomized(mapping));
// Create fog-of-war sprites for any neutral buildings
if !is_ums() {
for unit in bw.active_units() {
if unit.player() == 11 && unit.is_landed_building() {
bw.create_fow_sprite(unit);
}
}
}
}
/// True if the current game's setup info says it is a Use Map Settings game.
pub fn is_ums() -> bool {
    // TODO This returns false on replays. Also same thing about looking at BW's
    // structures as for is_team_game
    SETUP_INFO.get()
        .and_then(|x| x.game_type())
        .filter(|x| x.is_ums())
        .is_some()
}

/// True if the current game (or replay) is a team game.
pub fn is_team_game() -> bool {
    // Technically it would be better to look at BW's structures instead, but we don't
    // have them available for SC:R right now.
    if is_replay() {
        // Replays carry team-game main players in the extended replay data;
        // an all-zero array is treated as "not a team game".
        sbat_replay_data()
            .filter(|x| x.team_game_main_players != [0, 0, 0, 0])
            .is_some()
    } else {
        SETUP_INFO.get()
            .and_then(|x| x.game_type())
            .filter(|x| x.is_team_game())
            .is_some()
    }
}

/// True if we are playing back a replay rather than a live game.
pub fn is_replay() -> bool {
    SETUP_INFO.get()
        .and_then(|x| x.map.is_replay)
        .unwrap_or(false)
}

/// Returns the game setup info; panics if called before it has been set.
pub fn setup_info() -> &'static GameSetupInfo {
    &*SETUP_INFO.get().unwrap()
}
/// Bw impl is expected to call this after step_game,
/// the function that progresses game objects by a tick/frame/step.
/// In other words, if the game isn't paused/lagging, this gets ran 24 times in second
/// on fastest game speed.
/// This function can be used for hooks that change gameplay state after BW has done (most of)
/// its once-per-gameplay-frame processing but before anything gets rendered. It probably
/// isn't too useful to us unless we end up having a need to change game rules.
pub unsafe fn after_step_game() {
let bw = get_bw();
if is_replay() && !is_ums() {
// One thing BW's step_game does is that it removes any fog sprites that were
// no longer in fog. Unfortunately now that we show fog sprites for unexplored
// resources as well, removing those fog sprites ends up being problematic if
// the user switches vision off from a player who had those resources explored.
// In such case the unexplored fog sprites would not appear and some of the
// expansions would show up as empty while other unexplored bases keep their
// fog sprites as usual.
// To get around this issue, check which neutral buildings don't have fog
// sprites and add them back.
// (Adding fog sprites on visible area is fine, at least in replays)
let mut fow_sprites = FxHashSet::with_capacity_and_hasher(256, Default::default());
for fow in bw.fow_sprites() {
let sprite = (*fow).sprite;
let pos = bw.sprite_position(sprite);
fow_sprites.insert((pos.x, pos.y, UnitId((*fow).unit_id)));
}
for unit in bw.active_units() {
if unit.player() == 11 && unit.is_landed_building() {
// This currently adds fow sprites even for buildings that became
// neutral after player left. It's probably fine, but if it wasn't
// desired, checking that `sprite.player == 11` should only include
// buildings that existed from map start
let sprite = (**unit).sprite;
let pos = bw.sprite_position(sprite as *mut c_void);
if fow_sprites.insert((pos.x, pos.y, unit.id())) {
bw.create_fow_sprite(unit);
}
}
}
}
}
/// Reimplementation of replay command reading & processing since the default implementation
/// has buffer overflows for replays where there are too many commands in a frame.
///
/// A function pointer for the original function is still needed to handle replay ending
/// case which we don't need to touch.
pub unsafe fn step_replay_commands(orig: unsafe extern fn()) {
let bw = get_bw();
let game = bw.game();
let replay = bw.replay_data();
let command_lengths = bw.game_command_lengths();
let frame = (*game).frame_count;
let data_end = (*replay).data_start.add((*replay).data_length as usize);
let remaining_length = (data_end as usize).saturating_sub((*replay).data_pos as usize);
// Data is in format
// u32 frame, u8 length, { u8 storm_player, u8 cmd[] }[length]
// Repeated for each frame in replay, if the commands don't fit in a single frame
// then there can be repeated blocks with equal frame number.
let mut data = std::slice::from_raw_parts((*replay).data_pos, remaining_length);
if data.is_empty() {
// Let the original function handle replay ending
orig();
return;
}
loop {
let (mut frame_data, rest) = match replay_next_frame(data) {
Some(s) => s,
None => {
warn!("Broken replay? Unable to read next frame");
(*replay).data_pos = data_end;
return;
}
};
if frame_data.frame > frame {
break;
}
data = rest;
while let Some((storm_player, command)) = frame_data.next_command(command_lengths) {
bw.process_replay_commands(command, storm_player);
}
}
let new_pos = (data_end as usize - data.len()) as *mut u8;
(*replay).data_pos = new_pos;
}
/// One frame block parsed from replay data.
struct ReplayFrame<'a> {
    // Frame number this block belongs to; a single frame may span several
    // consecutive blocks carrying the same number.
    frame: u32,
    // (u8 storm_player, u8 command[...]) pairs repeated.
    // (Command length must be known from the data)
    commands: &'a [u8],
}
/// Splits one frame block off the front of `input`.
/// Layout: u32 little-endian frame number, u8 commands length, then that
/// many bytes of commands. Returns the parsed frame plus the remaining
/// bytes, or `None` if the input is truncated.
fn replay_next_frame<'a>(input: &'a [u8]) -> Option<(ReplayFrame<'a>, &'a [u8])> {
    if input.len() < 5 {
        return None;
    }
    let frame = u32::from_le_bytes([input[0], input[1], input[2], input[3]]);
    let commands_len = input[4] as usize;
    let after_header = &input[5..];
    let commands = after_header.get(..commands_len)?;
    let rest = after_header.get(commands_len..)?;
    Some((ReplayFrame { frame, commands }, rest))
}
impl<'a> ReplayFrame<'a> {
pub fn | (&mut self, command_lengths: &[u32]) -> Option<(StormPlayerId, &'a [u8])> {
let player = StormPlayerId(*self.commands.get(0)?);
let data = self.commands.get(1..)?;
let length = bw::commands::command_length(data, command_lengths)?;
let command = data.get(..length)?;
let rest = data.get(length..)?;
self.commands = rest;
Some((player, command))
}
}
/// Bw impl is expected to hook the point before init_unit_data and call this.
/// (It happens to be easy function for SC:R analysis to find and in a nice
/// spot to inject game init hooks for things that require initialization to
/// have progressed a bit but not too much)
pub unsafe fn before_init_unit_data(bw: &dyn Bw) {
let game = bw.game();
if let Some(ext) = sbat_replay_data() {
// This makes team game replays work.
// This hook is unfortunately after the game has calculated
// max supply for team games (It can be over 200), so we'll have to fix
// those as well.
//
// (I don't think we have a better way to check for team game replay right now
// other than just assuming that non-team games have main players as [0, 0, 0, 0])
if ext.team_game_main_players != [0, 0, 0, 0] {
(*game).team_game_main_player = ext.team_game_main_players;
(*game).starting_races = ext.starting_races;
let team_count = ext.team_game_main_players
.iter()
.take_while(|&&x| x != 0xff)
.count();
let players_per_team = match team_count {
2 => 4,
3 => 3,
4 => 2,
_ => 0,
};
// Non-main players get 0 max supply.
// Clear what bw had already initialized.
// (Other players having unused max supply likely won't matter but you never know)
for race_supplies in (*game).supplies.iter_mut() {
for max in race_supplies.max.iter_mut() {
*max = 0;
}
}
let mut pos = 0;
for i in 0..team_count {
let main_player = ext.team_game_main_players[i] as usize;
let first_pos = pos;
let mut race_counts = [0; 3];
for _ in 0..players_per_team {
// The third team of 3-team game has only two slots, they
// get their first slot counted twice
let index = match pos < 8 {
true => pos,
false => first_pos,
};
let race = ext.starting_races[index];
race_counts[race as usize] += 1;
pos += 1;
}
for race in 0..3 {
let count = race_counts[race].max(1);
// This value is twice the displayed, so 200 max supply for each
// player in team. (Or 200 if none)
(*game).supplies[race].max[main_player] = count * 400;
}
}
}
}
}
pub unsafe fn after_status_screen_update(bw: &dyn Bw, status_screen: Dialog, unit: Unit) {
// Show "Stacked (n)" text for stacked buildings
if unit.is_landed_building() {
fn normalize_id(id: UnitId) -> UnitId {
use bw_dat::unit;
// For mineral fields, consider any mineral field unit as equivalent.
// May be useful in some fastest maps.
match id {
unit::MINERAL_FIELD_2 | unit::MINERAL_FIELD_3 => unit::MINERAL_FIELD_1,
x => x,
}
}
// Find units that have same unit id and collide with this unit's center
// (So they don't necessarily have to be perfectly stacked)
let mut count = 0;
let pos = unit.position();
let id = normalize_id(unit.id());
// Doing a loop like this through every active unit is definitely worse
// than using some position searching structure, but building that structure
// would still require looping through the units once.
// If we have such structure in future for some other code it should be used
// here too though.
for other in bw.active_units() {
if normalize_id(other.id()) == id {
if other.collision_rect().contains_point(&pos) {
count += 1;
}
}
}
if count > 1 {
// Show the text at where unit rank/status is usually, as long as it hasn't
// been used.
if let Some(rank_status) = status_screen.child_by_id(-20) {
let existing_text = rank_status.string();
if rank_status.is_hidden() ||
existing_text.starts_with("Stacked") ||
existing_text == ""
{
use std::io::Write;
let mut buffer = [0; 32];
let buf_len = buffer.len();
let mut out = &mut buffer[..];
// TODO: Could use translations in other SC:R languages :)
let _ = write!(&mut out, "Stacked ({})", count);
let len = buf_len - out.len();
rank_status.set_string(&buffer[..len]);
rank_status.show();
}
}
}
}
}
| next_command |
lib.rs | //! Rust bindings for [SFML](http://www.sfml-dev.org), the Simple and Fast Multimedia Library.
//!
//! Prerequisites
//! =============
//!
//! - Rust 1.36
//!
//! - SFML 2.5 and CSFML 2.5 must be installed on your computer. You can download them here:
//!
//! - SFML 2.5: <http://www.sfml-dev.org/download.php>
//! - CSFML 2.5: <http://www.sfml-dev.org/download/csfml/>
//!
//! - Supported platforms:
//! - Linux
//! - Windows
//! - Mac OS X
//!
//! # License
//!
//! This software is a binding of the SFML library created by Laurent Gomila, which
//! is provided under the Zlib/png license.
//!
//! This software is provided under the same license than the SFML, the Zlib/png | missing_docs,
trivial_numeric_casts,
missing_copy_implementations,
missing_debug_implementations,
unused_results,
trivial_casts,
clippy::must_use_candidate,
clippy::doc_markdown,
clippy::cast_possible_truncation,
clippy::mut_mut,
clippy::cast_possible_wrap,
clippy::cast_sign_loss
)]
#[cfg(feature = "window")]
#[macro_use]
extern crate bitflags;
extern crate csfml_system_sys;
#[cfg(feature = "window")]
extern crate csfml_window_sys;
#[cfg(any(feature = "graphics", feature = "audio"))]
mod inputstream;
mod sf_bool_ext;
#[cfg(feature = "audio")]
pub mod audio;
#[cfg(feature = "graphics")]
pub mod graphics;
#[cfg(feature = "network")]
pub mod network;
pub mod system;
#[cfg(feature = "window")]
pub mod window; | //! license.
//!
#![warn( |
launch.ts | import { is } from "./is.js";
import { extend } from "./util/extend.js";
import { global } from "./global.js";
import { activateCamera } from "./activateCamera.js";
import { patch } from "./patch/patch.js";
import { load } from "./load.js";
import { loadingScreen } from "./util/loadingScreen.js";
let isReady = false;
let callbacks = [];
export interface IRuntimeLoading {
container? : string | HTMLElement;
canvas?: string | HTMLCanvasElement;
assets?: string;
scene: Function | string;
activeCamera?: Function | string | any;
patch?: Array<any>;
beforeFirstRender?: Function;
ktx?: boolean | Array<string>;
enableOfflineSupport?: boolean;
progress?: Function;
loadingScreen?: any;
load? : string | Array<string>;
babylon? : string;
}
let options : IRuntimeLoading = {
container : null,
canvas : null,
assets : null,
scene : null,
activeCamera : null,
patch : null,
ktx : false,
enableOfflineSupport : false,
progress: null,
loadingScreen: null,
load : null,
babylon : null
};
/**
 * Boots the runtime: sets up the engine/canvas, creates or loads the scene,
 * applies patches, then starts the render loop and flushes ready() callbacks.
 * (The window resize listener had been displaced by a garbled merge; it is
 * restored below.)
 */
export function launch(obj: IRuntimeLoading | string) : Promise<BABYLON.Scene> {
    isReady = false;
    options = extend(options, obj);
    return new Promise((resolve, reject) => {
        _setup().then(() => {
            _createScene().then(() => {
                _load().then(() => {
                    _patch().then(() => {
                        _checkActiveCamera();
                        _beforeFirstRender();
                        // RESIZE: keep the engine sized to the window, and size it once now.
                        window.addEventListener('resize', function() {
                            global.engine.resize();
                        });
                        global.engine.resize();
                        start();
                        isReady = true;
                        loadingScreen.isVisible = false;
                        // Flush callbacks queued via ready() before launch completed.
                        callbacks.forEach(function(callback) {
                            try {
                                callback.call(global.scene);
                            }
                            catch (ex) {
                                console.error(ex);
                            }
                        });
                        callbacks.length = 0;
                        resolve(global.scene);
                    });
                });
            }, (err) => {
                reject(err);
            });
        });
    });
}
function _setup() : Promise<any> {
return _babylon().then(() => {
// CANVAS
if (options.canvas) {
if (is.String(options.canvas)) {
let element = document.getElementById(<string> options.canvas);
if (is.DOM.canvas(element)) {
global.canvas = element;
}
else {
console.error("_r.launch - " + options.canvas + "is not a valid HTMLCanvasElement");
}
}
else {
if (is.DOM.canvas(options.canvas)) {
global.canvas = options.canvas;
}
else {
console.error("_r.launch - canvas parameter should be a string or a HTMLCanvasElement");
}
}
}
if (options.container) {
if (is.String(options.container)) {
let parent = document.getElementById(<string> options.container);
parent.appendChild(global.canvas);
}
else {
(<HTMLElement> options.container).appendChild(global.canvas);
}
}
else {
document.body.appendChild(global.canvas);
}
// KTX
if (options.ktx) {
if (is.Array(options.ktx)) {
global.engine.setTextureFormatToUse(<string[]> options.ktx);
}
else {
if (options.ktx === true) {
global.engine.setTextureFormatToUse(['-astc.ktx', '-dxt.ktx', '-pvrtc.ktx', '-etc2.ktx']);
}
}
}
// ENABLE OFFLINE SUPPORT
if (options.enableOfflineSupport) {
global.engine.enableOfflineSupport = options.enableOfflineSupport;
}
else {
global.engine.enableOfflineSupport = false;
}
// LOADING SCREEN
if (options.loadingScreen) {
global.engine.loadingScreen = options.loadingScreen;
}
loadingScreen.isVisible = true;
});
}
function _babylon() : Promise<any> {
if (options.babylon) {
switch (options.babylon) {
case 'preview' :
return load('https://preview.babylonjs.com/babylon.js');
case 'stable':
return load('https://cdn.babylonjs.com/babylon.js');
default:
return load(options.babylon);
}
}
else {
return new Promise((resolve, reject) => { resolve(); });
}
}
function _createScene() : Promise<any> {
if (options.scene) {
if (is.String(options.scene)) {
// scene is a filename
if (options.assets) {
return load.assets(options.assets + <string>options.scene, null, (evt : BABYLON.SceneLoaderProgressEvent) => {
if (options.progress) {
options.progress(evt);
}
}).then((assetsContainer) => {
assetsContainer.addAllToScene();
});
} else {
return load.assets(<string>options.scene, null, (evt : BABYLON.SceneLoaderProgressEvent) => {
if (options.progress) {
options.progress(evt);
}
}).then((assetsContainer) => {
assetsContainer.addAllToScene();
});
}
} else {
return new Promise((resolve, reject) => {
if (is.Function(options.scene)) {
try {
let result = eval("var canvas=_r.canvas; var engine = _r.engine; var scene=_r.scene; var createScene=" + options.scene + ';createScene()');
if (BABYLON.Engine.LastCreatedEngine.scenes.length == 2) {
BABYLON.Engine.LastCreatedEngine.scenes[0].dispose();
}
if (is.Scene(result)) {
global.scene = result;
}
resolve();
} catch (ex) {
reject(ex);
throw ex;
}
} else {
if (is.Scene(options.scene)) {
global.scene = options.scene;
resolve();
} else {
reject("invalid scene parameter in _r.launch");
throw new Error("invalid scene parameter in _r.launch");
}
}
});
}
}
}
function _patch() : Promise<null> {
if (options.patch) {
return patch(options.patch);
}
else {
return new Promise((resolve) => {
resolve();
});
}
}
function _load() : Promise<null> {
if (options.load) {
return load(options.load);
} else {
return new Promise((resolve) => {
resolve();
});
}
}
function _beforeFirstRender() {
if (options.beforeFirstRender && is.Function(options.beforeFirstRender)) {
options.beforeFirstRender();
}
}
function _checkActiveCamera() {
if (is.String(options.activeCamera)) {
activateCamera(<string> options.activeCamera);
}
else {
if (is.Function(options.activeCamera)) {
try {
let camera = (<Function> options.activeCamera).call(global.scene);
activateCamera(camera.name);
}
catch (ex) {
console.error("_r.launch() error on activeCamera", ex);
}
}
else {
if (is.Camera(options.activeCamera)) {
activateCamera(options.activeCamera.name);
}
}
}
if (!global.scene.activeCamera && global.scene.cameras.length > 0) {
activateCamera(global.scene.cameras[0].name);
}
if (global.scene.activeCamera && global.scene.activeCamera.inputs && !global.scene.activeCamera.inputs.attachedElement) {
global.scene.activeCamera.attachControl(global.canvas);
}
}
// Single render tick; registered with the engine's render loop.
function loop() {
    global.scene.render();
}

// Starts (or resumes) continuous rendering.
export function start() {
    global.engine.runRenderLoop(loop);
}

// Stops continuous rendering; start() resumes it.
export function pause() {
    global.engine.stopRenderLoop(loop);
}
export function ready(callback: Function) {
if (isReady) {
callback.call(global.scene);
}
else {
callbacks.push(callback);
}
} | window.addEventListener('resize', function() {
global.engine.resize(); |
generator_v2.py | from itertools import chain
import math
import logging
import collections
from collections import OrderedDict
import tqdm
import random
import time
from einops import rearrange, repeat
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast
from tl2.proj.fvcore import MODEL_REGISTRY, build_model
# from tl2.proj.stylegan2_ada import persistence
from tl2.launch.launch_utils import global_cfg
from tl2.proj.pytorch.pytorch_hook import VerboseModel
from tl2.proj.pytorch import torch_utils
from tl2.proj.pytorch import torch_utils, init_func
from tl2 import tl2_utils
from tl2.proj.pytorch.examples.nerf import cam_params
from tl2.proj.pytorch.examples.nerf import volume_rendering
from tl2.proj.pytorch.examples.networks import nerf_net
from tl2.proj.pytorch.examples.networks import multi_head_mapping
from tl2.proj.pytorch.examples.networks import cips_net
from exp.pigan import pigan_utils
from exp.dev.nerf_inr.models.generator_nerf_inr import INRNetwork
from exp.dev.nerf_inr.models.generator_nerf_inr import GeneratorNerfINR as GeneratorNerfINR_base
from exp.comm import comm_utils
from exp.comm.models import nerf_network
from exp.comm.models import inr_network
from exp.comm.models import film_layer
from exp.comm.models import mod_conv_fc
# from exp.cips3d.models import multi_head_mapping
class SkipLayer(nn.Module):
    """Merges a skip connection by plain element-wise addition."""

    def __init__(self):
        super(SkipLayer, self).__init__()

    def forward(self, x0, x1):
        # An earlier variant scaled the sum by 1/pi; current behavior is a
        # plain addition.
        return x0 + x1
class SinAct(nn.Module):
    """Element-wise sine activation."""

    def __init__(self):
        super(SinAct, self).__init__()

    def forward(self, x):
        # Apply sine pointwise; shape is preserved.
        return torch.sin(x)
class LinearSinAct(nn.Module):
    """A fully-connected projection followed by an element-wise sine."""

    def __init__(self,
                 in_features,
                 out_features):
        super(LinearSinAct, self).__init__()
        self.linear = nn.Linear(in_features=in_features, out_features=out_features)
        self.sin = SinAct()

    def forward(self, x, *args, **kwargs):
        # Extra positional/keyword arguments are accepted and ignored.
        return self.sin(self.linear(x))
class FiLMLayer(nn.Module):
    """FiLM-style conditioning layer: a linear projection whose normalized
    output is modulated by a per-sample (gain, bias) pair derived from a
    style vector.

    Note: despite the `sin` member, `forward` applies LeakyReLU, not sine
    (the sine path is commented out below).
    """

    def __init__(self,
                 in_dim,
                 out_dim,
                 style_dim,
                 use_style_fc=True,
                 which_linear=nn.Linear,
                 **kwargs):
        super(FiLMLayer, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.style_dim = style_dim
        self.use_style_fc = use_style_fc
        self.linear = which_linear(in_dim, out_dim)
        # self.linear.apply(film_layer.frequency_init(25))
        # self.gain_scale = film_layer.LinearScale(scale=15, bias=30)
        # Gain scaling is currently disabled (identity); the commented lines
        # above show the previously used initialization / scaling variants.
        self.gain_scale = nn.Identity()
        # Prepare gain and bias layers
        if use_style_fc:
            # Project the style vector to per-channel gain and bias.
            self.gain_fc = which_linear(style_dim, out_dim)
            self.bias_fc = which_linear(style_dim, out_dim)
            # self.gain_fc.weight.data.mul_(0.25)
            # self.bias_fc.weight.data.mul_(0.25)
        else:
            # Caller supplies gain/bias packed into `style`; report the
            # required style width (2 * out_dim) via self.style_dim.
            self.style_dim = out_dim * 2
        self.sin = SinAct()  # kept as a member but unused in forward
        self.lrelu = nn.LeakyReLU(0.2, inplace=True)
        # self.register_buffer('stored_mean', torch.zeros(output_size))
        # self.register_buffer('stored_var', torch.ones(output_size))
        pass

    def forward(self,
                x,
                style):
        """
        :param x: (b, c) or (b, n, c)
        :param style: (b, c), or (b, 2 * out_dim) when use_style_fc is False
        :return: modulated features with the same leading shape as `x`
        """
        if self.use_style_fc:
            gain = self.gain_fc(style)
            gain = self.gain_scale(gain)
            bias = self.bias_fc(style)
        else:
            # Unpack the precomputed (gain, bias) halves from the style vector.
            style = rearrange(style, "b (n c) -> b n c", n=2)
            gain, bias = style.unbind(dim=1)
            gain = self.gain_scale(gain)
        if x.dim() == 3:
            # Broadcast per-sample modulation over the middle (point) axis.
            gain = rearrange(gain, "b c -> b 1 c")
            bias = rearrange(bias, "b c -> b 1 c")
        elif x.dim() == 2:
            pass
        else:
            assert 0
        x = self.linear(x)
        # Normalize features to unit RMS along the channel dimension.
        x = x * torch.rsqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + 1e-8)
        # out = self.sin(gain * x + bias)
        out = self.lrelu((gain + 1.) * x + bias)
        return out

    def __repr__(self):
        s = f'{self.__class__.__name__}(' \
            f'in_dim={self.in_dim}, ' \
            f'out_dim={self.out_dim}, ' \
            f'style_dim={self.style_dim}, ' \
            f'use_style_fc={self.use_style_fc}, ' \
            f')'
        return s
class INRNetwork_Skip(nn.Module):
    # Summarize the constructor arguments recorded in ``self.repr``.
    def __repr__(self): return f"{self.__class__.__name__}({self.repr})"

    def __init__(self,
                 input_dim,
                 style_dim,
                 hidden_layers,
                 dim_scale=1,
                 rgb_dim=3,
                 device=None,
                 name_prefix='inr',
                 **kwargs):
        """
        Implicit-neural-representation head: ``hidden_layers`` blocks of two
        FiLM-modulated linear layers each, with a StyleGAN2-style cumulative
        to-RGB skip accumulated after every block.

        :param input_dim: feature dim of the per-point input
        :param style_dim: dim of each style vector fed to the FiLM layers
        :param hidden_layers: number of FiLM blocks (two FiLM layers per block)
        :param dim_scale: width multiplier applied to the channel table
        :param rgb_dim: number of output color channels
        :param device: stored only; not used by this module
        :param name_prefix: prefix of the style keys looked up in style_dict
        :param kwargs: ignored
        """
        super().__init__()
        self.repr = f"input_dim={input_dim}, " \
                    f"style_dim={style_dim}, " \
                    f"hidden_layers={hidden_layers}, " \
                    f"dim_scale={dim_scale}, "

        self.device = device
        self.rgb_dim = rgb_dim
        self.hidden_layers = hidden_layers
        self.name_prefix = name_prefix

        # StyleGAN-style width table indexed by block; comments give the
        # resolution each entry corresponds to in the original design.
        self.channels = {
            0: int(512 * dim_scale),  # 4
            1: int(512 * dim_scale),  # 8
            2: int(512 * dim_scale),  # 16
            3: int(512 * dim_scale),  # 32
            4: int(512 * dim_scale),  # 64
            5: int(128 * dim_scale),  # 128
            6: int(64 * dim_scale),  # 256
            7: int(32 * dim_scale),  # 512
            8: int(16 * dim_scale),  # 1024
        }

        # Maps style-dict key -> required style dim, filled while building.
        self.style_dim_dict = {}

        _out_dim = input_dim

        self.network = nn.ModuleList()
        self.to_rbgs = nn.ModuleList()
        for i in range(hidden_layers):
            _in_dim = _out_dim
            _out_dim = self.channels[i]

            # First FiLM layer of the block (may change the width).
            _layer = film_layer.FiLMLayer(in_dim=_in_dim,
                                          out_dim=_out_dim,
                                          style_dim=style_dim)
            self.network.append(_layer)
            self.style_dim_dict[f'{name_prefix}_w{i}_0'] = _layer.style_dim

            # Second FiLM layer of the block (width-preserving).
            _layer = film_layer.FiLMLayer(in_dim=_out_dim,
                                          out_dim=_out_dim,
                                          style_dim=style_dim)
            self.network.append(_layer)
            self.style_dim_dict[f'{name_prefix}_w{i}_1'] = _layer.style_dim

            # Per-block to-RGB projection; outputs are summed across blocks.
            to_rgb = inr_network.ToRGB(in_dim=_out_dim, dim_rgb=3)
            self.to_rbgs.append(to_rgb)

        self.tanh = nn.Sequential(
            # nn.Linear(hidden_dim, rgb_dim),
            nn.Tanh()
        )
        # self.to_rbg.apply(frequency_init(25))

        torch_utils.print_number_params(
            {
                'network': self.network,
                'to_rbgs': self.to_rbgs,
                'inr_net': self
            })
        logging.getLogger('tl').info(self)
        pass

    def forward(self,
                input,
                style_dict,
                **kwargs):
        """
        :param input: per-point features, (b, num_points, input_dim)
        :param style_dict: maps '<name_prefix>_w{i}_{0,1}' -> style tensors
        :param kwargs: ignored
        :return:
        - out: (b, num_points, 3) RGB in [-1, 1] (tanh at the end)
        """
        x = input
        # Cumulative skip image; starts at scalar 0 so the first to-RGB add
        # broadcasts trivially.
        rgb = 0
        for index in range(self.hidden_layers):
            _layer = self.network[index * 2]
            style = style_dict[f'{self.name_prefix}_w{index}_0']
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(_layer,
                                             inputs_args=(x, style),
                                             name_prefix=f"{self.name_prefix}.network.{index}.0.")
            x = _layer(x, style)

            _layer = self.network[index * 2 + 1]
            style = style_dict[f'{self.name_prefix}_w{index}_1']
            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(_layer,
                                             inputs_args=(x, style),
                                             name_prefix=f"{self.name_prefix}.network.{index}.1.")
            x = _layer(x, style)

            if global_cfg.tl_debug:
                VerboseModel.forward_verbose(self.to_rbgs[index],
                                             inputs_args=(x, rgb),
                                             name_prefix=f'to_rgb.{index}')
            # Accumulate this block's RGB contribution onto the running skip.
            rgb = self.to_rbgs[index](x, skip=rgb)

        # if global_cfg.tl_debug:
        #   VerboseModel.forward_verbose(self.to_rbg,
        #                                inputs_args=(x, ),
        #                                name_prefix='to_rgb.')
        # out = self.to_rbg(x)
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.tanh,
                                         inputs_args=(rgb, ),
                                         name_prefix='tanh.')
        out = self.tanh(rgb)
        return out
class ModSinLayer(nn.Module):
def __repr__(self): return f"{self.__class__.__name__}({self.repr})"
def __init__(self,
in_dim,
use_style_fc=False,
style_dim=None,
which_linear=nn.Linear,
spectral_norm=False,
eps=1e-5,
freq=1,
phase=0,
**kwargs):
super(ModSinLayer, self).__init__()
self.repr = f"in_dim={in_dim}, use_style_fc={use_style_fc}, style_dim={style_dim}, " \
f"freq={freq}, phase={phase}"
self.in_dim = in_dim
self.use_style_fc = use_style_fc
self.style_dim = style_dim
self.freq = freq
self.phase = phase
self.spectral_norm = spectral_norm
# Prepare gain and bias layers
if use_style_fc:
self.gain_fc = which_linear(style_dim, in_dim)
self.bias_fc = which_linear(style_dim, in_dim)
if spectral_norm:
self.gain_fc = nn.utils.spectral_norm(self.gain_fc)
self.bias_fc = nn.utils.spectral_norm(self.bias_fc)
else:
self.style_dim = in_dim * 2
self.eps = eps
self.lrelu = nn.LeakyReLU(0.2, inplace=True)
# self.register_buffer('stored_mean', torch.zeros(output_size))
# self.register_buffer('stored_var', torch.ones(output_size))
pass
def forward(self,
x,
style):
"""
Calculate class-conditional gains and biases.
:param x: (b, c) or (b, n, c)
:param style: (b, c)
:return:
"""
assert style.shape[-1] == self.style_dim
if self.use_style_fc:
gain = self.gain_fc(style) + 1.
bias = self.bias_fc(style)
else:
style = rearrange(style, "b (n c) -> b n c", n=2)
gain, bias = style.unbind(dim=1)
gain = gain + 1.
if x.dim() == 3:
gain = rearrange(gain, "b c -> b 1 c")
bias = rearrange(bias, "b c -> b 1 c")
elif x.dim() == 2:
pass
else:
assert 0
# x = torch.sin(self.freq * x + self.phase)
# out = x * gain + bias
x = x * torch.rsqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + 1e-8)
x = x * gain + bias
out = self.lrelu(x)
return out
class ModSinLayer_NoBias(nn.Module):
def __repr__(self): return f"{self.__class__.__name__}({self.repr})"
def __init__(self,
in_dim,
use_style_fc=False,
style_dim=None,
which_linear=nn.Linear,
spectral_norm=False,
eps=1e-5,
freq=1,
phase=0,
**kwargs):
super(ModSinLayer_NoBias, self).__init__()
self.repr = f"in_dim={in_dim}, use_style_fc={use_style_fc}, style_dim={style_dim}, " \
f"freq={freq}, phase={phase}"
self.in_dim = in_dim
self.use_style_fc = use_style_fc
self.style_dim = style_dim
self.freq = freq
self.phase = phase
self.spectral_norm = spectral_norm
# Prepare gain and bias layers
if use_style_fc:
self.gain_fc = which_linear(style_dim, in_dim)
# self.bias_fc = which_linear(style_dim, in_dim)
if spectral_norm:
self.gain_fc = nn.utils.spectral_norm(self.gain_fc)
# self.bias_fc = nn.utils.spectral_norm(self.bias_fc)
else:
self.style_dim = in_dim * 2
self.eps = eps
pass
def forward(self,
x,
style):
"""
Calculate class-conditional gains and biases.
:param x: (b, c) or (b, n, c)
:param style: (b, c)
:return:
"""
assert style.shape[-1] == self.style_dim
if self.use_style_fc:
gain = self.gain_fc(style) + 1.
else:
style = rearrange(style, "b (n c) -> b n c", n=2)
gain, bias = style.unbind(dim=1)
gain = gain + 1.
if x.dim() == 3:
gain = rearrange(gain, "b c -> b 1 c")
elif x.dim() == 2:
pass
else:
assert 0
x = torch.sin(self.freq * x + self.phase)
# out = x * gain + bias
out = x * gain
return out
class SinBlock(nn.Module):
    """Residual block of two style-modulated FC layers (SinStyleMod) with
    LeakyReLU activations and an optional skip connection.

    The style vectors are looked up in a dict under keys
    ``'<name_prefix>_0'`` and ``'<name_prefix>_1'``; the required dims are
    exported via ``self.style_dim_dict`` so callers can size their mappers.
    """

    def __init__(self,
                 in_dim,
                 out_dim,
                 style_dim,
                 name_prefix,
                 ):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.style_dim = style_dim
        self.name_prefix = name_prefix

        # style-dict key -> style dim required by the corresponding layer.
        self.style_dim_dict = {}

        self.mod1 = mod_conv_fc.SinStyleMod(in_channel=in_dim,
                                            out_channel=out_dim,
                                            style_dim=style_dim,
                                            use_style_fc=True,
                                            )
        self.style_dim_dict[f'{name_prefix}_0'] = self.mod1.style_dim
        self.act1 = nn.LeakyReLU(0.2, inplace=True)

        self.mod2 = mod_conv_fc.SinStyleMod(in_channel=out_dim,
                                            out_channel=out_dim,
                                            style_dim=style_dim,
                                            use_style_fc=True,
                                            )
        self.style_dim_dict[f'{name_prefix}_1'] = self.mod2.style_dim
        self.act2 = nn.LeakyReLU(0.2, inplace=True)

        self.skip = SkipLayer()

    def forward(self,
                x,
                style_dict,
                skip=False):
        """
        :param x: input features
        :param style_dict: styles keyed by '<name_prefix>_0' / '<name_prefix>_1'
        :param skip: when True and the widths match, merge the input back in
        :return: block output
        """
        residual = x

        h = self.mod1(x, style_dict[f'{self.name_prefix}_0'])
        h = self.act1(h)

        h = self.mod2(h, style_dict[f'{self.name_prefix}_1'])
        out = self.act2(h)

        # Skip only applies when input and output widths agree.
        if skip and out.shape[-1] == residual.shape[-1]:
            out = self.skip(out, residual)
        return out

    def __repr__(self):
        return (f"{self.__class__.__name__}(in_dim={self.in_dim}, "
                f"out_dim={self.out_dim}, "
                f"style_dim={self.style_dim})")
class ToRGB(nn.Module):
    """Linear projection of features to RGB with an optional additive skip.

    With ``use_equal_fc=True`` an equalized-lr linear is used instead of the
    plain ``nn.Linear``.
    """

    def __init__(self,
                 in_dim,
                 dim_rgb=3,
                 use_equal_fc=False):
        super().__init__()
        self.in_dim = in_dim
        self.dim_rgb = dim_rgb

        if use_equal_fc:
            self.linear = mod_conv_fc.EqualLinear(in_dim, dim_rgb, scale=1.)
        else:
            self.linear = nn.Linear(in_dim, dim_rgb)

    def forward(self,
                input,
                skip=None):
        """
        :param input: (..., in_dim) features
        :param skip: optional RGB image to accumulate onto (broadcastable)
        :return: (..., dim_rgb) projection, plus ``skip`` when given
        """
        rgb = self.linear(input)
        return rgb if skip is None else rgb + skip
@MODEL_REGISTRY.register(name_prefix=__name__)
# class Generator_Diffcam(GeneratorNerfINR_base):
class Generator_Diffcam(nn.Module):
    """NeRF + INR generator driven by differentiable-camera rays.

    Pipeline: three mapping networks (shape / appearance / INR) produce a
    style dict; a SIREN-skip NeRF renders per-pixel features via volume
    rendering; a CIPS-style INR net turns the features into the final image.
    An auxiliary linear+tanh head (``aux_to_rbg``) renders a low-capacity RGB
    image from the same features for auxiliary supervision.
    """

    def __repr__(self):
        return tl2_utils.get_class_repr(self)

    def __init__(self,
                 nerf_cfg,
                 mapping_shape_cfg,
                 mapping_app_cfg,
                 inr_cfg,
                 mapping_inr_cfg,
                 shape_block_end_index=None,
                 app_block_end_index=None,
                 inr_block_end_index=None,
                 device='cuda',
                 **kwargs):
        super(Generator_Diffcam, self).__init__()
        self.repr_str = tl2_utils.dict2string(dict_obj={
            'nerf_cfg': nerf_cfg,
            'mapping_shape_cfg': mapping_shape_cfg,
            'mapping_app_cfg': mapping_app_cfg,
            'inr_cfg': inr_cfg,
            'mapping_inr_cfg': mapping_inr_cfg,
            'shape_block_end_index': shape_block_end_index,
            'app_block_end_index': app_block_end_index,
            'inr_block_end_index': inr_block_end_index,
        })

        self.device = device
        self.inr_block_end_index = inr_block_end_index

        # Names of submodules, used below for parameter-count logging.
        self.module_name_list = []

        # nerf_net
        self.nerf_net = nerf_net.NeRFNetwork_SIREN_skip(
            shape_block_end_index=shape_block_end_index,
            app_block_end_index=app_block_end_index,
            **nerf_cfg)
        self.module_name_list.append('nerf_net')

        # mapping shape
        self.mapping_shape = multi_head_mapping.MultiHeadMappingNetwork(**{
            **mapping_shape_cfg,
            'head_dim_dict': self.nerf_net.style_dim_dict_shape
        })
        self.module_name_list.append('mapping_shape')

        # mapping appearance
        self.mapping_app = multi_head_mapping.MultiHeadMappingNetwork(**{
            **mapping_app_cfg,
            'head_dim_dict': self.nerf_net.style_dim_dict_app
        })
        self.module_name_list.append('mapping_app')

        # Width of the per-pixel feature the NeRF hands to the INR net.
        _in_dim = nerf_cfg.app_net_cfg.out_dim

        # inr_net
        self.inr_net = cips_net.CIPSNet(**{
            **inr_cfg,
            "input_dim": _in_dim,
            'add_out_layer': True,
        })
        self.module_name_list.append('inr_net')

        self.mapping_inr = multi_head_mapping.MultiHeadMappingNetwork(**{
            **mapping_inr_cfg,
            'head_dim_dict': self.inr_net.style_dim_dict
        })
        self.module_name_list.append('mapping_inr')

        # Auxiliary RGB head on the raw NeRF features.
        self.aux_to_rbg = nn.Sequential(
            nn.Linear(_in_dim, 3),
            nn.Tanh()
        )
        self.aux_to_rbg.apply(nerf_network.frequency_init(25))
        self.module_name_list.append('aux_to_rbg')

        logger = logging.getLogger('tl')
        models_dict = {}
        for name in self.module_name_list:
            models_dict[name] = getattr(self, name)
        models_dict['G'] = self
        torch_utils.print_number_params(models_dict=models_dict, logger=logger)
        logger.info(self)
        pass

    def forward(self,
                zs,
                rays_o,
                rays_d,
                nerf_kwargs={},  # NOTE(review): mutable default; read-only here, but safer as None
                psi=1,
                return_aux_img=False,
                grad_points=None,
                forward_points=None,  # disable gradients
                **kwargs):
        """
        Generates images from a noise vector, rendering parameters, and camera distribution.
        Uses the hierarchical sampling scheme described in NeRF.

        :param zs: {k: (b, z_dim), ...}
        :param rays_o: (b, h, w, 3) in world space
        :param rays_d: (b, h, w, 3) in world space
        :param nerf_kwargs: rendering params (near, far, N_samples, N_importance, ...)
        :param psi: truncation in [0, 1]; < 1 pulls styles toward the average
        :param return_aux_img: also render the auxiliary RGB image
        :param grad_points: if set and < h*w, backprop only through this many rays
        :param forward_points: if set, render in chunks of this many rays without gradients

        :return:
        - pixels: (b, 3, h, w)
        - pitch_yaw: (b, 2)
        """

        # mapping network
        style_dict = self.mapping_network(**zs)

        if psi < 1:
            # Truncation trick: interpolate toward the running-average styles.
            avg_styles = self.generate_avg_frequencies(device=self.device)
            style_dict = self.get_truncated_freq_phase(
                raw_style_dict=style_dict, avg_style_dict=avg_styles, raw_lambda=psi)

        b, h, w, c = rays_o.shape
        rays_o = rearrange(rays_o, "b h w c -> b (h w) c")
        rays_d = rearrange(rays_d, "b h w c -> b (h w) c")

        if grad_points is not None and grad_points < h * w:
            # Backprop through a random subset of rays only (memory saving).
            imgs, ret_maps = self.part_grad_forward(
                rays_o=rays_o,
                rays_d=rays_d,
                style_dict=style_dict,
                nerf_kwargs=nerf_kwargs,
                return_aux_img=return_aux_img,
                grad_points=grad_points)
        else:
            imgs, ret_maps = self.whole_grad_forward(
                rays_o=rays_o,
                rays_d=rays_d,
                style_dict=style_dict,
                nerf_kwargs=nerf_kwargs,
                return_aux_img=return_aux_img,
                forward_points=forward_points)

        imgs = rearrange(imgs, "b (h w) c -> b c h w", h=h, w=w)

        # Reshape auxiliary maps (images or scalar maps) back to image layout.
        ret_imgs = {}
        for name, v_map in ret_maps.items():
            if v_map.dim() == 3:
                v_map = rearrange(v_map, "b (h w) c -> b c h w", h=h, w=w)
            elif v_map.dim() == 2:
                v_map = rearrange(v_map, "b (h w) -> b h w", h=h, w=w)
            ret_imgs[name] = v_map

        return imgs, ret_imgs

    def get_rays_axis_angle(self,
                            R,
                            t,
                            fx,
                            fy,
                            H: int,
                            W: int,
                            N_rays: int = -1):
        """
        Build world-space rays from an axis-angle camera pose.

        :param R: (b, 3) axis-angle rotation
        :param t: (b, 3) translation
        :param fx: focal length x
        :param fy: focal length y
        :param H: image height
        :param W: image width
        :param N_rays: number of rays to sample; -1 means all

        :return
        - rays_o: (b, H, W, 3)
        - rays_d: (b, H, W, 3)
        - select_inds: (b, H, W)
        """
        rays_o, rays_d, select_inds = cam_params.get_rays(
            rot=R,
            trans=t,
            focal_x=fx,
            focal_y=fy,
            H=H,
            W=W,
            N_rays=N_rays,
            flatten=False)

        return rays_o, rays_d, select_inds

    def get_batch_style_dict(self, b, style_dict):
        # Slice out sample b from every style tensor, keeping the batch dim.
        ret_style_dict = {}
        for name, style in style_dict.items():
            ret_style_dict[name] = style[[b]]
        return ret_style_dict

    def whole_grad_forward(self,
                           rays_o,
                           rays_d,
                           style_dict,
                           nerf_kwargs,
                           return_aux_img=True,
                           forward_points=None,
                           **kwargs):
        """Render all rays. When ``forward_points`` is set and smaller than the
        ray count, render gradient-free in per-sample chunks to bound memory.
        """

        if forward_points is not None and forward_points < rays_o.shape[1]:  # no gradients
            # stage forward
            with torch.no_grad():
                batch_size = rays_o.shape[0]
                num_points = rays_o.shape[1]

                near = nerf_kwargs['near']
                far = nerf_kwargs['far']
                N_samples = nerf_kwargs['N_samples']
                perturb = self.training
                # Stratified samples along every ray.
                z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,
                                                                    rays_d=rays_d,
                                                                    near=near,
                                                                    far=far,
                                                                    N_samples=N_samples,
                                                                    perturb=perturb)

                batch_image_ddict = collections.defaultdict(list)
                for b in range(batch_size):
                    image_ddict = collections.defaultdict(list)

                    head = 0
                    while head < num_points:
                        tail = head + forward_points
                        cur_style_dict = self.get_batch_style_dict(b=b, style_dict=style_dict)

                        cur_inr_img, cur_ret_maps = self.points_forward(
                            rays_o=rays_o[[b], head:tail],  # (b, hxw, 3)
                            rays_d=rays_d[[b], head:tail],  # (b, hxw, 3)
                            points=points[[b], head:tail],  # (b, hxw, Nsamples, 3)
                            z_vals=z_vals[[b], head:tail],  # (b, hxw, Nsamples)
                            style_dict=cur_style_dict,
                            nerf_kwargs=nerf_kwargs,
                            return_aux_img=return_aux_img)

                        image_ddict['inr_img'].append(cur_inr_img)
                        for k, v in cur_ret_maps.items():
                            image_ddict[k].append(v)
                        head += forward_points

                    # Stitch this sample's chunks back along the ray axis.
                    for k, v in image_ddict.items():
                        one_image = torch.cat(v, dim=1)
                        batch_image_ddict[k].append(one_image)

                # Stack per-sample images back into a batch.
                ret_maps = {}
                for k, v in batch_image_ddict.items():
                    ret_maps[k] = torch.cat(v, dim=0)
                imgs = ret_maps.pop('inr_img')

        else:
            near = nerf_kwargs['near']
            far = nerf_kwargs['far']
            N_samples = nerf_kwargs['N_samples']
            perturb = self.training
            z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,
                                                                rays_d=rays_d,
                                                                near=near,
                                                                far=far,
                                                                N_samples=N_samples,
                                                                perturb=perturb)

            # transformed_points = rearrange(transformed_points, "b (h w s) c -> b (h w) s c", h=img_size, s=num_steps)
            # transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded,
            #                                                 "b (h w s) c -> b (h w) s c", h=img_size, s=num_steps)

            imgs, ret_maps = self.points_forward(
                rays_o=rays_o,
                rays_d=rays_d,
                points=points,
                z_vals=z_vals,
                style_dict=style_dict,
                nerf_kwargs=nerf_kwargs,
                return_aux_img=return_aux_img)

        return imgs, ret_maps

    def part_grad_forward(self,
                          rays_o,
                          rays_d,
                          style_dict,
                          nerf_kwargs,
                          return_aux_img,
                          grad_points):
        """Render with gradients on a random subset of ``grad_points`` rays and
        without gradients on the rest, then scatter both back together.
        """
        near = nerf_kwargs['near']
        far = nerf_kwargs['far']
        N_samples = nerf_kwargs['N_samples']
        perturb = self.training
        # z_vals: (b, hxw, Nsamples), points: (b, hxw, Nsamples, 3)
        z_vals, points = volume_rendering.ray_sample_points(rays_o=rays_o,  # (b, hxw, 3)
                                                            rays_d=rays_d,  # (b, hxw, 3)
                                                            near=near,
                                                            far=far,
                                                            N_samples=N_samples,
                                                            perturb=perturb)

        # transformed_points = rearrange(transformed_points, "b (h w s) c -> b (h w) s c", h=img_size, s=num_steps)
        # transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded,
        #                                                 "b (h w s) c -> b (h w) s c", h=img_size, s=num_steps)

        batch_size = rays_o.shape[0]
        num_points = rays_o.shape[1]

        device = self.device
        assert num_points > grad_points
        idx_grad, idx_no_grad = torch_utils.batch_random_split_indices(bs=batch_size,
                                                                       num_points=num_points,
                                                                       grad_points=grad_points,
                                                                       device=device)
        # rand_idx = torch.randperm(num_points, device=device)
        # idx_grad = rand_idx[:grad_points]
        # idx_no_grad = rand_idx[grad_points:]

        inr_img_grad, ret_maps_grad = self.points_forward(
            rays_o=rays_o,
            rays_d=rays_d,
            points=points,
            z_vals=z_vals,
            style_dict=style_dict,
            nerf_kwargs=nerf_kwargs,
            return_aux_img=return_aux_img,
            idx_grad=idx_grad)

        with torch.no_grad():
            inr_img_no_grad, ret_maps_no_grad = self.points_forward(
                rays_o=rays_o,
                rays_d=rays_d,
                points=points,
                z_vals=z_vals,
                style_dict=style_dict,
                nerf_kwargs=nerf_kwargs,
                return_aux_img=return_aux_img,
                idx_grad=idx_no_grad)

        # Recombine the grad / no-grad halves in their original ray order.
        imgs = comm_utils.batch_scatter_points(idx_grad=idx_grad,
                                               points_grad=inr_img_grad,
                                               idx_no_grad=idx_no_grad,
                                               points_no_grad=inr_img_no_grad,
                                               num_points=num_points)
        ret_maps = {}
        for k in ret_maps_grad.keys():
            comp_map = comm_utils.batch_scatter_points(idx_grad=idx_grad,
                                                       points_grad=ret_maps_grad[k],
                                                       idx_no_grad=idx_no_grad,
                                                       points_no_grad=ret_maps_no_grad[k],
                                                       num_points=num_points)
            ret_maps[k] = comp_map
        return imgs, ret_maps

    def points_forward(self,
                       rays_o,
                       rays_d,
                       points,
                       z_vals,
                       style_dict,
                       nerf_kwargs,
                       return_aux_img,
                       idx_grad=None,
                       **kwargs):
        """
        Run the NeRF on sampled points (with optional hierarchical fine
        sampling), integrate along the rays, and decode with the INR net.

        :param rays_o: (b, hxw, 3)
        :param rays_d: (b, hxw, 3)
        :param points: (b, hxw, Nsamples, 3)
        :param z_vals: (b, hxw, Nsamples)
        :param style_dict:
        :param nerf_kwargs:
        :param return_aux_img:
        :param idx_grad: (b, N_grad, ) optional subset of rays to process
        :param kwargs:
        :return:
        - inr_img: decoded image for the processed rays
        - ret_maps: auxiliary maps from ray integration (plus 'aux_img')
        """
        device = points.device
        viewdirs = volume_rendering.get_viewdirs(rays_d=rays_d)
        # viewdirs = viewdirs[..., None, :].expand_as(points)
        N_samples = nerf_kwargs['N_samples']

        if idx_grad is not None:
            # Restrict every per-ray tensor to the selected subset.
            rays_o = comm_utils.batch_gather_points(points=rays_o, idx_grad=idx_grad)
            rays_d = comm_utils.batch_gather_points(points=rays_d, idx_grad=idx_grad)
            points = comm_utils.batch_gather_points(points=points, idx_grad=idx_grad)
            z_vals = comm_utils.batch_gather_points(points=z_vals, idx_grad=idx_grad)

        points = rearrange(points, "b Nrays Nsamples c -> b (Nrays Nsamples) c")
        coarse_viewdirs = repeat(viewdirs, "b Nrays c -> b (Nrays Nsamples) c", Nsamples=N_samples)

        # Model prediction on course points
        coarse_output = self.nerf_net(
            x=points,  # b (Nrays Nsamples) c
            ray_directions=coarse_viewdirs,  # b (Nrays Nsamples) c
            style_dict=style_dict)
        coarse_output = rearrange(
            coarse_output, "b (Nrays Nsamples) rgb_sigma -> b Nrays Nsamples rgb_sigma", Nsamples=N_samples)

        # Re-sample fine points alont camera rays, as described in NeRF
        if nerf_kwargs['N_importance'] > 0:
            # Importance sampling is done without gradients.
            with torch.no_grad():
                raw_sigma = coarse_output[..., -1]
                perturb = self.training
                fine_z_vals, fine_points = volume_rendering.get_fine_points(
                    z_vals=z_vals,
                    rays_o=rays_o,
                    rays_d=rays_d,
                    raw_sigma=raw_sigma,
                    N_importance=nerf_kwargs['N_importance'],
                    perturb=perturb,
                    raw_noise_std=nerf_kwargs['raw_noise_std'],
                    eps=nerf_kwargs['eps'])

            # Model prediction on re-sampled find points
            fine_points = rearrange(fine_points, "b Nrays Nsamples c -> b (Nrays Nsamples) c")
            fine_viewdirs = repeat(viewdirs, "b Nrays c -> b (Nrays Nsamples) c", Nsamples=nerf_kwargs['N_importance'])

            fine_output = self.nerf_net(
                x=fine_points,  # b (Nrays Nsamples) c
                ray_directions=fine_viewdirs,  # b (Nrays Nsamples) c
                style_dict=style_dict)
            fine_output = rearrange(
                fine_output, "b (Nrays Nsamples) rgb_sigma -> b Nrays Nsamples rgb_sigma", Nsamples=nerf_kwargs['N_importance'])

            # Combine course and fine points
            DIM_SAMPLES = 2
            all_z_vals = torch.cat([fine_z_vals, z_vals], dim=DIM_SAMPLES)  # (b, N_rays, N_samples)
            _, indices = torch.sort(all_z_vals, dim=DIM_SAMPLES)  # (b, N_rays, N_samples)
            # gather z_vals
            all_z_vals = torch.gather(all_z_vals, DIM_SAMPLES, indices)  # (b, N_rays, N_samples)

            # (b, N_rays, N_samples, rgb_sigma)
            all_outputs = torch.cat([fine_output, coarse_output], dim=DIM_SAMPLES)
            view_shape = [*indices.shape, *(len(all_outputs.shape) - len(indices.shape)) * [1]]
            all_outputs = torch.gather(all_outputs, DIM_SAMPLES, indices.view(view_shape).expand_as(all_outputs))

        else:
            all_outputs = coarse_output
            all_z_vals = z_vals

        # Create images with NeRF
        all_raw_rgb = all_outputs[..., :-1]
        all_raw_sigma = all_outputs[..., -1]

        pixels_fea, ret_maps = volume_rendering.ray_integration(raw_rgb=all_raw_rgb,
                                                                raw_sigma=all_raw_sigma,
                                                                z_vals=all_z_vals,
                                                                rays_d=rays_d,
                                                                raw_noise_std=nerf_kwargs['raw_noise_std'],
                                                                eps=nerf_kwargs['eps'])

        # inr_net
        inr_img = self.inr_net(pixels_fea, style_dict, block_end_index=self.inr_block_end_index)

        if return_aux_img:
            # aux rgb_branch
            aux_img = self.aux_to_rbg(pixels_fea)
            ret_maps['aux_img'] = aux_img

        return inr_img, ret_maps

    def z_sampler(self,
                  shape,
                  device,
                  dist='gaussian'):
        # NOTE(review): an unknown `dist` leaves `z` unbound (UnboundLocalError)
        # instead of raising a clear ValueError - confirm only the two values
        # below are ever passed.
        if dist == 'gaussian':
            z = torch.randn(shape, device=device)
        elif dist == 'uniform':
            z = torch.rand(shape, device=device) * 2 - 1
        return z

    def get_zs(self,
               b,
               batch_split=1):
        """Sample the three latent codes; optionally split them into
        ``batch_split`` sub-batches (returns a list of dicts in that case).
        """
        z_shape = self.z_sampler(shape=(b, self.mapping_shape.z_dim), device=self.device)
        z_app = self.z_sampler(shape=(b, self.mapping_app.z_dim), device=self.device)
        z_inr = self.z_sampler(shape=(b, self.mapping_inr.z_dim), device=self.device)

        if batch_split > 1:
            zs_list = []
            z_shape_list = z_shape.split(b // batch_split)
            z_app_list = z_app.split(b // batch_split)
            z_inr_list = z_inr.split(b // batch_split)
            for z_shape_, z_app_, z_inr_ in zip(z_shape_list, z_app_list, z_inr_list):
                zs_ = {
                    'z_shape': z_shape_,
                    'z_app': z_app_,
                    'z_inr': z_inr_,
                }
                zs_list.append(zs_)
            return zs_list
        else:
            zs = {
                'z_shape': z_shape,
                'z_app': z_app,
                'z_inr': z_inr,
            }
            return zs

    def mapping_network(self,
                        z_shape,
                        z_app,
                        z_inr):
        """Run the three mapping networks and merge their style dicts."""
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.mapping_shape,
                                         inputs_args=(z_shape,),
                                         submodels=['base_net'],
                                         name_prefix='mapping_shape.')
            VerboseModel.forward_verbose(self.mapping_app,
                                         inputs_args=(z_app,),
                                         submodels=['base_net'],
                                         name_prefix='mapping_app.')
            VerboseModel.forward_verbose(self.mapping_inr,
                                         inputs_args=(z_inr,),
                                         submodels=['base_net', ],
                                         input_padding=50,
                                         name_prefix='mapping_inr.')

        style_dict = {}
        style_dict.update(self.mapping_shape(z_shape))
        style_dict.update(self.mapping_app(z_app))
        style_dict.update(self.mapping_inr(z_inr))

        return style_dict

    def get_truncated_freq_phase(self,
                                 raw_style_dict,
                                 avg_style_dict,
                                 raw_lambda):
        # Truncation trick: style = avg + lambda * (style - avg).
        truncated_style_dict = {}
        for name, avg_style in avg_style_dict.items():
            raw_style = raw_style_dict[name]
            truncated_style = avg_style + raw_lambda * (raw_style - avg_style)
            truncated_style_dict[name] = truncated_style
        return truncated_style_dict

    def generate_avg_frequencies(self,
                                 num_samples=10000,
                                 device='cuda'):
        """Calculates average frequencies and phase shifts"""
        # z = torch.randn((num_samples, self.z_dim), device=device)
        zs = self.get_zs(num_samples)
        with torch.no_grad():
            style_dict = self.mapping_network(**zs)

        # Per-style mean over the sampled batch, keeping a batch dim of 1.
        avg_styles = {}
        for name, style in style_dict.items():
            avg_styles[name] = style.mean(0, keepdim=True)

        # self.avg_styles = avg_styles
        return avg_styles

    def staged_forward(self, *args, **kwargs):
        raise NotImplementedError

    def set_device(self, device):
        # Kept for interface compatibility; device is fixed at construction.
        pass

    def forward_camera_pos_and_lookup(self,
                                      zs,
                                      img_size,
                                      fov,
                                      ray_start,
                                      ray_end,
                                      num_steps,
                                      h_stddev,
                                      v_stddev,
                                      h_mean,
                                      v_mean,
                                      hierarchical_sample,
                                      camera_pos,
                                      camera_lookup,
                                      psi=1,
                                      sample_dist=None,
                                      lock_view_dependence=False,
                                      clamp_mode='relu',
                                      nerf_noise=0.,
                                      white_back=False,
                                      last_back=False,
                                      return_aux_img=False,
                                      grad_points=None,
                                      forward_points=None,
                                      **kwargs):
        """
        Generates images from a noise vector, rendering parameters, and camera distribution.
        Uses the hierarchical sampling scheme described in NeRF.

        NOTE(review): this method references self.mapping_network_nerf /
        self.mapping_network_inr and calls part_grad_forward/whole_grad_forward
        with pigan-style keyword arguments (img_size, fov, ...) that do not
        match the definitions above in this class - it looks like a legacy
        code path carried over from an earlier interface; confirm it is unused
        before relying on it.

        :param z: (b, z_dim)
        :param img_size:
        :param fov: face: 12
        :param ray_start: face: 0.88
        :param ray_end: face: 1.12
        :param num_steps: face: 12
        :param h_stddev: face: 0.3
        :param v_stddev: face: 0.155
        :param h_mean: face: pi/2
        :param v_mean: face: pi/2
        :param hierarchical_sample: face: true
        :param camera_pos: (b, 3)
        :param camera_lookup: (b, 3)
        :param psi: [0, 1]
        :param sample_dist: mode for sample_camera_positions, face: 'gaussian'
        :param lock_view_dependence: face: false
        :param clamp_mode: face: 'relu'
        :param nerf_noise:
        :param last_back: face: false
        :param white_back: face: false
        :param kwargs:
        :return:
        - pixels: (b, 3, h, w)
        - pitch_yaw: (b, 2)
        """

        # mapping network
        if global_cfg.tl_debug:
            VerboseModel.forward_verbose(self.mapping_network_nerf,
                                         inputs_args=(zs['z_nerf'],),
                                         submodels=['base_net'],
                                         name_prefix='mapping_nerf.')
            VerboseModel.forward_verbose(self.mapping_network_inr,
                                         inputs_args=(zs['z_inr'],),
                                         submodels=['base_net', ],
                                         input_padding=50,
                                         name_prefix='mapping_inr.')
        style_dict = self.mapping_network(**zs)

        if psi < 1:
            avg_styles = self.generate_avg_frequencies(device=self.device)
            style_dict = self.get_truncated_freq_phase(
                raw_style_dict=style_dict, avg_style_dict=avg_styles, raw_lambda=psi)

        if grad_points is not None and grad_points < img_size ** 2:
            imgs, pitch_yaw = self.part_grad_forward(
                style_dict=style_dict,
                img_size=img_size,
                fov=fov,
                ray_start=ray_start,
                ray_end=ray_end,
                num_steps=num_steps,
                h_stddev=h_stddev,
                v_stddev=v_stddev,
                h_mean=h_mean,
                v_mean=v_mean,
                hierarchical_sample=hierarchical_sample,
                sample_dist=sample_dist,
                lock_view_dependence=lock_view_dependence,
                clamp_mode=clamp_mode,
                nerf_noise=nerf_noise,
                white_back=white_back,
                last_back=last_back,
                return_aux_img=return_aux_img,
                grad_points=grad_points,
                camera_pos=camera_pos,
                camera_lookup=camera_lookup,
            )
            return imgs, pitch_yaw
        else:
            imgs, pitch_yaw = self.whole_grad_forward(
                style_dict=style_dict,
                img_size=img_size,
                fov=fov,
                ray_start=ray_start,
                ray_end=ray_end,
                num_steps=num_steps,
                h_stddev=h_stddev,
                v_stddev=v_stddev,
                h_mean=h_mean,
                v_mean=v_mean,
                hierarchical_sample=hierarchical_sample,
                sample_dist=sample_dist,
                lock_view_dependence=lock_view_dependence,
                clamp_mode=clamp_mode,
                nerf_noise=nerf_noise,
                white_back=white_back,
                last_back=last_back,
                return_aux_img=return_aux_img,
                forward_points=forward_points,
                camera_pos=camera_pos,
                camera_lookup=camera_lookup,
            )
            return imgs, pitch_yaw
@MODEL_REGISTRY.register(name_prefix=__name__)
class GeneratorNerfINR_freeze_NeRF(Generator_Diffcam):
    """Variant of the generator in which the NeRF backbone and its mapping
    network are frozen (run under no_grad) while the INR side stays trainable.

    NOTE(review): this subclass uses pigan-style attributes and arguments
    (mapping_network_nerf, mapping_network_inr, nerf_rgb_mapping,
    transformed_points, ...) that Generator_Diffcam.__init__ above does not
    create - it appears to target an older parent interface; confirm which
    parent it is actually used with.
    """

    def load_nerf_ema(self, G_ema):
        # Copy EMA weights into the frozen modules; return values of
        # load_state_dict are intentionally ignored.
        ret = self.nerf_net.load_state_dict(G_ema.nerf_net.state_dict())
        ret = self.mapping_network_nerf.load_state_dict(G_ema.mapping_network_nerf.state_dict())
        ret = self.aux_to_rbg.load_state_dict(G_ema.aux_to_rbg.state_dict())

        ret = self.mapping_network_inr.load_state_dict(G_ema.mapping_network_inr.state_dict())
        ret = self.nerf_rgb_mapping.load_state_dict(G_ema.nerf_rgb_mapping.state_dict())
        pass

    def mapping_network(self,
                        z_nerf,
                        z_inr):
        # Frozen NeRF mapping (no gradients); trainable INR mapping and
        # nerf_rgb re-mapping.
        style_dict = {}
        with torch.no_grad():
            style_dict.update(self.mapping_network_nerf(z_nerf))
        style_dict.update(self.mapping_network_inr(z_inr))
        style_dict['nerf_rgb'] = self.nerf_rgb_mapping(style_dict['nerf_rgb'])
        return style_dict

    def points_forward(self,
                       style_dict,
                       transformed_points,
                       transformed_ray_directions_expanded,
                       num_steps,
                       hierarchical_sample,
                       z_vals,
                       clamp_mode,
                       nerf_noise,
                       transformed_ray_origins,
                       transformed_ray_directions,
                       white_back,
                       last_back,
                       return_aux_img,
                       idx_grad=None,
                       ):
        """
        Frozen-NeRF point forward: the NeRF and aux head run under no_grad;
        only the INR decoding receives gradients.

        NOTE(review): this override changes the signature of
        Generator_Diffcam.points_forward (rays_o/rays_d/points/...), so the
        parent's forward() cannot call it - confirm the intended caller.

        :param style_dict:
        :param transformed_points: (b, n, s, 3)
        :param transformed_ray_directions_expanded: (b, n, s, 3)
        :param num_steps: sampled points along a ray
        :param hierarchical_sample:
        :param z_vals: (b, n, s, 1)
        :param clamp_mode: 'relu'
        :param nerf_noise:
        :param transformed_ray_origins: (b, n, 3)
        :param transformed_ray_directions: (b, n, 3)
        :param white_back:
        :param last_back:
        :return:
        """
        device = transformed_points.device
        if idx_grad is not None:
            # Restrict all per-ray tensors to the selected ray subset.
            transformed_points = comm_utils.gather_points(points=transformed_points, idx_grad=idx_grad)
            transformed_ray_directions_expanded = comm_utils.gather_points(
                points=transformed_ray_directions_expanded, idx_grad=idx_grad)
            z_vals = comm_utils.gather_points(points=z_vals, idx_grad=idx_grad)
            transformed_ray_origins = comm_utils.gather_points(points=transformed_ray_origins, idx_grad=idx_grad)
            transformed_ray_directions = comm_utils.gather_points(points=transformed_ray_directions, idx_grad=idx_grad)

        transformed_points = rearrange(transformed_points, "b n s c -> b (n s) c")
        transformed_ray_directions_expanded = rearrange(transformed_ray_directions_expanded, "b n s c -> b (n s) c")

        # Model prediction on course points
        with torch.no_grad():
            coarse_output = self.nerf_net(
                x=transformed_points,  # (b, n x s, 3)
                style_dict=style_dict,
                ray_directions=transformed_ray_directions_expanded,
            )
        coarse_output = rearrange(coarse_output, "b (n s) rgb_sigma -> b n s rgb_sigma", s=num_steps)

        # Re-sample fine points alont camera rays, as described in NeRF
        if hierarchical_sample:
            fine_points, fine_z_vals = self.get_fine_points_and_direction(
                coarse_output=coarse_output,
                z_vals=z_vals,
                dim_rgb=self.nerf_net.rgb_dim,
                clamp_mode=clamp_mode,
                nerf_noise=nerf_noise,
                num_steps=num_steps,
                transformed_ray_origins=transformed_ray_origins,
                transformed_ray_directions=transformed_ray_directions
            )

            # Model prediction on re-sampled find points
            with torch.no_grad():
                fine_output = self.nerf_net(
                    x=fine_points,  # (b, n x s, 3)
                    style_dict=style_dict,
                    ray_directions=transformed_ray_directions_expanded,  # (b, n x s, 3)
                )
            fine_output = rearrange(fine_output, "b (n s) rgb_sigma -> b n s rgb_sigma", s=num_steps)

            # Combine course and fine points
            all_outputs = torch.cat([fine_output, coarse_output], dim=-2)  # (b, n, s, dim_rgb_sigma)
            all_z_vals = torch.cat([fine_z_vals, z_vals], dim=-2)  # (b, n, s, 1)
            _, indices = torch.sort(all_z_vals, dim=-2)  # (b, n, s, 1)
            all_z_vals = torch.gather(all_z_vals, -2, indices)  # (b, n, s, 1)
            # (b, n, s, dim_rgb_sigma)
            all_outputs = torch.gather(all_outputs, -2, indices.expand(-1, -1, -1, all_outputs.shape[-1]))
        else:
            all_outputs = coarse_output
            all_z_vals = z_vals

        # Create images with NeRF
        pixels_fea, depth, weights = pigan_utils.fancy_integration(
            rgb_sigma=all_outputs,
            z_vals=all_z_vals,
            device=device,
            dim_rgb=self.nerf_net.rgb_dim,
            white_back=white_back,
            last_back=last_back,
            clamp_mode=clamp_mode,
            noise_std=nerf_noise)

        inr_img = self.inr_net(pixels_fea, style_dict)

        if return_aux_img:
            # aux rgb_branch
            with torch.no_grad():
                aux_img = self.aux_to_rbg(pixels_fea)
        else:
            aux_img = None

        return inr_img, aux_img
cache.go | package cache
import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"

	"github.com/restic/restic/lib/debug"
	"github.com/restic/restic/lib/fs"
	"github.com/restic/restic/lib/restic"
)
// Cache manages a local cache.
type Cache struct {
	path    string // cache directory for one repository, i.e. Base/<repo-id>
	Base    string // base directory holding the caches of all repositories
	Created bool   // true if New created the repository cache directory
	// PerformReadahead is consulted when a partial file is requested; when it
	// returns true the complete file is loaded and stored in the cache.
	PerformReadahead func(restic.Handle) bool
}
// dirMode and fileMode are the permissions applied to cache directories and
// cache files, respectively (owner-only access for directories).
const dirMode = 0700
const fileMode = 0644
// readVersion returns the cache layout version recorded in the "version" file
// within dir. A missing file is reported as version 0 (an uninitialized
// cache) without an error; any other read or parse failure is returned.
func readVersion(dir string) (v uint, err error) {
	buf, err := ioutil.ReadFile(filepath.Join(dir, "version"))
	if os.IsNotExist(err) {
		return 0, nil
	}

	if err != nil {
		return 0, errors.Wrap(err, "readVersion")
	}

	// Tolerate surrounding whitespace (e.g. a trailing newline left by a
	// hand-edited or shell-written file) instead of failing ParseUint.
	ver, err := strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 32)
	if err != nil {
		return 0, errors.Wrap(err, "readVersion")
	}

	return uint(ver), nil
}
// cacheVersion is the on-disk layout version this code writes and accepts;
// New refuses caches with a newer version.
const cacheVersion = 1

// cacheLayoutPaths maps file types to their subdirectory within a repository
// cache directory.
var cacheLayoutPaths = map[restic.FileType]string{
	restic.PackFile:     "data",
	restic.SnapshotFile: "snapshots",
	restic.IndexFile:    "index",
}

// cachedirTagSignature is the mandatory first line of a CACHEDIR.TAG file as
// defined by the Cache Directory Tagging Standard.
const cachedirTagSignature = "Signature: 8a477f597d28d172789f06886806bc55\n"
// writeCachedirTag creates dir (if needed) and marks it as a cache directory
// by writing a CACHEDIR.TAG file into it. An already-existing tag file is
// left untouched and is not an error.
func writeCachedirTag(dir string) error {
	if err := fs.MkdirAll(dir, dirMode); err != nil {
		return errors.WithStack(err)
	}

	tagfile := filepath.Join(dir, "CACHEDIR.TAG")
	_, err := fs.Lstat(tagfile)
	if err != nil && !os.IsNotExist(err) {
		return errors.WithStack(err)
	}

	// O_EXCL makes creation atomic: if another process wins the race, the
	// open fails with "exists" below and the tag is already in place.
	f, err := fs.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode)
	if err != nil {
		if os.IsExist(errors.Cause(err)) {
			return nil
		}

		return errors.WithStack(err)
	}

	debug.Log("Create CACHEDIR.TAG at %v", dir)
	if _, err := f.Write([]byte(cachedirTagSignature)); err != nil {
		_ = f.Close()
		return errors.WithStack(err)
	}

	return errors.WithStack(f.Close())
}
// New returns a new cache for the repo ID at basedir. If basedir is the empty
// string, the default cache location (according to the XDG standard) is used.
//
// For partial files, the complete file is loaded and stored in the cache when
// performReadahead returns true.
func New(id string, basedir string) (c *Cache, err error) {
	if basedir == "" {
		basedir, err = DefaultDir()
		if err != nil {
			return nil, err
		}
	}

	err = fs.MkdirAll(basedir, 0700)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// create base dir and tag it as a cache directory
	if err = writeCachedirTag(basedir); err != nil {
		return nil, err
	}

	cachedir := filepath.Join(basedir, id)
	debug.Log("using cache dir %v", cachedir)

	v, err := readVersion(cachedir)
	if err != nil {
		return nil, err
	}

	// Refuse to touch a cache written by a newer layout version.
	if v > cacheVersion {
		return nil, errors.New("cache version is newer")
	}

	// create the repo cache dir if it does not exist yet
	var created bool
	_, err = fs.Lstat(cachedir)
	if os.IsNotExist(err) {
		err = fs.MkdirAll(cachedir, dirMode)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		created = true
	}

	// update the timestamp so that we can detect old cache dirs
	err = updateTimestamp(cachedir)
	if err != nil {
		return nil, err
	}

	// Stamp (or upgrade) the version file for older/uninitialized caches.
	if v < cacheVersion {
		err = ioutil.WriteFile(filepath.Join(cachedir, "version"), []byte(fmt.Sprintf("%d", cacheVersion)), fileMode)
		if err != nil {
			return nil, errors.WithStack(err)
		}
	}

	// Ensure the per-file-type subdirectories exist.
	for _, p := range cacheLayoutPaths {
		if err = fs.MkdirAll(filepath.Join(cachedir, p), dirMode); err != nil {
			return nil, errors.WithStack(err)
		}
	}

	c = &Cache{
		path:    cachedir,
		Base:    basedir,
		Created: created,
		PerformReadahead: func(restic.Handle) bool {
			// do not perform readahead by default
			return false
		},
	}

	return c, nil
}
// updateTimestamp sets the modification timestamp (mtime and atime) for the
// directory d to the current time.
func updateTimestamp(d string) error {
t := time.Now()
return fs.Chtimes(d, t, t)
}
// MaxCacheAge is the default age (30 days) after which cache directories are considered old.
const MaxCacheAge = 30 * 24 * time.Hour
func validCacheDirName(s string) bool {
r := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
return r.MatchString(s)
}
// listCacheDirs returns the list of cache directories.
func listCacheDirs(basedir string) ([]os.FileInfo, error) {
f, err := fs.Open(basedir)
if err != nil && os.IsNotExist(errors.Cause(err)) {
return nil, nil
}
if err != nil {
return nil, err
}
entries, err := f.Readdir(-1)
if err != nil {
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
result := make([]os.FileInfo, 0, len(entries))
for _, entry := range entries {
if !entry.IsDir() {
continue
}
if !validCacheDirName(entry.Name()) {
continue
}
result = append(result, entry)
}
return result, nil
}
// All returns a list of cache directories.
func All(basedir string) (dirs []os.FileInfo, err error) {
return listCacheDirs(basedir)
}
// OlderThan returns the list of cache directories older than max.
func OlderThan(basedir string, max time.Duration) ([]os.FileInfo, error) |
// Old returns a list of cache directories with a modification time of more
// than 30 days ago.
func Old(basedir string) ([]os.FileInfo, error) {
return OlderThan(basedir, MaxCacheAge)
}
// IsOld returns true if the timestamp is considered old.
func IsOld(t time.Time, maxAge time.Duration) bool {
oldest := time.Now().Add(-maxAge)
return t.Before(oldest)
}
// Wrap returns a backend with a cache.
func (c *Cache) Wrap(be restic.Backend) restic.Backend {
return newBackend(be, c)
}
// BaseDir returns the base directory.
func (c *Cache) BaseDir() string {
return c.Base
}
| {
entries, err := listCacheDirs(basedir)
if err != nil {
return nil, err
}
var oldCacheDirs []os.FileInfo
for _, fi := range entries {
if !IsOld(fi.ModTime(), max) {
continue
}
oldCacheDirs = append(oldCacheDirs, fi)
}
debug.Log("%d old cache dirs found", len(oldCacheDirs))
return oldCacheDirs, nil
} |
try_setter.rs | #![cfg(feature = "nightlytests")]
#![feature(try_from)]
#[macro_use]
extern crate derive_builder;
use std::convert::TryFrom;
use std::net::{IpAddr, AddrParseError};
use std::str::FromStr;
use std::string::ToString;
#[derive(Debug, Clone, PartialEq)]
pub struct MyAddr(IpAddr);
impl From<IpAddr> for MyAddr {
fn from(v: IpAddr) -> Self {
MyAddr(v)
}
}
#[cfg(feature = "nightlytests")]
impl<'a> TryFrom<&'a str> for MyAddr {
type Error = AddrParseError;
fn try_from(v: &str) -> Result<Self, Self::Error> {
Ok(MyAddr(v.parse()?))
}
}
#[derive(Debug, PartialEq, Builder)]
#[builder(try_setter, setter(into))]
struct | {
pub source: MyAddr,
pub dest: MyAddr,
}
#[derive(Debug, PartialEq, Builder)]
#[builder(try_setter, setter(into, prefix = "set"))]
struct Ipsum {
pub source: MyAddr,
}
fn exact_helper() -> Result<Lorem, String> {
LoremBuilder::default()
.source(IpAddr::from_str("1.2.3.4").unwrap())
.dest(IpAddr::from_str("0.0.0.0").unwrap())
.build()
}
#[cfg(feature = "nightlytests")]
fn try_helper() -> Result<Lorem, String> {
LoremBuilder::default()
.try_source("1.2.3.4").map_err(|e| e.to_string())?
.try_dest("0.0.0.0").map_err(|e| e.to_string())?
.build()
}
#[test]
fn infallible_set() {
let _ = LoremBuilder::default()
.source(IpAddr::from_str("1.2.3.4").unwrap())
.dest(IpAddr::from_str("0.0.0.0").unwrap())
.build();
}
#[test]
#[cfg(feature = "nightlytests")]
fn fallible_set() {
let mut builder = LoremBuilder::default();
let try_result = builder.try_source("1.2.3.4");
let built = try_result
.expect("Passed well-formed address")
.dest(IpAddr::from_str("0.0.0.0").unwrap())
.build()
.unwrap();
assert_eq!(built, exact_helper().unwrap());
}
#[test]
#[cfg(feature = "nightlytests")]
fn with_helper() {
assert_eq!(exact_helper().unwrap(), try_helper().unwrap());
}
#[test]
#[cfg(feature = "nightlytests")]
fn renamed() {
IpsumBuilder::default()
.try_set_source("0.0.0.0")
.unwrap()
.build()
.expect("All fields were provided");
}
| Lorem |
tunnel.rs | use crate::error::{Error, Result};
use crate::packet::TunnelPacket;
use futures::{executor::LocalPool, poll};
use futures::channel::{mpsc, oneshot};
use futures::channel::mpsc::{UnboundedReceiver, UnboundedSender};
use futures::prelude::*;
use futures::task::{Poll, Spawn, SpawnExt};
use pnet::packet::ip::IpNextHeaderProtocol;
use pnet::transport::{transport_channel, TransportChannelType, TransportProtocol, icmp_packet_iter};
use pnet_macros_support::packet::Packet;
use std::net::IpAddr;
use std::sync::Arc;
use std::thread;
enum | {
Client,
Server,
}
pub(crate) struct Tunnel {
tunnel_type: TunnelType,
listen_addr: IpAddr,
listen_port: u16,
remote_addr: IpAddr,
}
impl Tunnel {
pub(crate) fn new(is_server: bool, listen_addr: IpAddr, listen_port: u16, remote_addr: IpAddr) -> Result<Tunnel> {
let tunnel_type = if is_server {
TunnelType::Server
} else {
TunnelType::Client
};
Ok(Tunnel {
tunnel_type: tunnel_type,
listen_addr: listen_addr,
listen_port: listen_port,
remote_addr: remote_addr,
})
}
// TODO: probably wanna implement timeout api stuff and all that
pub(crate) fn run(self, mut tx: UnboundedSender<Arc<TunnelPacket>>,
mut rx: UnboundedReceiver<Arc<TunnelPacket>>,
addr_rx: Option<oneshot::Receiver<IpAddr>>) -> Result<()> {
let chan_type = TransportChannelType::Layer4(TransportProtocol::Ipv4(IpNextHeaderProtocol(1)));
let (mut sender, mut server) = transport_channel(84, chan_type).map_err(Error::StdIo)?;
match &self.tunnel_type {
TunnelType::Client => {
let out_thread = thread::Builder::new().name("c_out_thread".to_owned());
let _out_handler = out_thread.spawn(move || {
let mut pool = LocalPool::new();
let mut spawner = pool.spawner();
spawner.spawn(async move {
loop {
match await!(rx.next()) {
Some(pack) => {
//println!("rx loop: {}", str::from_utf8(pack.payload()).unwrap());
sender.send_to(Arc::try_unwrap(pack).unwrap(), self.remote_addr).unwrap();
},
None => (),
};
}
}).unwrap();
pool.run();
});
let in_thread = thread::Builder::new().name("c_in_thread".to_owned());
let _in_handler = in_thread.spawn(move || {
let mut pool = LocalPool::new();
let mut spawner = pool.spawner();
spawner.spawn(async move {
let mut siter = icmp_packet_iter(&mut server);
loop {
let (pack, _addr) = siter.next().unwrap();
let decoded: TunnelPacket = pack.into();
//println!("{}", std::str::from_utf8(decoded.payload()).unwrap());
await!(tx.send(Arc::new(decoded))).unwrap();
}
}).unwrap();
pool.run();
});
//in_thread.join().map_err(Error::Thread)?;
//out_thread.join().map_err(Error::Thread)?;
},
TunnelType::Server => {
let in_thread = thread::Builder::new().name("s_in_thread".to_owned());
let _in_handler = in_thread.spawn(move || {
let mut pool = LocalPool::new();
let mut spawner = pool.spawner();
spawner.spawn(async move {
let mut siter = icmp_packet_iter(&mut server);
loop {
let (pack, _addr) = siter.next().unwrap();
let decoded: TunnelPacket = pack.into();
await!(tx.send(Arc::new(decoded))).unwrap();
}
}).unwrap();
pool.run();
});
let out_thread = thread::Builder::new().name("s_out_thread".to_owned());
let _out_handler = out_thread.spawn(move || {
let mut pool = LocalPool::new();
let mut spawner = pool.spawner();
spawner.spawn(async move {
let addr_rx = addr_rx.ok_or(Error::Other("Failed to retrieve connection address")).unwrap();
let addr = await!(addr_rx.map(|a| a.unwrap()));
loop {
match poll!(rx.next()) {
Poll::Ready(Some(pack)) => {
//println!("{}", std::str::from_utf8(pack.payload()).unwrap());
let _ = sender.send_to(Arc::try_unwrap(pack).unwrap(), addr).unwrap();
()
},
_ => (),
};
}
}).unwrap();
pool.run();
});
//in_thread.join().map_err(Error::Thread)?;
//out_thread.join().map_err(Error::Thread)?;
},
};
Ok(())
}
}
| TunnelType |
db_test.go | // Copyright (c) 2012, Suryandaru Triandana <[email protected]>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"unsafe"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
func tkey(i int) []byte {
return []byte(fmt.Sprintf("%016d", i))
}
func tval(seed, n int) []byte {
r := rand.New(rand.NewSource(int64(seed)))
return randomString(r, n)
}
type dbHarness struct {
t *testing.T
stor *testStorage
db *DB
o *opt.Options
ro *opt.ReadOptions
wo *opt.WriteOptions
}
func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness {
h := new(dbHarness)
h.init(t, o)
return h
}
func newDbHarness(t *testing.T) *dbHarness {
return newDbHarnessWopt(t, &opt.Options{})
}
func (h *dbHarness) init(t *testing.T, o *opt.Options) {
h.t = t
h.stor = newTestStorage(t)
h.o = o
h.ro = nil
h.wo = nil
if err := h.openDB0(); err != nil {
// So that it will come after fatal message.
defer h.stor.Close()
h.t.Fatal("Open (init): got error: ", err)
}
}
func (h *dbHarness) openDB0() (err error) {
h.t.Log("opening DB")
h.db, err = Open(h.stor, h.o)
return
}
func (h *dbHarness) openDB() {
if err := h.openDB0(); err != nil {
h.t.Fatal("Open: got error: ", err)
}
}
func (h *dbHarness) closeDB0() error {
h.t.Log("closing DB")
return h.db.Close()
}
func (h *dbHarness) closeDB() {
if err := h.closeDB0(); err != nil {
h.t.Error("Close: got error: ", err)
}
h.stor.CloseCheck()
runtime.GC()
}
func (h *dbHarness) reopenDB() {
h.closeDB()
h.openDB()
}
func (h *dbHarness) close() {
h.closeDB0()
h.db = nil
h.stor.Close()
h.stor = nil
runtime.GC()
}
func (h *dbHarness) openAssert(want bool) {
db, err := Open(h.stor, h.o)
if err != nil {
if want {
h.t.Error("Open: assert: got error: ", err)
} else {
h.t.Log("Open: assert: got error (expected): ", err)
}
} else {
if !want {
h.t.Error("Open: assert: expect error")
}
db.Close()
}
}
func (h *dbHarness) write(batch *Batch) {
if err := h.db.Write(batch, h.wo); err != nil {
h.t.Error("Write: got error: ", err)
}
}
func (h *dbHarness) put(key, value string) {
if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil {
h.t.Error("Put: got error: ", err)
}
}
func (h *dbHarness) putMulti(n int, low, hi string) {
for i := 0; i < n; i++ {
h.put(low, "begin")
h.put(hi, "end")
h.compactMem()
}
}
func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
t := h.t
db := h.db
var res uint64
v := db.s.version()
for i, tt := range v.tables[1 : len(v.tables)-1] {
level := i + 1
next := v.tables[level+1]
for _, t := range tt {
r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false)
sum := r.size()
if sum > res {
res = sum
}
}
}
v.release()
if res > want {
t.Errorf("next level overlapping bytes is more than %d, got=%d", want, res)
}
}
func (h *dbHarness) delete(key string) {
t := h.t
db := h.db
err := db.Delete([]byte(key), h.wo)
if err != nil {
t.Error("Delete: got error: ", err)
}
}
func (h *dbHarness) assertNumKeys(want int) {
iter := h.db.NewIterator(nil, h.ro)
defer iter.Release()
got := 0
for iter.Next() {
got++
}
if err := iter.Error(); err != nil {
h.t.Error("assertNumKeys: ", err)
}
if want != got {
h.t.Errorf("assertNumKeys: want=%d got=%d", want, got)
}
}
func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) {
t := h.t
v, err := db.Get([]byte(key), h.ro)
switch err {
case ErrNotFound:
if expectFound {
t.Errorf("Get: key '%s' not found, want found", key)
}
case nil:
found = true
if !expectFound {
t.Errorf("Get: key '%s' found, want not found", key)
}
default:
t.Error("Get: got error: ", err)
}
return
}
func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) {
return h.getr(h.db, key, expectFound)
}
func (h *dbHarness) getValr(db Reader, key, value string) {
t := h.t
found, r := h.getr(db, key, true)
if !found {
return
}
rval := string(r)
if rval != value {
t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value)
}
}
func (h *dbHarness) getVal(key, value string) {
h.getValr(h.db, key, value)
}
func (h *dbHarness) allEntriesFor(key, want string) {
t := h.t
db := h.db
s := db.s
ikey := newIKey([]byte(key), kMaxSeq, tVal)
iter := db.newRawIterator(nil, nil)
if !iter.Seek(ikey) && iter.Error() != nil {
t.Error("AllEntries: error during seek, err: ", iter.Error())
return
}
res := "[ "
first := true
for iter.Valid() {
rkey := iKey(iter.Key())
if _, t, ok := rkey.parseNum(); ok {
if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 {
break
}
if !first {
res += ", "
}
first = false
switch t {
case tVal:
res += string(iter.Value())
case tDel:
res += "DEL"
}
} else {
if !first {
res += ", "
}
first = false
res += "CORRUPTED"
}
iter.Next()
}
if !first {
res += " "
}
res += "]"
if res != want {
t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want)
}
}
// Return a string that contains all key,value pairs in order,
// formatted like "(k1->v1)(k2->v2)".
func (h *dbHarness) getKeyVal(want string) {
t := h.t
db := h.db
s, err := db.GetSnapshot()
if err != nil {
t.Fatal("GetSnapshot: got error: ", err)
}
res := ""
iter := s.NewIterator(nil, nil)
for iter.Next() {
res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value()))
}
iter.Release()
if res != want {
t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want)
}
s.Release()
}
func (h *dbHarness) waitCompaction() {
t := h.t
db := h.db
if err := db.compSendIdle(db.tcompCmdC); err != nil {
t.Error("compaction error: ", err)
}
}
func (h *dbHarness) waitMemCompaction() {
t := h.t
db := h.db
if err := db.compSendIdle(db.mcompCmdC); err != nil {
t.Error("compaction error: ", err)
}
}
func (h *dbHarness) compactMem() {
t := h.t
db := h.db
db.writeLockC <- struct{}{}
defer func() {
<-db.writeLockC
}()
if _, err := db.rotateMem(0); err != nil {
t.Error("compaction error: ", err)
}
if err := db.compSendIdle(db.mcompCmdC); err != nil {
t.Error("compaction error: ", err)
}
if h.totalTables() == 0 {
t.Error("zero tables after mem compaction")
}
}
func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
t := h.t
db := h.db
var _min, _max []byte
if min != "" {
_min = []byte(min)
}
if max != "" {
_max = []byte(max)
}
if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil {
if wanterr {
t.Log("CompactRangeAt: got error (expected): ", err)
} else {
t.Error("CompactRangeAt: got error: ", err)
}
} else if wanterr {
t.Error("CompactRangeAt: expect error")
}
}
func (h *dbHarness) compactRangeAt(level int, min, max string) {
h.compactRangeAtErr(level, min, max, false)
}
func (h *dbHarness) compactRange(min, max string) {
t := h.t
db := h.db
var r util.Range
if min != "" {
r.Start = []byte(min)
}
if max != "" {
r.Limit = []byte(max)
}
if err := db.CompactRange(r); err != nil {
t.Error("CompactRange: got error: ", err)
}
}
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
t := h.t
db := h.db
s, err := db.SizeOf([]util.Range{
{[]byte(start), []byte(limit)},
})
if err != nil {
t.Error("SizeOf: got error: ", err)
}
if s.Sum() < low || s.Sum() > hi {
t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
shorten(start), shorten(limit), low, hi, s.Sum())
}
}
func (h *dbHarness) getSnapshot() (s *Snapshot) {
s, err := h.db.GetSnapshot()
if err != nil {
h.t.Fatal("GetSnapshot: got error: ", err)
}
return
}
func (h *dbHarness) tablesPerLevel(want string) {
res := ""
nz := 0
v := h.db.s.version()
for level, tt := range v.tables {
if level > 0 {
res += ","
}
res += fmt.Sprint(len(tt))
if len(tt) > 0 {
nz = len(res)
}
}
v.release()
res = res[:nz]
if res != want {
h.t.Errorf("invalid tables len, want=%s, got=%s", want, res)
}
}
func (h *dbHarness) totalTables() (n int) {
v := h.db.s.version()
for _, tt := range v.tables {
n += len(tt)
}
v.release()
return
}
type keyValue interface {
Key() []byte
Value() []byte
}
func testKeyVal(t *testing.T, kv keyValue, want string) {
res := string(kv.Key()) + "->" + string(kv.Value())
if res != want {
t.Errorf("invalid key/value, want=%q, got=%q", want, res)
}
}
func numKey(num int) string {
return fmt.Sprintf("key%06d", num)
}
var _bloom_filter = filter.NewBloomFilter(10)
func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) {
for i := 0; i < 4; i++ {
func() {
switch i {
case 0:
case 1:
if o == nil {
o = &opt.Options{Filter: _bloom_filter}
} else {
old := o
o = &opt.Options{}
*o = *old
o.Filter = _bloom_filter
}
case 2:
if o == nil {
o = &opt.Options{Compression: opt.NoCompression}
} else {
old := o
o = &opt.Options{}
*o = *old
o.Compression = opt.NoCompression
}
}
h := newDbHarnessWopt(t, o)
defer h.close()
switch i {
case 3:
h.reopenDB()
}
f(h)
}()
}
}
func trun(t *testing.T, f func(h *dbHarness)) {
truno(t, nil, f)
}
func testAligned(t *testing.T, name string, offset uintptr) {
if offset%8 != 0 {
t.Errorf("field %s offset is not 64-bit aligned", name)
}
}
func Test_FieldsAligned(t *testing.T) {
p1 := new(DB)
testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
p2 := new(session)
testAligned(t, "session.stFileNum", unsafe.Offsetof(p2.stFileNum))
testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
testAligned(t, "session.stSeq", unsafe.Offsetof(p2.stSeq))
}
func TestDb_Locking(t *testing.T) {
h := newDbHarness(t)
defer h.stor.Close()
h.openAssert(false)
h.closeDB()
h.openAssert(true)
}
func TestDb_Empty(t *testing.T) {
trun(t, func(h *dbHarness) {
h.get("foo", false)
h.reopenDB()
h.get("foo", false)
})
}
func TestDb_ReadWrite(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.getVal("foo", "v1")
h.put("bar", "v2")
h.put("foo", "v3")
h.getVal("foo", "v3")
h.getVal("bar", "v2")
h.reopenDB()
h.getVal("foo", "v3")
h.getVal("bar", "v2")
})
}
func TestDb_PutDeleteGet(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.getVal("foo", "v1")
h.put("foo", "v2")
h.getVal("foo", "v2")
h.delete("foo")
h.get("foo", false)
h.reopenDB()
h.get("foo", false)
})
}
func TestDb_EmptyBatch(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.get("foo", false)
err := h.db.Write(new(Batch), h.wo)
if err != nil {
t.Error("writing empty batch yield error: ", err)
}
h.get("foo", false)
}
func TestDb_GetFromFrozen(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100})
defer h.close()
h.put("foo", "v1")
h.getVal("foo", "v1")
h.stor.DelaySync(storage.TypeTable) // Block sync calls
h.put("k1", strings.Repeat("x", 100000)) // Fill memtable
h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction
for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ {
time.Sleep(10 * time.Microsecond)
}
if h.db.getFrozenMem() == nil {
h.stor.ReleaseSync(storage.TypeTable)
t.Fatal("No frozen mem")
}
h.getVal("foo", "v1")
h.stor.ReleaseSync(storage.TypeTable) // Release sync calls
h.reopenDB()
h.getVal("foo", "v1")
h.get("k1", true)
h.get("k2", true)
}
func TestDb_GetFromTable(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.compactMem()
h.getVal("foo", "v1")
})
}
func TestDb_GetSnapshot(t *testing.T) {
trun(t, func(h *dbHarness) {
bar := strings.Repeat("b", 200)
h.put("foo", "v1")
h.put(bar, "v1")
snap, err := h.db.GetSnapshot()
if err != nil {
t.Fatal("GetSnapshot: got error: ", err)
}
h.put("foo", "v2")
h.put(bar, "v2")
h.getVal("foo", "v2")
h.getVal(bar, "v2")
h.getValr(snap, "foo", "v1")
h.getValr(snap, bar, "v1")
h.compactMem()
h.getVal("foo", "v2")
h.getVal(bar, "v2")
h.getValr(snap, "foo", "v1")
h.getValr(snap, bar, "v1")
snap.Release()
h.reopenDB()
h.getVal("foo", "v2")
h.getVal(bar, "v2")
})
}
func TestDb_GetLevel0Ordering(t *testing.T) {
trun(t, func(h *dbHarness) {
for i := 0; i < 4; i++ {
h.put("bar", fmt.Sprintf("b%d", i))
h.put("foo", fmt.Sprintf("v%d", i))
h.compactMem()
}
h.getVal("foo", "v3")
h.getVal("bar", "b3")
v := h.db.s.version()
t0len := v.tLen(0)
v.release()
if t0len < 2 {
t.Errorf("level-0 tables is less than 2, got %d", t0len)
}
h.reopenDB()
h.getVal("foo", "v3")
h.getVal("bar", "b3")
})
}
func TestDb_GetOrderedByLevels(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.compactMem()
h.compactRange("a", "z")
h.getVal("foo", "v1")
h.put("foo", "v2")
h.compactMem()
h.getVal("foo", "v2")
})
}
func TestDb_GetPicksCorrectFile(t *testing.T) {
trun(t, func(h *dbHarness) {
// Arrange to have multiple files in a non-level-0 level.
h.put("a", "va")
h.compactMem()
h.compactRange("a", "b")
h.put("x", "vx")
h.compactMem()
h.compactRange("x", "y")
h.put("f", "vf")
h.compactMem()
h.compactRange("f", "g")
h.getVal("a", "va")
h.getVal("f", "vf")
h.getVal("x", "vx")
h.compactRange("", "")
h.getVal("a", "va")
h.getVal("f", "vf")
h.getVal("x", "vx")
})
}
func TestDb_GetEncountersEmptyLevel(t *testing.T) {
trun(t, func(h *dbHarness) {
// Arrange for the following to happen:
// * sstable A in level 0
// * nothing in level 1
// * sstable B in level 2
// Then do enough Get() calls to arrange for an automatic compaction
// of sstable A. A bug would cause the compaction to be marked as
// occuring at level 1 (instead of the correct level 0).
// Step 1: First place sstables in levels 0 and 2
for i := 0; ; i++ {
if i >= 100 {
t.Fatal("could not fill levels-0 and level-2")
}
v := h.db.s.version()
if v.tLen(0) > 0 && v.tLen(2) > 0 {
v.release()
break
}
v.release()
h.put("a", "begin")
h.put("z", "end")
h.compactMem()
h.getVal("a", "begin")
h.getVal("z", "end")
}
// Step 2: clear level 1 if necessary.
h.compactRangeAt(1, "", "")
h.tablesPerLevel("1,0,1")
h.getVal("a", "begin")
h.getVal("z", "end")
// Step 3: read a bunch of times
for i := 0; i < 200; i++ {
h.get("missing", false)
}
// Step 4: Wait for compaction to finish
h.waitCompaction()
v := h.db.s.version()
if v.tLen(0) > 0 {
t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
}
v.release()
h.getVal("a", "begin")
h.getVal("z", "end")
})
}
func TestDb_IterMultiWithDelete(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("a", "va")
h.put("b", "vb")
h.put("c", "vc")
h.delete("b")
h.get("b", false)
iter := h.db.NewIterator(nil, nil)
iter.Seek([]byte("c"))
testKeyVal(t, iter, "c->vc")
iter.Prev()
testKeyVal(t, iter, "a->va")
iter.Release()
h.compactMem()
iter = h.db.NewIterator(nil, nil)
iter.Seek([]byte("c"))
testKeyVal(t, iter, "c->vc")
iter.Prev()
testKeyVal(t, iter, "a->va")
iter.Release()
})
}
func TestDb_IteratorPinsRef(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.put("foo", "hello")
// Get iterator that will yield the current contents of the DB.
iter := h.db.NewIterator(nil, nil)
// Write to force compactions
h.put("foo", "newvalue1")
for i := 0; i < 100; i++ {
h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
}
h.put("foo", "newvalue2")
iter.First()
testKeyVal(t, iter, "foo->hello")
if iter.Next() {
t.Errorf("expect eof")
}
iter.Release()
}
func TestDb_Recover(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.put("baz", "v5")
h.reopenDB()
h.getVal("foo", "v1")
h.getVal("foo", "v1")
h.getVal("baz", "v5")
h.put("bar", "v2")
h.put("foo", "v3")
h.reopenDB()
h.getVal("foo", "v3")
h.put("foo", "v4")
h.getVal("foo", "v4")
h.getVal("bar", "v2")
h.getVal("baz", "v5")
})
}
func TestDb_RecoverWithEmptyJournal(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.put("foo", "v2")
h.reopenDB()
h.reopenDB()
h.put("foo", "v3")
h.reopenDB()
h.getVal("foo", "v3")
})
}
func TestDb_RecoverDuringMemtableCompaction(t *testing.T) {
truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) {
h.stor.DelaySync(storage.TypeTable)
h.put("big1", strings.Repeat("x", 10000000))
h.put("big2", strings.Repeat("y", 1000))
h.put("bar", "v2")
h.stor.ReleaseSync(storage.TypeTable)
h.reopenDB()
h.getVal("bar", "v2")
h.getVal("big1", strings.Repeat("x", 10000000))
h.getVal("big2", strings.Repeat("y", 1000))
})
}
func TestDb_MinorCompactionsHappen(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000})
defer h.close()
n := 500
key := func(i int) string {
return fmt.Sprintf("key%06d", i)
}
for i := 0; i < n; i++ {
h.put(key(i), key(i)+strings.Repeat("v", 1000))
}
for i := 0; i < n; i++ {
h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
}
h.reopenDB()
for i := 0; i < n; i++ {
h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
}
}
func TestDb_RecoverWithLargeJournal(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.put("big1", strings.Repeat("1", 200000))
h.put("big2", strings.Repeat("2", 200000))
h.put("small3", strings.Repeat("3", 10))
h.put("small4", strings.Repeat("4", 10))
h.tablesPerLevel("")
// Make sure that if we re-open with a small write buffer size that
// we flush table files in the middle of a large journal file.
h.o.WriteBuffer = 100000
h.reopenDB()
h.getVal("big1", strings.Repeat("1", 200000))
h.getVal("big2", strings.Repeat("2", 200000))
h.getVal("small3", strings.Repeat("3", 10))
h.getVal("small4", strings.Repeat("4", 10))
v := h.db.s.version()
if v.tLen(0) <= 1 {
t.Errorf("tables-0 less than one")
}
v.release()
}
func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
WriteBuffer: 10000000,
Compression: opt.NoCompression,
})
defer h.close()
v := h.db.s.version()
if v.tLen(0) > 0 {
t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
}
v.release()
n := 80
// Write 8MB (80 values, each 100K)
for i := 0; i < n; i++ {
h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
}
// Reopening moves updates to level-0
h.reopenDB()
h.compactRangeAt(0, "", "")
v = h.db.s.version()
if v.tLen(0) > 0 {
t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
}
if v.tLen(1) <= 1 {
t.Errorf("level-1 tables less than 1, got %d", v.tLen(1))
}
v.release()
for i := 0; i < n; i++ {
h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
}
}
func TestDb_RepeatedWritesToSameKey(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
defer h.close()
maxTables := kNumLevels + kL0_StopWritesTrigger
value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
for i := 0; i < 5*maxTables; i++ {
h.put("key", value)
n := h.totalTables()
if n > maxTables {
t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
}
}
}
func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
defer h.close()
h.reopenDB()
maxTables := kNumLevels + kL0_StopWritesTrigger
value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
for i := 0; i < 5*maxTables; i++ {
h.put("key", value)
n := h.totalTables()
if n > maxTables {
t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
}
}
}
func TestDb_SparseMerge(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
defer h.close()
h.putMulti(kNumLevels, "A", "Z")
// Suppose there is:
// small amount of data with prefix A
// large amount of data with prefix B
// small amount of data with prefix C
// and that recent updates have made small changes to all three prefixes.
// Check that we do not do a compaction that merges all of B in one shot.
h.put("A", "va")
value := strings.Repeat("x", 1000)
for i := 0; i < 100000; i++ {
h.put(fmt.Sprintf("B%010d", i), value)
}
h.put("C", "vc")
h.compactMem()
h.compactRangeAt(0, "", "")
h.waitCompaction()
// Make sparse update
h.put("A", "va2")
h.put("B100", "bvalue2")
h.put("C", "vc2")
h.compactMem()
h.maxNextLevelOverlappingBytes(20 * 1048576)
h.compactRangeAt(0, "", "")
h.waitCompaction()
h.maxNextLevelOverlappingBytes(20 * 1048576)
h.compactRangeAt(1, "", "")
h.waitCompaction()
h.maxNextLevelOverlappingBytes(20 * 1048576)
}
func TestDb_SizeOf(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
Compression: opt.NoCompression,
WriteBuffer: 10000000,
})
defer h.close()
h.sizeAssert("", "xyz", 0, 0)
h.reopenDB()
h.sizeAssert("", "xyz", 0, 0)
// Write 8MB (80 values, each 100K)
n := 80
s1 := 100000
s2 := 105000
for i := 0; i < n; i++ {
h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10))
}
// 0 because SizeOf() does not account for memtable space
h.sizeAssert("", numKey(50), 0, 0)
for r := 0; r < 3; r++ {
h.reopenDB()
for cs := 0; cs < n; cs += 10 {
for i := 0; i < n; i += 10 {
h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i))
h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1)))
h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10))
}
h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50))
h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50))
h.compactRangeAt(0, numKey(cs), numKey(cs+9))
}
v := h.db.s.version()
if v.tLen(0) != 0 {
t.Errorf("level-0 tables was not zero, got %d", v.tLen(0))
}
if v.tLen(1) == 0 {
t.Error("level-1 tables was zero")
}
v.release()
}
}
func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
defer h.close()
sizes := []uint64{
10000,
10000,
100000,
10000,
100000,
10000,
300000,
10000,
}
for i, n := range sizes {
h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10))
}
for r := 0; r < 3; r++ {
h.reopenDB()
var x uint64
for i, n := range sizes {
y := x
if i > 0 {
y += 1000
}
h.sizeAssert("", numKey(i), x, y)
x += n
}
h.sizeAssert(numKey(3), numKey(5), 110000, 111000)
h.compactRangeAt(0, "", "")
}
}
func TestDb_Snapshot(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
s1 := h.getSnapshot()
h.put("foo", "v2")
s2 := h.getSnapshot()
h.put("foo", "v3")
s3 := h.getSnapshot()
h.put("foo", "v4")
h.getValr(s1, "foo", "v1")
h.getValr(s2, "foo", "v2")
h.getValr(s3, "foo", "v3")
h.getVal("foo", "v4")
s3.Release()
h.getValr(s1, "foo", "v1")
h.getValr(s2, "foo", "v2")
h.getVal("foo", "v4")
s1.Release()
h.getValr(s2, "foo", "v2")
h.getVal("foo", "v4")
s2.Release()
h.getVal("foo", "v4")
})
}
func TestDb_HiddenValuesAreRemoved(t *testing.T) {
trun(t, func(h *dbHarness) {
s := h.db.s
h.put("foo", "v1")
h.compactMem()
m := kMaxMemCompactLevel
v := s.version()
num := v.tLen(m)
v.release()
if num != 1 {
t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
}
// Place a table at level last-1 to prevent merging with preceding mutation
h.put("a", "begin")
h.put("z", "end")
h.compactMem()
v = s.version()
if v.tLen(m) != 1 {
t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
}
if v.tLen(m-1) != 1 {
t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
}
v.release()
h.delete("foo")
h.put("foo", "v2")
h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
h.compactMem()
h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
h.compactRangeAt(m-2, "", "z")
// DEL eliminated, but v1 remains because we aren't compacting that level
// (DEL can be eliminated because v2 hides v1).
h.allEntriesFor("foo", "[ v2, v1 ]")
h.compactRangeAt(m-1, "", "")
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
h.allEntriesFor("foo", "[ v2 ]")
})
}
func TestDb_DeletionMarkers2(t *testing.T) {
h := newDbHarness(t)
defer h.close()
s := h.db.s
h.put("foo", "v1")
h.compactMem()
m := kMaxMemCompactLevel
v := s.version()
num := v.tLen(m)
v.release()
if num != 1 {
t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
}
// Place a table at level last-1 to prevent merging with preceding mutation
h.put("a", "begin")
h.put("z", "end")
h.compactMem()
v = s.version()
if v.tLen(m) != 1 {
t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
}
if v.tLen(m-1) != 1 {
t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
}
v.release()
h.delete("foo")
h.allEntriesFor("foo", "[ DEL, v1 ]")
h.compactMem() // Moves to level last-2
h.allEntriesFor("foo", "[ DEL, v1 ]")
h.compactRangeAt(m-2, "", "")
// DEL kept: "last" file overlaps
h.allEntriesFor("foo", "[ DEL, v1 ]")
h.compactRangeAt(m-1, "", "")
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
h.allEntriesFor("foo", "[ ]")
}
func TestDb_CompactionTableOpenError(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1})
defer h.close()
im := 10
jm := 10
for r := 0; r < 2; r++ {
for i := 0; i < im; i++ {
for j := 0; j < jm; j++ {
h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
}
h.compactMem()
}
}
if n := h.totalTables(); n != im*2 {
t.Errorf("total tables is %d, want %d", n, im)
}
h.stor.SetOpenErr(storage.TypeTable)
go h.db.CompactRange(util.Range{})
if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil {
t.Log("compaction error: ", err)
}
h.closeDB0()
h.openDB()
h.stor.SetOpenErr(0)
for i := 0; i < im; i++ {
for j := 0; j < jm; j++ {
h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
}
}
}
func TestDb_OverlapInLevel0(t *testing.T) {
trun(t, func(h *dbHarness) {
if kMaxMemCompactLevel != 2 {
t.Fatal("fix test to reflect the config")
}
// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
h.put("100", "v100")
h.put("999", "v999")
h.compactMem()
h.delete("100")
h.delete("999")
h.compactMem()
h.tablesPerLevel("0,1,1")
// Make files spanning the following ranges in level-0:
// files[0] 200 .. 900
// files[1] 300 .. 500
// Note that files are sorted by min key.
h.put("300", "v300")
h.put("500", "v500")
h.compactMem()
h.put("200", "v200")
h.put("600", "v600")
h.put("900", "v900")
h.compactMem()
h.tablesPerLevel("2,1,1")
// Compact away the placeholder files we created initially
h.compactRangeAt(1, "", "")
h.compactRangeAt(2, "", "")
h.tablesPerLevel("2")
// Do a memtable compaction. Before bug-fix, the compaction would
// not detect the overlap with level-0 files and would incorrectly place
// the deletion in a deeper level.
h.delete("600")
h.compactMem()
h.tablesPerLevel("3")
h.get("600", false)
})
}
func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.reopenDB()
h.put("b", "v")
h.reopenDB()
h.delete("b")
h.delete("a")
h.reopenDB()
h.delete("a")
h.reopenDB()
h.put("a", "v")
h.reopenDB()
h.reopenDB()
h.getKeyVal("(a->v)")
h.waitCompaction()
h.getKeyVal("(a->v)")
}
func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.reopenDB()
h.put("", "")
h.reopenDB()
h.delete("e")
h.put("", "")
h.reopenDB()
h.put("c", "cv")
h.reopenDB()
h.put("", "")
h.reopenDB()
h.put("", "")
h.waitCompaction()
h.reopenDB()
h.put("d", "dv")
h.reopenDB()
h.put("", "")
h.reopenDB()
h.delete("d")
h.delete("b")
h.reopenDB()
h.getKeyVal("(->)(c->cv)")
h.waitCompaction()
h.getKeyVal("(->)(c->cv)")
}
func TestDb_SingleEntryMemCompaction(t *testing.T) {
trun(t, func(h *dbHarness) {
for i := 0; i < 10; i++ {
h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer))
h.compactMem()
h.put("key", strings.Repeat("v", opt.DefaultBlockSize))
h.compactMem()
h.put("k", "v")
h.compactMem()
h.put("", "")
h.compactMem()
h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2))
h.compactMem()
}
})
}
func TestDb_ManifestWriteError(t *testing.T) {
for i := 0; i < 2; i++ {
func() {
h := newDbHarness(t)
defer h.close()
h.put("foo", "bar")
h.getVal("foo", "bar")
// Mem compaction (will succeed)
h.compactMem()
h.getVal("foo", "bar")
v := h.db.s.version()
if n := v.tLen(kMaxMemCompactLevel); n != 1 {
t.Errorf("invalid total tables, want=1 got=%d", n)
}
v.release()
if i == 0 {
h.stor.SetWriteErr(storage.TypeManifest)
} else {
h.stor.SetSyncErr(storage.TypeManifest)
}
// Merging compaction (will fail)
h.compactRangeAtErr(kMaxMemCompactLevel, "", "", true)
h.db.Close()
h.stor.SetWriteErr(0)
h.stor.SetSyncErr(0)
// Should not lose data
h.openDB()
h.getVal("foo", "bar")
}()
}
}
func assertErr(t *testing.T, err error, wanterr bool) {
if err != nil {
if wanterr {
t.Log("AssertErr: got error (expected): ", err)
} else {
t.Error("AssertErr: got error: ", err)
}
} else if wanterr {
t.Error("AssertErr: expect error")
}
}
func TestDb_ClosedIsClosed(t *testing.T) {
h := newDbHarness(t)
db := h.db
var iter, iter2 iterator.Iterator
var snap *Snapshot
func() {
defer h.close()
h.put("k", "v")
h.getVal("k", "v")
iter = db.NewIterator(nil, h.ro)
iter.Seek([]byte("k"))
testKeyVal(t, iter, "k->v")
var err error
snap, err = db.GetSnapshot()
if err != nil {
t.Fatal("GetSnapshot: got error: ", err)
}
h.getValr(snap, "k", "v")
iter2 = snap.NewIterator(nil, h.ro)
iter2.Seek([]byte("k"))
testKeyVal(t, iter2, "k->v")
h.put("foo", "v2")
h.delete("foo")
// closing DB
iter.Release()
iter2.Release()
}()
assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true)
_, err := db.Get([]byte("k"), h.ro)
assertErr(t, err, true)
if iter.Valid() {
t.Errorf("iter.Valid should false")
}
assertErr(t, iter.Error(), false)
testKeyVal(t, iter, "->")
if iter.Seek([]byte("k")) {
t.Errorf("iter.Seek should false")
}
assertErr(t, iter.Error(), true)
assertErr(t, iter2.Error(), false)
_, err = snap.Get([]byte("k"), h.ro)
assertErr(t, err, true)
_, err = db.GetSnapshot()
assertErr(t, err, true)
iter3 := db.NewIterator(nil, h.ro)
assertErr(t, iter3.Error(), true)
iter3 = snap.NewIterator(nil, h.ro)
assertErr(t, iter3.Error(), true)
assertErr(t, db.Delete([]byte("k"), h.wo), true)
_, err = db.GetProperty("leveldb.stats")
assertErr(t, err, true)
_, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}})
assertErr(t, err, true)
assertErr(t, db.CompactRange(util.Range{}), true)
assertErr(t, db.Close(), true)
}
type numberComparer struct{}
func (numberComparer) num(x []byte) (n int) {
fmt.Sscan(string(x[1:len(x)-1]), &n)
return
}
func (numberComparer) Name() string {
return "test.NumberComparer"
}
func (p numberComparer) Compare(a, b []byte) int {
return p.num(a) - p.num(b)
}
func (numberComparer) Separator(dst, a, b []byte) []byte { return nil }
func (numberComparer) Successor(dst, b []byte) []byte { return nil }
func TestDb_CustomComparer(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
Comparer: numberComparer{},
WriteBuffer: 1000,
})
defer h.close()
h.put("[10]", "ten")
h.put("[0x14]", "twenty")
for i := 0; i < 2; i++ {
h.getVal("[10]", "ten")
h.getVal("[0xa]", "ten")
h.getVal("[20]", "twenty")
h.getVal("[0x14]", "twenty")
h.get("[15]", false)
h.get("[0xf]", false)
h.compactMem()
h.compactRange("[0]", "[9999]")
}
for n := 0; n < 2; n++ {
for i := 0; i < 100; i++ {
v := fmt.Sprintf("[%d]", i*10)
h.put(v, v)
}
h.compactMem()
h.compactRange("[0]", "[1000000]")
}
}
func TestDb_ManualCompaction(t *testing.T) {
h := newDbHarness(t)
defer h.close()
if kMaxMemCompactLevel != 2 {
t.Fatal("fix test to reflect the config")
}
h.putMulti(3, "p", "q")
h.tablesPerLevel("1,1,1")
// Compaction range falls before files
h.compactRange("", "c")
h.tablesPerLevel("1,1,1")
// Compaction range falls after files
h.compactRange("r", "z")
h.tablesPerLevel("1,1,1")
// Compaction range overlaps files
h.compactRange("p1", "p9")
h.tablesPerLevel("0,0,1")
// Populate a different range
h.putMulti(3, "c", "e")
h.tablesPerLevel("1,1,2")
// Compact just the new range
h.compactRange("b", "f")
h.tablesPerLevel("0,0,2")
// Compact all
h.putMulti(1, "a", "z")
h.tablesPerLevel("0,1,2")
h.compactRange("", "")
h.tablesPerLevel("0,0,1")
}
func TestDb_BloomFilter(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
BlockCache: opt.NoCache,
Filter: filter.NewBloomFilter(10),
})
defer h.close()
key := func(i int) string {
return fmt.Sprintf("key%06d", i)
}
const (
n = 10000
indexOverheat = 19898
filterOverheat = 19799
)
// Populate multiple layers
for i := 0; i < n; i++ {
h.put(key(i), key(i))
}
h.compactMem()
h.compactRange("a", "z")
for i := 0; i < n; i += 100 {
h.put(key(i), key(i))
}
h.compactMem()
// Prevent auto compactions triggered by seeks
h.stor.DelaySync(storage.TypeTable)
// Lookup present keys. Should rarely read from small sstable.
h.stor.SetReadCounter(storage.TypeTable)
for i := 0; i < n; i++ {
h.getVal(key(i), key(i))
}
cnt := int(h.stor.ReadCounter())
t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
if min, max := n+indexOverheat+filterOverheat, n+indexOverheat+filterOverheat+2*n/100; cnt < min || cnt > max {
t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
}
// Lookup missing keys. Should rarely read from either sstable.
h.stor.ResetReadCounter()
for i := 0; i < n; i++ {
h.get(key(i)+".missing", false)
}
cnt = int(h.stor.ReadCounter())
t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
if max := 3*n/100 + indexOverheat + filterOverheat; cnt > max {
t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
}
h.stor.ReleaseSync(storage.TypeTable)
}
func TestDb_Concurrent(t *testing.T) {
const n, secs, maxkey = 4, 2, 1000
runtime.GOMAXPROCS(n)
trun(t, func(h *dbHarness) {
var closeWg sync.WaitGroup
var stop uint32
var cnt [n]uint32
for i := 0; i < n; i++ {
closeWg.Add(1)
go func(i int) {
var put, get, found uint
defer func() {
t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d",
i, cnt[i], put, get, found, get-found)
closeWg.Done()
}()
rnd := rand.New(rand.NewSource(int64(1000 + i)))
for atomic.LoadUint32(&stop) == 0 {
x := cnt[i]
k := rnd.Intn(maxkey)
kstr := fmt.Sprintf("%016d", k)
if (rnd.Int() % 2) > 0 {
put++
h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x))
} else {
get++
v, err := h.db.Get([]byte(kstr), h.ro)
if err == nil {
found++
rk, ri, rx := 0, -1, uint32(0)
fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx)
if rk != k {
t.Errorf("invalid key want=%d got=%d", k, rk)
}
if ri < 0 || ri >= n {
t.Error("invalid goroutine number: ", ri)
} else {
tx := atomic.LoadUint32(&(cnt[ri]))
if rx > tx {
t.Errorf("invalid seq number, %d > %d ", rx, tx)
}
}
} else if err != ErrNotFound {
t.Error("Get: got error: ", err)
return
}
}
atomic.AddUint32(&cnt[i], 1)
}
}(i)
}
time.Sleep(secs * time.Second)
atomic.StoreUint32(&stop, 1)
closeWg.Wait()
})
runtime.GOMAXPROCS(1)
}
func TestDb_Concurrent2(t *testing.T) {
const n, n2 = 4, 4000
runtime.GOMAXPROCS(n*2 + 2)
truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) {
var closeWg sync.WaitGroup
var stop uint32
for i := 0; i < n; i++ {
closeWg.Add(1)
go func(i int) {
for k := 0; atomic.LoadUint32(&stop) == 0; k++ {
h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
}
closeWg.Done()
}(i)
}
for i := 0; i < n; i++ {
closeWg.Add(1)
go func(i int) {
for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- {
h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
}
closeWg.Done()
}(i)
}
cmp := comparer.DefaultComparer
for i := 0; i < n2; i++ {
closeWg.Add(1)
go func(i int) {
it := h.db.NewIterator(nil, nil)
var pk []byte
for it.Next() {
kk := it.Key()
if cmp.Compare(kk, pk) <= 0 {
t.Errorf("iter %d: %q is successor of %q", i, pk, kk)
}
pk = append(pk[:0], kk...)
var k, vk, vi int
if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil {
t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err)
} else if n < 1 {
t.Errorf("iter %d: Cannot parse key %q", i, it.Key())
}
if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil {
t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err)
} else if n < 2 {
t.Errorf("iter %d: Cannot parse value %q", i, it.Value())
}
if vk != k {
t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk)
}
}
if err := it.Error(); err != nil {
t.Errorf("iter %d: Got error: %v", i, err)
} | }
atomic.StoreUint32(&stop, 1)
closeWg.Wait()
})
runtime.GOMAXPROCS(1)
}
func TestDb_CreateReopenDbOnFile(t *testing.T) {
dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
if err := os.RemoveAll(dbpath); err != nil {
t.Fatal("cannot remove old db: ", err)
}
defer os.RemoveAll(dbpath)
for i := 0; i < 3; i++ {
stor, err := storage.OpenFile(dbpath)
if err != nil {
t.Fatalf("(%d) cannot open storage: %s", i, err)
}
db, err := Open(stor, nil)
if err != nil {
t.Fatalf("(%d) cannot open db: %s", i, err)
}
if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
t.Fatalf("(%d) cannot write to db: %s", i, err)
}
if err := db.Close(); err != nil {
t.Fatalf("(%d) cannot close db: %s", i, err)
}
if err := stor.Close(); err != nil {
t.Fatalf("(%d) cannot close storage: %s", i, err)
}
}
}
func TestDb_CreateReopenDbOnFile2(t *testing.T) {
dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
if err := os.RemoveAll(dbpath); err != nil {
t.Fatal("cannot remove old db: ", err)
}
defer os.RemoveAll(dbpath)
for i := 0; i < 3; i++ {
db, err := OpenFile(dbpath, nil)
if err != nil {
t.Fatalf("(%d) cannot open db: %s", i, err)
}
if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
t.Fatalf("(%d) cannot write to db: %s", i, err)
}
if err := db.Close(); err != nil {
t.Fatalf("(%d) cannot close db: %s", i, err)
}
}
}
func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.put("foo", "v1")
h.compactMem()
h.delete("foo")
h.get("foo", false)
h.getKeyVal("")
}
func TestDb_LeveldbIssue178(t *testing.T) {
nKeys := (kMaxTableSize / 30) * 5
key1 := func(i int) string {
return fmt.Sprintf("my_key_%d", i)
}
key2 := func(i int) string {
return fmt.Sprintf("my_key_%d_xxx", i)
}
// Disable compression since it affects the creation of layers and the
// code below is trying to test against a very specific scenario.
h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
defer h.close()
// Create first key range.
batch := new(Batch)
for i := 0; i < nKeys; i++ {
batch.Put([]byte(key1(i)), []byte("value for range 1 key"))
}
h.write(batch)
// Create second key range.
batch.Reset()
for i := 0; i < nKeys; i++ {
batch.Put([]byte(key2(i)), []byte("value for range 2 key"))
}
h.write(batch)
// Delete second key range.
batch.Reset()
for i := 0; i < nKeys; i++ {
batch.Delete([]byte(key2(i)))
}
h.write(batch)
h.waitMemCompaction()
// Run manual compaction.
h.compactRange(key1(0), key1(nKeys-1))
// Checking the keys.
h.assertNumKeys(nKeys)
}
func TestDb_LeveldbIssue200(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.put("1", "b")
h.put("2", "c")
h.put("3", "d")
h.put("4", "e")
h.put("5", "f")
iter := h.db.NewIterator(nil, h.ro)
// Add an element that should not be reflected in the iterator.
h.put("25", "cd")
iter.Seek([]byte("5"))
assertBytes(t, []byte("5"), iter.Key())
iter.Prev()
assertBytes(t, []byte("4"), iter.Key())
iter.Prev()
assertBytes(t, []byte("3"), iter.Key())
iter.Next()
assertBytes(t, []byte("4"), iter.Key())
iter.Next()
assertBytes(t, []byte("5"), iter.Key())
} | it.Release()
closeWg.Done()
}(i) |
github-search.module.ts | import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
@NgModule({
imports: [
CommonModule | ],
declarations: []
})
export class GithubSearchModule { } |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.