file_name (string, length 3–137) | prefix (string, length 0–918k) | suffix (string, length 0–962k) | middle (string, length 0–812k)
---|---|---|---|
kvrpcpb.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: kvrpcpb.proto
package kvrpcpb
import (
"fmt"
"io"
"math"
proto "github.com/golang/protobuf/proto"
_ "github.com/gogo/protobuf/gogoproto"
errorpb "github.com/pingcap/kvproto/pkg/errorpb"
metapb "github.com/pingcap/kvproto/pkg/metapb"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type VerOp int32
const (
VerOp_VerPut VerOp = 0
VerOp_VerDel VerOp = 1
)
var VerOp_name = map[int32]string{
0: "VerPut",
1: "VerDel",
}
var VerOp_value = map[string]int32{
"VerPut": 0,
"VerDel": 1,
}
func (x VerOp) String() string {
return proto.EnumName(VerOp_name, int32(x))
}
func (VerOp) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{0}
}
type CommandPri int32
const (
CommandPri_Normal CommandPri = 0
CommandPri_Low CommandPri = 1
CommandPri_High CommandPri = 2
)
var CommandPri_name = map[int32]string{
0: "Normal",
1: "Low",
2: "High",
}
var CommandPri_value = map[string]int32{
"Normal": 0,
"Low": 1,
"High": 2,
}
func (x CommandPri) String() string {
return proto.EnumName(CommandPri_name, int32(x))
}
func (CommandPri) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{1}
}
type IsolationLevel int32
const (
IsolationLevel_SI IsolationLevel = 0
IsolationLevel_RC IsolationLevel = 1
)
var IsolationLevel_name = map[int32]string{
0: "SI",
1: "RC",
}
var IsolationLevel_value = map[string]int32{
"SI": 0,
"RC": 1,
}
func (x IsolationLevel) String() string {
return proto.EnumName(IsolationLevel_name, int32(x))
}
func (IsolationLevel) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{2}
}
type Op int32
const (
Op_Put Op = 0
Op_Del Op = 1
Op_Lock Op = 2
Op_Rollback Op = 3
// insert operation has a constraint that key should not exist before.
Op_Insert Op = 4
Op_PessimisticLock Op = 5
Op_CheckNotExists Op = 6
)
var Op_name = map[int32]string{
0: "Put",
1: "Del",
2: "Lock",
3: "Rollback",
4: "Insert",
5: "PessimisticLock",
6: "CheckNotExists",
}
var Op_value = map[string]int32{
"Put": 0,
"Del": 1,
"Lock": 2,
"Rollback": 3,
"Insert": 4,
"PessimisticLock": 5,
"CheckNotExists": 6,
}
func (x Op) String() string {
return proto.EnumName(Op_name, int32(x))
}
func (Op) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{3}
}
type Assertion int32
const (
Assertion_None Assertion = 0
Assertion_Exist Assertion = 1
Assertion_NotExist Assertion = 2
)
var Assertion_name = map[int32]string{
0: "None",
1: "Exist",
2: "NotExist",
}
var Assertion_value = map[string]int32{
"None": 0,
"Exist": 1,
"NotExist": 2,
}
func (x Assertion) String() string {
return proto.EnumName(Assertion_name, int32(x))
}
func (Assertion) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{4}
}
type Action int32
const (
Action_NoAction Action = 0
Action_TTLExpireRollback Action = 1
Action_LockNotExistRollback Action = 2
Action_MinCommitTSPushed Action = 3
Action_TTLExpirePessimisticRollback Action = 4
Action_LockNotExistDoNothing Action = 5
)
var Action_name = map[int32]string{
0: "NoAction",
1: "TTLExpireRollback",
2: "LockNotExistRollback",
3: "MinCommitTSPushed",
4: "TTLExpirePessimisticRollback",
5: "LockNotExistDoNothing",
}
var Action_value = map[string]int32{
"NoAction": 0,
"TTLExpireRollback": 1,
"LockNotExistRollback": 2,
"MinCommitTSPushed": 3,
"TTLExpirePessimisticRollback": 4,
"LockNotExistDoNothing": 5,
}
func (x Action) String() string {
return proto.EnumName(Action_name, int32(x))
}
func (Action) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{5}
}
type ExtraOp int32
const (
ExtraOp_Noop ExtraOp = 0
// ReadOldValue represents to output the previous value for delete/update operations.
ExtraOp_ReadOldValue ExtraOp = 1
)
var ExtraOp_name = map[int32]string{
0: "Noop",
1: "ReadOldValue",
}
var ExtraOp_value = map[string]int32{
"Noop": 0,
"ReadOldValue": 1,
}
func (x ExtraOp) String() string {
return proto.EnumName(ExtraOp_name, int32(x))
}
func (ExtraOp) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{6}
}
// A transactional get command. Lookup a value for `key` in the transaction with
// starting timestamp = `version`.
type GetRequest struct {
Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{0}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetRequest.Merge(dst, src)
}
func (m *GetRequest) XXX_Size() int {
return m.Size()
}
func (m *GetRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetRequest proto.InternalMessageInfo
func (m *GetRequest) GetContext() *Context {
if m != nil {
return m.Context
}
return nil
}
func (m *GetRequest) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *GetRequest) GetVersion() uint64 {
if m != nil {
return m.Version
}
return 0
}
type GetResponse struct {
// A region error indicates that the request was sent to the wrong TiKV node
// (or other, similar errors).
RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
// A value could not be retrieved due to the state of the database for the requested key.
Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
// A successful result.
Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
// True if the key does not exist in the database.
NotFound bool `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"`
// Time and scan details when processing the request.
ExecDetailsV2 *ExecDetailsV2 `protobuf:"bytes,6,opt,name=exec_details_v2,json=execDetailsV2" json:"exec_details_v2,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{1}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetResponse.Merge(dst, src)
}
func (m *GetResponse) XXX_Size() int {
return m.Size()
}
func (m *GetResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetResponse proto.InternalMessageInfo
func (m *GetResponse) GetRegionError() *errorpb.Error {
if m != nil {
return m.RegionError
}
return nil
}
func (m *GetResponse) GetError() *KeyError {
if m != nil {
return m.Error
}
return nil
}
func (m *GetResponse) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func (m *GetResponse) GetNotFound() bool {
if m != nil {
return m.NotFound
}
return false
}
func (m *GetResponse) GetExecDetailsV2() *ExecDetailsV2 {
if m != nil {
return m.ExecDetailsV2
}
return nil
}
// Scan fetches values for a range of keys; it is part of the transaction with
// starting timestamp = `version`.
type ScanRequest struct {
Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
// The maximum number of results to return.
Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
// Return only the keys found by scanning, not their values.
KeyOnly bool `protobuf:"varint,5,opt,name=key_only,json=keyOnly,proto3" json:"key_only,omitempty"`
Reverse bool `protobuf:"varint,6,opt,name=reverse,proto3" json:"reverse,omitempty"`
// For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key;
// and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key.
EndKey []byte `protobuf:"bytes,7,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
// If sample_step > 0, skips 'sample_step - 1' number of keys after each returned key.
// locks are not checked.
SampleStep uint32 `protobuf:"varint,8,opt,name=sample_step,json=sampleStep,proto3" json:"sample_step,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanRequest) Reset() { *m = ScanRequest{} }
func (m *ScanRequest) String() string { return proto.CompactTextString(m) }
func (*ScanRequest) ProtoMessage() {}
func (*ScanRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{2}
}
func (m *ScanRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScanRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ScanRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanRequest.Merge(dst, src)
}
func (m *ScanRequest) XXX_Size() int {
return m.Size()
}
func (m *ScanRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ScanRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ScanRequest proto.InternalMessageInfo
func (m *ScanRequest) GetContext() *Context {
if m != nil {
return m.Context
}
return nil
}
func (m *ScanRequest) GetStartKey() []byte {
if m != nil {
return m.StartKey
}
return nil
}
func (m *ScanRequest) GetLimit() uint32 {
if m != nil {
return m.Limit
}
return 0
}
func (m *ScanRequest) GetVersion() uint64 {
if m != nil {
return m.Version
}
return 0
}
func (m *ScanRequest) GetKeyOnly() bool {
if m != nil {
return m.KeyOnly
}
return false
}
func (m *ScanRequest) GetReverse() bool {
if m != nil {
return m.Reverse
}
return false
}
func (m *ScanRequest) GetEndKey() []byte {
if m != nil {
return m.EndKey
}
return nil
}
func (m *ScanRequest) GetSampleStep() uint32 {
if m != nil {
return m.SampleStep
}
return 0
}
type ScanResponse struct {
RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
// Each KvPair may contain a key error.
Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"`
// This KeyError exists when some key is locked but we cannot check locks of all keys.
// In this case, `pairs` should be empty and the client should redo scanning all the keys
// after resolving the lock.
Error *KeyError `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ScanResponse) Reset() { *m = ScanResponse{} }
func (m *ScanResponse) String() string { return proto.CompactTextString(m) }
func (*ScanResponse) ProtoMessage() {}
func (*ScanResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{3}
}
func (m *ScanResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScanResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ScanResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScanResponse.Merge(dst, src)
}
func (m *ScanResponse) XXX_Size() int {
return m.Size()
}
func (m *ScanResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ScanResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ScanResponse proto.InternalMessageInfo
func (m *ScanResponse) GetRegionError() *errorpb.Error {
if m != nil {
return m.RegionError
}
return nil
}
func (m *ScanResponse) GetPairs() []*KvPair {
if m != nil {
return m.Pairs
}
return nil
}
func (m *ScanResponse) GetError() *KeyError {
if m != nil {
return m.Error
}
return nil
}
// A prewrite is the first phase of writing to TiKV. It contains all data to be written in a transaction.
// TiKV will write the data in a preliminary state. Data cannot be read until it has been committed.
// The client should only commit a transaction once all prewrites succeed.
type PrewriteRequest struct {
Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
// The data to be written to the database.
Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"`
// The client picks one key to be primary (unrelated to the primary key concept in SQL). This
// key's lock is the source of truth for the state of a transaction. All other locks due to a
// transaction will point to the primary lock.
PrimaryLock []byte `protobuf:"bytes,3,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"`
// Identifies the transaction being written.
StartVersion uint64 `protobuf:"varint,4,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
LockTtl uint64 `protobuf:"varint,5,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"`
// TiKV can skip some checks, used for speeding up data migration.
SkipConstraintCheck bool `protobuf:"varint,6,opt,name=skip_constraint_check,json=skipConstraintCheck,proto3" json:"skip_constraint_check,omitempty"`
// For pessimistic transaction, some mutations don't need to be locked, for example, non-unique index key.
IsPessimisticLock []bool `protobuf:"varint,7,rep,packed,name=is_pessimistic_lock,json=isPessimisticLock" json:"is_pessimistic_lock,omitempty"`
// How many keys this transaction involves in this region.
TxnSize uint64 `protobuf:"varint,8,opt,name=txn_size,json=txnSize,proto3" json:"txn_size,omitempty"`
// For pessimistic transactions only; used to check if a conflict lock is already committed.
ForUpdateTs uint64 `protobuf:"varint,9,opt,name=for_update_ts,json=forUpdateTs,proto3" json:"for_update_ts,omitempty"`
// If min_commit_ts > 0, this is a large transaction request, the final commit_ts
// will be inferred from `min_commit_ts`.
MinCommitTs uint64 `protobuf:"varint,10,opt,name=min_commit_ts,json=minCommitTs,proto3" json:"min_commit_ts,omitempty"`
// When async commit is enabled, `secondaries` should be set as the key list of all secondary
// locks if the request prewrites the primary lock.
UseAsyncCommit bool `protobuf:"varint,11,opt,name=use_async_commit,json=useAsyncCommit,proto3" json:"use_async_commit,omitempty"`
Secondaries [][]byte `protobuf:"bytes,12,rep,name=secondaries" json:"secondaries,omitempty"`
// When the transaction involves only one region, it's possible to commit the transaction
// directly with 1PC protocol.
TryOnePc bool `protobuf:"varint,13,opt,name=try_one_pc,json=tryOnePc,proto3" json:"try_one_pc,omitempty"`
// The max commit ts is reserved for limiting the commit ts of 1PC or async commit, which can be used to avoid
// inconsistency with schema change.
MaxCommitTs uint64 `protobuf:"varint,14,opt,name=max_commit_ts,json=maxCommitTs,proto3" json:"max_commit_ts,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PrewriteRequest) Reset() { *m = PrewriteRequest{} }
func (m *PrewriteRequest) String() string { return proto.CompactTextString(m) }
func (*PrewriteRequest) ProtoMessage() {}
func (*PrewriteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{4}
}
func (m *PrewriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PrewriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PrewriteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PrewriteRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PrewriteRequest.Merge(dst, src)
}
func (m *PrewriteRequest) XXX_Size() int {
return m.Size()
}
func (m *PrewriteRequest) XXX_DiscardUnknown() {
xxx_messageInfo_PrewriteRequest.DiscardUnknown(m)
}
var xxx_messageInfo_PrewriteRequest proto.InternalMessageInfo
func (m *PrewriteRequest) GetContext() *Context {
if m != nil {
return m.Context
}
return nil
}
func (m *PrewriteRequest) GetMutations() []*Mutation {
if m != nil {
return m.Mutations
}
return nil
}
func (m *PrewriteRequest) GetPrimaryLock() []byte {
if m != nil {
return m.PrimaryLock
}
return nil
}
func (m *PrewriteRequest) GetStartVersion() uint64 {
if m != nil {
return m.StartVersion
}
return 0
}
func (m *PrewriteRequest) GetLockTtl() uint64 {
if m != nil {
return m.LockTtl
}
return 0
}
func (m *PrewriteRequest) GetSkipConstraintCheck() bool {
if m != nil {
return m.SkipConstraintCheck
}
return false
}
func (m *PrewriteRequest) GetIsPessimisticLock() []bool {
if m != nil {
return m.IsPessimisticLock
}
return nil
}
func (m *PrewriteRequest) GetTxnSize() uint64 {
if m != nil {
return m.TxnSize
}
return 0
}
func (m *PrewriteRequest) GetForUpdateTs() uint64 {
if m != nil {
return m.ForUpdateTs
}
return 0
}
func (m *PrewriteRequest) GetMinCommitTs() uint64 {
if m != nil {
return m.MinCommitTs
}
return 0
}
func (m *PrewriteRequest) GetUseAsyncCommit() bool {
if m != nil {
return m.UseAsyncCommit
}
return false
}
func (m *PrewriteRequest) GetSecondaries() [][]byte {
if m != nil {
return m.Secondaries
}
return nil
}
func (m *PrewriteRequest) GetTryOnePc() bool {
if m != nil {
return m.TryOnePc
}
return false
}
func (m *PrewriteRequest) GetMaxCommitTs() uint64 {
if m != nil {
return m.MaxCommitTs
}
return 0
}
type PrewriteResponse struct {
RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
Errors []*KeyError `protobuf:"bytes,2,rep,name=errors" json:"errors,omitempty"`
// 0 if the min_commit_ts is not ready or any other reason that async
// commit cannot proceed. The client can then fallback to normal way to
// continue committing the transaction if prewrite are all finished.
MinCommitTs uint64 `protobuf:"varint,3,opt,name=min_commit_ts,json=minCommitTs,proto3" json:"min_commit_ts,omitempty"`
// When the transaction is successfully committed with 1PC protocol, this field will be set to
// the commit ts of the transaction. Otherwise, if TiKV failed to commit it with 1PC or the
// transaction is not 1PC, the value will be 0.
OnePcCommitTs uint64 `protobuf:"varint,4,opt,name=one_pc_commit_ts,json=onePcCommitTs,proto3" json:"one_pc_commit_ts,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PrewriteResponse) Reset() { *m = PrewriteResponse{} }
func (m *PrewriteResponse) String() string { return proto.CompactTextString(m) }
func (*PrewriteResponse) ProtoMessage() {}
func (*PrewriteResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{5}
}
func (m *PrewriteResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PrewriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PrewriteResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PrewriteResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PrewriteResponse.Merge(dst, src)
}
func (m *PrewriteResponse) XXX_Size() int {
return m.Size()
}
func (m *PrewriteResponse) XXX_DiscardUnknown() {
xxx_messageInfo_PrewriteResponse.DiscardUnknown(m)
}
var xxx_messageInfo_PrewriteResponse proto.InternalMessageInfo
func (m *PrewriteResponse) GetRegionError() *errorpb.Error {
if m != nil {
return m.RegionError
}
return nil
}
func (m *PrewriteResponse) GetErrors() []*KeyError {
if m != nil {
return m.Errors
}
return nil
}
func (m *PrewriteResponse) GetMinCommitTs() uint64 {
if m != nil {
return m.MinCommitTs
}
return 0
}
func (m *PrewriteResponse) GetOnePcCommitTs() uint64 {
if m != nil {
return m.OnePcCommitTs
}
return 0
}
// Lock a set of keys to prepare to write to them.
type PessimisticLockRequest struct {
Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
// In this case every `Op` of the mutations must be `PessimisticLock`.
Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"`
PrimaryLock []byte `protobuf:"bytes,3,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"`
StartVersion uint64 `protobuf:"varint,4,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
LockTtl uint64 `protobuf:"varint,5,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"`
// Each locking command in a pessimistic transaction has its own timestamp. If locking fails, then
// the corresponding SQL statement can be retried with a later timestamp, TiDB does not need to
// retry the whole transaction. The name comes from the `SELECT ... FOR UPDATE` SQL statement which
// is a locking read. Each `SELECT ... FOR UPDATE` in a transaction will be assigned its own
// timestamp.
ForUpdateTs uint64 `protobuf:"varint,6,opt,name=for_update_ts,json=forUpdateTs,proto3" json:"for_update_ts,omitempty"`
// If the request is the first lock request, we don't need to detect deadlock.
IsFirstLock bool `protobuf:"varint,7,opt,name=is_first_lock,json=isFirstLock,proto3" json:"is_first_lock,omitempty"`
// Time to wait for lock released in milliseconds when encountering locks.
// 0 means using default timeout in TiKV. Negative means no wait.
WaitTimeout int64 `protobuf:"varint,8,opt,name=wait_timeout,json=waitTimeout,proto3" json:"wait_timeout,omitempty"`
// If it is true, TiKV will acquire the pessimistic lock regardless of write conflict
// and return the latest value. It's only supported for single mutation.
Force bool `protobuf:"varint,9,opt,name=force,proto3" json:"force,omitempty"`
// If it is true, TiKV will return values of the keys if no error, so TiDB can cache the values for
// later read in the same transaction.
// When 'force' is set to true, this field is ignored.
ReturnValues bool `protobuf:"varint,10,opt,name=return_values,json=returnValues,proto3" json:"return_values,omitempty"`
// If min_commit_ts > 0, this is large transaction proto, the final commit_ts
// would be infered from min_commit_ts.
MinCommitTs uint64 `protobuf:"varint,11,opt,name=min_commit_ts,json=minCommitTs,proto3" json:"min_commit_ts,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PessimisticLockRequest) Reset() { *m = PessimisticLockRequest{} }
func (m *PessimisticLockRequest) String() string { return proto.CompactTextString(m) }
func (*PessimisticLockRequest) ProtoMessage() {}
func (*PessimisticLockRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{6}
}
func (m *PessimisticLockRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PessimisticLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PessimisticLockRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PessimisticLockRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PessimisticLockRequest.Merge(dst, src)
}
func (m *PessimisticLockRequest) XXX_Size() int {
return m.Size()
}
func (m *PessimisticLockRequest) XXX_DiscardUnknown() {
xxx_messageInfo_PessimisticLockRequest.DiscardUnknown(m)
}
var xxx_messageInfo_PessimisticLockRequest proto.InternalMessageInfo
func (m *PessimisticLockRequest) GetContext() *Context {
if m != nil {
return m.Context
}
return nil
}
func (m *PessimisticLockRequest) GetMutations() []*Mutation {
if m != nil {
return m.Mutations
}
return nil
}
func (m *PessimisticLockRequest) GetPrimaryLock() []byte {
if m != nil {
return m.PrimaryLock
}
return nil
}
func (m *PessimisticLockRequest) GetStartVersion() uint64 {
if m != nil {
return m.StartVersion
}
return 0
}
func (m *PessimisticLockRequest) GetLockTtl() uint64 {
if m != nil {
return m.LockTtl
}
return 0
}
func (m *PessimisticLockRequest) GetForUpdateTs() uint64 {
if m != nil {
return m.ForUpdateTs
}
return 0
}
func (m *PessimisticLockRequest) GetIsFirstLock() bool {
if m != nil {
return m.IsFirstLock
}
return false
}
func (m *PessimisticLockRequest) GetWaitTimeout() int64 {
if m != nil {
return m.WaitTimeout
}
return 0
}
func (m *PessimisticLockRequest) GetForce() bool {
if m != nil {
return m.Force
}
return false
}
func (m *PessimisticLockRequest) GetReturnValues() bool {
if m != nil {
return m.ReturnValues
}
return false
}
func (m *PessimisticLockRequest) GetMinCommitTs() uint64 {
if m != nil {
return m.MinCommitTs
}
return 0
}
type PessimisticLockResponse struct {
RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
Errors []*KeyError `protobuf:"bytes,2,rep,name=errors" json:"errors,omitempty"`
// It carries the latest value and its commit ts if force in PessimisticLockRequest is true.
CommitTs uint64 `protobuf:"varint,3,opt,name=commit_ts,json=commitTs,proto3" json:"commit_ts,omitempty"`
Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
// The values is set if 'return_values' is true in the request and no error.
// If 'force' is true, this field is not used.
Values [][]byte `protobuf:"bytes,5,rep,name=values" json:"values,omitempty"`
// Indicates whether the values at the same index is correspond to an existing key.
// In legacy TiKV, this field is not used even 'force' is false. In that case, an empty value indicates
// two possible situations: (1) the key does not exist. (2) the key exists but the value is empty.
NotFounds []bool `protobuf:"varint,6,rep,packed,name=not_founds,json=notFounds" json:"not_founds,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PessimisticLockResponse) Reset() { *m = PessimisticLockResponse{} }
func (m *PessimisticLockResponse) String() string { return proto.CompactTextString(m) }
func (*PessimisticLockResponse) ProtoMessage() {}
func (*PessimisticLockResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{7}
}
func (m *PessimisticLockResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PessimisticLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PessimisticLockResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PessimisticLockResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PessimisticLockResponse.Merge(dst, src)
}
func (m *PessimisticLockResponse) XXX_Size() int {
return m.Size()
}
func (m *PessimisticLockResponse) XXX_DiscardUnknown() {
xxx_messageInfo_PessimisticLockResponse.DiscardUnknown(m)
}
var xxx_messageInfo_PessimisticLockResponse proto.InternalMessageInfo
func (m *PessimisticLockResponse) GetRegionError() *errorpb.Error {
if m != nil {
return m.RegionError
}
return nil
}
func (m *PessimisticLockResponse) GetErrors() []*KeyError {
if m != nil {
return m.Errors
}
return nil
}
func (m *PessimisticLockResponse) GetCommitTs() uint64 {
if m != nil {
return m.CommitTs
}
return 0
}
func (m *PessimisticLockResponse) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func (m *PessimisticLockResponse) GetValues() [][]byte {
if m != nil {
return m.Values
}
return nil
}
func (m *PessimisticLockResponse) GetNotFounds() []bool {
if m != nil {
return m.NotFounds
}
return nil
}
// Unlock keys locked using `PessimisticLockRequest`.
type PessimisticRollbackRequest struct {
Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
ForUpdateTs uint64 `protobuf:"varint,3,opt,name=for_update_ts,json=forUpdateTs,proto3" json:"for_update_ts,omitempty"`
Keys [][]byte `protobuf:"bytes,4,rep,name=keys" json:"keys,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PessimisticRollbackRequest) Reset() { *m = PessimisticRollbackRequest{} }
func (m *PessimisticRollbackRequest) String() string { return proto.CompactTextString(m) }
func (*PessimisticRollbackRequest) ProtoMessage() {}
func (*PessimisticRollbackRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{8}
}
func (m *PessimisticRollbackRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PessimisticRollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PessimisticRollbackRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *PessimisticRollbackRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PessimisticRollbackRequest.Merge(dst, src)
}
func (m *PessimisticRollbackRequest) XXX_Size() int {
return m.Size()
}
func (m *PessimisticRollbackRequest) XXX_DiscardUnknown() {
xxx_messageInfo_PessimisticRollbackRequest.DiscardUnknown(m)
}
var xxx_messageInfo_PessimisticRollbackRequest proto.InternalMessageInfo
func (m *PessimisticRollbackRequest) GetContext() *Context {
if m != nil {
return m.Context
}
return nil
}
func (m *PessimisticRollbackRequest) GetStartVersion() uint64 {
if m != nil {
return m.StartVersion
}
return 0
}
func (m *PessimisticRollbackRequest) GetForUpdateTs() uint64 {
if m != nil {
return m.ForUpdateTs
}
return 0
}
func (m *PessimisticRollbackRequest) GetKeys() [][]byte {
if m != nil {
return m.Keys
}
return nil
}
// PessimisticRollbackResponse is the result of a PessimisticRollbackRequest:
// a region-level error and/or per-key errors encountered while rolling back.
type PessimisticRollbackResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	// Per-key errors encountered during the rollback.
	Errors []*KeyError `protobuf:"bytes,2,rep,name=errors" json:"errors,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for PessimisticRollbackResponse.
func (m *PessimisticRollbackResponse) Reset() { *m = PessimisticRollbackResponse{} }
func (m *PessimisticRollbackResponse) String() string { return proto.CompactTextString(m) }
func (*PessimisticRollbackResponse) ProtoMessage() {}
func (*PessimisticRollbackResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{9}
}
func (m *PessimisticRollbackResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *PessimisticRollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_PessimisticRollbackResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *PessimisticRollbackResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PessimisticRollbackResponse.Merge(dst, src)
}
func (m *PessimisticRollbackResponse) XXX_Size() int {
	return m.Size()
}
func (m *PessimisticRollbackResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_PessimisticRollbackResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_PessimisticRollbackResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *PessimisticRollbackResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *PessimisticRollbackResponse) GetErrors() []*KeyError {
	if m != nil {
		return m.Errors
	}
	return nil
}
// Used to update the lock_ttl of a pessimistic and/or large transaction to prevent it from being killed.
type TxnHeartBeatRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	// The key of the lock to update.
	PrimaryLock []byte `protobuf:"bytes,2,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"`
	// Start timestamp of the large transaction.
	StartVersion uint64 `protobuf:"varint,3,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	// The new TTL the sender would like.
	AdviseLockTtl uint64 `protobuf:"varint,4,opt,name=advise_lock_ttl,json=adviseLockTtl,proto3" json:"advise_lock_ttl,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for TxnHeartBeatRequest.
func (m *TxnHeartBeatRequest) Reset() { *m = TxnHeartBeatRequest{} }
func (m *TxnHeartBeatRequest) String() string { return proto.CompactTextString(m) }
func (*TxnHeartBeatRequest) ProtoMessage() {}
func (*TxnHeartBeatRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{10}
}
func (m *TxnHeartBeatRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *TxnHeartBeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_TxnHeartBeatRequest.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *TxnHeartBeatRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TxnHeartBeatRequest.Merge(dst, src)
}
func (m *TxnHeartBeatRequest) XXX_Size() int {
	return m.Size()
}
func (m *TxnHeartBeatRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_TxnHeartBeatRequest.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_TxnHeartBeatRequest proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *TxnHeartBeatRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *TxnHeartBeatRequest) GetPrimaryLock() []byte {
	if m != nil {
		return m.PrimaryLock
	}
	return nil
}
func (m *TxnHeartBeatRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
func (m *TxnHeartBeatRequest) GetAdviseLockTtl() uint64 {
	if m != nil {
		return m.AdviseLockTtl
	}
	return 0
}
// TxnHeartBeatResponse is the result of a TxnHeartBeatRequest, reporting the
// TTL actually applied to the lock (or an error).
type TxnHeartBeatResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	// Key-level error, set when the heartbeat itself failed.
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	// The TTL actually set on the requested lock.
	LockTtl uint64 `protobuf:"varint,3,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for TxnHeartBeatResponse.
func (m *TxnHeartBeatResponse) Reset() { *m = TxnHeartBeatResponse{} }
func (m *TxnHeartBeatResponse) String() string { return proto.CompactTextString(m) }
func (*TxnHeartBeatResponse) ProtoMessage() {}
func (*TxnHeartBeatResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{11}
}
func (m *TxnHeartBeatResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *TxnHeartBeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_TxnHeartBeatResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *TxnHeartBeatResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TxnHeartBeatResponse.Merge(dst, src)
}
func (m *TxnHeartBeatResponse) XXX_Size() int {
	return m.Size()
}
func (m *TxnHeartBeatResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_TxnHeartBeatResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_TxnHeartBeatResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *TxnHeartBeatResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *TxnHeartBeatResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
func (m *TxnHeartBeatResponse) GetLockTtl() uint64 {
	if m != nil {
		return m.LockTtl
	}
	return 0
}
// CheckTxnStatusRequest checks the status of a transaction.
// If the transaction is rolled back/committed, return that result.
// If the TTL of the transaction is exhausted, abort that transaction and inform the caller.
// Otherwise, returns the TTL information for the transaction.
// CheckTxnStatusRequest may also push forward the minCommitTS of a large transaction.
type CheckTxnStatusRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	// Primary key and lock ts together to locate the primary lock of a transaction.
	PrimaryKey []byte `protobuf:"bytes,2,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"`
	// Starting timestamp of the transaction being checked.
	LockTs uint64 `protobuf:"varint,3,opt,name=lock_ts,json=lockTs,proto3" json:"lock_ts,omitempty"`
	// The start timestamp of the transaction which this request is part of.
	CallerStartTs uint64 `protobuf:"varint,4,opt,name=caller_start_ts,json=callerStartTs,proto3" json:"caller_start_ts,omitempty"`
	// The client must specify the current time to TiKV using this timestamp. It is used to check TTL
	// timeouts. It may be inaccurate.
	CurrentTs uint64 `protobuf:"varint,5,opt,name=current_ts,json=currentTs,proto3" json:"current_ts,omitempty"`
	// If true, then TiKV will leave a rollback tombstone in the write CF for `primary_key`, even if
	// that key is not locked.
	RollbackIfNotExist bool `protobuf:"varint,6,opt,name=rollback_if_not_exist,json=rollbackIfNotExist,proto3" json:"rollback_if_not_exist,omitempty"`
	// This field is set to true only if the transaction is known to fall back from async commit.
	// Then, CheckTxnStatus treats the transaction as non-async-commit even if the use_async_commit
	// field in the primary lock is true.
	ForceSyncCommit bool `protobuf:"varint,7,opt,name=force_sync_commit,json=forceSyncCommit,proto3" json:"force_sync_commit,omitempty"`
	// If the check request is used to resolve or decide the transaction status for an input pessimistic
	// lock, the transaction status could not be decided if the primary lock is pessimistic too and
	// it's still uncertain.
	ResolvingPessimisticLock bool `protobuf:"varint,8,opt,name=resolving_pessimistic_lock,json=resolvingPessimisticLock,proto3" json:"resolving_pessimistic_lock,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CheckTxnStatusRequest.
func (m *CheckTxnStatusRequest) Reset() { *m = CheckTxnStatusRequest{} }
func (m *CheckTxnStatusRequest) String() string { return proto.CompactTextString(m) }
func (*CheckTxnStatusRequest) ProtoMessage() {}
func (*CheckTxnStatusRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{12}
}
func (m *CheckTxnStatusRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CheckTxnStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CheckTxnStatusRequest.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckTxnStatusRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckTxnStatusRequest.Merge(dst, src)
}
func (m *CheckTxnStatusRequest) XXX_Size() int {
	return m.Size()
}
func (m *CheckTxnStatusRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckTxnStatusRequest.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CheckTxnStatusRequest proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CheckTxnStatusRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *CheckTxnStatusRequest) GetPrimaryKey() []byte {
	if m != nil {
		return m.PrimaryKey
	}
	return nil
}
func (m *CheckTxnStatusRequest) GetLockTs() uint64 {
	if m != nil {
		return m.LockTs
	}
	return 0
}
func (m *CheckTxnStatusRequest) GetCallerStartTs() uint64 {
	if m != nil {
		return m.CallerStartTs
	}
	return 0
}
func (m *CheckTxnStatusRequest) GetCurrentTs() uint64 {
	if m != nil {
		return m.CurrentTs
	}
	return 0
}
func (m *CheckTxnStatusRequest) GetRollbackIfNotExist() bool {
	if m != nil {
		return m.RollbackIfNotExist
	}
	return false
}
func (m *CheckTxnStatusRequest) GetForceSyncCommit() bool {
	if m != nil {
		return m.ForceSyncCommit
	}
	return false
}
func (m *CheckTxnStatusRequest) GetResolvingPessimisticLock() bool {
	if m != nil {
		return m.ResolvingPessimisticLock
	}
	return false
}
// CheckTxnStatusResponse is the result of a CheckTxnStatusRequest: the
// transaction's status (locked/committed/rolled back), the action TiKV took,
// and the primary lock's information when still locked.
type CheckTxnStatusResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	// Key-level error, set when the status check itself failed.
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	// Three kinds of transaction status:
	//   locked: lock_ttl > 0
	//   committed: commit_version > 0
	//   rolled back: lock_ttl = 0 && commit_version = 0
	LockTtl uint64 `protobuf:"varint,3,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"`
	CommitVersion uint64 `protobuf:"varint,4,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"`
	// The action performed by TiKV (and why if the action is to rollback).
	Action Action `protobuf:"varint,5,opt,name=action,proto3,enum=kvrpcpb.Action" json:"action,omitempty"`
	LockInfo *LockInfo `protobuf:"bytes,6,opt,name=lock_info,json=lockInfo" json:"lock_info,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CheckTxnStatusResponse.
func (m *CheckTxnStatusResponse) Reset() { *m = CheckTxnStatusResponse{} }
func (m *CheckTxnStatusResponse) String() string { return proto.CompactTextString(m) }
func (*CheckTxnStatusResponse) ProtoMessage() {}
func (*CheckTxnStatusResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{13}
}
func (m *CheckTxnStatusResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CheckTxnStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CheckTxnStatusResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckTxnStatusResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckTxnStatusResponse.Merge(dst, src)
}
func (m *CheckTxnStatusResponse) XXX_Size() int {
	return m.Size()
}
func (m *CheckTxnStatusResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckTxnStatusResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CheckTxnStatusResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CheckTxnStatusResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *CheckTxnStatusResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
func (m *CheckTxnStatusResponse) GetLockTtl() uint64 {
	if m != nil {
		return m.LockTtl
	}
	return 0
}
func (m *CheckTxnStatusResponse) GetCommitVersion() uint64 {
	if m != nil {
		return m.CommitVersion
	}
	return 0
}
func (m *CheckTxnStatusResponse) GetAction() Action {
	if m != nil {
		return m.Action
	}
	return Action_NoAction
}
func (m *CheckTxnStatusResponse) GetLockInfo() *LockInfo {
	if m != nil {
		return m.LockInfo
	}
	return nil
}
// Part of the async commit protocol, checks for locks on all supplied keys. If a lock is missing,
// does not have a successful status, or belongs to another transaction, TiKV will leave a rollback
// tombstone for that key.
type CheckSecondaryLocksRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Keys [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"`
	// Identifies the transaction we are investigating.
	StartVersion uint64 `protobuf:"varint,3,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CheckSecondaryLocksRequest.
func (m *CheckSecondaryLocksRequest) Reset() { *m = CheckSecondaryLocksRequest{} }
func (m *CheckSecondaryLocksRequest) String() string { return proto.CompactTextString(m) }
func (*CheckSecondaryLocksRequest) ProtoMessage() {}
func (*CheckSecondaryLocksRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{14}
}
func (m *CheckSecondaryLocksRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CheckSecondaryLocksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CheckSecondaryLocksRequest.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckSecondaryLocksRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckSecondaryLocksRequest.Merge(dst, src)
}
func (m *CheckSecondaryLocksRequest) XXX_Size() int {
	return m.Size()
}
func (m *CheckSecondaryLocksRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckSecondaryLocksRequest.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CheckSecondaryLocksRequest proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CheckSecondaryLocksRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *CheckSecondaryLocksRequest) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
func (m *CheckSecondaryLocksRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
// CheckSecondaryLocksResponse is the result of a CheckSecondaryLocksRequest:
// the locks found for the queried keys and, if any were committed, the commit ts.
type CheckSecondaryLocksResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	// Key-level error, set when the check itself failed.
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	// For each key in `keys` in `CheckSecondaryLocks`, there will be a lock in
	// this list if there is a lock present and belonging to the correct transaction,
	// nil otherwise.
	Locks []*LockInfo `protobuf:"bytes,3,rep,name=locks" json:"locks,omitempty"`
	// If any of the locks have been committed, this is the commit ts used. If no
	// locks have been committed, it will be zero.
	CommitTs uint64 `protobuf:"varint,4,opt,name=commit_ts,json=commitTs,proto3" json:"commit_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CheckSecondaryLocksResponse.
func (m *CheckSecondaryLocksResponse) Reset() { *m = CheckSecondaryLocksResponse{} }
func (m *CheckSecondaryLocksResponse) String() string { return proto.CompactTextString(m) }
func (*CheckSecondaryLocksResponse) ProtoMessage() {}
func (*CheckSecondaryLocksResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{15}
}
func (m *CheckSecondaryLocksResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CheckSecondaryLocksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CheckSecondaryLocksResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckSecondaryLocksResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckSecondaryLocksResponse.Merge(dst, src)
}
func (m *CheckSecondaryLocksResponse) XXX_Size() int {
	return m.Size()
}
func (m *CheckSecondaryLocksResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckSecondaryLocksResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CheckSecondaryLocksResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CheckSecondaryLocksResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *CheckSecondaryLocksResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
func (m *CheckSecondaryLocksResponse) GetLocks() []*LockInfo {
	if m != nil {
		return m.Locks
	}
	return nil
}
func (m *CheckSecondaryLocksResponse) GetCommitTs() uint64 {
	if m != nil {
		return m.CommitTs
	}
	return 0
}
// The second phase of writing to TiKV. If there are no errors or conflicts, then this request
// commits a transaction so that its data can be read by other transactions.
type CommitRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	// Identifies the transaction.
	StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	// All keys in the transaction (to be committed).
	Keys [][]byte `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"`
	// Timestamp for the end of the transaction. Must be greater than `start_version`.
	CommitVersion uint64 `protobuf:"varint,4,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CommitRequest.
func (m *CommitRequest) Reset() { *m = CommitRequest{} }
func (m *CommitRequest) String() string { return proto.CompactTextString(m) }
func (*CommitRequest) ProtoMessage() {}
func (*CommitRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{16}
}
func (m *CommitRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CommitRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CommitRequest.Merge(dst, src)
}
func (m *CommitRequest) XXX_Size() int {
	return m.Size()
}
func (m *CommitRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CommitRequest.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CommitRequest proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CommitRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *CommitRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
func (m *CommitRequest) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
func (m *CommitRequest) GetCommitVersion() uint64 {
	if m != nil {
		return m.CommitVersion
	}
	return 0
}
// CommitResponse is the result of a CommitRequest.
type CommitResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	// Key-level error, set when the commit itself failed.
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	// If the commit ts is derived from min_commit_ts, this field should be set.
	CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CommitResponse.
func (m *CommitResponse) Reset() { *m = CommitResponse{} }
func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
func (*CommitResponse) ProtoMessage() {}
func (*CommitResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{17}
}
func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CommitResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CommitResponse.Merge(dst, src)
}
func (m *CommitResponse) XXX_Size() int {
	return m.Size()
}
func (m *CommitResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CommitResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CommitResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *CommitResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
func (m *CommitResponse) GetCommitVersion() uint64 {
	if m != nil {
		return m.CommitVersion
	}
	return 0
}
// Not yet implemented.
type ImportRequest struct {
	// Mutations to apply directly at the given commit version.
	Mutations []*Mutation `protobuf:"bytes,1,rep,name=mutations" json:"mutations,omitempty"`
	CommitVersion uint64 `protobuf:"varint,2,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for ImportRequest.
func (m *ImportRequest) Reset() { *m = ImportRequest{} }
func (m *ImportRequest) String() string { return proto.CompactTextString(m) }
func (*ImportRequest) ProtoMessage() {}
func (*ImportRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{18}
}
func (m *ImportRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ImportRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_ImportRequest.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ImportRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportRequest.Merge(dst, src)
}
func (m *ImportRequest) XXX_Size() int {
	return m.Size()
}
func (m *ImportRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportRequest.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_ImportRequest proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *ImportRequest) GetMutations() []*Mutation {
	if m != nil {
		return m.Mutations
	}
	return nil
}
func (m *ImportRequest) GetCommitVersion() uint64 {
	if m != nil {
		return m.CommitVersion
	}
	return 0
}
// ImportResponse is the result of an ImportRequest. Unlike most responses in
// this file, the error is a plain string rather than a KeyError.
type ImportResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for ImportResponse.
func (m *ImportResponse) Reset() { *m = ImportResponse{} }
func (m *ImportResponse) String() string { return proto.CompactTextString(m) }
func (*ImportResponse) ProtoMessage() {}
func (*ImportResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{19}
}
func (m *ImportResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ImportResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_ImportResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ImportResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResponse.Merge(dst, src)
}
func (m *ImportResponse) XXX_Size() int {
	return m.Size()
}
func (m *ImportResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_ImportResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *ImportResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *ImportResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// Cleanup a key by possibly unlocking it.
// From 4.0 onwards, this message is no longer used.
type CleanupRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	StartVersion uint64 `protobuf:"varint,3,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	// The current timestamp, used in combination with a lock's TTL to determine
	// if the lock has expired. If `current_ts == 0`, then the key will be unlocked
	// irrespective of its TTL.
	CurrentTs uint64 `protobuf:"varint,4,opt,name=current_ts,json=currentTs,proto3" json:"current_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CleanupRequest.
func (m *CleanupRequest) Reset() { *m = CleanupRequest{} }
func (m *CleanupRequest) String() string { return proto.CompactTextString(m) }
func (*CleanupRequest) ProtoMessage() {}
func (*CleanupRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{20}
}
func (m *CleanupRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CleanupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CleanupRequest.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CleanupRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CleanupRequest.Merge(dst, src)
}
func (m *CleanupRequest) XXX_Size() int {
	return m.Size()
}
func (m *CleanupRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CleanupRequest.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CleanupRequest proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CleanupRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *CleanupRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
func (m *CleanupRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
func (m *CleanupRequest) GetCurrentTs() uint64 {
	if m != nil {
		return m.CurrentTs
	}
	return 0
}
// CleanupResponse is the result of a CleanupRequest.
type CleanupResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	// Key-level error, set when the cleanup itself failed.
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	// Set if the key is already committed.
	CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for CleanupResponse.
func (m *CleanupResponse) Reset() { *m = CleanupResponse{} }
func (m *CleanupResponse) String() string { return proto.CompactTextString(m) }
func (*CleanupResponse) ProtoMessage() {}
func (*CleanupResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{21}
}
func (m *CleanupResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CleanupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_CleanupResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CleanupResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CleanupResponse.Merge(dst, src)
}
func (m *CleanupResponse) XXX_Size() int {
	return m.Size()
}
func (m *CleanupResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CleanupResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_CleanupResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *CleanupResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *CleanupResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
func (m *CleanupResponse) GetCommitVersion() uint64 {
	if m != nil {
		return m.CommitVersion
	}
	return 0
}
// Similar to a `Get` request.
type BatchGetRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	// The keys to read.
	Keys [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"`
	// The snapshot (MVCC) version to read at.
	Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for BatchGetRequest.
func (m *BatchGetRequest) Reset() { *m = BatchGetRequest{} }
func (m *BatchGetRequest) String() string { return proto.CompactTextString(m) }
func (*BatchGetRequest) ProtoMessage() {}
func (*BatchGetRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{22}
}
func (m *BatchGetRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *BatchGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_BatchGetRequest.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *BatchGetRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchGetRequest.Merge(dst, src)
}
func (m *BatchGetRequest) XXX_Size() int {
	return m.Size()
}
func (m *BatchGetRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchGetRequest.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_BatchGetRequest proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *BatchGetRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *BatchGetRequest) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
func (m *BatchGetRequest) GetVersion() uint64 {
	if m != nil {
		return m.Version
	}
	return 0
}
// BatchGetResponse is the result of a BatchGetRequest: the key/value pairs
// read, plus optional execution details and lock errors.
type BatchGetResponse struct {
	// Set when the whole request could not be served by this region.
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"`
	// Time and scan details when processing the request.
	ExecDetailsV2 *ExecDetailsV2 `protobuf:"bytes,4,opt,name=exec_details_v2,json=execDetailsV2" json:"exec_details_v2,omitempty"`
	// This KeyError exists when some key is locked but we cannot check locks of all keys.
	// In this case, `pairs` should be empty and the client should redo batch get all the keys
	// after resolving the lock.
	Error *KeyError `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated proto.Message plumbing for BatchGetResponse.
func (m *BatchGetResponse) Reset() { *m = BatchGetResponse{} }
func (m *BatchGetResponse) String() string { return proto.CompactTextString(m) }
func (*BatchGetResponse) ProtoMessage() {}
func (*BatchGetResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{23}
}
func (m *BatchGetResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *BatchGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic output is delegated to the reflection-based codec.
		return xxx_messageInfo_BatchGetResponse.Marshal(b, m, deterministic)
	} else {
		// Fast path: reuse b's full capacity, then trim to the bytes written.
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *BatchGetResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchGetResponse.Merge(dst, src)
}
func (m *BatchGetResponse) XXX_Size() int {
	return m.Size()
}
func (m *BatchGetResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchGetResponse.DiscardUnknown(m)
}
// Cached reflection information shared by the XXX_* methods above.
var xxx_messageInfo_BatchGetResponse proto.InternalMessageInfo
// Nil-safe accessors: each returns the field's zero value when the receiver is nil.
func (m *BatchGetResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *BatchGetResponse) GetPairs() []*KvPair {
	if m != nil {
		return m.Pairs
	}
	return nil
}
func (m *BatchGetResponse) GetExecDetailsV2() *ExecDetailsV2 {
	if m != nil {
		return m.ExecDetailsV2
	}
	return nil
}
func (m *BatchGetResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
// Rollback a prewritten transaction. This will remove the preliminary data from the database,
// unlock locks, and leave a rollback tombstone.
type BatchRollbackRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	// Identify the transaction to be rolled back.
	StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	// The keys to rollback.
	Keys [][]byte `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *BatchRollbackRequest) Reset() { *m = BatchRollbackRequest{} }
func (m *BatchRollbackRequest) String() string { return proto.CompactTextString(m) }
func (*BatchRollbackRequest) ProtoMessage() {}
func (*BatchRollbackRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{24}
}
func (m *BatchRollbackRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *BatchRollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_BatchRollbackRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *BatchRollbackRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchRollbackRequest.Merge(dst, src)
}
func (m *BatchRollbackRequest) XXX_Size() int {
	return m.Size()
}
func (m *BatchRollbackRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchRollbackRequest.DiscardUnknown(m)
}
var xxx_messageInfo_BatchRollbackRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *BatchRollbackRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *BatchRollbackRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
func (m *BatchRollbackRequest) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
// BatchRollbackResponse is the generated message type for kvrpcpb.BatchRollbackResponse.
type BatchRollbackResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *BatchRollbackResponse) Reset() { *m = BatchRollbackResponse{} }
func (m *BatchRollbackResponse) String() string { return proto.CompactTextString(m) }
func (*BatchRollbackResponse) ProtoMessage() {}
func (*BatchRollbackResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{25}
}
func (m *BatchRollbackResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *BatchRollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_BatchRollbackResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *BatchRollbackResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchRollbackResponse.Merge(dst, src)
}
func (m *BatchRollbackResponse) XXX_Size() int {
	return m.Size()
}
func (m *BatchRollbackResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchRollbackResponse.DiscardUnknown(m)
}
var xxx_messageInfo_BatchRollbackResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *BatchRollbackResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *BatchRollbackResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
// Scan the database for locks. Used at the start of the GC process to find all
// old locks.
type ScanLockRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	// Returns all locks with a start timestamp before `max_version`.
	MaxVersion uint64 `protobuf:"varint,2,opt,name=max_version,json=maxVersion,proto3" json:"max_version,omitempty"`
	// Start scanning from this key.
	StartKey []byte `protobuf:"bytes,3,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	// The maximum number of locks to return.
	Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
	// The exclusive upperbound for scanning.
	EndKey []byte `protobuf:"bytes,5,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *ScanLockRequest) Reset() { *m = ScanLockRequest{} }
func (m *ScanLockRequest) String() string { return proto.CompactTextString(m) }
func (*ScanLockRequest) ProtoMessage() {}
func (*ScanLockRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{26}
}
func (m *ScanLockRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ScanLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ScanLockRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ScanLockRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ScanLockRequest.Merge(dst, src)
}
func (m *ScanLockRequest) XXX_Size() int {
	return m.Size()
}
func (m *ScanLockRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ScanLockRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ScanLockRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *ScanLockRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *ScanLockRequest) GetMaxVersion() uint64 {
	if m != nil {
		return m.MaxVersion
	}
	return 0
}
func (m *ScanLockRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
func (m *ScanLockRequest) GetLimit() uint32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
func (m *ScanLockRequest) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
// ScanLockResponse is the generated message type for kvrpcpb.ScanLockResponse.
type ScanLockResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	// Info on all locks found by the scan.
	Locks []*LockInfo `protobuf:"bytes,3,rep,name=locks" json:"locks,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *ScanLockResponse) Reset() { *m = ScanLockResponse{} }
func (m *ScanLockResponse) String() string { return proto.CompactTextString(m) }
func (*ScanLockResponse) ProtoMessage() {}
func (*ScanLockResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{27}
}
func (m *ScanLockResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ScanLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ScanLockResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ScanLockResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ScanLockResponse.Merge(dst, src)
}
func (m *ScanLockResponse) XXX_Size() int {
	return m.Size()
}
func (m *ScanLockResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ScanLockResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ScanLockResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *ScanLockResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *ScanLockResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
func (m *ScanLockResponse) GetLocks() []*LockInfo {
	if m != nil {
		return m.Locks
	}
	return nil
}
// For all keys locked by the transaction identified by `start_version`, either
// commit or rollback the transaction and unlock the key.
type ResolveLockRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	// `commit_version == 0` means the transaction was rolled back.
	// `commit_version > 0` means the transaction was committed at the given timestamp.
	CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"`
	TxnInfos []*TxnInfo `protobuf:"bytes,4,rep,name=txn_infos,json=txnInfos" json:"txn_infos,omitempty"`
	// Only resolve specified keys.
	Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *ResolveLockRequest) Reset() { *m = ResolveLockRequest{} }
func (m *ResolveLockRequest) String() string { return proto.CompactTextString(m) }
func (*ResolveLockRequest) ProtoMessage() {}
func (*ResolveLockRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{28}
}
func (m *ResolveLockRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ResolveLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ResolveLockRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ResolveLockRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ResolveLockRequest.Merge(dst, src)
}
func (m *ResolveLockRequest) XXX_Size() int {
	return m.Size()
}
func (m *ResolveLockRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ResolveLockRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ResolveLockRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *ResolveLockRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *ResolveLockRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
func (m *ResolveLockRequest) GetCommitVersion() uint64 {
	if m != nil {
		return m.CommitVersion
	}
	return 0
}
func (m *ResolveLockRequest) GetTxnInfos() []*TxnInfo {
	if m != nil {
		return m.TxnInfos
	}
	return nil
}
func (m *ResolveLockRequest) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
// ResolveLockResponse is the generated message type for kvrpcpb.ResolveLockResponse.
type ResolveLockResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *ResolveLockResponse) Reset() { *m = ResolveLockResponse{} }
func (m *ResolveLockResponse) String() string { return proto.CompactTextString(m) }
func (*ResolveLockResponse) ProtoMessage() {}
func (*ResolveLockResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{29}
}
func (m *ResolveLockResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ResolveLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ResolveLockResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ResolveLockResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ResolveLockResponse.Merge(dst, src)
}
func (m *ResolveLockResponse) XXX_Size() int {
	return m.Size()
}
func (m *ResolveLockResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ResolveLockResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ResolveLockResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *ResolveLockResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *ResolveLockResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
// Request TiKV to garbage collect all non-current data older than `safe_point`.
type GCRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *GCRequest) Reset() { *m = GCRequest{} }
func (m *GCRequest) String() string { return proto.CompactTextString(m) }
func (*GCRequest) ProtoMessage() {}
func (*GCRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{30}
}
func (m *GCRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GCRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GCRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GCRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GCRequest.Merge(dst, src)
}
func (m *GCRequest) XXX_Size() int {
	return m.Size()
}
func (m *GCRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GCRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GCRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *GCRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *GCRequest) GetSafePoint() uint64 {
	if m != nil {
		return m.SafePoint
	}
	return 0
}
// GCResponse is the generated message type for kvrpcpb.GCResponse.
type GCResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *GCResponse) Reset() { *m = GCResponse{} }
func (m *GCResponse) String() string { return proto.CompactTextString(m) }
func (*GCResponse) ProtoMessage() {}
func (*GCResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{31}
}
func (m *GCResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GCResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GCResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *GCResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GCResponse.Merge(dst, src)
}
func (m *GCResponse) XXX_Size() int {
	return m.Size()
}
func (m *GCResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_GCResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GCResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *GCResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *GCResponse) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
// Delete a range of data from TiKV.
// This message should not be used.
type DeleteRangeRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	// If true, the data will not be immediately deleted, but the operation will
	// still be replicated via Raft. This is used to notify TiKV that the data
	// will be deleted using `unsafe_destroy_range` soon.
	NotifyOnly bool `protobuf:"varint,4,opt,name=notify_only,json=notifyOnly,proto3" json:"notify_only,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} }
func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteRangeRequest) ProtoMessage() {}
func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{32}
}
func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *DeleteRangeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteRangeRequest.Merge(dst, src)
}
func (m *DeleteRangeRequest) XXX_Size() int {
	return m.Size()
}
func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *DeleteRangeRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *DeleteRangeRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
func (m *DeleteRangeRequest) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
func (m *DeleteRangeRequest) GetNotifyOnly() bool {
	if m != nil {
		return m.NotifyOnly
	}
	return false
}
// DeleteRangeResponse is the generated message type for kvrpcpb.DeleteRangeResponse.
// Note: unlike the transactional responses above, `Error` here is a plain string.
type DeleteRangeResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} }
func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteRangeResponse) ProtoMessage() {}
func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{33}
}
func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *DeleteRangeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteRangeResponse.Merge(dst, src)
}
func (m *DeleteRangeResponse) XXX_Size() int {
	return m.Size()
}
func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *DeleteRangeResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *DeleteRangeResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// RawGetRequest is the generated message type for kvrpcpb.RawGetRequest.
type RawGetRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	// Cf presumably names a column family — TODO confirm against the .proto definition.
	Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawGetRequest) Reset() { *m = RawGetRequest{} }
func (m *RawGetRequest) String() string { return proto.CompactTextString(m) }
func (*RawGetRequest) ProtoMessage() {}
func (*RawGetRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{34}
}
func (m *RawGetRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawGetRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawGetRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawGetRequest.Merge(dst, src)
}
func (m *RawGetRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawGetRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawGetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawGetRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawGetRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawGetRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
func (m *RawGetRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
// RawGetResponse is the generated message type for kvrpcpb.RawGetResponse.
type RawGetResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	NotFound bool `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawGetResponse) Reset() { *m = RawGetResponse{} }
func (m *RawGetResponse) String() string { return proto.CompactTextString(m) }
func (*RawGetResponse) ProtoMessage() {}
func (*RawGetResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{35}
}
func (m *RawGetResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawGetResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawGetResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawGetResponse.Merge(dst, src)
}
func (m *RawGetResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawGetResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawGetResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawGetResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawGetResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawGetResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
func (m *RawGetResponse) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
func (m *RawGetResponse) GetNotFound() bool {
	if m != nil {
		return m.NotFound
	}
	return false
}
// RawBatchGetRequest is the generated message type for kvrpcpb.RawBatchGetRequest.
type RawBatchGetRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Keys [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"`
	Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawBatchGetRequest) Reset() { *m = RawBatchGetRequest{} }
func (m *RawBatchGetRequest) String() string { return proto.CompactTextString(m) }
func (*RawBatchGetRequest) ProtoMessage() {}
func (*RawBatchGetRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{36}
}
func (m *RawBatchGetRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawBatchGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchGetRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchGetRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchGetRequest.Merge(dst, src)
}
func (m *RawBatchGetRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchGetRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchGetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchGetRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawBatchGetRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawBatchGetRequest) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
func (m *RawBatchGetRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
// RawBatchGetResponse is the generated message type for kvrpcpb.RawBatchGetResponse.
type RawBatchGetResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawBatchGetResponse) Reset() { *m = RawBatchGetResponse{} }
func (m *RawBatchGetResponse) String() string { return proto.CompactTextString(m) }
func (*RawBatchGetResponse) ProtoMessage() {}
func (*RawBatchGetResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{37}
}
func (m *RawBatchGetResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawBatchGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchGetResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchGetResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchGetResponse.Merge(dst, src)
}
func (m *RawBatchGetResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchGetResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchGetResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchGetResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawBatchGetResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawBatchGetResponse) GetPairs() []*KvPair {
	if m != nil {
		return m.Pairs
	}
	return nil
}
// RawPutRequest is the generated message type for kvrpcpb.RawPutRequest.
type RawPutRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	Cf string `protobuf:"bytes,4,opt,name=cf,proto3" json:"cf,omitempty"`
	// Ttl looks like a time-to-live for the entry; units not visible here — confirm in the .proto.
	Ttl uint64 `protobuf:"varint,5,opt,name=ttl,proto3" json:"ttl,omitempty"`
	ForCas bool `protobuf:"varint,6,opt,name=for_cas,json=forCas,proto3" json:"for_cas,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawPutRequest) Reset() { *m = RawPutRequest{} }
func (m *RawPutRequest) String() string { return proto.CompactTextString(m) }
func (*RawPutRequest) ProtoMessage() {}
func (*RawPutRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{38}
}
func (m *RawPutRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawPutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawPutRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawPutRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawPutRequest.Merge(dst, src)
}
func (m *RawPutRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawPutRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawPutRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawPutRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawPutRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawPutRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
func (m *RawPutRequest) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
func (m *RawPutRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
func (m *RawPutRequest) GetTtl() uint64 {
	if m != nil {
		return m.Ttl
	}
	return 0
}
func (m *RawPutRequest) GetForCas() bool {
	if m != nil {
		return m.ForCas
	}
	return false
}
// RawPutResponse is the generated message type for kvrpcpb.RawPutResponse.
type RawPutResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawPutResponse) Reset() { *m = RawPutResponse{} }
func (m *RawPutResponse) String() string { return proto.CompactTextString(m) }
func (*RawPutResponse) ProtoMessage() {}
func (*RawPutResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{39}
}
func (m *RawPutResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawPutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawPutResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawPutResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawPutResponse.Merge(dst, src)
}
func (m *RawPutResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawPutResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawPutResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawPutResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawPutResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawPutResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// RawBatchPutRequest is the generated message type for kvrpcpb.RawBatchPutRequest.
type RawBatchPutRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"`
	Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"`
	Ttl uint64 `protobuf:"varint,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
	ForCas bool `protobuf:"varint,5,opt,name=for_cas,json=forCas,proto3" json:"for_cas,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawBatchPutRequest) Reset() { *m = RawBatchPutRequest{} }
func (m *RawBatchPutRequest) String() string { return proto.CompactTextString(m) }
func (*RawBatchPutRequest) ProtoMessage() {}
func (*RawBatchPutRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{40}
}
func (m *RawBatchPutRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawBatchPutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchPutRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchPutRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchPutRequest.Merge(dst, src)
}
func (m *RawBatchPutRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchPutRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchPutRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchPutRequest proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawBatchPutRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawBatchPutRequest) GetPairs() []*KvPair {
	if m != nil {
		return m.Pairs
	}
	return nil
}
func (m *RawBatchPutRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
func (m *RawBatchPutRequest) GetTtl() uint64 {
	if m != nil {
		return m.Ttl
	}
	return 0
}
func (m *RawBatchPutRequest) GetForCas() bool {
	if m != nil {
		return m.ForCas
	}
	return false
}
// RawBatchPutResponse is the generated message type for kvrpcpb.RawBatchPutResponse.
type RawBatchPutResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Generated boilerplate: lifecycle, reflection, and wire-format plumbing.
func (m *RawBatchPutResponse) Reset() { *m = RawBatchPutResponse{} }
func (m *RawBatchPutResponse) String() string { return proto.CompactTextString(m) }
func (*RawBatchPutResponse) ProtoMessage() {}
func (*RawBatchPutResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{41}
}
func (m *RawBatchPutResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *RawBatchPutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchPutResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchPutResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchPutResponse.Merge(dst, src)
}
func (m *RawBatchPutResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchPutResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchPutResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchPutResponse proto.InternalMessageInfo
// Nil-safe generated accessors: each returns the field, or its zero value
// when the receiver is nil.
func (m *RawBatchPutResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawBatchPutResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// RawDeleteRequest is the generated message for deleting a single raw key.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawDeleteRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key                  []byte   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Cf                   string   `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"`
	ForCas               bool     `protobuf:"varint,4,opt,name=for_cas,json=forCas,proto3" json:"for_cas,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RawDeleteRequest) Reset()         { *m = RawDeleteRequest{} }
func (m *RawDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*RawDeleteRequest) ProtoMessage()    {}
func (*RawDeleteRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{42}
}
func (m *RawDeleteRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawDeleteRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawDeleteRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawDeleteRequest.Merge(dst, src)
}
func (m *RawDeleteRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawDeleteRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawDeleteRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawDeleteRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawDeleteRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawDeleteRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
func (m *RawDeleteRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
func (m *RawDeleteRequest) GetForCas() bool {
	if m != nil {
		return m.ForCas
	}
	return false
}
// RawDeleteResponse is the generated message for the raw delete reply.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawDeleteResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
func (m *RawDeleteResponse) Reset()         { *m = RawDeleteResponse{} }
func (m *RawDeleteResponse) String() string { return proto.CompactTextString(m) }
func (*RawDeleteResponse) ProtoMessage()    {}
func (*RawDeleteResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{43}
}
func (m *RawDeleteResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawDeleteResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawDeleteResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawDeleteResponse.Merge(dst, src)
}
func (m *RawDeleteResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawDeleteResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawDeleteResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawDeleteResponse proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawDeleteResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawDeleteResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// RawBatchDeleteRequest is the generated message for deleting multiple raw keys.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawBatchDeleteRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Keys                 [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"`
	Cf                   string   `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"`
	ForCas               bool     `protobuf:"varint,4,opt,name=for_cas,json=forCas,proto3" json:"for_cas,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RawBatchDeleteRequest) Reset()         { *m = RawBatchDeleteRequest{} }
func (m *RawBatchDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*RawBatchDeleteRequest) ProtoMessage()    {}
func (*RawBatchDeleteRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{44}
}
func (m *RawBatchDeleteRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawBatchDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchDeleteRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchDeleteRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchDeleteRequest.Merge(dst, src)
}
func (m *RawBatchDeleteRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchDeleteRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchDeleteRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchDeleteRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawBatchDeleteRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawBatchDeleteRequest) GetKeys() [][]byte {
	if m != nil {
		return m.Keys
	}
	return nil
}
func (m *RawBatchDeleteRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
func (m *RawBatchDeleteRequest) GetForCas() bool {
	if m != nil {
		return m.ForCas
	}
	return false
}
// RawBatchDeleteResponse is the generated message for the raw batch-delete reply.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawBatchDeleteResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
func (m *RawBatchDeleteResponse) Reset()         { *m = RawBatchDeleteResponse{} }
func (m *RawBatchDeleteResponse) String() string { return proto.CompactTextString(m) }
func (*RawBatchDeleteResponse) ProtoMessage()    {}
func (*RawBatchDeleteResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{45}
}
func (m *RawBatchDeleteResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawBatchDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchDeleteResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchDeleteResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchDeleteResponse.Merge(dst, src)
}
func (m *RawBatchDeleteResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchDeleteResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchDeleteResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchDeleteResponse proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawBatchDeleteResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawBatchDeleteResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// RawScanRequest is the generated message for scanning a raw key range.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawScanRequest struct {
	Context  *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartKey []byte   `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	Limit    uint32   `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
	KeyOnly  bool     `protobuf:"varint,4,opt,name=key_only,json=keyOnly,proto3" json:"key_only,omitempty"`
	Cf       string   `protobuf:"bytes,5,opt,name=cf,proto3" json:"cf,omitempty"`
	Reverse  bool     `protobuf:"varint,6,opt,name=reverse,proto3" json:"reverse,omitempty"`
	// For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key;
	// and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key.
	EndKey               []byte   `protobuf:"bytes,7,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RawScanRequest) Reset()         { *m = RawScanRequest{} }
func (m *RawScanRequest) String() string { return proto.CompactTextString(m) }
func (*RawScanRequest) ProtoMessage()    {}
func (*RawScanRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{46}
}
func (m *RawScanRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawScanRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawScanRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawScanRequest.Merge(dst, src)
}
func (m *RawScanRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawScanRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawScanRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawScanRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawScanRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawScanRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
func (m *RawScanRequest) GetLimit() uint32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
func (m *RawScanRequest) GetKeyOnly() bool {
	if m != nil {
		return m.KeyOnly
	}
	return false
}
func (m *RawScanRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
func (m *RawScanRequest) GetReverse() bool {
	if m != nil {
		return m.Reverse
	}
	return false
}
func (m *RawScanRequest) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
// RawScanResponse is the generated message for the raw scan reply.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawScanResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Kvs                  []*KvPair      `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
func (m *RawScanResponse) Reset()         { *m = RawScanResponse{} }
func (m *RawScanResponse) String() string { return proto.CompactTextString(m) }
func (*RawScanResponse) ProtoMessage()    {}
func (*RawScanResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{47}
}
func (m *RawScanResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawScanResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawScanResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawScanResponse.Merge(dst, src)
}
func (m *RawScanResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawScanResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawScanResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawScanResponse proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawScanResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawScanResponse) GetKvs() []*KvPair {
	if m != nil {
		return m.Kvs
	}
	return nil
}
// RawDeleteRangeRequest is the generated message for deleting a raw key range.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawDeleteRangeRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartKey             []byte   `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	EndKey               []byte   `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	Cf                   string   `protobuf:"bytes,4,opt,name=cf,proto3" json:"cf,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RawDeleteRangeRequest) Reset()         { *m = RawDeleteRangeRequest{} }
func (m *RawDeleteRangeRequest) String() string { return proto.CompactTextString(m) }
func (*RawDeleteRangeRequest) ProtoMessage()    {}
func (*RawDeleteRangeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{48}
}
func (m *RawDeleteRangeRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawDeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawDeleteRangeRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawDeleteRangeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawDeleteRangeRequest.Merge(dst, src)
}
func (m *RawDeleteRangeRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawDeleteRangeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawDeleteRangeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawDeleteRangeRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawDeleteRangeRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawDeleteRangeRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
func (m *RawDeleteRangeRequest) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
func (m *RawDeleteRangeRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
// RawDeleteRangeResponse is the generated message for the raw delete-range reply.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawDeleteRangeResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
func (m *RawDeleteRangeResponse) Reset()         { *m = RawDeleteRangeResponse{} }
func (m *RawDeleteRangeResponse) String() string { return proto.CompactTextString(m) }
func (*RawDeleteRangeResponse) ProtoMessage()    {}
func (*RawDeleteRangeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{49}
}
func (m *RawDeleteRangeResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawDeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawDeleteRangeResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawDeleteRangeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawDeleteRangeResponse.Merge(dst, src)
}
func (m *RawDeleteRangeResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawDeleteRangeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawDeleteRangeResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawDeleteRangeResponse proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawDeleteRangeResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawDeleteRangeResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// RawBatchScanRequest is the generated message for scanning multiple raw key
// ranges in one call. NOTE(review): generated file ("DO NOT EDIT"); comments
// are reviewer notes only.
type RawBatchScanRequest struct {
	Context              *Context    `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Ranges               []*KeyRange `protobuf:"bytes,2,rep,name=ranges" json:"ranges,omitempty"`
	EachLimit            uint32      `protobuf:"varint,3,opt,name=each_limit,json=eachLimit,proto3" json:"each_limit,omitempty"`
	KeyOnly              bool        `protobuf:"varint,4,opt,name=key_only,json=keyOnly,proto3" json:"key_only,omitempty"`
	Cf                   string      `protobuf:"bytes,5,opt,name=cf,proto3" json:"cf,omitempty"`
	Reverse              bool        `protobuf:"varint,6,opt,name=reverse,proto3" json:"reverse,omitempty"`
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}
func (m *RawBatchScanRequest) Reset()         { *m = RawBatchScanRequest{} }
func (m *RawBatchScanRequest) String() string { return proto.CompactTextString(m) }
func (*RawBatchScanRequest) ProtoMessage()    {}
func (*RawBatchScanRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{50}
}
func (m *RawBatchScanRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawBatchScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchScanRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchScanRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchScanRequest.Merge(dst, src)
}
func (m *RawBatchScanRequest) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchScanRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchScanRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchScanRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawBatchScanRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RawBatchScanRequest) GetRanges() []*KeyRange {
	if m != nil {
		return m.Ranges
	}
	return nil
}
func (m *RawBatchScanRequest) GetEachLimit() uint32 {
	if m != nil {
		return m.EachLimit
	}
	return 0
}
func (m *RawBatchScanRequest) GetKeyOnly() bool {
	if m != nil {
		return m.KeyOnly
	}
	return false
}
func (m *RawBatchScanRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
func (m *RawBatchScanRequest) GetReverse() bool {
	if m != nil {
		return m.Reverse
	}
	return false
}
// RawBatchScanResponse is the generated message for the raw batch-scan reply.
// NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer notes only.
type RawBatchScanResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Kvs                  []*KvPair      `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
func (m *RawBatchScanResponse) Reset()         { *m = RawBatchScanResponse{} }
func (m *RawBatchScanResponse) String() string { return proto.CompactTextString(m) }
func (*RawBatchScanResponse) ProtoMessage()    {}
func (*RawBatchScanResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{51}
}
func (m *RawBatchScanResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RawBatchScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawBatchScanResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RawBatchScanResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawBatchScanResponse.Merge(dst, src)
}
func (m *RawBatchScanResponse) XXX_Size() int {
	return m.Size()
}
func (m *RawBatchScanResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawBatchScanResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RawBatchScanResponse proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RawBatchScanResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *RawBatchScanResponse) GetKvs() []*KvPair {
	if m != nil {
		return m.Kvs
	}
	return nil
}
// UnsafeDestroyRangeRequest is the generated message for the unsafe
// destroy-range RPC. NOTE(review): generated file ("DO NOT EDIT"); comments
// are reviewer notes only.
type UnsafeDestroyRangeRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartKey             []byte   `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	EndKey               []byte   `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *UnsafeDestroyRangeRequest) Reset()         { *m = UnsafeDestroyRangeRequest{} }
func (m *UnsafeDestroyRangeRequest) String() string { return proto.CompactTextString(m) }
func (*UnsafeDestroyRangeRequest) ProtoMessage()    {}
func (*UnsafeDestroyRangeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{52}
}
func (m *UnsafeDestroyRangeRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *UnsafeDestroyRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_UnsafeDestroyRangeRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *UnsafeDestroyRangeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UnsafeDestroyRangeRequest.Merge(dst, src)
}
func (m *UnsafeDestroyRangeRequest) XXX_Size() int {
	return m.Size()
}
func (m *UnsafeDestroyRangeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UnsafeDestroyRangeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UnsafeDestroyRangeRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *UnsafeDestroyRangeRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *UnsafeDestroyRangeRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
func (m *UnsafeDestroyRangeRequest) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
// UnsafeDestroyRangeResponse is the generated message for the unsafe
// destroy-range reply. NOTE(review): generated file ("DO NOT EDIT"); comments
// are reviewer notes only.
type UnsafeDestroyRangeResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
func (m *UnsafeDestroyRangeResponse) Reset()         { *m = UnsafeDestroyRangeResponse{} }
func (m *UnsafeDestroyRangeResponse) String() string { return proto.CompactTextString(m) }
func (*UnsafeDestroyRangeResponse) ProtoMessage()    {}
func (*UnsafeDestroyRangeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{53}
}
func (m *UnsafeDestroyRangeResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *UnsafeDestroyRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_UnsafeDestroyRangeResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *UnsafeDestroyRangeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UnsafeDestroyRangeResponse.Merge(dst, src)
}
func (m *UnsafeDestroyRangeResponse) XXX_Size() int {
	return m.Size()
}
func (m *UnsafeDestroyRangeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_UnsafeDestroyRangeResponse.DiscardUnknown(m)
}
var xxx_messageInfo_UnsafeDestroyRangeResponse proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *UnsafeDestroyRangeResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
func (m *UnsafeDestroyRangeResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// RegisterLockObserverRequest is the generated message for registering a lock
// observer with a max_ts watermark. NOTE(review): generated file ("DO NOT
// EDIT"); comments are reviewer notes only.
type RegisterLockObserverRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	MaxTs                uint64   `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RegisterLockObserverRequest) Reset()         { *m = RegisterLockObserverRequest{} }
func (m *RegisterLockObserverRequest) String() string { return proto.CompactTextString(m) }
func (*RegisterLockObserverRequest) ProtoMessage()    {}
func (*RegisterLockObserverRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{54}
}
func (m *RegisterLockObserverRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RegisterLockObserverRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RegisterLockObserverRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RegisterLockObserverRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RegisterLockObserverRequest.Merge(dst, src)
}
func (m *RegisterLockObserverRequest) XXX_Size() int {
	return m.Size()
}
func (m *RegisterLockObserverRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RegisterLockObserverRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RegisterLockObserverRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RegisterLockObserverRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RegisterLockObserverRequest) GetMaxTs() uint64 {
	if m != nil {
		return m.MaxTs
	}
	return 0
}
// RegisterLockObserverResponse is the generated message for the
// register-lock-observer reply. NOTE(review): generated file ("DO NOT EDIT");
// comments are reviewer notes only.
type RegisterLockObserverResponse struct {
	Error                string   `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RegisterLockObserverResponse) Reset()         { *m = RegisterLockObserverResponse{} }
func (m *RegisterLockObserverResponse) String() string { return proto.CompactTextString(m) }
func (*RegisterLockObserverResponse) ProtoMessage()    {}
func (*RegisterLockObserverResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{55}
}
func (m *RegisterLockObserverResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RegisterLockObserverResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RegisterLockObserverResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RegisterLockObserverResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RegisterLockObserverResponse.Merge(dst, src)
}
func (m *RegisterLockObserverResponse) XXX_Size() int {
	return m.Size()
}
func (m *RegisterLockObserverResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RegisterLockObserverResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RegisterLockObserverResponse proto.InternalMessageInfo
// Nil-safe generated accessor.
func (m *RegisterLockObserverResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// CheckLockObserverRequest is the generated message for polling a lock
// observer identified by max_ts. NOTE(review): generated file ("DO NOT
// EDIT"); comments are reviewer notes only.
type CheckLockObserverRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	MaxTs                uint64   `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *CheckLockObserverRequest) Reset()         { *m = CheckLockObserverRequest{} }
func (m *CheckLockObserverRequest) String() string { return proto.CompactTextString(m) }
func (*CheckLockObserverRequest) ProtoMessage()    {}
func (*CheckLockObserverRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{56}
}
func (m *CheckLockObserverRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *CheckLockObserverRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CheckLockObserverRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckLockObserverRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckLockObserverRequest.Merge(dst, src)
}
func (m *CheckLockObserverRequest) XXX_Size() int {
	return m.Size()
}
func (m *CheckLockObserverRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckLockObserverRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CheckLockObserverRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *CheckLockObserverRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *CheckLockObserverRequest) GetMaxTs() uint64 {
	if m != nil {
		return m.MaxTs
	}
	return 0
}
// CheckLockObserverResponse is the generated message for the
// check-lock-observer reply, carrying the observed locks and a cleanliness
// flag. NOTE(review): generated file ("DO NOT EDIT"); comments are reviewer
// notes only.
type CheckLockObserverResponse struct {
	Error                string      `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	IsClean              bool        `protobuf:"varint,2,opt,name=is_clean,json=isClean,proto3" json:"is_clean,omitempty"`
	Locks                []*LockInfo `protobuf:"bytes,3,rep,name=locks" json:"locks,omitempty"`
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}
func (m *CheckLockObserverResponse) Reset()         { *m = CheckLockObserverResponse{} }
func (m *CheckLockObserverResponse) String() string { return proto.CompactTextString(m) }
func (*CheckLockObserverResponse) ProtoMessage()    {}
func (*CheckLockObserverResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{57}
}
func (m *CheckLockObserverResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *CheckLockObserverResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CheckLockObserverResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckLockObserverResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckLockObserverResponse.Merge(dst, src)
}
func (m *CheckLockObserverResponse) XXX_Size() int {
	return m.Size()
}
func (m *CheckLockObserverResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckLockObserverResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CheckLockObserverResponse proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *CheckLockObserverResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
func (m *CheckLockObserverResponse) GetIsClean() bool {
	if m != nil {
		return m.IsClean
	}
	return false
}
func (m *CheckLockObserverResponse) GetLocks() []*LockInfo {
	if m != nil {
		return m.Locks
	}
	return nil
}
// RemoveLockObserverRequest is the generated message for removing a lock
// observer identified by max_ts. NOTE(review): generated file ("DO NOT
// EDIT"); comments are reviewer notes only.
type RemoveLockObserverRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	MaxTs                uint64   `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RemoveLockObserverRequest) Reset()         { *m = RemoveLockObserverRequest{} }
func (m *RemoveLockObserverRequest) String() string { return proto.CompactTextString(m) }
func (*RemoveLockObserverRequest) ProtoMessage()    {}
func (*RemoveLockObserverRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{58}
}
func (m *RemoveLockObserverRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RemoveLockObserverRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RemoveLockObserverRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RemoveLockObserverRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RemoveLockObserverRequest.Merge(dst, src)
}
func (m *RemoveLockObserverRequest) XXX_Size() int {
	return m.Size()
}
func (m *RemoveLockObserverRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RemoveLockObserverRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RemoveLockObserverRequest proto.InternalMessageInfo
// Nil-safe generated accessors.
func (m *RemoveLockObserverRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
func (m *RemoveLockObserverRequest) GetMaxTs() uint64 {
	if m != nil {
		return m.MaxTs
	}
	return 0
}
// RemoveLockObserverResponse is the generated message for the
// remove-lock-observer reply. NOTE(review): generated file ("DO NOT EDIT");
// comments are reviewer notes only.
type RemoveLockObserverResponse struct {
	Error                string   `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
func (m *RemoveLockObserverResponse) Reset()         { *m = RemoveLockObserverResponse{} }
func (m *RemoveLockObserverResponse) String() string { return proto.CompactTextString(m) }
func (*RemoveLockObserverResponse) ProtoMessage()    {}
func (*RemoveLockObserverResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{59}
}
func (m *RemoveLockObserverResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: reflection-based marshaler for deterministic output, generated
// fast path (into b's spare capacity) otherwise.
func (m *RemoveLockObserverResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RemoveLockObserverResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *RemoveLockObserverResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RemoveLockObserverResponse.Merge(dst, src)
}
func (m *RemoveLockObserverResponse) XXX_Size() int {
	return m.Size()
}
func (m *RemoveLockObserverResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RemoveLockObserverResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RemoveLockObserverResponse proto.InternalMessageInfo
// Nil-safe generated accessor.
func (m *RemoveLockObserverResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// PhysicalScanLockRequest asks a store to physically scan for locks,
// starting at StartKey and returning at most Limit entries.
// (MaxTs presumably bounds the lock timestamps of interest — TODO confirm
// against kvrpcpb.proto.)
type PhysicalScanLockRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	MaxTs uint64 `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"`
	StartKey []byte `protobuf:"bytes,3,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *PhysicalScanLockRequest) Reset() { *m = PhysicalScanLockRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *PhysicalScanLockRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *PhysicalScanLockRequest as a proto.Message.
func (*PhysicalScanLockRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{60}).
func (*PhysicalScanLockRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{60}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *PhysicalScanLockRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *PhysicalScanLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_PhysicalScanLockRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *PhysicalScanLockRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PhysicalScanLockRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *PhysicalScanLockRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *PhysicalScanLockRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PhysicalScanLockRequest.DiscardUnknown(m)
}
// xxx_messageInfo_PhysicalScanLockRequest caches reflection metadata.
var xxx_messageInfo_PhysicalScanLockRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *PhysicalScanLockRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetMaxTs returns MaxTs, or 0 when m is nil.
func (m *PhysicalScanLockRequest) GetMaxTs() uint64 {
	if m != nil {
		return m.MaxTs
	}
	return 0
}
// GetStartKey returns the scan start key, or nil when m is nil.
func (m *PhysicalScanLockRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
// GetLimit returns the maximum number of locks to return, or 0 when m is nil.
func (m *PhysicalScanLockRequest) GetLimit() uint32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
// PhysicalScanLockResponse returns the locks found by a physical scan;
// Error is non-empty when the scan failed on the store.
type PhysicalScanLockResponse struct {
	Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	Locks []*LockInfo `protobuf:"bytes,2,rep,name=locks" json:"locks,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *PhysicalScanLockResponse) Reset() { *m = PhysicalScanLockResponse{} }
// String returns the compact protobuf text encoding of m.
func (m *PhysicalScanLockResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *PhysicalScanLockResponse as a proto.Message.
func (*PhysicalScanLockResponse) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{61}).
func (*PhysicalScanLockResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{61}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *PhysicalScanLockResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *PhysicalScanLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_PhysicalScanLockResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *PhysicalScanLockResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PhysicalScanLockResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *PhysicalScanLockResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *PhysicalScanLockResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_PhysicalScanLockResponse.DiscardUnknown(m)
}
// xxx_messageInfo_PhysicalScanLockResponse caches reflection metadata.
var xxx_messageInfo_PhysicalScanLockResponse proto.InternalMessageInfo
// GetError returns the error string, or "" when m is nil.
func (m *PhysicalScanLockResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// GetLocks returns the scanned locks, or nil when m is nil.
func (m *PhysicalScanLockResponse) GetLocks() []*LockInfo {
	if m != nil {
		return m.Locks
	}
	return nil
}
// Sent from PD to a TiKV node.
// SplitRegionRequest asks TiKV to split a region at the given keys.
// SplitKey is the deprecated single-key form; new callers use SplitKeys.
type SplitRegionRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	SplitKey []byte `protobuf:"bytes,2,opt,name=split_key,json=splitKey,proto3" json:"split_key,omitempty"` // Deprecated: Do not use.
	SplitKeys [][]byte `protobuf:"bytes,3,rep,name=split_keys,json=splitKeys" json:"split_keys,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *SplitRegionRequest) Reset() { *m = SplitRegionRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *SplitRegionRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *SplitRegionRequest as a proto.Message.
func (*SplitRegionRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{62}).
func (*SplitRegionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{62}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *SplitRegionRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *SplitRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SplitRegionRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *SplitRegionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitRegionRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *SplitRegionRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *SplitRegionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitRegionRequest.DiscardUnknown(m)
}
// xxx_messageInfo_SplitRegionRequest caches reflection metadata.
var xxx_messageInfo_SplitRegionRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *SplitRegionRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetSplitKey returns the legacy single split key, or nil when m is nil.
// Deprecated: Do not use.
func (m *SplitRegionRequest) GetSplitKey() []byte {
	if m != nil {
		return m.SplitKey
	}
	return nil
}
// GetSplitKeys returns the split keys, or nil when m is nil.
func (m *SplitRegionRequest) GetSplitKeys() [][]byte {
	if m != nil {
		return m.SplitKeys
	}
	return nil
}
// SplitRegionResponse carries the regions produced by a split.
// Left/Right are the deprecated two-way-split results; new callers read
// Regions, which holds all resulting regions.
type SplitRegionResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Left *metapb.Region `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"` // Deprecated: Do not use.
	Right *metapb.Region `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"` // Deprecated: Do not use.
	Regions []*metapb.Region `protobuf:"bytes,4,rep,name=regions" json:"regions,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *SplitRegionResponse) Reset() { *m = SplitRegionResponse{} }
// String returns the compact protobuf text encoding of m.
func (m *SplitRegionResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *SplitRegionResponse as a proto.Message.
func (*SplitRegionResponse) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{63}).
func (*SplitRegionResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{63}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *SplitRegionResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *SplitRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SplitRegionResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *SplitRegionResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitRegionResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *SplitRegionResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *SplitRegionResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitRegionResponse.DiscardUnknown(m)
}
// xxx_messageInfo_SplitRegionResponse caches reflection metadata.
var xxx_messageInfo_SplitRegionResponse proto.InternalMessageInfo
// GetRegionError returns the region-level error, or nil when m is nil.
func (m *SplitRegionResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetLeft returns the legacy left region, or nil when m is nil.
// Deprecated: Do not use.
func (m *SplitRegionResponse) GetLeft() *metapb.Region {
	if m != nil {
		return m.Left
	}
	return nil
}
// GetRight returns the legacy right region, or nil when m is nil.
// Deprecated: Do not use.
func (m *SplitRegionResponse) GetRight() *metapb.Region {
	if m != nil {
		return m.Right
	}
	return nil
}
// GetRegions returns all resulting regions, or nil when m is nil.
func (m *SplitRegionResponse) GetRegions() []*metapb.Region {
	if m != nil {
		return m.Regions
	}
	return nil
}
// Sent from TiFlash to a TiKV node.
// ReadIndexRequest asks for the current read index; TiKV also checks the
// given ranges at StartTs for any unapplied lock that would block the read.
type ReadIndexRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	// TiKV checks the given range if there is any unapplied lock
	// blocking the read request.
	StartTs uint64 `protobuf:"varint,2,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	Ranges []*KeyRange `protobuf:"bytes,3,rep,name=ranges" json:"ranges,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *ReadIndexRequest) Reset() { *m = ReadIndexRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *ReadIndexRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *ReadIndexRequest as a proto.Message.
func (*ReadIndexRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{64}).
func (*ReadIndexRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{64}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *ReadIndexRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *ReadIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReadIndexRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *ReadIndexRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadIndexRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *ReadIndexRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *ReadIndexRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadIndexRequest.DiscardUnknown(m)
}
// xxx_messageInfo_ReadIndexRequest caches reflection metadata.
var xxx_messageInfo_ReadIndexRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *ReadIndexRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetStartTs returns the read timestamp, or 0 when m is nil.
func (m *ReadIndexRequest) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}
// GetRanges returns the key ranges to check for locks, or nil when m is nil.
func (m *ReadIndexRequest) GetRanges() []*KeyRange {
	if m != nil {
		return m.Ranges
	}
	return nil
}
// ReadIndexResponse returns the read index, or the lock that blocked the read.
type ReadIndexResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	ReadIndex uint64 `protobuf:"varint,2,opt,name=read_index,json=readIndex,proto3" json:"read_index,omitempty"`
	// If `locked` is set, this read request is blocked by a lock.
	// The lock should be returned to the client.
	Locked *LockInfo `protobuf:"bytes,3,opt,name=locked" json:"locked,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *ReadIndexResponse) Reset() { *m = ReadIndexResponse{} }
// String returns the compact protobuf text encoding of m.
func (m *ReadIndexResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *ReadIndexResponse as a proto.Message.
func (*ReadIndexResponse) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{65}).
func (*ReadIndexResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{65}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *ReadIndexResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *ReadIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReadIndexResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *ReadIndexResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadIndexResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *ReadIndexResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *ReadIndexResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadIndexResponse.DiscardUnknown(m)
}
// xxx_messageInfo_ReadIndexResponse caches reflection metadata.
var xxx_messageInfo_ReadIndexResponse proto.InternalMessageInfo
// GetRegionError returns the region-level error, or nil when m is nil.
func (m *ReadIndexResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetReadIndex returns the read index, or 0 when m is nil.
func (m *ReadIndexResponse) GetReadIndex() uint64 {
	if m != nil {
		return m.ReadIndex
	}
	return 0
}
// GetLocked returns the blocking lock (nil if the read was not blocked or m is nil).
func (m *ReadIndexResponse) GetLocked() *LockInfo {
	if m != nil {
		return m.Locked
	}
	return nil
}
// VerMutation is a single versioned-KV write: Op selects put vs delete,
// Key/Value carry the operands (Value is unused for deletes — presumably;
// TODO confirm against kvrpcpb.proto).
type VerMutation struct {
	Op VerOp `protobuf:"varint,1,opt,name=op,proto3,enum=kvrpcpb.VerOp" json:"op,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerMutation) Reset() { *m = VerMutation{} }
// String returns the compact protobuf text encoding of m.
func (m *VerMutation) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerMutation as a proto.Message.
func (*VerMutation) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{66}).
func (*VerMutation) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{66}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerMutation) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerMutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerMutation.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerMutation) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerMutation.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerMutation) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerMutation) XXX_DiscardUnknown() {
	xxx_messageInfo_VerMutation.DiscardUnknown(m)
}
// xxx_messageInfo_VerMutation caches reflection metadata.
var xxx_messageInfo_VerMutation proto.InternalMessageInfo
// GetOp returns the mutation op, or VerOp_VerPut (the zero enum) when m is nil.
func (m *VerMutation) GetOp() VerOp {
	if m != nil {
		return m.Op
	}
	return VerOp_VerPut
}
// GetKey returns the key, or nil when m is nil.
func (m *VerMutation) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// GetValue returns the value, or nil when m is nil.
func (m *VerMutation) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
// VerValue pairs a stored value with the version it was written at.
type VerValue struct {
	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
	Version uint64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerValue) Reset() { *m = VerValue{} }
// String returns the compact protobuf text encoding of m.
func (m *VerValue) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerValue as a proto.Message.
func (*VerValue) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{67}).
func (*VerValue) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{67}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerValue) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerValue.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerValue) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerValue.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerValue) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerValue) XXX_DiscardUnknown() {
	xxx_messageInfo_VerValue.DiscardUnknown(m)
}
// xxx_messageInfo_VerValue caches reflection metadata.
var xxx_messageInfo_VerValue proto.InternalMessageInfo
// GetValue returns the stored bytes, or nil when m is nil.
func (m *VerValue) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
// GetVersion returns the value's version, or 0 when m is nil.
func (m *VerValue) GetVersion() uint64 {
	if m != nil {
		return m.Version
	}
	return 0
}
// VerError wraps a versioned-KV error as a plain string message.
type VerError struct {
	Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerError) Reset() { *m = VerError{} }
// String returns the compact protobuf text encoding of m.
func (m *VerError) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerError as a proto.Message.
func (*VerError) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{68}).
func (*VerError) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{68}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerError) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerError.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerError) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerError.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerError) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerError) XXX_DiscardUnknown() {
	xxx_messageInfo_VerError.DiscardUnknown(m)
}
// xxx_messageInfo_VerError caches reflection metadata.
var xxx_messageInfo_VerError proto.InternalMessageInfo
// GetError returns the error string, or "" when m is nil.
func (m *VerError) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// VerKvPair is one key/value result in a versioned-KV batch or scan;
// Error is set per-pair when that particular key failed.
type VerKvPair struct {
	Error *VerError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Value *VerValue `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerKvPair) Reset() { *m = VerKvPair{} }
// String returns the compact protobuf text encoding of m.
func (m *VerKvPair) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerKvPair as a proto.Message.
func (*VerKvPair) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{69}).
func (*VerKvPair) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{69}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerKvPair) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerKvPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerKvPair.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerKvPair) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerKvPair.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerKvPair) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerKvPair) XXX_DiscardUnknown() {
	xxx_messageInfo_VerKvPair.DiscardUnknown(m)
}
// xxx_messageInfo_VerKvPair caches reflection metadata.
var xxx_messageInfo_VerKvPair proto.InternalMessageInfo
// GetError returns the per-pair error, or nil when m is nil.
func (m *VerKvPair) GetError() *VerError {
	if m != nil {
		return m.Error
	}
	return nil
}
// GetKey returns the key, or nil when m is nil.
func (m *VerKvPair) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// GetValue returns the versioned value, or nil when m is nil.
func (m *VerKvPair) GetValue() *VerValue {
	if m != nil {
		return m.Value
	}
	return nil
}
// VerGetRequest reads a single key from the versioned KV store as of
// StartVersion.
type VerGetRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	StartVersion uint64 `protobuf:"varint,3,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerGetRequest) Reset() { *m = VerGetRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *VerGetRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerGetRequest as a proto.Message.
func (*VerGetRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{70}).
func (*VerGetRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{70}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerGetRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerGetRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerGetRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerGetRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerGetRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerGetRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_VerGetRequest.DiscardUnknown(m)
}
// xxx_messageInfo_VerGetRequest caches reflection metadata.
var xxx_messageInfo_VerGetRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *VerGetRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetKey returns the key, or nil when m is nil.
func (m *VerGetRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// GetStartVersion returns the snapshot version, or 0 when m is nil.
func (m *VerGetRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
// VerGetResponse carries the result of a VerGetRequest: the value, a
// not-found flag, a per-request error, or a region-level error.
type VerGetResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error *VerError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	Value *VerValue `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
	NotFound bool `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerGetResponse) Reset() { *m = VerGetResponse{} }
// String returns the compact protobuf text encoding of m.
func (m *VerGetResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerGetResponse as a proto.Message.
func (*VerGetResponse) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{71}).
func (*VerGetResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{71}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerGetResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerGetResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerGetResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerGetResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerGetResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerGetResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_VerGetResponse.DiscardUnknown(m)
}
// xxx_messageInfo_VerGetResponse caches reflection metadata.
var xxx_messageInfo_VerGetResponse proto.InternalMessageInfo
// GetRegionError returns the region-level error, or nil when m is nil.
func (m *VerGetResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetError returns the per-request error, or nil when m is nil.
func (m *VerGetResponse) GetError() *VerError {
	if m != nil {
		return m.Error
	}
	return nil
}
// GetValue returns the versioned value, or nil when m is nil.
func (m *VerGetResponse) GetValue() *VerValue {
	if m != nil {
		return m.Value
	}
	return nil
}
// GetNotFound reports whether the key was absent, or false when m is nil.
func (m *VerGetResponse) GetNotFound() bool {
	if m != nil {
		return m.NotFound
	}
	return false
}
// VerBatchGetRequest reads multiple keys from the versioned KV store as of
// StartVersion. (Field is named Key but holds multiple keys, matching the
// generated proto field.)
type VerBatchGetRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key [][]byte `protobuf:"bytes,2,rep,name=key" json:"key,omitempty"`
	StartVersion uint64 `protobuf:"varint,3,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerBatchGetRequest) Reset() { *m = VerBatchGetRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *VerBatchGetRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerBatchGetRequest as a proto.Message.
func (*VerBatchGetRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{72}).
func (*VerBatchGetRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{72}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerBatchGetRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerBatchGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerBatchGetRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerBatchGetRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerBatchGetRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerBatchGetRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerBatchGetRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_VerBatchGetRequest.DiscardUnknown(m)
}
// xxx_messageInfo_VerBatchGetRequest caches reflection metadata.
var xxx_messageInfo_VerBatchGetRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *VerBatchGetRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetKey returns the keys to fetch, or nil when m is nil.
func (m *VerBatchGetRequest) GetKey() [][]byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// GetStartVersion returns the snapshot version, or 0 when m is nil.
func (m *VerBatchGetRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
// VerBatchGetResponse returns the key/value pairs for a batch get; each pair
// may carry its own per-key error.
type VerBatchGetResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Pairs []*VerKvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerBatchGetResponse) Reset() { *m = VerBatchGetResponse{} }
// String returns the compact protobuf text encoding of m.
func (m *VerBatchGetResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerBatchGetResponse as a proto.Message.
func (*VerBatchGetResponse) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{73}).
func (*VerBatchGetResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{73}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerBatchGetResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerBatchGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerBatchGetResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerBatchGetResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerBatchGetResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerBatchGetResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerBatchGetResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_VerBatchGetResponse.DiscardUnknown(m)
}
// xxx_messageInfo_VerBatchGetResponse caches reflection metadata.
var xxx_messageInfo_VerBatchGetResponse proto.InternalMessageInfo
// GetRegionError returns the region-level error, or nil when m is nil.
func (m *VerBatchGetResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetPairs returns the fetched pairs, or nil when m is nil.
func (m *VerBatchGetResponse) GetPairs() []*VerKvPair {
	if m != nil {
		return m.Pairs
	}
	return nil
}
// VerMutRequest applies a single versioned-KV mutation at Version.
type VerMutRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Mut *VerMutation `protobuf:"bytes,2,opt,name=mut" json:"mut,omitempty"`
	Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerMutRequest) Reset() { *m = VerMutRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *VerMutRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerMutRequest as a proto.Message.
func (*VerMutRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{74}).
func (*VerMutRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{74}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerMutRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerMutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerMutRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerMutRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerMutRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerMutRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerMutRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_VerMutRequest.DiscardUnknown(m)
}
// xxx_messageInfo_VerMutRequest caches reflection metadata.
var xxx_messageInfo_VerMutRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *VerMutRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetMut returns the mutation, or nil when m is nil.
func (m *VerMutRequest) GetMut() *VerMutation {
	if m != nil {
		return m.Mut
	}
	return nil
}
// GetVersion returns the write version, or 0 when m is nil.
func (m *VerMutRequest) GetVersion() uint64 {
	if m != nil {
		return m.Version
	}
	return 0
}
// VerMutResponse reports the outcome of a single versioned-KV mutation.
type VerMutResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error *VerError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerMutResponse) Reset() { *m = VerMutResponse{} }
// String returns the compact protobuf text encoding of m.
func (m *VerMutResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerMutResponse as a proto.Message.
func (*VerMutResponse) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{75}).
func (*VerMutResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{75}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerMutResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerMutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerMutResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerMutResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerMutResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerMutResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerMutResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_VerMutResponse.DiscardUnknown(m)
}
// xxx_messageInfo_VerMutResponse caches reflection metadata.
var xxx_messageInfo_VerMutResponse proto.InternalMessageInfo
// GetRegionError returns the region-level error, or nil when m is nil.
func (m *VerMutResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetError returns the per-request error, or nil when m is nil.
func (m *VerMutResponse) GetError() *VerError {
	if m != nil {
		return m.Error
	}
	return nil
}
// VerBatchMutRequest applies multiple versioned-KV mutations at Version.
type VerBatchMutRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Muts []*VerMutation `protobuf:"bytes,2,rep,name=muts" json:"muts,omitempty"`
	Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerBatchMutRequest) Reset() { *m = VerBatchMutRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *VerBatchMutRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerBatchMutRequest as a proto.Message.
func (*VerBatchMutRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{76}).
func (*VerBatchMutRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{76}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerBatchMutRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerBatchMutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerBatchMutRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerBatchMutRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerBatchMutRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerBatchMutRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerBatchMutRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_VerBatchMutRequest.DiscardUnknown(m)
}
// xxx_messageInfo_VerBatchMutRequest caches reflection metadata.
var xxx_messageInfo_VerBatchMutRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *VerBatchMutRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetMuts returns the mutations, or nil when m is nil.
func (m *VerBatchMutRequest) GetMuts() []*VerMutation {
	if m != nil {
		return m.Muts
	}
	return nil
}
// GetVersion returns the write version, or 0 when m is nil.
func (m *VerBatchMutRequest) GetVersion() uint64 {
	if m != nil {
		return m.Version
	}
	return 0
}
// VerBatchMutResponse reports the outcome of a batch of versioned-KV
// mutations.
type VerBatchMutResponse struct {
	RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error *VerError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerBatchMutResponse) Reset() { *m = VerBatchMutResponse{} }
// String returns the compact protobuf text encoding of m.
func (m *VerBatchMutResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerBatchMutResponse as a proto.Message.
func (*VerBatchMutResponse) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{77}).
func (*VerBatchMutResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{77}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerBatchMutResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerBatchMutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerBatchMutResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerBatchMutResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerBatchMutResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerBatchMutResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerBatchMutResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_VerBatchMutResponse.DiscardUnknown(m)
}
// xxx_messageInfo_VerBatchMutResponse caches reflection metadata.
var xxx_messageInfo_VerBatchMutResponse proto.InternalMessageInfo
// GetRegionError returns the region-level error, or nil when m is nil.
func (m *VerBatchMutResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetError returns the per-request error, or nil when m is nil.
func (m *VerBatchMutResponse) GetError() *VerError {
	if m != nil {
		return m.Error
	}
	return nil
}
// VerScanRequest scans the versioned KV store over [StartKey, EndKey) as of
// StartVersion, returning at most Limit entries; KeyOnly skips values and
// Reverse scans backwards (directions presumed from field names — TODO
// confirm against kvrpcpb.proto).
type VerScanRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
	KeyOnly bool `protobuf:"varint,5,opt,name=key_only,json=keyOnly,proto3" json:"key_only,omitempty"`
	Reverse bool `protobuf:"varint,6,opt,name=reverse,proto3" json:"reverse,omitempty"`
	StartVersion uint64 `protobuf:"varint,7,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
// Reset restores m to the empty message.
func (m *VerScanRequest) Reset() { *m = VerScanRequest{} }
// String returns the compact protobuf text encoding of m.
func (m *VerScanRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *VerScanRequest as a proto.Message.
func (*VerScanRequest) ProtoMessage() {}
// Descriptor returns the file descriptor bytes and message index ([]int{78}).
func (*VerScanRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{78}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *VerScanRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire encoding of m to b; deterministic marshaling
// falls back to the reflection-based marshaler.
func (m *VerScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerScanRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges fields from src into dst.
func (dst *VerScanRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerScanRequest.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *VerScanRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops fields that were unknown at unmarshal time.
func (m *VerScanRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_VerScanRequest.DiscardUnknown(m)
}
// xxx_messageInfo_VerScanRequest caches reflection metadata.
var xxx_messageInfo_VerScanRequest proto.InternalMessageInfo
// GetContext returns the request context, or nil when m is nil.
func (m *VerScanRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetStartKey returns the scan start key, or nil when m is nil.
func (m *VerScanRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
// GetEndKey returns the scan end key, or nil when m is nil.
func (m *VerScanRequest) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
// GetLimit returns the maximum number of entries, or 0 when m is nil.
func (m *VerScanRequest) GetLimit() uint32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
// GetKeyOnly reports whether values are omitted, or false when m is nil.
func (m *VerScanRequest) GetKeyOnly() bool {
	if m != nil {
		return m.KeyOnly
	}
	return false
}
// GetReverse reports whether the scan is reversed, or false when m is nil.
func (m *VerScanRequest) GetReverse() bool {
	if m != nil {
		return m.Reverse
	}
	return false
}
// GetStartVersion returns the snapshot version, or 0 when m is nil.
func (m *VerScanRequest) GetStartVersion() uint64 {
	if m != nil {
		return m.StartVersion
	}
	return 0
}
// VerScanResponse is the reply to VerScanRequest: either a region-level
// error or the scanned key/value pairs.
type VerScanResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Pairs                []*VerKvPair   `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

func (m *VerScanResponse) Reset()         { *m = VerScanResponse{} }
func (m *VerScanResponse) String() string { return proto.CompactTextString(m) }
func (*VerScanResponse) ProtoMessage()    {}
func (*VerScanResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{79}
}
func (m *VerScanResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *VerScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerScanResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *VerScanResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerScanResponse.Merge(dst, src)
}
func (m *VerScanResponse) XXX_Size() int {
	return m.Size()
}
func (m *VerScanResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_VerScanResponse.DiscardUnknown(m)
}

var xxx_messageInfo_VerScanResponse proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *VerScanResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}

func (m *VerScanResponse) GetPairs() []*VerKvPair {
	if m != nil {
		return m.Pairs
	}
	return nil
}
// VerDeleteRangeRequest asks for deletion of versioned keys in the range
// delimited by start_key/end_key. Exact range semantics are defined in
// kvrpcpb.proto.
type VerDeleteRangeRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartKey             []byte   `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	EndKey               []byte   `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *VerDeleteRangeRequest) Reset()         { *m = VerDeleteRangeRequest{} }
func (m *VerDeleteRangeRequest) String() string { return proto.CompactTextString(m) }
func (*VerDeleteRangeRequest) ProtoMessage()    {}
func (*VerDeleteRangeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{80}
}
func (m *VerDeleteRangeRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *VerDeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerDeleteRangeRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *VerDeleteRangeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerDeleteRangeRequest.Merge(dst, src)
}
func (m *VerDeleteRangeRequest) XXX_Size() int {
	return m.Size()
}
func (m *VerDeleteRangeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_VerDeleteRangeRequest.DiscardUnknown(m)
}

var xxx_messageInfo_VerDeleteRangeRequest proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *VerDeleteRangeRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}

func (m *VerDeleteRangeRequest) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}

func (m *VerDeleteRangeRequest) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
// VerDeleteRangeResponse is the reply to VerDeleteRangeRequest, carrying
// an optional region-level error and an optional request-level VerError.
type VerDeleteRangeResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                *VerError      `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

func (m *VerDeleteRangeResponse) Reset()         { *m = VerDeleteRangeResponse{} }
func (m *VerDeleteRangeResponse) String() string { return proto.CompactTextString(m) }
func (*VerDeleteRangeResponse) ProtoMessage()    {}
func (*VerDeleteRangeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{81}
}
func (m *VerDeleteRangeResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *VerDeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VerDeleteRangeResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *VerDeleteRangeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VerDeleteRangeResponse.Merge(dst, src)
}
func (m *VerDeleteRangeResponse) XXX_Size() int {
	return m.Size()
}
func (m *VerDeleteRangeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_VerDeleteRangeResponse.DiscardUnknown(m)
}

var xxx_messageInfo_VerDeleteRangeResponse proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *VerDeleteRangeResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}

func (m *VerDeleteRangeResponse) GetError() *VerError {
	if m != nil {
		return m.Error
	}
	return nil
}
// MvccGetByKeyRequest asks for the MVCC information of a single key
// (a debugging/inspection RPC).
type MvccGetByKeyRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key                  []byte   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *MvccGetByKeyRequest) Reset()         { *m = MvccGetByKeyRequest{} }
func (m *MvccGetByKeyRequest) String() string { return proto.CompactTextString(m) }
func (*MvccGetByKeyRequest) ProtoMessage()    {}
func (*MvccGetByKeyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{82}
}
func (m *MvccGetByKeyRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *MvccGetByKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccGetByKeyRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccGetByKeyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccGetByKeyRequest.Merge(dst, src)
}
func (m *MvccGetByKeyRequest) XXX_Size() int {
	return m.Size()
}
func (m *MvccGetByKeyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccGetByKeyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_MvccGetByKeyRequest proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *MvccGetByKeyRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}

func (m *MvccGetByKeyRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// MvccGetByKeyResponse is the reply to MvccGetByKeyRequest. Note that
// Error here is a plain string (unlike the structured KeyError used by
// transactional responses); Info holds the key's MVCC record.
type MvccGetByKeyResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	Info                 *MvccInfo      `protobuf:"bytes,3,opt,name=info" json:"info,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

func (m *MvccGetByKeyResponse) Reset()         { *m = MvccGetByKeyResponse{} }
func (m *MvccGetByKeyResponse) String() string { return proto.CompactTextString(m) }
func (*MvccGetByKeyResponse) ProtoMessage()    {}
func (*MvccGetByKeyResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{83}
}
func (m *MvccGetByKeyResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *MvccGetByKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccGetByKeyResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccGetByKeyResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccGetByKeyResponse.Merge(dst, src)
}
func (m *MvccGetByKeyResponse) XXX_Size() int {
	return m.Size()
}
func (m *MvccGetByKeyResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccGetByKeyResponse.DiscardUnknown(m)
}

var xxx_messageInfo_MvccGetByKeyResponse proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *MvccGetByKeyResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}

func (m *MvccGetByKeyResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}

func (m *MvccGetByKeyResponse) GetInfo() *MvccInfo {
	if m != nil {
		return m.Info
	}
	return nil
}
// MvccGetByStartTsRequest asks for the MVCC record of the transaction
// identified by start_ts (a debugging/inspection RPC).
type MvccGetByStartTsRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	StartTs              uint64   `protobuf:"varint,2,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *MvccGetByStartTsRequest) Reset()         { *m = MvccGetByStartTsRequest{} }
func (m *MvccGetByStartTsRequest) String() string { return proto.CompactTextString(m) }
func (*MvccGetByStartTsRequest) ProtoMessage()    {}
func (*MvccGetByStartTsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{84}
}
func (m *MvccGetByStartTsRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *MvccGetByStartTsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccGetByStartTsRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccGetByStartTsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccGetByStartTsRequest.Merge(dst, src)
}
func (m *MvccGetByStartTsRequest) XXX_Size() int {
	return m.Size()
}
func (m *MvccGetByStartTsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccGetByStartTsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_MvccGetByStartTsRequest proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *MvccGetByStartTsRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}

func (m *MvccGetByStartTsRequest) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}
// MvccGetByStartTsResponse is the reply to MvccGetByStartTsRequest: the
// located key plus its MVCC record, or a string error / region error.
type MvccGetByStartTsResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	Key                  []byte         `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	Info                 *MvccInfo      `protobuf:"bytes,4,opt,name=info" json:"info,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

func (m *MvccGetByStartTsResponse) Reset()         { *m = MvccGetByStartTsResponse{} }
func (m *MvccGetByStartTsResponse) String() string { return proto.CompactTextString(m) }
func (*MvccGetByStartTsResponse) ProtoMessage()    {}
func (*MvccGetByStartTsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{85}
}
func (m *MvccGetByStartTsResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *MvccGetByStartTsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccGetByStartTsResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccGetByStartTsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccGetByStartTsResponse.Merge(dst, src)
}
func (m *MvccGetByStartTsResponse) XXX_Size() int {
	return m.Size()
}
func (m *MvccGetByStartTsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccGetByStartTsResponse.DiscardUnknown(m)
}

var xxx_messageInfo_MvccGetByStartTsResponse proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *MvccGetByStartTsResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}

func (m *MvccGetByStartTsResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}

func (m *MvccGetByStartTsResponse) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *MvccGetByStartTsResponse) GetInfo() *MvccInfo {
	if m != nil {
		return m.Info
	}
	return nil
}
// Miscellaneous metadata attached to most requests.
// Context identifies the target region (id, epoch, peer) and carries
// per-request execution hints: priority, isolation level, cache/sync-log
// flags, statistics toggles, replica/stale-read flags, resolved locks,
// an execution-time budget, and a task id for scheduling.
type Context struct {
	RegionId    uint64              `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
	RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,2,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"`
	Peer        *metapb.Peer        `protobuf:"bytes,3,opt,name=peer" json:"peer,omitempty"`
	Term        uint64              `protobuf:"varint,5,opt,name=term,proto3" json:"term,omitempty"`
	Priority    CommandPri          `protobuf:"varint,6,opt,name=priority,proto3,enum=kvrpcpb.CommandPri" json:"priority,omitempty"`
	IsolationLevel IsolationLevel   `protobuf:"varint,7,opt,name=isolation_level,json=isolationLevel,proto3,enum=kvrpcpb.IsolationLevel" json:"isolation_level,omitempty"`
	NotFillCache bool               `protobuf:"varint,8,opt,name=not_fill_cache,json=notFillCache,proto3" json:"not_fill_cache,omitempty"`
	SyncLog      bool               `protobuf:"varint,9,opt,name=sync_log,json=syncLog,proto3" json:"sync_log,omitempty"`
	// True means execution time statistics should be recorded and returned.
	RecordTimeStat bool `protobuf:"varint,10,opt,name=record_time_stat,json=recordTimeStat,proto3" json:"record_time_stat,omitempty"`
	// True means RocksDB scan statistics should be recorded and returned.
	RecordScanStat bool     `protobuf:"varint,11,opt,name=record_scan_stat,json=recordScanStat,proto3" json:"record_scan_stat,omitempty"`
	ReplicaRead    bool     `protobuf:"varint,12,opt,name=replica_read,json=replicaRead,proto3" json:"replica_read,omitempty"`
	ResolvedLocks  []uint64 `protobuf:"varint,13,rep,packed,name=resolved_locks,json=resolvedLocks" json:"resolved_locks,omitempty"`
	MaxExecutionDurationMs uint64 `protobuf:"varint,14,opt,name=max_execution_duration_ms,json=maxExecutionDurationMs,proto3" json:"max_execution_duration_ms,omitempty"`
	// After a region applies to `applied_index`, we can get a
	// snapshot for the region even if the peer is a follower.
	AppliedIndex uint64 `protobuf:"varint,15,opt,name=applied_index,json=appliedIndex,proto3" json:"applied_index,omitempty"`
	// A hint for TiKV to schedule tasks more fairly. Query with same task ID
	// may share same priority and resource quota.
	TaskId uint64 `protobuf:"varint,16,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
	// Not required to read the most up-to-date data, replicas with `safe_ts` >= `start_ts`
	// can handle read request directly
	StaleRead            bool     `protobuf:"varint,17,opt,name=stale_read,json=staleRead,proto3" json:"stale_read,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Context) Reset()         { *m = Context{} }
func (m *Context) String() string { return proto.CompactTextString(m) }
func (*Context) ProtoMessage()    {}
func (*Context) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{86}
}
func (m *Context) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Context) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Context.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *Context) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Context.Merge(dst, src)
}
func (m *Context) XXX_Size() int {
	return m.Size()
}
func (m *Context) XXX_DiscardUnknown() {
	xxx_messageInfo_Context.DiscardUnknown(m)
}

var xxx_messageInfo_Context proto.InternalMessageInfo

// Nil-receiver-safe generated accessors; enum getters return the enum's
// zero value (CommandPri_Normal, IsolationLevel_SI) on a nil receiver.
func (m *Context) GetRegionId() uint64 {
	if m != nil {
		return m.RegionId
	}
	return 0
}

func (m *Context) GetRegionEpoch() *metapb.RegionEpoch {
	if m != nil {
		return m.RegionEpoch
	}
	return nil
}

func (m *Context) GetPeer() *metapb.Peer {
	if m != nil {
		return m.Peer
	}
	return nil
}

func (m *Context) GetTerm() uint64 {
	if m != nil {
		return m.Term
	}
	return 0
}

func (m *Context) GetPriority() CommandPri {
	if m != nil {
		return m.Priority
	}
	return CommandPri_Normal
}

func (m *Context) GetIsolationLevel() IsolationLevel {
	if m != nil {
		return m.IsolationLevel
	}
	return IsolationLevel_SI
}

func (m *Context) GetNotFillCache() bool {
	if m != nil {
		return m.NotFillCache
	}
	return false
}

func (m *Context) GetSyncLog() bool {
	if m != nil {
		return m.SyncLog
	}
	return false
}

func (m *Context) GetRecordTimeStat() bool {
	if m != nil {
		return m.RecordTimeStat
	}
	return false
}

func (m *Context) GetRecordScanStat() bool {
	if m != nil {
		return m.RecordScanStat
	}
	return false
}

func (m *Context) GetReplicaRead() bool {
	if m != nil {
		return m.ReplicaRead
	}
	return false
}

func (m *Context) GetResolvedLocks() []uint64 {
	if m != nil {
		return m.ResolvedLocks
	}
	return nil
}

func (m *Context) GetMaxExecutionDurationMs() uint64 {
	if m != nil {
		return m.MaxExecutionDurationMs
	}
	return 0
}

func (m *Context) GetAppliedIndex() uint64 {
	if m != nil {
		return m.AppliedIndex
	}
	return 0
}

func (m *Context) GetTaskId() uint64 {
	if m != nil {
		return m.TaskId
	}
	return 0
}

func (m *Context) GetStaleRead() bool {
	if m != nil {
		return m.StaleRead
	}
	return false
}
// LockInfo describes a transaction lock found on a key: the transaction's
// primary lock, its start version, TTL, lock type, pessimistic
// for-update timestamp, and (for Async Commit) the min commit ts and
// secondary keys.
type LockInfo struct {
	PrimaryLock []byte `protobuf:"bytes,1,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"`
	LockVersion uint64 `protobuf:"varint,2,opt,name=lock_version,json=lockVersion,proto3" json:"lock_version,omitempty"`
	Key         []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	LockTtl     uint64 `protobuf:"varint,4,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"`
	// How many keys this transaction involves in this region.
	TxnSize         uint64 `protobuf:"varint,5,opt,name=txn_size,json=txnSize,proto3" json:"txn_size,omitempty"`
	LockType        Op     `protobuf:"varint,6,opt,name=lock_type,json=lockType,proto3,enum=kvrpcpb.Op" json:"lock_type,omitempty"`
	LockForUpdateTs uint64 `protobuf:"varint,7,opt,name=lock_for_update_ts,json=lockForUpdateTs,proto3" json:"lock_for_update_ts,omitempty"`
	// Fields for transactions that are using Async Commit.
	UseAsyncCommit       bool     `protobuf:"varint,8,opt,name=use_async_commit,json=useAsyncCommit,proto3" json:"use_async_commit,omitempty"`
	MinCommitTs          uint64   `protobuf:"varint,9,opt,name=min_commit_ts,json=minCommitTs,proto3" json:"min_commit_ts,omitempty"`
	Secondaries          [][]byte `protobuf:"bytes,10,rep,name=secondaries" json:"secondaries,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *LockInfo) Reset()         { *m = LockInfo{} }
func (m *LockInfo) String() string { return proto.CompactTextString(m) }
func (*LockInfo) ProtoMessage()    {}
func (*LockInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{87}
}
func (m *LockInfo) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *LockInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_LockInfo.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *LockInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LockInfo.Merge(dst, src)
}
func (m *LockInfo) XXX_Size() int {
	return m.Size()
}
func (m *LockInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_LockInfo.DiscardUnknown(m)
}

var xxx_messageInfo_LockInfo proto.InternalMessageInfo

// Nil-receiver-safe generated accessors; GetLockType returns Op_Put (the
// enum zero value) on a nil receiver.
func (m *LockInfo) GetPrimaryLock() []byte {
	if m != nil {
		return m.PrimaryLock
	}
	return nil
}

func (m *LockInfo) GetLockVersion() uint64 {
	if m != nil {
		return m.LockVersion
	}
	return 0
}

func (m *LockInfo) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *LockInfo) GetLockTtl() uint64 {
	if m != nil {
		return m.LockTtl
	}
	return 0
}

func (m *LockInfo) GetTxnSize() uint64 {
	if m != nil {
		return m.TxnSize
	}
	return 0
}

func (m *LockInfo) GetLockType() Op {
	if m != nil {
		return m.LockType
	}
	return Op_Put
}

func (m *LockInfo) GetLockForUpdateTs() uint64 {
	if m != nil {
		return m.LockForUpdateTs
	}
	return 0
}

func (m *LockInfo) GetUseAsyncCommit() bool {
	if m != nil {
		return m.UseAsyncCommit
	}
	return false
}

func (m *LockInfo) GetMinCommitTs() uint64 {
	if m != nil {
		return m.MinCommitTs
	}
	return 0
}

func (m *LockInfo) GetSecondaries() [][]byte {
	if m != nil {
		return m.Secondaries
	}
	return nil
}
// KeyError is the structured per-key error for transactional commands.
// At most one variant is expected to be meaningful per instance: a lock
// conflict (Locked), retryable/abort text, a write conflict, duplicate
// insert (AlreadyExist), deadlock, commit-ts problems, or a missing txn.
type KeyError struct {
	Locked           *LockInfo         `protobuf:"bytes,1,opt,name=locked" json:"locked,omitempty"`
	Retryable        string            `protobuf:"bytes,2,opt,name=retryable,proto3" json:"retryable,omitempty"`
	Abort            string            `protobuf:"bytes,3,opt,name=abort,proto3" json:"abort,omitempty"`
	Conflict         *WriteConflict    `protobuf:"bytes,4,opt,name=conflict" json:"conflict,omitempty"`
	AlreadyExist     *AlreadyExist     `protobuf:"bytes,5,opt,name=already_exist,json=alreadyExist" json:"already_exist,omitempty"`
	Deadlock         *Deadlock         `protobuf:"bytes,6,opt,name=deadlock" json:"deadlock,omitempty"`
	CommitTsExpired  *CommitTsExpired  `protobuf:"bytes,7,opt,name=commit_ts_expired,json=commitTsExpired" json:"commit_ts_expired,omitempty"`
	TxnNotFound      *TxnNotFound      `protobuf:"bytes,8,opt,name=txn_not_found,json=txnNotFound" json:"txn_not_found,omitempty"`
	CommitTsTooLarge *CommitTsTooLarge `protobuf:"bytes,9,opt,name=commit_ts_too_large,json=commitTsTooLarge" json:"commit_ts_too_large,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *KeyError) Reset()         { *m = KeyError{} }
func (m *KeyError) String() string { return proto.CompactTextString(m) }
func (*KeyError) ProtoMessage()    {}
func (*KeyError) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{88}
}
func (m *KeyError) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *KeyError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_KeyError.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *KeyError) XXX_Merge(src proto.Message) {
	xxx_messageInfo_KeyError.Merge(dst, src)
}
func (m *KeyError) XXX_Size() int {
	return m.Size()
}
func (m *KeyError) XXX_DiscardUnknown() {
	xxx_messageInfo_KeyError.DiscardUnknown(m)
}

var xxx_messageInfo_KeyError proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *KeyError) GetLocked() *LockInfo {
	if m != nil {
		return m.Locked
	}
	return nil
}

func (m *KeyError) GetRetryable() string {
	if m != nil {
		return m.Retryable
	}
	return ""
}

func (m *KeyError) GetAbort() string {
	if m != nil {
		return m.Abort
	}
	return ""
}

func (m *KeyError) GetConflict() *WriteConflict {
	if m != nil {
		return m.Conflict
	}
	return nil
}

func (m *KeyError) GetAlreadyExist() *AlreadyExist {
	if m != nil {
		return m.AlreadyExist
	}
	return nil
}

func (m *KeyError) GetDeadlock() *Deadlock {
	if m != nil {
		return m.Deadlock
	}
	return nil
}

func (m *KeyError) GetCommitTsExpired() *CommitTsExpired {
	if m != nil {
		return m.CommitTsExpired
	}
	return nil
}

func (m *KeyError) GetTxnNotFound() *TxnNotFound {
	if m != nil {
		return m.TxnNotFound
	}
	return nil
}

func (m *KeyError) GetCommitTsTooLarge() *CommitTsTooLarge {
	if m != nil {
		return m.CommitTsTooLarge
	}
	return nil
}
// WriteConflict reports that a transaction (start_ts) conflicted on Key
// with another transaction identified by conflict_ts / conflict_commit_ts;
// Primary is the conflicting transaction's primary key.
type WriteConflict struct {
	StartTs              uint64   `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	ConflictTs           uint64   `protobuf:"varint,2,opt,name=conflict_ts,json=conflictTs,proto3" json:"conflict_ts,omitempty"`
	Key                  []byte   `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	Primary              []byte   `protobuf:"bytes,4,opt,name=primary,proto3" json:"primary,omitempty"`
	ConflictCommitTs     uint64   `protobuf:"varint,5,opt,name=conflict_commit_ts,json=conflictCommitTs,proto3" json:"conflict_commit_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *WriteConflict) Reset()         { *m = WriteConflict{} }
func (m *WriteConflict) String() string { return proto.CompactTextString(m) }
func (*WriteConflict) ProtoMessage()    {}
func (*WriteConflict) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{89}
}
func (m *WriteConflict) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *WriteConflict) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_WriteConflict.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *WriteConflict) XXX_Merge(src proto.Message) {
	xxx_messageInfo_WriteConflict.Merge(dst, src)
}
func (m *WriteConflict) XXX_Size() int {
	return m.Size()
}
func (m *WriteConflict) XXX_DiscardUnknown() {
	xxx_messageInfo_WriteConflict.DiscardUnknown(m)
}

var xxx_messageInfo_WriteConflict proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *WriteConflict) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}

func (m *WriteConflict) GetConflictTs() uint64 {
	if m != nil {
		return m.ConflictTs
	}
	return 0
}

func (m *WriteConflict) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *WriteConflict) GetPrimary() []byte {
	if m != nil {
		return m.Primary
	}
	return nil
}

func (m *WriteConflict) GetConflictCommitTs() uint64 {
	if m != nil {
		return m.ConflictCommitTs
	}
	return 0
}
// AlreadyExist reports that Key already exists (e.g. a failed
// insert-if-not-exists mutation).
type AlreadyExist struct {
	Key                  []byte   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *AlreadyExist) Reset()         { *m = AlreadyExist{} }
func (m *AlreadyExist) String() string { return proto.CompactTextString(m) }
func (*AlreadyExist) ProtoMessage()    {}
func (*AlreadyExist) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{90}
}
func (m *AlreadyExist) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *AlreadyExist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_AlreadyExist.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *AlreadyExist) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AlreadyExist.Merge(dst, src)
}
func (m *AlreadyExist) XXX_Size() int {
	return m.Size()
}
func (m *AlreadyExist) XXX_DiscardUnknown() {
	xxx_messageInfo_AlreadyExist.DiscardUnknown(m)
}

var xxx_messageInfo_AlreadyExist proto.InternalMessageInfo

// GetKey returns the conflicting key, or nil on a nil receiver.
func (m *AlreadyExist) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// Deadlock reports a detected deadlock: the timestamp of the lock being
// waited on, the waited key, and a hash identifying the wait edge.
type Deadlock struct {
	LockTs               uint64   `protobuf:"varint,1,opt,name=lock_ts,json=lockTs,proto3" json:"lock_ts,omitempty"`
	LockKey              []byte   `protobuf:"bytes,2,opt,name=lock_key,json=lockKey,proto3" json:"lock_key,omitempty"`
	DeadlockKeyHash      uint64   `protobuf:"varint,3,opt,name=deadlock_key_hash,json=deadlockKeyHash,proto3" json:"deadlock_key_hash,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Deadlock) Reset()         { *m = Deadlock{} }
func (m *Deadlock) String() string { return proto.CompactTextString(m) }
func (*Deadlock) ProtoMessage()    {}
func (*Deadlock) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{91}
}
func (m *Deadlock) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Deadlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Deadlock.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *Deadlock) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Deadlock.Merge(dst, src)
}
func (m *Deadlock) XXX_Size() int {
	return m.Size()
}
func (m *Deadlock) XXX_DiscardUnknown() {
	xxx_messageInfo_Deadlock.DiscardUnknown(m)
}

var xxx_messageInfo_Deadlock proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *Deadlock) GetLockTs() uint64 {
	if m != nil {
		return m.LockTs
	}
	return 0
}

func (m *Deadlock) GetLockKey() []byte {
	if m != nil {
		return m.LockKey
	}
	return nil
}

func (m *Deadlock) GetDeadlockKeyHash() uint64 {
	if m != nil {
		return m.DeadlockKeyHash
	}
	return 0
}
// CommitTsExpired reports that the attempted commit timestamp of
// transaction start_ts on Key is no longer valid; min_commit_ts is the
// smallest commit ts that would be acceptable.
type CommitTsExpired struct {
	StartTs              uint64   `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	AttemptedCommitTs    uint64   `protobuf:"varint,2,opt,name=attempted_commit_ts,json=attemptedCommitTs,proto3" json:"attempted_commit_ts,omitempty"`
	Key                  []byte   `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	MinCommitTs          uint64   `protobuf:"varint,4,opt,name=min_commit_ts,json=minCommitTs,proto3" json:"min_commit_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *CommitTsExpired) Reset()         { *m = CommitTsExpired{} }
func (m *CommitTsExpired) String() string { return proto.CompactTextString(m) }
func (*CommitTsExpired) ProtoMessage()    {}
func (*CommitTsExpired) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{92}
}
func (m *CommitTsExpired) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CommitTsExpired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CommitTsExpired.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CommitTsExpired) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CommitTsExpired.Merge(dst, src)
}
func (m *CommitTsExpired) XXX_Size() int {
	return m.Size()
}
func (m *CommitTsExpired) XXX_DiscardUnknown() {
	xxx_messageInfo_CommitTsExpired.DiscardUnknown(m)
}

var xxx_messageInfo_CommitTsExpired proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *CommitTsExpired) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}

func (m *CommitTsExpired) GetAttemptedCommitTs() uint64 {
	if m != nil {
		return m.AttemptedCommitTs
	}
	return 0
}

func (m *CommitTsExpired) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *CommitTsExpired) GetMinCommitTs() uint64 {
	if m != nil {
		return m.MinCommitTs
	}
	return 0
}
// TxnNotFound reports that no transaction with the given start_ts was
// found for the given primary key.
type TxnNotFound struct {
	StartTs              uint64   `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	PrimaryKey           []byte   `protobuf:"bytes,2,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *TxnNotFound) Reset()         { *m = TxnNotFound{} }
func (m *TxnNotFound) String() string { return proto.CompactTextString(m) }
func (*TxnNotFound) ProtoMessage()    {}
func (*TxnNotFound) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{93}
}
func (m *TxnNotFound) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *TxnNotFound) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_TxnNotFound.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *TxnNotFound) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TxnNotFound.Merge(dst, src)
}
func (m *TxnNotFound) XXX_Size() int {
	return m.Size()
}
func (m *TxnNotFound) XXX_DiscardUnknown() {
	xxx_messageInfo_TxnNotFound.DiscardUnknown(m)
}

var xxx_messageInfo_TxnNotFound proto.InternalMessageInfo

// Nil-receiver-safe generated accessors.
func (m *TxnNotFound) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}

func (m *TxnNotFound) GetPrimaryKey() []byte {
	if m != nil {
		return m.PrimaryKey
	}
	return nil
}
// CommitTsTooLarge is the generated struct for the kvrpcpb.CommitTsTooLarge
// message; it reports the commit_ts that was judged too large.
type CommitTsTooLarge struct {
	CommitTs uint64 `protobuf:"varint,1,opt,name=commit_ts,json=commitTs,proto3" json:"commit_ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *CommitTsTooLarge) Reset() { *m = CommitTsTooLarge{} }
func (m *CommitTsTooLarge) String() string { return proto.CompactTextString(m) }
func (*CommitTsTooLarge) ProtoMessage() {}
func (*CommitTsTooLarge) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{94}
}
func (m *CommitTsTooLarge) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *CommitTsTooLarge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CommitTsTooLarge.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CommitTsTooLarge) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CommitTsTooLarge.Merge(dst, src)
}
func (m *CommitTsTooLarge) XXX_Size() int {
	return m.Size()
}
func (m *CommitTsTooLarge) XXX_DiscardUnknown() {
	xxx_messageInfo_CommitTsTooLarge.DiscardUnknown(m)
}
var xxx_messageInfo_CommitTsTooLarge proto.InternalMessageInfo
// GetCommitTs is a nil-safe getter (returns 0 on a nil receiver).
func (m *CommitTsTooLarge) GetCommitTs() uint64 {
	if m != nil {
		return m.CommitTs
	}
	return 0
}
// TimeDetail is the generated struct for the kvrpcpb.TimeDetail message:
// a breakdown of wall-clock time spent on the TiKV side for one request.
type TimeDetail struct {
	// Off-cpu wall time elapsed in TiKV side. Usually this includes queue waiting time and
	// other kind of waitings in series.
	WaitWallTimeMs int64 `protobuf:"varint,1,opt,name=wait_wall_time_ms,json=waitWallTimeMs,proto3" json:"wait_wall_time_ms,omitempty"`
	// Off-cpu and on-cpu wall time elapsed to actually process the request payload. It does not
	// include `wait_wall_time`.
	// This field is very close to the CPU time in most cases. Some wait time spend in RocksDB
	// cannot be excluded for now, like Mutex wait time, which is included in this field, so that
	// this field is called wall time instead of CPU time.
	ProcessWallTimeMs int64 `protobuf:"varint,2,opt,name=process_wall_time_ms,json=processWallTimeMs,proto3" json:"process_wall_time_ms,omitempty"`
	// KV read wall Time means the time used in key/value scan and get.
	KvReadWallTimeMs int64 `protobuf:"varint,3,opt,name=kv_read_wall_time_ms,json=kvReadWallTimeMs,proto3" json:"kv_read_wall_time_ms,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *TimeDetail) Reset() { *m = TimeDetail{} }
func (m *TimeDetail) String() string { return proto.CompactTextString(m) }
func (*TimeDetail) ProtoMessage() {}
func (*TimeDetail) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{95}
}
func (m *TimeDetail) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *TimeDetail) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_TimeDetail.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *TimeDetail) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TimeDetail.Merge(dst, src)
}
func (m *TimeDetail) XXX_Size() int {
	return m.Size()
}
func (m *TimeDetail) XXX_DiscardUnknown() {
	xxx_messageInfo_TimeDetail.DiscardUnknown(m)
}
var xxx_messageInfo_TimeDetail proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *TimeDetail) GetWaitWallTimeMs() int64 {
	if m != nil {
		return m.WaitWallTimeMs
	}
	return 0
}
func (m *TimeDetail) GetProcessWallTimeMs() int64 {
	if m != nil {
		return m.ProcessWallTimeMs
	}
	return 0
}
func (m *TimeDetail) GetKvReadWallTimeMs() int64 {
	if m != nil {
		return m.KvReadWallTimeMs
	}
	return 0
}
// ScanInfo is the generated struct for the kvrpcpb.ScanInfo message:
// counters for one scan (total keys examined vs. keys actually processed).
type ScanInfo struct {
	Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
	Processed int64 `protobuf:"varint,2,opt,name=processed,proto3" json:"processed,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *ScanInfo) Reset() { *m = ScanInfo{} }
func (m *ScanInfo) String() string { return proto.CompactTextString(m) }
func (*ScanInfo) ProtoMessage() {}
func (*ScanInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{96}
}
func (m *ScanInfo) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *ScanInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ScanInfo.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ScanInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ScanInfo.Merge(dst, src)
}
func (m *ScanInfo) XXX_Size() int {
	return m.Size()
}
func (m *ScanInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_ScanInfo.DiscardUnknown(m)
}
var xxx_messageInfo_ScanInfo proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *ScanInfo) GetTotal() int64 {
	if m != nil {
		return m.Total
	}
	return 0
}
func (m *ScanInfo) GetProcessed() int64 {
	if m != nil {
		return m.Processed
	}
	return 0
}
// Only reserved for compatibility.
// ScanDetail groups per-column-family ScanInfo counters (write, lock, data);
// new code reports scan statistics via ScanDetailV2 instead.
type ScanDetail struct {
	Write *ScanInfo `protobuf:"bytes,1,opt,name=write" json:"write,omitempty"`
	Lock *ScanInfo `protobuf:"bytes,2,opt,name=lock" json:"lock,omitempty"`
	Data *ScanInfo `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *ScanDetail) Reset() { *m = ScanDetail{} }
func (m *ScanDetail) String() string { return proto.CompactTextString(m) }
func (*ScanDetail) ProtoMessage() {}
func (*ScanDetail) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{97}
}
func (m *ScanDetail) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *ScanDetail) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ScanDetail.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ScanDetail) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ScanDetail.Merge(dst, src)
}
func (m *ScanDetail) XXX_Size() int {
	return m.Size()
}
func (m *ScanDetail) XXX_DiscardUnknown() {
	xxx_messageInfo_ScanDetail.DiscardUnknown(m)
}
var xxx_messageInfo_ScanDetail proto.InternalMessageInfo
// Nil-safe getters: return nil when the receiver is nil.
func (m *ScanDetail) GetWrite() *ScanInfo {
	if m != nil {
		return m.Write
	}
	return nil
}
func (m *ScanDetail) GetLock() *ScanInfo {
	if m != nil {
		return m.Lock
	}
	return nil
}
func (m *ScanDetail) GetData() *ScanInfo {
	if m != nil {
		return m.Data
	}
	return nil
}
// ScanDetailV2 is the generated struct for the kvrpcpb.ScanDetailV2 message:
// detailed MVCC/RocksDB scan statistics that supersede ScanDetail.
type ScanDetailV2 struct {
	// Number of user keys scanned from the storage.
	// It does not include deleted version or RocksDB tombstone keys.
	// For Coprocessor requests, it includes keys that has been filtered out by
	// Selection.
	ProcessedVersions uint64 `protobuf:"varint,1,opt,name=processed_versions,json=processedVersions,proto3" json:"processed_versions,omitempty"`
	// Approximate number of MVCC keys meet during scanning. It includes
	// deleted versions, but does not include RocksDB tombstone keys.
	//
	// When this field is notably larger than `processed_versions`, it means
	// there are a lot of deleted MVCC keys.
	TotalVersions uint64 `protobuf:"varint,2,opt,name=total_versions,json=totalVersions,proto3" json:"total_versions,omitempty"`
	// Total number of deletes and single deletes skipped over during
	// iteration, i.e. how many RocksDB tombstones are skipped.
	RocksdbDeleteSkippedCount uint64 `protobuf:"varint,3,opt,name=rocksdb_delete_skipped_count,json=rocksdbDeleteSkippedCount,proto3" json:"rocksdb_delete_skipped_count,omitempty"`
	// Total number of internal keys skipped over during iteration.
	// See https://github.com/facebook/rocksdb/blob/9f1c84ca471d8b1ad7be9f3eebfc2c7e07dfd7a7/include/rocksdb/perf_context.h#L84 for details.
	RocksdbKeySkippedCount uint64 `protobuf:"varint,4,opt,name=rocksdb_key_skipped_count,json=rocksdbKeySkippedCount,proto3" json:"rocksdb_key_skipped_count,omitempty"`
	// Total number of RocksDB block cache hits.
	RocksdbBlockCacheHitCount uint64 `protobuf:"varint,5,opt,name=rocksdb_block_cache_hit_count,json=rocksdbBlockCacheHitCount,proto3" json:"rocksdb_block_cache_hit_count,omitempty"`
	// Total number of block reads (with IO).
	RocksdbBlockReadCount uint64 `protobuf:"varint,6,opt,name=rocksdb_block_read_count,json=rocksdbBlockReadCount,proto3" json:"rocksdb_block_read_count,omitempty"`
	// Total number of bytes from block reads.
	RocksdbBlockReadByte uint64 `protobuf:"varint,7,opt,name=rocksdb_block_read_byte,json=rocksdbBlockReadByte,proto3" json:"rocksdb_block_read_byte,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *ScanDetailV2) Reset() { *m = ScanDetailV2{} }
func (m *ScanDetailV2) String() string { return proto.CompactTextString(m) }
func (*ScanDetailV2) ProtoMessage() {}
func (*ScanDetailV2) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{98}
}
func (m *ScanDetailV2) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *ScanDetailV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ScanDetailV2.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ScanDetailV2) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ScanDetailV2.Merge(dst, src)
}
func (m *ScanDetailV2) XXX_Size() int {
	return m.Size()
}
func (m *ScanDetailV2) XXX_DiscardUnknown() {
	xxx_messageInfo_ScanDetailV2.DiscardUnknown(m)
}
var xxx_messageInfo_ScanDetailV2 proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *ScanDetailV2) GetProcessedVersions() uint64 {
	if m != nil {
		return m.ProcessedVersions
	}
	return 0
}
func (m *ScanDetailV2) GetTotalVersions() uint64 {
	if m != nil {
		return m.TotalVersions
	}
	return 0
}
func (m *ScanDetailV2) GetRocksdbDeleteSkippedCount() uint64 {
	if m != nil {
		return m.RocksdbDeleteSkippedCount
	}
	return 0
}
func (m *ScanDetailV2) GetRocksdbKeySkippedCount() uint64 {
	if m != nil {
		return m.RocksdbKeySkippedCount
	}
	return 0
}
func (m *ScanDetailV2) GetRocksdbBlockCacheHitCount() uint64 {
	if m != nil {
		return m.RocksdbBlockCacheHitCount
	}
	return 0
}
func (m *ScanDetailV2) GetRocksdbBlockReadCount() uint64 {
	if m != nil {
		return m.RocksdbBlockReadCount
	}
	return 0
}
func (m *ScanDetailV2) GetRocksdbBlockReadByte() uint64 {
	if m != nil {
		return m.RocksdbBlockReadByte
	}
	return 0
}
// ExecDetails is the generated struct for the kvrpcpb.ExecDetails message:
// optional execution statistics (time + legacy scan detail) attached to a
// response.
type ExecDetails struct {
	// Available when ctx.record_time_stat = true or meet slow query.
	TimeDetail *TimeDetail `protobuf:"bytes,1,opt,name=time_detail,json=timeDetail" json:"time_detail,omitempty"`
	// Available when ctx.record_scan_stat = true or meet slow query.
	ScanDetail *ScanDetail `protobuf:"bytes,2,opt,name=scan_detail,json=scanDetail" json:"scan_detail,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *ExecDetails) Reset() { *m = ExecDetails{} }
func (m *ExecDetails) String() string { return proto.CompactTextString(m) }
func (*ExecDetails) ProtoMessage() {}
func (*ExecDetails) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{99}
}
func (m *ExecDetails) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *ExecDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ExecDetails.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ExecDetails) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ExecDetails.Merge(dst, src)
}
func (m *ExecDetails) XXX_Size() int {
	return m.Size()
}
func (m *ExecDetails) XXX_DiscardUnknown() {
	xxx_messageInfo_ExecDetails.DiscardUnknown(m)
}
var xxx_messageInfo_ExecDetails proto.InternalMessageInfo
// Nil-safe getters: return nil when the receiver is nil.
func (m *ExecDetails) GetTimeDetail() *TimeDetail {
	if m != nil {
		return m.TimeDetail
	}
	return nil
}
func (m *ExecDetails) GetScanDetail() *ScanDetail {
	if m != nil {
		return m.ScanDetail
	}
	return nil
}
// ExecDetailsV2 is the generated struct for the kvrpcpb.ExecDetailsV2
// message: like ExecDetails but carrying the newer ScanDetailV2 statistics.
type ExecDetailsV2 struct {
	// Available when ctx.record_time_stat = true or meet slow query.
	TimeDetail *TimeDetail `protobuf:"bytes,1,opt,name=time_detail,json=timeDetail" json:"time_detail,omitempty"`
	// Available when ctx.record_scan_stat = true or meet slow query.
	ScanDetailV2 *ScanDetailV2 `protobuf:"bytes,2,opt,name=scan_detail_v2,json=scanDetailV2" json:"scan_detail_v2,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *ExecDetailsV2) Reset() { *m = ExecDetailsV2{} }
func (m *ExecDetailsV2) String() string { return proto.CompactTextString(m) }
func (*ExecDetailsV2) ProtoMessage() {}
func (*ExecDetailsV2) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{100}
}
func (m *ExecDetailsV2) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *ExecDetailsV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ExecDetailsV2.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *ExecDetailsV2) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ExecDetailsV2.Merge(dst, src)
}
func (m *ExecDetailsV2) XXX_Size() int {
	return m.Size()
}
func (m *ExecDetailsV2) XXX_DiscardUnknown() {
	xxx_messageInfo_ExecDetailsV2.DiscardUnknown(m)
}
var xxx_messageInfo_ExecDetailsV2 proto.InternalMessageInfo
// Nil-safe getters: return nil when the receiver is nil.
func (m *ExecDetailsV2) GetTimeDetail() *TimeDetail {
	if m != nil {
		return m.TimeDetail
	}
	return nil
}
func (m *ExecDetailsV2) GetScanDetailV2() *ScanDetailV2 {
	if m != nil {
		return m.ScanDetailV2
	}
	return nil
}
// KvPair is the generated struct for the kvrpcpb.KvPair message: one
// key/value result, with an optional per-key KeyError when the entry could
// not be read.
type KvPair struct {
	Error *KeyError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *KvPair) Reset() { *m = KvPair{} }
func (m *KvPair) String() string { return proto.CompactTextString(m) }
func (*KvPair) ProtoMessage() {}
func (*KvPair) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{101}
}
func (m *KvPair) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *KvPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_KvPair.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *KvPair) XXX_Merge(src proto.Message) {
	xxx_messageInfo_KvPair.Merge(dst, src)
}
func (m *KvPair) XXX_Size() int {
	return m.Size()
}
func (m *KvPair) XXX_DiscardUnknown() {
	xxx_messageInfo_KvPair.DiscardUnknown(m)
}
var xxx_messageInfo_KvPair proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *KvPair) GetError() *KeyError {
	if m != nil {
		return m.Error
	}
	return nil
}
func (m *KvPair) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
func (m *KvPair) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
// Mutation is the generated struct for the kvrpcpb.Mutation message: one
// write operation (op + key + value) with an optional Assertion to be
// checked when the mutation is prewritten.
type Mutation struct {
	Op Op `protobuf:"varint,1,opt,name=op,proto3,enum=kvrpcpb.Op" json:"op,omitempty"`
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	Assertion Assertion `protobuf:"varint,4,opt,name=assertion,proto3,enum=kvrpcpb.Assertion" json:"assertion,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *Mutation) Reset() { *m = Mutation{} }
func (m *Mutation) String() string { return proto.CompactTextString(m) }
func (*Mutation) ProtoMessage() {}
func (*Mutation) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{102}
}
func (m *Mutation) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Mutation.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *Mutation) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Mutation.Merge(dst, src)
}
func (m *Mutation) XXX_Size() int {
	return m.Size()
}
func (m *Mutation) XXX_DiscardUnknown() {
	xxx_messageInfo_Mutation.DiscardUnknown(m)
}
var xxx_messageInfo_Mutation proto.InternalMessageInfo
// Nil-safe getters: enum getters return the enum's first value
// (Op_Put / Assertion_None) on a nil receiver, per protobuf convention.
func (m *Mutation) GetOp() Op {
	if m != nil {
		return m.Op
	}
	return Op_Put
}
func (m *Mutation) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
func (m *Mutation) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
func (m *Mutation) GetAssertion() Assertion {
	if m != nil {
		return m.Assertion
	}
	return Assertion_None
}
// MvccWrite is the generated struct for the kvrpcpb.MvccWrite message:
// one committed write record (op type, start/commit timestamps, and the
// short value if it was stored inline).
type MvccWrite struct {
	Type Op `protobuf:"varint,1,opt,name=type,proto3,enum=kvrpcpb.Op" json:"type,omitempty"`
	StartTs uint64 `protobuf:"varint,2,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	CommitTs uint64 `protobuf:"varint,3,opt,name=commit_ts,json=commitTs,proto3" json:"commit_ts,omitempty"`
	ShortValue []byte `protobuf:"bytes,4,opt,name=short_value,json=shortValue,proto3" json:"short_value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *MvccWrite) Reset() { *m = MvccWrite{} }
func (m *MvccWrite) String() string { return proto.CompactTextString(m) }
func (*MvccWrite) ProtoMessage() {}
func (*MvccWrite) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{103}
}
func (m *MvccWrite) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *MvccWrite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccWrite.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccWrite) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccWrite.Merge(dst, src)
}
func (m *MvccWrite) XXX_Size() int {
	return m.Size()
}
func (m *MvccWrite) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccWrite.DiscardUnknown(m)
}
var xxx_messageInfo_MvccWrite proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value (Op_Put for the enum)
// when the receiver is nil.
func (m *MvccWrite) GetType() Op {
	if m != nil {
		return m.Type
	}
	return Op_Put
}
func (m *MvccWrite) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}
func (m *MvccWrite) GetCommitTs() uint64 {
	if m != nil {
		return m.CommitTs
	}
	return 0
}
func (m *MvccWrite) GetShortValue() []byte {
	if m != nil {
		return m.ShortValue
	}
	return nil
}
// MvccValue is the generated struct for the kvrpcpb.MvccValue message:
// one MVCC value version keyed by the writing transaction's start_ts.
type MvccValue struct {
	StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *MvccValue) Reset() { *m = MvccValue{} }
func (m *MvccValue) String() string { return proto.CompactTextString(m) }
func (*MvccValue) ProtoMessage() {}
func (*MvccValue) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{104}
}
func (m *MvccValue) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *MvccValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccValue.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccValue) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccValue.Merge(dst, src)
}
func (m *MvccValue) XXX_Size() int {
	return m.Size()
}
func (m *MvccValue) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccValue.DiscardUnknown(m)
}
var xxx_messageInfo_MvccValue proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *MvccValue) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}
func (m *MvccValue) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
// MvccLock is the generated struct for the kvrpcpb.MvccLock message:
// one in-flight MVCC lock (op type, start_ts, primary lock key, and the
// short value if stored inline).
type MvccLock struct {
	Type Op `protobuf:"varint,1,opt,name=type,proto3,enum=kvrpcpb.Op" json:"type,omitempty"`
	StartTs uint64 `protobuf:"varint,2,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
	Primary []byte `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"`
	ShortValue []byte `protobuf:"bytes,4,opt,name=short_value,json=shortValue,proto3" json:"short_value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *MvccLock) Reset() { *m = MvccLock{} }
func (m *MvccLock) String() string { return proto.CompactTextString(m) }
func (*MvccLock) ProtoMessage() {}
func (*MvccLock) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{105}
}
func (m *MvccLock) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *MvccLock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccLock.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccLock) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccLock.Merge(dst, src)
}
func (m *MvccLock) XXX_Size() int {
	return m.Size()
}
func (m *MvccLock) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccLock.DiscardUnknown(m)
}
var xxx_messageInfo_MvccLock proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value (Op_Put for the enum)
// when the receiver is nil.
func (m *MvccLock) GetType() Op {
	if m != nil {
		return m.Type
	}
	return Op_Put
}
func (m *MvccLock) GetStartTs() uint64 {
	if m != nil {
		return m.StartTs
	}
	return 0
}
func (m *MvccLock) GetPrimary() []byte {
	if m != nil {
		return m.Primary
	}
	return nil
}
func (m *MvccLock) GetShortValue() []byte {
	if m != nil {
		return m.ShortValue
	}
	return nil
}
// MvccInfo is the generated struct for the kvrpcpb.MvccInfo message: the
// full MVCC state of one key — its current lock (if any) plus its write
// and value version histories.
type MvccInfo struct {
	Lock *MvccLock `protobuf:"bytes,1,opt,name=lock" json:"lock,omitempty"`
	Writes []*MvccWrite `protobuf:"bytes,2,rep,name=writes" json:"writes,omitempty"`
	Values []*MvccValue `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *MvccInfo) Reset() { *m = MvccInfo{} }
func (m *MvccInfo) String() string { return proto.CompactTextString(m) }
func (*MvccInfo) ProtoMessage() {}
func (*MvccInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{106}
}
func (m *MvccInfo) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *MvccInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MvccInfo.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *MvccInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MvccInfo.Merge(dst, src)
}
func (m *MvccInfo) XXX_Size() int {
	return m.Size()
}
func (m *MvccInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_MvccInfo.DiscardUnknown(m)
}
var xxx_messageInfo_MvccInfo proto.InternalMessageInfo
// Nil-safe getters: return nil when the receiver is nil.
func (m *MvccInfo) GetLock() *MvccLock {
	if m != nil {
		return m.Lock
	}
	return nil
}
func (m *MvccInfo) GetWrites() []*MvccWrite {
	if m != nil {
		return m.Writes
	}
	return nil
}
func (m *MvccInfo) GetValues() []*MvccValue {
	if m != nil {
		return m.Values
	}
	return nil
}
// TxnInfo is the generated struct for the kvrpcpb.TxnInfo message: a
// (txn, status) pair; the meaning of the status value is defined by the
// RPCs that carry it (see kvrpcpb.proto).
type TxnInfo struct {
	Txn uint64 `protobuf:"varint,1,opt,name=txn,proto3" json:"txn,omitempty"`
	Status uint64 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *TxnInfo) Reset() { *m = TxnInfo{} }
func (m *TxnInfo) String() string { return proto.CompactTextString(m) }
func (*TxnInfo) ProtoMessage() {}
func (*TxnInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{107}
}
func (m *TxnInfo) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *TxnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_TxnInfo.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *TxnInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TxnInfo.Merge(dst, src)
}
func (m *TxnInfo) XXX_Size() int {
	return m.Size()
}
func (m *TxnInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_TxnInfo.DiscardUnknown(m)
}
var xxx_messageInfo_TxnInfo proto.InternalMessageInfo
// Nil-safe getters: return 0 when the receiver is nil.
func (m *TxnInfo) GetTxn() uint64 {
	if m != nil {
		return m.Txn
	}
	return 0
}
func (m *TxnInfo) GetStatus() uint64 {
	if m != nil {
		return m.Status
	}
	return 0
}
// KeyRange is the generated struct for the kvrpcpb.KeyRange message: a
// [start_key, end_key) key span.
type KeyRange struct {
	StartKey []byte `protobuf:"bytes,1,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"`
	EndKey []byte `protobuf:"bytes,2,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *KeyRange) Reset() { *m = KeyRange{} }
func (m *KeyRange) String() string { return proto.CompactTextString(m) }
func (*KeyRange) ProtoMessage() {}
func (*KeyRange) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{108}
}
func (m *KeyRange) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *KeyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_KeyRange.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *KeyRange) XXX_Merge(src proto.Message) {
	xxx_messageInfo_KeyRange.Merge(dst, src)
}
func (m *KeyRange) XXX_Size() int {
	return m.Size()
}
func (m *KeyRange) XXX_DiscardUnknown() {
	xxx_messageInfo_KeyRange.DiscardUnknown(m)
}
var xxx_messageInfo_KeyRange proto.InternalMessageInfo
// Nil-safe getters: return nil when the receiver is nil.
func (m *KeyRange) GetStartKey() []byte {
	if m != nil {
		return m.StartKey
	}
	return nil
}
func (m *KeyRange) GetEndKey() []byte {
	if m != nil {
		return m.EndKey
	}
	return nil
}
// LeaderInfo is the generated struct for the kvrpcpb.LeaderInfo message:
// identifies a region leader (region id, leader peer id, raft term, and
// the region epoch) for leadership checks.
type LeaderInfo struct {
	RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
	PeerId uint64 `protobuf:"varint,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
	Term uint64 `protobuf:"varint,3,opt,name=term,proto3" json:"term,omitempty"`
	RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,4,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *LeaderInfo) Reset() { *m = LeaderInfo{} }
func (m *LeaderInfo) String() string { return proto.CompactTextString(m) }
func (*LeaderInfo) ProtoMessage() {}
func (*LeaderInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{109}
}
func (m *LeaderInfo) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *LeaderInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_LeaderInfo.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *LeaderInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LeaderInfo.Merge(dst, src)
}
func (m *LeaderInfo) XXX_Size() int {
	return m.Size()
}
func (m *LeaderInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_LeaderInfo.DiscardUnknown(m)
}
var xxx_messageInfo_LeaderInfo proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *LeaderInfo) GetRegionId() uint64 {
	if m != nil {
		return m.RegionId
	}
	return 0
}
func (m *LeaderInfo) GetPeerId() uint64 {
	if m != nil {
		return m.PeerId
	}
	return 0
}
func (m *LeaderInfo) GetTerm() uint64 {
	if m != nil {
		return m.Term
	}
	return 0
}
func (m *LeaderInfo) GetRegionEpoch() *metapb.RegionEpoch {
	if m != nil {
		return m.RegionEpoch
	}
	return nil
}
// CheckLeaderRequest is the generated struct for the
// kvrpcpb.CheckLeaderRequest message: a batch of LeaderInfo entries to be
// verified at timestamp ts.
type CheckLeaderRequest struct {
	Regions []*LeaderInfo `protobuf:"bytes,1,rep,name=regions" json:"regions,omitempty"`
	Ts uint64 `protobuf:"varint,2,opt,name=ts,proto3" json:"ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *CheckLeaderRequest) Reset() { *m = CheckLeaderRequest{} }
func (m *CheckLeaderRequest) String() string { return proto.CompactTextString(m) }
func (*CheckLeaderRequest) ProtoMessage() {}
func (*CheckLeaderRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{110}
}
func (m *CheckLeaderRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *CheckLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CheckLeaderRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckLeaderRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckLeaderRequest.Merge(dst, src)
}
func (m *CheckLeaderRequest) XXX_Size() int {
	return m.Size()
}
func (m *CheckLeaderRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckLeaderRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CheckLeaderRequest proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *CheckLeaderRequest) GetRegions() []*LeaderInfo {
	if m != nil {
		return m.Regions
	}
	return nil
}
func (m *CheckLeaderRequest) GetTs() uint64 {
	if m != nil {
		return m.Ts
	}
	return 0
}
// CheckLeaderResponse is the generated struct for the
// kvrpcpb.CheckLeaderResponse message: the region ids (packed varints)
// answered for the check at timestamp ts.
type CheckLeaderResponse struct {
	Regions []uint64 `protobuf:"varint,1,rep,packed,name=regions" json:"regions,omitempty"`
	Ts uint64 `protobuf:"varint,2,opt,name=ts,proto3" json:"ts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}
func (m *CheckLeaderResponse) Reset() { *m = CheckLeaderResponse{} }
func (m *CheckLeaderResponse) String() string { return proto.CompactTextString(m) }
func (*CheckLeaderResponse) ProtoMessage() {}
func (*CheckLeaderResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{111}
}
func (m *CheckLeaderResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal: deterministic requests go through the reflection-based
// marshaler; otherwise the generated MarshalTo fast path is used.
func (m *CheckLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CheckLeaderResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (dst *CheckLeaderResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckLeaderResponse.Merge(dst, src)
}
func (m *CheckLeaderResponse) XXX_Size() int {
	return m.Size()
}
func (m *CheckLeaderResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckLeaderResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CheckLeaderResponse proto.InternalMessageInfo
// Nil-safe getters: return the field's zero value when the receiver is nil.
func (m *CheckLeaderResponse) GetRegions() []uint64 {
	if m != nil {
		return m.Regions
	}
	return nil
}
func (m *CheckLeaderResponse) GetTs() uint64 {
	if m != nil {
		return m.Ts
	}
	return 0
}
// RawGetKeyTTLRequest asks for the remaining TTL of a raw key in the given
// column family.
type RawGetKeyTTLRequest struct {
	Context              *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key                  []byte   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Cf                   string   `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
// Reset restores the message to its zero value.
func (m *RawGetKeyTTLRequest) Reset()         { *m = RawGetKeyTTLRequest{} }
// String renders the message in the compact text format.
func (m *RawGetKeyTTLRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks the type as a protobuf message.
func (*RawGetKeyTTLRequest) ProtoMessage()    {}
// Descriptor returns the compressed file descriptor and the message's path in it.
func (*RawGetKeyTTLRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{112}
}
// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *RawGetKeyTTLRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire-format encoding of the message to b.
func (m *RawGetKeyTTLRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawGetKeyTTLRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges src into dst.
func (dst *RawGetKeyTTLRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawGetKeyTTLRequest.Merge(dst, src)
}
// XXX_Size reports the serialized size of the message in bytes.
func (m *RawGetKeyTTLRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops any unrecognized fields retained on the message.
func (m *RawGetKeyTTLRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawGetKeyTTLRequest.DiscardUnknown(m)
}
// xxx_messageInfo_RawGetKeyTTLRequest holds cached reflection metadata used by
// the XXX_* helpers for RawGetKeyTTLRequest.
var xxx_messageInfo_RawGetKeyTTLRequest proto.InternalMessageInfo
// GetContext returns the Context field; nil-receiver safe (returns nil).
func (m *RawGetKeyTTLRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetKey returns the Key field; nil-receiver safe (returns nil).
func (m *RawGetKeyTTLRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// GetCf returns the Cf field; nil-receiver safe (returns "").
func (m *RawGetKeyTTLRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
// RawGetKeyTTLResponse carries the remaining TTL of a raw key, or an error /
// not-found indication.
type RawGetKeyTTLResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	Ttl                  uint64         `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
	NotFound             bool           `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
// Reset restores the message to its zero value.
func (m *RawGetKeyTTLResponse) Reset()         { *m = RawGetKeyTTLResponse{} }
// String renders the message in the compact text format.
func (m *RawGetKeyTTLResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks the type as a protobuf message.
func (*RawGetKeyTTLResponse) ProtoMessage()    {}
// Descriptor returns the compressed file descriptor and the message's path in it.
func (*RawGetKeyTTLResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{113}
}
// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *RawGetKeyTTLResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire-format encoding of the message to b.
func (m *RawGetKeyTTLResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawGetKeyTTLResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges src into dst.
func (dst *RawGetKeyTTLResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawGetKeyTTLResponse.Merge(dst, src)
}
// XXX_Size reports the serialized size of the message in bytes.
func (m *RawGetKeyTTLResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops any unrecognized fields retained on the message.
func (m *RawGetKeyTTLResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawGetKeyTTLResponse.DiscardUnknown(m)
}
// xxx_messageInfo_RawGetKeyTTLResponse holds cached reflection metadata used by
// the XXX_* helpers for RawGetKeyTTLResponse.
var xxx_messageInfo_RawGetKeyTTLResponse proto.InternalMessageInfo
// GetRegionError returns the RegionError field; nil-receiver safe (returns nil).
func (m *RawGetKeyTTLResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetError returns the Error field; nil-receiver safe (returns "").
func (m *RawGetKeyTTLResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// GetTtl returns the Ttl field; nil-receiver safe (returns 0).
func (m *RawGetKeyTTLResponse) GetTtl() uint64 {
	if m != nil {
		return m.Ttl
	}
	return 0
}
// GetNotFound returns the NotFound field; nil-receiver safe (returns false).
func (m *RawGetKeyTTLResponse) GetNotFound() bool {
	if m != nil {
		return m.NotFound
	}
	return false
}
// RawCASRequest is a raw compare-and-swap: put (Key, Value) only when the
// stored previous value matches PreviousValue (or, with PreviousNotExist set,
// only when the key does not exist).
type RawCASRequest struct {
	Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
	Key     []byte   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Value   []byte   `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	// If the length of this field equals zero, it means that the key-value should only put into database if and only if the key
	// does not exist.
	PreviousNotExist     bool     `protobuf:"varint,4,opt,name=previous_not_exist,json=previousNotExist,proto3" json:"previous_not_exist,omitempty"`
	PreviousValue        []byte   `protobuf:"bytes,5,opt,name=previous_value,json=previousValue,proto3" json:"previous_value,omitempty"`
	Cf                   string   `protobuf:"bytes,6,opt,name=cf,proto3" json:"cf,omitempty"`
	Ttl                  uint64   `protobuf:"varint,7,opt,name=ttl,proto3" json:"ttl,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
// Reset restores the message to its zero value.
func (m *RawCASRequest) Reset()         { *m = RawCASRequest{} }
// String renders the message in the compact text format.
func (m *RawCASRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks the type as a protobuf message.
func (*RawCASRequest) ProtoMessage()    {}
// Descriptor returns the compressed file descriptor and the message's path in it.
func (*RawCASRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{114}
}
// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *RawCASRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire-format encoding of the message to b.
func (m *RawCASRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawCASRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges src into dst.
func (dst *RawCASRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawCASRequest.Merge(dst, src)
}
// XXX_Size reports the serialized size of the message in bytes.
func (m *RawCASRequest) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops any unrecognized fields retained on the message.
func (m *RawCASRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RawCASRequest.DiscardUnknown(m)
}
// xxx_messageInfo_RawCASRequest holds cached reflection metadata used by the
// XXX_* helpers for RawCASRequest.
var xxx_messageInfo_RawCASRequest proto.InternalMessageInfo
// GetContext returns the Context field; nil-receiver safe (returns nil).
func (m *RawCASRequest) GetContext() *Context {
	if m != nil {
		return m.Context
	}
	return nil
}
// GetKey returns the Key field; nil-receiver safe (returns nil).
func (m *RawCASRequest) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}
// GetValue returns the Value field; nil-receiver safe (returns nil).
func (m *RawCASRequest) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
// GetPreviousNotExist returns the PreviousNotExist field; nil-receiver safe (returns false).
func (m *RawCASRequest) GetPreviousNotExist() bool {
	if m != nil {
		return m.PreviousNotExist
	}
	return false
}
// GetPreviousValue returns the PreviousValue field; nil-receiver safe (returns nil).
func (m *RawCASRequest) GetPreviousValue() []byte {
	if m != nil {
		return m.PreviousValue
	}
	return nil
}
// GetCf returns the Cf field; nil-receiver safe (returns "").
func (m *RawCASRequest) GetCf() string {
	if m != nil {
		return m.Cf
	}
	return ""
}
// GetTtl returns the Ttl field; nil-receiver safe (returns 0).
func (m *RawCASRequest) GetTtl() uint64 {
	if m != nil {
		return m.Ttl
	}
	return 0
}
// RawCASResponse is the reply to RawCASRequest; NotEqual reports that the
// compare failed, with Value holding the value that was actually stored.
type RawCASResponse struct {
	RegionError          *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"`
	Error                string         `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	Value                []byte         `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	NotEqual             bool           `protobuf:"varint,4,opt,name=not_equal,json=notEqual,proto3" json:"not_equal,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
// Reset restores the message to its zero value.
func (m *RawCASResponse) Reset()         { *m = RawCASResponse{} }
// String renders the message in the compact text format.
func (m *RawCASResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks the type as a protobuf message.
func (*RawCASResponse) ProtoMessage()    {}
// Descriptor returns the compressed file descriptor and the message's path in it.
func (*RawCASResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_kvrpcpb_aa18ec4cb240d160, []int{115}
}
// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *RawCASResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
// XXX_Marshal appends the wire-format encoding of the message to b.
func (m *RawCASResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_RawCASResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
// XXX_Merge merges src into dst.
func (dst *RawCASResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RawCASResponse.Merge(dst, src)
}
// XXX_Size reports the serialized size of the message in bytes.
func (m *RawCASResponse) XXX_Size() int {
	return m.Size()
}
// XXX_DiscardUnknown drops any unrecognized fields retained on the message.
func (m *RawCASResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RawCASResponse.DiscardUnknown(m)
}
// xxx_messageInfo_RawCASResponse holds cached reflection metadata used by the
// XXX_* helpers for RawCASResponse.
var xxx_messageInfo_RawCASResponse proto.InternalMessageInfo
// GetRegionError returns the RegionError field; nil-receiver safe (returns nil).
func (m *RawCASResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}
// GetError returns the Error field; nil-receiver safe (returns "").
func (m *RawCASResponse) GetError() string {
	if m != nil {
		return m.Error
	}
	return ""
}
// GetValue returns the Value field; nil-receiver safe (returns nil).
func (m *RawCASResponse) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
// GetNotEqual returns the NotEqual field; nil-receiver safe (returns false).
func (m *RawCASResponse) GetNotEqual() bool {
	if m != nil {
		return m.NotEqual
	}
	return false
}
// init registers every kvrpcpb message type and enum with the protobuf type
// registry so they can be resolved by name at runtime (e.g. by Any unpacking
// and the text/JSON marshalers).
func init() {
	proto.RegisterType((*GetRequest)(nil), "kvrpcpb.GetRequest")
	proto.RegisterType((*GetResponse)(nil), "kvrpcpb.GetResponse")
	proto.RegisterType((*ScanRequest)(nil), "kvrpcpb.ScanRequest")
	proto.RegisterType((*ScanResponse)(nil), "kvrpcpb.ScanResponse")
	proto.RegisterType((*PrewriteRequest)(nil), "kvrpcpb.PrewriteRequest")
	proto.RegisterType((*PrewriteResponse)(nil), "kvrpcpb.PrewriteResponse")
	proto.RegisterType((*PessimisticLockRequest)(nil), "kvrpcpb.PessimisticLockRequest")
	proto.RegisterType((*PessimisticLockResponse)(nil), "kvrpcpb.PessimisticLockResponse")
	proto.RegisterType((*PessimisticRollbackRequest)(nil), "kvrpcpb.PessimisticRollbackRequest")
	proto.RegisterType((*PessimisticRollbackResponse)(nil), "kvrpcpb.PessimisticRollbackResponse")
	proto.RegisterType((*TxnHeartBeatRequest)(nil), "kvrpcpb.TxnHeartBeatRequest")
	proto.RegisterType((*TxnHeartBeatResponse)(nil), "kvrpcpb.TxnHeartBeatResponse")
	proto.RegisterType((*CheckTxnStatusRequest)(nil), "kvrpcpb.CheckTxnStatusRequest")
	proto.RegisterType((*CheckTxnStatusResponse)(nil), "kvrpcpb.CheckTxnStatusResponse")
	proto.RegisterType((*CheckSecondaryLocksRequest)(nil), "kvrpcpb.CheckSecondaryLocksRequest")
	proto.RegisterType((*CheckSecondaryLocksResponse)(nil), "kvrpcpb.CheckSecondaryLocksResponse")
	proto.RegisterType((*CommitRequest)(nil), "kvrpcpb.CommitRequest")
	proto.RegisterType((*CommitResponse)(nil), "kvrpcpb.CommitResponse")
	proto.RegisterType((*ImportRequest)(nil), "kvrpcpb.ImportRequest")
	proto.RegisterType((*ImportResponse)(nil), "kvrpcpb.ImportResponse")
	proto.RegisterType((*CleanupRequest)(nil), "kvrpcpb.CleanupRequest")
	proto.RegisterType((*CleanupResponse)(nil), "kvrpcpb.CleanupResponse")
	proto.RegisterType((*BatchGetRequest)(nil), "kvrpcpb.BatchGetRequest")
	proto.RegisterType((*BatchGetResponse)(nil), "kvrpcpb.BatchGetResponse")
	proto.RegisterType((*BatchRollbackRequest)(nil), "kvrpcpb.BatchRollbackRequest")
	proto.RegisterType((*BatchRollbackResponse)(nil), "kvrpcpb.BatchRollbackResponse")
	proto.RegisterType((*ScanLockRequest)(nil), "kvrpcpb.ScanLockRequest")
	proto.RegisterType((*ScanLockResponse)(nil), "kvrpcpb.ScanLockResponse")
	proto.RegisterType((*ResolveLockRequest)(nil), "kvrpcpb.ResolveLockRequest")
	proto.RegisterType((*ResolveLockResponse)(nil), "kvrpcpb.ResolveLockResponse")
	proto.RegisterType((*GCRequest)(nil), "kvrpcpb.GCRequest")
	proto.RegisterType((*GCResponse)(nil), "kvrpcpb.GCResponse")
	proto.RegisterType((*DeleteRangeRequest)(nil), "kvrpcpb.DeleteRangeRequest")
	proto.RegisterType((*DeleteRangeResponse)(nil), "kvrpcpb.DeleteRangeResponse")
	proto.RegisterType((*RawGetRequest)(nil), "kvrpcpb.RawGetRequest")
	proto.RegisterType((*RawGetResponse)(nil), "kvrpcpb.RawGetResponse")
	proto.RegisterType((*RawBatchGetRequest)(nil), "kvrpcpb.RawBatchGetRequest")
	proto.RegisterType((*RawBatchGetResponse)(nil), "kvrpcpb.RawBatchGetResponse")
	proto.RegisterType((*RawPutRequest)(nil), "kvrpcpb.RawPutRequest")
	proto.RegisterType((*RawPutResponse)(nil), "kvrpcpb.RawPutResponse")
	proto.RegisterType((*RawBatchPutRequest)(nil), "kvrpcpb.RawBatchPutRequest")
	proto.RegisterType((*RawBatchPutResponse)(nil), "kvrpcpb.RawBatchPutResponse")
	proto.RegisterType((*RawDeleteRequest)(nil), "kvrpcpb.RawDeleteRequest")
	proto.RegisterType((*RawDeleteResponse)(nil), "kvrpcpb.RawDeleteResponse")
	proto.RegisterType((*RawBatchDeleteRequest)(nil), "kvrpcpb.RawBatchDeleteRequest")
	proto.RegisterType((*RawBatchDeleteResponse)(nil), "kvrpcpb.RawBatchDeleteResponse")
	proto.RegisterType((*RawScanRequest)(nil), "kvrpcpb.RawScanRequest")
	proto.RegisterType((*RawScanResponse)(nil), "kvrpcpb.RawScanResponse")
	proto.RegisterType((*RawDeleteRangeRequest)(nil), "kvrpcpb.RawDeleteRangeRequest")
	proto.RegisterType((*RawDeleteRangeResponse)(nil), "kvrpcpb.RawDeleteRangeResponse")
	proto.RegisterType((*RawBatchScanRequest)(nil), "kvrpcpb.RawBatchScanRequest")
	proto.RegisterType((*RawBatchScanResponse)(nil), "kvrpcpb.RawBatchScanResponse")
	proto.RegisterType((*UnsafeDestroyRangeRequest)(nil), "kvrpcpb.UnsafeDestroyRangeRequest")
	proto.RegisterType((*UnsafeDestroyRangeResponse)(nil), "kvrpcpb.UnsafeDestroyRangeResponse")
	proto.RegisterType((*RegisterLockObserverRequest)(nil), "kvrpcpb.RegisterLockObserverRequest")
	proto.RegisterType((*RegisterLockObserverResponse)(nil), "kvrpcpb.RegisterLockObserverResponse")
	proto.RegisterType((*CheckLockObserverRequest)(nil), "kvrpcpb.CheckLockObserverRequest")
	proto.RegisterType((*CheckLockObserverResponse)(nil), "kvrpcpb.CheckLockObserverResponse")
	proto.RegisterType((*RemoveLockObserverRequest)(nil), "kvrpcpb.RemoveLockObserverRequest")
	proto.RegisterType((*RemoveLockObserverResponse)(nil), "kvrpcpb.RemoveLockObserverResponse")
	proto.RegisterType((*PhysicalScanLockRequest)(nil), "kvrpcpb.PhysicalScanLockRequest")
	proto.RegisterType((*PhysicalScanLockResponse)(nil), "kvrpcpb.PhysicalScanLockResponse")
	proto.RegisterType((*SplitRegionRequest)(nil), "kvrpcpb.SplitRegionRequest")
	proto.RegisterType((*SplitRegionResponse)(nil), "kvrpcpb.SplitRegionResponse")
	proto.RegisterType((*ReadIndexRequest)(nil), "kvrpcpb.ReadIndexRequest")
	proto.RegisterType((*ReadIndexResponse)(nil), "kvrpcpb.ReadIndexResponse")
	proto.RegisterType((*VerMutation)(nil), "kvrpcpb.VerMutation")
	proto.RegisterType((*VerValue)(nil), "kvrpcpb.VerValue")
	proto.RegisterType((*VerError)(nil), "kvrpcpb.VerError")
	proto.RegisterType((*VerKvPair)(nil), "kvrpcpb.VerKvPair")
	proto.RegisterType((*VerGetRequest)(nil), "kvrpcpb.VerGetRequest")
	proto.RegisterType((*VerGetResponse)(nil), "kvrpcpb.VerGetResponse")
	proto.RegisterType((*VerBatchGetRequest)(nil), "kvrpcpb.VerBatchGetRequest")
	proto.RegisterType((*VerBatchGetResponse)(nil), "kvrpcpb.VerBatchGetResponse")
	proto.RegisterType((*VerMutRequest)(nil), "kvrpcpb.VerMutRequest")
	proto.RegisterType((*VerMutResponse)(nil), "kvrpcpb.VerMutResponse")
	proto.RegisterType((*VerBatchMutRequest)(nil), "kvrpcpb.VerBatchMutRequest")
	proto.RegisterType((*VerBatchMutResponse)(nil), "kvrpcpb.VerBatchMutResponse")
	proto.RegisterType((*VerScanRequest)(nil), "kvrpcpb.VerScanRequest")
	proto.RegisterType((*VerScanResponse)(nil), "kvrpcpb.VerScanResponse")
	proto.RegisterType((*VerDeleteRangeRequest)(nil), "kvrpcpb.VerDeleteRangeRequest")
	proto.RegisterType((*VerDeleteRangeResponse)(nil), "kvrpcpb.VerDeleteRangeResponse")
	proto.RegisterType((*MvccGetByKeyRequest)(nil), "kvrpcpb.MvccGetByKeyRequest")
	proto.RegisterType((*MvccGetByKeyResponse)(nil), "kvrpcpb.MvccGetByKeyResponse")
	proto.RegisterType((*MvccGetByStartTsRequest)(nil), "kvrpcpb.MvccGetByStartTsRequest")
	proto.RegisterType((*MvccGetByStartTsResponse)(nil), "kvrpcpb.MvccGetByStartTsResponse")
	proto.RegisterType((*Context)(nil), "kvrpcpb.Context")
	proto.RegisterType((*LockInfo)(nil), "kvrpcpb.LockInfo")
	proto.RegisterType((*KeyError)(nil), "kvrpcpb.KeyError")
	proto.RegisterType((*WriteConflict)(nil), "kvrpcpb.WriteConflict")
	proto.RegisterType((*AlreadyExist)(nil), "kvrpcpb.AlreadyExist")
	proto.RegisterType((*Deadlock)(nil), "kvrpcpb.Deadlock")
	proto.RegisterType((*CommitTsExpired)(nil), "kvrpcpb.CommitTsExpired")
	proto.RegisterType((*TxnNotFound)(nil), "kvrpcpb.TxnNotFound")
	proto.RegisterType((*CommitTsTooLarge)(nil), "kvrpcpb.CommitTsTooLarge")
	proto.RegisterType((*TimeDetail)(nil), "kvrpcpb.TimeDetail")
	proto.RegisterType((*ScanInfo)(nil), "kvrpcpb.ScanInfo")
	proto.RegisterType((*ScanDetail)(nil), "kvrpcpb.ScanDetail")
	proto.RegisterType((*ScanDetailV2)(nil), "kvrpcpb.ScanDetailV2")
	proto.RegisterType((*ExecDetails)(nil), "kvrpcpb.ExecDetails")
	proto.RegisterType((*ExecDetailsV2)(nil), "kvrpcpb.ExecDetailsV2")
	proto.RegisterType((*KvPair)(nil), "kvrpcpb.KvPair")
	proto.RegisterType((*Mutation)(nil), "kvrpcpb.Mutation")
	proto.RegisterType((*MvccWrite)(nil), "kvrpcpb.MvccWrite")
	proto.RegisterType((*MvccValue)(nil), "kvrpcpb.MvccValue")
	proto.RegisterType((*MvccLock)(nil), "kvrpcpb.MvccLock")
	proto.RegisterType((*MvccInfo)(nil), "kvrpcpb.MvccInfo")
	proto.RegisterType((*TxnInfo)(nil), "kvrpcpb.TxnInfo")
	proto.RegisterType((*KeyRange)(nil), "kvrpcpb.KeyRange")
	proto.RegisterType((*LeaderInfo)(nil), "kvrpcpb.LeaderInfo")
	proto.RegisterType((*CheckLeaderRequest)(nil), "kvrpcpb.CheckLeaderRequest")
	proto.RegisterType((*CheckLeaderResponse)(nil), "kvrpcpb.CheckLeaderResponse")
	proto.RegisterType((*RawGetKeyTTLRequest)(nil), "kvrpcpb.RawGetKeyTTLRequest")
	proto.RegisterType((*RawGetKeyTTLResponse)(nil), "kvrpcpb.RawGetKeyTTLResponse")
	proto.RegisterType((*RawCASRequest)(nil), "kvrpcpb.RawCASRequest")
	proto.RegisterType((*RawCASResponse)(nil), "kvrpcpb.RawCASResponse")
	proto.RegisterEnum("kvrpcpb.VerOp", VerOp_name, VerOp_value)
	proto.RegisterEnum("kvrpcpb.CommandPri", CommandPri_name, CommandPri_value)
	proto.RegisterEnum("kvrpcpb.IsolationLevel", IsolationLevel_name, IsolationLevel_value)
	proto.RegisterEnum("kvrpcpb.Op", Op_name, Op_value)
	proto.RegisterEnum("kvrpcpb.Assertion", Assertion_name, Assertion_value)
	proto.RegisterEnum("kvrpcpb.Action", Action_name, Action_value)
	proto.RegisterEnum("kvrpcpb.ExtraOp", ExtraOp_name, ExtraOp_value)
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *GetRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written. Zero-valued fields are omitted per
// proto3 rules; each literal byte (0xa, 0x12, ...) is the field's key
// (tag number and wire type).
func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n1, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n1
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.Version != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version))
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *GetResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written. Zero-valued fields are omitted per
// proto3 rules.
func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n2, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n2
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n3, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n3
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.NotFound {
		dAtA[i] = 0x20
		i++
		if m.NotFound {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	// Field tag 6 (0x32): field 5 is unused/reserved in the schema.
	if m.ExecDetailsV2 != nil {
		dAtA[i] = 0x32
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ExecDetailsV2.Size()))
		n4, err := m.ExecDetailsV2.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n4
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *ScanRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written. Zero-valued fields are omitted per
// proto3 rules.
func (m *ScanRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n5, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n5
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if m.Limit != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit))
	}
	if m.Version != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version))
	}
	if m.KeyOnly {
		dAtA[i] = 0x28
		i++
		if m.KeyOnly {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.Reverse {
		dAtA[i] = 0x30
		i++
		if m.Reverse {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x3a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.SampleStep != 0 {
		dAtA[i] = 0x40
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.SampleStep))
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *ScanResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written. Repeated message fields emit one
// length-delimited record per element.
func (m *ScanResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n6, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n6
	}
	if len(m.Pairs) > 0 {
		for _, msg := range m.Pairs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Error != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n7, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n7
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *PrewriteRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written. Zero-valued fields are omitted per
// proto3 rules.
func (m *PrewriteRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n8, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n8
	}
	if len(m.Mutations) > 0 {
		for _, msg := range m.Mutations {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.PrimaryLock) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock)))
		i += copy(dAtA[i:], m.PrimaryLock)
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.LockTtl != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl))
	}
	if m.SkipConstraintCheck {
		dAtA[i] = 0x30
		i++
		if m.SkipConstraintCheck {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	// Packed repeated bool: one length prefix, then one byte per element.
	if len(m.IsPessimisticLock) > 0 {
		dAtA[i] = 0x3a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.IsPessimisticLock)))
		for _, b := range m.IsPessimisticLock {
			if b {
				dAtA[i] = 1
			} else {
				dAtA[i] = 0
			}
			i++
		}
	}
	if m.TxnSize != 0 {
		dAtA[i] = 0x40
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TxnSize))
	}
	if m.ForUpdateTs != 0 {
		dAtA[i] = 0x48
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ForUpdateTs))
	}
	if m.MinCommitTs != 0 {
		dAtA[i] = 0x50
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MinCommitTs))
	}
	if m.UseAsyncCommit {
		dAtA[i] = 0x58
		i++
		if m.UseAsyncCommit {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.Secondaries) > 0 {
		for _, b := range m.Secondaries {
			dAtA[i] = 0x62
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.TryOnePc {
		dAtA[i] = 0x68
		i++
		if m.TryOnePc {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.MaxCommitTs != 0 {
		dAtA[i] = 0x70
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxCommitTs))
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *PrewriteResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written.
func (m *PrewriteResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n9, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n9
	}
	if len(m.Errors) > 0 {
		for _, msg := range m.Errors {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.MinCommitTs != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MinCommitTs))
	}
	if m.OnePcCommitTs != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.OnePcCommitTs))
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *PessimisticLockRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written. Zero-valued fields are omitted per
// proto3 rules.
func (m *PessimisticLockRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n10, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n10
	}
	if len(m.Mutations) > 0 {
		for _, msg := range m.Mutations {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.PrimaryLock) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock)))
		i += copy(dAtA[i:], m.PrimaryLock)
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.LockTtl != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl))
	}
	if m.ForUpdateTs != 0 {
		dAtA[i] = 0x30
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ForUpdateTs))
	}
	if m.IsFirstLock {
		dAtA[i] = 0x38
		i++
		if m.IsFirstLock {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.WaitTimeout != 0 {
		dAtA[i] = 0x40
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.WaitTimeout))
	}
	if m.Force {
		dAtA[i] = 0x48
		i++
		if m.Force {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.ReturnValues {
		dAtA[i] = 0x50
		i++
		if m.ReturnValues {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.MinCommitTs != 0 {
		dAtA[i] = 0x58
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MinCommitTs))
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *PessimisticLockResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written.
func (m *PessimisticLockResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n11, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n11
	}
	if len(m.Errors) > 0 {
		for _, msg := range m.Errors {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.CommitTs != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTs))
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if len(m.Values) > 0 {
		for _, b := range m.Values {
			dAtA[i] = 0x2a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	// Packed repeated bool: one length prefix, then one byte per element.
	if len(m.NotFounds) > 0 {
		dAtA[i] = 0x32
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.NotFounds)))
		for _, b := range m.NotFounds {
			if b {
				dAtA[i] = 1
			} else {
				dAtA[i] = 0
			}
			i++
		}
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *PessimisticRollbackRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written.
func (m *PessimisticRollbackRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n12, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n12
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.ForUpdateTs != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ForUpdateTs))
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x22
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *PessimisticRollbackResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written.
func (m *PessimisticRollbackResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n13, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n13
	}
	if len(m.Errors) > 0 {
		for _, msg := range m.Errors {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *TxnHeartBeatRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written.
func (m *TxnHeartBeatRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n14, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n14
	}
	if len(m.PrimaryLock) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock)))
		i += copy(dAtA[i:], m.PrimaryLock)
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.AdviseLockTtl != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AdviseLockTtl))
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *TxnHeartBeatResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written.
func (m *TxnHeartBeatResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n15, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n15
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n16, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n16
	}
	if m.LockTtl != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl))
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes the message into a freshly allocated wire-format buffer.
func (m *CheckTxnStatusRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes the wire-format encoding into dAtA in field-tag order and
// returns the number of bytes written. Zero-valued fields are omitted per
// proto3 rules.
func (m *CheckTxnStatusRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n17, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n17
	}
	if len(m.PrimaryKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryKey)))
		i += copy(dAtA[i:], m.PrimaryKey)
	}
	if m.LockTs != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTs))
	}
	if m.CallerStartTs != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CallerStartTs))
	}
	if m.CurrentTs != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CurrentTs))
	}
	if m.RollbackIfNotExist {
		dAtA[i] = 0x30
		i++
		if m.RollbackIfNotExist {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.ForceSyncCommit {
		dAtA[i] = 0x38
		i++
		if m.ForceSyncCommit {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.ResolvingPessimisticLock {
		dAtA[i] = 0x40
		i++
		if m.ResolvingPessimisticLock {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	// Preserve any unknown fields captured during a previous unmarshal.
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *CheckTxnStatusResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *CheckTxnStatusResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n18, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n18
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n19, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n19
	}
	if m.LockTtl != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl))
	}
	if m.CommitVersion != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion))
	}
	if m.Action != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Action))
	}
	if m.LockInfo != nil {
		dAtA[i] = 0x32
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockInfo.Size()))
		n20, err := m.LockInfo.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n20
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *CheckSecondaryLocksRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *CheckSecondaryLocksRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n21, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n21
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *CheckSecondaryLocksResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *CheckSecondaryLocksResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n22, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n22
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n23, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n23
	}
	if len(m.Locks) > 0 {
		for _, msg := range m.Locks {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.CommitTs != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTs))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *CommitRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n24, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n24
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.CommitVersion != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *CommitResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n25, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n25
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n26, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n26
	}
	if m.CommitVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *ImportRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *ImportRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Mutations) > 0 {
		for _, msg := range m.Mutations {
			dAtA[i] = 0xa
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.CommitVersion != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *ImportResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *ImportResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n27, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n27
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *CleanupRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *CleanupRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n28, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n28
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.CurrentTs != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CurrentTs))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *CleanupResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *CleanupResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n29, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n29
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n30, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n30
	}
	if m.CommitVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *BatchGetRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *BatchGetRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n31, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n31
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.Version != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *BatchGetResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Field numbers are not contiguous
// here: tag 0x22 is field 4 (ExecDetailsV2) and 0x2a is field 5 (Error).
func (m *BatchGetResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n32, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n32
	}
	if len(m.Pairs) > 0 {
		for _, msg := range m.Pairs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.ExecDetailsV2 != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ExecDetailsV2.Size()))
		n33, err := m.ExecDetailsV2.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n33
	}
	if m.Error != nil {
		dAtA[i] = 0x2a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n34, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n34
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *BatchRollbackRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *BatchRollbackRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n35, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n35
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *BatchRollbackResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *BatchRollbackResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n36, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n36
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n37, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n37
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *ScanLockRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *ScanLockRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n38, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n38
	}
	if m.MaxVersion != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxVersion))
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if m.Limit != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit))
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x2a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *ScanLockResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *ScanLockResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n39, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n39
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n40, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n40
	}
	if len(m.Locks) > 0 {
		for _, msg := range m.Locks {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *ResolveLockRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *ResolveLockRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n41, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n41
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.CommitVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion))
	}
	if len(m.TxnInfos) > 0 {
		for _, msg := range m.TxnInfos {
			dAtA[i] = 0x22
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x2a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *ResolveLockResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *ResolveLockResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n42, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n42
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n43, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n43
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *GCRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *GCRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n44, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n44
	}
	if m.SafePoint != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.SafePoint))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *GCResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *GCResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n45, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n45
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n46, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n46
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n47, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n47
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.NotifyOnly {
		dAtA[i] = 0x20
		i++
		if m.NotifyOnly {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n48, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n48
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawGetRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawGetRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n49, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n49
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawGetResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *RawGetResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n50, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n50
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.NotFound {
		dAtA[i] = 0x20
		i++
		if m.NotFound {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawBatchGetRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawBatchGetRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n51, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n51
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawBatchGetResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawBatchGetResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n52, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n52
	}
	if len(m.Pairs) > 0 {
		for _, msg := range m.Pairs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawPutRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawPutRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n53, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n53
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.Ttl != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Ttl))
	}
	if m.ForCas {
		dAtA[i] = 0x30
		i++
		if m.ForCas {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawPutResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *RawPutResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n54, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n54
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawBatchPutRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawBatchPutRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n55, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n55
	}
	if len(m.Pairs) > 0 {
		for _, msg := range m.Pairs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.Ttl != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Ttl))
	}
	if m.ForCas {
		dAtA[i] = 0x28
		i++
		if m.ForCas {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawBatchPutResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *RawBatchPutResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n56, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n56
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawDeleteRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n57, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n57
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.ForCas {
		dAtA[i] = 0x20
		i++
		if m.ForCas {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawDeleteResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *RawDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n58, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n58
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawBatchDeleteRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawBatchDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n59, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n59
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.ForCas {
		dAtA[i] = 0x20
		i++
		if m.ForCas {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawBatchDeleteResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *RawBatchDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n60, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n60
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawScanRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawScanRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n61, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n61
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if m.Limit != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit))
	}
	if m.KeyOnly {
		dAtA[i] = 0x20
		i++
		if m.KeyOnly {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x2a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.Reverse {
		dAtA[i] = 0x30
		i++
		if m.Reverse {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x3a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawScanResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawScanResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n62, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n62
	}
	if len(m.Kvs) > 0 {
		for _, msg := range m.Kvs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawDeleteRangeRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written.
func (m *RawDeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n63, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n63
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the written prefix. Generated code — do not edit by hand.
func (m *RawDeleteRangeResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's protobuf wire encoding into dAtA (skipping unset
// fields, appending any preserved XXX_unrecognized bytes verbatim) and
// returns the number of bytes written. Note Error here is a string field,
// not a message.
func (m *RawDeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n64, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n64
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawBatchScanRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *RawBatchScanRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n65, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n65
	}
	if len(m.Ranges) > 0 {
		// Repeated message field: each element is emitted as its own tagged entry.
		for _, msg := range m.Ranges {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.EachLimit != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.EachLimit))
	}
	if m.KeyOnly {
		dAtA[i] = 0x20
		i++
		if m.KeyOnly {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x2a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.Reverse {
		dAtA[i] = 0x30
		i++
		if m.Reverse {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawBatchScanResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *RawBatchScanResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n66, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n66
	}
	if len(m.Kvs) > 0 {
		for _, msg := range m.Kvs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *UnsafeDestroyRangeRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *UnsafeDestroyRangeRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n67, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n67
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *UnsafeDestroyRangeResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *UnsafeDestroyRangeResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n68, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n68
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RegisterLockObserverRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *RegisterLockObserverRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n69, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n69
	}
	if m.MaxTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RegisterLockObserverResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *RegisterLockObserverResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Error) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *CheckLockObserverRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *CheckLockObserverRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n70, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n70
	}
	if m.MaxTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *CheckLockObserverResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *CheckLockObserverResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Error) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.IsClean {
		dAtA[i] = 0x10
		i++
		if m.IsClean {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.Locks) > 0 {
		for _, msg := range m.Locks {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RemoveLockObserverRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *RemoveLockObserverRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n71, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n71
	}
	if m.MaxTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RemoveLockObserverResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *RemoveLockObserverResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Error) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *PhysicalScanLockRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *PhysicalScanLockRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n72, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n72
	}
	if m.MaxTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs))
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if m.Limit != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *PhysicalScanLockResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *PhysicalScanLockResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Error) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if len(m.Locks) > 0 {
		for _, msg := range m.Locks {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *SplitRegionRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *SplitRegionRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n73, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n73
	}
	if len(m.SplitKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.SplitKey)))
		i += copy(dAtA[i:], m.SplitKey)
	}
	if len(m.SplitKeys) > 0 {
		// Repeated bytes field: one length-delimited entry per key.
		for _, b := range m.SplitKeys {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *SplitRegionResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *SplitRegionResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n74, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n74
	}
	if m.Left != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Left.Size()))
		n75, err := m.Left.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n75
	}
	if m.Right != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Right.Size()))
		n76, err := m.Right.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n76
	}
	if len(m.Regions) > 0 {
		for _, msg := range m.Regions {
			dAtA[i] = 0x22
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *ReadIndexRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *ReadIndexRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n77, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n77
	}
	if m.StartTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if len(m.Ranges) > 0 {
		for _, msg := range m.Ranges {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *ReadIndexResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *ReadIndexResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n78, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n78
	}
	if m.ReadIndex != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ReadIndex))
	}
	if m.Locked != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Locked.Size()))
		n79, err := m.Locked.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n79
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerMutation) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerMutation) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Op != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Op))
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerValue) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerValue) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Value) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.Version != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerError) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerError) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Error) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerKvPair) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerKvPair) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Error != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n80, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n80
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.Value != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Value.Size()))
		n81, err := m.Value.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n81
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerGetRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerGetRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n82, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n82
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerGetResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerGetResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n83, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n83
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n84, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n84
	}
	if m.Value != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Value.Size()))
		n85, err := m.Value.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n85
	}
	if m.NotFound {
		dAtA[i] = 0x20
		i++
		if m.NotFound {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerBatchGetRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerBatchGetRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n86, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n86
	}
	if len(m.Key) > 0 {
		// Repeated bytes field: one length-delimited entry per key.
		for _, b := range m.Key {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerBatchGetResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerBatchGetResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n87, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n87
	}
	if len(m.Pairs) > 0 {
		for _, msg := range m.Pairs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerMutRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerMutRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n88, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n88
	}
	if m.Mut != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Mut.Size()))
		n89, err := m.Mut.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n89
	}
	if m.Version != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerMutResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerMutResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n90, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n90
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n91, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n91
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerBatchMutRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerBatchMutRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n92, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n92
	}
	if len(m.Muts) > 0 {
		for _, msg := range m.Muts {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Version != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerBatchMutResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerBatchMutResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n93, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n93
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n94, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n94
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerScanRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerScanRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n95, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n95
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.Limit != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit))
	}
	if m.KeyOnly {
		dAtA[i] = 0x28
		i++
		if m.KeyOnly {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.Reverse {
		dAtA[i] = 0x30
		i++
		if m.Reverse {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.StartVersion != 0 {
		dAtA[i] = 0x38
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerScanResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerScanResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n96, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n96
	}
	if len(m.Pairs) > 0 {
		for _, msg := range m.Pairs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerDeleteRangeRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerDeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n97, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n97
	}
	if len(m.StartKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *VerDeleteRangeResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *VerDeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n98, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n98
	}
	if m.Error != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n99, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n99
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *MvccGetByKeyRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *MvccGetByKeyRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n100, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n100
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *MvccGetByKeyResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *MvccGetByKeyResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n101, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n101
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.Info != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Info.Size()))
		n102, err := m.Info.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n102
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *MvccGetByStartTsRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *MvccGetByStartTsRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n103, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n103
	}
	if m.StartTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *MvccGetByStartTsResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *MvccGetByStartTsResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n104, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n104
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.Info != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Info.Size()))
		n105, err := m.Info.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n105
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *Context) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
// Fields 16+ need a two-byte tag, hence the extra 0x1 byte after the key for
// TaskId and StaleRead.
func (m *Context) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionId != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionId))
	}
	if m.RegionEpoch != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionEpoch.Size()))
		n106, err := m.RegionEpoch.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n106
	}
	if m.Peer != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Peer.Size()))
		n107, err := m.Peer.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n107
	}
	if m.Term != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Term))
	}
	if m.Priority != 0 {
		dAtA[i] = 0x30
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Priority))
	}
	if m.IsolationLevel != 0 {
		dAtA[i] = 0x38
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.IsolationLevel))
	}
	if m.NotFillCache {
		dAtA[i] = 0x40
		i++
		if m.NotFillCache {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.SyncLog {
		dAtA[i] = 0x48
		i++
		if m.SyncLog {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.RecordTimeStat {
		dAtA[i] = 0x50
		i++
		if m.RecordTimeStat {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.RecordScanStat {
		dAtA[i] = 0x58
		i++
		if m.RecordScanStat {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.ReplicaRead {
		dAtA[i] = 0x60
		i++
		if m.ReplicaRead {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.ResolvedLocks) > 0 {
		// Packed repeated varint: encode all elements into a scratch buffer
		// first so the total byte length can be written before the payload.
		dAtA109 := make([]byte, len(m.ResolvedLocks)*10)
		var j108 int
		for _, num := range m.ResolvedLocks {
			for num >= 1<<7 {
				dAtA109[j108] = uint8(uint64(num)&0x7f | 0x80)
				num >>= 7
				j108++
			}
			dAtA109[j108] = uint8(num)
			j108++
		}
		dAtA[i] = 0x6a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(j108))
		i += copy(dAtA[i:], dAtA109[:j108])
	}
	if m.MaxExecutionDurationMs != 0 {
		dAtA[i] = 0x70
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxExecutionDurationMs))
	}
	if m.AppliedIndex != 0 {
		dAtA[i] = 0x78
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AppliedIndex))
	}
	if m.TaskId != 0 {
		dAtA[i] = 0x80
		i++
		dAtA[i] = 0x1
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TaskId))
	}
	if m.StaleRead {
		dAtA[i] = 0x88
		i++
		dAtA[i] = 0x1
		i++
		if m.StaleRead {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal implements proto.Marshaler: it allocates a buffer of m.Size() bytes
// and fills it via MarshalTo. Generated by protoc-gen-gogo; do not edit by hand.
func (m *LockInfo) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m's set fields into dAtA in ascending tag order and returns
// the number of bytes written. dAtA must be at least m.Size() bytes long.
func (m *LockInfo) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.PrimaryLock) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock)))
		i += copy(dAtA[i:], m.PrimaryLock)
	}
	if m.LockVersion != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockVersion))
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.LockTtl != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl))
	}
	if m.TxnSize != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TxnSize))
	}
	if m.LockType != 0 {
		dAtA[i] = 0x30
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockType))
	}
	if m.LockForUpdateTs != 0 {
		dAtA[i] = 0x38
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockForUpdateTs))
	}
	if m.UseAsyncCommit {
		dAtA[i] = 0x40
		i++
		if m.UseAsyncCommit {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.MinCommitTs != 0 {
		dAtA[i] = 0x48
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MinCommitTs))
	}
	if len(m.Secondaries) > 0 {
		// Repeated bytes field: one length-delimited entry per secondary key.
		for _, b := range m.Secondaries {
			dAtA[i] = 0x52
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *KeyError) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-nil/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written. Nested
// messages are emitted as tag + varint length + recursively marshaled body.
func (m *KeyError) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Locked != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Locked.Size()))
		n110, err := m.Locked.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n110
	}
	if len(m.Retryable) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Retryable)))
		i += copy(dAtA[i:], m.Retryable)
	}
	if len(m.Abort) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Abort)))
		i += copy(dAtA[i:], m.Abort)
	}
	if m.Conflict != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Conflict.Size()))
		n111, err := m.Conflict.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n111
	}
	if m.AlreadyExist != nil {
		dAtA[i] = 0x2a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AlreadyExist.Size()))
		n112, err := m.AlreadyExist.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n112
	}
	if m.Deadlock != nil {
		dAtA[i] = 0x32
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Deadlock.Size()))
		n113, err := m.Deadlock.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n113
	}
	if m.CommitTsExpired != nil {
		dAtA[i] = 0x3a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTsExpired.Size()))
		n114, err := m.CommitTsExpired.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n114
	}
	if m.TxnNotFound != nil {
		dAtA[i] = 0x42
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TxnNotFound.Size()))
		n115, err := m.TxnNotFound.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n115
	}
	if m.CommitTsTooLarge != nil {
		dAtA[i] = 0x4a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTsTooLarge.Size()))
		n116, err := m.CommitTsTooLarge.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n116
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *WriteConflict) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written. The
// byte literals are precomputed protobuf tags: (field_number << 3) | wire_type.
func (m *WriteConflict) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.StartTs != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if m.ConflictTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ConflictTs))
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Primary) > 0 {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Primary)))
		i += copy(dAtA[i:], m.Primary)
	}
	if m.ConflictCommitTs != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ConflictCommitTs))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *AlreadyExist) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's single Key field (when non-empty) plus any unknown
// fields into dAtA and returns the number of bytes written.
func (m *AlreadyExist) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Key) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *Deadlock) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *Deadlock) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.LockTs != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTs))
	}
	if len(m.LockKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.LockKey)))
		i += copy(dAtA[i:], m.LockKey)
	}
	if m.DeadlockKeyHash != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.DeadlockKeyHash))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *CommitTsExpired) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *CommitTsExpired) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.StartTs != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if m.AttemptedCommitTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AttemptedCommitTs))
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if m.MinCommitTs != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MinCommitTs))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *TxnNotFound) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *TxnNotFound) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.StartTs != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if len(m.PrimaryKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryKey)))
		i += copy(dAtA[i:], m.PrimaryKey)
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *CommitTsTooLarge) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's single CommitTs field (when non-zero) plus any
// unknown fields into dAtA and returns the number of bytes written.
func (m *CommitTsTooLarge) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.CommitTs != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTs))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *TimeDetail) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero fields into dAtA (which must be at least
// m.Size() bytes) and returns the number of bytes written.
func (m *TimeDetail) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.WaitWallTimeMs != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.WaitWallTimeMs))
	}
	if m.ProcessWallTimeMs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ProcessWallTimeMs))
	}
	if m.KvReadWallTimeMs != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.KvReadWallTimeMs))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *ScanInfo) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero fields into dAtA (which must be at least
// m.Size() bytes) and returns the number of bytes written.
func (m *ScanInfo) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Total != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Total))
	}
	if m.Processed != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Processed))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *ScanDetail) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-nil sub-messages into dAtA (which must be at
// least m.Size() bytes) and returns the number of bytes written. Nested
// messages are emitted as tag + varint length + recursively marshaled body.
func (m *ScanDetail) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Write != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Write.Size()))
		n117, err := m.Write.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n117
	}
	if m.Lock != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Lock.Size()))
		n118, err := m.Lock.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n118
	}
	if m.Data != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Data.Size()))
		n119, err := m.Data.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n119
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *ScanDetailV2) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero counter fields into dAtA (which must be at
// least m.Size() bytes) and returns the number of bytes written.
func (m *ScanDetailV2) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.ProcessedVersions != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ProcessedVersions))
	}
	if m.TotalVersions != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TotalVersions))
	}
	if m.RocksdbDeleteSkippedCount != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RocksdbDeleteSkippedCount))
	}
	if m.RocksdbKeySkippedCount != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RocksdbKeySkippedCount))
	}
	if m.RocksdbBlockCacheHitCount != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RocksdbBlockCacheHitCount))
	}
	if m.RocksdbBlockReadCount != 0 {
		dAtA[i] = 0x30
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RocksdbBlockReadCount))
	}
	if m.RocksdbBlockReadByte != 0 {
		dAtA[i] = 0x38
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RocksdbBlockReadByte))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *ExecDetails) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-nil sub-messages into dAtA (which must be at
// least m.Size() bytes) and returns the number of bytes written.
func (m *ExecDetails) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.TimeDetail != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TimeDetail.Size()))
		n120, err := m.TimeDetail.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n120
	}
	if m.ScanDetail != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ScanDetail.Size()))
		n121, err := m.ScanDetail.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n121
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *ExecDetailsV2) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-nil sub-messages into dAtA (which must be at
// least m.Size() bytes) and returns the number of bytes written.
func (m *ExecDetailsV2) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.TimeDetail != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TimeDetail.Size()))
		n122, err := m.TimeDetail.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n122
	}
	if m.ScanDetailV2 != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ScanDetailV2.Size()))
		n123, err := m.ScanDetailV2.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n123
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *KvPair) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-nil/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *KvPair) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Error != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size()))
		n124, err := m.Error.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n124
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *Mutation) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written. Enum
// fields (Op, Assertion) are encoded as plain varints.
func (m *Mutation) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Op != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Op))
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.Assertion != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Assertion))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *MvccWrite) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *MvccWrite) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Type != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Type))
	}
	if m.StartTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if m.CommitTs != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTs))
	}
	if len(m.ShortValue) > 0 {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.ShortValue)))
		i += copy(dAtA[i:], m.ShortValue)
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *MvccValue) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *MvccValue) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.StartTs != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *MvccLock) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *MvccLock) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Type != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Type))
	}
	if m.StartTs != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs))
	}
	if len(m.Primary) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Primary)))
		i += copy(dAtA[i:], m.Primary)
	}
	if len(m.ShortValue) > 0 {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.ShortValue)))
		i += copy(dAtA[i:], m.ShortValue)
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *MvccInfo) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-nil/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
// Repeated message fields emit one tag + length + body per element.
func (m *MvccInfo) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Lock != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Lock.Size()))
		n125, err := m.Lock.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n125
	}
	if len(m.Writes) > 0 {
		for _, msg := range m.Writes {
			dAtA[i] = 0x12
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.Values) > 0 {
		for _, msg := range m.Values {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *TxnInfo) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero fields into dAtA (which must be at least
// m.Size() bytes) and returns the number of bytes written.
func (m *TxnInfo) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Txn != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Txn))
	}
	if m.Status != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Status))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *KeyRange) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-empty key bounds into dAtA (which must be at
// least m.Size() bytes) and returns the number of bytes written.
func (m *KeyRange) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.StartKey) > 0 {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey)))
		i += copy(dAtA[i:], m.StartKey)
	}
	if len(m.EndKey) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey)))
		i += copy(dAtA[i:], m.EndKey)
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *LeaderInfo) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-nil fields into dAtA (which must be at
// least m.Size() bytes) and returns the number of bytes written.
func (m *LeaderInfo) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionId != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionId))
	}
	if m.PeerId != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.PeerId))
	}
	if m.Term != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Term))
	}
	if m.RegionEpoch != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionEpoch.Size()))
		n126, err := m.RegionEpoch.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n126
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *CheckLeaderRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's repeated Regions messages and Ts into dAtA (which
// must be at least m.Size() bytes) and returns the number of bytes written.
func (m *CheckLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Regions) > 0 {
		for _, msg := range m.Regions {
			dAtA[i] = 0xa
			i++
			i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Ts != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Ts))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *CheckLeaderResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's fields into dAtA (which must be at least m.Size()
// bytes) and returns the number of bytes written. Regions is a packed
// repeated varint field: all elements are varint-encoded into a scratch
// buffer first, then emitted as one tag + total length + payload record.
func (m *CheckLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Regions) > 0 {
		// Scratch buffer: 10 bytes is the max varint length of a uint64.
		dAtA128 := make([]byte, len(m.Regions)*10)
		var j127 int
		for _, num := range m.Regions {
			for num >= 1<<7 {
				dAtA128[j127] = uint8(uint64(num)&0x7f | 0x80)
				num >>= 7
				j127++
			}
			dAtA128[j127] = uint8(num)
			j127++
		}
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(j127))
		i += copy(dAtA[i:], dAtA128[:j127])
	}
	if m.Ts != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Ts))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *RawGetKeyTTLRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-nil/non-empty fields into dAtA (which must be
// at least m.Size() bytes) and returns the number of bytes written.
func (m *RawGetKeyTTLRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n129, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n129
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *RawGetKeyTTLResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-nil/non-empty fields into dAtA (which
// must be at least m.Size() bytes) and returns the number of bytes written.
func (m *RawGetKeyTTLResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n130, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n130
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if m.Ttl != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Ttl))
	}
	if m.NotFound {
		dAtA[i] = 0x20
		i++
		// bools are encoded as a single varint byte: 1 for true, 0 for false.
		if m.NotFound {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *RawCASRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-nil/non-empty fields into dAtA (which
// must be at least m.Size() bytes) and returns the number of bytes written.
func (m *RawCASRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Context != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size()))
		n131, err := m.Context.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n131
	}
	if len(m.Key) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key)))
		i += copy(dAtA[i:], m.Key)
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.PreviousNotExist {
		dAtA[i] = 0x20
		i++
		// bools are encoded as a single varint byte: 1 for true, 0 for false.
		if m.PreviousNotExist {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.PreviousValue) > 0 {
		dAtA[i] = 0x2a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PreviousValue)))
		i += copy(dAtA[i:], m.PreviousValue)
	}
	if len(m.Cf) > 0 {
		dAtA[i] = 0x32
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf)))
		i += copy(dAtA[i:], m.Cf)
	}
	if m.Ttl != 0 {
		dAtA[i] = 0x38
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Ttl))
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal serializes m with the protobuf wire format into a freshly
// allocated buffer sized by m.Size() and returns the encoded bytes.
func (m *RawCASResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m's non-zero/non-nil/non-empty fields into dAtA (which
// must be at least m.Size() bytes) and returns the number of bytes written.
func (m *RawCASResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.RegionError != nil {
		dAtA[i] = 0xa
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size()))
		n132, err := m.RegionError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n132
	}
	if len(m.Error) > 0 {
		dAtA[i] = 0x12
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error)))
		i += copy(dAtA[i:], m.Error)
	}
	if len(m.Value) > 0 {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value)))
		i += copy(dAtA[i:], m.Value)
	}
	if m.NotEqual {
		dAtA[i] = 0x20
		i++
		// bools are encoded as a single varint byte: 1 for true, 0 for false.
		if m.NotEqual {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.XXX_unrecognized != nil {
		// Preserve unknown fields captured during unmarshal (round-trip safety).
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// encodeVarintKvrpcpb writes v into dAtA starting at offset using protobuf
// base-128 varint encoding (7 payload bits per byte, continuation bit set
// on every byte except the last) and returns the offset just past the
// final byte written. The caller guarantees dAtA has enough room.
func encodeVarintKvrpcpb(dAtA []byte, offset int, v uint64) int {
	for ; v >= 0x80; v >>= 7 {
		dAtA[offset] = uint8(v) | 0x80
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
// Size returns the number of bytes the protobuf encoding of m occupies:
// for each set field, the tag byte(s) plus varint/length-delimited payload.
// A nil receiver encodes to zero bytes (matches newer generator output and
// avoids a nil-pointer panic when callers pass an unset message).
func (m *GetRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	l = len(m.Key)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.Version != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Version))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *GetResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RegionError != nil {
		l = m.RegionError.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.Error != nil {
		l = m.Error.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	l = len(m.Value)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.NotFound {
		n += 2
	}
	if m.ExecDetailsV2 != nil {
		l = m.ExecDetailsV2.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *ScanRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	l = len(m.StartKey)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.Limit != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Limit))
	}
	if m.Version != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Version))
	}
	if m.KeyOnly {
		n += 2
	}
	if m.Reverse {
		n += 2
	}
	l = len(m.EndKey)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.SampleStep != 0 {
		n += 1 + sovKvrpcpb(uint64(m.SampleStep))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *ScanResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RegionError != nil {
		l = m.RegionError.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Pairs) > 0 {
		for _, e := range m.Pairs {
			l = e.Size()
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	if m.Error != nil {
		l = m.Error.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *PrewriteRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Mutations) > 0 {
		for _, e := range m.Mutations {
			l = e.Size()
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	l = len(m.PrimaryLock)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	if m.LockTtl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockTtl))
	}
	if m.SkipConstraintCheck {
		n += 2
	}
	// Packed repeated bool: one tag byte, a varint element count, then one
	// byte per element.
	if len(m.IsPessimisticLock) > 0 {
		n += 1 + sovKvrpcpb(uint64(len(m.IsPessimisticLock))) + len(m.IsPessimisticLock)*1
	}
	if m.TxnSize != 0 {
		n += 1 + sovKvrpcpb(uint64(m.TxnSize))
	}
	if m.ForUpdateTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ForUpdateTs))
	}
	if m.MinCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MinCommitTs))
	}
	if m.UseAsyncCommit {
		n += 2
	}
	if len(m.Secondaries) > 0 {
		for _, b := range m.Secondaries {
			l = len(b)
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	if m.TryOnePc {
		n += 2
	}
	if m.MaxCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MaxCommitTs))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *PrewriteResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RegionError != nil {
		l = m.RegionError.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Errors) > 0 {
		for _, e := range m.Errors {
			l = e.Size()
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	if m.MinCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MinCommitTs))
	}
	if m.OnePcCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.OnePcCommitTs))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *PessimisticLockRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Mutations) > 0 {
		for _, e := range m.Mutations {
			l = e.Size()
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	l = len(m.PrimaryLock)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	if m.LockTtl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockTtl))
	}
	if m.ForUpdateTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ForUpdateTs))
	}
	if m.IsFirstLock {
		n += 2
	}
	if m.WaitTimeout != 0 {
		n += 1 + sovKvrpcpb(uint64(m.WaitTimeout))
	}
	if m.Force {
		n += 2
	}
	if m.ReturnValues {
		n += 2
	}
	if m.MinCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MinCommitTs))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *PessimisticLockResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RegionError != nil {
		l = m.RegionError.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Errors) > 0 {
		for _, e := range m.Errors {
			l = e.Size()
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	if m.CommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitTs))
	}
	l = len(m.Value)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Values) > 0 {
		for _, b := range m.Values {
			l = len(b)
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	// Packed repeated bool: tag + varint count + one byte per element.
	if len(m.NotFounds) > 0 {
		n += 1 + sovKvrpcpb(uint64(len(m.NotFounds))) + len(m.NotFounds)*1
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *PessimisticRollbackRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	if m.ForUpdateTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ForUpdateTs))
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			l = len(b)
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *PessimisticRollbackResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RegionError != nil {
		l = m.RegionError.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Errors) > 0 {
		for _, e := range m.Errors {
			l = e.Size()
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *TxnHeartBeatRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	l = len(m.PrimaryLock)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	if m.AdviseLockTtl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.AdviseLockTtl))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *TxnHeartBeatResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RegionError != nil {
		l = m.RegionError.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.Error != nil {
		l = m.Error.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.LockTtl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockTtl))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *CheckTxnStatusRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	l = len(m.PrimaryKey)
	if l > 0 {
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.LockTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockTs))
	}
	if m.CallerStartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CallerStartTs))
	}
	if m.CurrentTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CurrentTs))
	}
	if m.RollbackIfNotExist {
		n += 2
	}
	if m.ForceSyncCommit {
		n += 2
	}
	if m.ResolvingPessimisticLock {
		n += 2
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *CheckTxnStatusResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RegionError != nil {
		l = m.RegionError.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.Error != nil {
		l = m.Error.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.LockTtl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockTtl))
	}
	if m.CommitVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitVersion))
	}
	if m.Action != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Action))
	}
	if m.LockInfo != nil {
		l = m.LockInfo.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes (avoids a nil-pointer panic).
func (m *CheckSecondaryLocksRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Context != nil {
		l = m.Context.Size()
		n += 1 + l + sovKvrpcpb(uint64(l))
	}
	if len(m.Keys) > 0 {
		for _, b := range m.Keys {
			l = len(b)
			n += 1 + l + sovKvrpcpb(uint64(l))
		}
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CheckSecondaryLocksResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, lk := range m.Locks {
		sz := lk.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.CommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CommitRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	for _, k := range m.Keys {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	if m.CommitVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitVersion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CommitResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.CommitVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitVersion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ImportRequest) Size() (n int) {
	for _, mut := range m.Mutations {
		sz := mut.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.CommitVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitVersion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ImportResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CleanupRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	if m.CurrentTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CurrentTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CleanupResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.CommitVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitVersion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *BatchGetRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, k := range m.Keys {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	if m.Version != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Version))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *BatchGetResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, p := range m.Pairs {
		sz := p.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.ExecDetailsV2 != nil {
		sz := m.ExecDetailsV2.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *BatchRollbackRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	for _, k := range m.Keys {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *BatchRollbackResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ScanLockRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.MaxVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MaxVersion))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Limit != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Limit))
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ScanLockResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, lk := range m.Locks {
		sz := lk.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ResolveLockRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	if m.CommitVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitVersion))
	}
	for _, ti := range m.TxnInfos {
		sz := ti.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, k := range m.Keys {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ResolveLockResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *GCRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.SafePoint != 0 {
		n += 1 + sovKvrpcpb(uint64(m.SafePoint))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *GCResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *DeleteRangeRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.NotifyOnly {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *DeleteRangeResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawGetRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawGetResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.NotFound {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchGetRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, k := range m.Keys {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchGetResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, p := range m.Pairs {
		sz := p.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawPutRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Ttl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Ttl))
	}
	if m.ForCas {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawPutResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchPutRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, p := range m.Pairs {
		sz := p.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Ttl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Ttl))
	}
	if m.ForCas {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchPutResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawDeleteRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.ForCas {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawDeleteResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchDeleteRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, k := range m.Keys {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.ForCas {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchDeleteResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawScanRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Limit != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Limit))
	}
	if m.KeyOnly {
		n += 2
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Reverse {
		n += 2
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawScanResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, kv := range m.Kvs {
		sz := kv.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawDeleteRangeRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawDeleteRangeResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchScanRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, r := range m.Ranges {
		sz := r.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.EachLimit != 0 {
		n += 1 + sovKvrpcpb(uint64(m.EachLimit))
	}
	if m.KeyOnly {
		n += 2
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Reverse {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RawBatchScanResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, kv := range m.Kvs {
		sz := kv.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *UnsafeDestroyRangeRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *UnsafeDestroyRangeResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RegisterLockObserverRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.MaxTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MaxTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RegisterLockObserverResponse) Size() (n int) {
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CheckLockObserverRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.MaxTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MaxTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CheckLockObserverResponse) Size() (n int) {
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.IsClean {
		n += 2
	}
	for _, lk := range m.Locks {
		sz := lk.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RemoveLockObserverRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.MaxTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MaxTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *RemoveLockObserverResponse) Size() (n int) {
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *PhysicalScanLockRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.MaxTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MaxTs))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Limit != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Limit))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *PhysicalScanLockResponse) Size() (n int) {
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, lk := range m.Locks {
		sz := lk.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *SplitRegionRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.SplitKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, k := range m.SplitKeys {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *SplitRegionResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Left != nil {
		sz := m.Left.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Right != nil {
		sz := m.Right.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, r := range m.Regions {
		sz := r.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ReadIndexRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	for _, r := range m.Ranges {
		sz := r.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ReadIndexResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.ReadIndex != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ReadIndex))
	}
	if m.Locked != nil {
		sz := m.Locked.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerMutation) Size() (n int) {
	if m.Op != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Op))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerValue) Size() (n int) {
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Version != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Version))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerError) Size() (n int) {
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerKvPair) Size() (n int) {
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	// Value is a message pointer here, not raw bytes.
	if m.Value != nil {
		sz := m.Value.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerGetRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerGetResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Value != nil {
		sz := m.Value.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.NotFound {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerBatchGetRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	// Key is a repeated bytes field in this message.
	for _, k := range m.Key {
		n += 1 + len(k) + sovKvrpcpb(uint64(len(k)))
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerBatchGetResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, p := range m.Pairs {
		sz := p.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerMutRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Mut != nil {
		sz := m.Mut.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Version != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Version))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerMutResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerBatchMutRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, mu := range m.Muts {
		sz := mu.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Version != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Version))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerBatchMutResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerScanRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Limit != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Limit))
	}
	if m.KeyOnly {
		n += 2
	}
	if m.Reverse {
		n += 2
	}
	if m.StartVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartVersion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerScanResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, p := range m.Pairs {
		sz := p.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerDeleteRangeRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *VerDeleteRangeResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *MvccGetByKeyRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *MvccGetByKeyResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Info != nil {
		sz := m.Info.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *MvccGetByStartTsRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *MvccGetByStartTsResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Info != nil {
		sz := m.Info.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *Context) Size() (n int) {
	if m.RegionId != 0 {
		n += 1 + sovKvrpcpb(uint64(m.RegionId))
	}
	if m.RegionEpoch != nil {
		sz := m.RegionEpoch.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Peer != nil {
		sz := m.Peer.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Term != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Term))
	}
	if m.Priority != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Priority))
	}
	if m.IsolationLevel != 0 {
		n += 1 + sovKvrpcpb(uint64(m.IsolationLevel))
	}
	if m.NotFillCache {
		n += 2
	}
	if m.SyncLog {
		n += 2
	}
	if m.RecordTimeStat {
		n += 2
	}
	if m.RecordScanStat {
		n += 2
	}
	if m.ReplicaRead {
		n += 2
	}
	// ResolvedLocks is packed: one length-delimited record holding
	// the varints of every element back to back.
	if len(m.ResolvedLocks) > 0 {
		packed := 0
		for _, v := range m.ResolvedLocks {
			packed += sovKvrpcpb(uint64(v))
		}
		n += 1 + sovKvrpcpb(uint64(packed)) + packed
	}
	if m.MaxExecutionDurationMs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MaxExecutionDurationMs))
	}
	if m.AppliedIndex != 0 {
		n += 1 + sovKvrpcpb(uint64(m.AppliedIndex))
	}
	// Field numbers past 15 need a two-byte tag.
	if m.TaskId != 0 {
		n += 2 + sovKvrpcpb(uint64(m.TaskId))
	}
	// Two-byte tag plus one value byte for this bool.
	if m.StaleRead {
		n += 3
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *LockInfo) Size() (n int) {
	if sz := len(m.PrimaryLock); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.LockVersion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockVersion))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.LockTtl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockTtl))
	}
	if m.TxnSize != 0 {
		n += 1 + sovKvrpcpb(uint64(m.TxnSize))
	}
	if m.LockType != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockType))
	}
	if m.LockForUpdateTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockForUpdateTs))
	}
	if m.UseAsyncCommit {
		n += 2
	}
	if m.MinCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MinCommitTs))
	}
	for _, s := range m.Secondaries {
		n += 1 + len(s) + sovKvrpcpb(uint64(len(s)))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *KeyError) Size() (n int) {
	if m.Locked != nil {
		sz := m.Locked.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Retryable); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Abort); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Conflict != nil {
		sz := m.Conflict.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.AlreadyExist != nil {
		sz := m.AlreadyExist.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Deadlock != nil {
		sz := m.Deadlock.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.CommitTsExpired != nil {
		sz := m.CommitTsExpired.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.TxnNotFound != nil {
		sz := m.TxnNotFound.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.CommitTsTooLarge != nil {
		sz := m.CommitTsTooLarge.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *WriteConflict) Size() (n int) {
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	if m.ConflictTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ConflictTs))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Primary); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.ConflictCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ConflictCommitTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *AlreadyExist) Size() (n int) {
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *Deadlock) Size() (n int) {
	if m.LockTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.LockTs))
	}
	if sz := len(m.LockKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.DeadlockKeyHash != 0 {
		n += 1 + sovKvrpcpb(uint64(m.DeadlockKeyHash))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CommitTsExpired) Size() (n int) {
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	if m.AttemptedCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.AttemptedCommitTs))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.MinCommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.MinCommitTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *TxnNotFound) Size() (n int) {
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	if sz := len(m.PrimaryKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *CommitTsTooLarge) Size() (n int) {
	if m.CommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitTs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *TimeDetail) Size() (n int) {
	if m.WaitWallTimeMs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.WaitWallTimeMs))
	}
	if m.ProcessWallTimeMs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ProcessWallTimeMs))
	}
	if m.KvReadWallTimeMs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.KvReadWallTimeMs))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ScanInfo) Size() (n int) {
	if m.Total != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Total))
	}
	if m.Processed != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Processed))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ScanDetail) Size() (n int) {
	if m.Write != nil {
		sz := m.Write.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Lock != nil {
		sz := m.Lock.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Data != nil {
		sz := m.Data.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ScanDetailV2) Size() (n int) {
	if m.ProcessedVersions != 0 {
		n += 1 + sovKvrpcpb(uint64(m.ProcessedVersions))
	}
	if m.TotalVersions != 0 {
		n += 1 + sovKvrpcpb(uint64(m.TotalVersions))
	}
	if m.RocksdbDeleteSkippedCount != 0 {
		n += 1 + sovKvrpcpb(uint64(m.RocksdbDeleteSkippedCount))
	}
	if m.RocksdbKeySkippedCount != 0 {
		n += 1 + sovKvrpcpb(uint64(m.RocksdbKeySkippedCount))
	}
	if m.RocksdbBlockCacheHitCount != 0 {
		n += 1 + sovKvrpcpb(uint64(m.RocksdbBlockCacheHitCount))
	}
	if m.RocksdbBlockReadCount != 0 {
		n += 1 + sovKvrpcpb(uint64(m.RocksdbBlockReadCount))
	}
	if m.RocksdbBlockReadByte != 0 {
		n += 1 + sovKvrpcpb(uint64(m.RocksdbBlockReadByte))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ExecDetails) Size() (n int) {
	if m.TimeDetail != nil {
		sz := m.TimeDetail.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.ScanDetail != nil {
		sz := m.ScanDetail.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of m in bytes.
func (m *ExecDetailsV2) Size() (n int) {
	if m.TimeDetail != nil {
		sz := m.TimeDetail.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.ScanDetailV2 != nil {
		sz := m.ScanDetailV2.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes. Byte fields
// are emitted only when non-empty.
func (m *KvPair) Size() (n int) {
	if m.Error != nil {
		sz := m.Error.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *Mutation) Size() (n int) {
	if m.Op != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Op))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Assertion != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Assertion))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *MvccWrite) Size() (n int) {
	if m.Type != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Type))
	}
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	if m.CommitTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.CommitTs))
	}
	if sz := len(m.ShortValue); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *MvccValue) Size() (n int) {
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *MvccLock) Size() (n int) {
	if m.Type != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Type))
	}
	if m.StartTs != 0 {
		n += 1 + sovKvrpcpb(uint64(m.StartTs))
	}
	if sz := len(m.Primary); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.ShortValue); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes. Repeated
// message fields contribute one tag + length-prefixed payload per element.
func (m *MvccInfo) Size() (n int) {
	if m.Lock != nil {
		sz := m.Lock.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	// Ranging over a nil/empty slice is a no-op, so no length guard is needed.
	for _, w := range m.Writes {
		sz := w.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	for _, v := range m.Values {
		sz := v.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *TxnInfo) Size() (n int) {
	if m.Txn != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Txn))
	}
	if m.Status != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Status))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *KeyRange) Size() (n int) {
	if sz := len(m.StartKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.EndKey); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *LeaderInfo) Size() (n int) {
	if m.RegionId != 0 {
		n += 1 + sovKvrpcpb(uint64(m.RegionId))
	}
	if m.PeerId != 0 {
		n += 1 + sovKvrpcpb(uint64(m.PeerId))
	}
	if m.Term != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Term))
	}
	if m.RegionEpoch != nil {
		sz := m.RegionEpoch.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *CheckLeaderRequest) Size() (n int) {
	for _, r := range m.Regions {
		sz := r.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Ts != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Ts))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes. Regions is a
// packed repeated varint field: one tag, one length prefix, then the
// concatenated varint payloads.
func (m *CheckLeaderResponse) Size() (n int) {
	if len(m.Regions) > 0 {
		payload := 0
		for _, id := range m.Regions {
			payload += sovKvrpcpb(uint64(id))
		}
		n += 1 + sovKvrpcpb(uint64(payload)) + payload
	}
	if m.Ts != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Ts))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *RawGetKeyTTLRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes. NotFound, a
// bool field, costs a fixed 2 bytes (tag + one-byte varint) when set.
func (m *RawGetKeyTTLResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Ttl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Ttl))
	}
	if m.NotFound {
		n += 2
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *RawCASRequest) Size() (n int) {
	if m.Context != nil {
		sz := m.Context.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Key); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.PreviousNotExist {
		n += 2 // tag + one-byte bool varint
	}
	if sz := len(m.PreviousValue); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Cf); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.Ttl != 0 {
		n += 1 + sovKvrpcpb(uint64(m.Ttl))
	}
	n += len(m.XXX_unrecognized)
	return n
}
// Size returns the encoded wire size of the message in bytes.
func (m *RawCASResponse) Size() (n int) {
	if m.RegionError != nil {
		sz := m.RegionError.Size()
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Error); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if sz := len(m.Value); sz > 0 {
		n += 1 + sz + sovKvrpcpb(uint64(sz))
	}
	if m.NotEqual {
		n += 2 // tag + one-byte bool varint
	}
	n += len(m.XXX_unrecognized)
	return n
}
// sovKvrpcpb returns the number of bytes needed to encode x as a protobuf
// varint (7 payload bits per byte): 1 byte for values < 128, up to 10 bytes
// for the full 64-bit range.
func sovKvrpcpb(x uint64) (n int) {
	n = 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}
// sozKvrpcpb returns the varint-encoded size of x after zigzag encoding
// (as used for sint64 fields): the sign bit is folded into the low bit so
// small negative values stay small on the wire.
func sozKvrpcpb(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovKvrpcpb(zigzag)
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any already-set fields. Unknown fields are preserved in XXX_unrecognized.
// Fields: 1=Context (message), 2=Key (bytes), 3=Version (varint).
func (m *GetRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field's tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Key, length-delimited bytes (copied, not aliased).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Version, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			m.Version = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Version |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any already-set fields. Unknown fields are preserved in XXX_unrecognized.
// Fields: 1=RegionError (message), 2=Error (message), 3=Value (bytes),
// 4=NotFound (bool), 6=ExecDetailsV2 (message); field 5 is unused here.
func (m *GetResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field's tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Value, length-delimited bytes (copied, not aliased).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: NotFound, varint decoded as bool.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NotFound = bool(v != 0)
		case 6:
			// Field 6: ExecDetailsV2, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExecDetailsV2", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ExecDetailsV2 == nil {
				m.ExecDetailsV2 = &ExecDetailsV2{}
			}
			if err := m.ExecDetailsV2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any already-set fields. Unknown fields are preserved in XXX_unrecognized.
// Fields: 1=Context (message), 2=StartKey (bytes), 3=Limit (uint32 varint),
// 4=Version (varint), 5=KeyOnly (bool), 6=Reverse (bool), 7=EndKey (bytes),
// 8=SampleStep (uint32 varint).
func (m *ScanRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field's tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScanRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartKey, length-delimited bytes (copied, not aliased).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Limit, uint32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: Version, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			m.Version = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Version |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: KeyOnly, varint decoded as bool.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeyOnly", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.KeyOnly = bool(v != 0)
		case 6:
			// Field 6: Reverse, varint decoded as bool.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Reverse = bool(v != 0)
		case 7:
			// Field 7: EndKey, length-delimited bytes (copied, not aliased).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		case 8:
			// Field 8: SampleStep, uint32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SampleStep", wireType)
			}
			m.SampleStep = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SampleStep |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any already-set fields. Unknown fields are preserved in XXX_unrecognized.
// Fields: 1=RegionError (message), 2=Pairs (repeated message), 3=Error (message).
func (m *ScanResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field's tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScanResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Pairs, repeated sub-message; each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pairs = append(m.Pairs, &KvPair{})
			if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Error, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any already-set fields. Unknown fields are preserved in XXX_unrecognized.
// Fields: 1=Context, 2=Mutations (repeated message), 3=PrimaryLock (bytes),
// 4=StartVersion, 5=LockTtl, 6=SkipConstraintCheck (bool),
// 7=IsPessimisticLock (repeated bool, packed or unpacked), 8=TxnSize,
// 9=ForUpdateTs, 10=MinCommitTs, 11=UseAsyncCommit (bool),
// 12=Secondaries (repeated bytes), 13=TryOnePc (bool), 14=MaxCommitTs.
func (m *PrewriteRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field's tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PrewriteRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PrewriteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Mutations, repeated sub-message; each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Mutations = append(m.Mutations, &Mutation{})
			if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: PrimaryLock, length-delimited bytes (copied, not aliased).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...)
			if m.PrimaryLock == nil {
				m.PrimaryLock = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: StartVersion, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: LockTtl, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType)
			}
			m.LockTtl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockTtl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: SkipConstraintCheck, varint decoded as bool.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SkipConstraintCheck", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.SkipConstraintCheck = bool(v != 0)
		case 7:
			// Field 7: IsPessimisticLock, repeated bool. Wire type 0 is a single
			// unpacked element; wire type 2 is a packed run of varints.
			if wireType == 0 {
				var v int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.IsPessimisticLock = append(m.IsPessimisticLock, bool(v != 0))
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthKvrpcpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v int
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowKvrpcpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (int(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.IsPessimisticLock = append(m.IsPessimisticLock, bool(v != 0))
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field IsPessimisticLock", wireType)
			}
		case 8:
			// Field 8: TxnSize, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TxnSize", wireType)
			}
			m.TxnSize = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TxnSize |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 9:
			// Field 9: ForUpdateTs, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForUpdateTs", wireType)
			}
			m.ForUpdateTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ForUpdateTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 10:
			// Field 10: MinCommitTs, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MinCommitTs", wireType)
			}
			m.MinCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MinCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 11:
			// Field 11: UseAsyncCommit, varint decoded as bool.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field UseAsyncCommit", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.UseAsyncCommit = bool(v != 0)
		case 12:
			// Field 12: Secondaries, repeated bytes; each occurrence appends one copy.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Secondaries", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Secondaries = append(m.Secondaries, make([]byte, postIndex-iNdEx))
			copy(m.Secondaries[len(m.Secondaries)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 13:
			// Field 13: TryOnePc, varint decoded as bool.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TryOnePc", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.TryOnePc = bool(v != 0)
		case 14:
			// Field 14: MaxCommitTs, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxCommitTs", wireType)
			}
			m.MaxCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MaxCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any already-set fields. Unknown fields are preserved in XXX_unrecognized.
// Fields: 1=RegionError (message), 2=Errors (repeated message),
// 3=MinCommitTs (varint), 4=OnePcCommitTs (varint).
func (m *PrewriteResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field's tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PrewriteResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PrewriteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, length-delimited sub-message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Errors, repeated sub-message; each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Errors = append(m.Errors, &KeyError{})
			if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: MinCommitTs, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MinCommitTs", wireType)
			}
			m.MinCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MinCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: OnePcCommitTs, varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field OnePcCommitTs", wireType)
			}
			m.OnePcCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.OnePcCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *PessimisticLockRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: PessimisticLockRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PessimisticLockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: repeated Mutations (each a length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Mutations = append(m.Mutations, &Mutation{})
			if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: PrimaryLock (bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy the payload into the existing backing array when possible.
			m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...)
			if m.PrimaryLock == nil {
				m.PrimaryLock = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: StartVersion (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: LockTtl (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType)
			}
			m.LockTtl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockTtl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: ForUpdateTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForUpdateTs", wireType)
			}
			m.ForUpdateTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ForUpdateTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7:
			// Field 7: IsFirstLock (bool encoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field IsFirstLock", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.IsFirstLock = bool(v != 0)
		case 8:
			// Field 8: WaitTimeout (int64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType)
			}
			m.WaitTimeout = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.WaitTimeout |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 9:
			// Field 9: Force (bool encoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Force = bool(v != 0)
		case 10:
			// Field 10: ReturnValues (bool encoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReturnValues", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ReturnValues = bool(v != 0)
		case 11:
			// Field 11: MinCommitTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MinCommitTs", wireType)
			}
			m.MinCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MinCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *PessimisticLockResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PessimisticLockResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PessimisticLockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: repeated Errors (each a length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Errors = append(m.Errors, &KeyError{})
			if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: CommitTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitTs", wireType)
			}
			m.CommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: Value (bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 5:
			// Field 5: repeated Values (bytes): append a fresh copy of the payload.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Values = append(m.Values, make([]byte, postIndex-iNdEx))
			copy(m.Values[len(m.Values)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// Field 6: repeated NotFounds (bool). Accepts both unpacked
			// (wire type 0, one varint per element) and packed
			// (wire type 2, length-delimited run of varints) encodings.
			if wireType == 0 {
				var v int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.NotFounds = append(m.NotFounds, bool(v != 0))
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthKvrpcpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Decode consecutive varint bools until the packed run ends.
				for iNdEx < postIndex {
					var v int
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowKvrpcpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (int(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.NotFounds = append(m.NotFounds, bool(v != 0))
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field NotFounds", wireType)
			}
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *PessimisticRollbackRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PessimisticRollbackRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PessimisticRollbackRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartVersion (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: ForUpdateTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForUpdateTs", wireType)
			}
			m.ForUpdateTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ForUpdateTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: repeated Keys (bytes): append a fresh copy of the payload.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *PessimisticRollbackResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PessimisticRollbackResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PessimisticRollbackResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: repeated Errors (each a length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Errors = append(m.Errors, &KeyError{})
			if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *TxnHeartBeatRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TxnHeartBeatRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TxnHeartBeatRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: PrimaryLock (bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...)
			if m.PrimaryLock == nil {
				m.PrimaryLock = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: StartVersion (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: AdviseLockTtl (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field AdviseLockTtl", wireType)
			}
			m.AdviseLockTtl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.AdviseLockTtl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *TxnHeartBeatResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TxnHeartBeatResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TxnHeartBeatResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: LockTtl (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType)
			}
			m.LockTtl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockTtl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *CheckTxnStatusRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckTxnStatusRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckTxnStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: PrimaryKey (bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PrimaryKey = append(m.PrimaryKey[:0], dAtA[iNdEx:postIndex]...)
			if m.PrimaryKey == nil {
				m.PrimaryKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: LockTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockTs", wireType)
			}
			m.LockTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: CallerStartTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CallerStartTs", wireType)
			}
			m.CallerStartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CallerStartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: CurrentTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CurrentTs", wireType)
			}
			m.CurrentTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CurrentTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: RollbackIfNotExist (bool encoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RollbackIfNotExist", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RollbackIfNotExist = bool(v != 0)
		case 7:
			// Field 7: ForceSyncCommit (bool encoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForceSyncCommit", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ForceSyncCommit = bool(v != 0)
		case 8:
			// Field 8: ResolvingPessimisticLock (bool encoded as a varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ResolvingPessimisticLock", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ResolvingPessimisticLock = bool(v != 0)
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *CheckTxnStatusResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckTxnStatusResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckTxnStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: LockTtl (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType)
			}
			m.LockTtl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockTtl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: CommitVersion (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType)
			}
			m.CommitVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: Action (enum decoded as a varint into the Action type).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
			}
			m.Action = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Action |= (Action(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: LockInfo (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockInfo", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.LockInfo == nil {
				m.LockInfo = &LockInfo{}
			}
			if err := m.LockInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *CheckSecondaryLocksRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckSecondaryLocksRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckSecondaryLocksRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: repeated Keys (bytes): append a fresh copy of the payload.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: StartVersion (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo: the outer loop reads one field key per
// iteration and dispatches on the field number; unknown fields are
// skipped and preserved in XXX_unrecognized.
func (m *CheckSecondaryLocksResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key as a varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckSecondaryLocksResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckSecondaryLocksResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: repeated Locks (each a length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Locks = append(m.Locks, &LockInfo{})
			if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: CommitTs (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitTs", wireType)
			}
			m.CommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its bytes and preserve them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *CommitRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: Keys (repeated bytes; each occurrence is copied into a fresh slice).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: CommitVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType)
			}
			m.CommitVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *CommitResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: CommitVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType)
			}
			m.CommitVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *ImportRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: ImportRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ImportRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Mutations (repeated Mutation message; one element appended per occurrence).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Mutations = append(m.Mutations, &Mutation{})
			if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: CommitVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType)
			}
			m.CommitVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *ImportResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: ImportResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ImportResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited string; note this message carries a plain
			// string error rather than a KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *CleanupRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: CleanupRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CleanupRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Key (bytes; copied so m.Key does not alias dAtA).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: StartVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: CurrentTs (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CurrentTs", wireType)
			}
			m.CurrentTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CurrentTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *CleanupResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: CleanupResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CleanupResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: CommitVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType)
			}
			m.CommitVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *BatchGetRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: BatchGetRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BatchGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Keys (repeated bytes; each occurrence is copied into a fresh slice).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Version (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			m.Version = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Version |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
// Note: field number 3 is not decoded here (no case for it), so a
// tag 3 on the wire falls through to the unknown-field path.
func (m *BatchGetResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: BatchGetResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BatchGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Pairs (repeated KvPair message; one element appended per occurrence).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pairs = append(m.Pairs, &KvPair{})
			if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: ExecDetailsV2 (length-delimited ExecDetailsV2 message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExecDetailsV2", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ExecDetailsV2 == nil {
				m.ExecDetailsV2 = &ExecDetailsV2{}
			}
			if err := m.ExecDetailsV2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Error (length-delimited KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *BatchRollbackRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: BatchRollbackRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BatchRollbackRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: Keys (repeated bytes; each occurrence is copied into a fresh slice).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *BatchRollbackResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: BatchRollbackResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BatchRollbackResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *ScanLockRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: ScanLockRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanLockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: MaxVersion (varint uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxVersion", wireType)
			}
			m.MaxVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MaxVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: StartKey (bytes; copied so m.StartKey does not alias dAtA).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Limit (varint uint32).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: EndKey (bytes; copied so m.EndKey does not alias dAtA).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// merging decoded fields into any already-present values. Unknown
// fields are preserved verbatim in m.XXX_unrecognized.
func (m *ScanLockResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the key varint: fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) must not begin a field here.
		if wireType == 4 {
			return fmt.Errorf("proto: ScanLockResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanLockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Locks (repeated LockInfo message; one element appended per occurrence).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Locks = append(m.Locks, &LockInfo{})
			if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *ResolveLockRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ResolveLockRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ResolveLockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: context (embedded Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: start_version (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: commit_version (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType)
			}
			m.CommitVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: txn_infos (repeated TxnInfo message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TxnInfos", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.TxnInfos = append(m.TxnInfos, &TxnInfo{})
			if err := m.TxnInfos[len(m.TxnInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: keys (repeated bytes); each entry gets a fresh copy.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *ResolveLockResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ResolveLockResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ResolveLockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: region_error (embedded errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: error (embedded KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *GCRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GCRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GCRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: context (embedded Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: safe_point (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SafePoint", wireType)
			}
			m.SafePoint = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SafePoint |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *GCResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GCResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GCResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: region_error (embedded errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: error (embedded KeyError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: context (embedded Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: start_key (bytes); copied into m.StartKey, reusing capacity.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: end_key (bytes); copied into m.EndKey, reusing capacity.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: notify_only (bool encoded as varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NotifyOnly", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NotifyOnly = bool(v != 0)
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: region_error (embedded errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: error (string; plain message, not a KeyError here).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *RawGetRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawGetRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: context (embedded Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: key (bytes); copied into m.Key, reusing capacity.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: cf (column family name, string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *RawGetResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawGetResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: region_error (embedded errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: error (string; raw API errors are plain strings).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: value (bytes); copied into m.Value, reusing capacity.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: not_found (bool encoded as varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NotFound = bool(v != 0)
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *RawBatchGetRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchGetRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: context (embedded Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: keys (repeated bytes); each entry gets a fresh copy.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: cf (column family name, string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *RawBatchGetResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchGetResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: region_error (embedded errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: pairs (repeated KvPair message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pairs = append(m.Pairs, &KvPair{})
			if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *RawPutRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawPutRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawPutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: context (embedded Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: key (bytes); copied into m.Key, reusing capacity.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: value (bytes); copied into m.Value, reusing capacity.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: cf (column family name, string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			// Field 5: ttl (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType)
			}
			m.Ttl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Ttl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: for_cas (bool encoded as varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForCas", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ForCas = bool(v != 0)
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA.
// protoc-gen-gogo generated decoder: reads key varints, dispatches on the
// field number, and preserves unknown fields in XXX_unrecognized.
// Comments added for review; the generated logic is untouched.
func (m *RawPutResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawPutResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawPutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: region_error (embedded errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: error (string; raw API errors are plain strings).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its bytes and retain them verbatim.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawBatchPutRequest from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawBatchPutRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchPutRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchPutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, a length-delimited Context message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Pairs, a repeated length-delimited KvPair message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pairs = append(m.Pairs, &KvPair{})
			if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Cf, a length-delimited string (column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: Ttl, a uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType)
			}
			m.Ttl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Ttl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: ForCas, a bool encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForCas", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ForCas = bool(v != 0)
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawBatchPutResponse from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawBatchPutResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchPutResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchPutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, a length-delimited errorpb.Error message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawDeleteRequest from the protobuf wire format in dAtA,
// merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawDeleteRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawDeleteRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, a length-delimited Context message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Key, length-delimited bytes; reuses m.Key's backing
			// array when possible.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Cf, a length-delimited string (column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: ForCas, a bool encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForCas", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ForCas = bool(v != 0)
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawDeleteResponse from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawDeleteResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawDeleteResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, a length-delimited errorpb.Error message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawBatchDeleteRequest from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawBatchDeleteRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchDeleteRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, a length-delimited Context message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Keys, repeated length-delimited bytes; each occurrence
			// appends one copied key.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Cf, a length-delimited string (column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: ForCas, a bool encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForCas", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ForCas = bool(v != 0)
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawBatchDeleteResponse from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawBatchDeleteResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchDeleteResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, a length-delimited errorpb.Error message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawScanRequest from the protobuf wire format in dAtA,
// merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawScanRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawScanRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawScanRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, a length-delimited Context message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartKey, length-delimited bytes; reuses m.StartKey's
			// backing array when possible.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Limit, a uint32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: KeyOnly, a bool encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeyOnly", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.KeyOnly = bool(v != 0)
		case 5:
			// Field 5: Cf, a length-delimited string (column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// Field 6: Reverse, a bool encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Reverse = bool(v != 0)
		case 7:
			// Field 7: EndKey, length-delimited bytes; reuses m.EndKey's
			// backing array when possible.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawScanResponse from the protobuf wire format in dAtA,
// merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawScanResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawScanResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawScanResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, a length-delimited errorpb.Error message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Kvs, a repeated length-delimited KvPair message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Kvs = append(m.Kvs, &KvPair{})
			if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawDeleteRangeRequest from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawDeleteRangeRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawDeleteRangeRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawDeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, a length-delimited Context message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartKey, length-delimited bytes; reuses m.StartKey's
			// backing array when possible.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: EndKey, length-delimited bytes; reuses m.EndKey's
			// backing array when possible.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Cf, a length-delimited string (column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawDeleteRangeResponse from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawDeleteRangeResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawDeleteRangeResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawDeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError, a length-delimited errorpb.Error message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawBatchScanRequest from the protobuf wire format in
// dAtA, merging parsed fields into m. Unknown fields are preserved in
// m.XXX_unrecognized. Generated by protoc-gen-gogo; do not edit by hand.
func (m *RawBatchScanRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number in the high bits, wire type in
		// the low three bits.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchScanRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchScanRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context, a length-delimited Context message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Ranges, a repeated length-delimited KeyRange message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ranges = append(m.Ranges, &KeyRange{})
			if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: EachLimit, a uint32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field EachLimit", wireType)
			}
			m.EachLimit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.EachLimit |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: KeyOnly, a bool encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeyOnly", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.KeyOnly = bool(v != 0)
		case 5:
			// Field 5: Cf, a length-delimited string (column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// Field 6: Reverse, a bool encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Reverse = bool(v != 0)
		default:
			// Unknown field: skip it and retain its raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RawBatchScanResponse from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = RegionError (message), 2 = Kvs (repeated message).
func (m *RawBatchScanResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawBatchScanResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawBatchScanResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			// Length-delimited field: read the byte length, then delegate to
			// the submessage's Unmarshal over exactly that many bytes.
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Repeated field: append a fresh element and unmarshal into it.
			m.Kvs = append(m.Kvs, &KvPair{})
			if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes so they
			// round-trip through re-marshaling.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UnsafeDestroyRangeRequest from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Context (message), 2 = StartKey (bytes), 3 = EndKey (bytes).
func (m *UnsafeDestroyRangeRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UnsafeDestroyRangeRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UnsafeDestroyRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy into the existing backing array when possible; normalize a
			// zero-length value to a non-nil empty slice.
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an UnsafeDestroyRangeResponse from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = RegionError (message), 2 = Error (string).
func (m *UnsafeDestroyRangeResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UnsafeDestroyRangeResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UnsafeDestroyRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Guard against a length whose uint64 value overflows int.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RegisterLockObserverRequest from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Context (message), 2 = MaxTs (varint uint64).
func (m *RegisterLockObserverRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RegisterLockObserverRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RegisterLockObserverRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType)
			}
			// Varint field: reset then accumulate 7 bits per byte, little-endian.
			m.MaxTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MaxTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RegisterLockObserverResponse from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Error (string).
func (m *RegisterLockObserverResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RegisterLockObserverResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RegisterLockObserverResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Guard against a length whose uint64 value overflows int.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CheckLockObserverRequest from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Context (message), 2 = MaxTs (varint uint64).
func (m *CheckLockObserverRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckLockObserverRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckLockObserverRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType)
			}
			// Varint field: reset then accumulate 7 bits per byte, little-endian.
			m.MaxTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MaxTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CheckLockObserverResponse from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Error (string), 2 = IsClean (varint bool), 3 = Locks (repeated message).
func (m *CheckLockObserverResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckLockObserverResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckLockObserverResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Guard against a length whose uint64 value overflows int.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field IsClean", wireType)
			}
			// Bool is encoded as a varint; any non-zero value means true.
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.IsClean = bool(v != 0)
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Repeated field: append a fresh element and unmarshal into it.
			m.Locks = append(m.Locks, &LockInfo{})
			if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RemoveLockObserverRequest from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Context (message), 2 = MaxTs (varint uint64).
func (m *RemoveLockObserverRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RemoveLockObserverRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RemoveLockObserverRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType)
			}
			// Varint field: reset then accumulate 7 bits per byte, little-endian.
			m.MaxTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MaxTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a RemoveLockObserverResponse from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Error (string).
func (m *RemoveLockObserverResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RemoveLockObserverResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RemoveLockObserverResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Guard against a length whose uint64 value overflows int.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a PhysicalScanLockRequest from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Context (message), 2 = MaxTs (varint uint64),
// 3 = StartKey (bytes), 4 = Limit (varint uint32).
func (m *PhysicalScanLockRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PhysicalScanLockRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PhysicalScanLockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType)
			}
			// Varint field: reset then accumulate 7 bits per byte, little-endian.
			m.MaxTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MaxTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy into the existing backing array when possible; normalize a
			// zero-length value to a non-nil empty slice.
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a PhysicalScanLockResponse from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Error (string), 2 = Locks (repeated message).
func (m *PhysicalScanLockResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PhysicalScanLockResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PhysicalScanLockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Guard against a length whose uint64 value overflows int.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Repeated field: append a fresh element and unmarshal into it.
			m.Locks = append(m.Locks, &LockInfo{})
			if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a SplitRegionRequest from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = Context (message), 2 = SplitKey (bytes), 3 = SplitKeys (repeated bytes).
func (m *SplitRegionRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SplitRegionRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SplitRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SplitKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy into the existing backing array when possible; normalize a
			// zero-length value to a non-nil empty slice.
			m.SplitKey = append(m.SplitKey[:0], dAtA[iNdEx:postIndex]...)
			if m.SplitKey == nil {
				m.SplitKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SplitKeys", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Repeated bytes field: each occurrence appends one new element.
			m.SplitKeys = append(m.SplitKeys, make([]byte, postIndex-iNdEx))
			copy(m.SplitKeys[len(m.SplitKeys)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a SplitRegionResponse from the protobuf wire format in dAtA.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
// Fields: 1 = RegionError (message), 2 = Left (message), 3 = Right (message),
// 4 = Regions (repeated message).
func (m *SplitRegionResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SplitRegionResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SplitRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Left == nil {
				m.Left = &metapb.Region{}
			}
			if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Right == nil {
				m.Right = &metapb.Region{}
			}
			if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Repeated field: append a fresh element and unmarshal into it.
			m.Regions = append(m.Regions, &metapb.Region{})
			if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and preserve the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *ReadIndexRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReadIndexRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReadIndexRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // context: embedded Context message (length-delimited)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // start_ts: uint64 varint
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3: // ranges: repeated KeyRange messages (one appended per occurrence)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ranges = append(m.Ranges, &KeyRange{})
			if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *ReadIndexResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReadIndexResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReadIndexResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // region_error: embedded errorpb.Error message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // read_index: uint64 varint
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReadIndex", wireType)
			}
			m.ReadIndex = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ReadIndex |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3: // locked: embedded LockInfo message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Locked", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Locked == nil {
				m.Locked = &LockInfo{}
			}
			if err := m.Locked.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerMutation) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerMutation: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerMutation: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // op: VerOp enum, varint-encoded
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
			}
			m.Op = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Op |= (VerOp(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2: // key: bytes (copied into m.Key; nil becomes an empty slice)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3: // value: bytes (copied into m.Value; nil becomes an empty slice)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerValue) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerValue: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerValue: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // value: bytes (copied into m.Value; nil becomes an empty slice)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 2: // version: uint64 varint
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			m.Version = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Version |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerError) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerError: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerError: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // error: length-delimited string
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerKvPair) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerKvPair: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerKvPair: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // error: embedded VerError message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &VerError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // key: bytes (copied into m.Key; nil becomes an empty slice)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3: // value: embedded VerValue message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Value == nil {
				m.Value = &VerValue{}
			}
			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerGetRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerGetRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // context: embedded Context message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // key: bytes (copied into m.Key; nil becomes an empty slice)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3: // start_version: uint64 varint
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerGetResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerGetResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // region_error: embedded errorpb.Error message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // error: embedded VerError message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &VerError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3: // value: embedded VerValue message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Value == nil {
				m.Value = &VerValue{}
			}
			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4: // not_found: bool encoded as varint (any non-zero value is true)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NotFound = bool(v != 0)
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerBatchGetRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerBatchGetRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerBatchGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // context: embedded Context message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // keys: repeated bytes — each occurrence appends one copied slice
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key, make([]byte, postIndex-iNdEx))
			copy(m.Key[len(m.Key)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3: // start_version: uint64 varint
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerBatchGetResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerBatchGetResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerBatchGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // region_error: embedded errorpb.Error message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // pairs: repeated VerKvPair messages (one appended per occurrence)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pairs = append(m.Pairs, &VerKvPair{})
			if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerMutRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerMutRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerMutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // context: embedded Context message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // mut: embedded VerMutation message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mut", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Mut == nil {
				m.Mut = &VerMutation{}
			}
			if err := m.Mut.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3: // version: uint64 varint
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			m.Version = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Version |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerMutResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerMutResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerMutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // region_error: embedded errorpb.Error message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // error: embedded VerError message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &VerError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerBatchMutRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerBatchMutRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerBatchMutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // context: embedded Context message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // muts: repeated VerMutation messages (one appended per occurrence)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Muts", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Muts = append(m.Muts, &VerMutation{})
			if err := m.Muts[len(m.Muts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3: // version: uint64 varint
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			m.Version = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Version |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo from kvrpcpb.proto; do not hand-edit —
// changes are lost on regeneration.
func (m *VerBatchMutResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the varint field tag: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerBatchMutResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerBatchMutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // region_error: embedded errorpb.Error message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // error: embedded VerError message
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &VerError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default: // unknown field: skip it and preserve the raw bytes
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *VerScanRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerScanRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerScanRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartKey (length-delimited bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				// Keep the field non-nil so "present but empty" is preserved.
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: EndKey (length-delimited bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Limit (varint, uint32).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (uint32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: KeyOnly (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KeyOnly", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.KeyOnly = bool(v != 0)
		case 6:
			// Field 6: Reverse (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Reverse = bool(v != 0)
		case 7:
			// Field 7: StartVersion (varint, uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType)
			}
			m.StartVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *VerScanResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerScanResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerScanResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Pairs (repeated length-delimited VerKvPair messages;
			// each occurrence appends one element).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pairs = append(m.Pairs, &VerKvPair{})
			if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *VerDeleteRangeRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerDeleteRangeRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerDeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartKey (length-delimited bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				// Keep the field non-nil so "present but empty" is preserved.
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: EndKey (length-delimited bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *VerDeleteRangeResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VerDeleteRangeResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VerDeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited VerError message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &VerError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *MvccGetByKeyRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccGetByKeyRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccGetByKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Key (length-delimited bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				// Keep the field non-nil so "present but empty" is preserved.
				m.Key = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *MvccGetByKeyResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccGetByKeyResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccGetByKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Info (length-delimited MvccInfo message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Info == nil {
				m.Info = &MvccInfo{}
			}
			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *MvccGetByStartTsRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccGetByStartTsRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccGetByStartTsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (length-delimited Context message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: StartTs (varint, uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *MvccGetByStartTsResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccGetByStartTsResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccGetByStartTsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (length-delimited errorpb.Error message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Key (length-delimited bytes).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				// Keep the field non-nil so "present but empty" is preserved.
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Info (length-delimited MvccInfo message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Info == nil {
				m.Info = &MvccInfo{}
			}
			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA, merging
// decoded fields into m. Unknown fields (including field number 4, which has
// no case below) are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo; keep in sync with kvrpcpb.proto.
func (m *Context) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next tag (varint: field number << 3 | wire type).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Context: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Context: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionId (varint, uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: RegionEpoch (length-delimited metapb.RegionEpoch message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionEpoch == nil {
				m.RegionEpoch = &metapb.RegionEpoch{}
			}
			if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Peer (length-delimited metapb.Peer message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Peer == nil {
				m.Peer = &metapb.Peer{}
			}
			if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Term (varint, uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
			}
			m.Term = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Term |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: Priority (varint, CommandPri enum).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType)
			}
			m.Priority = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Priority |= (CommandPri(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7:
			// Field 7: IsolationLevel (varint, IsolationLevel enum).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field IsolationLevel", wireType)
			}
			m.IsolationLevel = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.IsolationLevel |= (IsolationLevel(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8:
			// Field 8: NotFillCache (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NotFillCache", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NotFillCache = bool(v != 0)
		case 9:
			// Field 9: SyncLog (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SyncLog", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.SyncLog = bool(v != 0)
		case 10:
			// Field 10: RecordTimeStat (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RecordTimeStat", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RecordTimeStat = bool(v != 0)
		case 11:
			// Field 11: RecordScanStat (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RecordScanStat", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RecordScanStat = bool(v != 0)
		case 12:
			// Field 12: ReplicaRead (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReplicaRead", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.ReplicaRead = bool(v != 0)
		case 13:
			// Field 13: ResolvedLocks (repeated uint64; accepts both unpacked
			// (wire type 0, one varint per occurrence) and packed
			// (wire type 2, length-delimited run of varints) encodings).
			if wireType == 0 {
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.ResolvedLocks = append(m.ResolvedLocks, v)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthKvrpcpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Decode every varint inside the packed payload.
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowKvrpcpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (uint64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.ResolvedLocks = append(m.ResolvedLocks, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field ResolvedLocks", wireType)
			}
		case 14:
			// Field 14: MaxExecutionDurationMs (varint, uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxExecutionDurationMs", wireType)
			}
			m.MaxExecutionDurationMs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MaxExecutionDurationMs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 15:
			// Field 15: AppliedIndex (varint, uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field AppliedIndex", wireType)
			}
			m.AppliedIndex = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.AppliedIndex |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 16:
			// Field 16: TaskId (varint, uint64).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TaskId", wireType)
			}
			m.TaskId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TaskId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 17:
			// Field 17: StaleRead (varint, bool).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StaleRead", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.StaleRead = bool(v != 0)
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *LockInfo) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: LockInfo: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: LockInfo: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...)
			if m.PrimaryLock == nil {
				m.PrimaryLock = []byte{}
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockVersion", wireType)
			}
			m.LockVersion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockVersion |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType)
			}
			m.LockTtl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockTtl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TxnSize", wireType)
			}
			m.TxnSize = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TxnSize |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockType", wireType)
			}
			m.LockType = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockType |= (Op(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockForUpdateTs", wireType)
			}
			m.LockForUpdateTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockForUpdateTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field UseAsyncCommit", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.UseAsyncCommit = bool(v != 0)
		case 9:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MinCommitTs", wireType)
			}
			m.MinCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MinCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 10:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Secondaries", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Secondaries = append(m.Secondaries, make([]byte, postIndex-iNdEx))
			copy(m.Secondaries[len(m.Secondaries)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *KeyError) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: KeyError: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: KeyError: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Locked", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Locked == nil {
				m.Locked = &LockInfo{}
			}
			if err := m.Locked.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Retryable", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Retryable = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Abort", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Abort = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Conflict", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Conflict == nil {
				m.Conflict = &WriteConflict{}
			}
			if err := m.Conflict.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AlreadyExist", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.AlreadyExist == nil {
				m.AlreadyExist = &AlreadyExist{}
			}
			if err := m.AlreadyExist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deadlock", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Deadlock == nil {
				m.Deadlock = &Deadlock{}
			}
			if err := m.Deadlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitTsExpired", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.CommitTsExpired == nil {
				m.CommitTsExpired = &CommitTsExpired{}
			}
			if err := m.CommitTsExpired.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 8:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TxnNotFound", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TxnNotFound == nil {
				m.TxnNotFound = &TxnNotFound{}
			}
			if err := m.TxnNotFound.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 9:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitTsTooLarge", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.CommitTsTooLarge == nil {
				m.CommitTsTooLarge = &CommitTsTooLarge{}
			}
			if err := m.CommitTsTooLarge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *WriteConflict) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: WriteConflict: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: WriteConflict: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ConflictTs", wireType)
			}
			m.ConflictTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ConflictTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Primary = append(m.Primary[:0], dAtA[iNdEx:postIndex]...)
			if m.Primary == nil {
				m.Primary = []byte{}
			}
			iNdEx = postIndex
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ConflictCommitTs", wireType)
			}
			m.ConflictCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ConflictCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *AlreadyExist) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: AlreadyExist: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: AlreadyExist: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *Deadlock) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Deadlock: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Deadlock: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockTs", wireType)
			}
			m.LockTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LockTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LockKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.LockKey = append(m.LockKey[:0], dAtA[iNdEx:postIndex]...)
			if m.LockKey == nil {
				m.LockKey = []byte{}
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field DeadlockKeyHash", wireType)
			}
			m.DeadlockKeyHash = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.DeadlockKeyHash |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *CommitTsExpired) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CommitTsExpired: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CommitTsExpired: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field AttemptedCommitTs", wireType)
			}
			m.AttemptedCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.AttemptedCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MinCommitTs", wireType)
			}
			m.MinCommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.MinCommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *TxnNotFound) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TxnNotFound: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TxnNotFound: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PrimaryKey = append(m.PrimaryKey[:0], dAtA[iNdEx:postIndex]...)
			if m.PrimaryKey == nil {
				m.PrimaryKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *CommitTsTooLarge) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CommitTsTooLarge: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CommitTsTooLarge: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitTs", wireType)
			}
			m.CommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *TimeDetail) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TimeDetail: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TimeDetail: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field WaitWallTimeMs", wireType)
			}
			m.WaitWallTimeMs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.WaitWallTimeMs |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ProcessWallTimeMs", wireType)
			}
			m.ProcessWallTimeMs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ProcessWallTimeMs |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field KvReadWallTimeMs", wireType)
			}
			m.KvReadWallTimeMs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.KvReadWallTimeMs |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *ScanInfo) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScanInfo: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanInfo: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
			}
			m.Total = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Total |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Processed", wireType)
			}
			m.Processed = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Processed |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire-format bytes in dAtA.
// Code generated by protoc-gen-gogo — do not hand-edit; regenerate from kvrpcpb.proto.
func (m *ScanDetail) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag number and wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScanDetail: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanDetail: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Write", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Write == nil {
				m.Write = &ScanInfo{}
			}
			if err := m.Write.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Lock", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Lock == nil {
				m.Lock = &ScanInfo{}
			}
			if err := m.Lock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Data == nil {
				m.Data = &ScanInfo{}
			}
			if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a ScanDetailV2 from the protobuf wire format in dAtA,
// merging decoded fields into m. All seven fields are uint64 varints
// (wire type 0); unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *ScanDetailV2) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ScanDetailV2: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ScanDetailV2: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ProcessedVersions", wireType)
			}
			m.ProcessedVersions = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ProcessedVersions |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TotalVersions", wireType)
			}
			m.TotalVersions = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TotalVersions |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RocksdbDeleteSkippedCount", wireType)
			}
			m.RocksdbDeleteSkippedCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RocksdbDeleteSkippedCount |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RocksdbKeySkippedCount", wireType)
			}
			m.RocksdbKeySkippedCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RocksdbKeySkippedCount |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RocksdbBlockCacheHitCount", wireType)
			}
			m.RocksdbBlockCacheHitCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RocksdbBlockCacheHitCount |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RocksdbBlockReadCount", wireType)
			}
			m.RocksdbBlockReadCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RocksdbBlockReadCount |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RocksdbBlockReadByte", wireType)
			}
			m.RocksdbBlockReadByte = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RocksdbBlockReadByte |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an ExecDetails from the protobuf wire format in dAtA.
// Both fields (TimeDetail, ScanDetail) are length-delimited sub-messages
// (wire type 2), lazily allocated on first occurrence and decoded recursively.
// Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *ExecDetails) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ExecDetails: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExecDetails: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeDetail", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TimeDetail == nil {
				m.TimeDetail = &TimeDetail{}
			}
			if err := m.TimeDetail.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ScanDetail", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ScanDetail == nil {
				m.ScanDetail = &ScanDetail{}
			}
			if err := m.ScanDetail.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an ExecDetailsV2 from the protobuf wire format in dAtA.
// Both fields (TimeDetail, ScanDetailV2) are length-delimited sub-messages
// (wire type 2), lazily allocated on first occurrence and decoded recursively.
// Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *ExecDetailsV2) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ExecDetailsV2: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExecDetailsV2: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeDetail", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TimeDetail == nil {
				m.TimeDetail = &TimeDetail{}
			}
			if err := m.TimeDetail.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ScanDetailV2", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ScanDetailV2 == nil {
				m.ScanDetailV2 = &ScanDetailV2{}
			}
			if err := m.ScanDetailV2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a KvPair from the protobuf wire format in dAtA.
// Error (field 1) is a KeyError sub-message; Key (2) and Value (3) are
// length-delimited byte strings copied out of dAtA. Unknown fields are
// preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *KvPair) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: KvPair: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: KvPair: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Error == nil {
				m.Error = &KeyError{}
			}
			if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a Mutation from the protobuf wire format in dAtA.
// Op (field 1) and Assertion (4) are enum varints; Key (2) and Value (3)
// are length-delimited byte strings copied out of dAtA. Unknown fields are
// preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *Mutation) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Mutation: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
			}
			m.Op = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Op |= (Op(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Assertion", wireType)
			}
			m.Assertion = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Assertion |= (Assertion(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an MvccWrite from the protobuf wire format in dAtA.
// Type (field 1) is an Op enum varint, StartTs (2) and CommitTs (3) are
// uint64 varints, and ShortValue (4) is a length-delimited byte string
// copied out of dAtA. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *MvccWrite) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccWrite: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccWrite: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= (Op(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CommitTs", wireType)
			}
			m.CommitTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CommitTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ShortValue", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ShortValue = append(m.ShortValue[:0], dAtA[iNdEx:postIndex]...)
			if m.ShortValue == nil {
				m.ShortValue = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an MvccValue from the protobuf wire format in dAtA.
// StartTs (field 1) is a uint64 varint; Value (2) is a length-delimited
// byte string copied out of dAtA. Unknown fields are preserved in
// m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *MvccValue) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccValue: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccValue: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an MvccLock from the protobuf wire format in dAtA.
// Type (field 1) is an Op enum varint, StartTs (2) is a uint64 varint,
// and Primary (3) / ShortValue (4) are length-delimited byte strings
// copied out of dAtA. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *MvccLock) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccLock: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccLock: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= (Op(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
			}
			m.StartTs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.StartTs |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Primary = append(m.Primary[:0], dAtA[iNdEx:postIndex]...)
			if m.Primary == nil {
				m.Primary = []byte{}
			}
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ShortValue", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ShortValue = append(m.ShortValue[:0], dAtA[iNdEx:postIndex]...)
			if m.ShortValue == nil {
				m.ShortValue = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an MvccInfo from the protobuf wire format in dAtA.
// Lock (field 1) is a single MvccLock sub-message; Writes (2) and
// Values (3) are repeated sub-messages, each occurrence appended to the
// corresponding slice. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *MvccInfo) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MvccInfo: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MvccInfo: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Lock", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Lock == nil {
				m.Lock = &MvccLock{}
			}
			if err := m.Lock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Writes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Writes = append(m.Writes, &MvccWrite{})
			if err := m.Writes[len(m.Writes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Values = append(m.Values, &MvccValue{})
			if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a TxnInfo from the protobuf wire format in dAtA.
// Both fields (Txn, Status) are uint64 varints (wire type 0); unknown
// fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *TxnInfo) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TxnInfo: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TxnInfo: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
			}
			m.Txn = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Txn |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
			}
			m.Status = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Status |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a KeyRange from the protobuf wire format in dAtA.
// StartKey (field 1) and EndKey (2) are length-delimited byte strings
// copied out of dAtA. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *KeyRange) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: KeyRange: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: KeyRange: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...)
			if m.StartKey == nil {
				m.StartKey = []byte{}
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...)
			if m.EndKey == nil {
				m.EndKey = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a LeaderInfo from the protobuf wire format in dAtA.
// RegionId (field 1), PeerId (2), and Term (3) are uint64 varints;
// RegionEpoch (4) is a metapb.RegionEpoch sub-message, lazily allocated
// on first occurrence. Unknown fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *LeaderInfo) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: LeaderInfo: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: LeaderInfo: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType)
			}
			m.RegionId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RegionId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType)
			}
			m.PeerId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.PeerId |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
			}
			m.Term = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Term |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionEpoch == nil {
				m.RegionEpoch = &metapb.RegionEpoch{}
			}
			if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CheckLeaderRequest from the protobuf wire format in
// dAtA. Regions (field 1) is a repeated LeaderInfo sub-message, each
// occurrence appended to the slice; Ts (2) is a uint64 varint. Unknown
// fields are preserved in m.XXX_unrecognized.
// Generated by protoc-gen-gogo — do not edit by hand.
func (m *CheckLeaderRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint): fieldNum<<3 | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckLeaderRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Regions = append(m.Regions, &LeaderInfo{})
			if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ts", wireType)
			}
			m.Ts = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Ts |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format contained in dAtA.
// NOTE(review): this file is generated by protoc-gen-gogo ("DO NOT EDIT");
// any comments added here are lost when the file is regenerated from
// kvrpcpb.proto.
func (m *CheckLeaderResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key (tag<<3 | wiretype) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CheckLeaderResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CheckLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Regions (repeated uint64). Both the unpacked
			// (wire type 0, one varint per element) and packed
			// (wire type 2, length-delimited run of varints) encodings
			// are accepted, per proto3 conventions.
			if wireType == 0 {
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.Regions = append(m.Regions, v)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthKvrpcpb
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Consume varints until the packed run is exhausted.
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowKvrpcpb
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (uint64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.Regions = append(m.Regions, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType)
			}
		case 2:
			// Field 2: Ts (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ts", wireType)
			}
			m.Ts = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Ts |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep its raw bytes in
			// XXX_unrecognized so they round-trip on re-marshal.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format contained in dAtA.
// NOTE(review): generated by protoc-gen-gogo ("DO NOT EDIT"); comments
// here are lost on regeneration from kvrpcpb.proto.
func (m *RawGetKeyTTLRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key (tag<<3 | wiretype) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawGetKeyTTLRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawGetKeyTTLRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (embedded message), allocated lazily.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Key (bytes); copied out of dAtA so m does not
			// alias the input buffer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				// Preserve non-nil semantics for an empty bytes field.
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Cf (string, column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep its raw bytes in
			// XXX_unrecognized so they round-trip on re-marshal.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format contained in dAtA.
// NOTE(review): generated by protoc-gen-gogo ("DO NOT EDIT"); comments
// here are lost on regeneration from kvrpcpb.proto.
func (m *RawGetKeyTTLResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key (tag<<3 | wiretype) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawGetKeyTTLResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawGetKeyTTLResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (embedded errorpb.Error message),
			// allocated lazily.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Ttl (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType)
			}
			m.Ttl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Ttl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: NotFound (bool, encoded as a varint; any non-zero
			// value is treated as true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NotFound = bool(v != 0)
		default:
			// Unknown field: skip it and keep its raw bytes in
			// XXX_unrecognized so they round-trip on re-marshal.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format contained in dAtA.
// NOTE(review): generated by protoc-gen-gogo ("DO NOT EDIT"); comments
// here are lost on regeneration from kvrpcpb.proto.
func (m *RawCASRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key (tag<<3 | wiretype) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawCASRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawCASRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Context (embedded message), allocated lazily.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Context == nil {
				m.Context = &Context{}
			}
			if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Key (bytes); copied so m does not alias dAtA.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				// Preserve non-nil semantics for an empty bytes field.
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Value (bytes); copied so m does not alias dAtA.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: PreviousNotExist (bool, varint; non-zero => true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field PreviousNotExist", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.PreviousNotExist = bool(v != 0)
		case 5:
			// Field 5: PreviousValue (bytes); copied so m does not alias dAtA.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PreviousValue", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PreviousValue = append(m.PreviousValue[:0], dAtA[iNdEx:postIndex]...)
			if m.PreviousValue == nil {
				m.PreviousValue = []byte{}
			}
			iNdEx = postIndex
		case 6:
			// Field 6: Cf (string, column family name).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Cf = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 7:
			// Field 7: Ttl (uint64 varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType)
			}
			m.Ttl = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Ttl |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it and keep its raw bytes in
			// XXX_unrecognized so they round-trip on re-marshal.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format contained in dAtA.
// NOTE(review): generated by protoc-gen-gogo ("DO NOT EDIT"); comments
// here are lost on regeneration from kvrpcpb.proto.
func (m *RawCASResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key (tag<<3 | wiretype) as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RawCASResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RawCASResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: RegionError (embedded errorpb.Error message),
			// allocated lazily.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RegionError == nil {
				m.RegionError = &errorpb.Error{}
			}
			if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Error (string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Error = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Value (bytes); copied so m does not alias dAtA.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				// Preserve non-nil semantics for an empty bytes field.
				m.Value = []byte{}
			}
			iNdEx = postIndex
		case 4:
			// Field 4: NotEqual (bool, varint; non-zero => true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NotEqual", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NotEqual = bool(v != 0)
		default:
			// Unknown field: skip it and keep its raw bytes in
			// XXX_unrecognized so they round-trip on re-marshal.
			iNdEx = preIndex
			skippy, err := skipKvrpcpb(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthKvrpcpb
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipKvrpcpb returns the total number of bytes occupied by the complete
// wire-format field (key varint plus payload) that starts at dAtA[0].
// The generated Unmarshal methods use it to skip over unknown fields and
// to copy them verbatim into XXX_unrecognized.
// NOTE(review): generated by protoc-gen-gogo ("DO NOT EDIT"); comments
// here are lost on regeneration.
func skipKvrpcpb(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowKvrpcpb
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: advance past bytes until the
			// continuation bit is clear.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			// Fixed64 payload: 8 bytes. The caller bounds-checks the
			// returned length against the remaining buffer.
			iNdEx += 8
			return iNdEx, nil
		case 2:
			// Length-delimited payload: a varint length followed by
			// that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowKvrpcpb
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			// A negative length still errors out before iNdEx is used.
			if length < 0 {
				return 0, ErrInvalidLengthKvrpcpb
			}
			return iNdEx, nil
		case 3:
			// Start-group: recursively skip nested fields until the
			// matching end-group marker (wire type 4) is found.
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowKvrpcpb
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipKvrpcpb(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			// End-group marker: zero-length payload.
			return iNdEx, nil
		case 5:
			// Fixed32 payload: 4 bytes.
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	// Every switch arm returns, so the loop body runs at most once.
	panic("unreachable")
}
// Sentinel errors shared by the generated Unmarshal methods and
// skipKvrpcpb in this file.
var (
	ErrInvalidLengthKvrpcpb = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowKvrpcpb   = fmt.Errorf("proto: integer overflow")
)
func | () { proto.RegisterFile("kvrpcpb.proto", fileDescriptor_kvrpcpb_aa18ec4cb240d160) }
var fileDescriptor_kvrpcpb_aa18ec4cb240d160 = []byte{
// 4337 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x3c, 0x5d, 0x6f, 0x1c, 0xc9,
0x71, 0x9a, 0xdd, 0xd9, 0xaf, 0xda, 0x0f, 0x0e, 0x9b, 0xa4, 0xb4, 0x94, 0xee, 0x24, 0x6a, 0x12,
0x9d, 0x78, 0xb4, 0x8f, 0x8a, 0xe9, 0xcb, 0x97, 0x63, 0xd8, 0x77, 0xa2, 0x74, 0x27, 0xde, 0x51,
0x22, 0x31, 0xdc, 0xa3, 0x71, 0x40, 0x72, 0xe3, 0xe1, 0x6c, 0x93, 0x9c, 0x70, 0x76, 0x66, 0xd4,
0xd3, 0xbb, 0xda, 0xb5, 0x61, 0xc0, 0xce, 0x17, 0x60, 0x20, 0xb1, 0xe3, 0x4b, 0x00, 0x1b, 0x48,
0x90, 0x07, 0x03, 0x0e, 0x92, 0xbc, 0x25, 0xef, 0x41, 0x5e, 0xf2, 0x90, 0x97, 0x20, 0x46, 0x9e,
0x82, 0xe4, 0x21, 0xb1, 0xf2, 0x1b, 0xf2, 0x1e, 0xf4, 0xd7, 0x7c, 0xec, 0xae, 0x28, 0xde, 0x1e,
0xc9, 0x33, 0xfc, 0xc4, 0xed, 0xaa, 0xea, 0xe9, 0xaa, 0xea, 0xea, 0xaa, 0xea, 0x9a, 0x1a, 0x42,
0xf3, 0x64, 0x40, 0x22, 0x37, 0x3a, 0x58, 0x8f, 0x48, 0x48, 0x43, 0x54, 0x91, 0xc3, 0xeb, 0x8d,
0x1e, 0xa6, 0x8e, 0x02, 0x5f, 0x6f, 0x62, 0x42, 0x42, 0x92, 0x0c, 0x17, 0x8f, 0xc2, 0xa3, 0x90,
0xff, 0xbc, 0xc7, 0x7e, 0x49, 0xe8, 0x1c, 0xe9, 0xc7, 0x94, 0xff, 0x14, 0x00, 0xb3, 0x0b, 0xf0,
0x2e, 0xa6, 0x16, 0x7e, 0xda, 0xc7, 0x31, 0x45, 0x6b, 0x50, 0x71, 0xc3, 0x80, 0xe2, 0x21, 0x6d,
0x6b, 0x2b, 0xda, 0x6a, 0x7d, 0xc3, 0x58, 0x57, 0x6b, 0x6f, 0x0a, 0xb8, 0xa5, 0x08, 0x90, 0x01,
0xc5, 0x13, 0x3c, 0x6a, 0x17, 0x56, 0xb4, 0xd5, 0x86, 0xc5, 0x7e, 0xa2, 0x36, 0x54, 0x06, 0x98,
0xc4, 0x5e, 0x18, 0xb4, 0x8b, 0x2b, 0xda, 0xaa, 0x6e, 0xa9, 0xa1, 0xf9, 0x5c, 0x83, 0x3a, 0x5f,
0x26, 0x8e, 0xc2, 0x20, 0xc6, 0xe8, 0x0b, 0xd0, 0x20, 0xf8, 0xc8, 0x0b, 0x03, 0x9b, 0x33, 0x2d,
0x17, 0x6b, 0xad, 0x2b, 0x11, 0x1e, 0xb2, 0xbf, 0x56, 0x5d, 0xd0, 0xf0, 0x01, 0xba, 0x0b, 0x25,
0x41, 0x5b, 0xe0, 0xb4, 0xf3, 0x09, 0x63, 0xef, 0xe3, 0x91, 0x20, 0x17, 0x78, 0xb4, 0x08, 0xa5,
0x81, 0xe3, 0xf7, 0x31, 0xe7, 0xa1, 0x61, 0x89, 0x01, 0xba, 0x01, 0xb5, 0x20, 0xa4, 0xf6, 0x61,
0xd8, 0x0f, 0xba, 0x6d, 0x7d, 0x45, 0x5b, 0xad, 0x5a, 0xd5, 0x20, 0xa4, 0xef, 0xb0, 0x31, 0xfa,
0x0a, 0xcc, 0xe1, 0x21, 0x76, 0xed, 0x2e, 0xa6, 0x8e, 0xe7, 0xc7, 0xf6, 0x60, 0xa3, 0x5d, 0xe6,
0xab, 0x5c, 0x4d, 0x56, 0x79, 0x38, 0xc4, 0xee, 0x03, 0x81, 0xde, 0xdf, 0xb0, 0x9a, 0x38, 0x3b,
0x7c, 0x4f, 0xaf, 0x96, 0x8c, 0xb2, 0xf9, 0x7f, 0x1a, 0xd4, 0xf7, 0x5c, 0x27, 0x98, 0x45, 0x99,
0x37, 0xa0, 0x16, 0x53, 0x87, 0x50, 0x3b, 0x55, 0x69, 0x95, 0x03, 0xde, 0xc7, 0x23, 0x26, 0x91,
0xef, 0xf5, 0x3c, 0xca, 0x25, 0x6a, 0x5a, 0x62, 0x90, 0xd5, 0xb6, 0x9e, 0xd3, 0x36, 0x5a, 0x86,
0xea, 0x09, 0x1e, 0xd9, 0x61, 0xe0, 0x8f, 0xda, 0x25, 0x2e, 0x6a, 0xe5, 0x04, 0x8f, 0x76, 0x02,
0x9f, 0x6f, 0x11, 0xc1, 0x8c, 0x0e, 0x73, 0x09, 0xab, 0x96, 0x1a, 0xa2, 0x6b, 0x50, 0xc1, 0x41,
0x97, 0xaf, 0x5f, 0xe1, 0xeb, 0x97, 0x71, 0xd0, 0x65, 0xab, 0xdf, 0x82, 0x7a, 0xec, 0xf4, 0x22,
0x1f, 0xdb, 0x31, 0xc5, 0x51, 0xbb, 0xca, 0x79, 0x00, 0x01, 0xda, 0xa3, 0x38, 0x32, 0x7f, 0xa0,
0x41, 0x43, 0xc8, 0x3d, 0xfb, 0xee, 0xde, 0x81, 0x52, 0xe4, 0x78, 0x24, 0x6e, 0x17, 0x56, 0x8a,
0xab, 0xf5, 0x8d, 0xb9, 0x74, 0x77, 0x07, 0xbb, 0x8e, 0x47, 0x2c, 0x81, 0x4d, 0x8d, 0xa0, 0x78,
0xba, 0x11, 0x98, 0x7f, 0xad, 0xc3, 0xdc, 0x2e, 0xc1, 0xcf, 0x88, 0x47, 0xf1, 0x2c, 0xfb, 0x71,
0x0f, 0x6a, 0xbd, 0x3e, 0x75, 0xa8, 0x17, 0x06, 0x8a, 0xa7, 0x74, 0xb1, 0xc7, 0x12, 0x63, 0xa5,
0x34, 0xe8, 0x36, 0x34, 0x22, 0xe2, 0xf5, 0x1c, 0x32, 0xb2, 0xfd, 0xd0, 0x3d, 0x91, 0xc6, 0x57,
0x97, 0xb0, 0xed, 0xd0, 0x3d, 0x41, 0xbf, 0x04, 0x4d, 0xb1, 0xc7, 0xf9, 0x6d, 0x6b, 0x70, 0xe0,
0x7e, 0xba, 0x77, 0x6c, 0xbe, 0x4d, 0xa9, 0xcf, 0xf7, 0x4e, 0xb7, 0x2a, 0x6c, 0xdc, 0xa1, 0x3e,
0xda, 0x80, 0xa5, 0xf8, 0xc4, 0x8b, 0x6c, 0x37, 0x0c, 0x62, 0x4a, 0x1c, 0x2f, 0xa0, 0xb6, 0x7b,
0x8c, 0xdd, 0x13, 0xb9, 0x93, 0x0b, 0x0c, 0xb9, 0x99, 0xe0, 0x36, 0x19, 0x0a, 0xad, 0xc3, 0x82,
0x17, 0xdb, 0x11, 0x8e, 0x63, 0xaf, 0xe7, 0xc5, 0xd4, 0x73, 0x05, 0x77, 0x95, 0x95, 0xe2, 0x6a,
0xd5, 0x9a, 0xf7, 0xe2, 0xdd, 0x14, 0xc3, 0x79, 0x5c, 0x86, 0x2a, 0x1d, 0x06, 0x76, 0xec, 0x7d,
0x03, 0xf3, 0x9d, 0xd6, 0xad, 0x0a, 0x1d, 0x06, 0x7b, 0xde, 0x37, 0x30, 0x32, 0xa1, 0x79, 0x18,
0x12, 0xbb, 0x1f, 0x75, 0x1d, 0x8a, 0x6d, 0x1a, 0xb7, 0x6b, 0x1c, 0x5f, 0x3f, 0x0c, 0xc9, 0x07,
0x1c, 0xd6, 0x89, 0x19, 0x4d, 0xcf, 0x0b, 0x6c, 0x37, 0xec, 0xf5, 0x3c, 0xca, 0x68, 0x40, 0xd0,
0xf4, 0xbc, 0x60, 0x93, 0xc3, 0x3a, 0x31, 0x5a, 0x05, 0xa3, 0x1f, 0x63, 0xdb, 0x89, 0x47, 0x81,
0x2b, 0x29, 0xdb, 0x75, 0x2e, 0x41, 0xab, 0x1f, 0xe3, 0xb7, 0x19, 0x58, 0xd0, 0xa2, 0x15, 0xa8,
0xc7, 0xd8, 0x0d, 0x83, 0xae, 0x43, 0x3c, 0x1c, 0xb7, 0x1b, 0x2b, 0x45, 0xa6, 0xd2, 0x0c, 0x08,
0xbd, 0x02, 0x40, 0x09, 0xb3, 0x74, 0x6c, 0x47, 0x6e, 0xbb, 0x29, 0x8e, 0x35, 0x25, 0xa3, 0x9d,
0x00, 0xef, 0xba, 0x9c, 0x1b, 0x67, 0x98, 0xe1, 0xa6, 0x25, 0xb9, 0x71, 0x86, 0x8a, 0x1b, 0xf3,
0x1f, 0x35, 0x30, 0x52, 0x43, 0x99, 0xdd, 0x80, 0x5f, 0x87, 0x32, 0xc7, 0x4e, 0x5a, 0x4b, 0x62,
0x9a, 0x92, 0x60, 0x52, 0x49, 0xc5, 0x49, 0x25, 0xdd, 0x05, 0x43, 0x08, 0x95, 0x21, 0x13, 0xe6,
0xd2, 0x0c, 0x99, 0x6c, 0x09, 0xff, 0x3f, 0x2e, 0xc2, 0xd5, 0xb1, 0x4d, 0xfc, 0x45, 0xb1, 0xf7,
0x09, 0x83, 0x2b, 0x4f, 0x35, 0x38, 0x2f, 0xb6, 0x0f, 0x3d, 0x12, 0x53, 0x65, 0xd9, 0xcc, 0x06,
0xea, 0x5e, 0xfc, 0x0e, 0x83, 0x71, 0x3e, 0x6e, 0x43, 0xe3, 0x99, 0xc3, 0x54, 0xe8, 0xf5, 0x70,
0xd8, 0xa7, 0xdc, 0xae, 0x8b, 0x56, 0x9d, 0xc1, 0x3a, 0x02, 0xc4, 0x3c, 0xec, 0x61, 0x48, 0x5c,
0xcc, 0x6d, 0xba, 0x6a, 0x89, 0x01, 0x13, 0x80, 0x60, 0xda, 0x27, 0x81, 0xcd, 0x63, 0x88, 0xb0,
0xe6, 0xaa, 0xd5, 0x10, 0xc0, 0x7d, 0x0e, 0x9b, 0xdc, 0xcd, 0xfa, 0xc4, 0x6e, 0x9a, 0x3f, 0xd3,
0xe0, 0xda, 0xc4, 0x26, 0x5d, 0x8a, 0xad, 0xdd, 0x80, 0xda, 0xb8, 0x9d, 0x55, 0x5d, 0x65, 0x64,
0x49, 0xa4, 0xd4, 0xb3, 0x91, 0xf2, 0x2a, 0x94, 0xa5, 0xb8, 0x25, 0x7e, 0xe0, 0xe4, 0x08, 0xbd,
0x0a, 0x90, 0x44, 0x50, 0xb6, 0x17, 0xcc, 0x83, 0xd4, 0x54, 0x08, 0x8d, 0xcd, 0x9f, 0x68, 0x70,
0x3d, 0x23, 0xa3, 0x15, 0xfa, 0xfe, 0x81, 0x33, 0x9b, 0x31, 0x4e, 0x18, 0x4e, 0x61, 0x8a, 0xe1,
0x4c, 0x58, 0x47, 0x71, 0xd2, 0x3a, 0x10, 0xe8, 0x27, 0x78, 0xc4, 0x4e, 0x0e, 0x13, 0x84, 0xff,
0x36, 0xbf, 0x09, 0x37, 0xa6, 0xb2, 0x79, 0x19, 0xdb, 0x61, 0xfe, 0xbd, 0x06, 0x0b, 0x9d, 0x61,
0xf0, 0x08, 0x3b, 0x84, 0xde, 0xc7, 0xce, 0x4c, 0x79, 0xd7, 0xf8, 0xc9, 0x2b, 0x9c, 0xe1, 0xe4,
0x15, 0xa7, 0x28, 0xf0, 0x35, 0x98, 0x73, 0xba, 0x03, 0x2f, 0xc6, 0x76, 0x72, 0x00, 0xa5, 0x87,
0x11, 0xe0, 0x6d, 0x71, 0x0c, 0xcd, 0x3f, 0xd1, 0x60, 0x31, 0xcf, 0xf3, 0x25, 0x24, 0x71, 0x59,
0xb7, 0x50, 0xcc, 0xb9, 0x05, 0xf3, 0x67, 0x05, 0x58, 0xe2, 0xc1, 0xad, 0x33, 0x0c, 0xf6, 0xa8,
0x43, 0xfb, 0xf1, 0x2c, 0x5a, 0xbc, 0x05, 0x4a, 0x63, 0x99, 0x94, 0x0b, 0x24, 0x88, 0xa5, 0x3d,
0xd7, 0xa0, 0x22, 0x38, 0x50, 0x96, 0x55, 0xe6, 0x0c, 0xc4, 0x4c, 0x6f, 0xae, 0xe3, 0xfb, 0x98,
0xd8, 0x42, 0xc7, 0xa9, 0x67, 0x16, 0xe0, 0x3d, 0x06, 0xed, 0xf0, 0xf3, 0xe2, 0xf6, 0x09, 0xc1,
0x01, 0x27, 0x11, 0xbe, 0xad, 0x26, 0x21, 0x9d, 0x18, 0x7d, 0x01, 0x96, 0x88, 0x34, 0x3e, 0xdb,
0x3b, 0xb4, 0xd9, 0xd1, 0xc2, 0x43, 0x2f, 0xa6, 0x32, 0x9a, 0x23, 0x85, 0xdc, 0x3a, 0x7c, 0x12,
0xd2, 0x87, 0x0c, 0x83, 0xd6, 0x60, 0x9e, 0x3b, 0x26, 0x3b, 0x1b, 0x3a, 0x85, 0xc3, 0x9b, 0xe3,
0x88, 0xbd, 0x34, 0x76, 0x7e, 0x19, 0xae, 0x13, 0x1c, 0x87, 0xfe, 0xc0, 0x0b, 0x8e, 0x26, 0xe3,
0x7f, 0x95, 0x4f, 0x6a, 0x27, 0x14, 0x63, 0xce, 0xc9, 0xfc, 0x7e, 0x01, 0xae, 0x8e, 0xeb, 0xf8,
0x33, 0xdd, 0x75, 0x74, 0x07, 0x5a, 0xd2, 0x91, 0xe5, 0xa3, 0x49, 0x53, 0x40, 0x95, 0x51, 0xdf,
0x85, 0xb2, 0xe3, 0xb2, 0x08, 0xc5, 0x15, 0xde, 0xca, 0x24, 0x92, 0x6f, 0x73, 0xb0, 0x25, 0xd1,
0x68, 0x1d, 0x6a, 0x7c, 0x29, 0x2f, 0x38, 0x0c, 0x65, 0xb2, 0x9f, 0xf2, 0xc5, 0x74, 0xb0, 0x15,
0x1c, 0x86, 0x16, 0x67, 0x87, 0xfd, 0x32, 0xbf, 0xa3, 0xc1, 0x75, 0xae, 0x91, 0x3d, 0x99, 0x7e,
0xf0, 0x93, 0x36, 0x93, 0xe9, 0x29, 0xaf, 0x54, 0x48, 0xbd, 0xd2, 0x99, 0x4e, 0xac, 0xf9, 0x4f,
0x1a, 0xdc, 0x98, 0xca, 0xc3, 0x25, 0x6c, 0xcd, 0x5d, 0x28, 0x31, 0x5d, 0xb0, 0xc3, 0x50, 0x9c,
0xae, 0x2b, 0x81, 0xcf, 0x47, 0x1c, 0x3d, 0x1f, 0x71, 0xcc, 0xbf, 0xd5, 0xa0, 0x29, 0x0c, 0xf4,
0xc2, 0xe2, 0x82, 0xd2, 0x6e, 0x31, 0xa3, 0xdd, 0xb3, 0x19, 0x8f, 0xb8, 0xc6, 0x59, 0xe5, 0x03,
0x2f, 0xf0, 0xc3, 0x23, 0xf3, 0xcf, 0x34, 0x68, 0x29, 0x5e, 0x2f, 0x41, 0xc1, 0x93, 0x3c, 0x16,
0xa7, 0xf0, 0x68, 0x1e, 0x41, 0x73, 0xab, 0x17, 0x85, 0x24, 0x51, 0x60, 0x2e, 0x73, 0xd3, 0xce,
0x90, 0xb9, 0x4d, 0x2e, 0x54, 0x98, 0xb6, 0xd0, 0x87, 0xd0, 0x52, 0x0b, 0xcd, 0x2e, 0xfd, 0x62,
0x56, 0xfa, 0x9a, 0xba, 0x9c, 0x7d, 0xcc, 0x34, 0xeb, 0x63, 0x27, 0xe8, 0x47, 0xe7, 0x53, 0x78,
0x38, 0x53, 0xbc, 0xcb, 0xfb, 0x63, 0x7d, 0xcc, 0x1f, 0x9b, 0x7f, 0xae, 0xc1, 0x5c, 0xc2, 0xd4,
0xcf, 0xcf, 0x7e, 0x9f, 0xc0, 0xdc, 0x7d, 0x87, 0xba, 0xc7, 0x33, 0x16, 0x69, 0xa6, 0xf9, 0x9a,
0x17, 0x97, 0x69, 0xfe, 0x4b, 0x03, 0x23, 0x5d, 0xed, 0xc2, 0x6f, 0xf3, 0x53, 0xca, 0x2e, 0xfa,
0x27, 0x28, 0xbb, 0xa4, 0xba, 0x2e, 0x9d, 0xae, 0xeb, 0xf7, 0xf4, 0x6a, 0xd1, 0xd0, 0xcd, 0x6f,
0xc2, 0x22, 0x17, 0xee, 0xc2, 0x53, 0xd3, 0x29, 0x2e, 0xc8, 0x8c, 0x61, 0x69, 0x6c, 0xf1, 0x8b,
0xb7, 0x31, 0xf3, 0xef, 0x34, 0x98, 0xdb, 0x73, 0x9d, 0x60, 0xd6, 0x5b, 0xe1, 0x2d, 0x60, 0x77,
0xe5, 0x31, 0x59, 0xa1, 0xe7, 0x0c, 0x95, 0xa4, 0xb9, 0xb2, 0x55, 0xf1, 0x45, 0x65, 0x2b, 0x3d,
0x5b, 0xb6, 0xca, 0xd4, 0x99, 0x4a, 0xd9, 0x3a, 0x93, 0xf9, 0x43, 0x0d, 0x8c, 0x94, 0xd9, 0x9f,
0xa3, 0x90, 0x66, 0xfe, 0x9b, 0x06, 0xc8, 0xe2, 0xa9, 0x12, 0x9e, 0x55, 0x93, 0x67, 0xb2, 0x9b,
0xb3, 0xb9, 0x04, 0xf4, 0x06, 0xd4, 0xe8, 0x30, 0xe0, 0x99, 0x8b, 0xb8, 0xda, 0x64, 0x57, 0xee,
0x0c, 0x03, 0x91, 0xb9, 0x50, 0xf1, 0x23, 0xbd, 0x04, 0x95, 0x32, 0xd6, 0xf8, 0x14, 0x16, 0x72,
0x02, 0x5d, 0x82, 0x2d, 0xee, 0x43, 0xed, 0xdd, 0xcd, 0x59, 0x54, 0xf7, 0x2a, 0x40, 0xec, 0x1c,
0x62, 0x3b, 0x0a, 0xbd, 0x80, 0x4a, 0xbd, 0xd5, 0x18, 0x64, 0x97, 0x01, 0xcc, 0x63, 0x00, 0xf6,
0xdc, 0x4b, 0x90, 0xe0, 0x87, 0x1a, 0xa0, 0x07, 0xd8, 0xc7, 0x14, 0x5b, 0x4e, 0x70, 0x84, 0xcf,
0xbd, 0xcc, 0x9b, 0x39, 0x19, 0xc5, 0xf1, 0x0a, 0x6c, 0x10, 0x52, 0xef, 0x50, 0x96, 0x74, 0x45,
0xf5, 0x1a, 0x04, 0x68, 0x27, 0xf0, 0x47, 0xe6, 0x47, 0xb0, 0x90, 0x63, 0xec, 0xbc, 0x03, 0xf6,
0xef, 0x40, 0xd3, 0x72, 0x9e, 0x9d, 0xdb, 0x7b, 0x82, 0x16, 0x14, 0xdc, 0x43, 0x2e, 0x63, 0xcd,
0x2a, 0xb8, 0x87, 0xe6, 0x1f, 0x6b, 0xd0, 0x52, 0xcf, 0x3f, 0x67, 0xd6, 0x67, 0x78, 0x1b, 0x60,
0x76, 0x01, 0x59, 0xce, 0xb3, 0xf3, 0x8e, 0xba, 0xe3, 0x42, 0x87, 0xb0, 0x90, 0x5b, 0xe5, 0xa2,
0xa3, 0xad, 0xf9, 0x57, 0x1a, 0xdf, 0xc5, 0xdd, 0xfe, 0x39, 0xed, 0xe2, 0x74, 0xcd, 0x0a, 0x31,
0x75, 0x25, 0x26, 0x9b, 0x97, 0x96, 0xf6, 0xd8, 0x4f, 0x66, 0xe6, 0x87, 0x21, 0xb1, 0x5d, 0x27,
0x96, 0x57, 0xdd, 0xf2, 0x61, 0x48, 0x36, 0x9d, 0x98, 0x65, 0x9c, 0x8a, 0xbf, 0xf3, 0x36, 0xe0,
0x1f, 0x6b, 0xe9, 0x9e, 0xce, 0xa8, 0x80, 0x33, 0xe6, 0x34, 0x63, 0xdb, 0xac, 0xe4, 0xd7, 0xa7,
0xca, 0x5f, 0xca, 0xc9, 0xff, 0x51, 0x6a, 0x11, 0x17, 0xa2, 0x84, 0x11, 0x18, 0x96, 0xf3, 0x4c,
0x3a, 0x8a, 0x8b, 0x38, 0xc8, 0x59, 0xd1, 0xf4, 0x9c, 0x68, 0xbf, 0x0d, 0xf3, 0x99, 0xa5, 0xcf,
0x5b, 0xb0, 0x6f, 0x6b, 0xb0, 0xa4, 0x34, 0x37, 0xbb, 0x78, 0x67, 0x38, 0xb4, 0x2f, 0x16, 0xd0,
0x81, 0xab, 0xe3, 0x1c, 0x9c, 0xb7, 0x94, 0xff, 0x2a, 0xbc, 0xe4, 0x25, 0xbe, 0x61, 0xcc, 0xbe,
0x47, 0xd4, 0xf3, 0xef, 0x11, 0x85, 0x62, 0x4a, 0x89, 0x62, 0x3e, 0xf9, 0x7b, 0x45, 0xf3, 0x08,
0xe6, 0x12, 0x71, 0x66, 0xd7, 0xd5, 0x6d, 0x28, 0x9e, 0x0c, 0x5e, 0x78, 0x28, 0x19, 0xce, 0xfc,
0xae, 0x30, 0x8f, 0xcf, 0x24, 0x74, 0x8f, 0xb9, 0x43, 0x69, 0x27, 0x17, 0x1a, 0xac, 0x7f, 0xaa,
0xa5, 0x7e, 0x64, 0x56, 0x63, 0x79, 0x1d, 0xca, 0x84, 0x71, 0x37, 0xb5, 0xa4, 0x2d, 0xf8, 0x96,
0x04, 0x2c, 0x3d, 0xc3, 0x8e, 0x7b, 0x6c, 0x67, 0xed, 0xa7, 0xc6, 0x20, 0xdb, 0xe7, 0x66, 0x43,
0xa6, 0x0f, 0x8b, 0x79, 0x89, 0x2e, 0xd4, 0x5e, 0xbe, 0x05, 0xcb, 0x1f, 0x04, 0x2c, 0xc1, 0x7c,
0x80, 0x63, 0x4a, 0xc2, 0xd1, 0xe5, 0x9a, 0x8c, 0x89, 0xe1, 0xfa, 0xb4, 0xe5, 0xcf, 0xdb, 0x4c,
0xbe, 0x0e, 0x37, 0x2c, 0x7c, 0xe4, 0xc5, 0x14, 0x13, 0x76, 0x07, 0xd8, 0x39, 0x88, 0x31, 0x19,
0x60, 0x32, 0x8b, 0x9c, 0x4b, 0x50, 0x66, 0xd7, 0x44, 0x1a, 0xcb, 0xec, 0xbc, 0xd4, 0x73, 0x86,
0x9d, 0xd8, 0x7c, 0x13, 0x5e, 0x99, 0xbe, 0x82, 0x14, 0x25, 0xe1, 0x4b, 0xcb, 0xe7, 0x9a, 0x6d,
0x5e, 0xe3, 0xbc, 0x20, 0xa6, 0xfa, 0xb0, 0x3c, 0xe5, 0xf1, 0xa7, 0x71, 0xc4, 0x4c, 0xd8, 0x8b,
0x6d, 0xd7, 0xc7, 0x8e, 0xb8, 0xb6, 0x55, 0xad, 0x8a, 0x17, 0xf3, 0x52, 0xd1, 0xd9, 0xaf, 0x90,
0x1f, 0xc1, 0xb2, 0x85, 0x7b, 0xa1, 0xb8, 0x6f, 0x5d, 0x80, 0x58, 0x1b, 0x70, 0x7d, 0xda, 0xf3,
0x4f, 0xd5, 0xf4, 0xf7, 0x35, 0xb8, 0xb6, 0x7b, 0x3c, 0x8a, 0x3d, 0xd7, 0xf1, 0x3f, 0x4d, 0x95,
0x60, 0x3a, 0x4b, 0x33, 0xd4, 0x06, 0xcc, 0x0f, 0xa1, 0x3d, 0xc9, 0xd0, 0xa9, 0x7b, 0x93, 0x6c,
0x40, 0xe1, 0x25, 0x1b, 0xf0, 0x6d, 0x0d, 0xd0, 0x5e, 0xe4, 0x7b, 0xd4, 0xe2, 0x27, 0x63, 0xb6,
0x6a, 0x48, 0x2d, 0x66, 0x4f, 0x48, 0x8f, 0xf3, 0xfd, 0x42, 0x5b, 0xb3, 0xaa, 0x1c, 0xc8, 0x84,
0x62, 0x37, 0x55, 0x45, 0xa0, 0xaa, 0x3f, 0x35, 0x85, 0x8d, 0xcd, 0x7f, 0xd6, 0x60, 0x21, 0xc7,
0xc2, 0xec, 0x47, 0xfa, 0x35, 0xd0, 0x7d, 0x7c, 0x48, 0xe5, 0x95, 0xb5, 0xb5, 0x2e, 0x1b, 0xc1,
0xc4, 0x83, 0x39, 0x57, 0x1c, 0x8f, 0x56, 0xa1, 0x44, 0xbc, 0xa3, 0x63, 0x2a, 0xfb, 0x65, 0xa6,
0x11, 0x0a, 0x02, 0xb4, 0xca, 0x9c, 0xef, 0x11, 0x2f, 0x22, 0x8b, 0x92, 0xc2, 0x18, 0xad, 0xa5,
0xd0, 0xe6, 0x1f, 0x68, 0x60, 0x58, 0xd8, 0xe9, 0x6e, 0x05, 0x5d, 0x3c, 0x9c, 0x45, 0x8f, 0xcb,
0x50, 0x4d, 0xde, 0x9c, 0x09, 0x8b, 0xa9, 0xc4, 0xf2, 0x9d, 0x59, 0x1a, 0x77, 0x8a, 0x2f, 0x89,
0x3b, 0xe6, 0x9f, 0x6a, 0x30, 0x9f, 0x61, 0x63, 0x76, 0x5d, 0xbe, 0x0a, 0x40, 0xb0, 0xd3, 0xb5,
0x3d, 0xf6, 0x20, 0x55, 0x5f, 0x20, 0xea, 0xc9, 0x8c, 0x25, 0x66, 0x41, 0xb8, 0x3b, 0xd1, 0x73,
0x94, 0x98, 0x98, 0x24, 0x30, 0x3f, 0x80, 0xfa, 0x3e, 0x26, 0xaa, 0xe6, 0x8e, 0x6e, 0x42, 0x21,
0x8c, 0x38, 0x07, 0xad, 0x8d, 0x56, 0x32, 0x6b, 0x1f, 0x93, 0x9d, 0xc8, 0x2a, 0x84, 0xd1, 0x59,
0xaf, 0x54, 0xe6, 0x97, 0xa0, 0xba, 0x8f, 0x09, 0x6f, 0x37, 0x48, 0x29, 0xb4, 0xec, 0xa5, 0x2b,
0x53, 0xd1, 0x2d, 0xe4, 0x2b, 0xba, 0x2b, 0x7c, 0xee, 0x58, 0x1c, 0xc8, 0x79, 0x81, 0x10, 0x6a,
0xfb, 0x98, 0x88, 0xf8, 0x97, 0xd6, 0x42, 0xb4, 0x31, 0x59, 0xd5, 0x43, 0xd4, 0xb9, 0x9b, 0xe4,
0xfd, 0x6e, 0x96, 0xf7, 0xb1, 0xa9, 0x9c, 0x77, 0x25, 0x0e, 0x81, 0xe6, 0x3e, 0x26, 0xe7, 0x56,
0x4c, 0x38, 0xd3, 0x9b, 0xb3, 0x7f, 0xd0, 0xa0, 0xa5, 0x16, 0xbd, 0x80, 0x4a, 0xd1, 0xb8, 0x76,
0xce, 0xaa, 0x8b, 0xd3, 0xeb, 0x10, 0xcf, 0x00, 0xed, 0x63, 0xf2, 0x69, 0xea, 0x10, 0x89, 0xb6,
0x8a, 0x9f, 0x48, 0x5b, 0x04, 0x16, 0x72, 0x0b, 0xcf, 0xae, 0xb1, 0xd5, 0xfc, 0xa5, 0x19, 0x65,
0x15, 0x91, 0xaf, 0x4e, 0x7c, 0x8b, 0x5b, 0xc5, 0xe3, 0xd9, 0xee, 0xe6, 0xaf, 0x41, 0xb1, 0xd7,
0x57, 0xde, 0x70, 0x31, 0xbb, 0x48, 0xf2, 0x02, 0x8c, 0x11, 0x9c, 0xf2, 0xe6, 0xc3, 0xe7, 0xf6,
0xf1, 0xb8, 0x7f, 0x29, 0xf6, 0xc1, 0x5c, 0x68, 0xb2, 0xb5, 0x33, 0x8a, 0xbc, 0x0a, 0x7a, 0xaf,
0x4f, 0x95, 0x62, 0xa7, 0xcb, 0xcc, 0x29, 0x4e, 0x11, 0xfa, 0x69, 0xba, 0xcf, 0x97, 0x25, 0xf9,
0x73, 0x71, 0x10, 0x2f, 0xe4, 0x12, 0xfb, 0xc2, 0x4b, 0xd8, 0xf4, 0x17, 0x11, 0x33, 0x75, 0xc9,
0x4e, 0x9c, 0x9f, 0xca, 0x94, 0xf3, 0x13, 0xc0, 0x5c, 0x22, 0xe3, 0x65, 0x9c, 0x9d, 0x11, 0x2c,
0xed, 0x63, 0xf2, 0x59, 0xdc, 0x6f, 0x4d, 0x0a, 0x57, 0xc7, 0x97, 0xbe, 0x04, 0x2b, 0xda, 0x83,
0x85, 0xc7, 0x03, 0xd7, 0x7d, 0x17, 0xd3, 0xfb, 0x23, 0x96, 0x18, 0x9c, 0x47, 0x20, 0x31, 0xff,
0x48, 0x83, 0xc5, 0xfc, 0x53, 0xcf, 0xbb, 0x16, 0x7d, 0x07, 0x74, 0xde, 0x6e, 0x32, 0x1e, 0x15,
0xd8, 0xaa, 0x3c, 0x91, 0xe0, 0x68, 0xf3, 0xeb, 0x70, 0x2d, 0xe1, 0x43, 0x36, 0x13, 0x9d, 0x6f,
0x9a, 0x65, 0xfe, 0xa5, 0x06, 0xed, 0xc9, 0x25, 0xce, 0x5b, 0x5c, 0xa9, 0xe2, 0x62, 0x1a, 0xab,
0x95, 0x02, 0xf4, 0xd3, 0x15, 0xf0, 0x83, 0x12, 0x54, 0x36, 0x53, 0xb3, 0x94, 0xdc, 0x78, 0x5d,
0xce, 0x8a, 0x6e, 0x55, 0x05, 0x60, 0xab, 0x8b, 0x7e, 0x2d, 0x65, 0x35, 0x0a, 0xdd, 0x63, 0x69,
0x37, 0x0b, 0xf9, 0xcc, 0xf5, 0x21, 0x43, 0x25, 0xfc, 0xb2, 0x01, 0x5a, 0x01, 0x3d, 0xc2, 0x58,
0x75, 0x91, 0x37, 0x14, 0xfd, 0x2e, 0xc6, 0xc4, 0xe2, 0x18, 0x84, 0x40, 0xa7, 0x98, 0xf4, 0x64,
0xdd, 0x9a, 0xff, 0x46, 0xf7, 0xa0, 0x1a, 0x11, 0x2f, 0x24, 0x1e, 0x1d, 0x71, 0xb7, 0xd0, 0xda,
0x58, 0xc8, 0x68, 0xbf, 0xd7, 0x73, 0x82, 0xee, 0x2e, 0xf1, 0xac, 0x84, 0x08, 0xbd, 0x05, 0x73,
0x5e, 0x1c, 0xfa, 0xdc, 0x19, 0xdb, 0x3e, 0x1e, 0x60, 0x9f, 0xbb, 0x8b, 0xd6, 0xc6, 0xb5, 0x64,
0xde, 0x96, 0xc2, 0x6f, 0x33, 0xb4, 0xd5, 0xf2, 0x72, 0x63, 0xf4, 0xcb, 0xd0, 0xe2, 0xf9, 0x81,
0xe7, 0xfb, 0xb6, 0xeb, 0xb8, 0xc7, 0x58, 0x76, 0x6e, 0x35, 0x58, 0x92, 0xe0, 0xf9, 0xfe, 0x26,
0x83, 0xf1, 0x9d, 0x1e, 0x05, 0xae, 0xed, 0x87, 0x47, 0xb2, 0x81, 0xb5, 0xc2, 0xc6, 0xdb, 0xe1,
0x11, 0x5a, 0x05, 0x83, 0x60, 0x37, 0x24, 0x5d, 0xde, 0xfd, 0x6a, 0xc7, 0xd4, 0xa1, 0xb2, 0x8b,
0xb5, 0x25, 0xe0, 0x1d, 0xaf, 0x87, 0xf7, 0xa8, 0x43, 0x33, 0x94, 0xb1, 0xeb, 0x04, 0x82, 0xb2,
0x9e, 0xa5, 0x64, 0xfe, 0x8c, 0x53, 0xde, 0x66, 0x5a, 0x8f, 0x7c, 0xcf, 0x75, 0x6c, 0x96, 0x26,
0xb7, 0x1b, 0xa2, 0xe5, 0x56, 0xc2, 0x58, 0x4e, 0x8e, 0xee, 0x40, 0x4b, 0xf4, 0x96, 0xe1, 0xae,
0x2d, 0xee, 0x67, 0xcd, 0x95, 0xe2, 0xaa, 0x6e, 0x35, 0x15, 0x94, 0x37, 0x2e, 0xa1, 0xdf, 0x84,
0x65, 0x76, 0x73, 0xc4, 0x43, 0xec, 0xf6, 0xb9, 0x92, 0xba, 0x7d, 0x22, 0xb4, 0xd5, 0x53, 0xcd,
0xda, 0x57, 0x7b, 0xce, 0xf0, 0xa1, 0xc2, 0x3f, 0x90, 0xe8, 0xc7, 0xbc, 0x61, 0xca, 0x89, 0x22,
0xdf, 0xc3, 0x2a, 0x71, 0x9f, 0x13, 0x8e, 0x58, 0x02, 0x45, 0xee, 0x7e, 0x0d, 0x2a, 0xd4, 0x89,
0x4f, 0x98, 0xe9, 0x18, 0xa2, 0x87, 0x8f, 0x0d, 0xb7, 0xba, 0xfc, 0xa6, 0x46, 0x1d, 0x1f, 0x0b,
0x01, 0xe6, 0xb9, 0x00, 0x35, 0x0e, 0x61, 0xec, 0xbf, 0xa7, 0x57, 0x75, 0xa3, 0xc4, 0x24, 0x72,
0xba, 0xf6, 0xd3, 0x7e, 0x48, 0xfa, 0x3d, 0xf3, 0xbf, 0x0b, 0x50, 0x55, 0x09, 0xff, 0x44, 0x0b,
0xa6, 0x36, 0xd9, 0x82, 0x79, 0x1b, 0x1a, 0xbc, 0xbf, 0x2c, 0x9f, 0x97, 0xd7, 0x19, 0x4c, 0xbd,
0xc7, 0x9d, 0x3c, 0x1f, 0xd9, 0xfe, 0x37, 0x3d, 0xdf, 0xff, 0x96, 0x6d, 0xcc, 0x2f, 0xe5, 0x1b,
0xf3, 0x57, 0x65, 0x2b, 0x1b, 0x1d, 0x45, 0x58, 0x1a, 0x66, 0x3d, 0x31, 0xb0, 0x9d, 0x48, 0x34,
0xb1, 0x75, 0x46, 0x11, 0x46, 0x9f, 0x03, 0xc4, 0x29, 0xf3, 0x8d, 0xb3, 0x22, 0x84, 0xcd, 0x31,
0xcc, 0x3b, 0x99, 0xe6, 0xd9, 0x69, 0x7d, 0xfa, 0xd5, 0xa9, 0x7d, 0xfa, 0x13, 0x2d, 0xd0, 0xb5,
0xc9, 0x86, 0xf6, 0xb1, 0x5e, 0x7e, 0x98, 0xe8, 0xe5, 0x37, 0xff, 0xbd, 0x08, 0x55, 0xf5, 0xca,
0x35, 0x73, 0xeb, 0xd2, 0x5e, 0x72, 0xeb, 0x42, 0xaf, 0x30, 0x0f, 0x41, 0xc9, 0xc8, 0x39, 0xf0,
0xb1, 0x74, 0x40, 0x29, 0x80, 0xb9, 0x26, 0xe7, 0x20, 0x24, 0x54, 0x16, 0xf1, 0xc5, 0x00, 0x6d,
0x40, 0xd5, 0x0d, 0x83, 0x43, 0xdf, 0x73, 0xe9, 0x44, 0xcb, 0xc9, 0xd7, 0x88, 0x47, 0xf1, 0xa6,
0xc4, 0x5a, 0x09, 0x1d, 0xfa, 0x12, 0x34, 0x1d, 0x9f, 0x99, 0xc4, 0x48, 0x36, 0x6a, 0x8a, 0xae,
0x93, 0xa5, 0xb4, 0xc3, 0x50, 0x60, 0x79, 0xaf, 0xa6, 0xd5, 0x70, 0x32, 0x23, 0xf4, 0x06, 0x54,
0xbb, 0xd8, 0xe9, 0x72, 0x63, 0x19, 0x6f, 0x36, 0x7c, 0x20, 0x11, 0x56, 0x42, 0x82, 0x1e, 0xc0,
0x7c, 0xa2, 0x4c, 0x1b, 0x0f, 0x23, 0x8f, 0xe0, 0x2e, 0xdf, 0xa6, 0xfa, 0x46, 0x3b, 0xe7, 0x72,
0x98, 0x6a, 0x1f, 0x0a, 0xbc, 0x35, 0xe7, 0xe6, 0x01, 0xe8, 0x37, 0xa0, 0xc9, 0x4c, 0x26, 0xbd,
0x60, 0x54, 0xc7, 0xf2, 0xe3, 0xce, 0x30, 0x78, 0x22, 0x2f, 0x1b, 0x56, 0x9d, 0xa6, 0x03, 0xf4,
0x08, 0x16, 0xd2, 0xf5, 0x69, 0x18, 0xda, 0xbe, 0x43, 0x8e, 0x44, 0x73, 0x7c, 0x7d, 0x63, 0x79,
0x82, 0x83, 0x4e, 0x18, 0x6e, 0x33, 0x02, 0xcb, 0x70, 0xc7, 0x20, 0xe6, 0x4f, 0x34, 0x68, 0xe6,
0x14, 0x9a, 0x0b, 0x4b, 0x5a, 0xfe, 0xf6, 0x7f, 0x0b, 0xea, 0x4a, 0xdb, 0x69, 0xd0, 0x02, 0x05,
0xea, 0xc4, 0x53, 0x4e, 0x4c, 0x1b, 0x2a, 0xf2, 0xd4, 0xc9, 0x26, 0x76, 0x35, 0x44, 0x9f, 0x07,
0x94, 0x3c, 0x2c, 0xb5, 0x4c, 0x71, 0x74, 0x0c, 0x85, 0x49, 0x3a, 0xf4, 0x57, 0xa0, 0x91, 0xdd,
0x3e, 0xb5, 0x92, 0x96, 0xa6, 0x07, 0xbf, 0x0b, 0x55, 0xb5, 0x53, 0xd9, 0xde, 0x60, 0x2d, 0xd7,
0x1b, 0xac, 0x0e, 0x70, 0x9a, 0x5a, 0x70, 0x42, 0x96, 0x42, 0xad, 0xc1, 0xbc, 0xda, 0x5f, 0x86,
0xb6, 0x8f, 0x9d, 0xf8, 0x58, 0x26, 0xe4, 0x73, 0x0a, 0xf1, 0x3e, 0x1e, 0x3d, 0x72, 0xe2, 0x63,
0xf3, 0x63, 0x0d, 0xe6, 0xc6, 0xb6, 0xf7, 0x34, 0xbd, 0xad, 0xc3, 0x82, 0x43, 0x29, 0xee, 0x45,
0x14, 0x77, 0x33, 0xb2, 0x0a, 0xfd, 0xcd, 0x27, 0xa8, 0xe4, 0x2c, 0x4e, 0xaa, 0x71, 0xe2, 0x04,
0xeb, 0x93, 0x1f, 0x31, 0x6c, 0x41, 0x3d, 0x63, 0x30, 0x2f, 0xd9, 0xc7, 0x53, 0x7b, 0xab, 0xcd,
0x7b, 0x60, 0x8c, 0xdb, 0x4e, 0xbe, 0x6f, 0x54, 0x1b, 0xeb, 0x1b, 0xfd, 0x91, 0x06, 0xc0, 0x22,
0x95, 0xe8, 0xfd, 0x42, 0xaf, 0xc3, 0x3c, 0xff, 0xa2, 0xe3, 0x99, 0xe3, 0xfb, 0x22, 0xb0, 0xf5,
0xc4, 0x9c, 0xa2, 0xd5, 0x62, 0x88, 0xaf, 0x39, 0xbe, 0xcf, 0xc8, 0x1f, 0xc7, 0xe8, 0x1e, 0x2c,
0x46, 0x24, 0x74, 0x71, 0x1c, 0xe7, 0xa9, 0x0b, 0x9c, 0x7a, 0x5e, 0xe2, 0x32, 0x13, 0xd6, 0x61,
0xf1, 0x64, 0xc0, 0xe3, 0x42, 0x7e, 0x42, 0x91, 0x4f, 0x30, 0x4e, 0x06, 0x2c, 0x42, 0xa4, 0xf4,
0xe6, 0x57, 0xa0, 0xca, 0x22, 0x23, 0x8f, 0x0b, 0x8b, 0x50, 0xa2, 0x21, 0x75, 0x7c, 0xc9, 0x8b,
0x18, 0x30, 0x07, 0x25, 0x97, 0xc1, 0x5d, 0xb9, 0x6e, 0x0a, 0x30, 0x7f, 0x5f, 0x03, 0x60, 0x0f,
0x90, 0xa2, 0xdd, 0x85, 0x12, 0xff, 0x16, 0x69, 0xc2, 0xef, 0xa9, 0x45, 0x2c, 0x81, 0x67, 0xb9,
0x54, 0xd2, 0xfe, 0x3f, 0x95, 0x8e, 0xa3, 0x19, 0x59, 0xd7, 0xa1, 0xce, 0x44, 0xce, 0x99, 0x92,
0x31, 0xb4, 0xf9, 0x71, 0x51, 0x7c, 0xc3, 0x27, 0xb8, 0xd8, 0xdf, 0x40, 0x6f, 0x00, 0x4a, 0x78,
0x54, 0x41, 0x4c, 0xed, 0xcb, 0x7c, 0x82, 0x91, 0xa1, 0x8c, 0x37, 0x95, 0x72, 0x61, 0x53, 0x52,
0xd9, 0x54, 0xca, 0xa1, 0x09, 0xd9, 0x57, 0xe1, 0x15, 0xc2, 0x22, 0x7f, 0xf7, 0xc0, 0xee, 0xf2,
0x3b, 0x83, 0x1d, 0x9f, 0x78, 0x51, 0xc4, 0xcd, 0xb6, 0x1f, 0x50, 0x79, 0x1e, 0x96, 0x25, 0x8d,
0xb8, 0x56, 0xec, 0x09, 0x8a, 0x4d, 0x46, 0xc0, 0x32, 0x06, 0xf5, 0x00, 0x76, 0x88, 0xf2, 0xb3,
0x85, 0xd1, 0x5e, 0x95, 0x04, 0xef, 0xe3, 0x51, 0x6e, 0xea, 0x5b, 0xf0, 0xaa, 0x9a, 0x7a, 0xc0,
0x4f, 0x21, 0x4f, 0xa8, 0xec, 0x63, 0x8f, 0xca, 0xe9, 0xa5, 0xdc, 0xe2, 0xf7, 0x19, 0x0d, 0xcf,
0xaf, 0x1e, 0x79, 0x54, 0x3c, 0xe1, 0xd7, 0xa1, 0x9d, 0x7f, 0x02, 0xb7, 0x12, 0x31, 0x59, 0x7c,
0x9b, 0xb4, 0x94, 0x9d, 0xcc, 0x0c, 0x45, 0x4c, 0xfc, 0x55, 0xb8, 0x36, 0x65, 0xe2, 0xc1, 0x88,
0x62, 0x19, 0x7c, 0x17, 0xc7, 0xe7, 0xdd, 0x1f, 0x51, 0x6c, 0xfe, 0xa1, 0x06, 0xf5, 0x4c, 0x03,
0x24, 0x7a, 0x13, 0xea, 0xdc, 0x1a, 0x45, 0xbf, 0xa4, 0xb4, 0x90, 0x34, 0x07, 0x4d, 0x0f, 0x88,
0x05, 0x34, 0x3d, 0x2c, 0x6f, 0x42, 0x9d, 0x67, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x56, 0xba, 0xeb,
0x16, 0xc4, 0xc9, 0x6f, 0xd1, 0x32, 0x29, 0x12, 0x21, 0xf3, 0xf7, 0x34, 0x68, 0xe6, 0x1a, 0x31,
0x67, 0xe4, 0xe4, 0xb7, 0xa0, 0x95, 0xe1, 0xc4, 0x1e, 0x6c, 0x48, 0x66, 0x96, 0xa6, 0x30, 0xb3,
0xbf, 0x61, 0x35, 0xe2, 0xcc, 0xc8, 0xfc, 0x10, 0xca, 0x2f, 0x2b, 0x52, 0x8e, 0x37, 0xf8, 0x9d,
0xb5, 0xc0, 0xfa, 0x1d, 0x0d, 0xaa, 0x49, 0xd5, 0xf6, 0x46, 0xa6, 0x6a, 0x9b, 0x4b, 0xa3, 0x3e,
0x41, 0xc9, 0x16, 0xfd, 0x0a, 0xd4, 0x9c, 0x38, 0xc6, 0x84, 0xaa, 0x5e, 0xf3, 0x56, 0xe6, 0xfe,
0xfe, 0xb6, 0xc2, 0x58, 0x29, 0x11, 0xdb, 0xeb, 0x1a, 0xbb, 0x06, 0xf1, 0x60, 0x89, 0x6e, 0x81,
0xce, 0xb3, 0xb9, 0x29, 0x6c, 0x70, 0xc4, 0x69, 0x35, 0xf4, 0x53, 0x3f, 0xf9, 0xba, 0x05, 0xf5,
0xf8, 0x38, 0x24, 0xd4, 0xce, 0x7e, 0xf8, 0x05, 0x1c, 0xc4, 0x0b, 0x93, 0xe6, 0x97, 0x05, 0x1b,
0xa2, 0xda, 0x7c, 0x8a, 0x8f, 0x4f, 0xe4, 0x2e, 0x4c, 0x68, 0x72, 0xe0, 0x8a, 0x6f, 0x49, 0x3f,
0x8d, 0x10, 0x99, 0xb8, 0x5e, 0xcc, 0xc7, 0xf5, 0x97, 0x4a, 0xf0, 0x5d, 0xc9, 0x03, 0xf7, 0xc8,
0xca, 0x4b, 0x6a, 0x53, 0x6e, 0x9c, 0xfc, 0xf5, 0x92, 0xf0, 0x92, 0x6b, 0x50, 0xe6, 0x5e, 0x75,
0xb2, 0xd8, 0x92, 0xec, 0x89, 0x25, 0x29, 0x18, 0xad, 0xfc, 0x3e, 0xae, 0x38, 0x85, 0x56, 0x94,
0x77, 0x25, 0x85, 0xf9, 0x45, 0xa8, 0xc8, 0x86, 0x4c, 0xde, 0x08, 0x34, 0x0c, 0xa4, 0x1a, 0xd9,
0x4f, 0x74, 0x15, 0xca, 0x31, 0xff, 0xb6, 0x46, 0x0a, 0x2f, 0x47, 0xe6, 0x5b, 0x3c, 0x0f, 0xe6,
0xd5, 0x94, 0x7c, 0x55, 0x46, 0x7b, 0x71, 0x55, 0xa6, 0x90, 0xab, 0xca, 0x7c, 0x4f, 0x03, 0xd8,
0xc6, 0x4e, 0x17, 0x13, 0xbe, 0xf4, 0xa9, 0x77, 0xe8, 0x6b, 0x50, 0x61, 0x37, 0x5e, 0x86, 0x92,
0x6c, 0xb0, 0xe1, 0x56, 0x37, 0xb9, 0x02, 0x17, 0x33, 0x57, 0xe0, 0xf1, 0x0b, 0xb7, 0x7e, 0xb6,
0x0b, 0xb7, 0xb9, 0x07, 0x48, 0xbc, 0x75, 0xe5, 0x4c, 0xa9, 0x6a, 0xc6, 0x1b, 0xe9, 0x3b, 0x27,
0xf1, 0xe1, 0x42, 0xea, 0x41, 0x52, 0xee, 0x93, 0x17, 0x4f, 0xa8, 0x05, 0x85, 0xc4, 0x50, 0x0a,
0x34, 0x36, 0xbf, 0x0a, 0x0b, 0xb9, 0x87, 0xca, 0xfa, 0x45, 0x3b, 0xff, 0x54, 0xfd, 0xc5, 0x0f,
0x70, 0x79, 0xa3, 0xc4, 0xbb, 0x98, 0x29, 0xb3, 0xd3, 0xd9, 0xbe, 0x98, 0xe6, 0xc6, 0xef, 0x69,
0xbc, 0x79, 0x21, 0xb3, 0xca, 0x05, 0xd4, 0x59, 0xd2, 0x0f, 0xa6, 0x78, 0x8b, 0xd9, 0xa9, 0xaf,
0x15, 0xfe, 0x53, 0xf4, 0x01, 0x6e, 0xbe, 0xbd, 0x77, 0x91, 0x7d, 0x80, 0x9f, 0x67, 0xf9, 0x03,
0x1e, 0x78, 0x61, 0x3f, 0xce, 0x7c, 0xdb, 0x26, 0x78, 0x31, 0x14, 0x26, 0xf9, 0xb2, 0xed, 0x0e,
0xb4, 0x12, 0x6a, 0xf1, 0x30, 0xd1, 0x1b, 0xde, 0x54, 0xd0, 0xfd, 0x4c, 0x73, 0x61, 0x79, 0xbc,
0xb9, 0xae, 0x92, 0x48, 0xae, 0x5a, 0x49, 0xb9, 0x70, 0x97, 0xda, 0x4a, 0x8a, 0x9f, 0xf6, 0x1d,
0x3f, 0xa3, 0xeb, 0x87, 0x6c, 0xbc, 0x76, 0x0b, 0x4a, 0xfc, 0x7d, 0x1f, 0x02, 0x28, 0xef, 0x63,
0xb2, 0xdb, 0xa7, 0xc6, 0x15, 0xf9, 0xfb, 0x01, 0xf6, 0x0d, 0x6d, 0xed, 0x73, 0x00, 0x69, 0xe9,
0x88, 0x61, 0x9e, 0x84, 0xa4, 0xe7, 0xf8, 0xc6, 0x15, 0x54, 0x81, 0xe2, 0x76, 0xf8, 0xcc, 0xd0,
0x50, 0x15, 0xf4, 0x47, 0xde, 0xd1, 0xb1, 0x51, 0x58, 0x5b, 0x81, 0x56, 0xbe, 0x5e, 0x84, 0xca,
0x50, 0xd8, 0xdb, 0x32, 0xae, 0xb0, 0xbf, 0xd6, 0xa6, 0xa1, 0xad, 0xb9, 0x50, 0xd8, 0x89, 0xd8,
0x54, 0xb1, 0x52, 0x05, 0x8a, 0x7c, 0x19, 0xf6, 0x0c, 0xe6, 0xed, 0x8c, 0x02, 0x6a, 0x40, 0x55,
0x7d, 0x82, 0x60, 0x14, 0xd9, 0x82, 0x5b, 0x01, 0x0b, 0x41, 0x86, 0x8e, 0x16, 0x60, 0x6e, 0xec,
0x33, 0x40, 0xa3, 0x84, 0x10, 0xb4, 0xf8, 0x19, 0x53, 0x3b, 0x15, 0x1b, 0xe5, 0xb5, 0x75, 0xa8,
0x25, 0x21, 0x8c, 0x3d, 0xf9, 0x49, 0x18, 0x60, 0xe3, 0x0a, 0xaa, 0x41, 0x89, 0x93, 0x18, 0x1a,
0x5b, 0x44, 0x4d, 0x30, 0x0a, 0x6b, 0x7f, 0xa1, 0x41, 0x59, 0x7c, 0x7d, 0x27, 0x10, 0xe2, 0xb7,
0x71, 0x05, 0x2d, 0xc1, 0x7c, 0xa7, 0xb3, 0x2d, 0x2e, 0x38, 0x09, 0x53, 0x1a, 0x6a, 0xc3, 0x22,
0x5b, 0x5d, 0x3d, 0x21, 0xc1, 0x14, 0xd8, 0x84, 0xc7, 0xc9, 0x8d, 0x64, 0x6f, 0xb7, 0x1f, 0x1f,
0xe3, 0xae, 0x51, 0x44, 0x2b, 0xf0, 0x4a, 0xf2, 0x9c, 0x29, 0xdf, 0xf6, 0x1a, 0x3a, 0x5a, 0x86,
0xa5, 0xec, 0x23, 0x1f, 0x84, 0x4f, 0x42, 0x7a, 0xec, 0x05, 0x47, 0x46, 0x69, 0xed, 0x0e, 0x54,
0x1e, 0x0e, 0x29, 0x71, 0x76, 0x22, 0x21, 0x4b, 0x18, 0x19, 0x57, 0x90, 0x01, 0x0d, 0x96, 0x85,
0xed, 0xf8, 0x5d, 0x6e, 0x78, 0x86, 0x76, 0xff, 0xb5, 0xff, 0xf8, 0x9b, 0xaa, 0xf6, 0x2f, 0xcf,
0x6f, 0x6a, 0x3f, 0x7d, 0x7e, 0x53, 0xfb, 0x9f, 0xe7, 0x37, 0xb5, 0x1f, 0xfd, 0xef, 0xcd, 0x2b,
0x60, 0x84, 0xe4, 0x68, 0x9d, 0x7a, 0x27, 0x83, 0xf5, 0x93, 0x01, 0xff, 0x7f, 0x2a, 0x07, 0x65,
0xfe, 0xe7, 0x8b, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x53, 0x1b, 0x1d, 0x24, 0xb4, 0x45, 0x00,
0x00,
}
| init |
items.py | from combat import Damage, DamageType
from components import consumable, effects
from components_types import ConsumableTarget as Target, ConsumableType
from entities.factory import ItemFactory
from ranged_value import Range
def effect_level_up(effect: effects.Effect, floor: int, base: int) -> None:
    """Scale an effect's strength in place for the given dungeon floor.

    ``base`` is the floor the item was balanced for; the further ``floor``
    is past it, the bigger the bonus. Wrapped effects (durable, combined)
    are scaled recursively.
    """
    match effect:
        case effects.HealEffect(amount=amount):
            # Integer math: +amount * (floor - base) / 50 extra healing.
            effect.amount += (floor - base) * amount // 5 // 10
        case effects.RestoreManaEffect(amount=amount):
            effect.amount += (floor - base) * amount // 5 // 10
        case effects.DurableEffect(effect=next_effect, turns=turns):
            # NOTE(review): here (floor - base) is floor-divided by 5 *before*
            # multiplying, unlike the branches above which multiply first —
            # confirm this stricter truncation is intended.
            effect.turns += (floor - base) // 5 * turns // 10
            # The per-turn payload effect levels up as well.
            effect_level_up(next_effect, floor, base)
        case effects.Combine(effects=used_effects):
            for next_effect in used_effects:
                effect_level_up(next_effect, floor, base)
def potion_level_up(item, floor, base):
    """Scale a consumable item's effect to the given dungeon floor.

    Used as the ``fit_to_level`` hook by the item factories below; delegates
    the actual scaling to :func:`effect_level_up`.
    """
    assert item.consumable is not None
    effect_level_up(item.consumable.effect, floor, base)
# ---------------------------------------------------------------------------
# Potions: self-targeted consumables; strength scales with floor depth via
# potion_level_up.
# ---------------------------------------------------------------------------

health_potion = ItemFactory(
    char="&",
    color=(127, 0, 255),
    name="Health Potion",
    consume=consumable.Potion(target=Target.SELF, effect=effects.HealEffect(40)),
    fit_to_level=potion_level_up,
)

mana_potion = ItemFactory(
    char="&",
    color=(0, 0, 255),
    name="Mana Potion",
    consume=consumable.Potion(target=Target.SELF, effect=effects.RestoreManaEffect(10)),
    fit_to_level=potion_level_up,
)

universal_potion = ItemFactory(
    char="&",
    color=(64, 0, 255),
    name="Universal Potion",
    consume=consumable.Potion(target=Target.SELF, effect=effects.Combine([
        effects.HealEffect(amount=40),
        effects.RestoreManaEffect(amount=20),
    ])),
    fit_to_level=potion_level_up,
    # Balanced for floor 6 instead of the factory default base floor.
    base_floor=6,
)

regeneration_potion = ItemFactory(
    char="&",
    color=(3, 192, 60),
    name="Regeneration Potion",
    consume=consumable.Potion(target=Target.SELF, effect=effects.AddEffect(
        # Heal 4 HP per turn for 10 turns.
        effects.DurableEffect(10, effects.HealEffect(4))
    )),
    fit_to_level=potion_level_up,
)

# ---------------------------------------------------------------------------
# Scrolls and bolts: single-use ranged consumables (no level scaling).
# ---------------------------------------------------------------------------

poison_scroll = ItemFactory(
    char="-",
    color=(128, 30, 70),
    name="Poisoned bolt",
    consume=consumable.Consumable(
        target=Target.NEAREST, range=5,
        # 3-4 poison damage per turn for 5 turns.
        effect=effects.AddEffect(
            effects.DurableEffect(5, effects.DamageEffect(Damage(Range(3, 4), DamageType.POISON)))),
    ),
)

lightning_scroll = ItemFactory(
    char="~",
    color=(255, 255, 0),
    name="Lightning Scroll",
    consume=consumable.MagicScroll(
        target=Target.NEAREST, name="Lightning", range=5,
        effect=effects.DamageEffect(Damage(Range(10, 30), DamageType.LIGHTNING)),
    ),
)

fireball_scroll = ItemFactory(
    char="~",
    color=(255, 0, 0),
    name="Fireball Scroll",
    consume=consumable.MagicScroll(
        # Area effect: player picks a tile, radius-5 blast.
        target=Target.RANGED, name="Fireball", radius=5,
        effect=effects.Combine([
            effects.DamageEffect(Damage(Range(10, 15), DamageType.FIRE)),
            effects.AddConfusionEffect(1),
        ])
    ),
)

firebolt_scroll = ItemFactory(
    char="~",
    color=(255, 0, 0),
    name="Firebolt Scroll",
    consume=consumable.MagicScroll(
        # Strikes a random target within range.
        target=Target.RANDOM, name="Firebolt", range=5,
        effect=effects.DamageEffect(Damage(Range(15, 20), DamageType.FIRE)),
    ),
)

confusion_scroll = ItemFactory(
    char="~",
    color=(207, 63, 255),
    name="Confusion Scroll",
    consume=consumable.MagicScroll(
        target=Target.SELECTED, name="Confusion", range=5,
        effect=effects.AddConfusionEffect(10),
    ),
)

# ---------------------------------------------------------------------------
# Magic books: reusable spells paid for with mana (mp) per cast.
# ---------------------------------------------------------------------------

healing_book = ItemFactory(
    char="#",
    color=(127, 0, 255),
    name="Magic book: Health",
    consume=consumable.MagicBook(
        target=Target.SELF,
        mp=20,
        name="healing",
        effect=effects.HealEffect(40),
    ),
    fit_to_level=potion_level_up
)

lightning_book = ItemFactory(
    char="#",
    color=(255, 255, 0),
    # NOTE(review): "Lighting" looks like a typo for "Lightning" in this
    # user-facing name; left unchanged here since it is runtime text.
    name="Magic book: Lighting",
    consume=consumable.MagicBook(
        target=Target.NEAREST,
        mp=30,
        name="lightning",
        effect=effects.DamageEffect(Damage(Range(25, 30), DamageType.LIGHTNING)),
        range=4
    ),
)

fireball_book = ItemFactory(
    char="#",
    color=(255, 0, 0),
    name="Magic book: Fireball",
    consume=consumable.MagicBook(
        target=Target.RANGED,
        mp=40,
        name="fireball",
        effect=effects.Combine([
            effects.DamageEffect(Damage(Range(25, 35), DamageType.FIRE)),
            effects.AddConfusionEffect(1),
        ]),
        radius=3
    ),
)

confusion_book = ItemFactory(
    char="#",
    color=(207, 63, 255),
    name="Magic book: Confusion",
    consume=consumable.MagicBook(
        target=Target.SELECTED,
        mp=20,
        name="confusion",
        effect=effects.AddConfusionEffect(5)
    ),
)
| potion_level_up |
rest_test.go | // Package rest provides HTTP types and primitives for REST
// requests validation and responses handling.
package rest
import (
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/serjplus/cosmos-sdk/client/context"
"github.com/serjplus/cosmos-sdk/codec"
"github.com/serjplus/cosmos-sdk/types"
)
// TestBaseReqValidateBasic exercises BaseReq.ValidateBasic across valid and
// invalid field combinations: missing from-address, missing chain-id, and
// the mutually exclusive fees / gas-prices pair.
func TestBaseReqValidateBasic(t *testing.T) {
	fromAddr := "cosmos1cq0sxam6x4l0sv9yz3a2vlqhdhvt2k6jtgcse0"
	tenstakes, err := types.ParseCoins("10stake")
	require.NoError(t, err)
	onestake, err := types.ParseDecCoins("1.0stake")
	require.NoError(t, err)

	// Valid: from-address and chain-id set, only fees provided.
	req1 := NewBaseReq(
		fromAddr, "", "nonempty", "", "", 0, 0, tenstakes, nil, false,
	)
	// Invalid: empty from-address.
	req2 := NewBaseReq(
		"", "", "nonempty", "", "", 0, 0, tenstakes, nil, false,
	)
	// Invalid: empty chain-id.
	req3 := NewBaseReq(
		fromAddr, "", "", "", "", 0, 0, tenstakes, nil, false,
	)
	// Invalid: both fees and gas prices provided.
	req4 := NewBaseReq(
		fromAddr, "", "nonempty", "", "", 0, 0, tenstakes, onestake, false,
	)
	// Valid: neither fees nor gas prices provided.
	req5 := NewBaseReq(
		fromAddr, "", "nonempty", "", "", 0, 0, types.Coins{}, types.DecCoins{}, false,
	)

	tests := []struct {
		name string
		req  BaseReq
		w    http.ResponseWriter
		want bool
	}{
		{"ok", req1, httptest.NewRecorder(), true},
		{"neither fees nor gasprices provided", req5, httptest.NewRecorder(), true},
		{"empty from", req2, httptest.NewRecorder(), false},
		{"empty chain-id", req3, httptest.NewRecorder(), false},
		{"fees and gasprices provided", req4, httptest.NewRecorder(), false},
	}

	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			require.Equal(t, tt.want, tt.req.ValidateBasic(tt.w))
		})
	}
}
// TestParseHTTPArgs checks query-string parsing of pagination (page/limit)
// and tag parameters, including rejection of negative values.
func TestParseHTTPArgs(t *testing.T) {
	req0 := mustNewRequest(t, "", "/", nil)
	req1 := mustNewRequest(t, "", "/?limit=5", nil)
	req2 := mustNewRequest(t, "", "/?page=5", nil)
	req3 := mustNewRequest(t, "", "/?page=5&limit=5", nil)

	// Negative values must be rejected.
	reqE1 := mustNewRequest(t, "", "/?page=-1", nil)
	reqE2 := mustNewRequest(t, "", "/?limit=-1", nil)

	// Any other query parameter becomes a tag expression.
	req4 := mustNewRequest(t, "", "/?foo=faa", nil)

	tests := []struct {
		name  string
		req   *http.Request
		w     http.ResponseWriter
		tags  []string
		page  int
		limit int
		err   bool
	}{
		{"no params", req0, httptest.NewRecorder(), []string{}, DefaultPage, DefaultLimit, false},
		{"Limit", req1, httptest.NewRecorder(), []string{}, DefaultPage, 5, false},
		{"Page", req2, httptest.NewRecorder(), []string{}, 5, DefaultLimit, false},
		{"Page and limit", req3, httptest.NewRecorder(), []string{}, 5, 5, false},
		{"error page 0", reqE1, httptest.NewRecorder(), []string{}, DefaultPage, DefaultLimit, true},
		{"error limit 0", reqE2, httptest.NewRecorder(), []string{}, DefaultPage, DefaultLimit, true},
		{"tags", req4, httptest.NewRecorder(), []string{"foo='faa'"}, DefaultPage, DefaultLimit, false},
	}

	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			tags, page, limit, err := ParseHTTPArgs(tt.req)
			if tt.err {
				require.NotNil(t, err)
			} else {
				require.Nil(t, err)
				require.Equal(t, tt.tags, tags)
				require.Equal(t, tt.page, page)
				require.Equal(t, tt.limit, limit)
			}
		})
	}
}
// TestParseQueryHeight checks parsing of the ?height query parameter,
// including rejection of malformed and negative values.
func TestParseQueryHeight(t *testing.T) {
	var emptyHeight int64
	height := int64(1256756)

	req0 := mustNewRequest(t, "", "/", nil)
	req1 := mustNewRequest(t, "", "/?height=1256756", nil)
	req2 := mustNewRequest(t, "", "/?height=456yui4567", nil)
	req3 := mustNewRequest(t, "", "/?height=-1", nil)

	tests := []struct {
		name           string
		req            *http.Request
		w              http.ResponseWriter
		cliCtx         context.CLIContext
		expectedHeight int64
		expectedOk     bool
	}{
		{"no height", req0, httptest.NewRecorder(), context.CLIContext{}, emptyHeight, true},
		{"height", req1, httptest.NewRecorder(), context.CLIContext{}, height, true},
		{"invalid height", req2, httptest.NewRecorder(), context.CLIContext{}, emptyHeight, false},
		{"negative height", req3, httptest.NewRecorder(), context.CLIContext{}, emptyHeight, false},
	}

	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			cliCtx, ok := ParseQueryHeightOrReturnBadRequest(tt.w, tt.cliCtx, tt.req)
			if tt.expectedOk {
				require.True(t, ok)
				require.Equal(t, tt.expectedHeight, cliCtx.Height)
			} else {
				require.False(t, ok)
				// Fixed: the original used require.Empty(t, tt.expectedHeight,
				// cliCtx.Height), but require.Empty's third argument is a
				// message, so cliCtx.Height was never actually checked.
				require.Equal(t, tt.expectedHeight, cliCtx.Height)
			}
		})
	}
}
// TestProcessPostResponse checks PostProcessResponse wrapping of a payload
// in a height-annotated response, both with and without JSON indentation,
// and that a negative context height produces a 500.
func TestProcessPostResponse(t *testing.T) {
	// mock account
	// PubKey field ensures amino encoding is used first since standard
	// JSON encoding will panic on crypto.PubKey
	type mockAccount struct {
		Address       types.AccAddress `json:"address"`
		Coins         types.Coins      `json:"coins"`
		PubKey        crypto.PubKey    `json:"public_key"`
		AccountNumber uint64           `json:"account_number"`
		Sequence      uint64           `json:"sequence"`
	}

	// setup: build an account with a freshly generated key
	ctx := context.NewCLIContext()
	height := int64(194423)
	privKey := secp256k1.GenPrivKey()
	pubKey := privKey.PubKey()
	addr := types.AccAddress(pubKey.Address())
	coins := types.NewCoins(types.NewCoin("atom", types.NewInt(100)), types.NewCoin("tree", types.NewInt(125)))
	accNumber := uint64(104)
	sequence := uint64(32)
	acc := mockAccount{addr, coins, pubKey, accNumber, sequence}
	cdc := codec.New()
	codec.RegisterCrypto(cdc)
	cdc.RegisterConcrete(&mockAccount{}, "cosmos-sdk/mockAccount", nil)
	ctx = ctx.WithCodec(cdc)

	// setup expected results: the account wrapped in a response carrying
	// the block height, in both compact and indented encodings
	jsonNoIndent, err := ctx.Codec.MarshalJSON(acc)
	require.Nil(t, err)
	jsonWithIndent, err := ctx.Codec.MarshalJSONIndent(acc, "", " ")
	require.Nil(t, err)
	respNoIndent := NewResponseWithHeight(height, jsonNoIndent)
	respWithIndent := NewResponseWithHeight(height, jsonWithIndent)
	expectedNoIndent, err := ctx.Codec.MarshalJSON(respNoIndent)
	require.Nil(t, err)
	expectedWithIndent, err := ctx.Codec.MarshalJSONIndent(respWithIndent, "", " ")
	require.Nil(t, err)

	// check that negative height writes an error
	w := httptest.NewRecorder()
	ctx = ctx.WithHeight(-1)
	PostProcessResponse(w, ctx, acc)
	require.Equal(t, http.StatusInternalServerError, w.Code)

	// check that height returns expected response
	ctx = ctx.WithHeight(height)
	runPostProcessResponse(t, ctx, acc, expectedNoIndent, false)
	// check height with indent
	runPostProcessResponse(t, ctx, acc, expectedWithIndent, true)
}
// runPostProcessResponse asserts that the ResponseRecorder returns the
// expected code and body. It runs PostProcessResponse twice: once on the
// object's regular interface and once on the pre-marshalled struct.
// (Restores the two recorder/PostProcessResponse lines that were missing
// from the first leg of the check.)
func runPostProcessResponse(t *testing.T, ctx context.CLIContext, obj interface{}, expectedBody []byte, indent bool) {
	if indent {
		ctx.Indent = indent
	}

	// test using regular struct
	w := httptest.NewRecorder()
	PostProcessResponse(w, ctx, obj)
	require.Equal(t, http.StatusOK, w.Code, w.Body)

	resp := w.Result()
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	require.Nil(t, err)
	require.Equal(t, expectedBody, body)

	var marshalled []byte
	if indent {
		marshalled, err = ctx.Codec.MarshalJSONIndent(obj, "", " ")
	} else {
		marshalled, err = ctx.Codec.MarshalJSON(obj)
	}
	require.Nil(t, err)

	// test using marshalled struct
	w = httptest.NewRecorder()
	PostProcessResponse(w, ctx, marshalled)
	require.Equal(t, http.StatusOK, w.Code, w.Body)

	resp = w.Result()
	defer resp.Body.Close()

	body, err = ioutil.ReadAll(resp.Body)
	require.Nil(t, err)
	require.Equal(t, expectedBody, body)
}
func mustNewRequest(t *testing.T, method, url string, body io.Reader) *http.Request {
req, err := http.NewRequest(method, url, body)
require.NoError(t, err)
err = req.ParseForm()
require.NoError(t, err)
return req
} | w := httptest.NewRecorder()
PostProcessResponse(w, ctx, obj) |
agent_key_manager.go | package ssh
import (
"crypto"
"crypto/ed25519"
"crypto/rand"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
// AgentKeyManager is a KeyManager that stores generated keys and their
// certificates in a running ssh-agent.
type AgentKeyManager struct {
	agent agent.ExtendedAgent
}
func | (agent agent.ExtendedAgent) KeyManager {
return &AgentKeyManager{
agent: agent,
}
}
// GetKey generates a fresh ed25519 ssh keypair and returns the public and
// private halves.
func (a *AgentKeyManager) GetKey() (crypto.PublicKey, crypto.PrivateKey, error) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not generate ed25519 keys")
	}
	return pub, priv, nil
}
// WriteKey adds the private key and its signed certificate to the ssh-agent.
// The entry carries a blessclient comment and a lifetime derived from the
// certificate (see getLifetimeSecs), so the agent drops it on expiry.
func (a *AgentKeyManager) WriteKey(
	priv crypto.PrivateKey,
	cert *ssh.Certificate,
) error {
	err := a.agent.Add(agent.AddedKey{
		PrivateKey:   priv,
		Certificate:  cert,
		Comment:      a.getComment(cert),
		LifetimeSecs: getLifetimeSecs(cert),
	})
	return errors.Wrap(err, "could not add keys to agent")
}
// getComment builds the comment string attached to agent keys, recording
// when blessclient added them. The certificate argument is currently unused.
func (a *AgentKeyManager) getComment(cert *ssh.Certificate) string {
	timestamp := time.Now().Local().Format(time.UnixDate)
	return "Added by blessclient at " + timestamp
}
// ListCertificates returns every certificate in the agent that carries the
// "ssh-ca-lambda" extension and is currently inside its validity window.
// Non-certificate keys and unparseable entries are skipped.
func (a *AgentKeyManager) ListCertificates() ([]*ssh.Certificate, error) {
	agentKeys, err := a.agent.List()
	if err != nil {
		return nil, errors.Wrap(err, "could not list agent keys")
	}

	allCerts := []*ssh.Certificate{}
	for _, agentKey := range agentKeys {
		pub, err := ssh.ParsePublicKey(agentKey.Marshal())
		if err != nil {
			// Skip entries the ssh package cannot parse; log and move on.
			logrus.Warnf("could not parse public key: %s", err.Error())
			continue
		}

		cert, ok := pub.(*ssh.Certificate)
		if !ok {
			// Plain key, not a certificate.
			continue
		}

		_, ok = cert.Extensions["ssh-ca-lambda"]
		if !ok {
			// not a certificate we care about
			continue
		}

		now := time.Now()
		validAfter := time.Unix(int64(cert.ValidAfter), 0)
		validBefore := time.Unix(int64(cert.ValidBefore), 0)
		if !(now.After(validAfter) && now.Before(validBefore)) {
			continue // expired or not yet valid
		}
		allCerts = append(allCerts, cert)
	}
	return allCerts, nil
}
// HasValidCertificate reports whether the agent currently holds at least one
// blessclient certificate inside its validity window.
func (a *AgentKeyManager) HasValidCertificate() (bool, error) {
	found, err := a.ListCertificates()
	if err != nil {
		return false, err
	}
	return len(found) != 0, nil
}
| NewAgentKeyManager |
stash.ts | import { runCommandGit, RunCommandOptions } from "lib/run-command";
/** Options accepted by {@link stash}. */
export interface StashOptions extends RunCommandOptions {
  /** Stash subcommand to run; defaults to "push". */
  action?: "push" | "pop" | "drop";
  /** Paths to restrict a "push" to; ignored for "pop" and "drop". */
  paths?: string[];
}
/**
 * Runs `git stash` with an argument list derived from the given options.
 */
export const stash = (options: StashOptions = {}) =>
  runCommandGit("stash", createArgs(options), options);
function | (options: StashOptions): string[] {
const { action = "push", paths = [] } = options;
if (action === "drop") {
return ["drop"];
} else if (action === "pop") {
return ["pop"];
}
return ["push", "--keep-index", "--include-untracked", "--", ...paths];
}
| createArgs |
install-compiler.ts | import cleanRequire from '../../../utils/clean-require';
import isTemplateValid from '../../../utils/is-template-valid';
import * as npm from '../../../utils/npm-utils';
import strings from '../../../resources/index';
import { Logger } from '../../logger';
import { Template } from '../../../types';
export default function installCompiler(
options: {
compilerPath: string;
componentPath: string;
dependency: string;
logger: Logger;
},
cb: (err: string | number | null, data: Template) => void
): void {
const { compilerPath, componentPath, dependency, logger } = options;
logger.warn(strings.messages.cli.INSTALLING_DEPS(dependency, componentPath));
const npmOptions = {
dependency,
installPath: componentPath, | save: false,
silent: true,
usePrefix: false
};
npm.installDependency(npmOptions, err => {
err ? logger.err('FAIL') : logger.ok('OK');
const compiler = cleanRequire<Template>(compilerPath, { justTry: true });
const isOk = isTemplateValid(compiler);
const errorMsg = 'There was a problem while installing the compiler';
cb(!err && isOk ? null : errorMsg, compiler as Template);
});
} | |
_inputs.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AutoPausePropertiesArgs',
'AutoScalePropertiesArgs',
'CmdkeySetupArgs',
'ComponentSetupArgs',
'CustomerManagedKeyDetailsArgs',
'DataLakeStorageAccountDetailsArgs',
'DynamicExecutorAllocationArgs',
'EncryptionDetailsArgs',
'EntityReferenceArgs',
'EnvironmentVariableSetupArgs',
'GitHubClientSecretArgs',
'IntegrationRuntimeComputePropertiesArgs',
'IntegrationRuntimeCustomSetupScriptPropertiesArgs',
'IntegrationRuntimeDataFlowPropertiesArgs',
'IntegrationRuntimeDataProxyPropertiesArgs',
'IntegrationRuntimeSsisCatalogInfoArgs',
'IntegrationRuntimeSsisPropertiesArgs',
'IntegrationRuntimeVNetPropertiesArgs',
'LibraryInfoArgs',
'LibraryRequirementsArgs',
'LinkedIntegrationRuntimeKeyAuthorizationArgs',
'LinkedIntegrationRuntimeRbacAuthorizationArgs',
'ManagedIdentityArgs',
'ManagedIntegrationRuntimeArgs',
'ManagedVirtualNetworkReferenceArgs',
'ManagedVirtualNetworkSettingsArgs',
'PrivateEndpointConnectionArgs',
'PrivateLinkServiceConnectionStateArgs',
'PurviewConfigurationArgs',
'SecureStringArgs',
'SelfHostedIntegrationRuntimeArgs',
'SkuArgs',
'SqlPoolVulnerabilityAssessmentRuleBaselineItemArgs',
'VirtualNetworkProfileArgs',
'VulnerabilityAssessmentRecurringScansPropertiesArgs',
'WorkspaceKeyDetailsArgs',
'WorkspaceRepositoryConfigurationArgs',
]
@pulumi.input_type
class AutoPausePropertiesArgs:
    def __init__(__self__, *,
                 delay_in_minutes: Optional[pulumi.Input[int]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Auto-pausing properties of a Big Data pool powered by Apache Spark
        :param pulumi.Input[int] delay_in_minutes: Number of minutes of idle time before the Big Data pool is automatically paused.
        :param pulumi.Input[bool] enabled: Whether auto-pausing is enabled for the Big Data pool.
        """
        # Only forward explicitly-provided values so omitted fields keep the
        # provider-side defaults.
        if delay_in_minutes is not None:
            pulumi.set(__self__, "delay_in_minutes", delay_in_minutes)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter(name="delayInMinutes")
    def delay_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Number of minutes of idle time before the Big Data pool is automatically paused.
        """
        return pulumi.get(self, "delay_in_minutes")

    @delay_in_minutes.setter
    def delay_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "delay_in_minutes", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether auto-pausing is enabled for the Big Data pool.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class AutoScalePropertiesArgs:
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 max_node_count: Optional[pulumi.Input[int]] = None,
                 min_node_count: Optional[pulumi.Input[int]] = None):
        """
        Auto-scaling properties of a Big Data pool powered by Apache Spark
        :param pulumi.Input[bool] enabled: Whether automatic scaling is enabled for the Big Data pool.
        :param pulumi.Input[int] max_node_count: The maximum number of nodes the Big Data pool can support.
        :param pulumi.Input[int] min_node_count: The minimum number of nodes the Big Data pool can support.
        """
        # Only forward explicitly-provided values so omitted fields keep the
        # provider-side defaults.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if max_node_count is not None:
            pulumi.set(__self__, "max_node_count", max_node_count)
        if min_node_count is not None:
            pulumi.set(__self__, "min_node_count", min_node_count)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether automatic scaling is enabled for the Big Data pool.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="maxNodeCount")
    def max_node_count(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of nodes the Big Data pool can support.
        """
        return pulumi.get(self, "max_node_count")

    @max_node_count.setter
    def max_node_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_node_count", value)

    @property
    @pulumi.getter(name="minNodeCount")
    def min_node_count(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of nodes the Big Data pool can support.
        """
        return pulumi.get(self, "min_node_count")

    @min_node_count.setter
    def min_node_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_node_count", value)
@pulumi.input_type
class CmdkeySetupArgs:
    def __init__(__self__, *,
                 password: pulumi.Input['SecureStringArgs'],
                 target_name: Any,
                 type: pulumi.Input[str],
                 user_name: Any):
        """
        The custom setup of running cmdkey commands.
        :param pulumi.Input['SecureStringArgs'] password: The password of data source access.
        :param Any target_name: The server name of data source access.
        :param pulumi.Input[str] type: The type of custom setup.
               Expected value is 'CmdkeySetup'.
        :param Any user_name: The user name of data source access.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "target_name", target_name)
        # The discriminator is hard-coded; the caller-supplied `type` value
        # is ignored here by design of the generated SDK.
        pulumi.set(__self__, "type", 'CmdkeySetup')
        pulumi.set(__self__, "user_name", user_name)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input['SecureStringArgs']:
        """
        The password of data source access.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input['SecureStringArgs']):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="targetName")
    def target_name(self) -> Any:
        """
        The server name of data source access.
        """
        return pulumi.get(self, "target_name")

    @target_name.setter
    def target_name(self, value: Any):
        pulumi.set(self, "target_name", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of custom setup.
        Expected value is 'CmdkeySetup'.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> Any:
        """
        The user name of data source access.
        """
        return pulumi.get(self, "user_name")

    @user_name.setter
    def user_name(self, value: Any):
        pulumi.set(self, "user_name", value)
@pulumi.input_type
class ComponentSetupArgs:
    def __init__(self, *,
                 component_name: pulumi.Input[str],
                 type: pulumi.Input[str],
                 license_key: Optional[pulumi.Input['SecureStringArgs']] = None):
        """Custom setup that installs a third-party component.

        :param component_name: Name of the third-party component.
        :param type: Kind discriminator; always 'ComponentSetup'.
        :param license_key: License key used to activate the component, if any.
        """
        pulumi.set(self, "component_name", component_name)
        # The discriminator is pinned to 'ComponentSetup' regardless of the argument.
        pulumi.set(self, "type", 'ComponentSetup')
        if license_key is not None:
            pulumi.set(self, "license_key", license_key)
    @property
    @pulumi.getter(name="componentName")
    def component_name(self) -> pulumi.Input[str]:
        """Name of the third-party component."""
        return pulumi.get(self, "component_name")
    @component_name.setter
    def component_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "component_name", value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """Kind discriminator; always 'ComponentSetup'."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="licenseKey")
    def license_key(self) -> Optional[pulumi.Input['SecureStringArgs']]:
        """License key used to activate the component, if any."""
        return pulumi.get(self, "license_key")
    @license_key.setter
    def license_key(self, value: Optional[pulumi.Input['SecureStringArgs']]):
        pulumi.set(self, "license_key", value)
@pulumi.input_type
class CustomerManagedKeyDetailsArgs:
    def __init__(self, *,
                 key: Optional[pulumi.Input['WorkspaceKeyDetailsArgs']] = None):
        """Customer-managed key configuration attached to a workspace.

        :param key: The workspace key object, when one is configured.
        """
        if key is not None:
            pulumi.set(self, "key", key)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input['WorkspaceKeyDetailsArgs']]:
        """The workspace key object, when one is configured."""
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input['WorkspaceKeyDetailsArgs']]):
        pulumi.set(self, "key", value)
@pulumi.input_type
class DataLakeStorageAccountDetailsArgs:
    def __init__(self, *,
                 account_url: Optional[pulumi.Input[str]] = None,
                 filesystem: Optional[pulumi.Input[str]] = None):
        """Data-lake storage account associated with the workspace.

        :param account_url: URL of the storage account.
        :param filesystem: Name of the filesystem within the account.
        """
        if account_url is not None:
            pulumi.set(self, "account_url", account_url)
        if filesystem is not None:
            pulumi.set(self, "filesystem", filesystem)
    @property
    @pulumi.getter(name="accountUrl")
    def account_url(self) -> Optional[pulumi.Input[str]]:
        """URL of the storage account."""
        return pulumi.get(self, "account_url")
    @account_url.setter
    def account_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_url", value)
    @property
    @pulumi.getter
    def filesystem(self) -> Optional[pulumi.Input[str]]:
        """Name of the filesystem within the account."""
        return pulumi.get(self, "filesystem")
    @filesystem.setter
    def filesystem(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filesystem", value)
@pulumi.input_type
class DynamicExecutorAllocationArgs:
    def __init__(self, *,
                 enabled: Optional[pulumi.Input[bool]] = None):
        """Dynamic executor allocation settings.

        :param enabled: Whether dynamic executor allocation is turned on.
        """
        if enabled is not None:
            pulumi.set(self, "enabled", enabled)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether dynamic executor allocation is turned on."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class EncryptionDetailsArgs:
    def __init__(self, *,
                 cmk: Optional[pulumi.Input['CustomerManagedKeyDetailsArgs']] = None):
        """Encryption configuration associated with the workspace.

        :param cmk: Customer-managed key details, when one is in use.
        """
        if cmk is not None:
            pulumi.set(self, "cmk", cmk)
    @property
    @pulumi.getter
    def cmk(self) -> Optional[pulumi.Input['CustomerManagedKeyDetailsArgs']]:
        """Customer-managed key details, when one is in use."""
        return pulumi.get(self, "cmk")
    @cmk.setter
    def cmk(self, value: Optional[pulumi.Input['CustomerManagedKeyDetailsArgs']]):
        pulumi.set(self, "cmk", value)
@pulumi.input_type
class EntityReferenceArgs:
    def __init__(self, *,
                 reference_name: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeEntityReferenceType']]] = None):
        """Reference to another entity.

        :param reference_name: Name of the referenced entity.
        :param type: Kind of the referenced entity.
        """
        if reference_name is not None:
            pulumi.set(self, "reference_name", reference_name)
        if type is not None:
            pulumi.set(self, "type", type)
    @property
    @pulumi.getter(name="referenceName")
    def reference_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the referenced entity."""
        return pulumi.get(self, "reference_name")
    @reference_name.setter
    def reference_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reference_name", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[Union[str, 'IntegrationRuntimeEntityReferenceType']]]:
        """Kind of the referenced entity."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeEntityReferenceType']]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class EnvironmentVariableSetupArgs:
    def __init__(self, *,
                 type: pulumi.Input[str],
                 variable_name: pulumi.Input[str],
                 variable_value: pulumi.Input[str]):
        """Custom setup that defines an environment variable.

        :param type: Kind discriminator; always 'EnvironmentVariableSetup'.
        :param variable_name: Name of the environment variable.
        :param variable_value: Value of the environment variable.
        """
        # The discriminator is pinned to 'EnvironmentVariableSetup' regardless of the argument.
        pulumi.set(self, "type", 'EnvironmentVariableSetup')
        pulumi.set(self, "variable_name", variable_name)
        pulumi.set(self, "variable_value", variable_value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """Kind discriminator; always 'EnvironmentVariableSetup'."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="variableName")
    def variable_name(self) -> pulumi.Input[str]:
        """Name of the environment variable."""
        return pulumi.get(self, "variable_name")
    @variable_name.setter
    def variable_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "variable_name", value)
    @property
    @pulumi.getter(name="variableValue")
    def variable_value(self) -> pulumi.Input[str]:
        """Value of the environment variable."""
        return pulumi.get(self, "variable_value")
    @variable_value.setter
    def variable_value(self, value: pulumi.Input[str]):
        pulumi.set(self, "variable_value", value)
@pulumi.input_type
class GitHubClientSecretArgs:
    def __init__(self, *,
                 byoa_secret_akv_url: Optional[pulumi.Input[str]] = None,
                 byoa_secret_name: Optional[pulumi.Input[str]] = None):
        """Client-secret details for a bring-your-own-app GitHub repo configuration.

        :param byoa_secret_akv_url: AKV URL of the bring-your-own-app client secret.
        :param byoa_secret_name: Name of the bring-your-own-app client secret in AKV.
        """
        if byoa_secret_akv_url is not None:
            pulumi.set(self, "byoa_secret_akv_url", byoa_secret_akv_url)
        if byoa_secret_name is not None:
            pulumi.set(self, "byoa_secret_name", byoa_secret_name)
    @property
    @pulumi.getter(name="byoaSecretAkvUrl")
    def byoa_secret_akv_url(self) -> Optional[pulumi.Input[str]]:
        """AKV URL of the bring-your-own-app client secret."""
        return pulumi.get(self, "byoa_secret_akv_url")
    @byoa_secret_akv_url.setter
    def byoa_secret_akv_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "byoa_secret_akv_url", value)
    @property
    @pulumi.getter(name="byoaSecretName")
    def byoa_secret_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the bring-your-own-app client secret in AKV."""
        return pulumi.get(self, "byoa_secret_name")
    @byoa_secret_name.setter
    def byoa_secret_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "byoa_secret_name", value)
@pulumi.input_type
class IntegrationRuntimeComputePropertiesArgs:
    def __init__(__self__, *,
                 data_flow_properties: Optional[pulumi.Input['IntegrationRuntimeDataFlowPropertiesArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 max_parallel_executions_per_node: Optional[pulumi.Input[int]] = None,
                 node_size: Optional[pulumi.Input[str]] = None,
                 number_of_nodes: Optional[pulumi.Input[int]] = None,
                 v_net_properties: Optional[pulumi.Input['IntegrationRuntimeVNetPropertiesArgs']] = None):
        """
        The compute resource properties for managed integration runtime.
        :param pulumi.Input['IntegrationRuntimeDataFlowPropertiesArgs'] data_flow_properties: Data flow properties for managed integration runtime.
        :param pulumi.Input[str] location: The location for managed integration runtime. The supported regions could be found on https://docs.microsoft.com/en-us/azure/data-factory/data-factory-data-movement-activities
        :param pulumi.Input[int] max_parallel_executions_per_node: Maximum parallel executions count per node for managed integration runtime.
        :param pulumi.Input[str] node_size: The node size requirement to managed integration runtime.
        :param pulumi.Input[int] number_of_nodes: The required number of nodes for managed integration runtime.
        :param pulumi.Input['IntegrationRuntimeVNetPropertiesArgs'] v_net_properties: VNet properties for managed integration runtime.
        """
        # Record only explicitly supplied values; omitted inputs stay unset.
        if data_flow_properties is not None:
            pulumi.set(__self__, "data_flow_properties", data_flow_properties)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if max_parallel_executions_per_node is not None:
            pulumi.set(__self__, "max_parallel_executions_per_node", max_parallel_executions_per_node)
        if node_size is not None:
            pulumi.set(__self__, "node_size", node_size)
        if number_of_nodes is not None:
            pulumi.set(__self__, "number_of_nodes", number_of_nodes)
        if v_net_properties is not None:
            pulumi.set(__self__, "v_net_properties", v_net_properties)
    @property
    @pulumi.getter(name="dataFlowProperties")
    def data_flow_properties(self) -> Optional[pulumi.Input['IntegrationRuntimeDataFlowPropertiesArgs']]:
        """
        Data flow properties for managed integration runtime.
        """
        return pulumi.get(self, "data_flow_properties")
    # FIX: this setter had lost its @data_flow_properties.setter decorator and its
    # `def` line was fused onto the getter's return statement ("... | def ..."),
    # which made the module unparseable. Restored to the standard property form.
    @data_flow_properties.setter
    def data_flow_properties(self, value: Optional[pulumi.Input['IntegrationRuntimeDataFlowPropertiesArgs']]):
        pulumi.set(self, "data_flow_properties", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location for managed integration runtime. The supported regions could be found on https://docs.microsoft.com/en-us/azure/data-factory/data-factory-data-movement-activities
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="maxParallelExecutionsPerNode")
    def max_parallel_executions_per_node(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum parallel executions count per node for managed integration runtime.
        """
        return pulumi.get(self, "max_parallel_executions_per_node")
    @max_parallel_executions_per_node.setter
    def max_parallel_executions_per_node(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_parallel_executions_per_node", value)
    @property
    @pulumi.getter(name="nodeSize")
    def node_size(self) -> Optional[pulumi.Input[str]]:
        """
        The node size requirement to managed integration runtime.
        """
        return pulumi.get(self, "node_size")
    @node_size.setter
    def node_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_size", value)
    @property
    @pulumi.getter(name="numberOfNodes")
    def number_of_nodes(self) -> Optional[pulumi.Input[int]]:
        """
        The required number of nodes for managed integration runtime.
        """
        return pulumi.get(self, "number_of_nodes")
    @number_of_nodes.setter
    def number_of_nodes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "number_of_nodes", value)
    @property
    @pulumi.getter(name="vNetProperties")
    def v_net_properties(self) -> Optional[pulumi.Input['IntegrationRuntimeVNetPropertiesArgs']]:
        """
        VNet properties for managed integration runtime.
        """
        return pulumi.get(self, "v_net_properties")
    @v_net_properties.setter
    def v_net_properties(self, value: Optional[pulumi.Input['IntegrationRuntimeVNetPropertiesArgs']]):
        pulumi.set(self, "v_net_properties", value)
@pulumi.input_type
class IntegrationRuntimeCustomSetupScriptPropertiesArgs:
    def __init__(self, *,
                 blob_container_uri: Optional[pulumi.Input[str]] = None,
                 sas_token: Optional[pulumi.Input['SecureStringArgs']] = None):
        """Custom setup-script properties for a managed dedicated integration runtime.

        :param blob_container_uri: URI of the Azure blob container holding the setup script.
        :param sas_token: SAS token granting access to that blob container.
        """
        if blob_container_uri is not None:
            pulumi.set(self, "blob_container_uri", blob_container_uri)
        if sas_token is not None:
            pulumi.set(self, "sas_token", sas_token)
    @property
    @pulumi.getter(name="blobContainerUri")
    def blob_container_uri(self) -> Optional[pulumi.Input[str]]:
        """URI of the Azure blob container holding the setup script."""
        return pulumi.get(self, "blob_container_uri")
    @blob_container_uri.setter
    def blob_container_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "blob_container_uri", value)
    @property
    @pulumi.getter(name="sasToken")
    def sas_token(self) -> Optional[pulumi.Input['SecureStringArgs']]:
        """SAS token granting access to that blob container."""
        return pulumi.get(self, "sas_token")
    @sas_token.setter
    def sas_token(self, value: Optional[pulumi.Input['SecureStringArgs']]):
        pulumi.set(self, "sas_token", value)
@pulumi.input_type
class IntegrationRuntimeDataFlowPropertiesArgs:
    def __init__(__self__, *,
                 cleanup: Optional[pulumi.Input[bool]] = None,
                 compute_type: Optional[pulumi.Input[Union[str, 'DataFlowComputeType']]] = None,
                 core_count: Optional[pulumi.Input[int]] = None,
                 time_to_live: Optional[pulumi.Input[int]] = None):
        """
        Data flow properties for managed integration runtime.
        :param pulumi.Input[bool] cleanup: Cluster will not be recycled and it will be used in next data flow activity run until TTL (time to live) is reached if this is set as false. Default is true.
        :param pulumi.Input[Union[str, 'DataFlowComputeType']] compute_type: Compute type of the cluster which will execute data flow job.
        :param pulumi.Input[int] core_count: Core count of the cluster which will execute data flow job. Supported values are: 8, 16, 32, 48, 80, 144 and 272.
        :param pulumi.Input[int] time_to_live: Time to live (in minutes) setting of the cluster which will execute data flow job.
        """
        # Record only explicitly supplied values; omitted inputs stay unset.
        if cleanup is not None:
            pulumi.set(__self__, "cleanup", cleanup)
        if compute_type is not None:
            pulumi.set(__self__, "compute_type", compute_type)
        if core_count is not None:
            pulumi.set(__self__, "core_count", core_count)
        if time_to_live is not None:
            pulumi.set(__self__, "time_to_live", time_to_live)
    # Accessors below proxy pulumi.get/pulumi.set using the same keys as __init__.
    @property
    @pulumi.getter
    def cleanup(self) -> Optional[pulumi.Input[bool]]:
        """
        Cluster will not be recycled and it will be used in next data flow activity run until TTL (time to live) is reached if this is set as false. Default is true.
        """
        return pulumi.get(self, "cleanup")
    @cleanup.setter
    def cleanup(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cleanup", value)
    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> Optional[pulumi.Input[Union[str, 'DataFlowComputeType']]]:
        """
        Compute type of the cluster which will execute data flow job.
        """
        return pulumi.get(self, "compute_type")
    @compute_type.setter
    def compute_type(self, value: Optional[pulumi.Input[Union[str, 'DataFlowComputeType']]]):
        pulumi.set(self, "compute_type", value)
    @property
    @pulumi.getter(name="coreCount")
    def core_count(self) -> Optional[pulumi.Input[int]]:
        """
        Core count of the cluster which will execute data flow job. Supported values are: 8, 16, 32, 48, 80, 144 and 272.
        """
        return pulumi.get(self, "core_count")
    @core_count.setter
    def core_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "core_count", value)
    @property
    @pulumi.getter(name="timeToLive")
    def time_to_live(self) -> Optional[pulumi.Input[int]]:
        """
        Time to live (in minutes) setting of the cluster which will execute data flow job.
        """
        return pulumi.get(self, "time_to_live")
    @time_to_live.setter
    def time_to_live(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "time_to_live", value)
@pulumi.input_type
class IntegrationRuntimeDataProxyPropertiesArgs:
    def __init__(__self__, *,
                 connect_via: Optional[pulumi.Input['EntityReferenceArgs']] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 staging_linked_service: Optional[pulumi.Input['EntityReferenceArgs']] = None):
        """
        Data proxy properties for a managed dedicated integration runtime.
        :param pulumi.Input['EntityReferenceArgs'] connect_via: The self-hosted integration runtime reference.
        :param pulumi.Input[str] path: The path to contain the staged data in the Blob storage.
        :param pulumi.Input['EntityReferenceArgs'] staging_linked_service: The staging linked service reference.
        """
        # Record only explicitly supplied values; omitted inputs stay unset.
        if connect_via is not None:
            pulumi.set(__self__, "connect_via", connect_via)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if staging_linked_service is not None:
            pulumi.set(__self__, "staging_linked_service", staging_linked_service)
    # Accessors below proxy pulumi.get/pulumi.set using the same keys as __init__.
    @property
    @pulumi.getter(name="connectVia")
    def connect_via(self) -> Optional[pulumi.Input['EntityReferenceArgs']]:
        """
        The self-hosted integration runtime reference.
        """
        return pulumi.get(self, "connect_via")
    @connect_via.setter
    def connect_via(self, value: Optional[pulumi.Input['EntityReferenceArgs']]):
        pulumi.set(self, "connect_via", value)
    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        The path to contain the staged data in the Blob storage.
        """
        return pulumi.get(self, "path")
    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)
    @property
    @pulumi.getter(name="stagingLinkedService")
    def staging_linked_service(self) -> Optional[pulumi.Input['EntityReferenceArgs']]:
        """
        The staging linked service reference.
        """
        return pulumi.get(self, "staging_linked_service")
    @staging_linked_service.setter
    def staging_linked_service(self, value: Optional[pulumi.Input['EntityReferenceArgs']]):
        pulumi.set(self, "staging_linked_service", value)
@pulumi.input_type
class IntegrationRuntimeSsisCatalogInfoArgs:
    def __init__(__self__, *,
                 catalog_admin_password: Optional[pulumi.Input['SecureStringArgs']] = None,
                 catalog_admin_user_name: Optional[pulumi.Input[str]] = None,
                 catalog_pricing_tier: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeSsisCatalogPricingTier']]] = None,
                 catalog_server_endpoint: Optional[pulumi.Input[str]] = None):
        """
        Catalog information for managed dedicated integration runtime.
        :param pulumi.Input['SecureStringArgs'] catalog_admin_password: The password of the administrator user account of the catalog database.
        :param pulumi.Input[str] catalog_admin_user_name: The administrator user name of catalog database.
        :param pulumi.Input[Union[str, 'IntegrationRuntimeSsisCatalogPricingTier']] catalog_pricing_tier: The pricing tier for the catalog database. The valid values could be found in https://azure.microsoft.com/en-us/pricing/details/sql-database/
        :param pulumi.Input[str] catalog_server_endpoint: The catalog database server URL.
        """
        # Record only explicitly supplied values; omitted inputs stay unset.
        if catalog_admin_password is not None:
            pulumi.set(__self__, "catalog_admin_password", catalog_admin_password)
        if catalog_admin_user_name is not None:
            pulumi.set(__self__, "catalog_admin_user_name", catalog_admin_user_name)
        if catalog_pricing_tier is not None:
            pulumi.set(__self__, "catalog_pricing_tier", catalog_pricing_tier)
        if catalog_server_endpoint is not None:
            pulumi.set(__self__, "catalog_server_endpoint", catalog_server_endpoint)
    # Accessors below proxy pulumi.get/pulumi.set using the same keys as __init__.
    @property
    @pulumi.getter(name="catalogAdminPassword")
    def catalog_admin_password(self) -> Optional[pulumi.Input['SecureStringArgs']]:
        """
        The password of the administrator user account of the catalog database.
        """
        return pulumi.get(self, "catalog_admin_password")
    @catalog_admin_password.setter
    def catalog_admin_password(self, value: Optional[pulumi.Input['SecureStringArgs']]):
        pulumi.set(self, "catalog_admin_password", value)
    @property
    @pulumi.getter(name="catalogAdminUserName")
    def catalog_admin_user_name(self) -> Optional[pulumi.Input[str]]:
        """
        The administrator user name of catalog database.
        """
        return pulumi.get(self, "catalog_admin_user_name")
    @catalog_admin_user_name.setter
    def catalog_admin_user_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "catalog_admin_user_name", value)
    @property
    @pulumi.getter(name="catalogPricingTier")
    def catalog_pricing_tier(self) -> Optional[pulumi.Input[Union[str, 'IntegrationRuntimeSsisCatalogPricingTier']]]:
        """
        The pricing tier for the catalog database. The valid values could be found in https://azure.microsoft.com/en-us/pricing/details/sql-database/
        """
        return pulumi.get(self, "catalog_pricing_tier")
    @catalog_pricing_tier.setter
    def catalog_pricing_tier(self, value: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeSsisCatalogPricingTier']]]):
        pulumi.set(self, "catalog_pricing_tier", value)
    @property
    @pulumi.getter(name="catalogServerEndpoint")
    def catalog_server_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The catalog database server URL.
        """
        return pulumi.get(self, "catalog_server_endpoint")
    @catalog_server_endpoint.setter
    def catalog_server_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "catalog_server_endpoint", value)
@pulumi.input_type
class IntegrationRuntimeSsisPropertiesArgs:
    def __init__(__self__, *,
                 catalog_info: Optional[pulumi.Input['IntegrationRuntimeSsisCatalogInfoArgs']] = None,
                 custom_setup_script_properties: Optional[pulumi.Input['IntegrationRuntimeCustomSetupScriptPropertiesArgs']] = None,
                 data_proxy_properties: Optional[pulumi.Input['IntegrationRuntimeDataProxyPropertiesArgs']] = None,
                 edition: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeEdition']]] = None,
                 express_custom_setup_properties: Optional[pulumi.Input[Sequence[pulumi.Input[Union['CmdkeySetupArgs', 'ComponentSetupArgs', 'EnvironmentVariableSetupArgs']]]]] = None,
                 license_type: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeLicenseType']]] = None):
        """
        SSIS properties for managed integration runtime.
        :param pulumi.Input['IntegrationRuntimeSsisCatalogInfoArgs'] catalog_info: Catalog information for managed dedicated integration runtime.
        :param pulumi.Input['IntegrationRuntimeCustomSetupScriptPropertiesArgs'] custom_setup_script_properties: Custom setup script properties for a managed dedicated integration runtime.
        :param pulumi.Input['IntegrationRuntimeDataProxyPropertiesArgs'] data_proxy_properties: Data proxy properties for a managed dedicated integration runtime.
        :param pulumi.Input[Union[str, 'IntegrationRuntimeEdition']] edition: The edition for the SSIS Integration Runtime
        :param pulumi.Input[Sequence[pulumi.Input[Union['CmdkeySetupArgs', 'ComponentSetupArgs', 'EnvironmentVariableSetupArgs']]]] express_custom_setup_properties: Custom setup without script properties for a SSIS integration runtime.
        :param pulumi.Input[Union[str, 'IntegrationRuntimeLicenseType']] license_type: License type for bringing your own license scenario.
        """
        # Record only explicitly supplied values; omitted inputs stay unset.
        if catalog_info is not None:
            pulumi.set(__self__, "catalog_info", catalog_info)
        if custom_setup_script_properties is not None:
            pulumi.set(__self__, "custom_setup_script_properties", custom_setup_script_properties)
        if data_proxy_properties is not None:
            pulumi.set(__self__, "data_proxy_properties", data_proxy_properties)
        if edition is not None:
            pulumi.set(__self__, "edition", edition)
        if express_custom_setup_properties is not None:
            pulumi.set(__self__, "express_custom_setup_properties", express_custom_setup_properties)
        if license_type is not None:
            pulumi.set(__self__, "license_type", license_type)
    # Accessors below proxy pulumi.get/pulumi.set using the same keys as __init__.
    @property
    @pulumi.getter(name="catalogInfo")
    def catalog_info(self) -> Optional[pulumi.Input['IntegrationRuntimeSsisCatalogInfoArgs']]:
        """
        Catalog information for managed dedicated integration runtime.
        """
        return pulumi.get(self, "catalog_info")
    @catalog_info.setter
    def catalog_info(self, value: Optional[pulumi.Input['IntegrationRuntimeSsisCatalogInfoArgs']]):
        pulumi.set(self, "catalog_info", value)
    @property
    @pulumi.getter(name="customSetupScriptProperties")
    def custom_setup_script_properties(self) -> Optional[pulumi.Input['IntegrationRuntimeCustomSetupScriptPropertiesArgs']]:
        """
        Custom setup script properties for a managed dedicated integration runtime.
        """
        return pulumi.get(self, "custom_setup_script_properties")
    @custom_setup_script_properties.setter
    def custom_setup_script_properties(self, value: Optional[pulumi.Input['IntegrationRuntimeCustomSetupScriptPropertiesArgs']]):
        pulumi.set(self, "custom_setup_script_properties", value)
    @property
    @pulumi.getter(name="dataProxyProperties")
    def data_proxy_properties(self) -> Optional[pulumi.Input['IntegrationRuntimeDataProxyPropertiesArgs']]:
        """
        Data proxy properties for a managed dedicated integration runtime.
        """
        return pulumi.get(self, "data_proxy_properties")
    @data_proxy_properties.setter
    def data_proxy_properties(self, value: Optional[pulumi.Input['IntegrationRuntimeDataProxyPropertiesArgs']]):
        pulumi.set(self, "data_proxy_properties", value)
    @property
    @pulumi.getter
    def edition(self) -> Optional[pulumi.Input[Union[str, 'IntegrationRuntimeEdition']]]:
        """
        The edition for the SSIS Integration Runtime
        """
        return pulumi.get(self, "edition")
    @edition.setter
    def edition(self, value: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeEdition']]]):
        pulumi.set(self, "edition", value)
    @property
    @pulumi.getter(name="expressCustomSetupProperties")
    def express_custom_setup_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union['CmdkeySetupArgs', 'ComponentSetupArgs', 'EnvironmentVariableSetupArgs']]]]]:
        """
        Custom setup without script properties for a SSIS integration runtime.
        """
        return pulumi.get(self, "express_custom_setup_properties")
    @express_custom_setup_properties.setter
    def express_custom_setup_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union['CmdkeySetupArgs', 'ComponentSetupArgs', 'EnvironmentVariableSetupArgs']]]]]):
        pulumi.set(self, "express_custom_setup_properties", value)
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[pulumi.Input[Union[str, 'IntegrationRuntimeLicenseType']]]:
        """
        License type for bringing your own license scenario.
        """
        return pulumi.get(self, "license_type")
    @license_type.setter
    def license_type(self, value: Optional[pulumi.Input[Union[str, 'IntegrationRuntimeLicenseType']]]):
        pulumi.set(self, "license_type", value)
@pulumi.input_type
class IntegrationRuntimeVNetPropertiesArgs:
    def __init__(__self__, *,
                 public_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subnet: Optional[pulumi.Input[str]] = None,
                 v_net_id: Optional[pulumi.Input[str]] = None):
        """
        VNet properties for managed integration runtime.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] public_ips: Resource IDs of the public IP addresses that this integration runtime will use.
        :param pulumi.Input[str] subnet: The name of the subnet this integration runtime will join.
        :param pulumi.Input[str] v_net_id: The ID of the VNet that this integration runtime will join.
        """
        # Record only explicitly supplied values; omitted inputs stay unset.
        if public_ips is not None:
            pulumi.set(__self__, "public_ips", public_ips)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
        if v_net_id is not None:
            pulumi.set(__self__, "v_net_id", v_net_id)
    # Accessors below proxy pulumi.get/pulumi.set using the same keys as __init__.
    @property
    @pulumi.getter(name="publicIPs")
    def public_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Resource IDs of the public IP addresses that this integration runtime will use.
        """
        return pulumi.get(self, "public_ips")
    @public_ips.setter
    def public_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "public_ips", value)
    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the subnet this integration runtime will join.
        """
        return pulumi.get(self, "subnet")
    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet", value)
    @property
    @pulumi.getter(name="vNetId")
    def v_net_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the VNet that this integration runtime will join.
        """
        return pulumi.get(self, "v_net_id")
    @v_net_id.setter
    def v_net_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "v_net_id", value)
@pulumi.input_type
class LibraryInfoArgs:
    def __init__(__self__, *,
                 container_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 uploaded_timestamp: Optional[pulumi.Input[str]] = None):
        """
        Library/package information of a Big Data pool powered by Apache Spark
        :param pulumi.Input[str] container_name: Storage blob container name.
        :param pulumi.Input[str] name: Name of the library.
        :param pulumi.Input[str] path: Storage blob path of library.
        :param pulumi.Input[str] type: Type of the library.
        :param pulumi.Input[str] uploaded_timestamp: The last update time of the library.
        """
        # Record only explicitly supplied values; omitted inputs stay unset.
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if uploaded_timestamp is not None:
            pulumi.set(__self__, "uploaded_timestamp", uploaded_timestamp)
    # Accessors below proxy pulumi.get/pulumi.set using the same keys as __init__.
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Storage blob container name.
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the library.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        Storage blob path of library.
        """
        return pulumi.get(self, "path")
    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of the library.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="uploadedTimestamp")
    def uploaded_timestamp(self) -> Optional[pulumi.Input[str]]:
        """
        The last update time of the library.
        """
        return pulumi.get(self, "uploaded_timestamp")
    @uploaded_timestamp.setter
    def uploaded_timestamp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uploaded_timestamp", value)
@pulumi.input_type
class LibraryRequirementsArgs:
def __init__(__self__, *,
content: Optional[pulumi.Input[str]] = None,
filename: Optional[pulumi.Input[str]] = None):
"""
Library requirements for a Big Data pool powered by Apache Spark
:param pulumi.Input[str] content: The library requirements.
:param pulumi.Input[str] filename: The filename of the library requirements file.
"""
if content is not None:
pulumi.set(__self__, "content", content)
if filename is not None:
pulumi.set(__self__, "filename", filename)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
The library requirements.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter
def filename(self) -> Optional[pulumi.Input[str]]:
"""
The filename of the library requirements file.
"""
return pulumi.get(self, "filename")
@filename.setter
def filename(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "filename", value)
@pulumi.input_type
class LinkedIntegrationRuntimeKeyAuthorizationArgs:
def __init__(__self__, *,
authorization_type: pulumi.Input[str],
key: pulumi.Input['SecureStringArgs']):
"""
The key authorization type integration runtime.
:param pulumi.Input[str] authorization_type: The authorization type for integration runtime sharing.
Expected value is 'Key'.
:param pulumi.Input['SecureStringArgs'] key: The key used for authorization.
"""
pulumi.set(__self__, "authorization_type", 'Key')
pulumi.set(__self__, "key", key)
@property
@pulumi.getter(name="authorizationType")
def authorization_type(self) -> pulumi.Input[str]:
"""
The authorization type for integration runtime sharing.
Expected value is 'Key'.
"""
return pulumi.get(self, "authorization_type")
@authorization_type.setter
def authorization_type(self, value: pulumi.Input[str]):
pulumi.set(self, "authorization_type", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input['SecureStringArgs']:
"""
The key used for authorization.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input['SecureStringArgs']):
pulumi.set(self, "key", value)
@pulumi.input_type
class LinkedIntegrationRuntimeRbacAuthorizationArgs:
def __init__(__self__, *,
authorization_type: pulumi.Input[str],
resource_id: pulumi.Input[str]):
"""
The role based access control (RBAC) authorization type integration runtime.
:param pulumi.Input[str] authorization_type: The authorization type for integration runtime sharing.
Expected value is 'RBAC'.
:param pulumi.Input[str] resource_id: The resource identifier of the integration runtime to be shared.
"""
pulumi.set(__self__, "authorization_type", 'RBAC')
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="authorizationType")
def authorization_type(self) -> pulumi.Input[str]:
"""
The authorization type for integration runtime sharing.
Expected value is 'RBAC'.
"""
return pulumi.get(self, "authorization_type")
@authorization_type.setter
def authorization_type(self, value: pulumi.Input[str]):
pulumi.set(self, "authorization_type", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Input[str]:
"""
The resource identifier of the integration runtime to be shared.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class ManagedIdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ResourceIdentityType']] = None):
"""
The workspace managed identity
:param pulumi.Input['ResourceIdentityType'] type: The type of managed identity for the workspace
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The type of managed identity for the workspace
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ManagedIntegrationRuntimeArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
compute_properties: Optional[pulumi.Input['IntegrationRuntimeComputePropertiesArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
managed_virtual_network: Optional[pulumi.Input['ManagedVirtualNetworkReferenceArgs']] = None,
ssis_properties: Optional[pulumi.Input['IntegrationRuntimeSsisPropertiesArgs']] = None):
"""
Managed integration runtime, including managed elastic and managed dedicated integration runtimes.
:param pulumi.Input[str] type: The type of integration runtime.
Expected value is 'Managed'.
:param pulumi.Input['IntegrationRuntimeComputePropertiesArgs'] compute_properties: The compute resource for managed integration runtime.
:param pulumi.Input[str] description: Integration runtime description.
:param pulumi.Input['ManagedVirtualNetworkReferenceArgs'] managed_virtual_network: Managed Virtual Network reference.
:param pulumi.Input['IntegrationRuntimeSsisPropertiesArgs'] ssis_properties: SSIS properties for managed integration runtime.
"""
pulumi.set(__self__, "type", 'Managed')
if compute_properties is not None:
pulumi.set(__self__, "compute_properties", compute_properties)
if description is not None:
pulumi.set(__self__, "description", description)
if managed_virtual_network is not None:
pulumi.set(__self__, "managed_virtual_network", managed_virtual_network)
if ssis_properties is not None:
pulumi.set(__self__, "ssis_properties", ssis_properties)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of integration runtime.
Expected value is 'Managed'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="computeProperties")
def compute_properties(self) -> Optional[pulumi.Input['IntegrationRuntimeComputePropertiesArgs']]:
"""
The compute resource for managed integration runtime.
"""
return pulumi.get(self, "compute_properties")
@compute_properties.setter
def compute_properties(self, value: Optional[pulumi.Input['IntegrationRuntimeComputePropertiesArgs']]):
pulumi.set(self, "compute_properties", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Integration runtime description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="managedVirtualNetwork")
def managed_virtual_network(self) -> Optional[pulumi.Input['ManagedVirtualNetworkReferenceArgs']]:
"""
Managed Virtual Network reference.
"""
return pulumi.get(self, "managed_virtual_network")
@managed_virtual_network.setter
def managed_virtual_network(self, value: Optional[pulumi.Input['ManagedVirtualNetworkReferenceArgs']]):
pulumi.set(self, "managed_virtual_network", value)
@property
@pulumi.getter(name="ssisProperties")
def ssis_properties(self) -> Optional[pulumi.Input['IntegrationRuntimeSsisPropertiesArgs']]:
"""
SSIS properties for managed integration runtime.
"""
return pulumi.get(self, "ssis_properties")
@ssis_properties.setter
def ssis_properties(self, value: Optional[pulumi.Input['IntegrationRuntimeSsisPropertiesArgs']]):
pulumi.set(self, "ssis_properties", value)
@pulumi.input_type
class ManagedVirtualNetworkReferenceArgs:
def __init__(__self__, *,
reference_name: pulumi.Input[str],
type: pulumi.Input[str]):
"""
Managed Virtual Network reference type.
:param pulumi.Input[str] reference_name: Reference ManagedVirtualNetwork name.
:param pulumi.Input[str] type: Managed Virtual Network reference type.
"""
pulumi.set(__self__, "reference_name", reference_name)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="referenceName")
def reference_name(self) -> pulumi.Input[str]:
"""
Reference ManagedVirtualNetwork name.
"""
return pulumi.get(self, "reference_name")
@reference_name.setter
def reference_name(self, value: pulumi.Input[str]):
pulumi.set(self, "reference_name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Managed Virtual Network reference type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ManagedVirtualNetworkSettingsArgs:
def __init__(__self__, *,
allowed_aad_tenant_ids_for_linking: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
linked_access_check_on_target_resource: Optional[pulumi.Input[bool]] = None,
prevent_data_exfiltration: Optional[pulumi.Input[bool]] = None):
"""
Managed Virtual Network Settings
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_aad_tenant_ids_for_linking: Allowed Aad Tenant Ids For Linking
:param pulumi.Input[bool] linked_access_check_on_target_resource: Linked Access Check On Target Resource
:param pulumi.Input[bool] prevent_data_exfiltration: Prevent Data Exfiltration
"""
if allowed_aad_tenant_ids_for_linking is not None:
pulumi.set(__self__, "allowed_aad_tenant_ids_for_linking", allowed_aad_tenant_ids_for_linking)
if linked_access_check_on_target_resource is not None:
pulumi.set(__self__, "linked_access_check_on_target_resource", linked_access_check_on_target_resource)
if prevent_data_exfiltration is not None:
pulumi.set(__self__, "prevent_data_exfiltration", prevent_data_exfiltration)
@property
@pulumi.getter(name="allowedAadTenantIdsForLinking")
def allowed_aad_tenant_ids_for_linking(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Allowed Aad Tenant Ids For Linking
"""
return pulumi.get(self, "allowed_aad_tenant_ids_for_linking")
@allowed_aad_tenant_ids_for_linking.setter
def allowed_aad_tenant_ids_for_linking(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_aad_tenant_ids_for_linking", value)
@property
@pulumi.getter(name="linkedAccessCheckOnTargetResource")
def linked_access_check_on_target_resource(self) -> Optional[pulumi.Input[bool]]:
"""
Linked Access Check On Target Resource
"""
return pulumi.get(self, "linked_access_check_on_target_resource")
@linked_access_check_on_target_resource.setter
def linked_access_check_on_target_resource(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "linked_access_check_on_target_resource", value)
@property
@pulumi.getter(name="preventDataExfiltration")
def prevent_data_exfiltration(self) -> Optional[pulumi.Input[bool]]:
"""
Prevent Data Exfiltration
"""
return pulumi.get(self, "prevent_data_exfiltration")
@prevent_data_exfiltration.setter
def prevent_data_exfiltration(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "prevent_data_exfiltration", value)
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']] = None):
"""
A private endpoint connection
:param pulumi.Input['PrivateLinkServiceConnectionStateArgs'] private_link_service_connection_state: Connection state of the private endpoint connection.
"""
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]:
"""
Connection state of the private endpoint connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Connection state details of the private endpoint
:param pulumi.Input[str] description: The private link service connection description.
:param pulumi.Input[str] status: The private link service connection status.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The private link service connection description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The private link service connection status.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class PurviewConfigurationArgs:
def __init__(__self__, *,
purview_resource_id: Optional[pulumi.Input[str]] = None):
"""
Purview Configuration
:param pulumi.Input[str] purview_resource_id: Purview Resource ID
"""
if purview_resource_id is not None:
pulumi.set(__self__, "purview_resource_id", purview_resource_id)
@property
@pulumi.getter(name="purviewResourceId")
def purview_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Purview Resource ID
"""
return pulumi.get(self, "purview_resource_id")
@purview_resource_id.setter
def purview_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "purview_resource_id", value)
@pulumi.input_type
class SecureStringArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[str]):
"""
Azure Synapse secure string definition. The string value will be masked with asterisks '*' during Get or List API calls.
:param pulumi.Input[str] type: Type of the secret.
Expected value is 'SecureString'.
:param pulumi.Input[str] value: Value of secure string.
"""
pulumi.set(__self__, "type", 'SecureString')
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of the secret.
Expected value is 'SecureString'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Value of secure string.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SelfHostedIntegrationRuntimeArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
linked_info: Optional[pulumi.Input[Union['LinkedIntegrationRuntimeKeyAuthorizationArgs', 'LinkedIntegrationRuntimeRbacAuthorizationArgs']]] = None):
"""
Self-hosted integration runtime.
:param pulumi.Input[str] type: The type of integration runtime.
Expected value is 'SelfHosted'.
:param pulumi.Input[str] description: Integration runtime description.
:param pulumi.Input[Union['LinkedIntegrationRuntimeKeyAuthorizationArgs', 'LinkedIntegrationRuntimeRbacAuthorizationArgs']] linked_info: Linked integration runtime type from data factory
"""
pulumi.set(__self__, "type", 'SelfHosted')
if description is not None:
pulumi.set(__self__, "description", description)
if linked_info is not None:
pulumi.set(__self__, "linked_info", linked_info)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of integration runtime.
Expected value is 'SelfHosted'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Integration runtime description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="linkedInfo")
def linked_info(self) -> Optional[pulumi.Input[Union['LinkedIntegrationRuntimeKeyAuthorizationArgs', 'LinkedIntegrationRuntimeRbacAuthorizationArgs']]]:
"""
Linked integration runtime type from data factory
"""
return pulumi.get(self, "linked_info")
@linked_info.setter
def linked_info(self, value: Optional[pulumi.Input[Union['LinkedIntegrationRuntimeKeyAuthorizationArgs', 'LinkedIntegrationRuntimeRbacAuthorizationArgs']]]):
pulumi.set(self, "linked_info", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
capacity: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
"""
SQL pool SKU
:param pulumi.Input[int] capacity: If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the resource this may be omitted.
:param pulumi.Input[str] name: The SKU name
:param pulumi.Input[str] tier: The service tier
"""
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if name is not None:
pulumi.set(__self__, "name", name)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the resource this may be omitted.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The SKU name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
"""
The service tier
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
@pulumi.input_type
class SqlPoolVulnerabilityAssessmentRuleBaselineItemArgs:
def __init__(__self__, *,
result: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
Properties for an Sql pool vulnerability assessment rule baseline's result.
:param pulumi.Input[Sequence[pulumi.Input[str]]] result: The rule baseline result
"""
pulumi.set(__self__, "result", result)
@property
@pulumi.getter
def result(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The rule baseline result
"""
return pulumi.get(self, "result")
@result.setter
def result(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "result", value)
@pulumi.input_type
class VirtualNetworkProfileArgs:
def __init__(__self__, *,
compute_subnet_id: Optional[pulumi.Input[str]] = None):
"""
Virtual Network Profile
:param pulumi.Input[str] compute_subnet_id: Subnet ID used for computes in workspace
"""
if compute_subnet_id is not None:
pulumi.set(__self__, "compute_subnet_id", compute_subnet_id)
@property
@pulumi.getter(name="computeSubnetId")
def compute_subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
Subnet ID used for computes in workspace
"""
return pulumi.get(self, "compute_subnet_id")
@compute_subnet_id.setter
def compute_subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_subnet_id", value)
@pulumi.input_type
class VulnerabilityAssessmentRecurringScansPropertiesArgs:
def __init__(__self__, *,
email_subscription_admins: Optional[pulumi.Input[bool]] = None,
emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None):
"""
Properties of a Vulnerability Assessment recurring scans.
:param pulumi.Input[bool] email_subscription_admins: Specifies that the schedule scan notification will be is sent to the subscription administrators.
:param pulumi.Input[Sequence[pulumi.Input[str]]] emails: Specifies an array of e-mail addresses to which the scan notification is sent.
:param pulumi.Input[bool] is_enabled: Recurring scans state.
"""
if email_subscription_admins is None:
email_subscription_admins = True
if email_subscription_admins is not None:
pulumi.set(__self__, "email_subscription_admins", email_subscription_admins)
if emails is not None:
pulumi.set(__self__, "emails", emails)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
@property
@pulumi.getter(name="emailSubscriptionAdmins")
def email_subscription_admins(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies that the schedule scan notification will be is sent to the subscription administrators.
"""
return pulumi.get(self, "email_subscription_admins")
@email_subscription_admins.setter
def email_subscription_admins(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "email_subscription_admins", value)
@property
@pulumi.getter
def emails(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies an array of e-mail addresses to which the scan notification is sent.
"""
return pulumi.get(self, "emails")
@emails.setter
def emails(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "emails", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Recurring scans state.
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_enabled", value)
@pulumi.input_type
class WorkspaceKeyDetailsArgs:
def __init__(__self__, *,
key_vault_url: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Details of the customer managed key associated with the workspace
:param pulumi.Input[str] key_vault_url: Workspace Key sub-resource key vault url
:param pulumi.Input[str] name: Workspace Key sub-resource name
"""
if key_vault_url is not None:
pulumi.set(__self__, "key_vault_url", key_vault_url)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="keyVaultUrl")
def key_vault_url(self) -> Optional[pulumi.Input[str]]:
"""
Workspace Key sub-resource key vault url
"""
return pulumi.get(self, "key_vault_url")
@key_vault_url.setter
def key_vault_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_vault_url", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Workspace Key sub-resource name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class WorkspaceRepositoryConfigurationArgs:
def __init__(__self__, *,
account_name: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input['GitHubClientSecretArgs']] = None,
collaboration_branch: Optional[pulumi.Input[str]] = None,
host_name: Optional[pulumi.Input[str]] = None,
last_commit_id: Optional[pulumi.Input[str]] = None,
project_name: Optional[pulumi.Input[str]] = None,
repository_name: Optional[pulumi.Input[str]] = None,
root_folder: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Git integration settings
:param pulumi.Input[str] account_name: Account name
:param pulumi.Input[str] client_id: GitHub bring your own app client id
:param pulumi.Input['GitHubClientSecretArgs'] client_secret: GitHub bring your own app client secret information.
:param pulumi.Input[str] collaboration_branch: Collaboration branch
:param pulumi.Input[str] host_name: GitHub Enterprise host name. For example: https://github.mydomain.com
:param pulumi.Input[str] last_commit_id: The last commit ID
:param pulumi.Input[str] project_name: VSTS project name
:param pulumi.Input[str] repository_name: Repository name
:param pulumi.Input[str] root_folder: Root folder to use in the repository
:param pulumi.Input[str] tenant_id: The VSTS tenant ID
:param pulumi.Input[str] type: Type of workspace repositoryID configuration. Example WorkspaceVSTSConfiguration, WorkspaceGitHubConfiguration
"""
if account_name is not None:
pulumi.set(__self__, "account_name", account_name)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if collaboration_branch is not None:
pulumi.set(__self__, "collaboration_branch", collaboration_branch)
if host_name is not None:
pulumi.set(__self__, "host_name", host_name)
if last_commit_id is not None:
pulumi.set(__self__, "last_commit_id", last_commit_id)
if project_name is not None:
pulumi.set(__self__, "project_name", project_name)
if repository_name is not None:
pulumi.set(__self__, "repository_name", repository_name)
if root_folder is not None:
pulumi.set(__self__, "root_folder", root_folder)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> Optional[pulumi.Input[str]]:
"""
Account name
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
GitHub bring your own app client id
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input['GitHubClientSecretArgs']]:
"""
GitHub bring your own app client secret information.
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input['GitHubClientSecretArgs']]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="collaborationBranch")
def collaboration_branch(self) -> Optional[pulumi.Input[str]]:
"""
Collaboration branch
"""
return pulumi.get(self, "collaboration_branch")
@collaboration_branch.setter
def collaboration_branch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "collaboration_branch", value)
@property
@pulumi.getter(name="hostName")
def host_name(self) -> Optional[pulumi.Input[str]]:
"""
GitHub Enterprise host name. For example: https://github.mydomain.com
"""
return pulumi.get(self, "host_name")
@host_name.setter
def host_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_name", value)
@property
@pulumi.getter(name="lastCommitId")
def last_commit_id(self) -> Optional[pulumi.Input[str]]:
"""
The last commit ID
"""
return pulumi.get(self, "last_commit_id")
@last_commit_id.setter
def last_commit_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_commit_id", value)
@property
@pulumi.getter(name="projectName")
def project_name(self) -> Optional[pulumi.Input[str]]:
"""
VSTS project name
"""
return pulumi.get(self, "project_name")
@project_name.setter
def project_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_name", value)
@property
@pulumi.getter(name="repositoryName")
def repository_name(self) -> Optional[pulumi.Input[str]]:
"""
Repository name
"""
return pulumi.get(self, "repository_name")
@repository_name.setter
def repository_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository_name", value)
@property
@pulumi.getter(name="rootFolder")
def root_folder(self) -> Optional[pulumi.Input[str]]:
"""
Root folder to use in the repository
"""
return pulumi.get(self, "root_folder")
@root_folder.setter
def root_folder(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "root_folder", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The VSTS tenant ID
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type of workspace repositoryID configuration. Example WorkspaceVSTSConfiguration, WorkspaceGitHubConfiguration
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value) |
@data_flow_properties.setter |
scrape_mars.py |
# Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
from pprint import pprint
from urllib.parse import urlsplit
import pymongo
# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
# Define database and collection
db = client.mars_db
collection = db.items
def init_browser():
# capture path to chrome driver
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
def scrape_info():
| browser = init_browser()
mars_info = {}
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# scrape latest news headline and para
news_title=soup.find('ul', class_='item_list').\
find('li', class_='slide').\
find('div', class_= 'content_title').text
news_para=soup.find("div", class_='article_teaser_body').text
mars_info['news_title'] = news_title
mars_info['news_para'] = news_para
# Featured image
featured_image = "https://www.nasa.gov/image-feature/jpl/perseverance-s-first-full-color-look-at-mars"
browser.visit(featured_image)
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(featured_image))
# click on featured image using xpath
xpath = '//*[@id="468477"]/div[2]/div[2]/a/img'
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
#get image url using BeautifulSoup
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
featured_img_url = soup.find('img')['src']
mars_info['featured_img_url'] = featured_img_url
# Mars Facts
url_facts = "https://space-facts.com/mars/"
table = pd.read_html(url_facts)
table[0]
df_mars_facts = table[0]
df_mars_facts.columns = ["Parameter", "Values"]
fact_table = df_mars_facts.set_index(["Parameter"])
mars_html_table = fact_table.to_html()
mars_html_table = mars_html_table.replace("\n", "")
mars_info['mars_facts_table'] = mars_html_table
# Mars Hemisphere
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
#Get base url
hemisphere_base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(hemisphere_url))
# list of xpaths for mars hemispheres
xpaths = ['//*[@id="product-section"]/div[2]/div[1]/a/img', '//*[@id="product-section"]/div[2]/div[2]/a/img', '//*[@id="product-section"]/div[2]/div[3]/a/img', '//*[@id="product-section"]/div[2]/div[4]/a/img']
hemisphere_img_urls = []
for xpath in xpaths:
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
#get image url using BeautifulSoup
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
img_url = soup.find("img", class_='wide-image')["src"]
time.sleep(1)
img_url = hemisphere_base_url + img_url
title = soup.find("h2",class_="title").text
hemisphere_img_urls.append({'title': title, 'image_url':img_url})
mars_info['hemisphere_img_urls'] = hemisphere_img_urls
browser.quit()
# collection.insert_one(mars_info)
return mars_info |
|
bellerophon_radix_tests.rs | #![cfg(feature = "radix")]
mod bellerophon;
use bellerophon::bellerophon_test;
use lexical_util::format::NumberFormatBuilder;
const BASE3: u128 = NumberFormatBuilder::from_radix(3);
#[test]
fn | () {
// Checking the exact rounding of the digits close to 5e-324.
bellerophon_test::<f64, { BASE3 }>(5, -640, false, 4172256988254845, 10);
bellerophon_test::<f64, { BASE3 }>(2, -679, false, 0, 0);
bellerophon_test::<f64, { BASE3 }>(3, -679, false, 1, 0);
bellerophon_test::<f64, { BASE3 }>(6, -680, false, 0, 0);
bellerophon_test::<f64, { BASE3 }>(7, -680, false, 1, 0);
bellerophon_test::<f64, { BASE3 }>(20, -681, false, 0, 0);
bellerophon_test::<f64, { BASE3 }>(21, -681, false, 1, 0);
bellerophon_test::<f64, { BASE3 }>(61, -682, false, 0, 0);
bellerophon_test::<f64, { BASE3 }>(62, -682, false, 1, 0);
bellerophon_test::<f64, { BASE3 }>(184, -683, false, 0, 0);
bellerophon_test::<f64, { BASE3 }>(185, -683, false, 1, 0);
bellerophon_test::<f64, { BASE3 }>(554, -684, false, 0, 0);
bellerophon_test::<f64, { BASE3 }>(555, -684, false, 1, 0);
}
| bellerophon_radix_test |
configmaps.go | package configs
import (
"strings"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
"github.com/nginxinc/kubernetes-ingress/internal/configs/version1"
)
// ParseConfigMap parses ConfigMap into ConfigParams.
func ParseConfigMap(cfgm *v1.ConfigMap, nginxPlus bool, hasAppProtect bool, hasAppProtectDos bool) *ConfigParams {
cfgParams := NewDefaultConfigParams(nginxPlus)
if serverTokens, exists, err := GetMapKeyAsBool(cfgm.Data, "server-tokens", cfgm); exists {
if err != nil {
if nginxPlus {
cfgParams.ServerTokens = cfgm.Data["server-tokens"]
} else {
glog.Error(err)
}
} else {
cfgParams.ServerTokens = "off"
if serverTokens {
cfgParams.ServerTokens = "on"
}
}
}
if lbMethod, exists := cfgm.Data["lb-method"]; exists {
if nginxPlus { | } else {
cfgParams.LBMethod = parsedMethod
}
} else {
if parsedMethod, err := ParseLBMethod(lbMethod); err != nil {
glog.Errorf("Configmap %s/%s: Invalid value for the lb-method key: got %q: %v", cfgm.GetNamespace(), cfgm.GetName(), lbMethod, err)
} else {
cfgParams.LBMethod = parsedMethod
}
}
}
if proxyConnectTimeout, exists := cfgm.Data["proxy-connect-timeout"]; exists {
cfgParams.ProxyConnectTimeout = proxyConnectTimeout
}
if proxyReadTimeout, exists := cfgm.Data["proxy-read-timeout"]; exists {
cfgParams.ProxyReadTimeout = proxyReadTimeout
}
if proxySendTimeout, exists := cfgm.Data["proxy-send-timeout"]; exists {
cfgParams.ProxySendTimeout = proxySendTimeout
}
if proxyHideHeaders, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "proxy-hide-headers", cfgm, ","); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.ProxyHideHeaders = proxyHideHeaders
}
}
if proxyPassHeaders, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "proxy-pass-headers", cfgm, ","); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.ProxyPassHeaders = proxyPassHeaders
}
}
if clientMaxBodySize, exists := cfgm.Data["client-max-body-size"]; exists {
cfgParams.ClientMaxBodySize = clientMaxBodySize
}
if serverNamesHashBucketSize, exists := cfgm.Data["server-names-hash-bucket-size"]; exists {
cfgParams.MainServerNamesHashBucketSize = serverNamesHashBucketSize
}
if serverNamesHashMaxSize, exists := cfgm.Data["server-names-hash-max-size"]; exists {
cfgParams.MainServerNamesHashMaxSize = serverNamesHashMaxSize
}
if HTTP2, exists, err := GetMapKeyAsBool(cfgm.Data, "http2", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.HTTP2 = HTTP2
}
}
if redirectToHTTPS, exists, err := GetMapKeyAsBool(cfgm.Data, "redirect-to-https", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.RedirectToHTTPS = redirectToHTTPS
}
}
if sslRedirect, exists, err := GetMapKeyAsBool(cfgm.Data, "ssl-redirect", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.SSLRedirect = sslRedirect
}
}
if hsts, exists, err := GetMapKeyAsBool(cfgm.Data, "hsts", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
parsingErrors := false
hstsMaxAge, existsMA, err := GetMapKeyAsInt64(cfgm.Data, "hsts-max-age", cfgm)
if existsMA && err != nil {
glog.Error(err)
parsingErrors = true
}
hstsIncludeSubdomains, existsIS, err := GetMapKeyAsBool(cfgm.Data, "hsts-include-subdomains", cfgm)
if existsIS && err != nil {
glog.Error(err)
parsingErrors = true
}
hstsBehindProxy, existsBP, err := GetMapKeyAsBool(cfgm.Data, "hsts-behind-proxy", cfgm)
if existsBP && err != nil {
glog.Error(err)
parsingErrors = true
}
if parsingErrors {
glog.Errorf("Configmap %s/%s: There are configuration issues with hsts annotations, skipping options for all hsts settings", cfgm.GetNamespace(), cfgm.GetName())
} else {
cfgParams.HSTS = hsts
if existsMA {
cfgParams.HSTSMaxAge = hstsMaxAge
}
if existsIS {
cfgParams.HSTSIncludeSubdomains = hstsIncludeSubdomains
}
if existsBP {
cfgParams.HSTSBehindProxy = hstsBehindProxy
}
}
}
}
if proxyProtocol, exists, err := GetMapKeyAsBool(cfgm.Data, "proxy-protocol", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.ProxyProtocol = proxyProtocol
}
}
if realIPHeader, exists := cfgm.Data["real-ip-header"]; exists {
cfgParams.RealIPHeader = realIPHeader
}
if setRealIPFrom, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "set-real-ip-from", cfgm, ","); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.SetRealIPFrom = setRealIPFrom
}
}
if realIPRecursive, exists, err := GetMapKeyAsBool(cfgm.Data, "real-ip-recursive", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.RealIPRecursive = realIPRecursive
}
}
if sslProtocols, exists := cfgm.Data["ssl-protocols"]; exists {
cfgParams.MainServerSSLProtocols = sslProtocols
}
if sslPreferServerCiphers, exists, err := GetMapKeyAsBool(cfgm.Data, "ssl-prefer-server-ciphers", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainServerSSLPreferServerCiphers = sslPreferServerCiphers
}
}
if sslCiphers, exists := cfgm.Data["ssl-ciphers"]; exists {
cfgParams.MainServerSSLCiphers = strings.Trim(sslCiphers, "\n")
}
if sslDHParamFile, exists := cfgm.Data["ssl-dhparam-file"]; exists {
sslDHParamFile = strings.Trim(sslDHParamFile, "\n")
cfgParams.MainServerSSLDHParamFileContent = &sslDHParamFile
}
if errorLogLevel, exists := cfgm.Data["error-log-level"]; exists {
cfgParams.MainErrorLogLevel = errorLogLevel
}
if accessLogOff, exists, err := GetMapKeyAsBool(cfgm.Data, "access-log-off", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainAccessLogOff = accessLogOff
}
}
if logFormat, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "log-format", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainLogFormat = logFormat
}
}
if logFormatEscaping, exists := cfgm.Data["log-format-escaping"]; exists {
logFormatEscaping = strings.TrimSpace(logFormatEscaping)
if logFormatEscaping != "" {
cfgParams.MainLogFormatEscaping = logFormatEscaping
}
}
if streamLogFormat, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "stream-log-format", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainStreamLogFormat = streamLogFormat
}
}
if streamLogFormatEscaping, exists := cfgm.Data["stream-log-format-escaping"]; exists {
streamLogFormatEscaping = strings.TrimSpace(streamLogFormatEscaping)
if streamLogFormatEscaping != "" {
cfgParams.MainStreamLogFormatEscaping = streamLogFormatEscaping
}
}
if defaultServerAccessLogOff, exists, err := GetMapKeyAsBool(cfgm.Data, "default-server-access-log-off", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.DefaultServerAccessLogOff = defaultServerAccessLogOff
}
}
if defaultServerReturn, exists := cfgm.Data["default-server-return"]; exists {
cfgParams.DefaultServerReturn = defaultServerReturn
}
if proxyBuffering, exists, err := GetMapKeyAsBool(cfgm.Data, "proxy-buffering", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.ProxyBuffering = proxyBuffering
}
}
if proxyBuffers, exists := cfgm.Data["proxy-buffers"]; exists {
cfgParams.ProxyBuffers = proxyBuffers
}
if proxyBufferSize, exists := cfgm.Data["proxy-buffer-size"]; exists {
cfgParams.ProxyBufferSize = proxyBufferSize
}
if proxyMaxTempFileSize, exists := cfgm.Data["proxy-max-temp-file-size"]; exists {
cfgParams.ProxyMaxTempFileSize = proxyMaxTempFileSize
}
if mainMainSnippets, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "main-snippets", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainMainSnippets = mainMainSnippets
}
}
if mainHTTPSnippets, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "http-snippets", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainHTTPSnippets = mainHTTPSnippets
}
}
if locationSnippets, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "location-snippets", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.LocationSnippets = locationSnippets
}
}
if serverSnippets, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "server-snippets", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.ServerSnippets = serverSnippets
}
}
if _, exists, err := GetMapKeyAsInt(cfgm.Data, "worker-processes", cfgm); exists {
if err != nil && cfgm.Data["worker-processes"] != "auto" {
glog.Errorf("Configmap %s/%s: Invalid value for worker-processes key: must be an integer or the string 'auto', got %q", cfgm.GetNamespace(), cfgm.GetName(), cfgm.Data["worker-processes"])
} else {
cfgParams.MainWorkerProcesses = cfgm.Data["worker-processes"]
}
}
if workerCPUAffinity, exists := cfgm.Data["worker-cpu-affinity"]; exists {
cfgParams.MainWorkerCPUAffinity = workerCPUAffinity
}
if workerShutdownTimeout, exists := cfgm.Data["worker-shutdown-timeout"]; exists {
cfgParams.MainWorkerShutdownTimeout = workerShutdownTimeout
}
if workerConnections, exists := cfgm.Data["worker-connections"]; exists {
cfgParams.MainWorkerConnections = workerConnections
}
if workerRlimitNofile, exists := cfgm.Data["worker-rlimit-nofile"]; exists {
cfgParams.MainWorkerRlimitNofile = workerRlimitNofile
}
if keepalive, exists, err := GetMapKeyAsInt(cfgm.Data, "keepalive", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.Keepalive = keepalive
}
}
if maxFails, exists, err := GetMapKeyAsInt(cfgm.Data, "max-fails", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MaxFails = maxFails
}
}
if upstreamZoneSize, exists := cfgm.Data["upstream-zone-size"]; exists {
cfgParams.UpstreamZoneSize = upstreamZoneSize
}
if failTimeout, exists := cfgm.Data["fail-timeout"]; exists {
cfgParams.FailTimeout = failTimeout
}
if mainTemplate, exists := cfgm.Data["main-template"]; exists {
cfgParams.MainTemplate = &mainTemplate
}
if ingressTemplate, exists := cfgm.Data["ingress-template"]; exists {
cfgParams.IngressTemplate = &ingressTemplate
}
if virtualServerTemplate, exists := cfgm.Data["virtualserver-template"]; exists {
cfgParams.VirtualServerTemplate = &virtualServerTemplate
}
if mainStreamSnippets, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "stream-snippets", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainStreamSnippets = mainStreamSnippets
}
}
if resolverAddresses, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "resolver-addresses", cfgm, ","); exists {
if err != nil {
glog.Error(err)
} else {
if nginxPlus {
cfgParams.ResolverAddresses = resolverAddresses
} else {
glog.Warning("ConfigMap key 'resolver-addresses' requires NGINX Plus")
}
}
}
if resolverIpv6, exists, err := GetMapKeyAsBool(cfgm.Data, "resolver-ipv6", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
if nginxPlus {
cfgParams.ResolverIPV6 = resolverIpv6
} else {
glog.Warning("ConfigMap key 'resolver-ipv6' requires NGINX Plus")
}
}
}
if resolverValid, exists := cfgm.Data["resolver-valid"]; exists {
if nginxPlus {
cfgParams.ResolverValid = resolverValid
} else {
glog.Warning("ConfigMap key 'resolver-valid' requires NGINX Plus")
}
}
if resolverTimeout, exists := cfgm.Data["resolver-timeout"]; exists {
if nginxPlus {
cfgParams.ResolverTimeout = resolverTimeout
} else {
glog.Warning("ConfigMap key 'resolver-timeout' requires NGINX Plus")
}
}
if keepaliveTimeout, exists := cfgm.Data["keepalive-timeout"]; exists {
cfgParams.MainKeepaliveTimeout = keepaliveTimeout
}
if keepaliveRequests, exists, err := GetMapKeyAsInt64(cfgm.Data, "keepalive-requests", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainKeepaliveRequests = keepaliveRequests
}
}
if varHashBucketSize, exists, err := GetMapKeyAsUint64(cfgm.Data, "variables-hash-bucket-size", cfgm, true); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.VariablesHashBucketSize = varHashBucketSize
}
}
if varHashMaxSize, exists, err := GetMapKeyAsUint64(cfgm.Data, "variables-hash-max-size", cfgm, false); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.VariablesHashMaxSize = varHashMaxSize
}
}
if openTracingTracer, exists := cfgm.Data["opentracing-tracer"]; exists {
cfgParams.MainOpenTracingTracer = openTracingTracer
}
if openTracingTracerConfig, exists := cfgm.Data["opentracing-tracer-config"]; exists {
cfgParams.MainOpenTracingTracerConfig = openTracingTracerConfig
}
if cfgParams.MainOpenTracingTracer != "" || cfgParams.MainOpenTracingTracerConfig != "" {
cfgParams.MainOpenTracingLoadModule = true
}
if openTracing, exists, err := GetMapKeyAsBool(cfgm.Data, "opentracing", cfgm); exists {
if err != nil {
glog.Error(err)
} else {
if cfgParams.MainOpenTracingLoadModule {
cfgParams.MainOpenTracingEnabled = openTracing
} else {
glog.Error("ConfigMap Key 'opentracing' requires both 'opentracing-tracer' and 'opentracing-tracer-config' Keys configured, Opentracing will be disabled")
}
}
}
if hasAppProtect {
if appProtectFailureModeAction, exists := cfgm.Data["app-protect-failure-mode-action"]; exists {
if appProtectFailureModeAction == "pass" || appProtectFailureModeAction == "drop" {
cfgParams.MainAppProtectFailureModeAction = appProtectFailureModeAction
} else {
glog.Error("ConfigMap Key 'app-protect-failure-mode-action' must have value 'pass' or 'drop'. Ignoring.")
}
}
if appProtectCompressedRequestsAction, exists := cfgm.Data["app-protect-compressed-requests-action"]; exists {
if appProtectCompressedRequestsAction == "pass" || appProtectCompressedRequestsAction == "drop" {
cfgParams.MainAppProtectCompressedRequestsAction = appProtectCompressedRequestsAction
} else {
glog.Error("ConfigMap Key 'app-protect-compressed-requests-action' must have value 'pass' or 'drop'. Ignoring.")
}
}
if appProtectCookieSeed, exists := cfgm.Data["app-protect-cookie-seed"]; exists {
cfgParams.MainAppProtectCookieSeed = appProtectCookieSeed
}
if appProtectCPUThresholds, exists := cfgm.Data["app-protect-cpu-thresholds"]; exists {
if VerifyAppProtectThresholds(appProtectCPUThresholds) {
cfgParams.MainAppProtectCPUThresholds = appProtectCPUThresholds
} else {
glog.Error("ConfigMap Key 'app-protect-cpu-thresholds' must follow pattern: 'high=<0 - 100> low=<0 - 100>'. Ignoring.")
}
}
if appProtectPhysicalMemoryThresholds, exists := cfgm.Data["app-protect-physical-memory-util-thresholds"]; exists {
cfgParams.MainAppProtectPhysicalMemoryThresholds = appProtectPhysicalMemoryThresholds
if VerifyAppProtectThresholds(appProtectPhysicalMemoryThresholds) {
cfgParams.MainAppProtectPhysicalMemoryThresholds = appProtectPhysicalMemoryThresholds
} else {
glog.Error("ConfigMap Key 'app-protect-physical-memory-thresholds' must follow pattern: 'high=<0 - 100> low=<0 - 100>'. Ignoring.")
}
}
if appProtectReconnectPeriod, exists := cfgm.Data["app-protect-reconnect-period-seconds"]; exists {
period, err := ParseFloat64(appProtectReconnectPeriod)
if err == nil && period > 0 && period <= 60 {
cfgParams.MainAppProtectReconnectPeriod = appProtectReconnectPeriod
} else {
glog.Error("ConfigMap Key 'app-protect-reconnect-period-second' must have value between '0' and '60'. '0' is illegal. Ignoring.")
}
}
}
if hasAppProtectDos {
if appProtectDosLogFormat, exists, err := GetMapKeyAsStringSlice(cfgm.Data, "app-protect-dos-log-format", cfgm, "\n"); exists {
if err != nil {
glog.Error(err)
} else {
cfgParams.MainAppProtectDosLogFormat = appProtectDosLogFormat
}
}
if appProtectDosLogFormatEscaping, exists := cfgm.Data["app-protect-dos-log-format-escaping"]; exists {
appProtectDosLogFormatEscaping = strings.TrimSpace(appProtectDosLogFormatEscaping)
if appProtectDosLogFormatEscaping != "" {
cfgParams.MainAppProtectDosLogFormatEscaping = appProtectDosLogFormatEscaping
}
}
}
return cfgParams
}
// GenerateNginxMainConfig generates MainConfig.
func GenerateNginxMainConfig(staticCfgParams *StaticConfigParams, config *ConfigParams) *version1.MainConfig {
nginxCfg := &version1.MainConfig{
AccessLogOff: config.MainAccessLogOff,
DefaultServerAccessLogOff: config.DefaultServerAccessLogOff,
DefaultServerReturn: config.DefaultServerReturn,
ErrorLogLevel: config.MainErrorLogLevel,
HealthStatus: staticCfgParams.HealthStatus,
HealthStatusURI: staticCfgParams.HealthStatusURI,
HTTP2: config.HTTP2,
HTTPSnippets: config.MainHTTPSnippets,
KeepaliveRequests: config.MainKeepaliveRequests,
KeepaliveTimeout: config.MainKeepaliveTimeout,
LogFormat: config.MainLogFormat,
LogFormatEscaping: config.MainLogFormatEscaping,
MainSnippets: config.MainMainSnippets,
NginxStatus: staticCfgParams.NginxStatus,
NginxStatusAllowCIDRs: staticCfgParams.NginxStatusAllowCIDRs,
NginxStatusPort: staticCfgParams.NginxStatusPort,
OpenTracingEnabled: config.MainOpenTracingEnabled,
OpenTracingLoadModule: config.MainOpenTracingLoadModule,
OpenTracingTracer: config.MainOpenTracingTracer,
OpenTracingTracerConfig: config.MainOpenTracingTracerConfig,
ProxyProtocol: config.ProxyProtocol,
ResolverAddresses: config.ResolverAddresses,
ResolverIPV6: config.ResolverIPV6,
ResolverTimeout: config.ResolverTimeout,
ResolverValid: config.ResolverValid,
RealIPHeader: config.RealIPHeader,
RealIPRecursive: config.RealIPRecursive,
SetRealIPFrom: config.SetRealIPFrom,
ServerNamesHashBucketSize: config.MainServerNamesHashBucketSize,
ServerNamesHashMaxSize: config.MainServerNamesHashMaxSize,
ServerTokens: config.ServerTokens,
SSLCiphers: config.MainServerSSLCiphers,
SSLDHParam: config.MainServerSSLDHParam,
SSLPreferServerCiphers: config.MainServerSSLPreferServerCiphers,
SSLProtocols: config.MainServerSSLProtocols,
SSLRejectHandshake: staticCfgParams.SSLRejectHandshake,
TLSPassthrough: staticCfgParams.TLSPassthrough,
StreamLogFormat: config.MainStreamLogFormat,
StreamLogFormatEscaping: config.MainStreamLogFormatEscaping,
StreamSnippets: config.MainStreamSnippets,
StubStatusOverUnixSocketForOSS: staticCfgParams.StubStatusOverUnixSocketForOSS,
WorkerCPUAffinity: config.MainWorkerCPUAffinity,
WorkerProcesses: config.MainWorkerProcesses,
WorkerShutdownTimeout: config.MainWorkerShutdownTimeout,
WorkerConnections: config.MainWorkerConnections,
WorkerRlimitNofile: config.MainWorkerRlimitNofile,
VariablesHashBucketSize: config.VariablesHashBucketSize,
VariablesHashMaxSize: config.VariablesHashMaxSize,
AppProtectLoadModule: staticCfgParams.MainAppProtectLoadModule,
AppProtectDosLoadModule: staticCfgParams.MainAppProtectDosLoadModule,
AppProtectFailureModeAction: config.MainAppProtectFailureModeAction,
AppProtectCompressedRequestsAction: config.MainAppProtectCompressedRequestsAction,
AppProtectCookieSeed: config.MainAppProtectCookieSeed,
AppProtectCPUThresholds: config.MainAppProtectCPUThresholds,
AppProtectPhysicalMemoryThresholds: config.MainAppProtectPhysicalMemoryThresholds,
AppProtectReconnectPeriod: config.MainAppProtectReconnectPeriod,
AppProtectDosLogFormat: config.MainAppProtectDosLogFormat,
AppProtectDosLogFormatEscaping: config.MainAppProtectDosLogFormatEscaping,
InternalRouteServer: staticCfgParams.EnableInternalRoutes,
InternalRouteServerName: staticCfgParams.PodName,
LatencyMetrics: staticCfgParams.EnableLatencyMetrics,
PreviewPolicies: staticCfgParams.EnablePreviewPolicies,
}
return nginxCfg
} | if parsedMethod, err := ParseLBMethodForPlus(lbMethod); err != nil {
glog.Errorf("Configmap %s/%s: Invalid value for the lb-method key: got %q: %v", cfgm.GetNamespace(), cfgm.GetName(), lbMethod, err) |
avg.rs | use crate::math::reducers::{reducer_for, Reduce};
use crate::math::utils::run_with_function;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{Example, PipelineData, ShellError, Signature, Span, Value};
#[derive(Clone)]
pub struct SubCommand;
impl Command for SubCommand {
fn name(&self) -> &str {
"math avg"
}
fn signature(&self) -> Signature {
Signature::build("math avg")
}
fn usage(&self) -> &str {
"Finds the average of a list of numbers or tables"
}
fn run(
&self,
_engine_state: &EngineState,
_stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
run_with_function(call, input, average)
}
fn examples(&self) -> Vec<Example> {
vec![Example {
description: "Get the average of a list of numbers",
example: "[-50 100.0 25] | math avg",
result: Some(Value::Float {
val: 25.0,
span: Span::unknown(),
}),
}]
}
}
pub fn average(values: &[Value], head: &Span) -> Result<Value, ShellError> {
let sum = reducer_for(Reduce::Summation);
let total = &sum(
Value::Int {
val: 0,
span: Span::unknown(),
},
values.to_vec(),
*head,
)?;
match total {
Value::Filesize { val, span } => Ok(Value::Filesize {
val: val / values.len() as i64,
span: *span,
}),
Value::Duration { val, span } => Ok(Value::Duration { | _ => total.div(
*head,
&Value::Int {
val: values.len() as i64,
span: *head,
},
),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_examples() {
use crate::test_examples;
test_examples(SubCommand {})
}
} | val: val / values.len() as i64,
span: *span,
}), |
Help.test.tsx | import React from 'react';
import { render, screen } from '@testing-library/react';
import {Help} from './Help';
| describe('Help', () => {
it('renders the given children', () => {
const content = 'Testing';
render(<Help>{content}</Help>);
expect(screen.getByText(content)).toBeInTheDocument();
});
}); | |
config.py | import os
import numpy as np
from .general_utils import get_logger
from .data_utils import load_vocab, get_processing_word
class Config():
def __init__(self, load=True):
"""Initialize hyperparameters and load vocabs
Args:
load_embeddings: (bool) if True, load embeddings into
np array, else None
"""
# directory for training outputs
if not os.path.exists(self.dir_output):
os.makedirs(self.dir_output)
# create instance of logger
self.logger = get_logger(self.path_log)
# load if requested (default)
if load:
self.load()
| def load(self):
"""Loads vocabulary, processing functions and embeddings
Supposes that build_data.py has been run successfully and that
the corresponding files have been created (vocab and trimmed
vectors)
"""
# 1. vocabulary
self.vocab_words = load_vocab(self.filename_words)
self.vocab_relations = load_vocab(self.filename_relation)
self.nwords = len(self.vocab_words)
self.nrelations = len(self.vocab_relations)
# 2. get processing functions that map str -> id
self.processing_word = get_processing_word(self.vocab_words, UNK = "<UNK>")
self.processing_relation = get_processing_word(self.vocab_relations, UNK='NA')
# 3. get pre-trained embeddings
self.embeddings = (np.load(self.filename_embeddings)['vec']
if self.use_pretrained else None)
# general config
dir_output = "./results/test/"
graph_output = "./graph"
dir_model = dir_output + "model.weights/" # directory to save models
path_log = dir_output + "log.txt"
restore_model = "./results/test/model.weights/early_best.ckpt"
# embeddings
dim_word = 50
dim_pos = 5
dim = dim_word + 2*dim_pos
# position range in sentence
nposition = 500
# convolution
window_size = 3
feature_maps = 230
filename_train_origin = "./data/origin_data/train.txt"
filename_train = "./data/processed_data/train.txt"
filename_train_wrong = "./data/processed_data/wrong_parse_train.txt"
filename_dev = "./data/processed_data/test.txt"
filename_test_origin = "./data/origin_data/test.txt"
filename_test = "./data/processed_data/test.txt"
filename_test_wrong = "./data/processed_data/wrong_parse_test.txt"
max_iter = None # if not None, max number of examples in Dataset
# vocab (created from dataset with build_data.py)
filename_words = "./data/processed_data/words.txt"
filename_embeddings = "./data/processed_data/vectors.npz"
filename_relation_origin = "./data/origin_data/relation2id.txt"
filename_relation = "./data/processed_data/relation.txt"
# word vectors file
filename_wordvectors = "./data/origin_data/vec.txt"
use_pretrained = True
MIL = False # if True, using multi-instances learning
shuffle = False # if True, shuffle train dataset
max_iter = None # if not None, max number of examples in Dataset
# training
train_word_embeddings = False
train_pos_embeddings = True
nepochs = 15
dropout = 0.5
batch_size = 50
lr_method = "adadelta"
lr = 0.001
lr_decay = 0.9
clip = -1 # if negative, no clipping
nepoch_no_imprv = 3
early_stop = True
max_train_step = 100000 | |
test_scope.py | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Thomas Beermann, <[email protected]>, 2012
# - Angelos Molfetas, <[email protected]>, 2012
# - Mario Lassnig, <[email protected]>, 2012
# - Vincent Garonne, <[email protected]>, 2012-2015
# - Cedric Serfon, <[email protected]>, 2017
# - Andrew Lister, <[email protected]>, 2019
# - Patrick Austin <[email protected]>, 2020
from json import dumps, loads
from paste.fixture import TestApp
from nose.tools import assert_equal, assert_true, assert_in, raises, assert_raises
from rucio.client.accountclient import AccountClient
from rucio.client.scopeclient import ScopeClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.exception import AccountNotFound, Duplicate, ScopeNotFound, InvalidObject
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import generate_uuid as uuid
from rucio.core.scope import get_scopes, add_scope, is_scope_owner
from rucio.tests.common import account_name_generator, scope_name_generator
from rucio.web.rest.account import APP as account_app
from rucio.web.rest.authentication import APP as auth_app
class | ():
def __init__(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo = {}
self.scopes = [InternalScope(scope_name_generator(), **self.vo) for _ in range(5)]
self.jdoe = InternalAccount('jdoe', **self.vo)
def test_list_scopes(self):
""" SCOPE (CORE): List scopes """
for scope in self.scopes:
add_scope(scope=scope, account=self.jdoe)
scopes = get_scopes(account=self.jdoe)
for scope in scopes:
assert_in(scope, scopes)
def test_is_scope_owner(self):
""" SCOPE (CORE): Is scope owner """
scope = InternalScope(scope_name_generator(), **self.vo)
add_scope(scope=scope, account=self.jdoe)
anwser = is_scope_owner(scope=scope, account=self.jdoe)
assert_equal(anwser, True)
class TestScope():
def __init__(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo_header = {'X-Rucio-VO': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo_header = {}
self.scopes = [scope_name_generator() for _ in range(5)]
def test_scope_success(self):
""" SCOPE (REST): send a POST to create a new account and scope """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': 'rucio.email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 201)
def test_scope_failure(self):
""" SCOPE (REST): send a POST to create a new scope for a not existing account to test the error"""
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
account_name_generator()
res2 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (scopeusr, scopeusr), headers=headers2, expect_errors=True)
assert_equal(res2.status, 404)
def test_scope_duplicate(self):
""" SCOPE (REST): send a POST to create a already existing scope to test the error"""
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': '[email protected]'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 201)
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 409)
def test_list_scope(self):
""" SCOPE (REST): send a GET list all scopes for one account """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
tmp_val = account_name_generator()
headers2 = {'Rucio-Type': 'user', 'X-Rucio-Account': 'root', 'X-Rucio-Auth-Token': str(token)}
data = dumps({'type': 'USER', 'email': '[email protected]'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/%s' % tmp_val, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
for scope in self.scopes:
data = dumps({})
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (tmp_val, scope), headers=headers3, params=data, expect_errors=True)
assert_equal(res3.status, 201)
res4 = TestApp(account_app.wsgifunc(*mw)).get('/%s/scopes/' % tmp_val, headers=headers3, expect_errors=True)
assert_equal(res4.status, 200)
svr_list = loads(res4.body)
for scope in self.scopes:
assert_in(scope, svr_list)
def test_list_scope_account_not_found(self):
""" SCOPE (REST): send a GET list all scopes for a not existing account """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers3 = {'X-Rucio-Auth-Token': str(token)}
res3 = TestApp(account_app.wsgifunc(*mw)).get('/testaccount/scopes', headers=headers3, expect_errors=True)
assert_equal(res3.status, 404)
assert_equal(res3.header('ExceptionClass'), 'AccountNotFound')
def test_list_scope_no_scopes(self):
""" SCOPE (REST): send a GET list all scopes for one account without scopes """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': '[email protected]'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
res4 = TestApp(account_app.wsgifunc(*mw)).get('/%s/scopes/' % (acntusr), headers=headers3, params=data, expect_errors=True)
assert_equal(res4.status, 404)
assert_equal(res4.header('ExceptionClass'), 'ScopeNotFound')
class TestScopeClient():
def __init__(self):
self.account_client = AccountClient()
self.scope_client = ScopeClient()
def test_create_scope(self):
""" SCOPE (CLIENTS): create a new scope."""
account = 'jdoe'
scope = scope_name_generator()
ret = self.scope_client.add_scope(account, scope)
assert_true(ret)
with assert_raises(InvalidObject):
self.scope_client.add_scope(account, 'tooooolooooongscooooooooooooope')
with assert_raises(InvalidObject):
self.scope_client.add_scope(account, '$?!')
@raises(AccountNotFound)
def test_create_scope_no_account(self):
""" SCOPE (CLIENTS): try to create scope for not existing account."""
account = str(uuid()).lower()[:30]
scope = scope_name_generator()
self.scope_client.add_scope(account, scope)
@raises(Duplicate)
def test_create_scope_duplicate(self):
""" SCOPE (CLIENTS): try to create a duplicate scope."""
account = 'jdoe'
scope = scope_name_generator()
self.scope_client.add_scope(account, scope)
self.scope_client.add_scope(account, scope)
def test_list_scopes(self):
""" SCOPE (CLIENTS): try to list scopes for an account."""
account = 'jdoe'
scope_list = [scope_name_generator() for _ in range(5)]
for scope in scope_list:
self.scope_client.add_scope(account, scope)
svr_list = self.scope_client.list_scopes_for_account(account)
for scope in scope_list:
if scope not in svr_list:
assert_true(False)
@raises(AccountNotFound)
def test_list_scopes_account_not_found(self):
""" SCOPE (CLIENTS): try to list scopes for a non existing account."""
account = account_name_generator()
self.scope_client.list_scopes_for_account(account)
@raises(ScopeNotFound)
def test_list_scopes_no_scopes(self):
""" SCOPE (CLIENTS): try to list scopes for an account without scopes."""
account = account_name_generator()
self.account_client.add_account(account, 'USER', '[email protected]')
self.scope_client.list_scopes_for_account(account)
| TestScopeCoreApi |
hook-osgeo.py | #-----------------------------------------------------------------------------
# Copyright (c) 2015, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_data_files
from PyInstaller.compat import is_win, is_darwin
import os
import sys
# The osgeo libraries require auxiliary data and may have hidden dependencies.
# There are several possible configurations on how these libraries can be
# deployed.
# This hook evaluates the cases when:
# - the `data` folder is present "in-source" (sharing the same namespace folder
# as the code libraries)
# - the `data` folder is present "out-source" (for instance, on Anaconda for
# Windows, in PYTHONHOME/Library/data)
# In this latter case, the hook also checks for the presence of `proj` library
# (e.g., on Windows in PYTHONHOME) for being added to the bundle.
#
# This hook has been tested with gdal (v.1.11.2 and 1.11.3) on:
# - Win7 64bit
# - Ubuntu 15.04 64bit
# - Mac OS X Yosemite 10.10
#
# TODO: Fix for gdal>=2.0: 'NameError: global name 'help' is not defined'
# flag used to identify an Anaconda environment
is_conda = False
# Auxiliary data:
#
# - general case (data in 'osgeo/data/gdal'):
datas = collect_data_files('osgeo', subdir=os.path.join('data', 'gdal'))
# check if the data has been effectively found in 'osgeo/data/gdal'
if len(datas) == 0:
if hasattr(sys, 'real_prefix'): # check if in a virtual environment
root_path = sys.real_prefix
else:
root_path = sys.prefix
# - conda-specific
if is_win:
tgt_gdal_data = os.path.join('Library', 'data')
src_gdal_data = os.path.join(root_path, 'Library', 'data')
else: # both linux and darwin
tgt_gdal_data = os.path.join('share', 'gdal')
src_gdal_data = os.path.join(root_path, 'share', 'gdal')
if os.path.exists(src_gdal_data):
|
# Hidden dependencies
if is_conda:
# if `proj.4` is present, it provides additional functionalities
if is_win:
proj4_lib = os.path.join(root_path, 'proj.dll')
elif is_darwin:
proj4_lib = os.path.join(root_path, 'lib', 'libproj.dylib')
else: # assumed linux-like settings
proj4_lib = os.path.join(root_path, 'lib', 'libproj.so')
if os.path.exists(proj4_lib):
binaries = [(proj4_lib, ""), ]
| is_conda = True
datas.append((src_gdal_data, tgt_gdal_data))
# a real-time hook takes case to define the path for `GDAL_DATA` |
__main__.py | #!/usr/bin/env python
if __name__ == '__main__': | import sys
import fsleyes.main as main
sys.exit(main.main()) |
|
train.py | # -*- coding: utf-8 -*-
import time
import os
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from reader import data_loader, test_data_loader, multithread_loader
from yolov3 import YOLOv3
# train.py
# 提升点: 可以改变anchor的大小,注意训练和测试时要使用同样的anchor
ANCHORS = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
ANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
IGNORE_THRESH = .7
NUM_CLASSES = 7
TRAINDIR = 'insects/train'
VALIDDIR = 'insects/val'
# train.py
if __name__ == '__main__':
with fluid.dygraph.guard():
model = YOLOv3('yolov3', num_classes = NUM_CLASSES, is_train=True)
opt = fluid.optimizer.Momentum(
learning_rate=0.001, #提升点:可以调整学习率,或者设置学习率衰减
momentum=0.9) # 提升点: 可以添加正则化项
train_loader = multithread_loader(TRAINDIR, batch_size= 10, mode='train')
valid_loader = multithread_loader(VALIDDIR, batch_size= 10, mode='valid')
MAX_EPOCH = 100 # 提升点: 可以改变训练的轮数
for epoch in range(MAX_EPOCH):
for i, data in enumerate(train_loader()):
img, gt_boxes, gt_labels, img_scale = data
gt_scores = np.ones(gt_labels.shape).astype('float32')
gt_scores = to_variable(gt_scores)
img = to_variable(img)
gt_boxes = to_variable(gt_boxes) | anchor_masks = ANCHOR_MASKS,
ignore_thresh=IGNORE_THRESH,
use_label_smooth=False)
loss.backward()
opt.minimize(loss)
model.clear_gradients()
if i % 1 == 0:
timestring = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
print('{}[TRAIN]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))
# save params of model
if (epoch % 5 == 0) or (epoch == MAX_EPOCH -1):
fluid.save_dygraph(model.state_dict(), 'yolo_epoch{}'.format(epoch))
# 每个epoch结束之后在验证集上进行测试
model.eval()
for i, data in enumerate(valid_loader()):
img, gt_boxes, gt_labels, img_scale = data
gt_scores = np.ones(gt_labels.shape).astype('float32')
gt_scores = to_variable(gt_scores)
img = to_variable(img)
gt_boxes = to_variable(gt_boxes)
gt_labels = to_variable(gt_labels)
outputs = model(img)
loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,
anchors = ANCHORS,
anchor_masks = ANCHOR_MASKS,
ignore_thresh=IGNORE_THRESH,
use_label_smooth=False)
if i % 1 == 0:
timestring = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
print('{}[VALID]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))
model.train() | gt_labels = to_variable(gt_labels)
outputs = model(img)
loss = model.get_loss(outputs, gt_boxes, gt_labels, gtscore=gt_scores,
anchors = ANCHORS, |
operations.rs | // Copyright 2020 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
// http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
// https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
// modified, or distributed except according to those terms. Please review the Licences for the
// specific language governing permissions and limitations relating to use of the SAFE Network
// Software.
use super::{
authd::run as authd_run,
errors::{Error, Result},
};
use cluFlock::ExclusiveFlock;
use directories::BaseDirs;
use flexi_logger::{DeferredNow, Logger};
use log::{self, debug, info, Record};
use std::{
fs::{create_dir_all, File, OpenOptions},
io::{self, prelude::*},
path::PathBuf,
process::{self, Command, Stdio},
str, thread,
time::Duration,
};
const SAFE_AUTHD_PID_FILE: &str = "safe-authd.pid";
const DEFAULT_LOG_LEVEL: &str = "info";
pub async fn start_authd(listen: &str, log_dir: Option<PathBuf>, foreground: bool) -> Result<()> {
if foreground {
// Let's run it as a normal process in the foreground
run_in_foreground(listen, log_dir).await
} else {
// Run it as a daemon, i.e. a detached process in the background
launch_detached_process(listen, log_dir)
}
}
pub fn stop_authd(log_dir: Option<PathBuf>) -> Result<()> {
println!("Stopping SAFE Authenticator daemon (safe-authd)...");
if cfg!(windows) {
// Since in Windows we cannot read the locked PID file,
// we kill authd by using the binary name
let binary_file_name = "safe-authd.exe";
let current_pid = process::id();
let output = Command::new("taskkill")
.args(&[
"/F",
"/IM",
binary_file_name,
"/FI",
&format!("PID ne {}", current_pid),
])
.output()?;
if output.status.success() {
io::stdout().write_all(&output.stdout).map_err(|err| {
Error::GeneralError(format!("Failed to output success info: {}", err))
})?;
Ok(())
} else {
let msg = format!(
"Error when attempting to stop authd: {}",
String::from_utf8_lossy(&output.stderr)
);
println!("{}", msg);
Err(Error::GeneralError(msg))
}
} else {
// For Linux and Mac we can read the locked PID file, |
debug!("Retrieving authd PID from: {:?}", &pid_file_path);
let mut file = File::open(&pid_file_path).map_err(|err| {
Error::GeneralError(format!(
"Failed to open safe-authd daemon PID file ('{}') to stop daemon: {}",
pid_file_path.display(),
err
))
})?;
let mut pid = String::new();
file.read_to_string(&mut pid)?;
let output = Command::new("kill").arg("-9").arg(&pid).output()?;
if output.status.success() {
println!("Success, safe-authd (PID: {}) stopped!", pid);
} else {
println!("No running safe-authd process (with PID {}) was found", pid);
}
Ok(())
}
}
pub async fn restart_authd(listen: &str, log_dir: Option<PathBuf>, foreground: bool) -> Result<()> {
match stop_authd(log_dir.clone()) {
Ok(()) => {
// Let's give it a sec so it's properlly stopped
thread::sleep(Duration::from_millis(1000));
}
Err(err) => println!("{}", err),
}
start_authd(listen, log_dir, foreground).await?;
println!("Success, safe-authd restarted!");
Ok(())
}
// Private functions
async fn run_in_foreground(listen: &str, log_dir: Option<PathBuf>) -> Result<()> {
let log_path = get_authd_log_path(log_dir.clone())?;
let authd_exec = std::env::current_exe()?;
// Custom formatter for logs
let do_format = move |writer: &mut dyn Write, clock: &mut DeferredNow, record: &Record| {
write!(
writer,
"{} {} [{}:{}] {}",
record.level(),
clock.now().to_rfc3339(),
record.file().unwrap_or_default(),
record.line().unwrap_or_default(),
record.args()
)
};
// Depending on log_dir arg received we output logs to stdout or to a file
let logger = Logger::with_env_or_str(DEFAULT_LOG_LEVEL)
.format(do_format)
.suppress_timestamp();
if let Some(log_file_path) = log_dir {
logger
.log_to_file()
.directory(log_file_path)
.append()
.start()
} else {
logger.start()
}
.map_err(|err| Error::GeneralError(format!("Error when initialising logger: {}", err)))?;
info!(
"Running authd instance from executable at \"{}\"",
authd_exec.display()
);
let pid = process::id();
info!("authd instance starting (PID: {})...", pid);
let mut pid_file_path = log_path.clone();
pid_file_path.push(SAFE_AUTHD_PID_FILE);
debug!("PID file to be written at: {:?}", &pid_file_path);
// Open/create PID file
let pid_file = if pid_file_path.exists() {
OpenOptions::new()
.write(true)
.truncate(false)
.open(&pid_file_path)?
} else {
File::create(&pid_file_path)?
};
// Try to lock PID file
match pid_file.try_lock() {
Ok(mut pid_file) => {
// We got the lock on the PID file, therefore write our current PID
pid_file.set_len(0)?;
write!(pid_file, "{}", pid).map_err(|err| {
Error::GeneralError(format!(
"Failed to start safe-authd daemon ({}): {}",
authd_exec.display(),
err.to_string()
))
})?;
info!("Initialising SAFE Authenticator services...");
authd_run(listen, None, None).await?;
// Release PID file lock (this is done automatically anyways if process is killed)
drop(pid_file);
Ok(())
}
Err(err) => {
// We cannot start the authd services since PID file lock coudln't be obtained
let os_error_code = err.raw_os_error().unwrap_or_else(|| 0);
debug!(
"Failed to lock PID file with OS error code: {}",
os_error_code
);
// Let's check if the error is due to authd already running
let is_already_started: bool = if cfg!(target_os = "windows") {
// On Windows: ERROR_LOCK_VIOLATION == 33
os_error_code == 33
} else if cfg!(target_os = "linux") {
// On Linux: EWOULDBLOCK == EAGAIN == 11
os_error_code == 11
} else {
// On Mac: 35
os_error_code == 35
};
let res_err = if is_already_started {
// A daemon has been already started keeping the lock on the PID file,
// although we don't know its status
Error::AuthdAlreadyStarted(format!(
"Failed to start safe-authd daemon ({})",
authd_exec.display(),
))
} else {
Error::GeneralError(format!(
"Unknown error when attempting get lock on PID file at {}: {:?}",
pid_file_path.display(),
err
))
};
Err(res_err)
}
}
}
fn launch_detached_process(listen: &str, log_dir: Option<PathBuf>) -> Result<()> {
let log_path = get_authd_log_path(log_dir)?;
let authd_exec = std::env::current_exe()?;
println!("Starting SAFE Authenticator daemon (safe-authd)...");
// We execute this same binary but requesting to run in the foreground,
// and since we spawn it, it will be a detached process running in the background
let args = [
"start",
"--fg",
"--listen",
listen,
"--log-dir",
&log_path.display().to_string(),
];
debug!(
"Running '{}' with args {:?} ...",
authd_exec.display(),
args
);
// Spwan the process
let mut child = Command::new(&authd_exec)
.args(&args)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?;
// Let's give it a sec so it starts/fails
thread::sleep(Duration::from_millis(1000));
// If it failed to start already, we can get the error code from it,
// otherwise we'll assume it started correctly
if let Ok(Some(status)) = child.try_wait() {
let exit_code = match status.code() {
Some(code) => code,
None => 1,
};
let error = Error::from_code(
exit_code,
format!(
"Failed to start safe-authd daemon '{}' (exit code: {})",
authd_exec.display(),
exit_code
),
);
println!("{}", error);
Err(error)
} else {
println!("safe-authd started (PID: {})", child.id());
Ok(())
}
}
fn get_authd_log_path(log_dir: Option<PathBuf>) -> Result<PathBuf> {
match log_dir {
Some(p) => Ok(p),
None => {
let base_dirs = BaseDirs::new().ok_or_else(|| {
Error::GeneralError("Failed to obtain user's home path".to_string())
})?;
let mut path = PathBuf::from(base_dirs.home_dir());
path.push(".safe");
path.push("authd");
path.push("logs");
if !path.exists() {
println!("Creating '{}' folder", path.display());
create_dir_all(path.clone()).map_err(|err| {
Error::GeneralError(format!(
"Couldn't create target path to store authd log files: {}",
err
))
})?;
}
Ok(path)
}
}
} | // and then kill authd using its PID
let mut pid_file_path: PathBuf = get_authd_log_path(log_dir)?;
pid_file_path.push(SAFE_AUTHD_PID_FILE); |
Footer.tsx | import { graphql, useStaticQuery } from 'gatsby'
import React, { Props } from 'react' |
interface FooterProps {
children?: any
isRoot?: any
}
export const Footer: React.FC<FooterProps> = ({ isRoot }) => {
const data = useStaticQuery(graphql`
query SiteTitle {
site {
siteMetadata {
title
}
}
}
`)
return <>aaa</>
}
export default Footer | |
change-locale.ts | import { Component } from '@angular/core';
import { BsDatepickerConfig } from 'ngx-bootstrap/datepicker';
import { listLocales } from 'ngx-bootstrap/bs-moment';
@Component({
selector: 'demo-datepicker-change-locale',
templateUrl: './change-locale.html'
})
export class DemoDatepickerChangeLocaleComponent { | bsConfig: Partial<BsDatepickerConfig>;
applyLocale(pop: any) {
// create new object on each property change
// so Angular can catch object reference change
this.bsConfig = Object.assign({}, { locale: this.locale });
setTimeout(() => {
pop.hide();
pop.show();
});
}
} | locale = 'en';
locales = listLocales(); |
blind_signature.rs | use crate::{MessageGenerators, Signature, MAX_MSGS};
use blake2::Blake2b;
use bls::SecretKey;
use bls12_381_plus::{G1Projective, Scalar};
use core::convert::TryFrom;
use digest::Digest;
use ff::Field;
use group::Curve;
use hmac_drbg::HmacDRBG;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use short_group_signatures_core::{error::Error, lib::*};
use subtle::CtOption;
use typenum::U64;
/// A BBS+ blind signature
/// structurally identical to `Signature` but is used to
/// help with misuse and confusion.
///
/// 1 or more messages have been hidden by the signature recipient
/// so the signer only knows a subset of the messages to be signed
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct BlindSignature {
pub(crate) a: G1Projective,
pub(crate) e: Scalar,
pub(crate) s: Scalar,
}
impl Serialize for BlindSignature {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let sig = Signature {
a: self.a,
e: self.e,
s: self.s,
};
sig.serialize(s)
}
}
impl<'de> Deserialize<'de> for BlindSignature {
fn deserialize<D>(d: D) -> Result<BlindSignature, D::Error>
where
D: Deserializer<'de>,
{
let sig = Signature::deserialize(d)?;
Ok(Self {
a: sig.a,
e: sig.e,
s: sig.s,
})
}
}
impl BlindSignature {
/// The number of bytes in a signature
pub const BYTES: usize = 112;
/// Generate a blind signature where only a subset of messages are known to the signer
/// The rest are encoded as a commitment
pub fn new(
commitment: Commitment,
sk: &SecretKey,
generators: &MessageGenerators,
msgs: &[(usize, Message)],
) -> Result<Self, Error> {
if generators.len() < msgs.len() {
return Err(Error::new(1, "not enough message generators"));
}
if sk.0.is_zero() {
return Err(Error::new(2, "invalid secret key"));
}
let mut hasher = Blake2b::new();
hasher.update(generators.h0.to_affine().to_uncompressed());
for i in 0..generators.len() {
hasher.update(generators.get(i).to_affine().to_uncompressed());
}
for (_, m) in msgs.iter() {
hasher.update(m.to_bytes())
}
let nonce = hasher.finalize();
let mut drbg = HmacDRBG::<Blake2b>::new(&sk.to_bytes()[..], &nonce[..], &[]);
// Should yield non-zero values for `e` and `s`, very small likelihood of it being zero
let e = Scalar::from_bytes_wide(
&<[u8; 64]>::try_from(&drbg.generate::<U64>(Some(&[1u8]))[..]).unwrap(),
);
let s = Scalar::from_bytes_wide(
&<[u8; 64]>::try_from(&drbg.generate::<U64>(Some(&[2u8]))[..]).unwrap(),
);
// Can't go more than 128, but that's quite a bit
let mut points = [G1Projective::identity(); MAX_MSGS];
let mut scalars = [Scalar::one(); MAX_MSGS];
points[0] = commitment.0;
points[1] = G1Projective::generator();
points[2] = generators.h0;
scalars[2] = s;
let mut i = 3;
for (idx, m) in msgs.iter() {
points[i] = generators.get(*idx);
scalars[i] = m.0;
i += 1;
}
let b = G1Projective::sum_of_products(&points[..i], &scalars[..i]);
let exp = (e + sk.0).invert().unwrap();
Ok(Self { a: b * exp, e, s })
}
/// Once signature on committed attributes (blind signature) is received, the signature needs to be unblinded.
/// Takes the blinding factor used in the commitment.
pub fn to_unblinded(self, blinding: SignatureBlinding) -> Signature |
/// Get the byte representation of this signature
pub fn to_bytes(&self) -> [u8; Self::BYTES] {
let sig = Signature {
a: self.a,
e: self.e,
s: self.s,
};
sig.to_bytes()
}
/// Convert a byte sequence into a signature
pub fn from_bytes(data: &[u8; Self::BYTES]) -> CtOption<Self> {
Signature::from_bytes(data).map(|sig| Self {
a: sig.a,
e: sig.e,
s: sig.s,
})
}
}
| {
Signature {
a: self.a,
e: self.e,
s: self.s + blinding.0,
}
} |
hummingbot_application.py | #!/usr/bin/env python
import asyncio
from collections import deque
import logging
import time
from typing import List, Dict, Optional, Tuple, Set, Deque
from hummingbot.client.command import __all__ as commands
from hummingbot.core.clock import Clock
from hummingbot.core.data_type.order_book_tracker import OrderBookTrackerDataSourceType
from hummingbot.core.data_type.user_stream_tracker import UserStreamTrackerDataSourceType
from hummingbot.logger import HummingbotLogger
from hummingbot.logger.application_warning import ApplicationWarning
from hummingbot.market.binance.binance_market import BinanceMarket
from hummingbot.market.bittrex.bittrex_market import BittrexMarket
from hummingbot.market.kucoin.kucoin_market import KucoinMarket
from hummingbot.market.coinbase_pro.coinbase_pro_market import CoinbaseProMarket
from hummingbot.market.huobi.huobi_market import HuobiMarket
from hummingbot.market.liquid.liquid_market import LiquidMarket
from hummingbot.market.market_base import MarketBase
from hummingbot.market.paper_trade import create_paper_trade_market
from hummingbot.market.radar_relay.radar_relay_market import RadarRelayMarket
from hummingbot.market.bamboo_relay.bamboo_relay_market import BambooRelayMarket
from hummingbot.market.dolomite.dolomite_market import DolomiteMarket
from hummingbot.market.loopring.loopring_market import LoopringMarket
from hummingbot.market.bitcoin_com.bitcoin_com_market import BitcoinComMarket
from hummingbot.market.kraken.kraken_market import KrakenMarket
from hummingbot.model.sql_connection_manager import SQLConnectionManager
from hummingbot.wallet.ethereum.ethereum_chain import EthereumChain
from hummingbot.wallet.ethereum.web3_wallet import Web3Wallet
from hummingbot.client.ui.keybindings import load_key_bindings
from hummingbot.client.ui.parser import load_parser, ThrowingArgumentParser
from hummingbot.client.ui.hummingbot_cli import HummingbotCLI
from hummingbot.client.ui.completer import load_completer
from hummingbot.client.errors import InvalidCommandError, ArgumentParserError
from hummingbot.client.config.global_config_map import global_config_map, using_wallet
from hummingbot.client.config.config_helpers import get_erc20_token_addresses, get_strategy_config_map
from hummingbot.strategy.strategy_base import StrategyBase
from hummingbot.strategy.cross_exchange_market_making import CrossExchangeMarketPair
from hummingbot.core.utils.kill_switch import KillSwitch
from hummingbot.data_feed.data_feed_base import DataFeedBase
from hummingbot.notifier.notifier_base import NotifierBase
from hummingbot.notifier.telegram_notifier import TelegramNotifier
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.market.markets_recorder import MarketsRecorder
from hummingbot.client.config.security import Security
s_logger = None
MARKET_CLASSES = {
"bamboo_relay": BambooRelayMarket,
"binance": BinanceMarket,
"coinbase_pro": CoinbaseProMarket,
"huobi": HuobiMarket,
"liquid": LiquidMarket,
"radar_relay": RadarRelayMarket,
"dolomite": DolomiteMarket,
"loopring": LoopringMarket,
"bittrex": BittrexMarket,
"kucoin": KucoinMarket,
"bitcoin_com": BitcoinComMarket,
"kraken": KrakenMarket,
}
class HummingbotApplication(*commands):
KILL_TIMEOUT = 10.0
APP_WARNING_EXPIRY_DURATION = 3600.0
APP_WARNING_STATUS_LIMIT = 6
_main_app: Optional["HummingbotApplication"] = None
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@classmethod
def main_application(cls) -> "HummingbotApplication":
if cls._main_app is None:
cls._main_app = HummingbotApplication()
return cls._main_app
def __init__(self):
self.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self.parser: ThrowingArgumentParser = load_parser(self)
self.app = HummingbotCLI(
input_handler=self._handle_command, bindings=load_key_bindings(self), completer=load_completer(self)
)
self.markets: Dict[str, MarketBase] = {}
self.wallet: Optional[Web3Wallet] = None
# strategy file name and name get assigned value after import or create command
self.strategy_file_name: str = None
self.strategy_name: str = None
self.strategy_task: Optional[asyncio.Task] = None
self.strategy: Optional[StrategyBase] = None
self.market_pair: Optional[CrossExchangeMarketPair] = None
self.market_trading_pair_tuples: List[MarketTradingPairTuple] = []
self.clock: Optional[Clock] = None
self.init_time: int = int(time.time() * 1e3)
self.start_time: Optional[int] = None
self.assets: Optional[Set[str]] = set()
self.starting_balances = {}
self.placeholder_mode = False
self.log_queue_listener: Optional[logging.handlers.QueueListener] = None
self.data_feed: Optional[DataFeedBase] = None
self.notifiers: List[NotifierBase] = []
self.kill_switch: Optional[KillSwitch] = None
self._app_warnings: Deque[ApplicationWarning] = deque()
self._trading_required: bool = True
self.trade_fill_db: SQLConnectionManager = SQLConnectionManager.get_trade_fills_instance()
self.markets_recorder: Optional[MarketsRecorder] = None
@property
def | (self):
if self.strategy_name is not None:
return get_strategy_config_map(self.strategy_name)
return None
def _notify(self, msg: str):
self.app.log(msg)
for notifier in self.notifiers:
notifier.add_msg_to_queue(msg)
def _handle_command(self, raw_command: str):
raw_command = raw_command.lower().strip()
try:
if self.placeholder_mode:
pass
else:
args = self.parser.parse_args(args=raw_command.split())
kwargs = vars(args)
if not hasattr(args, "func"):
return
f = args.func
del kwargs["func"]
f(**kwargs)
except InvalidCommandError as e:
self._notify("Invalid command: %s" % (str(e),))
except ArgumentParserError as e:
self._notify(str(e))
except NotImplementedError:
self._notify("Command not yet implemented. This feature is currently under development.")
except Exception as e:
self.logger().error(e, exc_info=True)
async def _cancel_outstanding_orders(self) -> bool:
success = True
try:
on_chain_cancel_on_exit = global_config_map.get("on_chain_cancel_on_exit").value
bamboo_relay_use_coordinator = global_config_map.get("bamboo_relay_use_coordinator").value
kill_timeout: float = self.KILL_TIMEOUT
self._notify("Cancelling outstanding orders...")
for market_name, market in self.markets.items():
# By default, the bot does not cancel orders on exit on Radar Relay or Bamboo Relay,
# since all open orders will expire in a short window
if not on_chain_cancel_on_exit and (market_name == "radar_relay" or (market_name == "bamboo_relay" and not bamboo_relay_use_coordinator)):
continue
cancellation_results = await market.cancel_all(kill_timeout)
uncancelled = list(filter(lambda cr: cr.success is False, cancellation_results))
if len(uncancelled) > 0:
success = False
uncancelled_order_ids = list(map(lambda cr: cr.order_id, uncancelled))
self._notify("\nFailed to cancel the following orders on %s:\n%s" % (
market_name,
'\n'.join(uncancelled_order_ids)
))
except Exception:
self.logger().error(f"Error canceling outstanding orders.", exc_info=True)
success = False
if success:
self._notify("All outstanding orders cancelled.")
return success
async def run(self):
await self.app.run()
def add_application_warning(self, app_warning: ApplicationWarning):
self._expire_old_application_warnings()
self._app_warnings.append(app_warning)
def clear_application_warning(self):
self._app_warnings.clear()
@staticmethod
def _initialize_market_assets(market_name: str, trading_pairs: List[str]) -> List[Tuple[str, str]]:
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
market_trading_pairs: List[Tuple[str, str]] = [market_class.split_trading_pair(trading_pair) for trading_pair in trading_pairs]
return market_trading_pairs
@staticmethod
def _convert_to_exchange_trading_pair(market_name: str, hb_trading_pair: List[str]) -> List[str]:
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
return [market_class.convert_to_exchange_trading_pair(trading_pair) for trading_pair in hb_trading_pair]
def _initialize_wallet(self, token_trading_pairs: List[str]):
if not using_wallet():
return
ethereum_wallet = global_config_map.get("ethereum_wallet").value
private_key = Security._private_keys[ethereum_wallet]
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
erc20_token_addresses = get_erc20_token_addresses(token_trading_pairs)
chain_name: str = global_config_map.get("ethereum_chain_name").value
self.wallet: Web3Wallet = Web3Wallet(
private_key=private_key,
backend_urls=[ethereum_rpc_url],
erc20_token_addresses=erc20_token_addresses,
chain=getattr(EthereumChain, chain_name),
)
def _initialize_markets(self, market_names: List[Tuple[str, List[str]]]):
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
# aggregate trading_pairs if there are duplicate markets
market_trading_pairs_map = {}
for market_name, trading_pairs in market_names:
if market_name not in market_trading_pairs_map:
market_trading_pairs_map[market_name] = []
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
for trading_pair in trading_pairs:
exchange_trading_pair: str = market_class.convert_to_exchange_trading_pair(trading_pair)
market_trading_pairs_map[market_name].append(exchange_trading_pair)
for market_name, trading_pairs in market_trading_pairs_map.items():
if global_config_map.get("paper_trade_enabled").value:
try:
market = create_paper_trade_market(market_name, trading_pairs)
except Exception:
raise
paper_trade_account_balance = global_config_map.get("paper_trade_account_balance").value
for asset, balance in paper_trade_account_balance:
market.set_balance(asset, balance)
elif market_name == "binance":
binance_api_key = global_config_map.get("binance_api_key").value
binance_api_secret = global_config_map.get("binance_api_secret").value
market = BinanceMarket(
binance_api_key,
binance_api_secret,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
)
elif market_name == "radar_relay":
assert self.wallet is not None
market = RadarRelayMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
)
elif market_name == "bamboo_relay":
assert self.wallet is not None
use_coordinator = global_config_map.get("bamboo_relay_use_coordinator").value
pre_emptive_soft_cancels = global_config_map.get("bamboo_relay_pre_emptive_soft_cancels").value
market = BambooRelayMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
trading_pairs=trading_pairs,
use_coordinator=use_coordinator,
pre_emptive_soft_cancels=pre_emptive_soft_cancels,
trading_required=self._trading_required,
)
elif market_name == "coinbase_pro":
coinbase_pro_api_key = global_config_map.get("coinbase_pro_api_key").value
coinbase_pro_secret_key = global_config_map.get("coinbase_pro_secret_key").value
coinbase_pro_passphrase = global_config_map.get("coinbase_pro_passphrase").value
market = CoinbaseProMarket(coinbase_pro_api_key,
coinbase_pro_secret_key,
coinbase_pro_passphrase,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "huobi":
huobi_api_key = global_config_map.get("huobi_api_key").value
huobi_secret_key = global_config_map.get("huobi_secret_key").value
market = HuobiMarket(huobi_api_key,
huobi_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "liquid":
liquid_api_key = global_config_map.get("liquid_api_key").value
liquid_secret_key = global_config_map.get("liquid_secret_key").value
market = LiquidMarket(liquid_api_key,
liquid_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
user_stream_tracker_data_source_type=UserStreamTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "dolomite":
assert self.wallet is not None
is_test_net: bool = global_config_map.get("ethereum_chain_name").value == "DOLOMITE_TEST"
market = DolomiteMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
isTestNet=is_test_net,
trading_required=self._trading_required,
)
elif market_name == "loopring":
loopring_accountid : int = global_config_map.get("loopring_accountid").value
loopring_exchangeid : int = global_config_map.get("loopring_exchangeid").value
loopring_private_key : str = global_config_map.get("loopring_private_key").value
loopring_api_key : str = global_config_map.get("loopring_api_key").value
market = LoopringMarket(
loopring_accountid=loopring_accountid,
loopring_exchangeid=loopring_exchangeid,
loopring_private_key=loopring_private_key,
loopring_api_key=loopring_api_key,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API
)
elif market_name == "bittrex":
bittrex_api_key = global_config_map.get("bittrex_api_key").value
bittrex_secret_key = global_config_map.get("bittrex_secret_key").value
market = BittrexMarket(bittrex_api_key,
bittrex_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "kucoin":
kucoin_api_key = global_config_map.get("kucoin_api_key").value
kucoin_secret_key = global_config_map.get("kucoin_secret_key").value
kucoin_passphrase = global_config_map.get("kucoin_passphrase").value
market = KucoinMarket(kucoin_api_key,
kucoin_passphrase,
kucoin_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "bitcoin_com":
bitcoin_com_api_key = global_config_map.get("bitcoin_com_api_key").value
bitcoin_com_secret_key = global_config_map.get("bitcoin_com_secret_key").value
market = BitcoinComMarket(bitcoin_com_api_key,
bitcoin_com_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "kraken":
kraken_api_key = global_config_map.get("kraken_api_key").value
kraken_secret_key = global_config_map.get("kraken_secret_key").value
market = KrakenMarket(kraken_api_key,
kraken_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
else:
raise ValueError(f"Market name {market_name} is invalid.")
self.markets[market_name]: MarketBase = market
self.markets_recorder = MarketsRecorder(
self.trade_fill_db,
list(self.markets.values()),
self.strategy_file_name,
self.strategy_name,
)
self.markets_recorder.start()
    def _initialize_notifiers(self):
        # Create a Telegram notifier when enabled (at most one instance), then
        # start every notifier in self.notifiers.
        if global_config_map.get("telegram_enabled").value:
            # TODO: refactor to use single instance
            if not any([isinstance(n, TelegramNotifier) for n in self.notifiers]):
                self.notifiers.append(
                    TelegramNotifier(
                        token=global_config_map["telegram_token"].value,
                        chat_id=global_config_map["telegram_chat_id"].value,
                        hb=self,
                    )
                )
        for notifier in self.notifiers:
            notifier.start()
| strategy_config_map |
jquery.scrollorama.js | /*
scrollorama - The jQuery plugin for doing cool scrolly stuff
by John Polacek (@johnpolacek)
Dual licensed under MIT and GPL.
*/
(function($) {
$.scrollorama = function(options) {
var scrollorama = this,
blocks = [],
browserPrefix = '',
onBlockChange = function() {},
latestKnownScrollY = 0,
ticking = false,
requestAnimFrame = window.requestAnimationFrame ||
window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function( callback ){
window.setTimeout(callback, 1000 / 60);
},
defaults = {offset:0, enablePin: true};
scrollorama.settings = $.extend({}, defaults, options);
scrollorama.blockIndex = 0;
if (options.blocks === undefined) { alert('ERROR: Must assign blocks class selector to scrollorama plugin'); }
// PRIVATE FUNCTIONS
function init() {
var i, block, didScroll = false;
if (typeof scrollorama.settings.blocks === 'string') { scrollorama.settings.blocks = $(scrollorama.settings.blocks); }
// set browser prefix
if ($.browser.mozilla) { browserPrefix = '-moz-'; }
if ($.browser.webkit) { browserPrefix = '-webkit-'; }
if ($.browser.opera) { browserPrefix = '-o-'; }
if ($.browser.msie) { browserPrefix = '-ms-'; }
// create blocks array to contain animation props
$('body').css('position','relative');
for (i=0; i<scrollorama.settings.blocks.length; i++) {
block = scrollorama.settings.blocks.eq(i);
blocks.push({
block: block,
top: block.offset().top - parseInt(block.css('margin-top'), 10),
pin: 0,
animations:[]
});
}
// convert block elements to absolute position
if (scrollorama.settings.enablePin.toString() === 'true') {
for (i=0; i<blocks.length; i++) {
blocks[i].block
.css('position', 'absolute')
.css('top', blocks[i].top);
}
}
$('body').prepend('<div id="scroll-wrap"></div>');
latestKnownScrollY = 0;
ticking = false;
$(window).scroll( onScroll );
}
function onScroll() {
latestKnownScrollY = window.scrollY;
requestTick();
}
function requestTick() {
if(!ticking) {
requestAnimFrame(function(){
onScrollorama();
update();
});
}
ticking = true;
}
function update() {
// reset the tick so we can
// capture the next onScroll
ticking = false;
}
function onScrollorama() {
var scrollTop = $(window).scrollTop(),
currBlockIndex = getCurrBlockIndex(scrollTop),
i, j, anim, startAnimPos, endAnimPos, animPercent, animVal;
// update all animations
for (i=0; i<blocks.length; i++) {
// go through the animations for each block
if (blocks[i].animations.length) {
for (j=0; j<blocks[i].animations.length; j++) {
anim = blocks[i].animations[j];
// if above current block, settings should be at start value
if (i > currBlockIndex) {
if (currBlockIndex !== i-1 && anim.baseline !== 'bottom') {
setProperty(anim.element, anim.property, anim.startVal);
}
if (blocks[i].pin) {
blocks[i].block
.css('position', 'absolute')
.css('top', blocks[i].top);
}
}
// if below current block, settings should be at end value
// unless on an element that gets animated when it hits the bottom of the viewport
else if (i < currBlockIndex) {
setProperty(anim.element, anim.property, anim.endVal);
if (blocks[i].pin) {
blocks[i].block
.css('position', 'absolute')
.css('top', (blocks[i].top + blocks[i].pin));
}
}
// otherwise, set values per scroll position
if (i === currBlockIndex || (currBlockIndex === i-1 && anim.baseline === 'bottom')) {
// if block gets pinned, set position fixed
if (blocks[i].pin && currBlockIndex === i) {
blocks[i].block
.css('position', 'fixed')
.css('top', 0);
}
// set start and end animation positions
startAnimPos = blocks[i].top + anim.delay;
if (anim.baseline === 'bottom') { startAnimPos -= $(window).height(); }
endAnimPos = startAnimPos + anim.duration;
// if scroll is before start of animation, set to start value
if (scrollTop < startAnimPos) {
setProperty(anim.element, anim.property, anim.startVal);
}
// if scroll is after end of animation, set to end value
else if (scrollTop > endAnimPos) {
setProperty(anim.element, anim.property, anim.endVal);
if (blocks[i].pin) {
blocks[i].block
.css('position', 'absolute')
.css('top', (blocks[i].top + blocks[i].pin));
}
}
// otherwise, set value based on scroll
else {
// calculate percent to animate
animPercent = (scrollTop - startAnimPos) / anim.duration;
// account for easing if there is any
if ( anim.easing && $.isFunction( $.easing[anim.easing] ) ) {
animPercent = $.easing[anim.easing]( animPercent, animPercent*1000, 0, 1, 1000 );
}
// then multiply the percent by the value range and calculate the new value
animVal = anim.startVal + (animPercent * (anim.endVal - anim.startVal));
setProperty(anim.element, anim.property, animVal);
}
}
}
}
}
// update blockIndex and trigger event if changed
if (scrollorama.blockIndex !== currBlockIndex) {
scrollorama.blockIndex = currBlockIndex;
onBlockChange();
}
}
function getCurrBlockIndex(scrollTop) {
var currBlockIndex = 0, i;
for (i=0; i<blocks.length; i++) {
// check if block is in view
if (blocks[i].top <= scrollTop - scrollorama.settings.offset) { currBlockIndex = i; }
}
return currBlockIndex;
}
function setProperty(target, prop, val) {
var scaleCSS, currentPosition;
if (prop === 'rotate' || prop === 'zoom' || prop === 'scale') {
if (prop === 'rotate') {
target.css(browserPrefix+'transform', 'rotate('+val+'deg)');
} else if (prop === 'zoom' || prop === 'scale') {
scaleCSS = 'scale('+val+')';
if (browserPrefix !== '-ms-') {
target.css(browserPrefix+'transform', scaleCSS);
} else {
target.css('zoom', scaleCSS);
}
}
}
else if(prop === 'background-position-x' || prop === 'background-position-y' ) {
currentPosition = target.css('background-position').split(' ');
if(prop === 'background-position-x') {
target.css('background-position',val+'px '+currentPosition[1]);
}
if(prop === 'background-position-y') {
target.css('background-position', currentPosition[0]+' '+val+'px');
}
}
else if(prop === 'text-shadow' ) {
target.css(prop,'0px 0px '+val+'px #ffffff');
} else {
target.css(prop, val);
}
} | // PUBLIC FUNCTIONS
scrollorama.animate = function(target) {
var targetIndex,
targetBlock,
anim,
offset,
i, j;
/*
target = animation target
arguments = array of animation parameters
anim = object that contains all animation params (created from arguments)
offset = positioning helper for pinning
animation parameters:
delay = amount of scrolling (in pixels) before animation starts
duration = amount of scrolling (in pixels) over which the animation occurs
property = css property being animated
start = start value of the property
end = end value of the property
pin = pin block during animation duration (applies to all animations within block)
baseline = top (default, when block reaches top of viewport) or bottom (when block first comies into view)
easing = just like jquery's easing functions
*/
// if string, convert to DOM object
if (typeof target === 'string') { target = $(target); }
// find block of target
for (i=0; i<blocks.length; i++) {
if (blocks[i].block.has(target).length) {
targetBlock = blocks[i];
targetIndex = i;
}
}
// add each animation to the blocks animations array from function arguments
for (i=1; i<arguments.length; i++) {
anim = arguments[i];
// for top/left/right/bottom, set relative positioning if static
if (anim.property === 'top' || anim.property === 'left' || anim.property === 'bottom' || anim.property === 'right' ) {
if (target.css('position') === 'static') { target.css('position','relative'); }
// set anim.start, anim.end defaults
cssValue = parseInt(target.css(anim.property),10);
if (anim.start === undefined) {
anim.start = isNaN(cssValue) ? 0 : cssValue;
} else if (anim.end === undefined) {
anim.end = isNaN(cssValue) ? 0 : cssValue;
}
}
// set anim.start/anim.end defaults for rotate, zoom/scale, letter-spacing
if (anim.property === 'rotate') {
if (anim.start === undefined) { anim.start = 0; }
if (anim.end === undefined) { anim.end = 0; }
} else if (anim.property === 'zoom' || anim.property === 'scale' ) {
if (anim.start === undefined) { anim.start = 1; }
if (anim.end === undefined) { anim.end = 1; }
} else if (anim.property === 'letter-spacing' && target.css(anim.property)) {
if (anim.start === undefined) { anim.start = 1; }
if (anim.end === undefined) { anim.end = 1; }
}
if (anim.baseline === undefined) {
if (anim.pin || targetBlock.pin || targetIndex === 0) {
anim.baseline = 'top';
} else {
anim.baseline = 'bottom';
}
}
if (anim.delay === undefined) { anim.delay = 0; }
targetBlock.animations.push({
element: target,
delay: anim.delay,
duration: anim.duration,
property: anim.property,
startVal: anim.start !== undefined ? anim.start : parseInt(target.css(anim.property),10), // if undefined, use current css value
endVal: anim.end !== undefined ? anim.end : parseInt(target.css(anim.property),10), // if undefined, use current css value
baseline: anim.baseline !== undefined ? anim.baseline : 'bottom',
easing: anim.easing
});
if (anim.pin) {
if (targetBlock.pin < anim.duration + anim.delay) {
offset = anim.duration + anim.delay - targetBlock.pin;
targetBlock.pin += offset;
// adjust positions of blocks below target block
for (j=targetIndex+1; j<blocks.length; j++) {
blocks[j].top += offset;
blocks[j].block.css('top', blocks[j].top);
}
}
}
}
onScrollorama();
};
// function for passing blockChange event callback
scrollorama.onBlockChange = function(f) {
onBlockChange = f;
};
// function for getting an array of scrollpoints
// (top of each animation block and animation element scroll start point)
scrollorama.getScrollpoints = function() {
var scrollpoints = [],i,j,anim;
for (i=0; i<blocks.length; i++) {
scrollpoints.push(blocks[i].top);
// go through the animations for each block
if (blocks[i].animations.length && blocks[i].pin > 0) {
for (j=0; j<blocks[i].animations.length; j++) {
anim = blocks[i].animations[j];
scrollpoints.push(blocks[i].top + anim.delay + anim.duration);
}
}
}
// make sure scrollpoints are in numeric order
scrollpoints.sort(function(a,b) {return a - b;});
return scrollpoints;
};
// INIT
init();
return scrollorama;
};
//
// Easing functions from jQuery UI
//
$.extend($.easing, {
def: 'easeOutQuad',
swing: function (x, t, b, c, d) {
//alert($.easing.default);
return $.easing[$.easing.def](x, t, b, c, d);
},
easeInQuad: function (x, t, b, c, d) {
return c*(t/=d)*t + b;
},
easeOutQuad: function (x, t, b, c, d) {
return -c *(t/=d)*(t-2) + b;
},
easeInOutQuad: function (x, t, b, c, d) {
if ((t/=d/2) < 1) { return c/2*t*t + b; }
return -c/2 * ((--t)*(t-2) - 1) + b;
},
easeInCubic: function (x, t, b, c, d) {
return c*(t/=d)*t*t + b;
},
easeOutCubic: function (x, t, b, c, d) {
return c*((t=t/d-1)*t*t + 1) + b;
},
easeInOutCubic: function (x, t, b, c, d) {
if ((t/=d/2) < 1) { return c/2*t*t*t + b; }
return c/2*((t-=2)*t*t + 2) + b;
},
easeInQuart: function (x, t, b, c, d) {
return c*(t/=d)*t*t*t + b;
},
easeOutQuart: function (x, t, b, c, d) {
return -c * ((t=t/d-1)*t*t*t - 1) + b;
},
easeInOutQuart: function (x, t, b, c, d) {
if ((t/=d/2) < 1) { return c/2*t*t*t*t + b; }
return -c/2 * ((t-=2)*t*t*t - 2) + b;
},
easeInQuint: function (x, t, b, c, d) {
return c*(t/=d)*t*t*t*t + b;
},
easeOutQuint: function (x, t, b, c, d) {
return c*((t=t/d-1)*t*t*t*t + 1) + b;
},
easeInOutQuint: function (x, t, b, c, d) {
if ((t/=d/2) < 1) { return c/2*t*t*t*t*t + b; }
return c/2*((t-=2)*t*t*t*t + 2) + b;
},
easeInSine: function (x, t, b, c, d) {
return -c * Math.cos(t/d * (Math.PI/2)) + c + b;
},
easeOutSine: function (x, t, b, c, d) {
return c * Math.sin(t/d * (Math.PI/2)) + b;
},
easeInOutSine: function (x, t, b, c, d) {
return -c/2 * (Math.cos(Math.PI*t/d) - 1) + b;
},
easeInExpo: function (x, t, b, c, d) {
return (t===0) ? b : c * Math.pow(2, 10 * (t/d - 1)) + b;
},
easeOutExpo: function (x, t, b, c, d) {
return (t===d) ? b+c : c * (-Math.pow(2, -10 * t/d) + 1) + b;
},
easeInOutExpo: function (x, t, b, c, d) {
if (t===0) { return b; }
if (t===d) { return b+c; }
if ((t/=d/2) < 1) { return c/2 * Math.pow(2, 10 * (t - 1)) + b; }
return c/2 * (-Math.pow(2, -10 * --t) + 2) + b;
},
easeInCirc: function (x, t, b, c, d) {
return -c * (Math.sqrt(1 - (t/=d)*t) - 1) + b;
},
easeOutCirc: function (x, t, b, c, d) {
return c * Math.sqrt(1 - (t=t/d-1)*t) + b;
},
easeInOutCirc: function (x, t, b, c, d) {
if ((t/=d/2) < 1) { return -c/2 * (Math.sqrt(1 - t*t) - 1) + b; }
return c/2 * (Math.sqrt(1 - (t-=2)*t) + 1) + b;
},
easeInElastic: function (x, t, b, c, d) {
var s=1.70158,p=0,a=c;
if (t===0) { return b; }
if ((t/=d)===1) { return b+c; }
if (!p) { p=d*0.3; }
if (a < Math.abs(c)) { a=c; s=p/4; }
else{ s = p/(2*Math.PI) * Math.asin (c/a); }
return -(a*Math.pow(2,10*(t-=1)) * Math.sin( (t*d-s)*(2*Math.PI)/p )) + b;
},
easeOutElastic: function (x, t, b, c, d) {
var s=1.70158,p=0,a=c;
if (t===0) { return b; }
if ((t/=d)===1) { return b+c; }
if (!p) { p=d*0.3; }
if (a < Math.abs(c)) { a=c; s=p/4; }
else { s = p/(2*Math.PI) * Math.asin (c/a); }
return a*Math.pow(2,-10*t) * Math.sin( (t*d-s)*(2*Math.PI)/p ) + c + b;
},
easeInOutElastic: function (x, t, b, c, d) {
var s=1.70158,p=0,a=c;
if (t===0) { return b; }
if ((t/=d/2)===2) { return b+c; }
if (!p) { p=d*(0.3*1.5); }
if (a < Math.abs(c)) { a=c; s=p/4; }
else { s = p/(2*Math.PI) * Math.asin (c/a); }
if (t < 1) { return -0.5*(a*Math.pow(2,10*(t-=1)) * Math.sin( (t*d-s)*(2*Math.PI)/p )) + b; }
return a*Math.pow(2,-10*(t-=1)) * Math.sin( (t*d-s)*(2*Math.PI)/p )*0.5 + c + b;
},
easeInBack: function (x, t, b, c, d, s) {
if (s === undefined) { s = 1.70158; }
return c*(t/=d)*t*((s+1)*t - s) + b;
},
easeOutBack: function (x, t, b, c, d, s) {
if (s === undefined) { s = 1.70158; }
return c*((t=t/d-1)*t*((s+1)*t + s) + 1) + b;
},
easeInOutBack: function (x, t, b, c, d, s) {
if (s === undefined) { s = 1.70158; }
if ((t/=d/2) < 1) { return c/2*(t*t*(((s*=(1.525))+1)*t - s)) + b; }
return c/2*((t-=2)*t*(((s*=(1.525))+1)*t + s) + 2) + b;
},
easeInBounce: function (x, t, b, c, d) {
return c - $.easing.easeOutBounce (x, d-t, 0, c, d) + b;
},
easeOutBounce: function (x, t, b, c, d) {
if ((t/=d) < (1/2.75)) {
return c*(7.5625*t*t) + b;
} else if (t < (2/2.75)) {
return c*(7.5625*(t-=(1.5/2.75))*t + 0.75) + b;
} else if (t < (2.5/2.75)) {
return c*(7.5625*(t-=(2.25/2.75))*t + 0.9375) + b;
} else {
return c*(7.5625*(t-=(2.625/2.75))*t + 0.984375) + b;
}
},
easeInOutBounce: function (x, t, b, c, d) {
if (t < d/2) { return $.easing.easeInBounce (x, t*2, 0, c, d) * 0.5 + b; }
return $.easing.easeOutBounce (x, t*2-d, 0, c, d) * 0.5 + c*0.5 + b;
}
});
})(jQuery); | |
model.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ["Model", "TrainState", "LossHistory"]
import pickle
from collections import OrderedDict
import numpy as np
from . import config
from . import display
from . import gradients as grad
from . import losses as losses_module
from . import metrics as metrics_module
from . import optimizers
from . import utils
from .backend import backend_name, tf, torch
from .callbacks import CallbackList
class Model(object):
|
class TrainState(object):
    """Bookkeeping for the training loop: the current data, the latest
    train/test results, and the best results seen so far (minimum total
    training loss)."""

    def __init__(self):
        self.epoch, self.step = 0, 0
        # Data currently being trained / tested on.
        self.X_train, self.y_train, self.train_aux_vars = None, None, None
        self.X_test, self.y_test, self.test_aux_vars = None, None, None
        # Results of the most recent step (train side).
        self.loss_train, self.y_pred_train = None, None
        # Results of the most recent step (test side).
        self.loss_test, self.y_pred_test = None, None
        self.y_std_test, self.metrics_test = None, None
        # Best-so-far results, i.e. the step with the lowest total train loss.
        self.best_step = 0
        self.best_loss_train, self.best_loss_test = np.inf, np.inf
        self.best_y, self.best_ystd, self.best_metrics = None, None, None

    def set_data_train(self, X_train, y_train, train_aux_vars=None):
        """Record the current training data."""
        self.X_train, self.y_train = X_train, y_train
        self.train_aux_vars = train_aux_vars

    def set_data_test(self, X_test, y_test, test_aux_vars=None):
        """Record the current test data."""
        self.X_test, self.y_test = X_test, y_test
        self.test_aux_vars = test_aux_vars

    def update_best(self):
        """Snapshot the current step as the new best if its total train loss
        beats the best seen so far."""
        current = np.sum(self.loss_train)
        if current < self.best_loss_train:
            self.best_step = self.step
            self.best_loss_train = current
            self.best_loss_test = np.sum(self.loss_test)
            self.best_y = self.y_pred_test
            self.best_ystd = self.y_std_test
            self.best_metrics = self.metrics_test

    def disregard_best(self):
        """Forget the best loss so the next update_best() always records anew."""
        self.best_loss_train = np.inf

    def packed_data(self):
        """Return (X_train, y_train, X_test, y_test, best_y, best_ystd), with
        multi-component values horizontally stacked into single arrays."""

        def merge(values):
            if values is None:
                return None
            if isinstance(values, (list, tuple)):
                return np.hstack(values)
            return values

        return (
            merge(self.X_train),
            merge(self.y_train),
            merge(self.X_test),
            merge(self.y_test),
            merge(self.best_y),
            merge(self.best_ystd),
        )
class LossHistory(object):
    """Time series of train/test losses and test metrics over training steps."""

    def __init__(self):
        self.steps = []
        self.loss_train = []
        self.loss_test = []
        self.metrics_test = []
        # Scalar 1 by default; replaced by the weights given at compile time.
        self.loss_weights = 1

    def set_loss_weights(self, loss_weights):
        """Remember the loss weights used for this run."""
        self.loss_weights = loss_weights

    def append(self, step, loss_train, loss_test, metrics_test):
        """Append one record; a None test loss/metric repeats the previous entry."""
        self.steps.append(step)
        self.loss_train.append(loss_train)
        self.loss_test.append(self.loss_test[-1] if loss_test is None else loss_test)
        self.metrics_test.append(
            self.metrics_test[-1] if metrics_test is None else metrics_test
        )
| """A ``Model`` trains a ``NN`` on a ``Data``.
Args:
data: ``deepxde.data.Data`` instance.
net: ``deepxde.nn.NN`` instance.
"""
    def __init__(self, data, net):
        self.data = data
        self.net = net
        # Populated by compile()/train().
        self.opt_name = None
        self.batch_size = None
        self.callbacks = None
        self.metrics = None
        self.external_trainable_variables = []
        self.train_state = TrainState()
        self.losshistory = LossHistory()
        # When True, the training loop should stop early — presumably set by
        # callbacks; confirm against train().
        self.stop_training = False
        # Backend-dependent attributes
        self.opt = None
        # Tensor or callable
        self.outputs = None
        self.outputs_losses = None
        self.train_step = None
        if backend_name == "tensorflow.compat.v1":
            self.sess = None
            self.saver = None
    @utils.timing
    def compile(
        self,
        optimizer,
        lr=None,
        loss="MSE",
        metrics=None,
        decay=None,
        loss_weights=None,
        external_trainable_variables=None,
    ):
        """Configures the model for training.

        Args:
            optimizer: String. Name of optimizer.
            lr: A Tensor or a floating point value. The learning rate. For L-BFGS, use
                `dde.optimizers.set_LBFGS_options` to set the hyperparameters.
            loss: If the same loss is used for all errors, then `loss` is a String (name
                of objective function) or objective function. If different errors use
                different losses, then `loss` is a list whose size is equal to the
                number of errors.
            metrics: List of metrics to be evaluated by the model during training.
            decay: Tuple. Name and parameters of decay to the initial learning rate. One
                of the following options:

                - `inverse time decay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/InverseTimeDecay>`_: ("inverse time", decay_steps, decay_rate)
                - `cosine decay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/CosineDecay>`_: ("cosine", decay_steps, alpha)

            loss_weights: A list specifying scalar coefficients (Python floats) to
                weight the loss contributions. The loss value that will be minimized by
                the model will then be the weighted sum of all individual losses,
                weighted by the loss_weights coefficients.
            external_trainable_variables: A trainable ``tf.Variable`` object or a list
                of trainable ``tf.Variable`` objects. The unknown parameters in the
                physics systems that need to be recovered. If the backend is
                tensorflow.compat.v1, `external_trainable_variables` is ignored, and all
                trainable ``tf.Variable`` objects are automatically collected.
        """
        print("Compiling model...")

        self.opt_name = optimizer
        loss_fn = losses_module.get(loss)
        # Normalize external_trainable_variables to a list; warn for TF1 where
        # all trainable variables are collected automatically instead.
        if external_trainable_variables is None:
            self.external_trainable_variables = []
        else:
            if backend_name == "tensorflow.compat.v1":
                print(
                    "Warning: For the backend tensorflow.compat.v1, "
                    "`external_trainable_variables` is ignored, and all trainable "
                    "``tf.Variable`` objects are automatically collected."
                )
            if not isinstance(external_trainable_variables, list):
                external_trainable_variables = [external_trainable_variables]
            self.external_trainable_variables = external_trainable_variables

        # Dispatch to the backend-specific compile routine.
        if backend_name == "tensorflow.compat.v1":
            self._compile_tensorflow_compat_v1(lr, loss_fn, decay, loss_weights)
        elif backend_name == "tensorflow":
            self._compile_tensorflow(lr, loss_fn, decay, loss_weights)
        elif backend_name == "pytorch":
            self._compile_pytorch(lr, loss_fn, decay, loss_weights)
        # metrics may use model variables such as self.net, and thus are instantiated
        # after backend compile.
        metrics = metrics or []
        self.metrics = [metrics_module.get(m) for m in metrics]
    def _compile_tensorflow_compat_v1(self, lr, loss_fn, decay, loss_weights):
        """tensorflow.compat.v1"""
        # Build the static graph once and reuse the session across compiles.
        if not self.net.built:
            self.net.build()
        if self.sess is None:
            self.sess = tf.Session()
            self.saver = tf.train.Saver(max_to_keep=None)

        # Data losses
        losses = self.data.losses(self.net.targets, self.net.outputs, loss_fn, self)
        if not isinstance(losses, list):
            losses = [losses]
        # Regularization loss
        if self.net.regularizer is not None:
            losses.append(tf.losses.get_regularization_loss())
        losses = tf.convert_to_tensor(losses)
        # Weighted losses
        if loss_weights is not None:
            losses *= loss_weights
            self.losshistory.set_loss_weights(loss_weights)
        # Total loss minimized by the optimizer is the (weighted) sum of terms.
        total_loss = tf.math.reduce_sum(losses)

        # Tensors
        self.outputs = self.net.outputs
        self.outputs_losses = [self.net.outputs, losses]
        self.train_step = optimizers.get(
            total_loss, self.opt_name, learning_rate=lr, decay=decay
        )
    def _compile_tensorflow(self, lr, loss_fn, decay, loss_weights):
        """tensorflow"""

        # TODO: Avoid creating multiple graphs by using tf.TensorSpec.
        @tf.function
        def outputs(training, inputs):
            # Plain forward pass (no losses); traced per input signature.
            return self.net(inputs, training=training)

        # TODO: Avoid creating multiple graphs by using tf.TensorSpec.
        @tf.function
        def outputs_losses(training, inputs, targets, auxiliary_vars):
            self.net.training = training
            self.net.inputs = inputs
            self.net.auxiliary_vars = auxiliary_vars
            # Don't call outputs() decorated by @tf.function above, otherwise the
            # gradient of outputs wrt inputs will be lost here.
            outputs_ = self.net(inputs, training=training)
            # Data losses
            losses = self.data.losses(targets, outputs_, loss_fn, self)
            if not isinstance(losses, list):
                losses = [losses]
            # Regularization loss
            if self.net.regularizer is not None:
                losses += [tf.math.reduce_sum(self.net.losses)]
            losses = tf.convert_to_tensor(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= loss_weights
                self.losshistory.set_loss_weights(loss_weights)
            return outputs_, losses

        opt = optimizers.get(self.opt_name, learning_rate=lr, decay=decay)

        @tf.function
        def train_step(inputs, targets, auxiliary_vars):
            # inputs and targets are np.ndarray and automatically converted to Tensor.
            with tf.GradientTape() as tape:
                losses = outputs_losses(True, inputs, targets, auxiliary_vars)[1]
                total_loss = tf.math.reduce_sum(losses)
            trainable_variables = (
                self.net.trainable_variables + self.external_trainable_variables
            )
            grads = tape.gradient(total_loss, trainable_variables)
            opt.apply_gradients(zip(grads, trainable_variables))

        def train_step_tfp(
            inputs, targets, auxiliary_vars, previous_optimizer_results=None
        ):
            # Variant for external (TFP) optimizers such as L-BFGS, which drive
            # the optimization themselves via a loss-building closure.
            def build_loss():
                losses = outputs_losses(True, inputs, targets, auxiliary_vars)[1]
                return tf.math.reduce_sum(losses)

            trainable_variables = (
                self.net.trainable_variables + self.external_trainable_variables
            )
            return opt(trainable_variables, build_loss, previous_optimizer_results)

        # Callables
        self.outputs = outputs
        self.outputs_losses = outputs_losses
        self.train_step = (
            train_step
            if not optimizers.is_external_optimizer(self.opt_name)
            else train_step_tfp
        )
    def _compile_pytorch(self, lr, loss_fn, decay, loss_weights):
        """pytorch"""

        def outputs(training, inputs):
            # Inference-only forward pass; no gradients are tracked.
            self.net.train(mode=training)
            with torch.no_grad():
                return self.net(torch.as_tensor(inputs))

        def outputs_losses(training, inputs, targets):
            self.net.train(mode=training)
            self.net.inputs = torch.as_tensor(inputs)
            # Track gradients w.r.t. the inputs (needed for PDE residual losses).
            self.net.inputs.requires_grad_()
            outputs_ = self.net(self.net.inputs)
            # Data losses
            if targets is not None:
                targets = torch.as_tensor(targets)
            losses = self.data.losses(targets, outputs_, loss_fn, self)
            if not isinstance(losses, list):
                losses = [losses]
            # TODO: regularization
            losses = torch.stack(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= torch.as_tensor(loss_weights)
                self.losshistory.set_loss_weights(loss_weights)
            # Clear cached Jacobians and Hessians.
            grad.clear()
            return outputs_, losses

        # Another way is using per-parameter options
        # https://pytorch.org/docs/stable/optim.html#per-parameter-options,
        # but not all optimizers (such as L-BFGS) support this.
        trainable_variables = (
            list(self.net.parameters()) + self.external_trainable_variables
        )
        self.opt = optimizers.get(
            trainable_variables, self.opt_name, learning_rate=lr, decay=decay
        )

        def train_step(inputs, targets):
            # The closure form supports optimizers (e.g. L-BFGS) that re-evaluate
            # the loss multiple times per step.
            def closure():
                losses = outputs_losses(True, inputs, targets)[1]
                total_loss = torch.sum(losses)
                self.opt.zero_grad()
                total_loss.backward()
                return total_loss

            self.opt.step(closure)

        # Callables
        self.outputs = outputs
        self.outputs_losses = outputs_losses
        self.train_step = train_step
def _outputs(self, training, inputs):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(training, inputs)
return self.sess.run(self.outputs, feed_dict=feed_dict)
# tensorflow and pytorch
outs = self.outputs(training, inputs)
return utils.to_numpy(outs)
def _outputs_losses(self, training, inputs, targets, auxiliary_vars):
    """Compute network outputs and losses for one batch as NumPy values.

    Args:
        training: Bool. Whether to evaluate in training mode.
        inputs: Batch of input points.
        targets: Batch of target values (may be ``None``).
        auxiliary_vars: Auxiliary variables (ignored by the pytorch path).
    """
    if backend_name == "tensorflow.compat.v1":
        feed_dict = self.net.feed_dict(training, inputs, targets, auxiliary_vars)
        return self.sess.run(self.outputs_losses, feed_dict=feed_dict)
    if backend_name == "tensorflow":
        outs = self.outputs_losses(training, inputs, targets, auxiliary_vars)
    elif backend_name == "pytorch":
        # TODO: auxiliary_vars
        # Disable parameter gradients during evaluation, then re-enable them.
        self.net.requires_grad_(requires_grad=False)
        outs = self.outputs_losses(training, inputs, targets)
        self.net.requires_grad_()
    # NOTE(review): for any other backend name, `outs` would be unbound here —
    # presumably unreachable; confirm the set of supported backends upstream.
    return utils.to_numpy(outs)
def _train_step(self, inputs, targets, auxiliary_vars):
    """Execute one optimizer update for the active backend."""
    if backend_name == "tensorflow.compat.v1":
        # Graph mode: run the training op with a fresh feed dict.
        self.sess.run(
            self.train_step,
            feed_dict=self.net.feed_dict(True, inputs, targets, auxiliary_vars),
        )
    elif backend_name == "tensorflow":
        self.train_step(inputs, targets, auxiliary_vars)
    elif backend_name == "pytorch":
        # TODO: auxiliary_vars
        self.train_step(inputs, targets)
@utils.timing
def train(
    self,
    epochs=None,
    batch_size=None,
    display_every=1000,
    disregard_previous_best=False,
    callbacks=None,
    model_restore_path=None,
    model_save_path=None,
):
    """Trains the model for a fixed number of epochs (iterations on a dataset).

    Args:
        epochs: Integer. Number of iterations to train the model. Note: It is the
            number of iterations, not the number of epochs.
        batch_size: Integer or ``None``. If you solve PDEs via ``dde.data.PDE`` or
            ``dde.data.TimePDE``, do not use `batch_size`, and instead use
            `dde.callbacks.PDEResidualResampler
            <https://deepxde.readthedocs.io/en/latest/modules/deepxde.html#deepxde.callbacks.PDEResidualResampler>`_,
            see an `example <https://github.com/lululxvi/deepxde/blob/master/examples/diffusion_1d_resample.py>`_.
        display_every: Integer. Print the loss and metrics every this steps.
        disregard_previous_best: If ``True``, disregard the previous saved best
            model.
        callbacks: List of ``dde.callbacks.Callback`` instances. List of callbacks
            to apply during training.
        model_restore_path: String. Path where parameters were previously saved.
            See ``save_path`` in `tf.train.Saver.restore <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Saver#restore>`_.
        model_save_path: String. Prefix of filenames created for the checkpoint.
            See ``save_path`` in `tf.train.Saver.save <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Saver#save>`_.

    Returns:
        Tuple ``(losshistory, train_state)``.
    """
    self.batch_size = batch_size
    self.callbacks = CallbackList(callbacks=callbacks)
    self.callbacks.set_model(self)
    if disregard_previous_best:
        self.train_state.disregard_best()
    if backend_name == "tensorflow.compat.v1":
        if self.train_state.step == 0:
            print("Initializing variables...")
            self.sess.run(tf.global_variables_initializer())
        else:
            # Resumed training: initialize only variables created since the
            # previous run so trained weights are preserved.
            utils.guarantee_initialized_variables(self.sess)
    if model_restore_path is not None:
        self.restore(model_restore_path, verbose=1)
    print("Training model...\n")
    self.stop_training = False
    self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
    self.train_state.set_data_test(*self.data.test())
    # Evaluate once before training so history starts at the initial state.
    self._test()
    self.callbacks.on_train_begin()
    if optimizers.is_external_optimizer(self.opt_name):
        # External optimizers (L-BFGS variants) drive their own loop.
        if backend_name == "tensorflow.compat.v1":
            self._train_tensorflow_compat_v1_scipy(display_every)
        elif backend_name == "tensorflow":
            self._train_tensorflow_tfp()
        elif backend_name == "pytorch":
            self._train_pytorch_lbfgs()
    else:
        if epochs is None:
            raise ValueError("No epochs for {}.".format(self.opt_name))
        self._train_sgd(epochs, display_every)
    self.callbacks.on_train_end()
    print("")
    display.training_display.summary(self.train_state)
    if model_save_path is not None:
        self.save(model_save_path, verbose=1)
    return self.losshistory, self.train_state
def _train_sgd(self, epochs, display_every):
    """Plain gradient-descent training loop for built-in optimizers."""
    for iteration in range(epochs):
        self.callbacks.on_epoch_begin()
        self.callbacks.on_batch_begin()

        # Fetch the next batch and take one optimizer step on it.
        self.train_state.set_data_train(
            *self.data.train_next_batch(self.batch_size)
        )
        self._train_step(
            self.train_state.X_train,
            self.train_state.y_train,
            self.train_state.train_aux_vars,
        )

        self.train_state.epoch += 1
        self.train_state.step += 1
        # Report on the display interval and always on the final iteration.
        is_last = iteration + 1 == epochs
        if is_last or self.train_state.step % display_every == 0:
            self._test()

        self.callbacks.on_batch_end()
        self.callbacks.on_epoch_end()
        if self.stop_training:
            break
def _train_tensorflow_compat_v1_scipy(self, display_every):
    """Train with a SciPy external optimizer (e.g. L-BFGS-B) under TF1.

    The SciPy interface drives the whole optimization inside a single
    ``minimize`` call; per-iteration progress is reported via
    ``loss_callback``.
    """

    def loss_callback(loss_train):
        # Invoked by the optimizer interface once per iteration with the
        # fetched loss values.
        self.train_state.epoch += 1
        self.train_state.step += 1
        if self.train_state.step % display_every == 0:
            self.train_state.loss_train = loss_train
            self.train_state.loss_test = None
            self.train_state.metrics_test = None
            self.losshistory.append(
                self.train_state.step, self.train_state.loss_train, None, None
            )
            display.training_display(self.train_state)

    self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
    feed_dict = self.net.feed_dict(
        True,
        self.train_state.X_train,
        self.train_state.y_train,
        self.train_state.train_aux_vars,
    )
    # Fetch the per-term losses so loss_callback receives them each iteration.
    self.train_step.minimize(
        self.sess,
        feed_dict=feed_dict,
        fetches=[self.outputs_losses[1]],
        loss_callback=loss_callback,
    )
    self._test()
def _train_tensorflow_tfp(self):
    """Train with the TFP L-BFGS optimizer until maxiter or convergence."""
    # There is only one optimization step. If using multiple steps
    # with/without previous_optimizer_results, L-BFGS failed to reach a small
    # error, likely because tfp.optimizer.lbfgs_minimize restarts from
    # scratch on each call.
    iterations_done = 0
    max_iterations = optimizers.LBFGS_options["maxiter"]
    while iterations_done < max_iterations:
        self.train_state.set_data_train(
            *self.data.train_next_batch(self.batch_size)
        )
        results = self.train_step(
            self.train_state.X_train,
            self.train_state.y_train,
            self.train_state.train_aux_vars,
        )
        # Advance all counters by the iterations actually performed.
        performed = results.num_iterations.numpy()
        iterations_done += performed
        self.train_state.epoch += performed
        self.train_state.step += performed
        self._test()
        if results.converged or results.failed:
            break
def _train_pytorch_lbfgs(self):
    """Train with PyTorch's L-BFGS until maxiter or convergence.

    Progress is tracked via the optimizer's internal iteration counter
    rather than loop iterations, because one ``step`` may perform many
    L-BFGS iterations.
    """
    prev_n_iter = 0
    while prev_n_iter < optimizers.LBFGS_options["maxiter"]:
        self.callbacks.on_epoch_begin()
        self.callbacks.on_batch_begin()
        self.train_state.set_data_train(
            *self.data.train_next_batch(self.batch_size)
        )
        self._train_step(
            self.train_state.X_train,
            self.train_state.y_train,
            self.train_state.train_aux_vars,
        )
        # Total L-BFGS iterations so far, read from the optimizer state.
        n_iter = self.opt.state_dict()["state"][0]["n_iter"]
        if prev_n_iter == n_iter:
            # Converged: the optimizer performed no further iterations.
            break
        self.train_state.epoch += n_iter - prev_n_iter
        self.train_state.step += n_iter - prev_n_iter
        prev_n_iter = n_iter
        self._test()
        self.callbacks.on_batch_end()
        self.callbacks.on_epoch_end()
        if self.stop_training:
            break
def _test(self):
    """Evaluate losses and metrics on the train/test sets and record them."""
    train_pred, train_loss = self._outputs_losses(
        True,
        self.train_state.X_train,
        self.train_state.y_train,
        self.train_state.train_aux_vars,
    )
    self.train_state.y_pred_train = train_pred
    self.train_state.loss_train = train_loss

    test_pred, test_loss = self._outputs_losses(
        False,
        self.train_state.X_test,
        self.train_state.y_test,
        self.train_state.test_aux_vars,
    )
    self.train_state.y_pred_test = test_pred
    self.train_state.loss_test = test_loss

    # Multi-output networks store y_test as a list/tuple: apply every metric
    # to every output component.
    y_test = self.train_state.y_test
    if isinstance(y_test, (list, tuple)):
        metrics = [
            m(y_test[i], self.train_state.y_pred_test[i])
            for m in self.metrics
            for i in range(len(y_test))
        ]
    else:
        metrics = [m(y_test, self.train_state.y_pred_test) for m in self.metrics]
    self.train_state.metrics_test = metrics

    self.train_state.update_best()
    self.losshistory.append(
        self.train_state.step,
        self.train_state.loss_train,
        self.train_state.loss_test,
        self.train_state.metrics_test,
    )
    display.training_display(self.train_state)
def predict(self, x, operator=None, callbacks=None):
    """Generates output predictions for the input samples.

    Args:
        x: Input points, a NumPy array (or a tuple of arrays for networks
            with multiple inputs); cast to the configured real dtype.
        operator: Optional callable taking ``(inputs, outputs)`` or
            ``(inputs, outputs, x)``; if given, its result is returned
            instead of the raw network outputs.
        callbacks: List of ``dde.callbacks.Callback`` instances applied
            around prediction.
    """
    if isinstance(x, tuple):
        x = tuple(np.array(xi, dtype=config.real(np)) for xi in x)
    else:
        x = np.array(x, dtype=config.real(np))
    self.callbacks = CallbackList(callbacks=callbacks)
    self.callbacks.set_model(self)
    self.callbacks.on_predict_begin()
    if operator is None:
        y = self._outputs(False, x)
    else:
        # TODO: predict operator with auxiliary_vars
        if backend_name == "tensorflow.compat.v1":
            # Build the graph op from the operator, then run it in the session.
            if utils.get_num_args(operator) == 2:
                op = operator(self.net.inputs, self.net.outputs)
            elif utils.get_num_args(operator) == 3:
                op = operator(self.net.inputs, self.net.outputs, x)
            y = self.sess.run(op, feed_dict=self.net.feed_dict(False, x))
        elif backend_name == "tensorflow":
            # Wrap the operator in a tf.function for a compiled forward pass.
            if utils.get_num_args(operator) == 2:

                @tf.function
                def op(inputs):
                    y = self.net(inputs)
                    return operator(inputs, y)

            elif utils.get_num_args(operator) == 3:

                @tf.function
                def op(inputs):
                    y = self.net(inputs)
                    return operator(inputs, y, x)

            y = op(x)
            y = utils.to_numpy(y)
        elif backend_name == "pytorch":
            # Inputs require grad in case the operator differentiates
            # outputs w.r.t. inputs (e.g. PDE residuals).
            inputs = torch.as_tensor(x)
            inputs.requires_grad_()
            outputs = self.net(inputs)
            if utils.get_num_args(operator) == 2:
                y = operator(inputs, outputs)
            elif utils.get_num_args(operator) == 3:
                y = operator(inputs, outputs, x)
            y = utils.to_numpy(y)
    self.callbacks.on_predict_end()
    return y
# def evaluate(self, x, y, callbacks=None):
# """Returns the loss values & metrics values for the model in test mode."""
# raise NotImplementedError(
# "Model.evaluate to be implemented. Alternatively, use Model.predict."
# )
def state_dict(self):
    """Returns an OrderedDict mapping variable names to their values."""
    # TODO: backend tensorflow, pytorch
    if backend_name != "tensorflow.compat.v1":
        raise NotImplementedError(
            "state_dict hasn't been implemented for this backend."
        )
    # Evaluate every global variable in one session run.
    names = [v.name for v in tf.global_variables()]
    values = self.sess.run(names)
    return OrderedDict(zip(names, values))
def save(self, save_path, protocol="tf.train.Saver", verbose=0):
    """Saves all variables to a disk file.

    Args:
        save_path (string): Prefix of filenames created for the checkpoint.
        protocol (string): If `protocol` is "tf.train.Saver", save using
            `tf.train.Save <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Saver#attributes>`_.
            If `protocol` is "pickle", save using the Python pickle module. Only
            "tf.train.Saver" protocol supports ``restore()``.
        verbose (int): If greater than 0, print a message when saving.

    Raises:
        NotImplementedError: If the backend is not tensorflow.compat.v1.
        ValueError: If `protocol` is not one of the supported values.
    """
    # TODO: backend tensorflow, pytorch
    if backend_name != "tensorflow.compat.v1":
        # Fixed copy-paste bug: message previously referred to state_dict.
        raise NotImplementedError(
            "save hasn't been implemented for this backend."
        )
    if verbose > 0:
        print(
            "Epoch {}: saving model to {}-{} ...\n".format(
                self.train_state.epoch, save_path, self.train_state.epoch
            )
        )
    if protocol == "tf.train.Saver":
        self.saver.save(self.sess, save_path, global_step=self.train_state.epoch)
    elif protocol == "pickle":
        with open("{}-{}.pkl".format(save_path, self.train_state.epoch), "wb") as f:
            pickle.dump(self.state_dict(), f)
    else:
        # Previously an unknown protocol silently saved nothing.
        raise ValueError("Unknown protocol: {}".format(protocol))
def restore(self, save_path, verbose=0):
    """Restore all variables from a disk file.

    Args:
        save_path (string): Checkpoint path previously passed to ``save()``
            (only the "tf.train.Saver" protocol is restorable).
        verbose (int): If greater than 0, print a message when restoring.

    Raises:
        NotImplementedError: If the backend is not tensorflow.compat.v1.
    """
    # TODO: backend tensorflow, pytorch
    if backend_name != "tensorflow.compat.v1":
        # Fixed copy-paste bug: message previously referred to state_dict.
        raise NotImplementedError(
            "restore hasn't been implemented for this backend."
        )
    if verbose > 0:
        print("Restoring model from {} ...\n".format(save_path))
    self.saver.restore(self.sess, save_path)
def print_model(self):
    """Prints all trainable variables (name, shape, and values).

    Raises:
        NotImplementedError: If the backend is not tensorflow.compat.v1.
    """
    # TODO: backend tensorflow, pytorch
    if backend_name != "tensorflow.compat.v1":
        # Fixed copy-paste bug: message previously referred to state_dict.
        raise NotImplementedError(
            "print_model hasn't been implemented for this backend."
        )
    variables_names = [v.name for v in tf.trainable_variables()]
    values = self.sess.run(variables_names)
    for k, v in zip(variables_names, values):
        print("Variable: {}, Shape: {}".format(k, v.shape))
        print(v)
get_method_signatures_test.go | // +build unit
package contracts
import (
"context"
"fmt"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/consensys/orchestrate/pkg/errors"
"github.com/consensys/orchestrate/pkg/types/testutils"
mocks2 "github.com/consensys/orchestrate/services/api/business/use-cases/mocks"
)
func TestGetMethodSignatures_Execute(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
ctx := context.Background()
mockGetContractUC := mocks2.NewMockGetContractUseCase(ctrl)
usecase := NewGetMethodSignaturesUseCase(mockGetContractUC)
t.Run("should execute use case successfully", func(t *testing.T) {
contract := testutils.FakeContract()
mockGetContractUC.EXPECT().Execute(gomock.Any(), contract.Name, contract.Tag).Return(contract, nil)
signatures, err := usecase.Execute(ctx, contract.Name, contract.Tag, "transfer")
assert.NoError(t, err)
assert.Equal(t, signatures[0], "transfer(address,uint256)")
})
t.Run("should execute use case successfully if method name is constructor", func(t *testing.T) {
contract := testutils.FakeContract()
mockGetContractUC.EXPECT().Execute(gomock.Any(), contract.Name, contract.Tag).Return(contract, nil)
signatures, err := usecase.Execute(ctx, contract.Name, contract.Tag, constructorMethodName)
assert.NoError(t, err)
assert.Equal(t, signatures[0], "constructor")
})
t.Run("should execute use case successfully and return an empty array if nothing is found", func(t *testing.T) {
contract := testutils.FakeContract()
mockGetContractUC.EXPECT().Execute(gomock.Any(), contract.Name, contract.Tag).Return(contract, nil)
signatures, err := usecase.Execute(ctx, contract.Name, contract.Tag, "inexistentMethod")
assert.NoError(t, err)
assert.Empty(t, signatures)
})
t.Run("should fail with same error if get contract fails", func(t *testing.T) {
contract := testutils.FakeContract()
expectedErr := fmt.Errorf("error")
mockGetContractUC.EXPECT().Execute(gomock.Any(), contract.Name, contract.Tag).Return(nil, expectedErr)
signatures, err := usecase.Execute(ctx, contract.Name, contract.Tag, constructorMethodName)
assert.Nil(t, signatures)
assert.Equal(t, errors.FromError(expectedErr).ExtendComponent(getMethodSignaturesComponent), err)
}) | contract.ABI = "wrongABI"
mockGetContractUC.EXPECT().Execute(gomock.Any(), contract.Name, contract.Tag).Return(contract, nil)
signatures, err := usecase.Execute(ctx, contract.Name, contract.Tag, constructorMethodName)
assert.Nil(t, signatures)
assert.True(t, errors.IsDataCorruptedError(err))
})
} |
t.Run("should fail with DataCorruptedError if fails to get the ABI", func(t *testing.T) {
contract := testutils.FakeContract() |
decisions.py | import rospy
from humanoid_league_msgs.msg import GameState, RobotControlState
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class CheckFallen(AbstractDecisionElement):
    """Decides whether the robot is currently fallen."""

    def perform(self, reevaluate=False):
        self.clear_debug_data()
        state = self.blackboard.robot_control_state
        return "FALLEN" if state == RobotControlState.FALLEN else "NOT_FALLEN"

    def get_reevaluate(self):
        # The robot state changes at any time, so re-run this every tick.
        return True
class CheckFalling(AbstractDecisionElement):
    """Decides whether the robot is currently falling."""

    def perform(self, reevaluate=False):
        self.clear_debug_data()
        state = self.blackboard.robot_control_state
        return "FALLING" if state == RobotControlState.FALLING else "NOT_FALLING"

    def get_reevaluate(self):
        # The robot state changes at any time, so re-run this every tick.
        return True
class CheckGettingUp(AbstractDecisionElement):
    """Decides whether the robot is currently getting up."""

    def perform(self, reevaluate=False):
        self.clear_debug_data()
        state = self.blackboard.robot_control_state
        if state == RobotControlState.GETTING_UP:
            return "GETTING_UP"
        return "NOT_GETTING_UP"

    def get_reevaluate(self):
        # The robot state changes at any time, so re-run this every tick.
        return True
class CheckPickup(AbstractDecisionElement):
    """Decides whether the robot is (or was just) picked up."""

    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.robot_control_state == RobotControlState.PICKED_UP:
            # Remember the pickup so the moment of release can be reported.
            self.blackboard.last_state_pickup = True
            return "UP"
        if self.blackboard.last_state_pickup:
            # First tick back on the ground after being carried.
            self.blackboard.last_state_pickup = False
            return "JUST_DOWN"
        return "DOWN"

    def get_reevaluate(self):
        # The robot state changes at any time, so re-run this every tick.
        return True
class GettingUpState(AbstractDecisionElement):
    """Decides whether the robot falls, stands up, or is freshly standing."""

    def __init__(self, blackboard, dsd, parameters=None):
        super(GettingUpState, self).__init__(blackboard, dsd, parameters)
        # States that count as "currently in the get-up procedure".
        self.get_up_states = [
            RobotControlState.FALLING,
            RobotControlState.FALLEN,
            RobotControlState.GETTING_UP,
        ]

    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.robot_control_state in self.get_up_states:
            self.blackboard.last_state_get_up = True
            return "YES"
        if self.blackboard.last_state_get_up:
            # First tick after finishing the get-up procedure.
            self.blackboard.last_state_get_up = False
            return "GOTUP"
        return "NO"

    def get_reevaluate(self):
        # The robot state changes at any time, so re-run this every tick.
        return True
class CheckGameStateReceived(AbstractDecisionElement):
    """Decides whether a gamestate message from the game controller arrived."""

    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.game_state_received:
            return "GAMESTATE_RECEIVED"
        if not self.blackboard.initialized:
            # First tick without any gamestate: run the init behavior once.
            self.blackboard.initialized = True
            return "NO_GAMESTATE_INIT"
        return "DO_NOTHING"

    def get_reevaluate(self):
        # A gamestate message may arrive at any time.
        return True
class CheckGameState(AbstractDecisionElement):
"""
Checks which game state we are in
"""
|
if self.blackboard.penalized:
return "PENALTY"
elif self.blackboard.game_state == 0:
return "INIT"
elif self.blackboard.game_state == 2:
return "SET"
elif self.blackboard.game_state == 3:
return "PLAYING"
return "NO_INFORMATION"
def get_reevaluate(self):
return True | def perform(self, reevaluate=False):
self.clear_debug_data() |
touch_event.rs | // This file is auto-generated by rute_gen. DO NOT EDIT.
use std::cell::Cell;
use std::rc::Rc;
#[allow(unused_imports)]
use std::marker::PhantomData;
#[allow(unused_imports)]
use std::os::raw::c_void;
#[allow(unused_imports)]
use std::mem::transmute;
#[allow(unused_imports)]
use std::ffi::{CStr, CString};
use rute_ffi_base::*;
#[allow(unused_imports)]
use auto::*;
/// **Notice these docs are heavy WIP and not very relevent yet**
///
/// # Enabling Touch Events
///
/// Touch events occur when pressing, releasing, or moving one or more touch points on a touch
/// device (such as a touch-screen or track-pad). To receive touch events, widgets have to have the
/// Qt::WA_AcceptTouchEvents attribute set and graphics items need to have the
/// [acceptTouchEvents](QGraphicsItem::setAcceptTouchEvents())
/// attribute set to true.
///
/// When using QAbstractScrollArea based widgets, you should enable the Qt::WA_AcceptTouchEvents
/// attribute on the scroll area's [viewport](QAbstractScrollArea::viewport())
///
///
/// Similarly to QMouseEvent, Qt automatically grabs each touch point on the first press inside a
/// widget, and the widget will receive all updates for the touch point until it is released.
/// Note that it is possible for a widget to receive events for numerous touch points, and that
/// multiple widgets may be receiving touch events at the same time.
///
/// # Event Handling
///
/// All touch events are of type QEvent::TouchBegin, QEvent::TouchUpdate, QEvent::TouchEnd or
/// QEvent::TouchCancel. Reimplement QWidget::event() or QAbstractScrollArea::viewportEvent() for
/// widgets and QGraphicsItem::sceneEvent() for items in a graphics view to receive touch events.
///
/// Unlike widgets, QWindows receive touch events always, there is no need to opt in. When working
/// directly with a QWindow, it is enough to reimplement QWindow::touchEvent().
///
/// The QEvent::TouchUpdate and QEvent::TouchEnd events are sent to the widget or item that
/// accepted the QEvent::TouchBegin event. If the QEvent::TouchBegin event is not accepted and not
/// filtered by an event filter, then no further touch events are sent until the next
/// QEvent::TouchBegin.
///
/// Some systems may send an event of type QEvent::TouchCancel. Upon receiving this event
/// applications are requested to ignore the entire active touch sequence. For example in a
/// composited system the compositor may decide to treat certain gestures as system-wide
/// gestures. Whenever such a decision is made (the gesture is recognized), the clients will be
/// notified with a QEvent::TouchCancel event so they can update their state accordingly.
///
/// The touchPoints() function returns a list of all touch points contained in the event. Note that
/// this list may be empty, for example in case of a QEvent::TouchCancel event. Information about
/// each touch point can be retrieved using the QTouchEvent::TouchPoint class. The
/// Qt::TouchPointState enum describes the different states that a touch point may have.
///
/// **Note**: The list of touchPoints() will never be partial: A touch event will always contain a touch
/// point for each existing physical touch contacts targetting the window or widget to which the
/// event is sent. For instance, assuming that all touches target the same window or widget, an
/// event with a condition of touchPoints().count()==2 is guaranteed to imply that the number of
/// fingers touching the touchscreen or touchpad is exactly two.
///
/// # Event Delivery and Propagation
///
/// By default, QGuiApplication translates the first touch point in a QTouchEvent into
/// a QMouseEvent. This makes it possible to enable touch events on existing widgets that do not
/// normally handle QTouchEvent. See below for information on some special considerations needed
/// when doing this.
///
/// QEvent::TouchBegin is the first touch event sent to a widget. The QEvent::TouchBegin event
/// contains a special accept flag that indicates whether the receiver wants the event. By default,
/// the event is accepted. You should call ignore() if the touch event is not handled by your
/// widget. The QEvent::TouchBegin event is propagated up the parent widget chain until a widget
/// accepts it with accept(), or an event filter consumes it. For QGraphicsItems, the
/// QEvent::TouchBegin event is propagated to items under the mouse (similar to mouse event
/// propagation for QGraphicsItems).
///
/// # Touch Point Grouping
///
/// As mentioned above, it is possible that several widgets can be receiving QTouchEvents at the
/// same time. However, Qt makes sure to never send duplicate QEvent::TouchBegin events to the same
/// widget, which could theoretically happen during propagation if, for example, the user touched 2
/// separate widgets in a QGroupBox and both widgets ignored the QEvent::TouchBegin event.
///
/// To avoid this, Qt will group new touch points together using the following rules:
///
/// * When the first touch point is detected, the destination widget is determined firstly by the location on screen and secondly by the propagation rules.
/// * When additional touch points are detected, Qt first looks to see if there are any active touch points on any ancestor or descendent of the widget under the new touch point. If there are, the new touch point is grouped with the first, and the new touch point will be sent in a single QTouchEvent to the widget that handled the first touch point. (The widget under the new touch point will not receive an event).
///
/// This makes it possible for sibling widgets to handle touch events independently while making
/// sure that the sequence of QTouchEvents is always correct.
///
/// # Mouse Events and Touch Event Synthesizing
///
/// QTouchEvent delivery is independent from that of QMouseEvent. The application flags
/// Qt::AA_SynthesizeTouchForUnhandledMouseEvents and Qt::AA_SynthesizeMouseForUnhandledTouchEvents
/// can be used to enable or disable automatic synthesizing of touch events to mouse events and
/// mouse events to touch events.
///
/// # Caveats
///
/// * As mentioned above, enabling touch events means multiple widgets can be receiving touch events simultaneously. Combined with the default QWidget::event() handling for QTouchEvents, this gives you great flexibility in designing touch user interfaces. Be aware of the implications. For example, it is possible that the user is moving a QSlider with one finger and pressing a QPushButton with another. The signals emitted by these widgets will be interleaved.
/// * Recursion into the event loop using one of the exec() methods (e.g., QDialog::exec() or QMenu::exec()) in a QTouchEvent event handler is not supported. Since there are multiple event recipients, recursion may cause problems, including but not limited to lost events and unexpected infinite recursion.
/// * QTouchEvents are not affected by a [mouse grab](QWidget::grabMouse())
/// or an [active pop-up widget](QApplication::activePopupWidget())
/// . The behavior of QTouchEvents is undefined when opening a pop-up or grabbing the mouse while there are more than one active touch points.
///
/// **See also:** [`TouchEvent::touch_point()`]
/// [`t::touch_point_state()`]
/// [`t::wa_accept_touch_events()`]
/// [`GraphicsItem::accept_touch_events`]
/// # Licence
///
/// The documentation is an adoption of the original [Qt Documentation](http://doc.qt.io/) and provided herein is licensed under the terms of the [GNU Free Documentation License version 1.3](http://www.gnu.org/licenses/fdl.html) as published by the Free Software Foundation.
#[derive(Clone)]
pub struct TouchEvent<'a> {
    // Shared cell holding the raw Qt object pointer (None once cleared).
    #[doc(hidden)]
    pub data: Rc<Cell<Option<*const RUBase>>>,
    // FFI vtable with the function pointers for this type and its bases.
    #[doc(hidden)]
    pub all_funcs: *const RUTouchEventAllFuncs,
    // Set by new_from_owned; presumably marks Rust-side ownership of the
    // Qt object — confirm against the generator's drop handling.
    #[doc(hidden)]
    pub owned: bool,
    // Ties the wrapper to lifetime 'a without storing a real reference.
    #[doc(hidden)]
    pub _marker: PhantomData<::std::cell::Cell<&'a ()>>,
}
impl<'a> TouchEvent<'a> {
#[allow(dead_code)]
// Wrap an FFI handle whose host_data already carries an Rc'd cell:
// reconstruct the Rc so the reference count is shared; not owned.
pub(crate) fn new_from_rc(ffi_data: RUTouchEvent) -> TouchEvent<'a> {
    TouchEvent {
        data: unsafe { Rc::from_raw(ffi_data.host_data as *const Cell<Option<*const RUBase>>) },
        all_funcs: ffi_data.all_funcs,
        owned: false,
        _marker: PhantomData,
    }
}
#[allow(dead_code)]
// Wrap a freshly created Qt object in a new Rc cell; marked owned so the
// Rust side is responsible for it.
pub(crate) fn new_from_owned(ffi_data: RUTouchEvent) -> TouchEvent<'a> {
    TouchEvent {
        data: Rc::new(Cell::new(Some(ffi_data.qt_data as *const RUBase))),
        all_funcs: ffi_data.all_funcs,
        owned: true,
        _marker: PhantomData,
    }
}
#[allow(dead_code)]
// Wrap a temporary Qt object: new Rc cell like new_from_owned, but not
// owned — the Qt side keeps responsibility for the object's lifetime.
pub(crate) fn new_from_temporary(ffi_data: RUTouchEvent) -> TouchEvent<'a> {
    TouchEvent {
        data: Rc::new(Cell::new(Some(ffi_data.qt_data as *const RUBase))),
        all_funcs: ffi_data.all_funcs,
        owned: false,
        _marker: PhantomData,
    }
}
///
/// Returns the window on which the event occurred. Useful for doing
/// global-local mapping on data like rawScreenPositions() which,
/// for performance reasons, only stores the global positions in the
/// touch event.
pub fn window(&self) -> Option<Window> {
    let (obj_data, funcs) = self.get_touch_event_obj_funcs();
    unsafe {
        let raw = ((*funcs).window)(obj_data);
        // Null qt_data means Qt returned no window.
        if raw.qt_data.is_null() {
            None
        } else if raw.host_data.is_null() {
            Some(Window::new_from_owned(raw))
        } else {
            Some(Window::new_from_rc(raw))
        }
    }
}
///
/// Returns the target object within the window on which the event occurred.
/// This is typically a QWidget or a QQuickItem. May be 0 when no specific target is available.
pub fn target(&self) -> Option<Object> {
    let (obj_data, funcs) = self.get_touch_event_obj_funcs();
    unsafe {
        let raw = ((*funcs).target)(obj_data);
        // Null qt_data means Qt returned no target object.
        if raw.qt_data.is_null() {
            None
        } else if raw.host_data.is_null() {
            Some(Object::new_from_owned(raw))
        } else {
            Some(Object::new_from_rc(raw))
        }
    }
}
///
/// Returns a bitwise OR of all the touch point states for this event.
pub fn touch_point_states(&self) -> TouchPointStates {
    let (obj, funcs) = self.get_touch_event_obj_funcs();
    let bits = unsafe { ((*funcs).touch_point_states)(obj) };
    TouchPointStates::from_bits_truncate(bits)
}
#[doc(hidden)]
pub fn modifiers(&self) -> KeyboardModifiers |
#[doc(hidden)]
pub fn set_modifiers(&self, amodifiers: KeyboardModifiers) -> &Self {
    let (obj, funcs) = self.get_input_event_obj_funcs();
    // Pass the raw flag bits across the FFI boundary.
    unsafe { ((*funcs).set_modifiers)(obj, amodifiers.bits()) };
    self
}
#[doc(hidden)]
pub fn timestamp(&self) -> u64 {
    let (obj, funcs) = self.get_input_event_obj_funcs();
    unsafe { ((*funcs).timestamp)(obj) }
}
#[doc(hidden)]
pub fn set_timestamp(&self, atimestamp: u64) -> &Self {
    let (obj, funcs) = self.get_input_event_obj_funcs();
    unsafe { ((*funcs).set_timestamp)(obj, atimestamp) };
    self
}
#[doc(hidden)]
pub fn spontaneous(&self) -> bool {
    let (obj, funcs) = self.get_event_obj_funcs();
    unsafe { ((*funcs).spontaneous)(obj) }
}
#[doc(hidden)]
pub fn set_accepted(&self, accepted: bool) -> &Self {
    let (obj, funcs) = self.get_event_obj_funcs();
    unsafe { ((*funcs).set_accepted)(obj, accepted) };
    self
}
#[doc(hidden)]
pub fn is_accepted(&self) -> bool {
    let (obj, funcs) = self.get_event_obj_funcs();
    unsafe { ((*funcs).is_accepted)(obj) }
}
#[doc(hidden)]
pub fn accept(&self) -> &Self {
    let (obj, funcs) = self.get_event_obj_funcs();
    unsafe { ((*funcs).accept)(obj) };
    self
}
#[doc(hidden)]
pub fn ignore(&self) -> &Self {
    let (obj, funcs) = self.get_event_obj_funcs();
    unsafe { ((*funcs).ignore)(obj) };
    self
}
// Finalize the builder pattern: cloning copies only the Rc handle and
// raw pointers, not the underlying Qt object.
pub fn build(&self) -> Self {
    self.clone()
}
}
/// Accessor for the raw Qt object pointer plus the TouchEvent FFI vtable.
pub trait TouchEventTrait<'a> {
    #[inline]
    #[doc(hidden)]
    fn get_touch_event_obj_funcs(&self) -> (*const RUBase, *const RUTouchEventFuncs);
}
impl<'a> EventTrait<'a> for TouchEvent<'a> {
    #[doc(hidden)]
    fn get_event_obj_funcs(&self) -> (*const RUBase, *const RUEventFuncs) {
        // Panics if the underlying Qt object pointer was already cleared.
        unsafe { (self.data.get().unwrap(), (*self.all_funcs).event_funcs) }
    }
}
impl<'a> InputEventTrait<'a> for TouchEvent<'a> {
    #[doc(hidden)]
    fn get_input_event_obj_funcs(&self) -> (*const RUBase, *const RUInputEventFuncs) {
        // Panics if the underlying Qt object pointer was already cleared.
        unsafe { (self.data.get().unwrap(), (*self.all_funcs).input_event_funcs) }
    }
}
impl<'a> TouchEventTrait<'a> for TouchEvent<'a> {
    #[doc(hidden)]
    fn get_touch_event_obj_funcs(&self) -> (*const RUBase, *const RUTouchEventFuncs) {
        // Panics if the underlying Qt object pointer was already cleared.
        unsafe { (self.data.get().unwrap(), (*self.all_funcs).touch_event_funcs) }
    }
}
| {
let (obj_data, funcs) = self.get_input_event_obj_funcs();
unsafe {
let ret_val = ((*funcs).modifiers)(obj_data);
let ret_val = KeyboardModifiers::from_bits_truncate(ret_val);
ret_val
}
} |
cycle_period_length_analysis.py | #!/usr/bin/python
# Imports
import sys, os, re, time
import argparse
import pdb
import pickle
from itertools import *
# Science
import numpy as np
import scipy.stats as stats
import pandas as pd
# Plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
################################## FUNCTIONS ############################
# Population time-series
def population_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, sample_style, save_dir):
    '''
    Function that plots a population level time series embedding of cycle and period lengths
    In plot:
        x axis is length_attribute for cycle 1,
        y axis is length attribute for cycle 2,
        z is for cycle 3
    Input:
        cycle_stats_df: pandas dataframe, with information about user's cycle statistics
        attribute: whether to consider 'cycle_lengths' or 'period_lengths'
        cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
        cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
        sample_style: whether to pick 3 consecutive 'random' or 'first' cycles per-user
        save_dir: path where to save plot
    Output:
        None
    Raises:
        ValueError: if sample_style is not 'first' or 'random'
    '''
    # Validate up front: previously an unknown sample_style left `filename`
    # unbound and only crashed later at savefig with a confusing NameError.
    if sample_style not in ('first', 'random'):
        raise ValueError("sample_style must be 'first' or 'random', got {!r}".format(sample_style))
    # Split users into two groups by the cutoff statistic
    cycle_stats_df_greater_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]
    cycle_stats_df_less_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]
    cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
    cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
    # Output filename
    if sample_style == 'first':
        filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_first_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
    else:
        filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_sample_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
    # Plot: one color per group (renamed from `colors`, which shadowed the
    # matplotlib.colors module imported at the top of the file)
    group_colors = ['orange', 'c']
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for index, cycle_lengths in enumerate([cycle_lengths_greater_than, cycle_lengths_less_than]):
        print('Start selecting cycles for one group')
        if sample_style == 'first':
            # First 3 cycles of every user with at least 3 cycles
            sample_cycle_lengths = [cycle_length[:3] for cycle_length in cycle_lengths if len(cycle_length) >= 3]
        else:
            # 3 consecutive cycles starting at a random offset per user
            sample_cycle_lengths = []
            for cycle_length in cycle_lengths:
                if len(cycle_length) >= 3:
                    num_cycles_array = np.linspace(0, len(cycle_length)-3, len(cycle_length)-2)
                    start_index = np.random.choice(num_cycles_array, size=1).astype(int)[0]
                    sample_cycle_lengths.append(cycle_length[start_index:start_index+3])
        print('Finished selecting cycles for one group')
        print('Start plotting one group')
        for i in range(len(sample_cycle_lengths)):
            # (cycle i, i+1, i+2) of one user becomes one 3D point
            xs = sample_cycle_lengths[i][0]
            ys = sample_cycle_lengths[i][1]
            zs = sample_cycle_lengths[i][2]
            ax.scatter(xs, ys, zs, color=group_colors[index], s=1, alpha=0.3)
        print('Finished plotting one group')
    ax.set_xlabel(attribute + '[i]')
    ax.set_ylabel(attribute + '[i+1]')
    ax.set_zlabel(attribute + '[i+2]')
    if attribute == 'cycle_lengths':
        ax.set_xlim3d(10, 90)
        ax.set_ylim3d(10, 90)
        ax.set_zlim3d(10, 90)
    elif attribute == 'period_lengths':
        max_period_days = 28
        ax.set_xlim3d(1, max_period_days)
        ax.set_ylim3d(1, max_period_days)
        ax.set_zlim3d(1, max_period_days)
        ax.set_xticks(np.append([1], np.arange(4, max_period_days+1, 4)))
        ax.set_yticks(np.append([1], np.arange(4, max_period_days+1, 4)))
        ax.set_zticks(np.append([1], np.arange(4, max_period_days+1, 4)))
    # `filename` is fully formatted; the old `.format(save_dir)` was a no-op
    plt.savefig(filename, format='pdf', bbox_inches='tight')
    print('Finished one view')
    # Save additional rotated views of the same axes
    for angle in [30, 60, 90, 180]:
        print('Start one view')
        filename_angle = filename[:-4] + '_' + str(angle) + '.pdf'
        ax.view_init(elev=None, azim=angle)
        # Add (a)/(b) labels for paper
        ax.text2D(12, 7, '(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
        plt.savefig(filename_angle, format='pdf', bbox_inches='tight')
        print('Finished one view')
    plt.close()
# Time series embedding for a randomly chosen user
def random_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, save_dir):
    '''
    Function that plots a time series embedding of cycle and period lengths for a randomly chosen user per group
    In plot:
        x axis is length_attribute for cycle i,
        y axis is length attribute for cycle i+1,
        z is for cycle i+2
    Input:
        cycle_stats_df: pandas dataframe, with information about user's cycle statistics
        attribute: whether to consider 'cycle_lengths' or 'period_lengths'
        cutoff_criteria: what statistic to use for separating users into groups ('median_inter_cycle_length' in the calling script)
        cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
        save_dir: path where to save plot
    Output:
        None (figures are written to save_dir)
    '''
    # Select users with median number of cycles tracked
    # NOTE(review): the constant 11 is presumably the cohort-wide median of
    # 'num_cycles_tracked' — confirm against the dataset if it changes
    cycle_stats_df_median = cycle_stats_df[cycle_stats_df['num_cycles_tracked'] == 11]
    filename = '{}/random_time_series_embedding_for_{}_split_by_{}_{}.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
    #get users with color by attribute > cutoff, and <= cutoff
    cycle_stats_df_greater_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] > cutoff]
    cycle_stats_df_less_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] <= cutoff]
    cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
    cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
    # Randomly pick a user from each group
    # Each series element is a per-user array of lengths, so choice() returns a
    # length-1 array whose single element is that user's full length array
    cycle_lengths_greater_than_user = np.random.choice(cycle_lengths_greater_than, size=1, replace=False)
    cycle_lengths_less_than_user = np.random.choice(cycle_lengths_less_than, size=1, replace=False)
    # Plot
    colors = ['orange', 'c']
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    #plot each user, color by median intercycle length
    # Delay embedding for the '> cutoff' user: (x, y, z) = (l[i], l[i+1], l[i+2])
    xs = list(cycle_lengths_greater_than_user[0][0:-2])
    ys = list(cycle_lengths_greater_than_user[0][1:-1])
    zs = list(cycle_lengths_greater_than_user[0][2:])
    ax.scatter(xs, ys, zs, color = 'orange')
    ax.plot(xs, ys, zs, color='orange', linestyle='dashed', alpha=0.8)
    # Same delay embedding for the '<= cutoff' user
    xs = list(cycle_lengths_less_than_user[0][0:-2])
    ys = list(cycle_lengths_less_than_user[0][1:-1])
    zs = list(cycle_lengths_less_than_user[0][2:])
    ax.scatter(xs, ys, zs, color = 'c')
    ax.plot(xs, ys, zs, color='c', linestyle='dashed', alpha=0.8)
    ax.set_xlabel(attribute+ '[i]')
    ax.set_ylabel(attribute+ '[i+1]')
    ax.set_zlabel(attribute+ '[i+2]')
    if attribute == 'cycle_lengths':
        #ref_line_points = np.linspace(10, 90, 10)
        #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
        # Fixed axis range (days) so plots are comparable across runs
        ax.set_xlim3d(10,90)
        ax.set_ylim3d(10,90)
        ax.set_zlim3d(10,90)
    elif attribute == 'period_lengths':
        max_period_days=28
        #ref_line_points = np.linspace(1, max_period_days, 4)
        #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
        # Fixed axis range and integer ticks for period lengths (days)
        ax.set_xlim3d(1,max_period_days)
        ax.set_ylim3d(1,max_period_days)
        ax.set_zlim3d(1,max_period_days)
        ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
        ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
        ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
    # Save default view ('filename' has no remaining placeholders, so .format(save_dir) is a no-op)
    plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
    print('Finished one view')
    # With angles
    # Re-save the same 3D axes viewed from several azimuth angles
    for angle in [30, 60, 90, 180]:
        print('Start one view')
        filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
        ax.view_init(elev=None, azim=angle)
        plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
        print('Finished one view')
    plt.close()
# Plot period and cycle length distributions per group
def plot_lengths_hist_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, pdf_or_cdf, save_dir):
    '''
    Function that plots cycle and period length distributions across groups
    Input:
        cycle_stats_df: pandas dataframe, with information about user's cycle statistics
        cycle_df: pandas dataframe, with information about each user's cycle
        attribute: whether to consider 'cycle_length' or 'period_length'
        cutoff_criteria: what statistic to use for separating users into groups ('median_inter_cycle_length' in the calling script)
        cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
        pdf_or_cdf: whether to plot 'pdf's or 'cdf's
        save_dir: path where to save plot
    Output:
        None (figures are written to save_dir)
    '''
    # Identify groups per cutoff criteria
    users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
    users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
    cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
    cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
    colors = ['orange', 'c']
    # NOTE(review): labels are attached to the per-group histograms but plt.legend()
    # is never called, so they are not rendered — confirm whether a legend is wanted
    labels=['Highly variable', 'NOT highly variable']
    if attribute == 'cycle_length':
        # Compute histogram
        # Bins based on integer range of values
        my_bins=np.arange(
            np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
            np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
        all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
        counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
        counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
        # Separate PDF/CDF plots
        if pdf_or_cdf=='pdf':
            # PDF
            hist_type='stepfilled'
            cumulative=False
            y_label='P(Cycle length = n)'
            cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
            per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
        elif pdf_or_cdf=='cdf':
            # CDF
            hist_type='step'
            cumulative=True
            # Raw string: '\l' is an invalid escape sequence in a normal string
            # literal (DeprecationWarning/SyntaxWarning); the rendered text is unchanged
            y_label=r'P(Cycle length $\leq$ n)'
            cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
            per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
        else:
            raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
        # Population
        plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
        plt.autoscale(enable=True, tight=True)
        plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
        plt.xlabel('Cycle length in days')
        plt.ylabel(y_label)
        plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
        plt.close()
        # Per-group
        plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
        plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
        plt.autoscale(enable=True, tight=True)
        plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
        plt.xlabel('Cycle length in days')
        plt.ylabel(y_label)
        # Add (a)/(b) labels for paper
        plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
        plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
        plt.close()
    elif attribute == 'period_length':
        # Compute histogram
        # Bins based on integer range of values
        my_bins=np.arange(
            np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
            np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
        all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
        counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
        counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
        # Separate PDF/CDF plots
        max_period_days=28
        if pdf_or_cdf=='pdf':
            # PDF
            hist_type='stepfilled'
            cumulative=False
            y_label='P(Period length = n)'
            cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
            per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
        elif pdf_or_cdf=='cdf':
            # CDF
            hist_type='step'
            cumulative=True
            # Raw string: see note on the cycle_length branch above
            y_label=r'P(Period length $\leq$ n)'
            cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
            per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
        else:
            raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
        # Population
        plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
        plt.autoscale(enable=True, tight=True)
        plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
        plt.xlim(1,max_period_days)
        plt.xlabel('Period length in days')
        plt.ylabel(y_label)
        plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
        plt.close()
        # Per-group
        plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
        plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
        plt.autoscale(enable=True, tight=True)
        plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
        plt.xlim(1,max_period_days)
        plt.xlabel('Period length in days')
        plt.ylabel(y_label)
        # Add (a)/(b) labels for paper
        plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
        plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
        plt.close()
    else:
        raise ValueError('Unknown attribute {}'.format(attribute))
# Bootstrapped-KS for cycle and period length
def bootstrapped_cycle_period_lengths_KS(cycle_stats_df, cycle_df, cutoff_criteria, cutoff, n_bootstrapping, results_dir):
    '''
    Function that computes cycle and period length Kolmogorov-Smirnov tests between group distributions, based on bootstrapping
    Input:
        cycle_stats_df: pandas dataframe, with information about user's cycle statistics
        cycle_df: pandas dataframe, with information about user's cycle
        cutoff_criteria: what statistic to use for separating users into groups ('median_inter_cycle_length' in the calling script)
        cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
        n_bootstrapping: Number of bootstrapped samples to use for the analysis
        results_dir: path where to save results (currently unused; results are printed to stdout)
    Output:
        None
    '''
    # True separation of users into groups
    true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
    true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
    n_users_greater_than_cutoff=true_users_greater_than_cutoff.size
    n_users_less_than_cutoff=true_users_less_than_cutoff.size
    ########### TRUE OBSERVED STATISTICS ##########
    # Cycles per-group
    true_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_greater_than_cutoff)]
    true_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_less_than_cutoff)]
    # KS cycle_length
    true_KS_cycle_length, true_p_val_cycle_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['cycle_length'].dropna(), true_cycles_users_less_than_cutoff['cycle_length'].dropna())
    # KS period_length
    true_KS_period_length, true_p_val_period_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['period_length'].dropna(), true_cycles_users_less_than_cutoff['period_length'].dropna())
    ########### BOOTSTRAP BASED STATISTICS ##########
    # Computed suff statistics
    bootstrapped_KS_cycle_length=np.zeros(n_bootstrapping)
    bootstrapped_p_val_cycle_length=np.zeros(n_bootstrapping)
    bootstrapped_KS_period_length=np.zeros(n_bootstrapping)
    bootstrapped_p_val_period_length=np.zeros(n_bootstrapping)
    for n_bootstrap in np.arange(n_bootstrapping):
        #print('Sample={}/{}'.format(n_bootstrap,n_bootstrapping))
        # Bootstrapped sample indicators
        # BUG FIX: resample (with replacement) as many users as the original group has.
        # Previously the sample size was n_bootstrapping, conflating the number of
        # bootstrap iterations with the per-group sample size and leaving the
        # n_users_*_cutoff variables computed above unused.
        bootstrapped_users_greater_than_cutoff=np.random.choice(true_users_greater_than_cutoff,n_users_greater_than_cutoff)
        bootstrapped_users_less_than_cutoff=np.random.choice(true_users_less_than_cutoff,n_users_less_than_cutoff)
        # Cycles per-group
        bootstrapped_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_greater_than_cutoff)]
        bootstrapped_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_less_than_cutoff)]
        # KS cycle_length
        bootstrapped_KS_cycle_length[n_bootstrap], bootstrapped_p_val_cycle_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['cycle_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['cycle_length'].dropna())
        # KS period_length
        bootstrapped_KS_period_length[n_bootstrap], bootstrapped_p_val_period_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['period_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['period_length'].dropna())
    # Print bootstrap results: true statistic, mean +/- std, and 95% percentile interval
    print('*************************************************************************')
    print('******** Cycle-length KS={} (p={}) ***********'.format(true_KS_cycle_length, true_p_val_cycle_length))
    print('******** Cycle-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
        bootstrapped_KS_cycle_length.mean(), bootstrapped_KS_cycle_length.std(), bootstrapped_p_val_cycle_length.mean(), bootstrapped_p_val_cycle_length.std()
        ))
    print('******** Cycle-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
        bootstrapped_KS_cycle_length.mean(), np.percentile(bootstrapped_KS_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_KS_cycle_length, 97.5, axis=0),
        bootstrapped_p_val_cycle_length.mean(), np.percentile(bootstrapped_p_val_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_cycle_length, 97.5, axis=0)
        ))
    print('*************************************************************************')
    print('******** Period-length KS={} (p={}) ***********'.format(true_KS_period_length, true_p_val_period_length))
    print('******** Period-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
        bootstrapped_KS_period_length.mean(), bootstrapped_KS_period_length.std(), bootstrapped_p_val_period_length.mean(), bootstrapped_p_val_period_length.std()
        ))
    print('******** Period-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
        bootstrapped_KS_period_length.mean(), np.percentile(bootstrapped_KS_period_length, 2.5, axis=0), np.percentile(bootstrapped_KS_period_length, 97.5, axis=0),
        bootstrapped_p_val_period_length.mean(), np.percentile(bootstrapped_p_val_period_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_period_length, 97.5, axis=0)
        ))
    print('*************************************************************************')
# Average statistics over cycle-id
def plot_avg_lengths_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, save_dir):
    '''
    Function that plots cycle and period length average and standard deviation across user's timeline (i.e., by cycle-id) across groups
    Input:
        cycle_stats_df: pandas dataframe, with information about user's cycle statistics
        cycle_df: pandas dataframe, with information about each user's cycle
        attribute: whether to consider 'cycle_length' or 'period_length'
        cutoff_criteria: what statistic to use for separating users into groups ('median_inter_cycle_length' in the calling script)
        cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
        save_dir: path where to save plot
    Output:
        None (figure is written to save_dir)
    '''
    # Identify groups per cutoff criteria
    users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
    users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
    cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
    cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
    # Plotting
    colors = ['slateblue', 'c', 'orange']
    max_cycle_id=20
    if attribute == 'cycle_length':
        # One row per dataset: full cohort, '<= cutoff' group, '> cutoff' group
        fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
        for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
            # Mean and std of the attribute at each position in the user timeline
            means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
            std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
            # Plot
            # CONSISTENCY FIX: use max_cycle_id instead of a hard-coded 20, so a single
            # constant controls the truncation everywhere
            axes[index].plot(np.unique(dataset['cycle_id'])[:max_cycle_id], means, color = colors[index])
            axes[index].autoscale(enable=True, tight=True, axis='x')
            axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
            axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
            axes[index].set_xlabel('Cycle ID')
            axes[index].set_ylabel('Cycle length')
            axes[index].set_ylim(20,55)
        # Add (a)/(b) labels for paper
        plt.text(12, 7, '(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
        # Save and close
        plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
        plt.close()
    elif attribute == 'period_length':
        fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
        for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
            means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
            std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
            # Plot
            # CONSISTENCY FIX: use max_cycle_id instead of a hard-coded 20 (see above)
            axes[index].plot(np.unique(dataset['cycle_id'])[:max_cycle_id], means, color = colors[index])
            axes[index].autoscale(enable=True, tight=True, axis='x')
            axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
            axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
            axes[index].set_xlabel('Cycle ID')
            axes[index].set_ylabel('Period length')
            axes[index].set_ylim(1,9)
        # Add (a)/(b) labels for paper
        plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
        # Save and close
        plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
        plt.close()
    else:
        raise ValueError('Unknown attribute {}'.format(attribute))
# Plot for max intercycle length (i.e., CLD) histogram
def plot_max_intercycle_length_hists(cycle_stats, cycle_stats_exclude_flagged, save_dir):
    '''
    Plot overlaid histograms of each user's maximum inter-cycle length (max CLD),
    once including and once excluding behaviorally-flagged cycles
    Input:
        cycle_stats: pandas dataframe, with information about user's cycle statistics
        cycle_stats_exclude_flagged: pandas dataframe for users after removing excluded flags, with information about user's cycle statistics
        save_dir: path where to save plot
    Output:
        None (figure is written to save_dir)
    '''
    # Integer-spaced bins spanning the observed range of the unfiltered data
    max_cld = cycle_stats['max_inter_cycle_length']
    bin_edges = np.arange(min(max_cld), max(max_cld) + 1)
    # Draw both histograms as step outlines so they can be compared directly
    overlay_specs = [
        (cycle_stats, 'With behaviorally-tainted cycles', 'blue'),
        (cycle_stats_exclude_flagged, 'Excluding behaviorally-tainted cycles', 'red'),
    ]
    for stats_df, label, color in overlay_specs:
        plt.hist(stats_df['max_inter_cycle_length'], bins=bin_edges, label=label, color=color, histtype='step')
    plt.autoscale(enable=True, tight=True, axis='x')
    plt.ylim(0, 38000)
    plt.xlabel('Maximum CLD in days')
    plt.ylabel('User count with maximum CLD')
    plt.savefig('{}/hist_max_inter_cycle_length_with_and_without_excluded_flags.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
    plt.close()
# Plot for median Vs max intercycle length (i.e., CLD) histogram
def plot_median_vs_max_intercycle_length(cycle_stats, save_dir):
    '''
    Plot a 2D histogram (log-count color scale) of median CLD versus maximum CLD,
    with a 'median + 10' reference line overlaid
    Input:
        cycle_stats: pandas dataframe, with information about user's cycle statistics
        save_dir: path where to save plot
    Output:
        None (figure is written to save_dir)
    '''
    median_cld = cycle_stats['median_inter_cycle_length']
    max_cld = cycle_stats['max_inter_cycle_length']
    # 75x75 bins, log-scaled counts ('colors' is the module-level matplotlib.colors)
    plt.hist2d(median_cld, max_cld, bins=(75, 75), cmap='jet', norm=colors.LogNorm())
    plt.autoscale(enable=True, tight=True)
    # Reference line: maximum CLD equal to median CLD plus 10 days
    reference_x = np.linspace(min(median_cld), max(median_cld), 100)
    plt.plot(reference_x, reference_x + 10, label='Median CLD + 10', color='red')
    plt.xlabel('Median CLD')
    plt.ylabel('Maximum CLD')
    plt.xlim((0,75))
    plt.ylim((0, 75))
    plt.colorbar()
    plt.savefig('{}/median_vs_max_scatter_2d_hist.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
    plt.close()
# Plot for median intercycle length (i.e., CLD) histogram
def plot_median_CLD_hist(cycle_stats, pdf_or_cdf, save_dir):
    '''
    Function that plots median CLD histograms
    Input:
        cycle_stats: pandas dataframe, with information about user's cycle statistics
        pdf_or_cdf: whether to plot 'pdf's or 'cdf's
        save_dir: path where to save plot
    Output:
        None (figure is written to save_dir)
    '''
    # Median CLD histogram: integer-spaced bins over the observed range
    my_bins=np.arange(cycle_stats['median_inter_cycle_length'].dropna().min(),cycle_stats['median_inter_cycle_length'].dropna().max()+1)
    all_counts, all_bins = np.histogram(cycle_stats['median_inter_cycle_length'].dropna(), bins=my_bins, density=True)
    # Separate PDF/CDF plots
    if pdf_or_cdf=='pdf':
        # PDF
        hist_type='stepfilled'
        cumulative=False
        y_label='P(Median CLD = n)'
        cohort_filename = '{}/median_CLD_pdf_cohort.pdf'.format(save_dir)
    elif pdf_or_cdf=='cdf':
        # CDF
        hist_type='step'
        cumulative=True
        # Raw string: '\l' is an invalid escape sequence in a normal string literal
        y_label=r'P(Median CLD $\leq$ n)'
        cohort_filename = '{}/median_CLD_cdf_cohort.pdf'.format(save_dir)
    else:
        raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
    # Actual plot
    plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
    plt.autoscale(enable=True, tight=True)
    plt.xlabel('Median CLD in days')
    # BUG FIX: use the mode-dependent y label and output path. Previously the ylabel
    # and savefig target were hardcoded to the CDF variants, so 'y_label' and
    # 'cohort_filename' were computed but never used and PDF plots were mislabeled.
    plt.ylabel(y_label)
    plt.grid(True)
    plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
    plt.close()
################################## MAIN ############################
def main():
    '''
    Main function of the script that runs the cycle and period length related analysis
    Input:
        None
    Output:
        None
    '''
    ### Directories
    data_dir='../data'
    preprocessed_data_dir='../preprocessed_data'
    results_dir = '../results/characterizing_cycle_and_symptoms/cycle_period_length_analysis'
    os.makedirs(results_dir, exist_ok = True)
    ################# SYMPTOMS TRACKED #################
    # Tracking
    # NOTE(review): 'tracking' is loaded but not used below — confirm whether it can be dropped
    with open('{}/tracking_enriched.pickle'.format(data_dir), 'rb') as f:
        tracking = pickle.load(f)
    print('Tracking-data loaded')
    ################# CYCLES #################
    with open('{}/cohort_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:
        cohort_cycle_stats = pickle.load(f)
    # Cycles flagged
    with open('{}/cohort_cycles_flagged.pickle'.format(preprocessed_data_dir), 'rb') as f:
        cohort_cycles_flagged = pickle.load(f)
    # Exclude cycles flagged as badly tracked
    cohort_cycles = cohort_cycles_flagged[cohort_cycles_flagged['badly_tracked_cycle'] == 'f']
    # Cycles stats
    with open('{}/cohort_clean_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:
        cohort_clean_cycle_stats = pickle.load(f)
    print('Cycles-data loaded')
    ################# PLOTTING #################
    #### PLOT histogram of max intercycle length, with and without excluding flagged cycles
    plot_max_intercycle_length_hists(cohort_cycle_stats, cohort_clean_cycle_stats, results_dir)
    #### PLOT Median Vs Max CLD 2D histogram
    plot_median_vs_max_intercycle_length(cohort_clean_cycle_stats, results_dir)
    #### PLOT Median CLD histogram
    plot_median_CLD_hist(cohort_clean_cycle_stats, 'cdf', results_dir)
    #### PLOT cycle and period length histograms: pdf
    plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)
    plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)
    #### Bootstrapped-KS cycle and period length
    bootstrapped_cycle_period_lengths_KS(cohort_clean_cycle_stats, cohort_cycles, 'median_inter_cycle_length', 9, 100000, results_dir)
    #### PLOT average cycle and average length over cycle-id
    plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, results_dir)
    plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, results_dir)
    #### PLOT random cycle length time-series
    random_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, results_dir)
    #### PLOT population level cycle and period length time-series
    population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)
    population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'period_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)
# Making sure the main program is not executed when the module is imported
if __name__ == '__main__':
    # Just run the main
    main()
base.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2 as operations # type: ignore
try:
    # Advertise the installed google-cloud-vision version in the user-agent header.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. vendored source tree); fall back to defaults.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ImageAnnotatorTransport(abc.ABC):
    """Abstract transport class for ImageAnnotator."""

    # OAuth scopes requested by default for every concrete transport.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-vision",
    )

    def __init__(
        self,
        *,
        host: str = "vision.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: typing.Optional[str] = None,
        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
        quota_project_id: typing.Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        # BUG FIX: the conditional body was missing, which is a syntax error and
        # left hosts without an explicit port unusable.
        if ":" not in host:
            host += ":443"
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = auth.load_credentials_from_file(
                credentials_file, scopes=scopes, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = auth.default(
                scopes=scopes, quota_project_id=quota_project_id
            )

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC gets a default retry policy
        # (exponential backoff on transient errors) and a default timeout.
        self._wrapped_methods = {
            self.batch_annotate_images: gapic_v1.method.wrap_method(
                self.batch_annotate_images,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
                    ),
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.async_batch_annotate_files: gapic_v1.method.wrap_method(
                self.async_batch_annotate_files,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
                    ),
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
        }

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def batch_annotate_images(
        self,
    ) -> typing.Callable[
        [image_annotator.BatchAnnotateImagesRequest],
        typing.Union[
            image_annotator.BatchAnnotateImagesResponse,
            typing.Awaitable[image_annotator.BatchAnnotateImagesResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def async_batch_annotate_files(
        self,
    ) -> typing.Callable[
        [image_annotator.AsyncBatchAnnotateFilesRequest],
        typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
    ]:
        raise NotImplementedError()


__all__ = ("ImageAnnotatorTransport",)
models.py | from django.db import models
class Todolist(models.Model):
    """A single to-do item with a short title and an optional free-text detail."""
    # Short, required title shown in listings (max 100 characters).
    title = models.CharField(max_length=100)
    # Optional longer description; may be empty both in forms and in the database.
    detail = models.TextField(null=True, blank=True)

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.title
|
create.py | import json
import sqlite3
from typing import Any, Callable, Dict, List, Optional, Type, Union
from sqlalchemy.ext.asyncio import create_async_engine as _create_async_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.future import Engine as _FutureEngine
from sqlalchemy.pool import Pool
from typing_extensions import Literal, TypedDict
from ..default import Default, _DefaultPlaceholder
# Types defined in sqlalchemy2-stubs, but can't be imported, so re-define here
_Debug = Literal["debug"]
_IsolationLevel = Literal[
"SERIALIZABLE",
"REPEATABLE READ",
"READ COMMITTED",
"READ UNCOMMITTED",
"AUTOCOMMIT",
]
_ParamStyle = Literal["qmark", "numeric", "named", "format", "pyformat"]
_ResetOnReturn = Literal["rollback", "commit"]
class _SQLiteConnectArgs(TypedDict, total=False):
timeout: float
detect_types: Any
isolation_level: Optional[Literal["DEFERRED", "IMMEDIATE", "EXCLUSIVE"]]
check_same_thread: bool
factory: Type[sqlite3.Connection]
cached_statements: int
uri: bool
_ConnectArgs = Union[_SQLiteConnectArgs, Dict[str, Any]]
# Re-define create_engine to have by default future=True, and assume that's what is used
# Also show the default values used for each parameter, but don't set them unless
# explicitly passed as arguments by the user to prevent errors. E.g. SQLite doesn't
# support pool connection arguments.
def create_async_engine(
url: Union[str, URL],
*,
connect_args: _ConnectArgs = Default({}), # type: ignore
echo: Union[bool, _Debug] = Default(False),
echo_pool: Union[bool, _Debug] = Default(False),
enable_from_linting: bool = Default(True),
encoding: str = Default("utf-8"),
execution_options: Dict[Any, Any] = Default({}),
future: bool = True,
hide_parameters: bool = Default(False),
implicit_returning: bool = Default(True),
isolation_level: Optional[_IsolationLevel] = Default(None),
json_deserializer: Callable[..., Any] = Default(json.loads),
json_serializer: Callable[..., Any] = Default(json.dumps),
label_length: Optional[int] = Default(None),
logging_name: Optional[str] = Default(None),
max_identifier_length: Optional[int] = Default(None),
max_overflow: int = Default(10),
module: Optional[Any] = Default(None),
paramstyle: Optional[_ParamStyle] = Default(None),
pool: Optional[Pool] = Default(None),
poolclass: Optional[Type[Pool]] = Default(None),
pool_logging_name: Optional[str] = Default(None),
pool_pre_ping: bool = Default(False),
pool_size: int = Default(5),
pool_recycle: int = Default(-1),
pool_reset_on_return: Optional[_ResetOnReturn] = Default("rollback"),
pool_timeout: float = Default(30),
pool_use_lifo: bool = Default(False),
plugins: Optional[List[str]] = Default(None),
query_cache_size: Optional[int] = Default(None),
**kwargs: Any,
) -> _FutureEngine:
current_kwargs: Dict[str, Any] = {
"future": future,
}
if not isinstance(echo, _DefaultPlaceholder):
current_kwargs["echo"] = echo
if not isinstance(echo_pool, _DefaultPlaceholder):
current_kwargs["echo_pool"] = echo_pool
if not isinstance(enable_from_linting, _DefaultPlaceholder):
current_kwargs["enable_from_linting"] = enable_from_linting
if not isinstance(connect_args, _DefaultPlaceholder):
current_kwargs["connect_args"] = connect_args
if not isinstance(encoding, _DefaultPlaceholder):
current_kwargs["encoding"] = encoding
if not isinstance(execution_options, _DefaultPlaceholder):
current_kwargs["execution_options"] = execution_options
if not isinstance(hide_parameters, _DefaultPlaceholder):
current_kwargs["hide_parameters"] = hide_parameters
if not isinstance(implicit_returning, _DefaultPlaceholder):
current_kwargs["implicit_returning"] = implicit_returning
if not isinstance(isolation_level, _DefaultPlaceholder):
current_kwargs["isolation_level"] = isolation_level
if not isinstance(json_deserializer, _DefaultPlaceholder):
current_kwargs["json_deserializer"] = json_deserializer
if not isinstance(json_serializer, _DefaultPlaceholder):
current_kwargs["json_serializer"] = json_serializer
if not isinstance(label_length, _DefaultPlaceholder):
current_kwargs["label_length"] = label_length
if not isinstance(logging_name, _DefaultPlaceholder):
current_kwargs["logging_name"] = logging_name
if not isinstance(max_identifier_length, _DefaultPlaceholder):
current_kwargs["max_identifier_length"] = max_identifier_length
if not isinstance(max_overflow, _DefaultPlaceholder):
current_kwargs["max_overflow"] = max_overflow
if not isinstance(module, _DefaultPlaceholder):
current_kwargs["module"] = module
if not isinstance(paramstyle, _DefaultPlaceholder):
current_kwargs["paramstyle"] = paramstyle | current_kwargs["pool"] = pool
if not isinstance(poolclass, _DefaultPlaceholder):
current_kwargs["poolclass"] = poolclass
if not isinstance(pool_logging_name, _DefaultPlaceholder):
current_kwargs["pool_logging_name"] = pool_logging_name
if not isinstance(pool_pre_ping, _DefaultPlaceholder):
current_kwargs["pool_pre_ping"] = pool_pre_ping
if not isinstance(pool_size, _DefaultPlaceholder):
current_kwargs["pool_size"] = pool_size
if not isinstance(pool_recycle, _DefaultPlaceholder):
current_kwargs["pool_recycle"] = pool_recycle
if not isinstance(pool_reset_on_return, _DefaultPlaceholder):
current_kwargs["pool_reset_on_return"] = pool_reset_on_return
if not isinstance(pool_timeout, _DefaultPlaceholder):
current_kwargs["pool_timeout"] = pool_timeout
if not isinstance(pool_use_lifo, _DefaultPlaceholder):
current_kwargs["pool_use_lifo"] = pool_use_lifo
if not isinstance(plugins, _DefaultPlaceholder):
current_kwargs["plugins"] = plugins
if not isinstance(query_cache_size, _DefaultPlaceholder):
current_kwargs["query_cache_size"] = query_cache_size
current_kwargs.update(kwargs)
return _create_async_engine(url, **current_kwargs) | if not isinstance(pool, _DefaultPlaceholder): |
flag.py | import enum
@enum.unique
class Flag(enum.IntEnum):
NOT_NULL = 1
PRI_KEY = 2
UNIQUE_KEY = 4
MULTIPLE_KEY = 8
BLOB = 16
UNSIGNED = 32
ZEROFILL = 64
BINARY = 128
ENUM = 256
AUTO_INCREMENT = 512
TIMESTAMP = 1024 | SET = 2048
PART_KEY = 16384
GROUP = 32767
UNIQUE = 65536 |
|
config_setup.go | package config
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/cli/cli/api"
"github.com/cli/cli/auth"
"gopkg.in/yaml.v3"
)
const (
oauthHost = "github.com"
)
var (
// The "GitHub CLI" OAuth app
oauthClientID = "178c6fc778ccc68e1d6a"
// This value is safe to be embedded in version control
oauthClientSecret = "34ddeff2b558a23d38fba8a6de74f086ede1cc0b"
)
// IsGitHubApp reports whether an OAuth app is "GitHub CLI" or "GitHub CLI (dev)"
func | (id string) bool {
// this intentionally doesn't use `oauthClientID` because that is a variable
// that can potentially be changed at build time via GH_OAUTH_CLIENT_ID
return id == "178c6fc778ccc68e1d6a" || id == "4d747ba5675d5d66553f"
}
func AuthFlow(notice string) (string, string, error) {
var verboseStream io.Writer
if strings.Contains(os.Getenv("DEBUG"), "oauth") {
verboseStream = os.Stderr
}
flow := &auth.OAuthFlow{
Hostname: oauthHost,
ClientID: oauthClientID,
ClientSecret: oauthClientSecret,
Scopes: []string{"repo", "read:org", "gist"},
WriteSuccessHTML: func(w io.Writer) {
fmt.Fprintln(w, oauthSuccessPage)
},
VerboseStream: verboseStream,
}
fmt.Fprintln(os.Stderr, notice)
fmt.Fprintf(os.Stderr, "Press Enter to open %s in your browser... ", flow.Hostname)
_ = waitForEnter(os.Stdin)
token, err := flow.ObtainAccessToken()
if err != nil {
return "", "", err
}
userLogin, err := getViewer(token)
if err != nil {
return "", "", err
}
return token, userLogin, nil
}
func AuthFlowComplete() {
fmt.Fprintln(os.Stderr, "Authentication complete. Press Enter to continue... ")
_ = waitForEnter(os.Stdin)
}
// FIXME: make testable
func setupConfigFile(filename string) (Config, error) {
token, userLogin, err := AuthFlow("Notice: authentication required")
if err != nil {
return nil, err
}
// TODO this sucks. It precludes us laying out a nice config with comments and such.
type yamlConfig struct {
Hosts map[string]map[string]string
}
yamlHosts := map[string]map[string]string{}
yamlHosts[oauthHost] = map[string]string{}
yamlHosts[oauthHost]["user"] = userLogin
yamlHosts[oauthHost]["oauth_token"] = token
defaultConfig := yamlConfig{
Hosts: yamlHosts,
}
err = os.MkdirAll(filepath.Dir(filename), 0771)
if err != nil {
return nil, err
}
cfgFile, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return nil, err
}
defer cfgFile.Close()
yamlData, err := yaml.Marshal(defaultConfig)
if err != nil {
return nil, err
}
_, err = cfgFile.Write(yamlData)
if err != nil {
return nil, err
}
// TODO cleaner error handling? this "should" always work given that we /just/ wrote the file...
return ParseConfig(filename)
}
func getViewer(token string) (string, error) {
http := api.NewClient(api.AddHeader("Authorization", fmt.Sprintf("token %s", token)))
response := struct {
Viewer struct {
Login string
}
}{}
err := http.GraphQL("{ viewer { login } }", nil, &response)
return response.Viewer.Login, err
}
func waitForEnter(r io.Reader) error {
scanner := bufio.NewScanner(r)
scanner.Scan()
return scanner.Err()
}
| IsGitHubApp |
model_list_messaging_country_response.go | /*
* Twilio - Pricing
*
* This is the public Twilio REST API. | */
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package openapi
// ListMessagingCountryResponse struct for ListMessagingCountryResponse
type ListMessagingCountryResponse struct {
Countries []PricingV1MessagingCountry `json:"countries,omitempty"`
Meta ListMessagingCountryResponseMeta `json:"meta,omitempty"`
} | *
* API version: 1.27.2
* Contact: [email protected] |
script.js | /**
* Scope and the var statement
* @link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/var
*/
var color = "purple";
document.querySelector(".left").style.backgroundColor = color;
document.querySelector(".left .color-value").innerHTML = color;
color = "skyblue";
function | () {
color = "blue";
document.querySelector(".title").style.color = color;
}
document.querySelector(".right").style.backgroundColor = color;
document.querySelector(".right .color-value").innerHTML = color;
headingColor();
| headingColor |
golden-app.perf.js | const path = require("path");
const Perf = require("../src/perf.js");
const dsl = require("./dsl/simple-typing").dsl;
const { actions } = require("./actions");
const { delay, makeid } = require("../src/utils/utils");
process.env["NODE_TLS_REJECT_UNAUTHORIZED"] = 0;
const SEL = {
category: "div[label=Uncategorized]",
multiSelect: ".rc-select-selection-search-input",
table: "#tablejabdu9f16g",
tableData: ".t--property-control-tabledata textarea",
tableRow:
"#tablejabdu9f16g > div.tableWrap > div > div:nth-child(1) > div > div.tbody.no-scroll > div:nth-child(6) > div:nth-child(2)",
titleInput: ".appsmith_widget_in8e51pg3y input",
updateButton:
"#comment-overlay-wrapper-4gnygu5jew > div > div > div > div > button",
tableRowCell:
"#tablejabdu9f16g > div.tableWrap > div > div:nth-child(1) > div > div.tbody.no-scroll > div:nth-child(6) > div:nth-child(2) > div > span > span > span",
deletePostButton:
"#tablejabdu9f16g > div.tableWrap > div > div:nth-child(1) > div > div.tbody.no-scroll > div:nth-child(1) > div:nth-child(27) > div > div > button",
modalTitle: "#reyoxo4oec",
closeModal:
"#comment-overlay-wrapper-lryg8kw537 > div > div > div > div > button",
commentsPageLink: "div[data-guided-tour-iid='Comments']",
commentsTableTitle: "#urzv99hdc8",
};
async function | () {
const perf = new Perf();
await perf.launch();
const page = perf.getPage();
await perf.importApplication(
`${APP_ROOT}/tests/dsl/blog-admin-app-postgres.json`,
);
await delay(5000, "for newly created page to settle down");
// Make the elements of the dropdown render
await page.waitForSelector(SEL.multiSelect);
await page.click(SEL.multiSelect);
await perf.startTrace(actions.SELECT_CATEGORY);
await page.waitForSelector(SEL.category);
await page.click(SEL.category);
await perf.stopTrace();
// Focus on the table widget
await page.waitForSelector(SEL.table);
await page.click(SEL.table);
// Profile table Data binding
await perf.startTrace(actions.BIND_TABLE_DATA);
await page.waitForSelector(SEL.tableData);
await page.type(SEL.tableData, "{{SelectQuery.data}}");
await page.waitForSelector(SEL.tableRow);
await perf.stopTrace();
// Click on table row
await perf.startTrace(actions.CLICK_ON_TABLE_ROW);
await page.click(SEL.tableRow);
await page.waitForFunction(
`document.querySelector("${SEL.titleInput}").value.includes("Template: Comments")`,
);
await perf.stopTrace();
// Edit title
await page.waitForSelector(SEL.titleInput);
await perf.startTrace(actions.UPDATE_POST_TITLE);
const randomString = makeid();
await page.type(SEL.titleInput, randomString);
await delay(5000, "For the evaluations to comeback?");
await page.waitForSelector(SEL.updateButton);
await page.click(SEL.updateButton);
// When the row is updated, selected row changes.
// await page.waitForSelector(SEL.tableRowCell);
await page.waitForFunction(
`document.querySelector("${SEL.table}").textContent.includes("${randomString}")`,
);
await perf.stopTrace();
// Open modal
await page.waitForSelector(SEL.deletePostButton);
await perf.startTrace(actions.OPEN_MODAL);
await page.click(SEL.deletePostButton);
await page.waitForSelector(SEL.modalTitle);
await perf.stopTrace();
// Close modal
await page.waitForSelector(SEL.closeModal);
await perf.startTrace(actions.CLOSE_MODAL);
await page.click(SEL.closeModal);
await delay(3000, "wait after closing modal");
await perf.stopTrace();
/* Enable this after the new entity explorer
// Navigate to a page
await page.waitForSelector(SEL.commentsPageLink);
await perf.startTrace("Switch page");
await page.click(SEL.commentsPageLink);
await page.waitForSelector(SEL.commentsTableTitle);
await perf.stopTrace();
*/
await perf.generateReport();
await perf.close();
}
async function runTests() {
await testTyping();
await testTyping();
await testTyping();
await testTyping();
await testTyping();
}
runTests();
| testTyping |
elasticips.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package models
import (
"context"
"database/sql"
"fmt"
"yunion.io/x/jsonutils"
"yunion.io/x/log"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/tristate"
"yunion.io/x/pkg/util/compare"
"yunion.io/x/pkg/utils"
"yunion.io/x/sqlchemy"
"yunion.io/x/onecloud/pkg/apis"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudcommon/db"
"yunion.io/x/onecloud/pkg/cloudcommon/db/lockman"
"yunion.io/x/onecloud/pkg/cloudcommon/db/quotas"
"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/httperrors"
"yunion.io/x/onecloud/pkg/mcclient"
"yunion.io/x/onecloud/pkg/util/rbacutils"
"yunion.io/x/onecloud/pkg/util/stringutils2"
)
type SElasticipManager struct {
db.SVirtualResourceBaseManager
db.SExternalizedResourceBaseManager
SManagedResourceBaseManager
SCloudregionResourceBaseManager
}
var ElasticipManager *SElasticipManager
func init() |
type SElasticip struct {
db.SVirtualResourceBase
db.SExternalizedResourceBase
SManagedResourceBase
SCloudregionResourceBase `width:"36" charset:"ascii" nullable:"false" list:"user" create:"required"`
SBillingResourceBase
// IP子网Id, 仅私有云不为空
NetworkId string `width:"36" charset:"ascii" nullable:"true" get:"user" list:"user" create:"optional"`
// 标识弹性或非弹性
// | Mode | 说明 |
// |------------|------------|
// | public_ip | 公网IP |
// | elastic_ip | 弹性公网IP |
//
// example: elastic_ip
Mode string `width:"32" charset:"ascii" get:"user" list:"user" create:"optional"`
// IP地址
IpAddr string `width:"17" charset:"ascii" list:"user" create:"optional"`
// 绑定资源类型
AssociateType string `width:"32" charset:"ascii" list:"user"`
// 绑定资源Id
AssociateId string `width:"256" charset:"ascii" list:"user"`
// 带宽大小
Bandwidth int `list:"user" create:"optional" default:"0"`
// 计费类型: 流量、带宽
// example: bandwidth
ChargeType string `name:"charge_type" list:"user" create:"required"`
// 目前只有华为云此字段是必需填写的
BgpType string `list:"user" create:"optional"`
// 是否跟随主机删除而自动释放
AutoDellocate tristate.TriState `default:"false" get:"user" create:"optional" update:"user"`
// 区域Id
// CloudregionId string `width:"36" charset:"ascii" nullable:"false" list:"user" create:"required"`
}
// 弹性公网IP列表
func (manager *SElasticipManager) ListItemFilter(
ctx context.Context,
q *sqlchemy.SQuery,
userCred mcclient.TokenCredential,
query api.ElasticipListInput,
) (*sqlchemy.SQuery, error) {
var err error
q, err = manager.SVirtualResourceBaseManager.ListItemFilter(ctx, q, userCred, query.VirtualResourceListInput)
if err != nil {
return nil, errors.Wrap(err, "SVirtualResourceBaseManager.ListItemFilter")
}
q, err = manager.SExternalizedResourceBaseManager.ListItemFilter(ctx, q, userCred, query.ExternalizedResourceBaseListInput)
if err != nil {
return nil, errors.Wrap(err, "SExternalizedResourceBaseManager.ListItemFilter")
}
q, err = manager.SManagedResourceBaseManager.ListItemFilter(ctx, q, userCred, query.ManagedResourceListInput)
if err != nil {
return nil, errors.Wrap(err, "SManagedResourceBaseManager.ListItemFilter")
}
q, err = manager.SCloudregionResourceBaseManager.ListItemFilter(ctx, q, userCred, query.RegionalFilterListInput)
if err != nil {
return nil, errors.Wrap(err, "SCloudregionResourceBaseManager.ListItemFilter")
}
associateType := query.UsableEipForAssociateType
associateId := query.UsableEipForAssociateId
if len(associateType) > 0 && len(associateId) > 0 {
switch associateType {
case api.EIP_ASSOCIATE_TYPE_SERVER:
serverObj, err := GuestManager.FetchByIdOrName(userCred, associateId)
if err != nil {
if err == sql.ErrNoRows {
return nil, httperrors.NewResourceNotFoundError("server %s not found", associateId)
}
return nil, httperrors.NewGeneralError(err)
}
guest := serverObj.(*SGuest)
if guest.Hypervisor == api.HYPERVISOR_KVM || utils.IsInStringArray(guest.Hypervisor, api.PRIVATE_CLOUD_HYPERVISORS) {
zone := guest.getZone()
networks := NetworkManager.Query().SubQuery()
wires := WireManager.Query().SubQuery()
sq := networks.Query(networks.Field("id")).Join(wires, sqlchemy.Equals(wires.Field("id"), networks.Field("wire_id"))).
Filter(sqlchemy.Equals(wires.Field("zone_id"), zone.Id)).SubQuery()
q = q.Filter(sqlchemy.In(q.Field("network_id"), sq))
gns := GuestnetworkManager.Query("network_id").Equals("guest_id", guest.Id).SubQuery()
q = q.Filter(sqlchemy.NotIn(q.Field("network_id"), gns))
} else {
region := guest.getRegion()
q = q.Equals("cloudregion_id", region.Id)
}
managerId := guest.GetHost().ManagerId
if managerId != "" {
q = q.Equals("manager_id", managerId)
} else {
q = q.IsNullOrEmpty("manager_id")
}
default:
return nil, httperrors.NewInputParameterError("Not support associate type %s, only support %s", associateType, api.EIP_ASSOCIATE_VALID_TYPES)
}
}
if query.Usable != nil && *query.Usable {
q = q.Equals("status", api.EIP_STATUS_READY)
q = q.Filter(sqlchemy.OR(sqlchemy.IsNull(q.Field("associate_id")), sqlchemy.IsEmpty(q.Field("associate_id"))))
}
if len(query.Mode) > 0 {
q = q.In("mode", query.Mode)
}
if len(query.IpAddr) > 0 {
q = q.In("ip_addr", query.IpAddr)
}
if len(query.AssociateType) > 0 {
q = q.In("associate_type", query.AssociateType)
}
if len(query.AssociateId) > 0 {
q = q.In("associate_id", query.AssociateId)
}
if len(query.ChargeType) > 0 {
q = q.In("charge_type", query.ChargeType)
}
if len(query.BgpType) > 0 {
q = q.In("bgp_type", query.BgpType)
}
if query.AutoDellocate != nil {
if *query.AutoDellocate {
q = q.IsTrue("auto_dellocate")
} else {
q = q.IsFalse("auto_dellocate")
}
}
return q, nil
}
func (manager *SElasticipManager) OrderByExtraFields(
ctx context.Context,
q *sqlchemy.SQuery,
userCred mcclient.TokenCredential,
query api.ElasticipListInput,
) (*sqlchemy.SQuery, error) {
var err error
q, err = manager.SVirtualResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.VirtualResourceListInput)
if err != nil {
return nil, errors.Wrap(err, "SVirtualResourceBaseManager.OrderByExtraFields")
}
q, err = manager.SManagedResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.ManagedResourceListInput)
if err != nil {
return nil, errors.Wrap(err, "SManagedResourceBaseManager.OrderByExtraFields")
}
q, err = manager.SCloudregionResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.RegionalFilterListInput)
if err != nil {
return nil, errors.Wrap(err, "SCloudregionResourceBaseManager.OrderByExtraFields")
}
return q, nil
}
func (manager *SElasticipManager) QueryDistinctExtraField(q *sqlchemy.SQuery, field string) (*sqlchemy.SQuery, error) {
var err error
q, err = manager.SVirtualResourceBaseManager.QueryDistinctExtraField(q, field)
if err == nil {
return q, nil
}
q, err = manager.SManagedResourceBaseManager.QueryDistinctExtraField(q, field)
if err == nil {
return q, nil
}
q, err = manager.SCloudregionResourceBaseManager.QueryDistinctExtraField(q, field)
if err == nil {
return q, nil
}
return q, httperrors.ErrNotFound
}
func (manager *SElasticipManager) getEipsByRegion(region *SCloudregion, provider *SCloudprovider) ([]SElasticip, error) {
eips := make([]SElasticip, 0)
q := manager.Query().Equals("cloudregion_id", region.Id)
if provider != nil {
q = q.Equals("manager_id", provider.Id)
}
err := db.FetchModelObjects(manager, q, &eips)
if err != nil {
return nil, err
}
return eips, nil
}
func (self *SElasticip) GetRegion() *SCloudregion {
return CloudregionManager.FetchRegionById(self.CloudregionId)
}
func (self *SElasticip) GetNetwork() (*SNetwork, error) {
network, err := NetworkManager.FetchById(self.NetworkId)
if err != nil {
return nil, err
}
return network.(*SNetwork), nil
}
func (self *SElasticip) GetZone() *SZone {
if len(self.NetworkId) == 0 {
return nil
}
network, err := self.GetNetwork()
if err != nil {
return nil
}
return network.GetZone()
}
func (self *SElasticip) GetShortDesc(ctx context.Context) *jsonutils.JSONDict {
desc := self.SVirtualResourceBase.GetShortDesc(ctx)
// desc.Add(jsonutils.NewString(self.ChargeType), "charge_type")
desc.Add(jsonutils.NewInt(int64(self.Bandwidth)), "bandwidth")
desc.Add(jsonutils.NewString(self.Mode), "mode")
desc.Add(jsonutils.NewString(self.IpAddr), "ip_addr")
// region := self.GetRegion()
// if len(region.ExternalId) > 0 {
// regionInfo := strings.Split(region.ExternalId, "/")
// if len(regionInfo) == 2 {
// desc.Add(jsonutils.NewString(strings.ToLower(regionInfo[0])), "hypervisor")
// desc.Add(jsonutils.NewString(regionInfo[1]), "region")
// }
//}
billingInfo := SCloudBillingInfo{}
billingInfo.SCloudProviderInfo = self.getCloudProviderInfo()
billingInfo.SBillingBaseInfo = self.getBillingBaseInfo()
billingInfo.InternetChargeType = self.ChargeType
if priceKey := self.GetMetadata("ext:price_key", nil); len(priceKey) > 0 {
billingInfo.PriceKey = priceKey
}
desc.Update(jsonutils.Marshal(billingInfo))
return desc
}
func (manager *SElasticipManager) SyncEips(ctx context.Context, userCred mcclient.TokenCredential, provider *SCloudprovider, region *SCloudregion, eips []cloudprovider.ICloudEIP, syncOwnerId mcclient.IIdentityProvider) compare.SyncResult {
// ownerProjId := projectId
lockman.LockClass(ctx, manager, db.GetLockClassKey(manager, syncOwnerId))
defer lockman.ReleaseClass(ctx, manager, db.GetLockClassKey(manager, syncOwnerId))
// localEips := make([]SElasticip, 0)
// remoteEips := make([]cloudprovider.ICloudEIP, 0)
syncResult := compare.SyncResult{}
dbEips, err := manager.getEipsByRegion(region, provider)
if err != nil {
syncResult.Error(err)
return syncResult
}
for i := range dbEips {
if taskman.TaskManager.IsInTask(&dbEips[i]) {
syncResult.Error(fmt.Errorf("object in task"))
return syncResult
}
}
removed := make([]SElasticip, 0)
commondb := make([]SElasticip, 0)
commonext := make([]cloudprovider.ICloudEIP, 0)
added := make([]cloudprovider.ICloudEIP, 0)
err = compare.CompareSets(dbEips, eips, &removed, &commondb, &commonext, &added)
if err != nil {
syncResult.Error(err)
return syncResult
}
for i := 0; i < len(removed); i += 1 {
err = removed[i].syncRemoveCloudEip(ctx, userCred)
if err != nil {
syncResult.DeleteError(err)
} else {
syncResult.Delete()
}
}
for i := 0; i < len(commondb); i += 1 {
err = commondb[i].SyncWithCloudEip(ctx, userCred, provider, commonext[i], syncOwnerId)
if err != nil {
syncResult.UpdateError(err)
} else {
syncVirtualResourceMetadata(ctx, userCred, &commondb[i], commonext[i])
syncResult.Update()
}
}
for i := 0; i < len(added); i += 1 {
new, err := manager.newFromCloudEip(ctx, userCred, added[i], provider, region, syncOwnerId)
if err != nil {
syncResult.AddError(err)
} else {
syncVirtualResourceMetadata(ctx, userCred, new, added[i])
syncResult.Add()
}
}
return syncResult
}
func (self *SElasticip) syncRemoveCloudEip(ctx context.Context, userCred mcclient.TokenCredential) error {
lockman.LockObject(ctx, self)
defer lockman.ReleaseObject(ctx, self)
return self.RealDelete(ctx, userCred)
}
func (self *SElasticip) SyncInstanceWithCloudEip(ctx context.Context, userCred mcclient.TokenCredential, ext cloudprovider.ICloudEIP) error {
resource := self.GetAssociateResource()
vmExtId := ext.GetAssociationExternalId()
if resource == nil && len(vmExtId) == 0 {
return nil
}
if resource != nil && resource.(db.IExternalizedModel).GetExternalId() == vmExtId {
return nil
}
if resource != nil { // dissociate
err := self.Dissociate(ctx, userCred)
if err != nil {
log.Errorf("fail to dissociate vm: %s", err)
return err
}
}
if len(vmExtId) > 0 {
var manager db.IModelManager
switch ext.GetAssociationType() {
case api.EIP_ASSOCIATE_TYPE_SERVER:
manager = GuestManager
case api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY:
manager = NatGatewayManager
case api.EIP_ASSOCIATE_TYPE_LOADBALANCER:
manager = LoadbalancerManager
default:
return errors.Error("unsupported association type")
}
extRes, err := db.FetchByExternalIdAndManagerId(manager, vmExtId, func(q *sqlchemy.SQuery) *sqlchemy.SQuery {
switch ext.GetAssociationType() {
case api.EIP_ASSOCIATE_TYPE_SERVER:
sq := HostManager.Query().SubQuery()
return q.Join(sq, sqlchemy.Equals(sq.Field("id"), q.Field("host_id"))).Filter(sqlchemy.Equals(sq.Field("manager_id"), self.ManagerId))
case api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY, api.EIP_ASSOCIATE_TYPE_LOADBALANCER:
return q.Equals("manager_id", self.ManagerId)
}
return q
})
if err != nil {
log.Errorf("fail to find vm by external ID %s", vmExtId)
return err
}
switch newRes := extRes.(type) {
case *SGuest:
err = self.AssociateVM(ctx, userCred, newRes)
case *SLoadbalancer:
err = self.AssociateLoadbalancer(ctx, userCred, newRes)
case *SNatGateway:
err = self.AssociateNatGateway(ctx, userCred, newRes)
default:
return errors.Error("unsupported association type")
}
if err != nil {
log.Errorf("fail to associate with new vm %s", err)
return err
}
}
return nil
}
func (self *SElasticip) SyncWithCloudEip(ctx context.Context, userCred mcclient.TokenCredential, provider *SCloudprovider, ext cloudprovider.ICloudEIP, syncOwnerId mcclient.IIdentityProvider) error {
diff, err := db.UpdateWithLock(ctx, self, func() error {
// self.Name = ext.GetName()
if bandwidth := ext.GetBandwidth(); bandwidth != 0 {
self.Bandwidth = bandwidth
}
self.IpAddr = ext.GetIpAddr()
self.Mode = ext.GetMode()
self.Status = ext.GetStatus()
self.ExternalId = ext.GetGlobalId()
self.IsEmulated = ext.IsEmulated()
if chargeType := ext.GetInternetChargeType(); len(chargeType) > 0 {
self.ChargeType = chargeType
}
factory, _ := provider.GetProviderFactory()
if factory != nil && factory.IsSupportPrepaidResources() {
self.BillingType = ext.GetBillingType()
self.ExpiredAt = ext.GetExpiredAt()
self.AutoRenew = ext.IsAutoRenew()
}
if createAt := ext.GetCreatedAt(); !createAt.IsZero() {
self.CreatedAt = createAt
}
return nil
})
if err != nil {
log.Errorf("SyncWithCloudEip fail %s", err)
return err
}
db.OpsLog.LogSyncUpdate(self, diff, userCred)
err = self.SyncInstanceWithCloudEip(ctx, userCred, ext)
if err != nil {
return errors.Wrap(err, "fail to sync associated instance of EIP")
}
SyncCloudProject(userCred, self, syncOwnerId, ext, self.ManagerId)
return nil
}
func (manager *SElasticipManager) newFromCloudEip(ctx context.Context, userCred mcclient.TokenCredential, extEip cloudprovider.ICloudEIP, provider *SCloudprovider, region *SCloudregion, syncOwnerId mcclient.IIdentityProvider) (*SElasticip, error) {
eip := SElasticip{}
eip.SetModelManager(manager, &eip)
newName, err := db.GenerateName(manager, syncOwnerId, extEip.GetName())
if err != nil {
return nil, err
}
eip.Name = newName
eip.Status = extEip.GetStatus()
eip.ExternalId = extEip.GetGlobalId()
eip.IpAddr = extEip.GetIpAddr()
eip.Mode = extEip.GetMode()
eip.IsEmulated = extEip.IsEmulated()
eip.ManagerId = provider.Id
eip.CloudregionId = region.Id
eip.ChargeType = extEip.GetInternetChargeType()
if networkId := extEip.GetINetworkId(); len(networkId) > 0 {
network, err := db.FetchByExternalIdAndManagerId(NetworkManager, networkId, func(q *sqlchemy.SQuery) *sqlchemy.SQuery {
wire := WireManager.Query().SubQuery()
vpc := VpcManager.Query().SubQuery()
return q.Join(wire, sqlchemy.Equals(wire.Field("id"), q.Field("wire_id"))).
Join(vpc, sqlchemy.Equals(vpc.Field("id"), wire.Field("vpc_id"))).
Filter(sqlchemy.Equals(vpc.Field("manager_id"), provider.Id))
})
if err != nil {
msg := fmt.Sprintf("failed to found network by externalId %s error: %v", networkId, err)
log.Errorf(msg)
return nil, errors.Error(msg)
}
eip.NetworkId = network.GetId()
}
err = manager.TableSpec().Insert(ctx, &eip)
if err != nil {
log.Errorf("newFromCloudEip fail %s", err)
return nil, err
}
SyncCloudProject(userCred, &eip, syncOwnerId, extEip, eip.ManagerId)
err = eip.SyncInstanceWithCloudEip(ctx, userCred, extEip)
if err != nil {
return nil, errors.Wrap(err, "fail to sync associated instance of EIP")
}
db.OpsLog.LogEvent(&eip, db.ACT_CREATE, eip.GetShortDesc(ctx), userCred)
return &eip, nil
}
func (manager *SElasticipManager) getEipForInstance(instanceType string, instanceId string) (*SElasticip, error) {
return manager.getEip(instanceType, instanceId, api.EIP_MODE_STANDALONE_EIP)
}
func (manager *SElasticipManager) getEip(instanceType string, instanceId string, eipMode string) (*SElasticip, error) {
eip := SElasticip{}
q := manager.Query()
q = q.Equals("associate_type", instanceType)
q = q.Equals("associate_id", instanceId)
if len(eipMode) > 0 {
q = q.Equals("mode", eipMode)
}
err := q.First(&eip)
if err != nil {
if err != sql.ErrNoRows {
log.Errorf("getEipForInstance query fail %s", err)
return nil, err
} else {
return nil, nil
}
}
eip.SetModelManager(manager, &eip)
return &eip, nil
}
func (self *SElasticip) IsAssociated() bool {
if len(self.AssociateId) == 0 {
return false
}
if self.GetAssociateVM() != nil {
return true
}
if self.GetAssociateLoadbalancer() != nil {
return true
}
if self.GetAssociateNatGateway() != nil {
return true
}
return false
}
func (self *SElasticip) GetAssociateVM() *SGuest {
if self.AssociateType == api.EIP_ASSOCIATE_TYPE_SERVER && len(self.AssociateId) > 0 {
return GuestManager.FetchGuestById(self.AssociateId)
}
return nil
}
func (self *SElasticip) GetAssociateLoadbalancer() *SLoadbalancer {
if self.AssociateType == api.EIP_ASSOCIATE_TYPE_LOADBALANCER && len(self.AssociateId) > 0 {
_lb, err := LoadbalancerManager.FetchById(self.AssociateId)
if err != nil {
return nil
}
lb := _lb.(*SLoadbalancer)
if lb.PendingDeleted {
return nil
}
return lb
}
return nil
}
// GetAssociateNatGateway returns the NAT gateway this EIP is associated with,
// or nil when the EIP is not associated with a NAT gateway or the lookup
// fails.
func (self *SElasticip) GetAssociateNatGateway() *SNatGateway {
	if self.AssociateType != api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY || len(self.AssociateId) == 0 {
		return nil
	}
	model, err := NatGatewayManager.FetchById(self.AssociateId)
	if err != nil {
		return nil
	}
	return model.(*SNatGateway)
}
// GetAssociateResource returns whichever resource this EIP is bound to — a
// guest, a loadbalancer or a NAT gateway — as a generic db.IModel, or nil if
// none of them resolves. Lookups short-circuit in that order.
func (self *SElasticip) GetAssociateResource() db.IModel {
	if res := self.GetAssociateVM(); res != nil {
		return res
	}
	if res := self.GetAssociateLoadbalancer(); res != nil {
		return res
	}
	if res := self.GetAssociateNatGateway(); res != nil {
		return res
	}
	return nil
}
// Dissociate clears the association between this EIP and its resource
// (server, NAT gateway or loadbalancer), records detach events in the ops
// log, and removes the EIP record entirely when it is an instance public IP
// (which cannot exist unassociated).
func (self *SElasticip) Dissociate(ctx context.Context, userCred mcclient.TokenCredential) error {
	if len(self.AssociateType) == 0 {
		return nil
	}
	var vm *SGuest
	var nat *SNatGateway
	var lb *SLoadbalancer
	// Resolve the associated resource BEFORE clearing the association, so it
	// can still be referenced in the ops-log events below.
	switch self.AssociateType {
	case api.EIP_ASSOCIATE_TYPE_SERVER:
		vm = self.GetAssociateVM()
		if vm == nil {
			// dangling association; proceed and clear it anyway
			log.Errorf("dissociate VM not exists???")
		}
	case api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY:
		nat = self.GetAssociateNatGateway()
		if nat == nil {
			log.Errorf("dissociate Nat gateway not exists???")
		}
	case api.EIP_ASSOCIATE_TYPE_LOADBALANCER:
		lb = self.GetAssociateLoadbalancer()
		if lb == nil {
			log.Errorf("dissociate loadbalancer not exists???")
		}
	}
	_, err := db.Update(self, func() error {
		self.AssociateId = ""
		self.AssociateType = ""
		return nil
	})
	if err != nil {
		return err
	}
	// Record the detach from both sides of the association.
	if vm != nil {
		db.OpsLog.LogDetachEvent(ctx, vm, self, userCred, self.GetShortDesc(ctx))
		db.OpsLog.LogEvent(self, db.ACT_EIP_DETACH, vm.GetShortDesc(ctx), userCred)
		db.OpsLog.LogEvent(vm, db.ACT_EIP_DETACH, self.GetShortDesc(ctx), userCred)
	}
	if nat != nil {
		db.OpsLog.LogDetachEvent(ctx, nat, self, userCred, self.GetShortDesc(ctx))
		db.OpsLog.LogEvent(self, db.ACT_EIP_DETACH, nat.GetShortDesc(ctx), userCred)
		db.OpsLog.LogEvent(nat, db.ACT_EIP_DETACH, self.GetShortDesc(ctx), userCred)
	}
	if lb != nil {
		db.OpsLog.LogDetachEvent(ctx, lb, self, userCred, self.GetShortDesc(ctx))
		db.OpsLog.LogEvent(self, db.ACT_EIP_DETACH, lb.GetShortDesc(ctx), userCred)
		db.OpsLog.LogEvent(lb, db.ACT_EIP_DETACH, self.GetShortDesc(ctx), userCred)
	}
	// An instance public IP cannot exist without its instance; drop the record.
	if self.Mode == api.EIP_MODE_INSTANCE_PUBLICIP {
		self.RealDelete(ctx, userCred)
	}
	return nil
}
// AssociateLoadbalancer binds this EIP to the given loadbalancer and records
// attach events in the ops log. Re-associating with the same loadbalancer is
// a no-op; associating while bound to anything else is an error.
func (self *SElasticip) AssociateLoadbalancer(ctx context.Context, userCred mcclient.TokenCredential, lb *SLoadbalancer) error {
	if lb.PendingDeleted {
		return fmt.Errorf("loadbalancer is deleted")
	}
	if len(self.AssociateType) > 0 && len(self.AssociateId) > 0 {
		if self.AssociateType == api.EIP_ASSOCIATE_TYPE_LOADBALANCER && self.AssociateId == lb.Id {
			// idempotent: already associated with this loadbalancer
			return nil
		} else {
			return fmt.Errorf("EIP has been associated!!")
		}
	}
	_, err := db.Update(self, func() error {
		self.AssociateType = api.EIP_ASSOCIATE_TYPE_LOADBALANCER
		self.AssociateId = lb.Id
		return nil
	})
	if err != nil {
		return err
	}
	// Record the attach from both sides of the association.
	db.OpsLog.LogAttachEvent(ctx, lb, self, userCred, self.GetShortDesc(ctx))
	db.OpsLog.LogEvent(self, db.ACT_EIP_ATTACH, lb.GetShortDesc(ctx), userCred)
	db.OpsLog.LogEvent(lb, db.ACT_EIP_ATTACH, self.GetShortDesc(ctx), userCred)
	return nil
}
// AssociateVM binds this EIP to the given guest and records attach events in
// the ops log. Re-associating with the same guest is a no-op; associating
// while bound to anything else is an error.
func (self *SElasticip) AssociateVM(ctx context.Context, userCred mcclient.TokenCredential, vm *SGuest) error {
	if vm.PendingDeleted || vm.Deleted {
		return fmt.Errorf("vm is deleted")
	}
	if len(self.AssociateType) > 0 && len(self.AssociateId) > 0 {
		if self.AssociateType == api.EIP_ASSOCIATE_TYPE_SERVER && self.AssociateId == vm.Id {
			// idempotent: already associated with this guest
			return nil
		} else {
			return fmt.Errorf("EIP has been associated!!")
		}
	}
	_, err := db.Update(self, func() error {
		self.AssociateType = api.EIP_ASSOCIATE_TYPE_SERVER
		self.AssociateId = vm.Id
		return nil
	})
	if err != nil {
		return err
	}
	// Record the attach from both sides of the association.
	db.OpsLog.LogAttachEvent(ctx, vm, self, userCred, self.GetShortDesc(ctx))
	db.OpsLog.LogEvent(self, db.ACT_EIP_ATTACH, vm.GetShortDesc(ctx), userCred)
	db.OpsLog.LogEvent(vm, db.ACT_EIP_ATTACH, self.GetShortDesc(ctx), userCred)
	return nil
}
// AssociateNatGateway binds this EIP to the given NAT gateway and records
// attach events in the ops log. Re-associating with the same gateway is a
// no-op; associating while bound to anything else is an error.
func (self *SElasticip) AssociateNatGateway(ctx context.Context, userCred mcclient.TokenCredential, nat *SNatGateway) error {
	if nat.Deleted {
		return fmt.Errorf("nat gateway is deleted")
	}
	if len(self.AssociateType) > 0 && len(self.AssociateId) > 0 {
		if self.AssociateType == api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY && self.AssociateId == nat.Id {
			// idempotent: already associated with this gateway
			return nil
		} else {
			return fmt.Errorf("Eip has been associated!!")
		}
	}
	_, err := db.Update(self, func() error {
		self.AssociateType = api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY
		self.AssociateId = nat.Id
		return nil
	})
	if err != nil {
		return err
	}
	// Record the attach from both sides of the association.
	db.OpsLog.LogAttachEvent(ctx, nat, self, userCred, self.GetShortDesc(ctx))
	db.OpsLog.LogEvent(self, db.ACT_EIP_ATTACH, nat.GetShortDesc(ctx), userCred)
	db.OpsLog.LogEvent(nat, db.ACT_EIP_ATTACH, self.GetShortDesc(ctx), userCred)
	return nil
}
// getEipByExtEip returns the local EIP record mirroring the given cloud EIP,
// looked up by external id scoped to the provider. When no record exists yet
// it is created from the cloud resource via newFromCloudEip.
func (manager *SElasticipManager) getEipByExtEip(ctx context.Context, userCred mcclient.TokenCredential, extEip cloudprovider.ICloudEIP, provider *SCloudprovider, region *SCloudregion, syncOwnerId mcclient.IIdentityProvider) (*SElasticip, error) {
	eipObj, err := db.FetchByExternalIdAndManagerId(manager, extEip.GetGlobalId(), func(q *sqlchemy.SQuery) *sqlchemy.SQuery {
		// restrict the lookup to EIPs of this cloud provider
		return q.Equals("manager_id", provider.Id)
	})
	if err == nil {
		return eipObj.(*SElasticip), nil
	}
	if err != sql.ErrNoRows {
		log.Errorf("FetchByExternalId fail %s", err)
		return nil, err
	}
	// not found locally: import the cloud EIP as a new record
	return manager.newFromCloudEip(ctx, userCred, extEip, provider, region, syncOwnerId)
}
// ValidateCreateData validates and normalizes the input for creating a
// standalone EIP: resolves the target region (falling back to the default
// region) and optional cloud provider, forces standalone mode, checks the
// charge type, delegates to base-class and region-driver validation, and
// finally reserves pending EIP quota.
func (manager *SElasticipManager) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, input api.SElasticipCreateInput) (*jsonutils.JSONDict, error) {
	var (
		region   *SCloudregion
		provider *SCloudprovider
		err      error
	)
	// Accept the region under any of its aliased input fields; first
	// non-empty one wins.
	for _, cloudregion := range []string{input.Cloudregion, input.Region, input.RegionId} {
		if len(cloudregion) > 0 {
			input.Cloudregion = cloudregion
			break
		}
	}
	if input.Cloudregion == "" {
		input.Cloudregion = api.DEFAULT_REGION_ID
	}
	// NOTE(review): err here shadows the outer err declared above.
	if obj, err := CloudregionManager.FetchByIdOrName(nil, input.Cloudregion); err != nil {
		if err != sql.ErrNoRows {
			return nil, httperrors.NewGeneralError(err)
		} else {
			return nil, httperrors.NewResourceNotFoundError("Region %s not found", input.Cloudregion)
		}
	} else {
		region = obj.(*SCloudregion)
	}
	input.CloudregionId = region.GetId()
	// Same aliasing scheme for the cloud provider; provider is optional.
	for _, cloudprovider := range []string{input.Cloudprovider, input.Manager, input.ManagerId} {
		if len(cloudprovider) > 0 {
			input.Cloudprovider = cloudprovider
			break
		}
	}
	if input.Cloudprovider != "" {
		providerObj, err := CloudproviderManager.FetchByIdOrName(nil, input.Cloudprovider)
		if err != nil {
			if err != sql.ErrNoRows {
				return nil, httperrors.NewGeneralError(err)
			} else {
				return nil, httperrors.NewResourceNotFoundError("Cloud provider %s not found", input.Cloudprovider)
			}
		}
		provider = providerObj.(*SCloudprovider)
		input.ManagerId = provider.Id
	}
	// publicIp cannot be created standalone
	input.Mode = api.EIP_MODE_STANDALONE_EIP
	if len(input.ChargeType) == 0 {
		input.ChargeType = api.EIP_CHARGE_TYPE_DEFAULT
	}
	if !utils.IsInStringArray(input.ChargeType, []string{api.EIP_CHARGE_TYPE_BY_BANDWIDTH, api.EIP_CHARGE_TYPE_BY_TRAFFIC}) {
		return nil, httperrors.NewInputParameterError("charge type %s not supported", input.ChargeType)
	}
	if input.VirtualResourceCreateInput, err = manager.SVirtualResourceBaseManager.ValidateCreateData(ctx, userCred, ownerId, query, input.VirtualResourceCreateInput); err != nil {
		return nil, err
	}
	if err = region.GetDriver().ValidateCreateEipData(ctx, userCred, &input); err != nil {
		return nil, err
	}
	// Reserve quota only after all validation above has passed, so a
	// duplicate-name (or other) failure does not leave pending EIP quota
	// behind.
	eipPendingUsage := &SRegionQuota{Eip: 1}
	quotaKeys := fetchRegionalQuotaKeys(rbacutils.ScopeProject, ownerId, region, provider)
	eipPendingUsage.SetKeys(quotaKeys)
	if err = quotas.CheckSetPendingQuota(ctx, userCred, eipPendingUsage); err != nil {
		return nil, err
	}
	return input.JSON(input), nil
}
// GetQuotaKeys returns the regional quota keys (project scope) this EIP is
// accounted under. It fails when the EIP has no resolvable region.
func (eip *SElasticip) GetQuotaKeys() (quotas.IQuotaKeys, error) {
	region := eip.GetRegion()
	if region == nil {
		return nil, errors.Wrap(httperrors.ErrInvalidStatus, "no valid region")
	}
	keys := fetchRegionalQuotaKeys(rbacutils.ScopeProject, eip.GetOwnerId(), region, eip.GetCloudprovider())
	return keys, nil
}
// PostCreate runs after the EIP record is inserted: it cancels the pending
// quota reserved in ValidateCreateData (converting it to real usage) and
// kicks off the allocation task.
func (self *SElasticip) PostCreate(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, data jsonutils.JSONObject) {
	self.SVirtualResourceBase.PostCreate(ctx, userCred, ownerId, query, data)
	eipPendingUsage := &SRegionQuota{Eip: 1}
	keys, err := self.GetQuotaKeys()
	if err != nil {
		// quota bookkeeping failure is logged but does not block allocation
		log.Errorf("GetQuotaKeys fail %s", err)
	} else {
		eipPendingUsage.SetKeys(keys)
		err := quotas.CancelPendingUsage(ctx, userCred, eipPendingUsage, eipPendingUsage, true)
		if err != nil {
			log.Errorf("SElasticip CancelPendingUsage error: %s", err)
		}
	}
	// NOTE(review): assumes create data is always a *jsonutils.JSONDict; a
	// non-dict payload would panic here — confirm upstream guarantees this.
	self.startEipAllocateTask(ctx, userCred, data.(*jsonutils.JSONDict), "")
}
// startEipAllocateTask schedules an EipAllocateTask for this EIP and moves it
// to the allocating status. Returns the task-creation error, if any.
func (self *SElasticip) startEipAllocateTask(ctx context.Context, userCred mcclient.TokenCredential, params *jsonutils.JSONDict, parentTaskId string) error {
	task, err := taskman.TaskManager.NewTask(ctx, "EipAllocateTask", self, userCred, params, parentTaskId, "", nil)
	if err == nil {
		self.SetStatus(userCred, api.EIP_STATUS_ALLOCATE, "start allocate")
		task.ScheduleRun(nil)
		return nil
	}
	log.Errorf("newtask EipAllocateTask fail %s", err)
	return err
}
// Delete is intentionally a no-op: deletion goes through CustomizeDelete,
// which launches EipDeallocateTask; the actual row removal is done by
// RealDelete (presumably invoked by that task — not visible in this file).
func (self *SElasticip) Delete(ctx context.Context, userCred mcclient.TokenCredential) error {
	// Elasticip delete do nothing
	return nil
}
// RealDelete removes the EIP record for real, bypassing the no-op Delete
// above by delegating straight to the base class.
func (self *SElasticip) RealDelete(ctx context.Context, userCred mcclient.TokenCredential) error {
	return self.SVirtualResourceBase.Delete(ctx, userCred)
}
// CustomizeDelete intercepts the delete request and turns it into an
// asynchronous EipDeallocateTask instead of a direct row removal.
func (self *SElasticip) CustomizeDelete(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) error {
	return self.StartEipDeallocateTask(ctx, userCred, "")
}
// ValidateDeleteCondition refuses deletion while the EIP is still bound to a
// resource; otherwise it defers to the base-class checks.
func (self *SElasticip) ValidateDeleteCondition(ctx context.Context) error {
	if !self.IsAssociated() {
		return self.SVirtualResourceBase.ValidateDeleteCondition(ctx)
	}
	return fmt.Errorf("eip is associated with resources")
}
// StartEipDeallocateTask schedules an EipDeallocateTask for this EIP and
// moves it to the deallocating status. Returns the task-creation error, if
// any.
func (self *SElasticip) StartEipDeallocateTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
	task, err := taskman.TaskManager.NewTask(ctx, "EipDeallocateTask", self, userCred, nil, parentTaskId, "", nil)
	if err == nil {
		self.SetStatus(userCred, api.EIP_STATUS_DEALLOCATE, "start to delete")
		task.ScheduleRun(nil)
		return nil
	}
	log.Errorf("newTask EipDeallocateTask fail %s", err)
	return err
}
// AllowPerformAssociate grants the associate action to the EIP's owner or to
// an admin with the corresponding permission.
func (self *SElasticip) AllowPerformAssociate(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool {
	if self.IsOwner(userCred) {
		return true
	}
	return db.IsAdminAllowPerform(userCred, self, "associate")
}
// PerformAssociate handles the "associate" API action: it validates the EIP
// state, resolves the target server, enforces the one-EIP-per-guest
// invariant, checks region/zone/provider compatibility between the EIP and
// the server, and finally launches the associate task.
//
// Fix: the server zone lookup result is now nil-checked before dereference
// (every other lookup in this function was already guarded, but a server
// whose zone could not be resolved would previously cause a nil pointer
// dereference).
func (self *SElasticip) PerformAssociate(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) {
	if self.IsAssociated() {
		return nil, httperrors.NewConflictError("eip has been associated with instance")
	}
	if self.Status != api.EIP_STATUS_READY {
		return nil, httperrors.NewInvalidStatusError("eip cannot associate in status %s", self.Status)
	}
	if self.Mode == api.EIP_MODE_INSTANCE_PUBLICIP {
		return nil, httperrors.NewUnsupportOperationError("fixed eip cannot be associated")
	}
	instanceId := jsonutils.GetAnyString(data, []string{"instance", "instance_id"})
	if len(instanceId) == 0 {
		return nil, httperrors.NewMissingParameterError("instance_id")
	}
	// Only server association is supported through this action.
	instanceType := jsonutils.GetAnyString(data, []string{"instance_type"})
	if len(instanceType) == 0 {
		instanceType = api.EIP_ASSOCIATE_TYPE_SERVER
	}
	if instanceType != api.EIP_ASSOCIATE_TYPE_SERVER {
		return nil, httperrors.NewInputParameterError("Unsupported %s", instanceType)
	}
	vmObj, err := GuestManager.FetchByIdOrName(userCred, instanceId)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, httperrors.NewResourceNotFoundError("server %s not found", instanceId)
		} else {
			return nil, httperrors.NewGeneralError(err)
		}
	}
	server := vmObj.(*SGuest)
	// Hold the guest lock so concurrent associations cannot race past the
	// single-EIP guard below.
	lockman.LockObject(ctx, server)
	defer lockman.ReleaseObject(ctx, server)
	if server.PendingDeleted {
		return nil, httperrors.NewInvalidStatusError("cannot associate pending delete server")
	}
	// IMPORTANT: this serves as a guard against a guest to have multiple
	// associated elastic_ips
	seip, _ := server.GetEip()
	if seip != nil {
		return nil, httperrors.NewInvalidStatusError("instance is already associated with eip")
	}
	if ok, _ := utils.InStringArray(server.Status, []string{api.VM_READY, api.VM_RUNNING}); !ok {
		return nil, httperrors.NewInvalidStatusError("cannot associate server in status %s", server.Status)
	}
	// The EIP must not live on a network the guest is already attached to.
	if len(self.NetworkId) > 0 {
		gns, err := server.GetNetworks("")
		if err != nil {
			return nil, httperrors.NewGeneralError(errors.Wrap(err, "GetNetworks"))
		}
		for _, gn := range gns {
			if gn.NetworkId == self.NetworkId {
				return nil, httperrors.NewInputParameterError("cannot associate eip with same network")
			}
		}
	}
	serverRegion := server.getRegion()
	if serverRegion == nil {
		return nil, httperrors.NewInputParameterError("server region is not found???")
	}
	eipRegion := self.GetRegion()
	if eipRegion == nil {
		return nil, httperrors.NewInputParameterError("eip region is not found???")
	}
	if serverRegion.Id != eipRegion.Id {
		return nil, httperrors.NewInputParameterError("eip and server are not in the same region")
	}
	// A zone-scoped EIP must match the server's zone.
	eipZone := self.GetZone()
	if eipZone != nil {
		serverZone := server.getZone()
		if serverZone == nil {
			// BUG fix: previously dereferenced without this check.
			return nil, httperrors.NewInputParameterError("server zone is not found???")
		}
		if serverZone.Id != eipZone.Id {
			return nil, httperrors.NewInputParameterError("eip and server are not in the same zone")
		}
	}
	srvHost := server.GetHost()
	if srvHost == nil {
		return nil, httperrors.NewInputParameterError("server host is not found???")
	}
	if srvHost.ManagerId != self.ManagerId {
		return nil, httperrors.NewInputParameterError("server and eip are not managed by the same provider")
	}
	err = self.StartEipAssociateInstanceTask(ctx, userCred, server, "")
	return nil, err
}
// StartEipAssociateInstanceTask packs the target server's identifiers into
// task parameters and launches the generic associate task.
func (self *SElasticip) StartEipAssociateInstanceTask(ctx context.Context, userCred mcclient.TokenCredential, server *SGuest, parentTaskId string) error {
	data := jsonutils.NewDict()
	data.Add(jsonutils.NewString(server.ExternalId), "instance_external_id")
	data.Add(jsonutils.NewString(server.Id), "instance_id")
	data.Add(jsonutils.NewString(api.EIP_ASSOCIATE_TYPE_SERVER), "instance_type")
	return self.StartEipAssociateTask(ctx, userCred, data, parentTaskId)
}
// StartEipAssociateTask schedules an EipAssociateTask with the given
// parameters and moves the EIP to the associating status. Returns the
// task-creation error, if any.
func (self *SElasticip) StartEipAssociateTask(ctx context.Context, userCred mcclient.TokenCredential, params *jsonutils.JSONDict, parentTaskId string) error {
	task, err := taskman.TaskManager.NewTask(ctx, "EipAssociateTask", self, userCred, params, parentTaskId, "", nil)
	if err == nil {
		self.SetStatus(userCred, api.EIP_STATUS_ASSOCIATE, "start to associate")
		task.ScheduleRun(nil)
		return nil
	}
	log.Errorf("create EipAssociateTask task fail %s", err)
	return err
}
// AllowPerformDissociate grants the dissociate action to the EIP's owner or
// to an admin with the corresponding permission.
func (self *SElasticip) AllowPerformDissociate(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool {
	if self.IsOwner(userCred) {
		return true
	}
	return db.IsAdminAllowPerform(userCred, self, "dissociate")
}
// PerformDissociate handles the "dissociate" API action. Dangling
// associations (the referenced resource no longer exists) are cleared
// synchronously; NAT gateway associations are refused while SNAT/DNAT rules
// still reference this EIP's address; otherwise an asynchronous dissociate
// task is launched.
func (self *SElasticip) PerformDissociate(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) {
	if len(self.AssociateId) == 0 {
		return nil, nil // success
	}
	// associate with an invalid vm
	if !self.IsAssociated() {
		return nil, self.Dissociate(ctx, userCred)
	}
	if self.Status != api.EIP_STATUS_READY {
		return nil, httperrors.NewInvalidStatusError("eip cannot dissociate in status %s", self.Status)
	}
	if self.Mode == api.EIP_MODE_INSTANCE_PUBLICIP {
		return nil, httperrors.NewUnsupportOperationError("fixed public eip cannot be dissociated")
	}
	if self.AssociateType == api.EIP_ASSOCIATE_TYPE_NAT_GATEWAY {
		model, err := NatGatewayManager.FetchById(self.AssociateId)
		if err != nil {
			return nil, errors.Wrapf(err, "fail to fetch natgateway %s", self.AssociateId)
		}
		natgateway := model.(*SNatGateway)
		// Refuse while SNAT rules still use this EIP's address.
		sCount, err := natgateway.GetSTableSize(func(q *sqlchemy.SQuery) *sqlchemy.SQuery {
			return q.Equals("ip", self.IpAddr)
		})
		if err != nil {
			return nil, errors.Wrapf(err, "fail to get stable size of natgateway %s", self.AssociateId)
		}
		if sCount > 0 {
			return nil, httperrors.NewUnsupportOperationError(
				"the associated natgateway has corresponding snat rules with eip %s, please delete them firstly", self.IpAddr)
		}
		// Refuse while DNAT rules still use this EIP's address.
		dCount, err := natgateway.GetDTableSize(func(q *sqlchemy.SQuery) *sqlchemy.SQuery {
			return q.Equals("external_ip", self.IpAddr)
		})
		if err != nil {
			return nil, errors.Wrapf(err, "fail to get dtable size of natgateway %s", self.AssociateId)
		}
		if dCount > 0 {
			return nil, httperrors.NewUnsupportOperationError(
				"the associated natgateway has corresponding dnat rules with eip %s, please delete them firstly", self.IpAddr)
		}
	}
	// auto_delete asks the task to deallocate the EIP after dissociation.
	autoDelete := jsonutils.QueryBoolean(data, "auto_delete", false)
	err := self.StartEipDissociateTask(ctx, userCred, autoDelete, "")
	return nil, err
}
// StartEipDissociateTask schedules an EipDissociateTask for this EIP and
// moves it to the dissociating status. When autoDelete is set the task is
// asked to deallocate the EIP after dissociation.
//
// Fix: a task-creation failure was previously logged but `nil` was returned,
// silently reporting success to the caller; every sibling Start*Task method
// returns the error, and so does this one now.
func (self *SElasticip) StartEipDissociateTask(ctx context.Context, userCred mcclient.TokenCredential, autoDelete bool, parentTaskId string) error {
	params := jsonutils.NewDict()
	if autoDelete {
		params.Add(jsonutils.JSONTrue, "auto_delete")
	}
	task, err := taskman.TaskManager.NewTask(ctx, "EipDissociateTask", self, userCred, params, parentTaskId, "", nil)
	if err != nil {
		log.Errorf("create EipDissociateTask fail %s", err)
		return err
	}
	self.SetStatus(userCred, api.EIP_STATUS_DISSOCIATE, "start to dissociate")
	task.ScheduleRun(nil)
	return nil
}
// GetIRegion resolves the cloud-provider region object hosting this EIP.
func (self *SElasticip) GetIRegion() (cloudprovider.ICloudRegion, error) {
	driver, err := self.GetDriver()
	if err != nil {
		return nil, errors.Wrap(err, "GetDriver")
	}
	localRegion := self.GetRegion()
	if localRegion == nil {
		return nil, fmt.Errorf("fail to find region for eip")
	}
	return driver.GetIRegionById(localRegion.GetExternalId())
}
// GetIEip resolves the cloud-provider EIP object backing this record, looked
// up by external id within its region.
func (self *SElasticip) GetIEip() (cloudprovider.ICloudEIP, error) {
	region, err := self.GetIRegion()
	if err != nil {
		return nil, err
	}
	return region.GetIEipById(self.GetExternalId())
}
// AllowPerformSyncstatus grants the syncstatus action to the EIP's owner or
// to an admin with the corresponding permission.
func (self *SElasticip) AllowPerformSyncstatus(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool {
	if self.IsOwner(userCred) {
		return true
	}
	return db.IsAdminAllowPerform(userCred, self, "syncstatus")
}
// PerformSyncstatus synchronizes the status of the elastic public IP.
// Managed EIPs launch an asynchronous sync task; unmanaged ones are simply
// reset to READY. Instance public IPs are not supported.
func (self *SElasticip) PerformSyncstatus(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.ElasticipSyncstatusInput) (jsonutils.JSONObject, error) {
	if self.Mode == api.EIP_MODE_INSTANCE_PUBLICIP {
		return nil, httperrors.NewUnsupportOperationError("fixed eip cannot sync status")
	}
	if !self.IsManaged() {
		return nil, self.SetStatus(userCred, api.EIP_STATUS_READY, "eip sync status")
	}
	return nil, StartResourceSyncStatusTask(ctx, userCred, self, "EipSyncstatusTask", "")
}
// AllowPerformSync grants the sync action to the EIP's owner or to an admin
// with the corresponding permission.
func (self *SElasticip) AllowPerformSync(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool {
	if self.IsOwner(userCred) {
		return true
	}
	return db.IsAdminAllowPerform(userCred, self, "sync")
}
// PerformSync triggers a status sync task for managed EIPs and is a no-op
// for unmanaged ones. Instance public IPs are not supported.
func (self *SElasticip) PerformSync(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) {
	if self.Mode == api.EIP_MODE_INSTANCE_PUBLICIP {
		return nil, httperrors.NewUnsupportOperationError("fixed eip cannot sync status")
	}
	if !self.IsManaged() {
		return nil, nil
	}
	return nil, StartResourceSyncStatusTask(ctx, userCred, self, "EipSyncstatusTask", "")
}
// GetExtraDetails returns an empty details struct; per-row details for API
// responses are assembled in the manager's FetchCustomizeColumns instead.
func (self *SElasticip) GetExtraDetails(
	ctx context.Context,
	userCred mcclient.TokenCredential,
	query jsonutils.JSONObject,
	isList bool,
) (api.ElasticipDetails, error) {
	return api.ElasticipDetails{}, nil
}
// FetchCustomizeColumns assembles API response rows for a batch of EIPs: it
// merges the virtual-resource, managed-resource, and cloudregion detail
// columns from the embedded base managers, then fills EIP-specific fields
// via getMoreDetails. One output row per input object, index-aligned.
func (manager *SElasticipManager) FetchCustomizeColumns(
	ctx context.Context,
	userCred mcclient.TokenCredential,
	query jsonutils.JSONObject,
	objs []interface{},
	fields stringutils2.SSortedStrings,
	isList bool,
) []api.ElasticipDetails {
	rows := make([]api.ElasticipDetails, len(objs))
	virtRows := manager.SVirtualResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList)
	managerRows := manager.SManagedResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList)
	regionRows := manager.SCloudregionResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList)
	for i := range rows {
		rows[i] = api.ElasticipDetails{
			VirtualResourceDetails:  virtRows[i],
			ManagedResourceInfo:     managerRows[i],
			CloudregionResourceInfo: regionRows[i],
		}
		rows[i] = objs[i].(*SElasticip).getMoreDetails(rows[i])
	}
	return rows
}
// getMoreDetails fills EIP-specific detail fields: currently just the name
// of the associated resource, when one exists.
func (self *SElasticip) getMoreDetails(out api.ElasticipDetails) api.ElasticipDetails {
	if res := self.GetAssociateResource(); res != nil {
		out.AssociateName = res.GetName()
	}
	return out
}
// NewEipForVMOnHost creates a standalone EIP record destined for the given
// guest on the given host: it inherits the guest's project/domain and the
// host's provider and region, picks an EIP network with free addresses for
// on-premise hosts (empty ManagerId), generates a unique name, inserts the
// record, and converts the caller's pending quota into real EIP usage.
// The cloud-side allocation itself happens later via the allocate task.
func (manager *SElasticipManager) NewEipForVMOnHost(ctx context.Context, userCred mcclient.TokenCredential, vm *SGuest, host *SHost, bw int, chargeType string, autoDellocate bool, pendingUsage quotas.IQuota) (*SElasticip, error) {
	region := host.GetRegion()
	if len(chargeType) == 0 {
		chargeType = api.EIP_CHARGE_TYPE_BY_TRAFFIC
	}
	eip := SElasticip{}
	eip.SetModelManager(manager, &eip)
	eip.Mode = api.EIP_MODE_STANDALONE_EIP
	// do not implicitly auto dellocate EIP, should be set by user explicitly
	// eip.AutoDellocate = tristate.True
	eip.Bandwidth = bw
	eip.ChargeType = chargeType
	eip.AutoDellocate = tristate.NewFromBool(autoDellocate)
	eip.DomainId = vm.DomainId
	eip.ProjectId = vm.ProjectId
	eip.ProjectSrc = string(apis.OWNER_SOURCE_LOCAL)
	eip.ManagerId = host.ManagerId
	eip.CloudregionId = region.Id
	eip.Name = fmt.Sprintf("eip-for-%s", vm.GetName())
	// On-premise host (no cloud provider): pick a local EIP network with at
	// least one free address.
	if host.ManagerId == "" {
		hostq := HostManager.Query().SubQuery()
		wireq := WireManager.Query().SubQuery()
		hostwireq := HostwireManager.Query().SubQuery()
		q := NetworkManager.Query()
		q = q.Join(wireq, sqlchemy.Equals(wireq.Field("id"), q.Field("wire_id")))
		q = q.Join(hostwireq, sqlchemy.Equals(hostwireq.Field("wire_id"), wireq.Field("id")))
		q = q.Join(hostq, sqlchemy.Equals(hostq.Field("id"), host.Id))
		q = q.Equals("server_type", api.NETWORK_TYPE_EIP)
		var nets []SNetwork
		if err := db.FetchModelObjects(NetworkManager, q, &nets); err != nil {
			return nil, errors.Wrapf(err, "fetch eip networks usable in host %s(%s)",
				host.Name, host.Id)
		}
		for i := range nets {
			net := &nets[i]
			cnt, err := net.GetFreeAddressCount()
			if err != nil {
				// skip networks whose capacity cannot be determined
				continue
			}
			if cnt > 0 {
				eip.NetworkId = net.Id
				break
			}
		}
	}
	var err error
	// Deduplicate the generated name before insertion.
	eip.Name, err = db.GenerateName(manager, userCred, eip.Name)
	if err != nil {
		return nil, errors.Wrap(err, "db.GenerateName")
	}
	err = manager.TableSpec().Insert(ctx, &eip)
	if err != nil {
		log.Errorf("create EIP record fail %s", err)
		return nil, err
	}
	// Convert the pending quota reserved by the caller into real usage.
	eipPendingUsage := &SRegionQuota{Eip: 1}
	keys := fetchRegionalQuotaKeys(
		rbacutils.ScopeProject,
		vm.GetOwnerId(),
		region,
		host.GetCloudprovider(),
	)
	eipPendingUsage.SetKeys(keys)
	quotas.CancelPendingUsage(ctx, userCred, pendingUsage, eipPendingUsage, true)
	return &eip, nil
}
// AllocateAndAssociateVM launches the allocate task with the target guest's
// identifiers attached, so the EIP is associated with the guest right after
// allocation. The guest is marked as associating first.
func (eip *SElasticip) AllocateAndAssociateVM(ctx context.Context, userCred mcclient.TokenCredential, vm *SGuest, parentTaskId string) error {
	data := jsonutils.NewDict()
	data.Add(jsonutils.NewString(vm.ExternalId), "instance_external_id")
	data.Add(jsonutils.NewString(vm.Id), "instance_id")
	data.Add(jsonutils.NewString(api.EIP_ASSOCIATE_TYPE_SERVER), "instance_type")
	vm.SetStatus(userCred, api.VM_ASSOCIATE_EIP, "allocate and associate EIP")
	return eip.startEipAllocateTask(ctx, userCred, data, parentTaskId)
}
// AllowPerformChangeBandwidth grants the change-bandwidth action to the
// EIP's owner or to an admin with the corresponding permission.
func (self *SElasticip) AllowPerformChangeBandwidth(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool {
	if self.IsOwner(userCred) {
		return true
	}
	return db.IsAdminAllowPerform(userCred, self, "change-bandwidth")
}
// PerformChangeBandwidth handles the "change-bandwidth" API action: it
// validates the requested bandwidth (positive integer), lets the provider
// factory veto the change for managed EIPs, then launches the asynchronous
// change-bandwidth task.
func (self *SElasticip) PerformChangeBandwidth(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) {
	if self.Status != api.EIP_STATUS_READY {
		return nil, httperrors.NewInvalidStatusError("cannot change bandwidth in status %s", self.Status)
	}
	bandwidth, err := data.Int("bandwidth")
	if err != nil || bandwidth <= 0 {
		return nil, httperrors.NewInputParameterError("Invalid bandwidth")
	}
	if self.IsManaged() {
		// NOTE(review): err here shadows the outer err; intentional since it
		// is consumed immediately.
		factory, err := self.GetProviderFactory()
		if err != nil {
			return nil, err
		}
		if err := factory.ValidateChangeBandwidth(self.AssociateId, bandwidth); err != nil {
			return nil, httperrors.NewInputParameterError(err.Error())
		}
	}
	err = self.StartEipChangeBandwidthTask(ctx, userCred, bandwidth)
	if err != nil {
		return nil, httperrors.NewGeneralError(err)
	}
	return nil, nil
}
// StartEipChangeBandwidthTask marks the EIP as changing bandwidth and
// schedules an EipChangeBandwidthTask carrying the requested value. Note the
// status is set before task creation, matching the original behavior.
func (self *SElasticip) StartEipChangeBandwidthTask(ctx context.Context, userCred mcclient.TokenCredential, bandwidth int64) error {
	self.SetStatus(userCred, api.EIP_STATUS_CHANGE_BANDWIDTH, "change bandwidth")
	params := jsonutils.NewDict()
	params.Add(jsonutils.NewInt(bandwidth), "bandwidth")
	task, err := taskman.TaskManager.NewTask(ctx, "EipChangeBandwidthTask", self, userCred, params, "", "", nil)
	if err == nil {
		task.ScheduleRun(nil)
		return nil
	}
	log.Errorf("create EipChangeBandwidthTask fail %s", err)
	return err
}
// DoChangeBandwidth persists the new bandwidth on the record, marks the EIP
// READY again, and writes an ops-log event recording the old ("obw") and new
// ("nbw") values.
//
// Fix: the READY status was previously set before the update error was
// checked, so a failed DB update still flipped the EIP back to READY. The
// status is now only updated after a successful write.
func (self *SElasticip) DoChangeBandwidth(userCred mcclient.TokenCredential, bandwidth int) error {
	changes := jsonutils.NewDict()
	changes.Add(jsonutils.NewInt(int64(self.Bandwidth)), "obw")
	_, err := db.Update(self, func() error {
		self.Bandwidth = bandwidth
		return nil
	})
	if err != nil {
		log.Errorf("DoChangeBandwidth update fail %s", err)
		return err
	}
	self.SetStatus(userCred, api.EIP_STATUS_READY, "finish change bandwidth")
	changes.Add(jsonutils.NewInt(int64(bandwidth)), "nbw")
	db.OpsLog.LogEvent(self, db.ACT_CHANGE_BANDWIDTH, changes, userCred)
	return nil
}
// EipUsage aggregates elastic-IP usage counters for a usage query.
type EipUsage struct {
	PublicIPCount int // EIPs in instance public-IP mode
	EIPCount      int // standalone EIPs
	EIPUsedCount  int // standalone EIPs that are currently associated
}

// Total returns the overall number of public addresses: instance public IPs
// plus standalone EIPs.
func (u EipUsage) Total() int {
	total := u.PublicIPCount
	total += u.EIPCount
	return total
}
// usageQByCloudEnv narrows a usage query to the given providers, brands and
// cloud environment via the shared CloudProviderFilter helper.
func (manager *SElasticipManager) usageQByCloudEnv(q *sqlchemy.SQuery, providers []string, brands []string, cloudEnv string) *sqlchemy.SQuery {
	return CloudProviderFilter(q, q.Field("manager_id"), providers, brands, cloudEnv)
}
// usageQByRanges narrows a usage query to the given range objects (e.g.
// regions or providers) via the shared RangeObjectsFilter helper.
func (manager *SElasticipManager) usageQByRanges(q *sqlchemy.SQuery, rangeObjs []db.IStandaloneModel) *sqlchemy.SQuery {
	return RangeObjectsFilter(q, rangeObjs, q.Field("cloudregion_id"), nil, q.Field("manager_id"), nil, nil)
}
// usageQ applies both usage filters — range objects first, then cloud
// environment — to the given query.
func (manager *SElasticipManager) usageQ(q *sqlchemy.SQuery, rangeObjs []db.IStandaloneModel, providers []string, brands []string, cloudEnv string) *sqlchemy.SQuery {
	return manager.usageQByCloudEnv(manager.usageQByRanges(q, rangeObjs), providers, brands, cloudEnv)
}
// TotalCount computes EIP usage for the given scope: instance public IPs
// (q1), standalone EIPs (q2), and standalone EIPs currently associated (q3),
// each filtered by the supplied ranges/providers/brands/cloudEnv and scoped
// to the owner's domain or project as appropriate.
// Count errors are deliberately ignored; failed counts report as zero.
func (manager *SElasticipManager) TotalCount(scope rbacutils.TRbacScope, ownerId mcclient.IIdentityProvider, rangeObjs []db.IStandaloneModel, providers []string, brands []string, cloudEnv string) EipUsage {
	usage := EipUsage{}
	q1 := manager.Query().Equals("mode", api.EIP_MODE_INSTANCE_PUBLICIP)
	q1 = manager.usageQ(q1, rangeObjs, providers, brands, cloudEnv)
	q2 := manager.Query().Equals("mode", api.EIP_MODE_STANDALONE_EIP)
	q2 = manager.usageQ(q2, rangeObjs, providers, brands, cloudEnv)
	q3 := manager.Query().Equals("mode", api.EIP_MODE_STANDALONE_EIP).IsNotEmpty("associate_id")
	q3 = manager.usageQ(q3, rangeObjs, providers, brands, cloudEnv)
	switch scope {
	case rbacutils.ScopeSystem:
		// do nothing
	case rbacutils.ScopeDomain:
		q1 = q1.Equals("domain_id", ownerId.GetProjectDomainId())
		q2 = q2.Equals("domain_id", ownerId.GetProjectDomainId())
		q3 = q3.Equals("domain_id", ownerId.GetProjectDomainId())
	case rbacutils.ScopeProject:
		q1 = q1.Equals("tenant_id", ownerId.GetProjectId())
		q2 = q2.Equals("tenant_id", ownerId.GetProjectId())
		q3 = q3.Equals("tenant_id", ownerId.GetProjectId())
	}
	usage.PublicIPCount, _ = q1.CountWithError()
	usage.EIPCount, _ = q2.CountWithError()
	usage.EIPUsedCount, _ = q3.CountWithError()
	return usage
}
// AllowPerformPurge restricts the purge action to admins with the
// corresponding permission; owners cannot purge.
func (self *SElasticip) AllowPerformPurge(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool {
	allowed := db.IsAdminAllowPerform(userCred, self, "purge")
	return allowed
}
// PerformPurge removes the local EIP record without touching the cloud
// resource. It is refused while the EIP is associated or while its cloud
// provider is still enabled (to avoid diverging from live cloud state).
func (self *SElasticip) PerformPurge(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) {
	if err := self.ValidateDeleteCondition(ctx); err != nil {
		return nil, err
	}
	if provider := self.GetCloudprovider(); provider != nil && provider.GetEnabled() {
		return nil, httperrors.NewInvalidStatusError("Cannot purge elastic_ip on enabled cloud provider")
	}
	return nil, self.RealDelete(ctx, userCred)
}
// DoPendingDelete soft-deletes an instance public IP via the base-class
// pending delete; for any other mode the EIP is merely dissociated from its
// resource and the record is kept.
func (self *SElasticip) DoPendingDelete(ctx context.Context, userCred mcclient.TokenCredential) {
	if self.Mode != api.EIP_MODE_INSTANCE_PUBLICIP {
		self.Dissociate(ctx, userCred)
		return
	}
	self.SVirtualResourceBase.DoPendingDelete(ctx, userCred)
}
// getCloudProviderInfo assembles region/provider display info for this EIP;
// the zone slot is left nil since EIPs are region-scoped here.
func (self *SElasticip) getCloudProviderInfo() SCloudProviderInfo {
	return MakeCloudProviderInfo(self.GetRegion(), nil, self.GetCloudprovider())
}
// GetUsages reports this EIP's quota usage (one regional EIP) for quota
// accounting; deleted or pending-deleted EIPs contribute nothing.
//
// Fix: the failure log message said "disk.GetQuotaKeys" — a copy-paste from
// the disk model — and now correctly identifies the eip.
func (eip *SElasticip) GetUsages() []db.IUsage {
	if eip.PendingDeleted || eip.Deleted {
		return nil
	}
	usage := SRegionQuota{Eip: 1}
	keys, err := eip.GetQuotaKeys()
	if err != nil {
		log.Errorf("eip.GetQuotaKeys fail %s", err)
		return nil
	}
	usage.SetKeys(keys)
	return []db.IUsage{
		&usage,
	}
}
// ListItemExportKeys extends the export query with columns for the requested
// keys: always the virtual-resource keys, plus the managed-resource keys
// when any of them were asked for.
func (manager *SElasticipManager) ListItemExportKeys(ctx context.Context,
	q *sqlchemy.SQuery,
	userCred mcclient.TokenCredential,
	keys stringutils2.SSortedStrings,
) (*sqlchemy.SQuery, error) {
	var err error
	q, err = manager.SVirtualResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys)
	if err != nil {
		return nil, errors.Wrap(err, "SVirtualResourceBaseManager.ListItemExportKeys")
	}
	if keys.ContainsAny(manager.SManagedResourceBaseManager.GetExportKeys()...) {
		q, err = manager.SManagedResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys)
		if err != nil {
			return nil, errors.Wrap(err, "SManagedResourceBaseManager.ListItemExportKeys")
		}
	}
	return q, nil
}
| {
ElasticipManager = &SElasticipManager{
SVirtualResourceBaseManager: db.NewVirtualResourceBaseManager(
SElasticip{},
"elasticips_tbl",
"eip",
"eips",
),
}
ElasticipManager.SetVirtualObject(ElasticipManager)
ElasticipManager.TableSpec().AddIndex(true, "associate_id", "associate_type")
} |
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Spdlog(CMakePackage):
    """Very fast, header only, C++ logging library"""

    homepage = "https://github.com/gabime/spdlog"
    url = "https://github.com/gabime/spdlog/archive/v0.9.0.tar.gz"

    version('1.8.1', sha256='5197b3147cfcfaa67dd564db7b878e4a4b3d9f3443801722b3915cdeced656cb')
    version('1.8.0', sha256='1e68e9b40cf63bb022a4b18cdc1c9d88eb5d97e4fd64fa981950a9cacf57a4bf')
    version('1.7.0', sha256='f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62')
    version('1.6.1', sha256='378a040d91f787aec96d269b0c39189f58a6b852e4cbf9150ccfacbe85ebbbfc')
    version('1.6.0', sha256='0421667c9f2fc78e6548d44f7bc5921be0f03e612df384294c16cedb93d967f8')
    version('1.5.0', sha256='b38e0bbef7faac2b82fed550a0c19b0d4e7f6737d5321d4fd8f216b80f8aee8a')
    version('1.4.2', sha256='821c85b120ad15d87ca2bc44185fa9091409777c756029125a02f81354072157')
    version('1.4.1', sha256='3291958eb54ed942d1bd3aef1b4f8ccf70566cbc04d34296ec61eb96ceb73cff')
    version('1.2.1', sha256='867a4b7cedf9805e6f76d3ca41889679054f7e5a3b67722fe6d0eae41852a767')
    version('1.2.0', sha256='0ba31b9e7f8e43a7be328ab0236d57810e5d4fc8a1a7842df665ae22d5cbd128')
    version('1.1.0', sha256='3dbcbfd8c07e25f5e0d662b194d3a7772ef214358c49ada23c044c4747ce8b19')
    version('1.0.0', sha256='90d5365121bcd2c41ce94dfe6a460e89507a2dfef6133fe5fad5bb35ac4ef0a1')
    version('0.17.0', sha256='94f74fd1b3344733d1db3de2ec22e6cbeb769f93a8baa0d4a22b1f62dc7369f8')
    version('0.16.3', sha256='b88d7be261d9089c817fc8cee6c000d69f349b357828e4c7f66985bc5d5360b8')
    version('0.16.2', sha256='2081e5df5e87402398847431e16b87c71dd5c4d632314bb976ace8161f4d32de')
    version('0.16.1', sha256='733260e1fbdcf1b3dc307fc585e4476240026de8be28eb905731d2ab0942deae')
    version('0.16.0', sha256='9e64e3b10c2a3c54dfff63aa056057cf1db8a5fd506b3d9cf77207511820baac')
    version('0.14.0', sha256='eb5beb4e53f4bfff5b32eb4db8588484bdc15a17b90eeefef3a9fc74fec1d83d')
    version('0.13.0', sha256='d798a6ca19165f0a18a43938859359269f5a07fd8e0eb83ab8674739c9e8f361')
    version('0.12.0', sha256='5cfd6a0b3182a88e1eb35bcb65a7ef9035140d7c73b16ba6095939dbf07325b9')
    version('0.11.0', sha256='8c0f1810fb6b7d23fef70c2ea8b6fa6768ac8d18d6e0de39be1f48865e22916e')
    version('0.10.0', sha256='fbbc53c1cc09b93b4c3d76b683bbe9315e2efe3727701227374dce6aa4264075')
    version('0.9.0', sha256='bbbe5a855c8b309621352921d650449eb2f741d35d55ec50fb4d8122ddfb8f01')

    # Shared-library builds only make sense from 1.4.0 on, when upstream
    # introduced the SPDLOG_BUILD_SHARED option (see cmake_args below).
    variant('shared', default=True,
            description='Build shared libraries (v1.4.0+)')

    depends_on('[email protected]:', type='build')

    def cmake_args(self):
        """Return extra CMake flags for the spdlog build.

        The SPDLOG_BUILD_* options only exist from spdlog 1.4.0 onward, so
        flags are added conditionally on the package version. Tests and
        examples are enabled only when Spack runs the test suite.
        """
        spec = self.spec
        args = []

        if self.spec.version >= Version('1.4.0'):
            args.extend([
                '-DSPDLOG_BUILD_SHARED:BOOL={0}'.format(
                    'ON' if '+shared' in spec else 'OFF'),
                # tests and examples
                '-DSPDLOG_BUILD_TESTS:BOOL={0}'.format(
                    'ON' if self.run_tests else 'OFF'),
                '-DSPDLOG_BUILD_EXAMPLE:BOOL={0}'.format(
                    'ON' if self.run_tests else 'OFF')
            ])

        return args
generate_symbolic_derivatives.py | #!/usr/bin/env python3
"""Generate symbolic derivatives as lambdified functions for gwbench.
When run as a script: generate all symbolic derivatives for tf2_tidal at all standard locations ahead of benchmarking.
Slurm gets upset when multiple tasks try to create the derivatives if there aren't any there already, so run in series.
Usage:
$ python3 generate_symbolic_derivatives.py
License:
BSD 3-Clause License
Copyright (c) 2022, James Gardner.
All rights reserved except for those for the gwbench code which remain reserved
by S. Borhanian; the gwbench code is included in this repository for convenience.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from typing import List, Set, Dict, Tuple, Optional, Union
import os
from gwbench import wf_class as wfc
from gwbench import detector_response_derivatives as drd
def | (
wf_model_name: str,
wf_other_var_dic: Optional[Dict[str, str]],
deriv_symbs_string: str,
locs: List[str],
use_rot: bool,
output_path: Optional[str] = None,
print_progress: bool = True,
) -> None:
"""Generate symbolic derivatives, from generate_lambdified_functions.py from gwbench.
Use network's wf_model_name, wf_other_var_dic, deriv_symbs_string, and use_rot.
Will print 'Done.' when finished unless all files already exist in which it will print as such.
Args:
wf_model_name: Waveform model name.
wf_other_var_dic: Waveform approximant.
deriv_symbs_string: Symbols to take derivatives wrt.
locs: Detector locations.
use_rot: Whether to account for Earth's rotation.
output_path: Output file path.
print_progress: Whether to print progress.
"""
# # how to print settings as a sanity check
# print('wf_model_name = \'{}\''.format(wf.wf_model_name))
# print('wf_other_var_dic = {}'.format(wf.wf_other_var_dic))
# print('deriv_symbs_string = \'{}\''.format(deriv_symbs_string))
# print('use_rot = %i'%use_rot)
# skip if derivatives already exist
file_names = [
"par_deriv_WFM_"
+ wf_model_name
+ "_VAR_"
+ deriv_symbs_string.replace(" ", "_")
+ "_DET_"
+ key
+ ".dat"
for key in locs
]
file_names.append(
"par_deriv_WFM_"
+ wf_model_name
+ "_VAR_"
+ deriv_symbs_string.replace(" ra", "")
.replace(" dec", "")
.replace(" psi", "")
.replace(" ", "_")
+ "_DET_"
+ "pl_cr"
+ ".dat"
)
path = "lambdified_functions/"
file_names_existing = [
file_name for file_name in file_names if os.path.isfile(path + file_name)
]
if len(file_names_existing) < len(file_names):
# if a file doesn't exist, generate them all again
# TODO: make this more efficient and just generate the missing files, or, do it in parallel
# waveform
wf = wfc.Waveform(wf_model_name, wf_other_var_dic)
# lambidified detector reponses and derivatives
drd.generate_det_responses_derivs_sym(
wf,
deriv_symbs_string,
locs=locs,
use_rot=use_rot,
user_lambdified_functions_path=output_path,
)
elif print_progress:
print("All lambdified derivatives already exist.")
if __name__ == "__main__":
# tf2_tidal is used as a replacement for numerical BNS simulations until they become well-conditioned
# TODO: make a user input file somewhere to unify the considered waveforms
wf_model_name, wf_other_var_dic = "tf2_tidal", None
deriv_symbs_string = "Mc eta DL tc phic iota ra dec psi"
# TODO: make this automated by using a locs list from networks.py
locs = ["H", "L", "V", "K", "I", "ET1", "ET2", "ET3", "C", "N", "S"]
use_rot = True
generate_symbolic_derivatives(
wf_model_name,
wf_other_var_dic,
deriv_symbs_string,
locs,
use_rot,
print_progress=False,
)
| generate_symbolic_derivatives |
toolPanelWrapper.js | "use strict";
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
var core_1 = require("@ag-grid-community/core");
var horizontalResizeComp_1 = require("./horizontalResizeComp");
var ToolPanelWrapper = /** @class */ (function (_super) {
__extends(ToolPanelWrapper, _super);
function | () {
return _super.call(this, ToolPanelWrapper.TEMPLATE) || this;
}
ToolPanelWrapper.prototype.getToolPanelId = function () {
return this.toolPanelId;
};
ToolPanelWrapper.prototype.setToolPanelDef = function (toolPanelDef) {
this.toolPanelId = toolPanelDef.id;
var params = {
api: this.gridOptionsWrapper.getApi(),
columnApi: this.gridOptionsWrapper.getColumnApi()
};
var componentPromise = this.userComponentFactory.newToolPanelComponent(toolPanelDef, params);
if (componentPromise == null) {
console.warn("ag-grid: error processing tool panel component " + toolPanelDef.id + ". You need to specify either 'toolPanel' or 'toolPanelFramework'");
return;
}
componentPromise.then(this.setToolPanelComponent.bind(this));
};
ToolPanelWrapper.prototype.setupResize = function () {
var resizeBar = this.resizeBar = new horizontalResizeComp_1.HorizontalResizeComp();
this.getContext().wireBean(resizeBar);
resizeBar.setElementToResize(this.getGui());
this.appendChild(resizeBar);
};
ToolPanelWrapper.prototype.setToolPanelComponent = function (compInstance) {
this.toolPanelCompInstance = compInstance;
this.appendChild(compInstance);
};
ToolPanelWrapper.prototype.getToolPanelInstance = function () {
return this.toolPanelCompInstance;
};
ToolPanelWrapper.prototype.setResizerSizerSide = function (side) {
var isRtl = this.gridOptionsWrapper.isEnableRtl();
var isLeft = side === 'left';
var inverted = isRtl ? isLeft : !isLeft;
this.resizeBar.setInverted(inverted);
};
ToolPanelWrapper.prototype.refresh = function () {
this.toolPanelCompInstance.refresh();
};
ToolPanelWrapper.TEMPLATE = "<div class=\"ag-tool-panel-wrapper\"/>";
__decorate([
core_1.Autowired("userComponentFactory")
], ToolPanelWrapper.prototype, "userComponentFactory", void 0);
__decorate([
core_1.Autowired("gridOptionsWrapper")
], ToolPanelWrapper.prototype, "gridOptionsWrapper", void 0);
__decorate([
core_1.PostConstruct
], ToolPanelWrapper.prototype, "setupResize", null);
return ToolPanelWrapper;
}(core_1.Component));
exports.ToolPanelWrapper = ToolPanelWrapper;
//# sourceMappingURL=toolPanelWrapper.js.map | ToolPanelWrapper |
lib.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The purpose of the JsonRpcClient presented here is to provide a lightweight and secure
//! JSON RPC client to talk to the JSON RPC service offered by Libra Full Nodes. This is useful
//! for various security-critical components (e.g., the secure key manager), as it allows
//! interaction with the Libra blockchain in a minimal and secure manner.
//!
//! Note: While a JSON RPC client implementation already exists in the Libra codebase, this
//! provides a simpler and (hopefully) more secure implementation with fewer dependencies.
#![forbid(unsafe_code)]
use hex::FromHexError;
use libra_types::{
account_address::AccountAddress, account_state::AccountState,
account_state_blob::AccountStateBlob, transaction::SignedTransaction,
};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::{convert::TryFrom, io};
use thiserror::Error;
use ureq::Response;
/// Various constants for the JSON RPC client implementation
const JSON_RPC_VERSION: &str = "2.0";
const REQUEST_TIMEOUT: u64 = 10_000;
#[derive(Debug, Error, PartialEq)]
pub enum Error {
#[error("JSON RPC call returned a custom internal error: {0}")]
InternalRPCError(String),
#[error("Data does not exist. Missing data: {0}")]
MissingData(String),
#[error("JSON RPC call failed with response: {0}")]
RPCFailure(String),
#[error("Serialization error: {0}")]
SerializationError(String),
#[error("Unknown error: {0}")]
UnknownError(String),
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Self {
Self::UnknownError(format!("{}", error))
}
}
impl From<lcs::Error> for Error {
fn from(error: lcs::Error) -> Self {
Self::SerializationError(format!("{}", error))
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Self {
Self::SerializationError(format!("{}", error))
}
}
impl From<FromHexError> for Error {
fn from(error: FromHexError) -> Self {
Self::SerializationError(format!("{}", error))
}
}
/// Provides a lightweight JsonRpcClient implementation.
#[derive(Clone)]
pub struct JsonRpcClient {
host: String,
}
impl JsonRpcClient {
pub fn new(host: String) -> Self {
Self { host }
}
/// Submits a signed transaction to the Libra blockchain via the JSON RPC API.
pub fn submit_signed_transaction(
&self,
signed_transaction: SignedTransaction,
) -> Result<(), Error> {
let response = self.submit_transaction(signed_transaction)?;
match response.status() {
200 => {
let response = &response.into_string()?;
if let Ok(failure_response) =
serde_json::from_str::<JSONRpcFailureResponse>(response)
{
Err(Error::InternalRPCError(format!("{:?}", failure_response)))
} else {
let _submit_response =
serde_json::from_str::<SubmitTransactionResponse>(response)?;
Ok(())
}
}
_ => Err(Error::RPCFailure(response.into_string()?)),
}
}
/// Returns the associated AccountState for a specific account at a given version height
/// using the JSON RCP API.
pub fn get_account_state(
&self,
account: AccountAddress,
version: Option<u64>,
) -> Result<AccountState, Error> {
let response = self.get_account_state_with_proof(account, version, version)?;
match response.status() {
200 => {
let response = &response.into_string()?;
if let Ok(failure_response) =
serde_json::from_str::<JSONRpcFailureResponse>(response)
{
Err(Error::InternalRPCError(format!("{:?}", failure_response)))
} else if let Some(blob_bytes) =
serde_json::from_str::<AccountStateWithProofResponse>(response)?
.result
.blob
{
let account_state_blob = AccountStateBlob::from(lcs::from_bytes::<Vec<u8>>(
&*blob_bytes.into_bytes()?,
)?);
if let Ok(account_state) = AccountState::try_from(&account_state_blob) {
Ok(account_state)
} else {
Err(Error::SerializationError(format!(
"Unable to convert account_state_blob to AccountState: {:?}",
account_state_blob
)))
}
} else {
Err(Error::MissingData("AccountState".into()))
}
}
_ => Err(Error::RPCFailure(response.into_string()?)),
}
}
// Executes the specified request method using the given parameters by contacting the JSON RPC
// server.
fn execute_request(&self, method: String, params: Vec<Value>) -> Response {
ureq::post(&self.host)
.timeout_connect(REQUEST_TIMEOUT)
.send_json(
json!({"jsonrpc": JSON_RPC_VERSION, "method": method, "params": params, "id": 0}),
)
}
// Sends a submit() request to the JSON RPC server using the given transaction.
fn submit_transaction(&self, signed_transaction: SignedTransaction) -> Result<Response, Error> {
let method = "submit".into();
let params = vec![Value::String(hex::encode(lcs::to_bytes(
&signed_transaction,
)?))];
Ok(self.execute_request(method, params))
}
// Sends a get_account_state_with_proof() request to the JSON RPC server for the specified
// account, optional version height and optional ledger_version height (for the proof).
fn get_account_state_with_proof(
&self,
account: AccountAddress,
version: Option<u64>,
ledger_version: Option<u64>,
) -> Result<Response, Error> {
let method = "get_account_state_with_proof".into();
let params = vec![
Value::String(account.to_string()),
json!(version),
json!(ledger_version),
];
Ok(self.execute_request(method, params))
}
}
/// Below is a sample response from a successful submit() JSON RPC call:
/// "{
/// "id": 0,
/// "jsonrpc": "2.0",
/// "result": null
/// }"
///
///
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct SubmitTransactionResponse {
id: u64,
jsonrpc: String,
result: Option<Value>,
}
/// Below is a sample response from a successful get_account_state_with_proof_call() JSON RPC call.
/// "{
/// "id": 0,
/// "jsonrpc": "2.0",
/// "result": {
/// "blob": "0100...",
/// "proof": {
/// "ledger_info_to_transaction_info_proof": "00..",
/// "transaction_info": "200000000000000000...<truncated>...ffffffffffffffff",
/// "transaction_info_to_account_proof": "0000..."
/// },
/// "version": 0
/// }
/// }"
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct AccountStateWithProofResponse {
id: u64,
jsonrpc: String,
result: AccountStateResponse,
}
/// In practice this represents an AccountStateWithProof, however, we only decode the relevant
/// fields here.
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct AccountStateResponse {
version: u64,
blob: Option<Bytes>,
}
/// Below is a sample response from a failed JSON RPC call:
/// "{
/// "error": {
/// "code": -32000,
/// "data": null,
/// "message": "Server error: send failed because receiver is gone"
/// },
/// "id": 0,
/// "jsonrpc: "2.0"
/// }"
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct JSONRpcFailureResponse {
id: u64,
jsonrpc: String,
error: JsonRpcError,
}
/// A custom error returned by the JSON RPC server for API calls that fail internally (e.g.,
/// an internal error during execution on the server side).
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct JsonRpcError {
code: i16,
message: String,
data: Option<Value>,
}
/// Represents a vector of bytes using hex encoding.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct Bytes(pub String);
impl Bytes {
pub fn into_bytes(self) -> Result<Vec<u8>, Error> {
Ok(hex::decode(self.0)?)
}
}
impl From<&[u8]> for Bytes {
fn from(bytes: &[u8]) -> Self {
Self(hex::encode(bytes))
}
}
impl From<&Vec<u8>> for Bytes {
fn from(bytes: &Vec<u8>) -> Self {
Self(hex::encode(bytes))
}
}
#[cfg(test)]
mod test {
use super::*;
use anyhow::Result;
use futures::{channel::mpsc::channel, StreamExt};
use libra_config::utils;
use libra_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey, Uniform};
use libra_json_rpc::test_bootstrap;
use libra_types::{
account_address::AccountAddress,
account_config::{AccountResource, BalanceResource},
account_state::AccountState,
account_state_blob::{AccountStateBlob, AccountStateWithProof},
block_info::BlockInfo,
contract_event::ContractEvent,
epoch_change::EpochChangeProof,
event::{EventHandle, EventKey},
ledger_info::{LedgerInfo, LedgerInfoWithSignatures},
mempool_status::{MempoolStatus, MempoolStatusCode},
proof::{
AccountStateProof, AccumulatorConsistencyProof, AccumulatorProof, SparseMerkleProof,
TransactionInfoWithProof,
},
test_helpers::transaction_test_helpers::get_test_signed_txn,
transaction::{
SignedTransaction, TransactionInfo, TransactionListWithProof, TransactionWithProof,
Version,
},
vm_error::StatusCode,
};
use libradb::errors::LibraDbError::NotFound;
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc};
use storage_interface::{DbReader, StartupInfo, TreeState};
use tokio::runtime::Runtime;
use vm_validator::{
mocks::mock_vm_validator::MockVMValidator, vm_validator::TransactionValidation,
};
#[test]
fn test_submit_transaction() {
let mock_db = create_empty_mock_db();
let (client, _server) = create_client_and_server(mock_db, true);
let signed_transaction = generate_signed_transaction();
// Ensure transaction submitted and validated successfully
let result = client.submit_signed_transaction(signed_transaction);
assert!(result.is_ok());
}
#[test]
fn test_submit_transaction_failure() {
let mock_db = create_empty_mock_db();
// When creating the JSON RPC server, specify 'mock_validator=false' to prevent a vm validator
// being passed to the server. This will cause any transaction submission requests to the JSON
// RPC server to fail, thus causing the server to return an error.
let (client, _server) = create_client_and_server(mock_db, false);
let signed_transaction = generate_signed_transaction();
// Ensure transaction submitted successfully
let result = client.submit_signed_transaction(signed_transaction);
assert!(result.is_err());
}
#[test]
fn test_get_account_state() {
// Create test account state data
let account = AccountAddress::random();
let account_state = create_test_account_state();
let version_height = 0;
let account_state_with_proof = create_test_state_with_proof(&account_state, version_height);
// Create an account to account_state_with_proof mapping
let mut map = BTreeMap::new();
map.insert(account, account_state_with_proof);
// Populate the test database with the test data and create the client/server
let mock_db = MockLibraDB::new(map);
let (client, _server) = create_client_and_server(mock_db, true);
// Ensure the client returns the correct AccountState
let result = client.get_account_state(account, Some(version_height));
assert!(result.is_ok());
assert_eq!(result.unwrap(), account_state);
}
#[test]
fn test_get_account_state_version_not_specified() {
// Create test account state data
let account = AccountAddress::random();
let account_state = create_test_account_state();
let account_state_with_proof = create_test_state_with_proof(&account_state, 0);
// Create an account to account_state_with_proof mapping
let mut map = BTreeMap::new();
map.insert(account, account_state_with_proof);
// Populate the test database with the test data and create the client/server
let mock_db = MockLibraDB::new(map);
let (client, _server) = create_client_and_server(mock_db, true);
// Ensure the client returns the latest AccountState (even though no version was specified)
let result = client.get_account_state(account, None);
assert!(result.is_ok());
assert_eq!(result.unwrap(), account_state);
}
#[test]
fn test_get_account_state_missing() {
let mock_db = create_empty_mock_db();
let (client, _server) = create_client_and_server(mock_db, true);
// Ensure the client returns an error for a missing AccountState
let account = AccountAddress::random();
let result = client.get_account_state(account, Some(0));
assert!(result.is_err());
}
#[test]
fn test_get_account_state_missing_blob() |
/// Generates and returns a (client, server) pair, where the client is a lightweight JSON client
/// and the server is a JSON server that serves the JSON RPC requests. The server communicates
/// with the given database to handle each JSON RPC request. If mock_validator is set to true,
/// the server is also given a mock vm validator to validate any submitted transactions.
fn create_client_and_server(db: MockLibraDB, mock_validator: bool) -> (JsonRpcClient, Runtime) {
let address = "0.0.0.0";
let port = utils::get_available_port();
let host = format!("{}:{}", address, port);
let (mp_sender, mut mp_events) = channel(1024);
let server = test_bootstrap(host.parse().unwrap(), Arc::new(db), mp_sender);
let url = format!("http://{}", host);
let client = JsonRpcClient::new(url);
if mock_validator {
// Provide a VMValidator to the runtime.
server.spawn(async move {
while let Some((txn, cb)) = mp_events.next().await {
let vm_status = MockVMValidator.validate_transaction(txn).unwrap().status();
let result = if vm_status.is_some() {
(MempoolStatus::new(MempoolStatusCode::VmError), vm_status)
} else {
(MempoolStatus::new(MempoolStatusCode::Accepted), None)
};
cb.send(Ok(result)).unwrap();
}
});
}
(client, server)
}
/// Generates an AccountStateProof for testing.
fn create_test_state_proof() -> AccountStateProof {
let transaction_info = TransactionInfo::new(
HashValue::zero(),
HashValue::zero(),
HashValue::zero(),
0,
StatusCode::UNKNOWN_STATUS,
);
AccountStateProof::new(
TransactionInfoWithProof::new(AccumulatorProof::new(vec![]), transaction_info),
SparseMerkleProof::new(None, vec![]),
)
}
/// Generates an AccountStateWithProof using the given AccountState and version height for
/// testing.
fn create_test_state_with_proof(
account_state: &AccountState,
version_height: u64,
) -> AccountStateWithProof {
AccountStateWithProof::new(
version_height,
Some(AccountStateBlob::try_from(account_state).unwrap()),
create_test_state_proof(),
)
}
/// Generates an AccountState for testing.
fn create_test_account_state() -> AccountState {
let account_resource = create_test_account_resource();
let balance_resource = create_test_balance_resource();
AccountState::try_from((&account_resource, &balance_resource)).unwrap()
}
/// Generates an AccountResource for testing.
fn create_test_account_resource() -> AccountResource {
AccountResource::new(
10,
vec![],
None,
None,
EventHandle::random_handle(100),
EventHandle::random_handle(100),
false,
5, // PARENT_VASP_ROLE_ID
)
}
/// Generates a BalanceResource for testing.
fn create_test_balance_resource() -> BalanceResource {
BalanceResource::new(100)
}
/// Generates and returns a (randomized) SignedTransaction for testing.
fn generate_signed_transaction() -> SignedTransaction {
let sender = AccountAddress::random();
let private_key = Ed25519PrivateKey::generate_for_testing();
get_test_signed_txn(sender, 0, &private_key, private_key.public_key(), None)
}
/// Returns an empty mock database for testing.
fn create_empty_mock_db() -> MockLibraDB {
MockLibraDB::new(BTreeMap::new())
}
// This offers a simple mock of LibraDB for testing.
#[derive(Clone)]
pub struct MockLibraDB {
account_states_with_proof: BTreeMap<AccountAddress, AccountStateWithProof>,
}
/// A mock libra database for test purposes.
impl MockLibraDB {
pub fn new(
account_states_with_proof: BTreeMap<AccountAddress, AccountStateWithProof>,
) -> Self {
Self {
account_states_with_proof,
}
}
}
/// We only require implementing a subset of these API calls for testing purposes. To keep
/// our code as minimal as possible, the unimplemented API calls simply return an error.
impl DbReader for MockLibraDB {
fn get_transactions(
&self,
_start_version: u64,
_limit: u64,
_ledger_version: u64,
_fetch_events: bool,
) -> Result<TransactionListWithProof> {
unimplemented!()
}
fn get_events(
&self,
_event_key: &EventKey,
_start: u64,
_ascending: bool,
_limit: u64,
) -> Result<Vec<(u64, ContractEvent)>> {
unimplemented!()
}
fn get_latest_account_state(
&self,
_address: AccountAddress,
) -> Result<Option<AccountStateBlob>> {
unimplemented!()
}
fn get_latest_ledger_info(&self) -> Result<LedgerInfoWithSignatures> {
Ok(LedgerInfoWithSignatures::new(
LedgerInfo::new(
BlockInfo::new(0, 0, HashValue::zero(), HashValue::zero(), 0, 0, None),
HashValue::zero(),
),
BTreeMap::new(),
))
}
fn get_startup_info(&self) -> Result<Option<StartupInfo>> {
unimplemented!()
}
fn get_txn_by_account(
&self,
_address: AccountAddress,
_seq_num: u64,
_ledger_version: u64,
_fetch_events: bool,
) -> Result<Option<TransactionWithProof>> {
unimplemented!()
}
fn get_state_proof_with_ledger_info(
&self,
_known_version: u64,
_ledger_info: LedgerInfoWithSignatures,
) -> Result<(EpochChangeProof, AccumulatorConsistencyProof)> {
unimplemented!()
}
fn get_state_proof(
&self,
_known_version: u64,
) -> Result<(
LedgerInfoWithSignatures,
EpochChangeProof,
AccumulatorConsistencyProof,
)> {
unimplemented!()
}
/// Return the associated AccountStateWithProof for the given account address. If the
/// AccountStateWithProof doesn't exist, an error is returned.
fn get_account_state_with_proof(
&self,
address: AccountAddress,
_version: Version,
_ledger_version: Version,
) -> Result<AccountStateWithProof> {
if let Some(account_state_proof) = self.account_states_with_proof.get(&address) {
Ok(account_state_proof.clone())
} else {
Err(NotFound("AccountStateWithProof".into()).into())
}
}
fn get_account_state_with_proof_by_version(
&self,
_address: AccountAddress,
_version: u64,
) -> Result<(Option<AccountStateBlob>, SparseMerkleProof)> {
unimplemented!()
}
fn get_latest_state_root(&self) -> Result<(u64, HashValue)> {
unimplemented!()
}
fn get_latest_tree_state(&self) -> Result<TreeState> {
unimplemented!()
}
fn get_epoch_change_ledger_infos(&self, _: u64, _: u64) -> Result<EpochChangeProof> {
unimplemented!()
}
fn get_ledger_info(&self, _: u64) -> Result<LedgerInfoWithSignatures> {
unimplemented!()
}
}
}
| {
// Create test account state data
let account = AccountAddress::random();
let version_height = 0;
let account_state_proof = create_test_state_proof();
let account_state_with_proof =
AccountStateWithProof::new(version_height, None, account_state_proof);
// Create an account to account_state_with_proof mapping
let mut map = BTreeMap::new();
map.insert(account, account_state_with_proof);
// Populate the test database with the test data and create the client/server
let mock_db = MockLibraDB::new(map);
let (client, _server) = create_client_and_server(mock_db, true);
// Ensure the client returns an error for the missing AccountState
let result = client.get_account_state(account, Some(version_height));
assert!(result.is_err());
} |
custom_terms_of_service_settings.tsx | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
import React from 'react';
import {FormattedMessage} from 'react-intl';
import {AdminConfig, ClientLicense} from 'mattermost-redux/types/config';
import {TermsOfService} from 'mattermost-redux/types/terms_of_service';
import AdminSettings, {BaseProps, BaseState} from 'components/admin_console/admin_settings';
import SettingsGroup from 'components/admin_console/settings_group.jsx';
import BooleanSetting from 'components/admin_console/boolean_setting';
import TextSetting from 'components/admin_console/text_setting';
import FormattedMarkdownMessage from 'components/formatted_markdown_message.jsx';
import LoadingScreen from 'components/loading_screen';
import {Constants} from 'utils/constants';
type Props = BaseProps & {
actions: {
getTermsOfService: () => Promise<{data: TermsOfService}>;
createTermsOfService: (text: string) => Promise<{data: TermsOfService; error?: Error}>;
};
config: AdminConfig;
license: ClientLicense;
setNavigationBlocked: () => void;
/*
* Action to save config file
*/
updateConfig: () => void;
};
type State = BaseState & {
termsEnabled?: boolean;
reAcceptancePeriod?: number;
loadingTermsText: boolean;
receivedTermsText: string;
termsText: string;
saveNeeded: boolean;
saving: boolean;
serverError: JSX.Element | string | null;
errorTooltip: boolean;
}
export default class CustomTermsOfServiceSettings extends AdminSettings<Props, State> {
constructor(props: Props) {
super(props);
this.state = {
termsEnabled: props.config.SupportSettings?.CustomTermsOfServiceEnabled,
reAcceptancePeriod: props.config.SupportSettings?.CustomTermsOfServiceReAcceptancePeriod,
loadingTermsText: true,
receivedTermsText: '',
termsText: '',
saveNeeded: false,
saving: false,
serverError: null,
errorTooltip: false,
};
}
getStateFromConfig(config: Props['config']) {
return {
termsEnabled: config.SupportSettings?.CustomTermsOfServiceEnabled,
reAcceptancePeriod: this.parseIntNonZero(String(config.SupportSettings?.CustomTermsOfServiceReAcceptancePeriod), Constants.DEFAULT_TERMS_OF_SERVICE_RE_ACCEPTANCE_PERIOD),
};
}
getConfigFromState = (config: Props['config']) => {
if (config && config.SupportSettings) {
config.SupportSettings.CustomTermsOfServiceEnabled = Boolean(this.state.termsEnabled);
config.SupportSettings.CustomTermsOfServiceReAcceptancePeriod = this.parseIntNonZero(String(this.state.reAcceptancePeriod), Constants.DEFAULT_TERMS_OF_SERVICE_RE_ACCEPTANCE_PERIOD);
}
return config;
}
componentDidMount() {
this.getTermsOfService();
}
doSubmit = async (callback?: () => void) => {
this.setState({
saving: true,
serverError: null,
});
if (this.state.termsEnabled && (this.state.receivedTermsText !== this.state.termsText || !this.props.config?.SupportSettings?.CustomTermsOfServiceEnabled)) {
const result = await this.props.actions.createTermsOfService(this.state.termsText);
if (result.error) {
this.handleAPIError(result.error, callback);
return;
}
}
// clone config so that we aren't modifying data in the stores
let config = JSON.parse(JSON.stringify(this.props.config));
config = this.getConfigFromState(config);
const {data, error} = await this.props.updateConfig(config);
if (data) {
this.setState(this.getStateFromConfig(data));
this.setState({
saveNeeded: false,
saving: false,
});
this.props.setNavigationBlocked(false);
if (callback) {
callback();
}
if (this.handleSaved) {
this.handleSaved(config);
}
} else if (error) {
this.handleAPIError({id: error.server_error_id, ...error}, callback, config);
}
};
handleAPIError = (err: any, callback?: (() => void), config?: Props['config']) => {
this.setState({
saving: false,
serverError: err.message,
serverErrorId: err.id,
});
if (callback) {
callback();
}
if (this.handleSaved && config) {
this.handleSaved(config as AdminConfig);
}
};
getTermsOfService = async () => {
this.setState({loadingTermsText: true});
const {data} = await this.props.actions.getTermsOfService();
if (data) {
this.setState({
termsText: data.text,
receivedTermsText: data.text,
});
}
this.setState({loadingTermsText: false});
};
handleTermsTextChange = (id: string, value: boolean) => {
this.handleChange('termsText', value);
};
handleTermsEnabledChange = (id: string, value: boolean) => {
this.handleChange('termsEnabled', value); | };
renderTitle() {
return (
<FormattedMessage
id='admin.support.termsOfServiceTitle'
defaultMessage='Custom Terms of Service (Beta)'
/>
);
}
renderSettings = () => {
if (this.state.loadingTermsText) {
return <LoadingScreen/>;
}
return (
<SettingsGroup>
<BooleanSetting
key={'customTermsOfServiceEnabled'}
id={'SupportSettings.CustomTermsOfServiceEnabled'}
label={
<FormattedMessage
id='admin.support.enableTermsOfServiceTitle'
defaultMessage='Enable Custom Terms of Service'
/>
}
helpText={
<FormattedMarkdownMessage
id='admin.support.enableTermsOfServiceHelp'
defaultMessage='When true, new users must accept the terms of service before accessing any Mattermost teams on desktop, web or mobile. Existing users must accept them after login or a page refresh.\n \nTo update terms of service link displayed in account creation and login pages, go to [Site Configuration > Customization](../site_config/customization).'
/>
}
value={Boolean(this.state.termsEnabled)}
onChange={this.handleTermsEnabledChange}
setByEnv={this.isSetByEnv('SupportSettings.CustomTermsOfServiceEnabled')}
disabled={this.props.isDisabled || !(this.props.license.IsLicensed && this.props.license.CustomTermsOfService === 'true')}
/>
<TextSetting
key={'customTermsOfServiceText'}
id={'SupportSettings.CustomTermsOfServiceText'}
type={'textarea'}
label={
<FormattedMessage
id='admin.support.termsOfServiceTextTitle'
defaultMessage='Custom Terms of Service Text'
/>
}
helpText={
<FormattedMessage
id='admin.support.termsOfServiceTextHelp'
defaultMessage='Text that will appear in your custom Terms of Service. Supports Markdown-formatted text.'
/>
}
onChange={this.handleTermsTextChange}
setByEnv={this.isSetByEnv('SupportSettings.CustomTermsOfServiceText')}
value={this.state.termsText}
maxLength={Constants.MAX_TERMS_OF_SERVICE_TEXT_LENGTH}
disabled={this.props.isDisabled || !this.state.termsEnabled}
/>
<TextSetting
key={'customTermsOfServiceReAcceptancePeriod'}
id={'SupportSettings.CustomTermsOfServiceReAcceptancePeriod'}
type={'number'}
label={
<FormattedMessage
id='admin.support.termsOfServiceReAcceptanceTitle'
defaultMessage='Re-Acceptance Period:'
/>
}
helpText={
<FormattedMessage
id='admin.support.termsOfServiceReAcceptanceHelp'
defaultMessage='The number of days before Terms of Service acceptance expires, and the terms must be re-accepted.'
/>
}
value={this.state.reAcceptancePeriod || ''}
onChange={this.handleReAcceptancePeriodChange}
setByEnv={this.isSetByEnv('SupportSettings.CustomTermsOfServiceReAcceptancePeriod')}
disabled={this.props.isDisabled || !this.state.termsEnabled}
/>
</SettingsGroup>
);
}
} | };
handleReAcceptancePeriodChange = (id: string, value: boolean) => {
this.handleChange('reAcceptancePeriod', value); |
plugin.go | /*
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"magma/orc8r/cloud/go/plugin"
"magma/orc8r/cloud/go/pluginimpl"
)
// plugins must implement a main - these are expected to be empty
func | () {}
// GetOrchestratorPlugin is a function that all modules are expected to provide
// which returns an instance of the module's OrchestratorPlugin implementation
func GetOrchestratorPlugin() plugin.OrchestratorPlugin {
return &pluginimpl.BaseOrchestratorPlugin{}
}
| main |
admin.py | from django.contrib import admin
from ballots.models import Poll, Category, CategoryItem, Ballot, Vote, Answer, AnswerItem
# class ItemInline(admin.TabularInline):
# model = CategoryItem
#
#
# class CategoryAdmin(admin.ModelAdmin):
# inlines = [ItemInline,
# ]
class CategoryInline(admin.TabularInline):
model = Category
class PollAdmin(admin.ModelAdmin):
inlines = [CategoryInline,
]
class VoteInline(admin.TabularInline):
model = Vote
class | (admin.ModelAdmin):
inlines = [VoteInline,
]
admin.site.register(Poll, PollAdmin)
admin.site.register(Category)
admin.site.register(CategoryItem)
admin.site.register(Ballot, BallotAdmin)
admin.site.register(Vote)
admin.site.register(Answer)
admin.site.register(AnswerItem) | BallotAdmin |
controllerrevision.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by lister-gen
package internalversion
import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
apps "k8s.io/kubernetes/pkg/apis/apps"
)
// ControllerRevisionLister helps list ControllerRevisions.
type ControllerRevisionLister interface {
// List lists all ControllerRevisions in the indexer.
List(selector labels.Selector) (ret []*apps.ControllerRevision, err error)
// ControllerRevisions returns an object that can list and get ControllerRevisions.
ControllerRevisions(namespace string) ControllerRevisionNamespaceLister
ControllerRevisionListerExpansion
}
// controllerRevisionLister implements the ControllerRevisionLister interface.
type controllerRevisionLister struct {
indexer cache.Indexer
}
// NewControllerRevisionLister returns a new ControllerRevisionLister.
func | (indexer cache.Indexer) ControllerRevisionLister {
return &controllerRevisionLister{indexer: indexer}
}
// List lists all ControllerRevisions in the indexer.
func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*apps.ControllerRevision, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*apps.ControllerRevision))
})
return ret, err
}
// ControllerRevisions returns an object that can list and get ControllerRevisions.
func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister {
return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// ControllerRevisionNamespaceLister helps list and get ControllerRevisions.
type ControllerRevisionNamespaceLister interface {
// List lists all ControllerRevisions in the indexer for a given namespace.
List(selector labels.Selector) (ret []*apps.ControllerRevision, err error)
// Get retrieves the ControllerRevision from the indexer for a given namespace and name.
Get(name string) (*apps.ControllerRevision, error)
ControllerRevisionNamespaceListerExpansion
}
// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister
// interface.
type controllerRevisionNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all ControllerRevisions in the indexer for a given namespace.
func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*apps.ControllerRevision, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*apps.ControllerRevision))
})
return ret, err
}
// Get retrieves the ControllerRevision from the indexer for a given namespace and name.
func (s controllerRevisionNamespaceLister) Get(name string) (*apps.ControllerRevision, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(apps.Resource("controllerrevision"), name)
}
return obj.(*apps.ControllerRevision), nil
}
| NewControllerRevisionLister |
main.rs | // Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![deny(warnings)]
// This isn't a external crate so we don't worry about docs
// #![deny(missing_docs)]
#![recursion_limit = "1024"]
#![deny(
clippy::all,
clippy::unwrap_used,
clippy::unnecessary_unwrap,
clippy::pedantic | )]
#[macro_use]
extern crate serde_derive;
// This is silly but serde is forcing you to import serde if you want serde_derive
#[allow(unused_extern_crates)]
extern crate serde;
#[macro_use]
extern crate log;
extern crate rental;
use crate::errors::{Error, Result};
use crate::util::{load_config, FormatKind, TremorApp};
use async_std::task;
use clap::App;
use clap::{load_yaml, AppSettings, ArgMatches};
use std::fs::File;
use std::path::{Path, PathBuf};
use tremor_common::file;
// use tremor_runtime::errors;
mod alloc;
mod api;
mod completions;
mod debug;
mod doc;
mod env;
mod errors;
// mod explain;
mod job;
mod report;
mod run;
mod server;
pub(crate) mod status;
mod test;
mod util;
pub(crate) fn open_file<S>(path: &S, base: Option<&String>) -> Result<File>
where
S: AsRef<Path> + ?Sized,
{
match file::open(&path) {
Ok(f) => Ok(f),
Err(tremor_common::Error::FileOpen(io_error, msg)) => {
let msg = if let Some(base) = base {
let mut p = PathBuf::from(base);
p.push(path);
if let Ok(f) = file::open(&p) {
return Ok(f);
}
format!(
"Failed to open `{}` (or `{}`)",
path.as_ref().display(),
p.to_str().unwrap_or("<unknown>")
)
} else {
msg
};
Err(tremor_common::Error::FileOpen(io_error, msg).into())
}
Err(e) => Err(e.into()),
}
}
#[cfg(not(tarpaulin_include))]
fn main() -> Result<()> {
let yaml = load_yaml!("./cli.yaml");
let long_version = tremor_runtime::version::long_ver();
let app = App::from(yaml);
let app = app.version(long_version.as_str());
let app = app.global_setting(AppSettings::ColoredHelp);
let app = app.global_setting(AppSettings::ColorAlways);
tremor_runtime::functions::load()?;
let matches = app.clone().get_matches();
unsafe {
// We know that instance will only get set once at
// the very beginning nothing can access it yet,
// this makes it allowable to use unsafe here.
let s = matches
.value_of("instance")
.ok_or_else(|| Error::from("instance argument missing"))?;
// ALLOW: We do this on startup and forget the memory once we drop it, that's on purpose
let forget_s = std::mem::transmute(&s as &str);
// This means we're going to LEAK this memory, however
// it is fine since as we do actually need it for the
// rest of the program execution.
tremor_runtime::metrics::INSTANCE = forget_s;
}
if let Err(e) = run(app, &matches) {
eprintln!("{}", e);
// ALLOW: this is supposed to exit
std::process::exit(1);
}
Ok(())
}
fn run(mut app: App, cmd: &ArgMatches) -> Result<()> {
let format = match &cmd.value_of("format") {
Some("json") => FormatKind::Json,
_ => FormatKind::Yaml,
};
match cmd
.subcommand_name()
.map(|name| (name, cmd.subcommand_matches(name)))
{
Some(("explain", Some(_matches))) => Err("Not yet implemented".into()),
Some(("completions", Some(matches))) => completions::run_cmd(app, matches),
Some(("server", Some(matches))) => server::run_cmd(app, matches),
Some(("run", Some(matches))) => run::run_cmd(&matches),
Some(("doc", Some(matches))) => doc::run_cmd(&matches),
Some(("api", Some(matches))) => task::block_on(api::run_cmd(
TremorApp {
format,
config: load_config()?,
},
&matches,
)),
Some(("dbg", Some(matches))) => debug::run_cmd(&matches),
Some(("test", Some(matches))) => test::run_cmd(&matches),
_ => app
.print_long_help()
.map_err(|e| Error::from(format!("failed to print help: {}", e))),
}
} | |
config.py | import os
from base64 import b64encode
from socket import gethostname, gethostbyname
class Config:
| SECRET_KEY = os.environ.get("SECRET_KEY") or b64encode(os.urandom(24)).decode()
SERVER_URL = os.environ.get("SERVER_URL") or f"http://{gethostbyname(gethostname())}:8000"
CI_SECURITY = True if os.environ.get("ENVIRONMENT") == "prod" else False
DOWNLOAD_PATH = os.path.join(os.path.abspath(os.sep), "tmp")
BUCKET = "tpf-listings"
SESSION_COOKIE_SECURE = CI_SECURITY
TOKEN_EXPIRY = 3600 # 1 hour = 3600 seconds
REG_BITS: int = 32
REG_MAX: int = (1 << REG_BITS) - 1 # 0xFFFFFFFF
REGISTERS: tuple = ("R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8", "R9", "R10", "R11", "R12", "R13", "R14",
"R15")
ECB_LEVELS: tuple = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F")
DEFAULT_MACROS: tuple = ("WA0AA", "EB0EB", "GLOBAL", "MI0MI")
AAAPNR: str = "AAAAAA"
PNR_KEYS = [
("name", "NAME"),
("hfax", "HFAX"),
("gfax", "GFAX"),
("fqtv", "FQTV"),
("itin", "ITIN"),
("subs_card_seg", "SUBS_CARD_SEG"),
("group_plan", "GROUP_PLAN"),
("rcvd_from", "RCVD_FROM"),
("phone", "PHONE"),
("record_loc", "RECORD_LOC"),
("remarks", "REMARKS"),
("header", "HEADER"),
("prs_seats", "PRS_SEATS"),
("vcr_coupon", "VCR_COUPON"),
("ice_data", "ICE_DATA"),
] |
|
audio_to_text.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from typing import Callable, Dict, List, Optional, Union
import braceexpand
import torch
import webdataset as wd
from torch.nn import functional as F
from nemo.collections.asr.data import vocabs
from nemo.collections.asr.parts import collections, parsers
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.core.classes import Dataset, IterableDataset
from nemo.core.neural_types import *
from nemo.utils import logging
from nemo.utils.decorators import experimental
__all__ = [
'AudioToCharDataset',
'AudioToCharWithDursDataset',
'AudioToBPEDataset',
'AudioLabelDataset',
'TarredAudioToCharDataset',
'TarredAudioToBPEDataset',
]
def _speech_collate_fn(batch, pad_id):
"""collate batch of audio sig, audio len, tokens, tokens len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
"""
_, audio_lengths, _, tokens_lengths = zip(*batch)
max_audio_len = 0
has_audio = audio_lengths[0] is not None
if has_audio:
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
audio_signal, tokens = [], []
for sig, sig_len, tokens_i, tokens_i_len in batch:
if has_audio:
sig_len = sig_len.item()
if sig_len < max_audio_len:
pad = (0, max_audio_len - sig_len)
sig = torch.nn.functional.pad(sig, pad)
audio_signal.append(sig)
tokens_i_len = tokens_i_len.item()
if tokens_i_len < max_tokens_len:
pad = (0, max_tokens_len - tokens_i_len)
tokens_i = torch.nn.functional.pad(tokens_i, pad, value=pad_id)
tokens.append(tokens_i)
if has_audio:
audio_signal = torch.stack(audio_signal)
audio_lengths = torch.stack(audio_lengths)
else:
audio_signal, audio_lengths = None, None
tokens = torch.stack(tokens)
tokens_lengths = torch.stack(tokens_lengths)
return audio_signal, audio_lengths, tokens, tokens_lengths
class _AudioTextDataset(Dataset):
"""
Dataset that loads tensors via a json file containing paths to audio files, transcripts, and durations (in seconds).
Each new line is a different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.
labels: String containing all the possible characters to map to
sample_rate (int): Sample rate to resample loaded audio to
int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor object used to augment loaded
audio
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include in dataset
max_utts: Limit number of utterances
blank_index: blank character index, default = -1
unk_index: unk_character index, default = -1
normalize: whether to normalize transcript text (default): True
bos_id: Id of beginning of sequence symbol to append if not None
eos_id: Id of end of sequence symbol to append if not None
load_audio: Boolean flag indicate whether do or not load audio
add_misc: True if add additional info dict.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'audio_signal': NeuralType(
('B', 'T'),
AudioSignal(freq=self._sample_rate) # TODO: self._sample_rate is not defined anywhere
if self is not None and hasattr(self, '_sample_rate')
else AudioSignal(),
),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
'transcripts': NeuralType(('B', 'T'), LabelsType()),
'transcript_length': NeuralType(tuple('B'), LengthsType()),
}
def __init__(
self,
manifest_filepath: str,
parser: Union[str, Callable],
sample_rate: int,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
max_duration: Optional[int] = None,
min_duration: Optional[int] = None,
max_utts: int = 0,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
load_audio: bool = True,
add_misc: bool = False,
):
self.parser = parser
self.collection = collections.ASRAudioText(
manifests_files=manifest_filepath.split(','),
parser=parser,
min_duration=min_duration,
max_duration=max_duration,
max_number=max_utts,
)
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
self.trim = trim
self.eos_id = eos_id
self.bos_id = bos_id
self.pad_id = pad_id
self.load_audio = load_audio
self._add_misc = add_misc
def __getitem__(self, index):
sample = self.collection[index]
if self.load_audio:
offset = sample.offset
if offset is None:
offset = 0
features = self.featurizer.process(
sample.audio_file, offset=offset, duration=sample.duration, trim=self.trim, orig_sr=sample.orig_sr
)
f, fl = features, torch.tensor(features.shape[0]).long()
else:
f, fl = None, None
t, tl = sample.text_tokens, len(sample.text_tokens)
if self.bos_id is not None:
t = [self.bos_id] + t
tl += 1
if self.eos_id is not None:
t = t + [self.eos_id]
tl += 1
output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long()
if self._add_misc:
misc = dict()
misc['id'] = sample.id
misc['text_raw'] = sample.text_raw
misc['speaker'] = sample.speaker
output = (output, misc)
return output
def __len__(self):
return len(self.collection)
def _collate_fn(self, batch):
return _speech_collate_fn(batch, pad_id=self.pad_id)
@experimental
class AudioToCharDataset(_AudioTextDataset):
"""
Dataset that loads tensors via a json file containing paths to audio
files, transcripts, and durations (in seconds). Each new line is a
different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath":
"/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the
transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest json as described above. Can
be comma-separated paths.
labels: String containing all the possible characters to map to
sample_rate (int): Sample rate to resample loaded audio to
int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include
in dataset
max_utts: Limit number of utterances
blank_index: blank character index, default = -1
unk_index: unk_character index, default = -1
normalize: whether to normalize transcript text (default): True
bos_id: Id of beginning of sequence symbol to append if not None
eos_id: Id of end of sequence symbol to append if not None
load_audio: Boolean flag indicate whether do or not load audio
add_misc: True if add additional info dict.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'audio_signal': NeuralType(
('B', 'T'),
AudioSignal(freq=self._sample_rate)
if self is not None and hasattr(self, '_sample_rate')
else AudioSignal(),
),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
'transcripts': NeuralType(('B', 'T'), LabelsType()),
'transcript_length': NeuralType(tuple('B'), LengthsType()),
}
def __init__(
self,
manifest_filepath: str,
labels: Union[str, List[str]],
sample_rate: int,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
max_utts: int = 0,
blank_index: int = -1,
unk_index: int = -1,
normalize: bool = True,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
load_audio: bool = True,
parser: Union[str, Callable] = 'en',
add_misc: bool = False,
):
self.labels = labels
parser = parsers.make_parser(
labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
)
super().__init__(
manifest_filepath=manifest_filepath,
parser=parser,
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
load_audio=load_audio,
add_misc=add_misc,
)
class AudioToCharWithDursDataset(AudioToCharDataset):
"""
Dataset that loads tensors via a json file containing paths to audio
files, transcripts, and durations (in seconds). Each new line is a
different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath":
"/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the
transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Additionally, user provides path to precomputed durations, which is a pickled python dict with 'tags' and 'durs'
keys, both of which are list of examples values. Tag is a unique example identifier, which is a wav filename
without suffix. Durations are an additional tuple of two tensors: graphemes durations and blanks durations.
Example below:
{'tags': ['LJ050-0234', 'LJ019-0373'],
'durs': [(graphemes_durs0, blanks_durs0), (graphemes_durs1, blanks_durs1)]}
Args:
**kwargs: Passed to AudioToCharDataset constructor.
durs_path (str): String path to pickled list of '[(tag, durs)]' durations location.
rep (bool): True if repeat text graphemes according to durs.
vocab: Vocabulary config (parser + set of graphemes to use). Constructor propagates these to
`self.make_vocab` function call to build a complete vocabulary.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports."""
return {
'audio': NeuralType(
('B', 'T'),
AudioSignal(freq=self._sample_rate)
if self is not None and hasattr(self, '_sample_rate')
else AudioSignal(),
),
'audio_len': NeuralType(('B',), LengthsType()),
'text': NeuralType(('B', 'T'), LabelsType()),
'text_len': NeuralType(('B',), LengthsType()),
'durs': NeuralType(('B', 'T'), LengthsType()),
}
@staticmethod
def make_vocab(notation='chars', punct=True, spaces=False, stresses=False):
"""Constructs vocabulary from given parameters.
Args:
notation (str): Either 'chars' or 'phonemes' as general notation.
punct (bool): True if reserve grapheme for basic punctuation.
spaces (bool): True if prepend spaces to every punctuation symbol.
stresses (bool): True if use phonemes codes with stresses (0-2).
Returns:
(vocabs.Base) Vocabulary
"""
if notation == 'chars':
vocab = vocabs.Chars(punct=punct, spaces=spaces)
elif notation == 'phonemes':
vocab = vocabs.Phonemes(punct=punct, stresses=stresses, spaces=spaces)
else:
raise ValueError("Unsupported vocab type.")
return vocab
def __init__(self, **kwargs):
durs_path = kwargs.pop('durs_path')
rep = kwargs.pop('rep', False)
self.vocab = self.make_vocab(**kwargs.pop('vocab', {}))
kwargs.setdefault('labels', [])
super().__init__(**kwargs)
pth = torch.load(durs_path)
tag2d = dict(zip(pth['tags'], pth['durs']))
durs = []
for i, e in enumerate(self.collection):
tag = os.path.splitext(os.path.basename(e.audio_file))[0]
durs.append(tag2d[tag])
self.durs = durs
self.rep = rep
def __getitem__(self, item):
sample = self.collection[item]
audio, audio_len, _, _ = super().__getitem__(item) # noqa
text = self.vocab.encode(sample.text_raw)
text, text_len = torch.tensor(text).long(), torch.tensor(len(text)).long()
blanks_durs, graphemes_durs = self.durs[item]
return (
audio,
audio_len,
text,
text_len,
blanks_durs,
graphemes_durs,
)
@staticmethod
def _merge(tensors, dim=0, value=0, dtype=None):
"""Merges list of tensors into one."""
tensors = [tensor if isinstance(tensor, torch.Tensor) else torch.tensor(tensor) for tensor in tensors]
dim = dim if dim != -1 else len(tensors[0].shape) - 1
dtype = tensors[0].dtype if dtype is None else dtype
max_len = max(tensor.shape[dim] for tensor in tensors)
new_tensors = []
for tensor in tensors:
pad = (2 * len(tensor.shape)) * [0]
pad[-2 * dim - 1] = max_len - tensor.shape[dim]
new_tensors.append(F.pad(tensor, pad=pad, value=value))
return torch.stack(new_tensors).to(dtype=dtype)
@staticmethod
def _interleave(x, y):
"""Interleave two tensors."""
xy = torch.stack([x[:-1], y], dim=1).view(-1)
xy = F.pad(xy, pad=[0, 1], value=x[-1])
return xy
def _collate_fn(self, batch):
batch = list(zip(*batch))
asr_batch = _speech_collate_fn(list(zip(*batch[:4])), pad_id=self.vocab.pad)
audio, audio_len, text, text_len = asr_batch
text = [
self._interleave(
x=torch.empty(len(t) + 1, dtype=torch.long, device=t.device,).fill_(self.vocab.blank), y=t,
)
for t in text
]
text = self._merge(text, value=self.vocab.pad, dtype=torch.long)
text_len = text_len * 2 + 1
blanks_durs, graphemes_durs = batch[4:]
durs = [self._interleave(b, c) for b, c in zip(blanks_durs, graphemes_durs)]
durs = self._merge(durs, dtype=torch.long).to(text.device)
if self.rep:
text = self._merge(
tensors=[torch.repeat_interleave(text1, durs1) for text1, durs1 in zip(text, durs)], dtype=torch.long,
)
text_len = durs.sum(-1)
return (
audio,
audio_len,
text,
text_len,
durs,
)
@experimental
class AudioToBPEDataset(_AudioTextDataset):
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'audio_signal': NeuralType(
('B', 'T'),
AudioSignal(freq=self._sample_rate)
if self is not None and hasattr(self, '_sample_rate')
else AudioSignal(),
),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
'transcripts': NeuralType(('B', 'T'), LabelsType()),
'transcript_length': NeuralType(tuple('B'), LengthsType()),
}
def __init__(
self,
manifest_filepath: str,
tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
sample_rate: int,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
max_duration: Optional[int] = None,
min_duration: Optional[int] = None,
max_utts: int = 0,
trim: bool = False,
load_audio: bool = True,
add_misc: bool = False,
use_start_end_token: bool = True,
):
if use_start_end_token and hasattr(tokenizer, 'bos_token'):
bos_id = tokenizer.bos_id
else:
bos_id = None
if use_start_end_token and hasattr(tokenizer, 'eos_token'):
eos_id = tokenizer.eos_id
else:
eos_id = None
if hasattr(tokenizer, 'pad_token'):
pad_id = tokenizer.pad_id
else:
pad_id = 0
class TokenizerWrapper:
def __init__(self, tokenizer):
self._tokenizer = tokenizer
def __call__(self, text):
t = self._tokenizer.text_to_ids(text)
return t
super().__init__(
manifest_filepath=manifest_filepath,
parser=TokenizerWrapper(tokenizer),
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
trim=trim,
load_audio=load_audio,
add_misc=add_misc,
)
# Ported from https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/data/speech2text/speech_commands.py
@experimental
class AudioLabelDataset(Dataset):
"""
Dataset that loads tensors via a json file containing paths to audio
files, command class, and durations (in seconds). Each new line is a
different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "label":
"label", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "label": "label",
"offset": 301.75, "duration": 0.82}
Args:
manifest_filepath: Path to manifest json as described above. Can
be comma-separated paths.
labels (Optional[list]): String containing all the possible labels to map to
if None then automatically picks from ASRSpeechLabel collection.
featurizer: Initialized featurizer class that converts paths of
audio to feature tensors
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include
in dataset
trim: Boolean flag whether to trim the audio
load_audio: Boolean flag indicate whether do or not load audio
"""
def __init__(
self,
manifest_filepath,
featurizer,
labels=None,
max_duration=None,
min_duration=None,
trim=False,
load_audio=True,
):
self.collection = collections.ASRSpeechLabel(
manifests_files=manifest_filepath.split(','), min_duration=min_duration, max_duration=max_duration
)
self.featurizer = featurizer
self.trim = trim
self.load_audio = load_audio
self.labels = labels if labels else self.collection.uniq_labels
self.num_commands = len(self.labels)
self.label2id, self.id2label = {}, {}
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
def __getitem__(self, index):
sample = self.collection[index]
if self.load_audio:
offset = sample.offset
if offset is None:
offset = 0
features = self.featurizer.process(
sample.audio_file, offset=offset, duration=sample.duration, trim=self.trim
)
f, fl = features, torch.tensor(features.shape[0]).long()
else:
f, fl = None, None
t = self.label2id[sample.label]
tl = 1 # For compatibility with collate_fn used later
return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()
def __len__(self):
return len(self.collection)
def _collate_fn(self, batch):
"""collate batch of audio sig, audio len, tokens (single token), tokens len (1)
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
"""
return _speech_collate_fn(batch, pad_id=0)
@experimental
class _TarredAudioToTextDataset(IterableDataset):
"""
A similar Dataset to the AudioToCharDataset/AudioToBPEDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset/AudioToBPEDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
parser (callable): A callable which is used to pre-process the text output.
sample_rate (int): Sample rate to resample loaded audio to
int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
max_utts (int): Limit number of utterances. 0 means no maximum.
blank_index (int): Blank character index, defaults to -1.
unk_index (int): Unknown character index, defaults to -1.
normalize (bool): Dataset parameter.
Whether to use automatic text cleaning.
It is highly recommended to manually clean text for best results.
Defaults to True.
trim (bool): Whether to use trim silence from beginning and end
of audio signal using librosa.effects.trim().
Defaults to False.
bos_id (id): Dataset parameter.
Beginning of string symbol id used for seq2seq models.
Defaults to None.
eos_id (id): Dataset parameter.
End of string symbol id used for seq2seq models.
Defaults to None.
pad_id (id): Token used to pad when collating samples in batches.
If this is None, pads using 0s.
Defaults to None.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
"""
def __init__(
self,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: str,
parser: Callable,
sample_rate: int,
int_values: bool = False,
augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
shuffle_n: int = 0,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_utts: int = 0,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
add_misc: bool = False,
pad_id: int = 0,
global_rank: int = 0,
world_size: int = 0,
):
self.collection = collections.ASRAudioText(
manifests_files=manifest_filepath.split(','),
parser=parser,
min_duration=min_duration,
max_duration=max_duration,
max_number=max_utts,
index_by_file_id=True, # Must set this so the manifest lines can be indexed by file ID
)
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
self.trim = trim
self.eos_id = eos_id
self.bos_id = bos_id
self.pad_id = pad_id
self._add_misc = add_misc
if isinstance(audio_tar_filepaths, str):
# Replace '(' and '[' with '{'
brace_keys_open = ['(', '[', '<', '_OP_']
for bkey in brace_keys_open:
if bkey in audio_tar_filepaths:
audio_tar_filepaths = audio_tar_filepaths.replace(bkey, "{")
# Replace ')' and ']' with '}'
brace_keys_close = [')', ']', '>', '_CL_']
for bkey in brace_keys_close:
if bkey in audio_tar_filepaths:
audio_tar_filepaths = audio_tar_filepaths.replace(bkey, "}")
# Check for distributed and partition shards accordingly
if world_size > 1:
if isinstance(audio_tar_filepaths, str):
# Brace expand
audio_tar_filepaths = list(braceexpand.braceexpand(audio_tar_filepaths))
if len(audio_tar_filepaths) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(audio_tar_filepaths)}) is not divisible "
f"by number of distributed workers ({world_size})."
)
begin_idx = (len(audio_tar_filepaths) // world_size) * global_rank
end_idx = begin_idx + (len(audio_tar_filepaths) // world_size)
audio_tar_filepaths = audio_tar_filepaths[begin_idx:end_idx]
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
# Put together WebDataset
self._dataset = (
wd.Dataset(audio_tar_filepaths)
.shuffle(shuffle_n)
.rename(audio='wav', key='__key__')
.to_tuple('audio', 'key')
.pipe(self._filter)
.map(f=self._build_sample)
)
def _filter(self, iterator):
"""This function is used to remove samples that have been filtered out by ASRAudioText already.
Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample
that was filtered out (e.g. for duration).
Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,
which may make your code hang as one process will finish before the other.
"""
class TarredAudioFilter:
def __init__(self, collection):
self.iterator = iterator
self.collection = collection
def __iter__(self):
return self
def __next__(self):
while True:
audio_bytes, audio_filename = next(self.iterator)
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
if file_id in self.collection.mapping:
return audio_bytes, audio_filename
return TarredAudioFilter(self.collection)
def _collate_fn(self, batch):
return _speech_collate_fn(batch, self.pad_id)
def _build_sample(self, tup):
"""Builds the training sample by combining the data from the WebDataset with the manifest info.
"""
audio_bytes, audio_filename = tup
# Grab manifest entry from self.collection
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
manifest_idx = self.collection.mapping[file_id]
manifest_entry = self.collection[manifest_idx]
offset = manifest_entry.offset
if offset is None:
offset = 0
# Convert audio bytes to IO stream for processing (for SoundFile to read)
audio_filestream = io.BytesIO(audio_bytes)
features = self.featurizer.process(
audio_filestream,
offset=offset,
duration=manifest_entry.duration,
trim=self.trim,
orig_sr=manifest_entry.orig_sr,
)
audio_filestream.close()
# Audio features
f, fl = features, torch.tensor(features.shape[0]).long()
# Text features
t, tl = manifest_entry.text_tokens, len(manifest_entry.text_tokens)
if self.bos_id is not None:
t = [self.bos_id] + t
tl += 1
if self.eos_id is not None:
t = t + [self.eos_id]
tl += 1
return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()
def __iter__(self):
return self._dataset.__iter__()
def __len__(self):
return len(self.collection)
@experimental
class TarredAudioToCharDataset(_TarredAudioToTextDataset):
"""
A similar Dataset to the AudioToCharDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
labels (list): List of characters that can be output by the ASR model.
For Jasper, this is the 28 character set {a-z '}. The CTC blank
symbol is automatically added later for models using ctc.
sample_rate (int): Sample rate to resample loaded audio to
int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
max_utts (int): Limit number of utterances. 0 means no maximum.
blank_index (int): Blank character index, defaults to -1.
unk_index (int): Unknown character index, defaults to -1.
normalize (bool): Dataset parameter.
Whether to use automatic text cleaning.
It is highly recommended to manually clean text for best results.
Defaults to True.
trim (bool): Whether to use trim silence from beginning and end
of audio signal using librosa.effects.trim().
Defaults to False.
bos_id (id): Dataset parameter.
Beginning of string symbol id used for seq2seq models.
Defaults to None.
eos_id (id): Dataset parameter.
End of string symbol id used for seq2seq models.
Defaults to None.
pad_id (id): Token used to pad when collating samples in batches.
If this is None, pads using 0s.
Defaults to None.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
"""
def __init__(
self,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: str,
labels: List[str],
sample_rate: int,
int_values: bool = False,
augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
shuffle_n: int = 0,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_utts: int = 0,
blank_index: int = -1,
unk_index: int = -1,
normalize: bool = True,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
parser: Optional[str] = 'en',
add_misc: bool = False,
pad_id: int = 0,
global_rank: int = 0,
world_size: int = 0,
):
self.labels = labels
parser = parsers.make_parser(
labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
)
super().__init__(
audio_tar_filepaths=audio_tar_filepaths,
manifest_filepath=manifest_filepath,
parser=parser,
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
shuffle_n=shuffle_n,
min_duration=min_duration,
max_duration=max_duration,
max_utts=max_utts,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
add_misc=add_misc,
pad_id=pad_id,
global_rank=global_rank,
world_size=world_size,
)
@experimental
class TarredAudioToBPEDataset(_TarredAudioToTextDataset):
"""
A similar Dataset to the AudioToBPEDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToBPEDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
tokenizer (TokenizerSpec): Either a Word Piece Encoding tokenizer (BERT),
or a Sentence Piece Encoding tokenizer (BPE). The CTC blank
symbol is automatically added later for models using ctc.
sample_rate (int): Sample rate to resample loaded audio to
int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
max_utts (int): Limit number of utterances. 0 means no maximum.
trim (bool): Whether to use trim silence from beginning and end
of audio signal using librosa.effects.trim().
Defaults to False.
pad_id (id): Token used to pad when collating samples in batches.
If this is None, pads using 0s.
Defaults to None.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
"""
def __init__(
self,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: str,
tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
sample_rate: int,
int_values: bool = False,
augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
shuffle_n: int = 0,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_utts: int = 0,
trim: bool = False,
add_misc: bool = False,
global_rank: int = 0,
world_size: int = 0,
use_start_end_token: bool = True,
):
if use_start_end_token and hasattr(tokenizer, 'bos_token'):
bos_id = tokenizer.bos_id
else:
bos_id = None
if use_start_end_token and hasattr(tokenizer, 'eos_token'):
eos_id = tokenizer.eos_id
else:
eos_id = None
if hasattr(tokenizer, 'pad_token'):
pad_id = tokenizer.pad_id
else:
pad_id = 0
class TokenizerWrapper:
def | (self, tokenizer):
self._tokenizer = tokenizer
def __call__(self, text):
t = self._tokenizer.text_to_ids(text)
return t
super().__init__(
audio_tar_filepaths=audio_tar_filepaths,
manifest_filepath=manifest_filepath,
parser=TokenizerWrapper(tokenizer),
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
shuffle_n=shuffle_n,
min_duration=min_duration,
max_duration=max_duration,
max_utts=max_utts,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
add_misc=add_misc,
pad_id=pad_id,
global_rank=global_rank,
world_size=world_size,
)
| __init__ |
Sidebar_list.tsx | /* eslint-disable @typescript-eslint/explicit-function-return-type */
/* eslint-disable @typescript-eslint/camelcase */
import React, { useState } from 'react';
import { List } from './List';
export const Sidebar_list = (params: {
list: List,
setActive_list: React.Dispatch<React.SetStateAction<string>>,
list_modify_visible: boolean
}): JSX.Element => {
const [list_modify_visible_private_checked, setList_modify_visible_private_checked] = useState(false);
const [list_modify_visible_anonimous_checked, setList_modify_visible_anonimous_checked] = useState(false);
const [list_modify_visible_public_no_notify_checked, setList_modify_visible_public_no_notify_checked] = useState(false);
const [list_modify_visible_public_notify_checked, setList_modify_visible_public_notify_checked] = useState(false);
const [list_name, setList_name] = useState(params.list.name);
const radio_check_private = () => {
setList_modify_visible_private_checked(true);
setList_modify_visible_anonimous_checked(false);
setList_modify_visible_public_no_notify_checked(false);
setList_modify_visible_public_notify_checked(false);
};
const radio_check_anonimous = () => {
setList_modify_visible_private_checked(false);
setList_modify_visible_anonimous_checked(true);
setList_modify_visible_public_no_notify_checked(false);
setList_modify_visible_public_notify_checked(false);
};
const radio_check_public_no_notify = () => {
setList_modify_visible_private_checked(false);
setList_modify_visible_anonimous_checked(false);
setList_modify_visible_public_no_notify_checked(true);
setList_modify_visible_public_notify_checked(false);
};
const radio_check_public_notify = () => {
setList_modify_visible_private_checked(false);
setList_modify_visible_anonimous_checked(false);
setList_modify_visible_public_no_notify_checked(false);
setList_modify_visible_public_notify_checked(true);
};
const set_name = (e: React.ChangeEvent<HTMLInputElement>) => {
setList_name(e.target.value);
params.list.name = e.target.value;
};
if (params.list_modify_visible) {
return (
<div className='sidebar_list-item'>
<div className="sidebar_list-item-left">
<div className="sidebar_list-item-left-1">
<div className="sidebar_list-item-name_edit_wrapper">
<form onSubmit={e => e.preventDefault()}>
<label>
<input className="sidebar_list-item-name_edit" type="text" value={list_name} onChange={e => set_name(e)} placeholder={list_name} />
</label>
</form>
</div>
</div>
<div className="sidebar_list-item-screen_name_wrapper">
<span className="sidebar_list-item-screen_name">{'by ' + params.list.full_name.split('/')[0]}</span>
</div>
<div className="sidebar_list-item-modes">
<div className="sidebar_list-item-modes-each">
{
list_modify_visible_private_checked ? | ></button>
}
<span className="list_modify_visible_checkbox_label_text">private</span>
</div>
<div className="sidebar_list-item-modes-each">
{
list_modify_visible_anonimous_checked ?
<button className={`checkmark_anonimous_true buttons`}></button> :
<button className={`checkmark_anonimous_false buttons`}
onClick={e => typeof e === 'object' ? radio_check_anonimous() : 1 + 1}
></button>
}
<span className="list_modify_visible_checkbox_label_text">anonimous</span>
</div>
<div className="sidebar_list-item-modes-each">
{
list_modify_visible_public_no_notify_checked ?
<button className={`checkmark_public_no_notify_true buttons`}></button> :
<button className={`checkmark_public_no_notify_false buttons`}
onClick={e => typeof e === 'object' ? radio_check_public_no_notify() : 1 + 1}
></button>
}
<span className="list_modify_visible_checkbox_label_text">public_no_notify</span>
</div>
<div className="sidebar_list-item-modes-each">
{
list_modify_visible_public_notify_checked ?
<button className={`checkmark_public_notify_true buttons`}></button> :
<button className={`checkmark_public_notify_false buttons`}
onClick={e => typeof e === 'object' ? radio_check_public_notify() : 1 + 1}
></button>
}
<span className="list_modify_visible_checkbox_label_text">public_notify</span>
</div>
</div>
</div>
<div className="sidebar_list-item-right">
<span className="sidebar_list-item-timestamp">{'• ' + '23h'}</span>
<div className="sidebar_list-item-delete_button_wrapper">
<button className={`sidebar_list-item-delete_button buttons`}></button> :
</div>
</div>
<div className="sidebar_list-item-text"></div>
</div>
);
} else {
return (
<div className='sidebar_list-item'>
<div className="sidebar_list-item-left">
<div className="sidebar_list-item-left-1">
<div className="sidebar_list-item-name_wrapper">
<span className="sidebar_list-item-name">{params.list.name}</span>
</div>
</div>
<div className="sidebar_list-item-screen_name_wrapper">
<span className="sidebar_list-item-screen_name">{'by ' + params.list.full_name.split('/')[0]}</span>
</div>
</div>
<div className="sidebar_list-item-right">
<span className="sidebar_list-item-timestamp">{'• ' + '23h'}</span>
</div>
<div className="sidebar_list-item-text"></div>
<button className='sidebar_list-item-button' onClick={ e => typeof e === 'object' ? params.setActive_list(params.list.name) : 1 + 1}></button>
</div>
);
}
}; | <button className={`checkmark_private_true buttons`}></button> :
<button className={`checkmark_private_false buttons`}
onClick={e => typeof e === 'object' ? radio_check_private() : 1 + 1} |
dpi.rs | #![allow(non_snake_case, unused_unsafe)]
use std::sync::Once;
use crate::platform_impl::platform::util::{
ENABLE_NON_CLIENT_DPI_SCALING, GET_DPI_FOR_MONITOR, GET_DPI_FOR_WINDOW, SET_PROCESS_DPI_AWARE,
SET_PROCESS_DPI_AWARENESS, SET_PROCESS_DPI_AWARENESS_CONTEXT,
};
use winapi::{
shared::{
minwindef::FALSE,
windef::{DPI_AWARENESS_CONTEXT, DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE, HMONITOR, HWND},
winerror::S_OK,
},
um::{
shellscalingapi::{MDT_EFFECTIVE_DPI, PROCESS_PER_MONITOR_DPI_AWARE},
wingdi::{GetDeviceCaps, LOGPIXELSX},
winuser::{self, MONITOR_DEFAULTTONEAREST},
},
};
const DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2: DPI_AWARENESS_CONTEXT = -4isize as _;
pub fn become_dpi_aware() {
static ENABLE_DPI_AWARENESS: Once = Once::new();
ENABLE_DPI_AWARENESS.call_once(|| {
unsafe {
if let Some(SetProcessDpiAwarenessContext) = *SET_PROCESS_DPI_AWARENESS_CONTEXT {
// We are on Windows 10 Anniversary Update (1607) or later.
if SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2)
== FALSE
{
// V2 only works with Windows 10 Creators Update (1703). Try using the older
// V1 if we can't set V2.
SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE);
}
} else if let Some(SetProcessDpiAwareness) = *SET_PROCESS_DPI_AWARENESS {
// We are on Windows 8.1 or later.
SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE);
} else if let Some(SetProcessDPIAware) = *SET_PROCESS_DPI_AWARE {
// We are on Vista or later.
SetProcessDPIAware();
}
}
});
}
pub fn enable_non_client_dpi_scaling(hwnd: HWND) {
unsafe {
if let Some(EnableNonClientDpiScaling) = *ENABLE_NON_CLIENT_DPI_SCALING {
EnableNonClientDpiScaling(hwnd);
}
}
}
pub fn get_monitor_dpi(hmonitor: HMONITOR) -> Option<u32> {
unsafe {
if let Some(GetDpiForMonitor) = *GET_DPI_FOR_MONITOR {
// We are on Windows 8.1 or later.
let mut dpi_x = 0;
let mut dpi_y = 0;
if GetDpiForMonitor(hmonitor, MDT_EFFECTIVE_DPI, &mut dpi_x, &mut dpi_y) == S_OK {
// MSDN says that "the values of *dpiX and *dpiY are identical. You only need to
// record one of the values to determine the DPI and respond appropriately".
// https://msdn.microsoft.com/en-us/library/windows/desktop/dn280510(v=vs.85).aspx
return Some(dpi_x as u32);
}
}
}
None
}
pub const BASE_DPI: u32 = 96;
pub fn | (dpi: u32) -> f64 {
dpi as f64 / BASE_DPI as f64
}
pub unsafe fn hwnd_dpi(hwnd: HWND) -> u32 {
let hdc = winuser::GetDC(hwnd);
if hdc.is_null() {
panic!("[winit] `GetDC` returned null!");
}
if let Some(GetDpiForWindow) = *GET_DPI_FOR_WINDOW {
// We are on Windows 10 Anniversary Update (1607) or later.
match GetDpiForWindow(hwnd) {
0 => BASE_DPI, // 0 is returned if hwnd is invalid
dpi => dpi as u32,
}
} else if let Some(GetDpiForMonitor) = *GET_DPI_FOR_MONITOR {
// We are on Windows 8.1 or later.
let monitor = winuser::MonitorFromWindow(hwnd, MONITOR_DEFAULTTONEAREST);
if monitor.is_null() {
return BASE_DPI;
}
let mut dpi_x = 0;
let mut dpi_y = 0;
if GetDpiForMonitor(monitor, MDT_EFFECTIVE_DPI, &mut dpi_x, &mut dpi_y) == S_OK {
dpi_x as u32
} else {
BASE_DPI
}
} else {
// We are on Vista or later.
if winuser::IsProcessDPIAware() != FALSE {
// If the process is DPI aware, then scaling must be handled by the application using
// this DPI value.
GetDeviceCaps(hdc, LOGPIXELSX) as u32
} else {
// If the process is DPI unaware, then scaling is performed by the OS; we thus return
// 96 (scale factor 1.0) to prevent the window from being re-scaled by both the
// application and the WM.
BASE_DPI
}
}
}
| dpi_to_scale_factor |
test_shortener.py | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
from unittest import TestCase
from nose.plugins.attrib import attr
from pyramid import testing
from c2cgeoportal.tests.functional import ( # noqa
tear_down_common as tearDownModule, set_up_common as setUpModule
)
@attr(functional=True)
class TestshortenerView(TestCase):
def setUp(self): # noqa
pass
def tearDown(self): # noqa
testing.tearDown()
from c2cgeoportal.models import DBSession, Shorturl
import transaction
DBSession.query(Shorturl).delete()
transaction.commit()
def test_shortener(self):
from pyramid.testing import DummyRequest
from pyramid.httpexceptions import HTTPFound, HTTPNotFound, \
HTTPBadRequest
from c2cgeoportal.views.shortener import Shortener
def route_url(name, *elements, **kw):
return "https://example.com/s/" + kw["ref"]
request = DummyRequest()
request.user = None
request.host = "example.com:443"
request.server_name = "example.com" | shortener = Shortener(request)
request.params = {
"url": "https://example.com/hi"
}
result = shortener.create()
index = result["short_url"].rfind("/")
self.assertEqual(
result["short_url"][:index],
"https://example.com/s"
)
request.params = {}
request.matchdict = {
"ref": result["short_url"][index + 1:]
}
result = shortener.get()
self.assertEqual(type(result), HTTPFound)
self.assertEqual(result.location, "https://example.com/hi")
request.params = {}
request.matchdict = {
"ref": "AAAAAA"
}
self.assertRaises(HTTPNotFound, shortener.get)
request.params = {
"url": "https://example.com/short/truite"
}
result = shortener.create()
self.assertEqual(result["short_url"], "https://example.com/s/truite")
request.params = {}
request.matchdict = {}
self.assertRaises(HTTPBadRequest, shortener.create)
request.params = {
"url": "https://other-site.com/hi"
}
self.assertRaises(HTTPBadRequest, shortener.create)
def test_shortener_baseurl(self):
from pyramid.testing import DummyRequest
from c2cgeoportal.views.shortener import Shortener
request = DummyRequest()
request.user = None
request.host = "example.com:443"
request.server_name = "example.com"
request.registry.settings["shortener"] = {
"base_url": "http://my_host/my_short/"
}
shortener = Shortener(request)
request.params = {
"url": "https://example.com/hi"
}
result = shortener.create()
index = result["short_url"].rfind("/")
self.assertEqual(
result["short_url"][:index],
"http://my_host/my_short"
) | request.route_url = route_url |
export_test.go | package api
var (
BuildSQL = buildSQL
NewRequest = newRequest
)
type LogFilter logFilter
type LogDataSet logDataSet
type LogQueue logQueue
type SearchID searchID
func ExtractLogs(ch chan *LogQueue, filter LogFilter) (*LogDataSet, error) {
pipe := make(chan *logQueue)
go func() {
defer close(pipe)
for q := range ch {
pipe <- (*logQueue)(q)
}
}()
v, err := extractLogs(pipe, logFilter(filter))
return (*LogDataSet)(v), err
}
func newRequest(terms []string, start, end string) ExecSearchRequest {
var querySet []Query
for _, t := range terms { | }
return ExecSearchRequest{
Query: querySet,
StartDateTime: start,
EndDateTime: end,
}
} | querySet = append(querySet, Query{Term: t}) |
machineset_reconciler_suite_test.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package machineset
import (
"log"
"os"
"path/filepath"
"testing"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/cluster-api/pkg/apis"
"sigs.k8s.io/cluster-api/pkg/controller/external"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
var cfg *rest.Config
func TestMain(m *testing.M) {
t := &envtest.Environment{
CRDs: []*apiextensionsv1beta1.CustomResourceDefinition{
external.TestGenericInfrastructureCRD,
external.TestGenericInfrastructureTemplateCRD,
},
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
}
apis.AddToScheme(scheme.Scheme)
var err error
if cfg, err = t.Start(); err != nil {
log.Fatal(err)
}
code := m.Run()
t.Stop()
os.Exit(code)
}
// StartTestManager adds recFn
func StartTestManager(mgr manager.Manager, t *testing.T) chan struct{} | {
t.Helper()
stop := make(chan struct{})
go func() {
if err := mgr.Start(stop); err != nil {
t.Fatalf("error starting test manager: %v", err)
}
}()
return stop
} |
|
example_test.go | package linq
import (
"fmt"
"math/rand"
"strings"
"testing"
"time"
)
func ExampleKeyValue() {
m := make(map[int]bool)
m[10] = true
fmt.Println(From(m).Results())
// Output:
// [{10 true}]
}
func ExampleKeyValue_second() {
input := []KeyValue{
{10, true},
}
m := make(map[int]bool)
From(input).
ToMap(&m)
fmt.Println(m)
// Output:
// map[10:true]
}
// The following code example demonstrates how
// to use Range to generate a slice of values.
func ExampleRange() {
// Generate a slice of integers from 1 to 10
// and then select their squares.
var squares []int
Range(1, 10).
SelectT(
func(x int) int { return x * x },
).
ToSlice(&squares)
for _, num := range squares {
fmt.Println(num)
}
//Output:
//1
//4
//9
//16
//25
//36
//49
//64
//81
//100
}
// The following code example demonstrates how to use Repeat
// to generate a slice of a repeated value.
func ExampleRepeat() {
var slice []string
Repeat("I like programming.", 5).
ToSlice(&slice)
for _, str := range slice {
fmt.Println(str)
}
//Output:
//I like programming.
//I like programming.
//I like programming.
//I like programming.
//I like programming.
}
func | () {
query := From([]int{1, 2, 3, 4, 5}).Where(func(i interface{}) bool {
return i.(int) <= 3
})
next := query.Iterate()
for item, ok := next(); ok; item, ok = next() {
fmt.Println(item)
}
// Output:
// 1
// 2
// 3
}
// The following code example demonstrates how to use Aggregate function
func ExampleQuery_Aggregate() {
fruits := []string{"apple", "mango", "orange", "passionfruit", "grape"}
// Determine which string in the slice is the longest.
longestName := From(fruits).
Aggregate(
func(r interface{}, i interface{}) interface{} {
if len(r.(string)) > len(i.(string)) {
return r
}
return i
},
)
fmt.Println(longestName)
// Output:
// passionfruit
}
// The following code example demonstrates how to use AggregateWithSeed function
func ExampleQuery_AggregateWithSeed() {
ints := []int{4, 8, 8, 3, 9, 0, 7, 8, 2}
// Count the even numbers in the array, using a seed value of 0.
numEven := From(ints).
AggregateWithSeed(0,
func(total, next interface{}) interface{} {
if next.(int)%2 == 0 {
return total.(int) + 1
}
return total
},
)
fmt.Printf("The number of even integers is: %d", numEven)
// Output:
// The number of even integers is: 6
}
// The following code example demonstrates how to use AggregateWithSeedBy function
func ExampleQuery_AggregateWithSeedBy() {
input := []string{"apple", "mango", "orange", "passionfruit", "grape"}
// Determine whether any string in the array is longer than "banana".
longestName := From(input).
AggregateWithSeedBy("banana",
func(longest interface{}, next interface{}) interface{} {
if len(longest.(string)) > len(next.(string)) {
return longest
}
return next
},
// Return the final result
func(result interface{}) interface{} {
return fmt.Sprintf("The fruit with the longest name is %s.", result)
},
)
fmt.Println(longestName)
// Output:
// The fruit with the longest name is passionfruit.
}
// The following code example demonstrates how to
// use Distinct to return distinct elements from a slice of integers.
func ExampleOrderedQuery_Distinct() {
ages := []int{21, 46, 46, 55, 17, 21, 55, 55}
var distinctAges []int
From(ages).
OrderBy(
func(item interface{}) interface{} { return item },
).
Distinct().
ToSlice(&distinctAges)
fmt.Println(distinctAges)
// Output:
// [17 21 46 55]
}
// The following code example demonstrates how to
// use DistinctBy to return distinct elements from a ordered slice of elements.
func ExampleOrderedQuery_DistinctBy() {
type Product struct {
Name string
Code int
}
products := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
//Order and exclude duplicates.
var noduplicates []Product
From(products).
OrderBy(
func(item interface{}) interface{} { return item.(Product).Name },
).
DistinctBy(
func(item interface{}) interface{} { return item.(Product).Code },
).
ToSlice(&noduplicates)
for _, product := range noduplicates {
fmt.Printf("%s %d\n", product.Name, product.Code)
}
// Output:
// apple 9
// lemon 12
// orange 4
}
// The following code example demonstrates how to use ThenBy to perform
// a secondary ordering of the elements in a slice.
func ExampleOrderedQuery_ThenBy() {
fruits := []string{"grape", "passionfruit", "banana", "mango", "orange", "raspberry", "apple", "blueberry"}
// Sort the strings first by their length and then
//alphabetically by passing the identity selector function.
var query []string
From(fruits).
OrderBy(
func(fruit interface{}) interface{} { return len(fruit.(string)) },
).
ThenBy(
func(fruit interface{}) interface{} { return fruit },
).
ToSlice(&query)
for _, fruit := range query {
fmt.Println(fruit)
}
// Output:
// apple
// grape
// mango
// banana
// orange
// blueberry
// raspberry
// passionfruit
}
// The following code example demonstrates how to use All to determine
// whether all the elements in a slice satisfy a condition.
// Variable allStartWithB is true if all the pet names start with "B"
// or if the pets array is empty.
func ExampleQuery_All() {
type Pet struct {
Name string
Age int
}
pets := []Pet{
{Name: "Barley", Age: 10},
{Name: "Boots", Age: 4},
{Name: "Whiskers", Age: 6},
}
// Determine whether all pet names
// in the array start with 'B'.
allStartWithB := From(pets).
All(
func(pet interface{}) bool { return strings.HasPrefix(pet.(Pet).Name, "B") },
)
fmt.Printf("All pet names start with 'B'? %t", allStartWithB)
// Output:
//
// All pet names start with 'B'? false
}
// The following code example demonstrates how to use Any to determine
// whether a slice contains any elements.
func ExampleQuery_Any() {
numbers := []int{1, 2}
hasElements := From(numbers).Any()
fmt.Printf("Are there any element in the list? %t", hasElements)
// Output:
// Are there any element in the list? true
}
// The following code example demonstrates how to use AnyWith
// to determine whether any element in a slice satisfies a condition.
func ExampleQuery_AnyWith() {
type Pet struct {
Name string
Age int
Vaccinated bool
}
pets := []Pet{
{Name: "Barley", Age: 8, Vaccinated: true},
{Name: "Boots", Age: 4, Vaccinated: false},
{Name: "Whiskers", Age: 1, Vaccinated: false},
}
// Determine whether any pets over age 1 are also unvaccinated.
unvaccinated := From(pets).
AnyWith(
func(p interface{}) bool {
return p.(Pet).Age > 1 && p.(Pet).Vaccinated == false
},
)
fmt.Printf("Are there any unvaccinated animals over age one? %t", unvaccinated)
// Output:
//
// Are there any unvaccinated animals over age one? true
}
// The following code example demonstrates how to use Append
// to include an elements in the last position of a slice.
func ExampleQuery_Append() {
input := []int{1, 2, 3, 4}
q := From(input).Append(5)
last := q.Last()
fmt.Println(last)
// Output:
// 5
}
//The following code example demonstrates how to use Average
//to calculate the average of a slice of values.
func ExampleQuery_Average() {
grades := []int{78, 92, 100, 37, 81}
average := From(grades).Average()
fmt.Println(average)
// Output:
// 77.6
}
// The following code example demonstrates how to use Count
// to count the elements in an array.
func ExampleQuery_Count() {
fruits := []string{"apple", "banana", "mango", "orange", "passionfruit", "grape"}
numberOfFruits := From(fruits).Count()
fmt.Println(numberOfFruits)
// Output:
// 6
}
// The following code example demonstrates how to use Contains
// to determine whether a slice contains a specific element.
func ExampleQuery_Contains() {
slice := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
has5 := From(slice).Contains(5)
fmt.Printf("Does the slice contains 5? %t", has5)
// Output:
// Does the slice contains 5? true
}
//The following code example demonstrates how to use CountWith
//to count the even numbers in an array.
func ExampleQuery_CountWith() {
slice := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
evenCount := From(slice).
CountWith(
func(item interface{}) bool { return item.(int)%2 == 0 },
)
fmt.Println(evenCount)
// Output:
// 6
}
// The following example demonstrates how to use the DefaultIfEmpty
// method on the results of a group join to perform a left outer join.
//
// The first step in producing a left outer join of two collections is to perform
// an inner join by using a group join. In this example, the list of Person objects
// is inner-joined to the list of Pet objects based on a Person object that matches Pet.Owner.
//
// The second step is to include each element of the first (left) collection in the
// result set even if that element has no matches in the right collection.
// This is accomplished by calling DefaultIfEmpty on each sequence of matching
// elements from the group join.
// In this example, DefaultIfEmpty is called on each sequence of matching Pet elements.
// The method returns a collection that contains a single, default value if the sequence
// of matching Pet elements is empty for any Person element, thereby ensuring that each
// Person element is represented in the result collection.
func ExampleQuery_DefaultIfEmpty() {
type Person struct {
FirstName string
LastName string
}
type Pet struct {
Name string
Owner Person
}
magnus := Person{FirstName: "Magnus", LastName: "Hedlund"}
terry := Person{FirstName: "Terry", LastName: "Adams"}
charlotte := Person{FirstName: "Charlotte", LastName: "Weiss"}
arlene := Person{FirstName: "Arlene", LastName: "Huff"}
barley := Pet{Name: "Barley", Owner: terry}
boots := Pet{Name: "Boots", Owner: terry}
whiskers := Pet{Name: "Whiskers", Owner: charlotte}
bluemoon := Pet{Name: "Blue Moon", Owner: terry}
daisy := Pet{Name: "Daisy", Owner: magnus}
// Create two lists.
people := []Person{magnus, terry, charlotte, arlene}
pets := []Pet{barley, boots, whiskers, bluemoon, daisy}
results := []string{}
From(people).
GroupJoinT(
From(pets),
func(person Person) Person { return person },
func(pet Pet) Person { return pet.Owner },
func(person Person, pets []Pet) Group { return Group{Key: person, Group: From(pets).Results()} },
).
SelectManyByT(
func(g Group) Query { return From(g.Group).DefaultIfEmpty(Pet{}) },
func(pet Pet, group Group) string {
return fmt.Sprintf("%s: %s", group.Key.(Person).FirstName, pet.Name)
},
).
ToSlice(&results)
for _, s := range results {
fmt.Println(s)
}
// Output:
// Magnus: Daisy
// Terry: Barley
// Terry: Boots
// Terry: Blue Moon
// Charlotte: Whiskers
// Arlene:
}
//The following code example demonstrates how to use Distinct
//to return distinct elements from a slice of integers.
func ExampleQuery_Distinct() {
ages := []int{21, 46, 46, 55, 17, 21, 55, 55}
var distinctAges []int
From(ages).
Distinct().
ToSlice(&distinctAges)
fmt.Println(distinctAges)
// Output:
// [21 46 55 17]
}
// The following code example demonstrates how to
// use DistinctBy to return distinct elements from a ordered slice of elements.
func ExampleQuery_DistinctBy() {
type Product struct {
Name string
Code int
}
products := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
//Order and exclude duplicates.
var noduplicates []Product
From(products).
DistinctBy(
func(item interface{}) interface{} { return item.(Product).Code },
).
ToSlice(&noduplicates)
for _, product := range noduplicates {
fmt.Printf("%s %d\n", product.Name, product.Code)
}
// Output:
// orange 4
// apple 9
// lemon 12
}
// The following code example demonstrates how to use the Except
// method to compare two slices of numbers and return elements
// that appear only in the first slice.
func ExampleQuery_Except() {
numbers1 := []float32{2.0, 2.1, 2.2, 2.3, 2.4, 2.5}
numbers2 := []float32{2.2}
var onlyInFirstSet []float32
From(numbers1).
Except(From(numbers2)).
ToSlice(&onlyInFirstSet)
for _, number := range onlyInFirstSet {
fmt.Println(number)
}
// Output:
//2
//2.1
//2.3
//2.4
//2.5
}
// The following code example demonstrates how to use the Except
// method to compare two slices of numbers and return elements
// that appear only in the first slice.
func ExampleQuery_ExceptBy() {
type Product struct {
Name string
Code int
}
fruits1 := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
fruits2 := []Product{
{Name: "apple", Code: 9},
}
//Order and exclude duplicates.
var except []Product
From(fruits1).
ExceptBy(From(fruits2),
func(item interface{}) interface{} { return item.(Product).Code },
).
ToSlice(&except)
for _, product := range except {
fmt.Printf("%s %d\n", product.Name, product.Code)
}
// Output:
// orange 4
// lemon 12
}
// The following code example demonstrates how to use First
// to return the first element of an array.
func ExampleQuery_First() {
numbers := []int{9, 34, 65, 92, 87, 435, 3, 54, 83, 23, 87, 435, 67, 12, 19}
first := From(numbers).First()
fmt.Println(first)
// Output:
// 9
}
//The following code example demonstrates how to use FirstWith
// to return the first element of an array that satisfies a condition.
func ExampleQuery_FirstWith() {
numbers := []int{9, 34, 65, 92, 87, 435, 3, 54, 83, 23, 87, 435, 67, 12, 19}
first := From(numbers).
FirstWith(
func(item interface{}) bool { return item.(int) > 80 },
)
fmt.Println(first)
// Output:
// 92
}
//The following code example demonstrates how to use Intersect
//to return the elements that appear in each of two slices of integers.
func ExampleQuery_Intersect() {
id1 := []int{44, 26, 92, 30, 71, 38}
id2 := []int{39, 59, 83, 47, 26, 4, 30}
var both []int
From(id1).
Intersect(From(id2)).
ToSlice(&both)
for _, id := range both {
fmt.Println(id)
}
// Output:
// 26
// 30
}
//The following code example demonstrates how to use IntersectBy
//to return the elements that appear in each of two slices of products with same Code.
func ExampleQuery_IntersectBy() {
type Product struct {
Name string
Code int
}
store1 := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
}
store2 := []Product{
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
var duplicates []Product
From(store1).
IntersectBy(From(store2),
func(p interface{}) interface{} { return p.(Product).Code },
).
ToSlice(&duplicates)
for _, p := range duplicates {
fmt.Println(p.Name, "", p.Code)
}
// Output:
// apple 9
}
// The following code example demonstrates how to use Last
// to return the last element of an array.
func ExampleQuery_Last() {
numbers := []int{9, 34, 65, 92, 87, 435, 3, 54,
83, 23, 87, 67, 12, 19}
last := From(numbers).Last()
fmt.Println(last)
//Output:
//19
}
// The following code example demonstrates how to use LastWith
// to return the last element of an array.
func ExampleQuery_LastWith() {
numbers := []int{9, 34, 65, 92, 87, 435, 3, 54,
83, 23, 87, 67, 12, 19}
last := From(numbers).
LastWith(
func(n interface{}) bool { return n.(int) > 80 },
)
fmt.Println(last)
//Output:
//87
}
// The following code example demonstrates how to use Max
// to determine the maximum value in a slice.
func ExampleQuery_Max() {
numbers := []int64{4294967296, 466855135, 81125}
last := From(numbers).Max()
fmt.Println(last)
//Output:
//4294967296
}
// The following code example demonstrates how to use Min
// to determine the minimum value in a slice.
func ExampleQuery_Min() {
grades := []int{78, 92, 99, 37, 81}
min := From(grades).Min()
fmt.Println(min)
//Output:
//37
}
// The following code example demonstrates how to use OrderByDescending
// to sort the elements of a slice in descending order by using a selector function
func ExampleQuery_OrderByDescending() {
names := []string{"Ned", "Ben", "Susan"}
var result []string
From(names).
OrderByDescending(
func(n interface{}) interface{} { return n },
).ToSlice(&result)
fmt.Println(result)
// Output:
// [Susan Ned Ben]
}
// The following code example demonstrates how to use ThenByDescending to perform
// a secondary ordering of the elements in a slice in descending order.
func ExampleOrderedQuery_ThenByDescending() {
fruits := []string{"apPLe", "baNanA", "apple", "APple", "orange", "BAnana", "ORANGE", "apPLE"}
// Sort the strings first ascending by their length and
// then descending using a custom case insensitive comparer.
var query []string
From(fruits).
OrderBy(
func(fruit interface{}) interface{} { return len(fruit.(string)) },
).
ThenByDescending(
func(fruit interface{}) interface{} { return fruit.(string)[0] },
).
ToSlice(&query)
for _, fruit := range query {
fmt.Println(fruit)
}
// Output:
// apPLe
// apPLE
// apple
// APple
// orange
// baNanA
// ORANGE
// BAnana
}
// The following code example demonstrates how to use Concat
// to concatenate two slices.
func ExampleQuery_Concat() {
q := From([]int{1, 2, 3}).
Concat(From([]int{4, 5, 6}))
fmt.Println(q.Results())
// Output:
// [1 2 3 4 5 6]
}
func ExampleQuery_GroupBy() {
input := []int{1, 2, 3, 4, 5, 6, 7, 8, 9}
q := From(input).GroupBy(
func(i interface{}) interface{} { return i.(int) % 2 },
func(i interface{}) interface{} { return i.(int) })
fmt.Println(q.OrderBy(func(i interface{}) interface{} {
return i.(Group).Key
}).Results())
// Output:
// [{0 [2 4 6 8]} {1 [1 3 5 7 9]}]
}
// The following code example demonstrates how to use GroupJoin
// to perform a grouped join on two slices
func ExampleQuery_GroupJoin() {
fruits := []string{
"apple",
"banana",
"apricot",
"cherry",
"clementine",
}
q := FromString("abc").
GroupJoin(From(fruits),
func(i interface{}) interface{} { return i },
func(i interface{}) interface{} { return []rune(i.(string))[0] },
func(outer interface{}, inners []interface{}) interface{} {
return KeyValue{string(outer.(rune)), inners}
},
)
fmt.Println(q.Results())
// Output:
// [{a [apple apricot]} {b [banana]} {c [cherry clementine]}]
}
// The following code example demonstrates how to use IndexOf
// to retrieve the position of an item in the array and then
// update that item.
func ExampleQuery_IndexOf() {
type Item struct {
ID uint64
Name string
}
items := []Item{
{
ID: 1,
Name: "Joe",
},
{
ID: 2,
Name: "Bob",
},
{
ID: 3,
Name: "Rickster",
},
{
ID: 4,
Name: "Jim",
},
}
index := From(items).IndexOf(func(i interface{}) bool {
item, ok := i.(Item)
return ok && item.Name == "Rickster"
})
if index >= 0 {
// We found the item in the array. Change the name using the index.
items[index].Name = "Joshua"
fmt.Println("Item found at:", index, "new name:", items[index].Name)
}
// Output:
// Item found at: 2 new name: Joshua
}
// The following code example demonstrates how to use Join
// to perform an inner join of two slices based on a common key.
func ExampleQuery_Join() {
fruits := []string{
"apple",
"banana",
"apricot",
"cherry",
"clementine",
}
q := Range(1, 10).
Join(From(fruits),
func(i interface{}) interface{} { return i },
func(i interface{}) interface{} { return len(i.(string)) },
func(outer interface{}, inner interface{}) interface{} {
return KeyValue{outer, inner}
},
)
fmt.Println(q.Results())
// Output:
// [{5 apple} {6 banana} {6 cherry} {7 apricot} {10 clementine}]
}
// The following code example demonstrates how to use OrderBy
// to sort the elements of a slice.
func ExampleQuery_OrderBy() {
q := Range(1, 10).
OrderBy(
func(i interface{}) interface{} { return i.(int) % 2 },
).
ThenByDescending(
func(i interface{}) interface{} { return i },
)
fmt.Println(q.Results())
// Output:
// [10 8 6 4 2 9 7 5 3 1]
}
// The following code example demonstrates how to use Prepend
// to include an elements in the first position of a slice.
func ExampleQuery_Prepend() {
input := []int{2, 3, 4, 5}
q := From(input).Prepend(1)
first := q.First()
fmt.Println(first)
// Output:
// 1
}
// The following code example demonstrates how to use Reverse
// to reverse the order of elements in a string.
func ExampleQuery_Reverse() {
input := "apple"
var output []rune
From(input).
Reverse().
ToSlice(&output)
fmt.Println(string(output))
// Output:
// elppa
}
// The following code example demonstrates how to use Select
// to project over a slice of values.
func ExampleQuery_Select() {
squares := []int{}
Range(1, 10).
Select(
func(x interface{}) interface{} { return x.(int) * x.(int) },
).
ToSlice(&squares)
fmt.Println(squares)
// Output:
// [1 4 9 16 25 36 49 64 81 100]
}
func ExampleQuery_SelectMany() {
input := [][]int{{1, 2, 3}, {4, 5, 6, 7}}
q := From(input).
SelectMany(
func(i interface{}) Query { return From(i) },
)
fmt.Println(q.Results())
// Output:
// [1 2 3 4 5 6 7]
}
// The following code example demonstrates how to use Select
// to project over a slice of values and use the index of each element.
func ExampleQuery_SelectIndexed() {
fruits := []string{"apple", "banana", "mango", "orange", "passionfruit", "grape"}
result := []string{}
From(fruits).
SelectIndexed(
func(index int, fruit interface{}) interface{} { return fruit.(string)[:index] },
).
ToSlice(&result)
fmt.Println(result)
// Output:
// [ b ma ora pass grape]
}
// The following code example demonstrates how to use SelectManyByIndexed
// to perform a one-to-many projection over an array and use the index of each outer element.
func ExampleQuery_SelectManyByIndexed() {
type Pet struct {
Name string
}
type Person struct {
Name string
Pets []Pet
}
magnus := Person{
Name: "Hedlund, Magnus",
Pets: []Pet{{Name: "Daisy"}},
}
terry := Person{
Name: "Adams, Terry",
Pets: []Pet{{Name: "Barley"}, {Name: "Boots"}},
}
charlotte := Person{
Name: "Weiss, Charlotte",
Pets: []Pet{{Name: "Whiskers"}},
}
people := []Person{magnus, terry, charlotte}
var results []string
From(people).
SelectManyByIndexed(
func(index int, person interface{}) Query {
return From(person.(Person).Pets).
Select(func(pet interface{}) interface{} {
return fmt.Sprintf("%d - %s", index, pet.(Pet).Name)
})
},
func(indexedPet, person interface{}) interface{} {
return fmt.Sprintf("Pet: %s, Owner: %s", indexedPet, person.(Person).Name)
},
).
ToSlice(&results)
for _, result := range results {
fmt.Println(result)
}
// Output:
// Pet: 0 - Daisy, Owner: Hedlund, Magnus
// Pet: 1 - Barley, Owner: Adams, Terry
// Pet: 1 - Boots, Owner: Adams, Terry
// Pet: 2 - Whiskers, Owner: Weiss, Charlotte
}
// The following code example demonstrates how to use SelectManyIndexed
// to perform a one-to-many projection over an slice of log data and print out their contents.
func ExampleQuery_SelectManyIndexed() {
type LogFile struct {
Name string
Lines []string
}
file1 := LogFile{
Name: "file1.log",
Lines: []string{
"INFO: 2013/11/05 18:11:01 main.go:44: Special Information",
"WARNING: 2013/11/05 18:11:01 main.go:45: There is something you need to know about",
"ERROR: 2013/11/05 18:11:01 main.go:46: Something has failed",
},
}
file2 := LogFile{
Name: "file2.log",
Lines: []string{
"INFO: 2013/11/05 18:11:01 main.go:46: Everything is ok",
},
}
file3 := LogFile{
Name: "file3.log",
Lines: []string{
"2013/11/05 18:42:26 Hello World",
},
}
logFiles := []LogFile{file1, file2, file3}
var results []string
From(logFiles).
SelectManyIndexedT(func(fileIndex int, file LogFile) Query {
return From(file.Lines).
SelectIndexedT(func(lineIndex int, line string) string {
return fmt.Sprintf("File:[%d] - %s => line: %d - %s", fileIndex+1, file.Name, lineIndex+1, line)
})
}).
ToSlice(&results)
for _, result := range results {
fmt.Println(result)
}
// Output:
// File:[1] - file1.log => line: 1 - INFO: 2013/11/05 18:11:01 main.go:44: Special Information
// File:[1] - file1.log => line: 2 - WARNING: 2013/11/05 18:11:01 main.go:45: There is something you need to know about
// File:[1] - file1.log => line: 3 - ERROR: 2013/11/05 18:11:01 main.go:46: Something has failed
// File:[2] - file2.log => line: 1 - INFO: 2013/11/05 18:11:01 main.go:46: Everything is ok
// File:[3] - file3.log => line: 1 - 2013/11/05 18:42:26 Hello World
}
// The following code example demonstrates how to use SelectMany
// to perform a one-to-many projection over a slice
func ExampleQuery_SelectManyBy() {
type Pet struct {
Name string
}
type Person struct {
Name string
Pets []Pet
}
magnus := Person{
Name: "Hedlund, Magnus",
Pets: []Pet{{Name: "Daisy"}},
}
terry := Person{
Name: "Adams, Terry",
Pets: []Pet{{Name: "Barley"}, {Name: "Boots"}},
}
charlotte := Person{
Name: "Weiss, Charlotte",
Pets: []Pet{{Name: "Whiskers"}},
}
people := []Person{magnus, terry, charlotte}
var results []string
From(people).
SelectManyBy(
func(person interface{}) Query { return From(person.(Person).Pets) },
func(pet, person interface{}) interface{} {
return fmt.Sprintf("Owner: %s, Pet: %s", person.(Person).Name, pet.(Pet).Name)
},
).
ToSlice(&results)
for _, result := range results {
fmt.Println(result)
}
// Output:
// Owner: Hedlund, Magnus, Pet: Daisy
// Owner: Adams, Terry, Pet: Barley
// Owner: Adams, Terry, Pet: Boots
// Owner: Weiss, Charlotte, Pet: Whiskers
}
// The following code example demonstrates how to use SequenceEqual
// to determine whether two slices are equal.
func ExampleQuery_SequenceEqual() {
type Pet struct {
Name string
Age int
}
pets1 := []Pet{
{Name: "Barley", Age: 8},
{Name: "Boots", Age: 4},
{Name: "Whiskers", Age: 1},
{Name: "Daisy", Age: 4},
}
pets2 := []Pet{
{Name: "Barley", Age: 8},
{Name: "Boots", Age: 4},
{Name: "Whiskers", Age: 1},
{Name: "Daisy", Age: 4},
}
equal := From(pets1).SequenceEqual(From(pets2))
fmt.Printf("Are the lists equals? %t", equal)
// Output:
// Are the lists equals? true
}
// The following code example demonstrates how to use Single
// to select the only element of a slice.
func ExampleQuery_Single() {
fruits1 := []string{"orange"}
fruit1 := From(fruits1).Single()
fmt.Println(fruit1)
// Output:
// orange
}
// The following code example demonstrates how to use SingleWith
// to select the only element of a slice that satisfies a condition.
func ExampleQuery_SingleWith() {
fruits := []string{"apple", "banana", "mango", "orange", "passionfruit", "grape"}
fruit := From(fruits).
SingleWith(
func(f interface{}) bool { return len(f.(string)) > 10 },
)
fmt.Println(fruit)
// Output:
// passionfruit
}
// The following code example demonstrates how to use Skip
// to skip a specified number of elements in a sorted array
// and return the remaining elements.
func ExampleQuery_Skip() {
grades := []int{59, 82, 70, 56, 92, 98, 85}
var lowerGrades []int
From(grades).
OrderByDescending(
func(g interface{}) interface{} { return g },
).
Skip(3).
ToSlice(&lowerGrades)
//All grades except the top three are:
fmt.Println(lowerGrades)
// Output:
// [82 70 59 56]
}
// The following code example demonstrates how to use SkipWhile
// to skip elements of an array as long as a condition is true.
func ExampleQuery_SkipWhile() {
grades := []int{59, 82, 70, 56, 92, 98, 85}
var lowerGrades []int
From(grades).
OrderByDescending(
func(g interface{}) interface{} { return g },
).
SkipWhile(
func(g interface{}) bool { return g.(int) >= 80 },
).
ToSlice(&lowerGrades)
// All grades below 80:
fmt.Println(lowerGrades)
// Output:
// [70 59 56]
}
// The following code example demonstrates how to use SkipWhileIndexed
// to skip elements of an array as long as a condition that depends
// on the element's index is true.
func ExampleQuery_SkipWhileIndexed() {
amounts := []int{5000, 2500, 9000, 8000, 6500, 4000, 1500, 5500}
var query []int
From(amounts).
SkipWhileIndexed(
func(index int, amount interface{}) bool { return amount.(int) > index*1000 },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [4000 1500 5500]
}
// The following code example demonstrates how to use Sort
// to order elements of an slice.
func ExampleQuery_Sort() {
amounts := []int{5000, 2500, 9000, 8000, 6500, 4000, 1500, 5500}
var query []int
From(amounts).
Sort(
func(i interface{}, j interface{}) bool { return i.(int) < j.(int) },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [1500 2500 4000 5000 5500 6500 8000 9000]
}
// The following code example demonstrates how to use SumFloats
// to sum the values of a slice.
func ExampleQuery_SumFloats() {
numbers := []float64{43.68, 1.25, 583.7, 6.5}
sum := From(numbers).SumFloats()
fmt.Printf("The sum of the numbers is %f.", sum)
// Output:
// The sum of the numbers is 635.130000.
}
// The following code example demonstrates how to use SumInts
// to sum the values of a slice.
func ExampleQuery_SumInts() {
numbers := []int{43, 1, 583, 6}
sum := From(numbers).SumInts()
fmt.Printf("The sum of the numbers is %d.", sum)
// Output:
// The sum of the numbers is 633.
}
// The following code example demonstrates how to use SumUInts
// to sum the values of a slice.
func ExampleQuery_SumUInts() {
numbers := []uint{43, 1, 583, 6}
sum := From(numbers).SumUInts()
fmt.Printf("The sum of the numbers is %d.", sum)
// Output:
// The sum of the numbers is 633.
}
// The following code example demonstrates how to use Take
// to return elements from the start of a slice.
func ExampleQuery_Take() {
grades := []int{59, 82, 70, 56, 92, 98, 85}
var topThreeGrades []int
From(grades).
OrderByDescending(
func(grade interface{}) interface{} { return grade },
).
Take(3).
ToSlice(&topThreeGrades)
fmt.Printf("The top three grades are: %v", topThreeGrades)
// Output:
// The top three grades are: [98 92 85]
}
// The following code example demonstrates how to use TakeWhile
// to return elements from the start of a slice.
func ExampleQuery_TakeWhile() {
fruits := []string{"apple", "banana", "mango", "orange", "passionfruit", "grape"}
var query []string
From(fruits).
TakeWhile(
func(fruit interface{}) bool { return fruit.(string) != "orange" },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [apple banana mango]
}
// The following code example demonstrates how to use TakeWhileIndexed
// to return elements from the start of a slice as long as
// a condition that uses the element's index is true.
func ExampleQuery_TakeWhileIndexed() {
fruits := []string{"apple", "passionfruit", "banana", "mango",
"orange", "blueberry", "grape", "strawberry"}
var query []string
From(fruits).
TakeWhileIndexed(
func(index int, fruit interface{}) bool { return len(fruit.(string)) >= index },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [apple passionfruit banana mango orange blueberry]
}
// The following code example demonstrates how to use ToChannel
// to send a slice to a channel.
func ExampleQuery_ToChannel() {
c := make(chan interface{})
go func() {
Repeat(10, 3).ToChannel(c)
}()
for i := range c {
fmt.Println(i)
}
// Output:
// 10
// 10
// 10
}
// The following code example demonstrates how to use ToChannelT
// to send a slice to a typed channel.
func ExampleQuery_ToChannelT() {
c := make(chan string)
go Repeat("ten", 3).ToChannelT(c)
for i := range c {
fmt.Println(i)
}
// Output:
// ten
// ten
// ten
}
// The following code example demonstrates how to use ToMap to populate a map.
func ExampleQuery_ToMap() {
type Product struct {
Name string
Code int
}
products := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
map1 := map[int]string{}
From(products).
SelectT(
func(item Product) KeyValue { return KeyValue{Key: item.Code, Value: item.Name} },
).
ToMap(&map1)
fmt.Println(map1[4])
fmt.Println(map1[9])
fmt.Println(map1[12])
// Output:
// orange
// apple
// lemon
}
// The following code example demonstrates how to use ToMapBy
// by using a key and value selectors to populate a map.
func ExampleQuery_ToMapBy() {
input := [][]interface{}{{1, true}}
result := make(map[int]bool)
From(input).
ToMapBy(&result,
func(i interface{}) interface{} {
return i.([]interface{})[0]
},
func(i interface{}) interface{} {
return i.([]interface{})[1]
},
)
fmt.Println(result)
// Output:
// map[1:true]
}
// The following code example demonstrates how to use ToSlice to populate a slice.
func ExampleQuery_ToSlice() {
var result []int
Range(1, 10).ToSlice(&result)
fmt.Println(result)
// Output:
// [1 2 3 4 5 6 7 8 9 10]
}
// The following code example demonstrates how to use Union
// to obtain the union of two slices of integers.
func ExampleQuery_Union() {
q := Range(1, 10).Union(Range(6, 10))
fmt.Println(q.Results())
// Output:
// [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
}
// The following code example demonstrates how to use Where
// to filter a slices.
func ExampleQuery_Where() {
fruits := []string{"apple", "passionfruit", "banana", "mango",
"orange", "blueberry", "grape", "strawberry"}
var query []string
From(fruits).
Where(
func(f interface{}) bool { return len(f.(string)) > 6 },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [passionfruit blueberry strawberry]
}
// The following code example demonstrates how to use WhereIndexed
// to filter a slice based on a predicate that involves the index of each element.
func ExampleQuery_WhereIndexed() {
numbers := []int{0, 30, 20, 15, 90, 85, 40, 75}
var query []int
From(numbers).
WhereIndexed(
func(index int, number interface{}) bool { return number.(int) <= index*10 },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [0 20 15 40]
}
// The following code example demonstrates how to use the Zip
// method to merge two slices.
func ExampleQuery_Zip() {
number := []int{1, 2, 3, 4, 5}
words := []string{"one", "two", "three"}
q := From(number).
Zip(From(words),
func(a interface{}, b interface{}) interface{} { return []interface{}{a, b} },
)
fmt.Println(q.Results())
// Output:
// [[1 one] [2 two] [3 three]]
}
// The following code example demonstrates how to use ThenByDescendingT to perform
// a order in a slice of dates by year, and then by month descending.
func ExampleOrderedQuery_ThenByDescendingT() {
dates := []time.Time{
time.Date(2015, 3, 23, 0, 0, 0, 0, time.Local),
time.Date(2014, 7, 11, 0, 0, 0, 0, time.Local),
time.Date(2013, 5, 4, 0, 0, 0, 0, time.Local),
time.Date(2015, 1, 2, 0, 0, 0, 0, time.Local),
time.Date(2015, 7, 10, 0, 0, 0, 0, time.Local),
}
var orderedDates []time.Time
From(dates).
OrderByT(
func(date time.Time) int {
return date.Year()
}).
ThenByDescendingT(
func(date time.Time) int { return int(date.Month()) },
).
ToSlice(&orderedDates)
for _, date := range orderedDates {
fmt.Println(date.Format("2006-Jan-02"))
}
// Output:
// 2013-May-04
// 2014-Jul-11
// 2015-Jul-10
// 2015-Mar-23
// 2015-Jan-02
}
// The following code example demonstrates how to use ThenByT to perform
// a orders in a slice of dates by year, and then by day.
func ExampleOrderedQuery_ThenByT() {
dates := []time.Time{
time.Date(2015, 3, 23, 0, 0, 0, 0, time.Local),
time.Date(2014, 7, 11, 0, 0, 0, 0, time.Local),
time.Date(2013, 5, 4, 0, 0, 0, 0, time.Local),
time.Date(2015, 1, 2, 0, 0, 0, 0, time.Local),
time.Date(2015, 7, 10, 0, 0, 0, 0, time.Local),
}
var orderedDates []time.Time
From(dates).
OrderByT(
func(date time.Time) int { return date.Year() },
).
ThenByT(
func(date time.Time) int { return int(date.Day()) },
).
ToSlice(&orderedDates)
for _, date := range orderedDates {
fmt.Println(date.Format("2006-Jan-02"))
}
// Output:
// 2013-May-04
// 2014-Jul-11
// 2015-Jan-02
// 2015-Jul-10
// 2015-Mar-23
}
// The following code example demonstrates how to reverse
// the order of words in a string using AggregateT.
func ExampleQuery_AggregateT() {
sentence := "the quick brown fox jumps over the lazy dog"
// Split the string into individual words.
words := strings.Split(sentence, " ")
// Prepend each word to the beginning of the
// new sentence to reverse the word order.
reversed := From(words).AggregateT(
func(workingSentence string, next string) string { return next + " " + workingSentence },
)
fmt.Println(reversed)
// Output:
// dog lazy the over jumps fox brown quick the
}
// The following code example demonstrates how to use AggregateWithSeed function
func ExampleQuery_AggregateWithSeedT() {
fruits := []string{"apple", "mango", "orange", "passionfruit", "grape"}
// Determine whether any string in the array is longer than "banana".
longestName := From(fruits).
AggregateWithSeedT("banana",
func(longest, next string) string {
if len(next) > len(longest) {
return next
}
return longest
},
)
fmt.Printf("The fruit with the longest name is %s.", longestName)
// Output:
// The fruit with the longest name is passionfruit.
}
// The following code example demonstrates how to use AggregateWithSeedByT function
func ExampleQuery_AggregateWithSeedByT() {
input := []string{"apple", "mango", "orange", "passionfruit", "grape"}
// Determine whether any string in the array is longer than "banana".
longestName := From(input).AggregateWithSeedByT("banana",
func(longest string, next string) string {
if len(longest) > len(next) {
return longest
}
return next
},
// Return the final result
func(result string) string {
return fmt.Sprintf("The fruit with the longest name is %s.", result)
},
)
fmt.Println(longestName)
// Output:
// The fruit with the longest name is passionfruit.
}
// The following code example demonstrates how to use AllT
// to get the students having all marks greater than 70.
func ExampleQuery_AllT() {
type Student struct {
Name string
Marks []int
}
students := []Student{
{Name: "Hugo", Marks: []int{91, 88, 76, 93}},
{Name: "Rick", Marks: []int{70, 73, 66, 90}},
{Name: "Michael", Marks: []int{73, 80, 75, 88}},
{Name: "Fadi", Marks: []int{82, 75, 66, 84}},
{Name: "Peter", Marks: []int{67, 78, 70, 82}},
}
var approvedStudents []Student
From(students).
WhereT(
func(student Student) bool {
return From(student.Marks).
AllT(
func(mark int) bool { return mark > 70 },
)
},
).
ToSlice(&approvedStudents)
//List of approved students
for _, student := range approvedStudents {
fmt.Println(student.Name)
}
// Output:
// Hugo
// Michael
}
// The following code example demonstrates how to use AnyWithT
// to get the students with any mark lower than 70.
func ExampleQuery_AnyWithT() {
type Student struct {
Name string
Marks []int
}
students := []Student{
{Name: "Hugo", Marks: []int{91, 88, 76, 93}},
{Name: "Rick", Marks: []int{70, 73, 66, 90}},
{Name: "Michael", Marks: []int{73, 80, 75, 88}},
{Name: "Fadi", Marks: []int{82, 75, 66, 84}},
{Name: "Peter", Marks: []int{67, 78, 70, 82}},
}
var studentsWithAnyMarkLt70 []Student
From(students).
WhereT(
func(student Student) bool {
return From(student.Marks).
AnyWithT(
func(mark int) bool { return mark < 70 },
)
},
).
ToSlice(&studentsWithAnyMarkLt70)
//List of students with any mark lower than 70
for _, student := range studentsWithAnyMarkLt70 {
fmt.Println(student.Name)
}
// Output:
// Rick
// Fadi
// Peter
}
// The following code example demonstrates how to use CountWithT
// to count the elements in an slice that satisfy a condition.
func ExampleQuery_CountWithT() {
type Pet struct {
Name string
Vaccinated bool
}
pets := []Pet{
{Name: "Barley", Vaccinated: true},
{Name: "Boots", Vaccinated: false},
{Name: "Whiskers", Vaccinated: false},
}
numberUnvaccinated := From(pets).
CountWithT(
func(p Pet) bool { return p.Vaccinated == false },
)
fmt.Printf("There are %d unvaccinated animals.", numberUnvaccinated)
//Output:
//There are 2 unvaccinated animals.
}
// The following code example demonstrates how to use DistinctByT
// to return distinct elements from a slice of structs.
func ExampleQuery_DistinctByT() {
type Product struct {
Name string
Code int
}
products := []Product{
{Name: "apple", Code: 9},
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
{Name: "lemon", Code: 12},
}
//Exclude duplicates.
var noduplicates []Product
From(products).
DistinctByT(
func(item Product) int { return item.Code },
).
ToSlice(&noduplicates)
for _, product := range noduplicates {
fmt.Printf("%s %d\n", product.Name, product.Code)
}
// Output:
// apple 9
// orange 4
// lemon 12
}
// The following code example demonstrates how to use ExceptByT
func ExampleQuery_ExceptByT() {
type Product struct {
Name string
Code int
}
fruits1 := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
fruits2 := []Product{
{Name: "apple", Code: 9},
}
//Order and exclude duplicates.
var except []Product
From(fruits1).
ExceptByT(From(fruits2),
func(item Product) int { return item.Code },
).
ToSlice(&except)
for _, product := range except {
fmt.Printf("%s %d\n", product.Name, product.Code)
}
// Output:
// orange 4
// lemon 12
}
// The following code example demonstrates how to use FirstWithT
// to return the first element of an array that satisfies a condition.
func ExampleQuery_FirstWithT() {
numbers := []int{9, 34, 65, 92, 87, 435, 3, 54, 83, 23, 87, 435, 67, 12, 19}
first := From(numbers).
FirstWithT(
func(item int) bool { return item > 80 },
)
fmt.Println(first)
// Output:
// 92
}
// The following code example demonstrates how to use ForEach
// to output all elements of an array.
func ExampleQuery_ForEach() {
fruits := []string{"orange", "apple", "lemon", "apple"}
From(fruits).ForEach(func(fruit interface{}) {
fmt.Println(fruit)
})
// Output:
// orange
// apple
// lemon
// apple
}
// The following code example demonstrates how to use ForEachIndexed
// to output all elements of an array with its index.
func ExampleQuery_ForEachIndexed() {
fruits := []string{"orange", "apple", "lemon", "apple"}
From(fruits).ForEachIndexed(func(i int, fruit interface{}) {
fmt.Printf("%d.%s\n", i, fruit)
})
// Output:
// 0.orange
// 1.apple
// 2.lemon
// 3.apple
}
// The following code example demonstrates how to use ForEachT
// to output all elements of an array.
func ExampleQuery_ForEachT() {
fruits := []string{"orange", "apple", "lemon", "apple"}
From(fruits).ForEachT(func(fruit string) {
fmt.Println(fruit)
})
// Output:
// orange
// apple
// lemon
// apple
}
// The following code example demonstrates how to use ForEachIndexedT
// to output all elements of an array with its index.
func ExampleQuery_ForEachIndexedT() {
fruits := []string{"orange", "apple", "lemon", "apple"}
From(fruits).ForEachIndexedT(func(i int, fruit string) {
fmt.Printf("%d.%s\n", i, fruit)
})
// Output:
// 0.orange
// 1.apple
// 2.lemon
// 3.apple
}
// The following code example demonstrates how to use GroupByT
// to group the elements of a slice.
func ExampleQuery_GroupByT() {
type Pet struct {
Name string
Age int
}
// Create a list of pets.
pets := []Pet{
{Name: "Barley", Age: 8},
{Name: "Boots", Age: 4},
{Name: "Whiskers", Age: 1},
{Name: "Daisy", Age: 4},
}
// Group the pets using Age as the key value
// and selecting only the pet's Name for each value.
var query []Group
From(pets).GroupByT(
func(p Pet) int { return p.Age },
func(p Pet) string { return p.Name },
).OrderByT(
func(g Group) int { return g.Key.(int) },
).ToSlice(&query)
for _, petGroup := range query {
fmt.Printf("%d\n", petGroup.Key)
for _, petName := range petGroup.Group {
fmt.Printf(" %s\n", petName)
}
}
// Output:
// 1
// Whiskers
// 4
// Boots
// Daisy
// 8
// Barley
}
// The following code example demonstrates how to use GroupJoinT
// to perform a grouped join on two slices.
func ExampleQuery_GroupJoinT() {
type Person struct {
Name string
}
type Pet struct {
Name string
Owner Person
}
magnus := Person{Name: "Hedlund, Magnus"}
terry := Person{Name: "Adams, Terry"}
charlotte := Person{Name: "Weiss, Charlotte"}
barley := Pet{Name: "Barley", Owner: terry}
boots := Pet{Name: "Boots", Owner: terry}
whiskers := Pet{Name: "Whiskers", Owner: charlotte}
daisy := Pet{Name: "Daisy", Owner: magnus}
people := []Person{magnus, terry, charlotte}
pets := []Pet{barley, boots, whiskers, daisy}
// Create a slice where each element is a KeyValue
// that contains a person's name as the key and a slice of strings
// of names of the pets they own as a value.
q := []KeyValue{}
From(people).
GroupJoinT(From(pets),
func(p Person) Person { return p },
func(p Pet) Person { return p.Owner },
func(person Person, pets []Pet) KeyValue {
var petNames []string
From(pets).
SelectT(
func(pet Pet) string { return pet.Name },
).
ToSlice(&petNames)
return KeyValue{person.Name, petNames}
},
).ToSlice(&q)
for _, obj := range q {
// Output the owner's name.
fmt.Printf("%s:\n", obj.Key)
// Output each of the owner's pet's names.
for _, petName := range obj.Value.([]string) {
fmt.Printf(" %s\n", petName)
}
}
// Output:
// Hedlund, Magnus:
// Daisy
// Adams, Terry:
// Barley
// Boots
// Weiss, Charlotte:
// Whiskers
}
// The following code example demonstrates how to use IntersectByT
// to return the elements that appear in each of two slices of products
// with same Code.
func ExampleQuery_IntersectByT() {
type Product struct {
Name string
Code int
}
store1 := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
}
store2 := []Product{
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
var duplicates []Product
From(store1).
IntersectByT(From(store2),
func(p Product) int { return p.Code },
).
ToSlice(&duplicates)
for _, p := range duplicates {
fmt.Println(p.Name, "", p.Code)
}
// Output:
// apple 9
}
// The following code example demonstrates how to use JoinT
// to perform an inner join of two slices based on a common key.
func ExampleQuery_JoinT() {
type Person struct {
Name string
}
type Pet struct {
Name string
Owner Person
}
magnus := Person{Name: "Hedlund, Magnus"}
terry := Person{Name: "Adams, Terry"}
charlotte := Person{Name: "Weiss, Charlotte"}
barley := Pet{Name: "Barley", Owner: terry}
boots := Pet{Name: "Boots", Owner: terry}
whiskers := Pet{Name: "Whiskers", Owner: charlotte}
daisy := Pet{Name: "Daisy", Owner: magnus}
people := []Person{magnus, terry, charlotte}
pets := []Pet{barley, boots, whiskers, daisy}
// Create a list of Person-Pet pairs where
// each element is an anonymous type that contains a
// Pet's name and the name of the Person that owns the Pet.
query := []string{}
From(people).
JoinT(From(pets),
func(person Person) Person { return person },
func(pet Pet) Person { return pet.Owner },
func(person Person, pet Pet) string { return fmt.Sprintf("%s - %s", person.Name, pet.Name) },
).ToSlice(&query)
for _, line := range query {
fmt.Println(line)
}
//Output:
//Hedlund, Magnus - Daisy
//Adams, Terry - Barley
//Adams, Terry - Boots
//Weiss, Charlotte - Whiskers
}
// The following code example demonstrates how to use LastWithT
// to return the last element of an array.
func ExampleQuery_LastWithT() {
numbers := []int{9, 34, 65, 92, 87, 435, 3, 54,
83, 23, 87, 67, 12, 19}
last := From(numbers).
LastWithT(
func(n int) bool { return n > 80 },
)
fmt.Println(last)
//Output:
//87
}
// The following code example demonstrates how to use OrderByDescendingT
// to order an slice.
func ExampleQuery_OrderByDescendingT() {
type Player struct {
Name string
Points int64
}
players := []Player{
{Name: "Hugo", Points: 4757},
{Name: "Rick", Points: 7365},
{Name: "Michael", Points: 2857},
{Name: "Fadi", Points: 85897},
{Name: "Peter", Points: 48576},
}
//Order and get the top 3 players
var top3Players []KeyValue
From(players).
OrderByDescendingT(
func(p Player) int64 { return p.Points },
).
Take(3).
SelectIndexedT(
func(i int, p Player) KeyValue { return KeyValue{Key: i + 1, Value: p} },
).
ToSlice(&top3Players)
for _, rank := range top3Players {
fmt.Printf(
"Rank: #%d - Player: %s - Points: %d\n",
rank.Key,
rank.Value.(Player).Name,
rank.Value.(Player).Points,
)
}
// Output:
// Rank: #1 - Player: Fadi - Points: 85897
// Rank: #2 - Player: Peter - Points: 48576
// Rank: #3 - Player: Rick - Points: 7365
}
// The following code example demonstrates how to use OrderByT
// to sort the elements of a slice.
func ExampleQuery_OrderByT() {
type Pet struct {
Name string
Age int
}
// Create a list of pets.
pets := []Pet{
{Name: "Barley", Age: 8},
{Name: "Boots", Age: 4},
{Name: "Whiskers", Age: 1},
{Name: "Daisy", Age: 4},
}
var orderedPets []Pet
From(pets).
OrderByT(
func(pet Pet) int { return pet.Age },
).
ToSlice(&orderedPets)
for _, pet := range orderedPets {
fmt.Println(pet.Name, "-", pet.Age)
}
// Output:
// Whiskers - 1
// Boots - 4
// Daisy - 4
// Barley - 8
}
// The following code example demonstrates how to use SelectT
// to project over a slice.
func ExampleQuery_SelectT() {
squares := []int{}
Range(1, 10).
SelectT(
func(x int) int { return x * x },
).
ToSlice(&squares)
fmt.Println(squares)
// Output:
// [1 4 9 16 25 36 49 64 81 100]
}
// The following code example demonstrates how to use SelectIndexedT
// to determine if the value in a slice of int match their position
// in the slice.
func ExampleQuery_SelectIndexedT() {
numbers := []int{5, 4, 1, 3, 9, 8, 6, 7, 2, 0}
var numsInPlace []KeyValue
From(numbers).
SelectIndexedT(
func(index, num int) KeyValue { return KeyValue{Key: num, Value: (num == index)} },
).
ToSlice(&numsInPlace)
fmt.Println("Number: In-place?")
for _, n := range numsInPlace {
fmt.Printf("%d: %t\n", n.Key, n.Value)
}
// Output:
// Number: In-place?
// 5: false
// 4: false
// 1: false
// 3: true
// 9: false
// 8: false
// 6: true
// 7: true
// 2: false
// 0: false
}
// The following code example demonstrates how to use SelectManyT
// to perform a one-to-many projection over a slice
func ExampleQuery_SelectManyByT() {
type Pet struct {
Name string
}
type Person struct {
Name string
Pets []Pet
}
magnus := Person{
Name: "Hedlund, Magnus",
Pets: []Pet{{Name: "Daisy"}},
}
terry := Person{
Name: "Adams, Terry",
Pets: []Pet{{Name: "Barley"}, {Name: "Boots"}},
}
charlotte := Person{
Name: "Weiss, Charlotte",
Pets: []Pet{{Name: "Whiskers"}},
}
people := []Person{magnus, terry, charlotte}
var results []string
From(people).
SelectManyByT(
func(person Person) Query { return From(person.Pets) },
func(pet Pet, person Person) interface{} {
return fmt.Sprintf("Owner: %s, Pet: %s", person.Name, pet.Name)
},
).
ToSlice(&results)
for _, result := range results {
fmt.Println(result)
}
// Output:
// Owner: Hedlund, Magnus, Pet: Daisy
// Owner: Adams, Terry, Pet: Barley
// Owner: Adams, Terry, Pet: Boots
// Owner: Weiss, Charlotte, Pet: Whiskers
}
// The following code example demonstrates how to use SelectManyT
// to perform a projection over a list of sentences and rank the
// top 5 most used words
func ExampleQuery_SelectManyT() {
sentences := []string{
"the quick brown fox jumps over the lazy dog",
"pack my box with five dozen liquor jugs",
"several fabulous dixieland jazz groups played with quick tempo",
"back in my quaint garden jaunty zinnias vie with flaunting phlox",
"five or six big jet planes zoomed quickly by the new tower",
"I quickly explained that many big jobs involve few hazards",
"The wizard quickly jinxed the gnomes before they vaporized",
}
var results []string
From(sentences).
//Split the sentences in words
SelectManyT(func(sentence string) Query {
return From(strings.Split(sentence, " "))
}).
//Grouping by word
GroupByT(
func(word string) string { return word },
func(word string) string { return word },
).
//Ordering by word counts
OrderByDescendingT(func(wordGroup Group) int {
return len(wordGroup.Group)
}).
//Then order by word
ThenByT(func(wordGroup Group) string {
return wordGroup.Key.(string)
}).
//Take the top 5
Take(5).
//Project the words using the index as rank
SelectIndexedT(func(index int, wordGroup Group) string {
return fmt.Sprintf("Rank: #%d, Word: %s, Counts: %d", index+1, wordGroup.Key, len(wordGroup.Group))
}).
ToSlice(&results)
for _, result := range results {
fmt.Println(result)
}
// Output:
// Rank: #1, Word: the, Counts: 4
// Rank: #2, Word: quickly, Counts: 3
// Rank: #3, Word: with, Counts: 3
// Rank: #4, Word: big, Counts: 2
// Rank: #5, Word: five, Counts: 2
}
// The following code example demonstrates how to use SelectManyIndexedT
// to perform a one-to-many projection over an slice of log files and
// print out their contents.
func ExampleQuery_SelectManyIndexedT() {
type LogFile struct {
Name string
Lines []string
}
file1 := LogFile{
Name: "file1.log",
Lines: []string{
"INFO: 2013/11/05 18:11:01 main.go:44: Special Information",
"WARNING: 2013/11/05 18:11:01 main.go:45: There is something you need to know about",
"ERROR: 2013/11/05 18:11:01 main.go:46: Something has failed",
},
}
file2 := LogFile{
Name: "file2.log",
Lines: []string{
"INFO: 2013/11/05 18:11:01 main.go:46: Everything is ok",
},
}
file3 := LogFile{
Name: "file3.log",
Lines: []string{
"2013/11/05 18:42:26 Hello World",
},
}
logFiles := []LogFile{file1, file2, file3}
var results []string
From(logFiles).
SelectManyIndexedT(func(fileIndex int, file LogFile) Query {
return From(file.Lines).
SelectIndexedT(func(lineIndex int, line string) string {
return fmt.Sprintf("File:[%d] - %s => line: %d - %s", fileIndex+1, file.Name, lineIndex+1, line)
})
}).
ToSlice(&results)
for _, result := range results {
fmt.Println(result)
}
// Output:
// File:[1] - file1.log => line: 1 - INFO: 2013/11/05 18:11:01 main.go:44: Special Information
// File:[1] - file1.log => line: 2 - WARNING: 2013/11/05 18:11:01 main.go:45: There is something you need to know about
// File:[1] - file1.log => line: 3 - ERROR: 2013/11/05 18:11:01 main.go:46: Something has failed
// File:[2] - file2.log => line: 1 - INFO: 2013/11/05 18:11:01 main.go:46: Everything is ok
// File:[3] - file3.log => line: 1 - 2013/11/05 18:42:26 Hello World
}
// The following code example demonstrates how to use SelectManyByIndexedT
// to perform a one-to-many projection over an array and use the index of
// each outer element.
func ExampleQuery_SelectManyByIndexedT() {
type Pet struct {
Name string
}
type Person struct {
Name string
Pets []Pet
}
magnus := Person{
Name: "Hedlund, Magnus",
Pets: []Pet{{Name: "Daisy"}},
}
terry := Person{
Name: "Adams, Terry",
Pets: []Pet{{Name: "Barley"}, {Name: "Boots"}},
}
charlotte := Person{
Name: "Weiss, Charlotte",
Pets: []Pet{{Name: "Whiskers"}},
}
people := []Person{magnus, terry, charlotte}
var results []string
From(people).
SelectManyByIndexedT(
func(index int, person Person) Query {
return From(person.Pets).
SelectT(func(pet Pet) string {
return fmt.Sprintf("%d - %s", index, pet.Name)
})
},
func(indexedPet string, person Person) string {
return fmt.Sprintf("Pet: %s, Owner: %s", indexedPet, person.Name)
},
).
ToSlice(&results)
for _, result := range results {
fmt.Println(result)
}
// Output:
// Pet: 0 - Daisy, Owner: Hedlund, Magnus
// Pet: 1 - Barley, Owner: Adams, Terry
// Pet: 1 - Boots, Owner: Adams, Terry
// Pet: 2 - Whiskers, Owner: Weiss, Charlotte
}
//The following code example demonstrates how to use SingleWithT
// to select the only element of a slice that satisfies a condition.
func ExampleQuery_SingleWithT() {
fruits := []string{"apple", "banana", "mango", "orange", "passionfruit", "grape"}
fruit := From(fruits).
SingleWithT(
func(f string) bool { return len(f) > 10 },
)
fmt.Println(fruit)
// Output:
// passionfruit
}
// The following code example demonstrates how to use SkipWhileT
// to skip elements of an array as long as a condition is true.
func ExampleQuery_SkipWhileT() {
grades := []int{59, 82, 70, 56, 92, 98, 85}
var lowerGrades []int
From(grades).
OrderByDescendingT(
func(g int) int { return g },
).
SkipWhileT(
func(g int) bool { return g >= 80 },
).
ToSlice(&lowerGrades)
//"All grades below 80:
fmt.Println(lowerGrades)
// Output:
// [70 59 56]
}
// The following code example demonstrates how to use SkipWhileIndexedT
// to skip elements of an array as long as a condition that depends
// on the element's index is true.
func ExampleQuery_SkipWhileIndexedT() {
amounts := []int{5000, 2500, 9000, 8000, 6500, 4000, 1500, 5500}
var query []int
From(amounts).
SkipWhileIndexedT(
func(index int, amount int) bool { return amount > index*1000 },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [4000 1500 5500]
}
// The following code example demonstrates how to use SortT
// to order elements of an slice.
func ExampleQuery_SortT() {
type Pet struct {
Name string
Age int
}
// Create a list of pets.
pets := []Pet{
{Name: "Barley", Age: 8},
{Name: "Boots", Age: 4},
{Name: "Whiskers", Age: 1},
{Name: "Daisy", Age: 4},
}
orderedPets := []Pet{}
From(pets).
SortT(
func(pet1 Pet, pet2 Pet) bool { return pet1.Age < pet2.Age },
).
ToSlice(&orderedPets)
for _, pet := range orderedPets {
fmt.Println(pet.Name, "-", pet.Age)
}
// Output:
// Whiskers - 1
// Boots - 4
// Daisy - 4
// Barley - 8
}
// The following code example demonstrates how to use TakeWhileT
// to return elements from the start of a slice.
func ExampleQuery_TakeWhileT() {
fruits := []string{"apple", "banana", "mango", "orange", "passionfruit", "grape"}
var query []string
From(fruits).
TakeWhileT(
func(fruit string) bool { return fruit != "orange" },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [apple banana mango]
}
// The following code example demonstrates how to use TakeWhileIndexedT
// to return elements from the start of a slice as long asa condition
// that uses the element's index is true.
func ExampleQuery_TakeWhileIndexedT() {
fruits := []string{"apple", "passionfruit", "banana", "mango",
"orange", "blueberry", "grape", "strawberry"}
var query []string
From(fruits).
TakeWhileIndexedT(
func(index int, fruit string) bool { return len(fruit) >= index },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [apple passionfruit banana mango orange blueberry]
}
// The following code example demonstrates how to use ToMapBy
// by using a key and value selectors to populate a map.
func ExampleQuery_ToMapByT() {
type Product struct {
Name string
Code int
}
products := []Product{
{Name: "orange", Code: 4},
{Name: "apple", Code: 9},
{Name: "lemon", Code: 12},
{Name: "apple", Code: 9},
}
map1 := map[int]string{}
From(products).
ToMapByT(&map1,
func(item Product) int { return item.Code },
func(item Product) string { return item.Name },
)
fmt.Println(map1[4])
fmt.Println(map1[9])
fmt.Println(map1[12])
// Output:
// orange
// apple
// lemon
}
// The following code example demonstrates how to use WhereT
// to filter a slices.
func ExampleQuery_WhereT() {
fruits := []string{"apple", "passionfruit", "banana", "mango",
"orange", "blueberry", "grape", "strawberry"}
var query []string
From(fruits).
WhereT(
func(f string) bool { return len(f) > 6 },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [passionfruit blueberry strawberry]
}
// The following code example demonstrates how to use WhereIndexedT
// to filter a slice based on a predicate that involves the index of each element.
func ExampleQuery_WhereIndexedT() {
numbers := []int{0, 30, 20, 15, 90, 85, 40, 75}
var query []int
From(numbers).
WhereIndexedT(
func(index int, number int) bool { return number <= index*10 },
).
ToSlice(&query)
fmt.Println(query)
// Output:
// [0 20 15 40]
}
// The following code example demonstrates how to use the ZipT
// method to merge two slices.
func ExampleQuery_ZipT() {
number := []int{1, 2, 3, 4, 5}
words := []string{"one", "two", "three"}
q := From(number).
ZipT(From(words),
func(a int, b string) []interface{} { return []interface{}{a, b} },
)
fmt.Println(q.Results())
// Output:
// [[1 one] [2 two] [3 three]]
}
// The following code example demonstrates how to use the FromChannelT
// to make a Query from typed channel.
func ExampleFromChannelT() {
ch := make(chan string, 3)
ch <- "one"
ch <- "two"
ch <- "three"
close(ch)
q := FromChannelT(ch)
fmt.Println(q.Results())
// Output:
// [one two three]
}
type TestModel struct {
Name string
Age int
Birth time.Time
Books []TestBook
}
type TestBook struct {
BookName string
BookPrice float64
}
func mockTestModelData() []*TestModel {
mockNames := []string{"niko", "mark", "shelly", "jack", "roman", "alisa", "alisa"}
mockData := make([]*TestModel, 0, len(mockNames))
for _, name := range mockNames {
model := &TestModel{}
model.Name = name
model.Age = mockAge()
model.Birth = time.Now().AddDate(model.Age*-1, 0, 0)
books := make([]TestBook, 0, 2)
books = append(books, TestBook{
BookName: "代码的整洁之道",
BookPrice: 90,
})
books = append(books, TestBook{
BookName: "程序员的自我修养",
BookPrice: 90,
})
model.Books = books
mockData = append(mockData, model)
}
//for i := 0; i < 500; i++ {
// for _, name := range mockNames {
// model := &TestModel{}
// model.Name = name
// model.Age = mockAge()
// model.Birth = time.Now().AddDate(model.Age*-1, 0, 0)
//
// books := make([]TestBook, 0, 2)
//
// books = append(books, TestBook{
// BookName: "代码的整洁之道",
// BookPrice: 90,
// })
//
// books = append(books, TestBook{
// BookName: "程序员的自我修养",
// BookPrice: 90,
// })
//
// model.Books = books
//
// mockData = append(mockData, model)
// }
//}
return mockData
}
func mockAge() int {
rand.Seed(time.Now().UnixNano())
randNum := rand.Intn(50)
return randNum
}
var data = mockTestModelData()
func TestWhereAndSelect(t *testing.T) {
// 筛选name为mark的获取books的第一条
result := From(data).Where(func(i interface{}) bool {
return i.(*TestModel).Name == "mark"
}).Select(func(i interface{}) interface{} {
return i.(*TestModel).Books
}).First()
m := result.([]TestBook)
for _, s := range m {
t.Log(s.BookName)
t.Log(s.BookPrice)
}
}
| ExampleQuery |
sanitycheck_test.go | /*
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sanitycheck
import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/common/cauthdsl"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/tools/configtxgen/configtxgentest"
"github.com/hyperledger/fabric/common/tools/configtxgen/encoder"
genesisconfig "github.com/hyperledger/fabric/common/tools/configtxgen/localconfig"
cb "github.com/hyperledger/fabric/protos/common"
mspprotos "github.com/hyperledger/fabric/protos/msp"
"github.com/hyperledger/fabric/protos/utils"
"github.com/stretchr/testify/assert"
)
var (
insecureConfig *cb.Config
singleMSPConfig *cb.Config
)
func init() {
factory.InitFactories(nil)
insecureChannelGroup, err := encoder.NewChannelGroup(configtxgentest.Load(genesisconfig.SampleInsecureSoloProfile))
if err != nil {
panic(err)
}
insecureConfig = &cb.Config{ChannelGroup: insecureChannelGroup}
singleMSPChannelGroup, err := encoder.NewChannelGroup(configtxgentest.Load(genesisconfig.SampleSingleMSPSoloProfile))
if err != nil {
panic(err)
}
singleMSPConfig = &cb.Config{ChannelGroup: singleMSPChannelGroup}
}
func TestSimpleCheck(t *testing.T) {
result, err := Check(insecureConfig)
assert.NoError(t, err, "Simple empty config")
assert.Equal(t, &Messages{}, result)
}
func TestOneMSPCheck(t *testing.T) {
result, err := Check(singleMSPConfig)
assert.NoError(t, err, "Simple single MSP config")
assert.Equal(t, &Messages{}, result)
}
func TestEmptyConfigCheck(t *testing.T) {
result, err := Check(&cb.Config{})
assert.NoError(t, err, "Simple single MSP config")
assert.Empty(t, result.ElementErrors)
assert.Empty(t, result.ElementWarnings)
assert.NotEmpty(t, result.GeneralErrors)
}
func TestWrongMSPID(t *testing.T) {
localConfig := proto.Clone(insecureConfig).(*cb.Config)
policyName := "foo"
localConfig.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Policies[policyName] = &cb.ConfigPolicy{
Policy: &cb.Policy{
Type: int32(cb.Policy_SIGNATURE),
Value: utils.MarshalOrPanic(cauthdsl.SignedByMspAdmin("MissingOrg")),
},
}
result, err := Check(localConfig)
assert.NoError(t, err, "Simple empty config")
assert.Empty(t, result.GeneralErrors)
assert.Empty(t, result.ElementErrors)
assert.Len(t, result.ElementWarnings, 1)
assert.Equal(t, ".groups."+channelconfig.OrdererGroupKey+".policies."+policyName, result.ElementWarnings[0].Path)
}
func TestCorruptRolePrincipal(t *testing.T) |
func TestCorruptOUPrincipal(t *testing.T) {
localConfig := proto.Clone(insecureConfig).(*cb.Config)
policyName := "foo"
sigPolicy := cauthdsl.SignedByMspAdmin("MissingOrg")
sigPolicy.Identities[0].PrincipalClassification = mspprotos.MSPPrincipal_ORGANIZATION_UNIT
sigPolicy.Identities[0].Principal = []byte("garbage which corrupts the evaluation")
localConfig.ChannelGroup.Policies[policyName] = &cb.ConfigPolicy{
Policy: &cb.Policy{
Type: int32(cb.Policy_SIGNATURE),
Value: utils.MarshalOrPanic(sigPolicy),
},
}
result, err := Check(localConfig)
assert.NoError(t, err, "Simple empty config")
assert.Empty(t, result.GeneralErrors)
assert.Empty(t, result.ElementWarnings)
assert.Len(t, result.ElementErrors, 1)
assert.Equal(t, ".policies."+policyName, result.ElementErrors[0].Path)
}
| {
localConfig := proto.Clone(insecureConfig).(*cb.Config)
policyName := "foo"
sigPolicy := cauthdsl.SignedByMspAdmin("MissingOrg")
sigPolicy.Identities[0].Principal = []byte("garbage which corrupts the evaluation")
localConfig.ChannelGroup.Policies[policyName] = &cb.ConfigPolicy{
Policy: &cb.Policy{
Type: int32(cb.Policy_SIGNATURE),
Value: utils.MarshalOrPanic(sigPolicy),
},
}
result, err := Check(localConfig)
assert.NoError(t, err, "Simple empty config")
assert.Empty(t, result.GeneralErrors)
assert.Empty(t, result.ElementWarnings)
assert.Len(t, result.ElementErrors, 1)
assert.Equal(t, ".policies."+policyName, result.ElementErrors[0].Path)
} |
issue-21974.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that (for now) we report an ambiguity error here, because
// specific trait relationships are ignored for the purposes of trait
// matching. This behavior should likely be improved such that this
// test passes. See #21974 for more details.
trait Foo {
fn foo(self);
}
fn foo<'a,'b,T>(x: &'a T, y: &'b T)
where &'a T : Foo,
&'b T : Foo
|
fn main() { }
| {
x.foo(); //~ ERROR type annotations required
y.foo();
} |
source.rs | #[derive(Serialize, Deserialize, Debug)]
pub struct Text {
// This is needed to workaround #955 in compiler-explorer where it
// may return objects without text field.
#[serde(default)]
pub text: String, | #[derive(Serialize, Deserialize, Debug, Default)]
pub struct ExecResult {
pub code: i32,
pub stdout: Vec<Text>,
pub stderr: Vec<Text>,
#[serde(rename = "didExecute")]
pub did_execute: bool,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Output {
pub code: i32,
pub stderr: Vec<Text>,
pub asm: Vec<Text>,
#[serde(default, rename = "execResult")]
pub exec_result: ExecResult,
} | }
|
stopper_test.go | // Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Spencer Kimball ([email protected])
package stop_test
import (
"testing"
"time"
"github.com/cockroachdb/cockroach/util"
_ "github.com/cockroachdb/cockroach/util/log" // for flags
"github.com/cockroachdb/cockroach/util/stop"
)
func TestStopper(t *testing.T) {
s := stop.NewStopper()
running := make(chan struct{})
waiting := make(chan struct{})
s.RunWorker(func() {
<-running
})
go func() {
<-s.ShouldStop()
select {
case <-waiting:
t.Fatal("expected stopper to have blocked")
case <-time.After(1 * time.Millisecond):
// Expected.
}
close(running)
select {
case <-waiting:
// Success.
case <-time.After(100 * time.Millisecond):
t.Fatal("stopper should have finished waiting")
}
}()
s.Stop()
close(waiting)
}
type blockingCloser struct {
block chan struct{}
}
func newBlockingCloser() *blockingCloser {
return &blockingCloser{block: make(chan struct{})}
}
func (bc *blockingCloser) Unblock() {
close(bc.block)
}
func (bc *blockingCloser) Close() {
<-bc.block
}
func TestStopperIsStopped(t *testing.T) {
s := stop.NewStopper()
bc := newBlockingCloser()
s.AddCloser(bc)
go s.Stop()
select {
case <-s.ShouldStop():
case <-time.After(100 * time.Millisecond):
t.Fatal("stopper should have finished waiting")
}
select {
case <-s.IsStopped():
t.Fatal("expected blocked closer to prevent stop")
case <-time.After(1 * time.Millisecond):
// Expected.
}
bc.Unblock()
select {
case <-s.IsStopped():
// Expected
case <-time.After(100 * time.Millisecond):
t.Fatal("stopper should have finished stopping")
}
}
func TestStopperMultipleStopees(t *testing.T) {
const count = 3
s := stop.NewStopper()
for i := 0; i < count; i++ {
s.RunWorker(func() {
<-s.ShouldStop()
})
}
done := make(chan struct{})
go func() {
s.Stop()
close(done)
}()
select {
case <-done:
case <-time.After(10 * time.Millisecond):
t.Errorf("timed out waiting for stop")
}
}
func TestStopperStartFinishTasks(t *testing.T) {
s := stop.NewStopper()
if !s.RunTask(func() {
go s.Stop()
select {
case <-s.ShouldStop():
t.Fatal("expected stopper to be draining")
case <-time.After(1 * time.Millisecond):
// Expected.
} | case <-s.ShouldStop():
// Success.
case <-time.After(100 * time.Millisecond):
t.Fatal("stopper should be ready to stop")
}
}
func TestStopperRunWorker(t *testing.T) {
s := stop.NewStopper()
s.RunWorker(func() {
select {
case <-s.ShouldStop():
return
}
})
closer := make(chan struct{})
go func() {
s.Stop()
close(closer)
}()
select {
case <-closer:
// Success.
case <-time.After(100 * time.Millisecond):
t.Fatal("stopper should be ready to stop")
}
}
// TestStopperQuiesce tests coordinate drain with Quiesce.
func TestStopperQuiesce(t *testing.T) {
var stoppers []*stop.Stopper
for i := 0; i < 3; i++ {
stoppers = append(stoppers, stop.NewStopper())
}
var quiesceDone []chan struct{}
var runTaskDone []chan struct{}
for _, s := range stoppers {
qc := make(chan struct{})
quiesceDone = append(quiesceDone, qc)
sc := make(chan struct{})
runTaskDone = append(runTaskDone, sc)
s.RunWorker(func() {
// Wait until Quiesce() is called.
<-qc
if s.RunTask(func() {}) {
t.Error("expected RunTask to fail")
}
// Make the stoppers call Stop().
close(sc)
<-s.ShouldStop()
})
}
done := make(chan struct{})
go func() {
for _, s := range stoppers {
s.Quiesce()
}
// Make the tasks call RunTask().
for _, qc := range quiesceDone {
close(qc)
}
// Wait until RunTask() is called.
for _, sc := range runTaskDone {
<-sc
}
close(done)
}()
select {
case <-done:
case <-time.After(10 * time.Millisecond):
t.Errorf("timed out waiting for stop")
}
}
type testCloser bool
func (tc *testCloser) Close() {
*tc = true
}
func TestStopperClosers(t *testing.T) {
s := stop.NewStopper()
var tc1, tc2 testCloser
s.AddCloser(&tc1)
s.AddCloser(&tc2)
s.Stop()
if bool(tc1) != true || bool(tc2) != true {
t.Errorf("expected true & true; got %t & %t", tc1, tc2)
}
}
func TestStopperNumTasks(t *testing.T) {
s := stop.NewStopper()
var tasks []chan bool
for i := 0; i < 3; i++ {
c := make(chan bool)
tasks = append(tasks, c)
s.RunAsyncTask(func() {
// Wait for channel to close
<-c
})
tm := s.RunningTasks()
if numTypes, numTasks := len(tm), s.NumTasks(); numTypes != 1 || numTasks != i+1 {
t.Errorf("stopper should have %d running tasks, got %d / %+v", i+1, numTasks, tm)
}
m := s.RunningTasks()
if len(m) != 1 {
t.Fatalf("expected exactly one task map entry: %+v", m)
}
for _, v := range m {
if expNum := len(tasks); v != expNum {
t.Fatalf("%d: expected %d tasks, got %d", i, expNum, v)
}
}
}
for i, c := range tasks {
m := s.RunningTasks()
if len(m) != 1 {
t.Fatalf("%d: expected exactly one task map entry: %+v", i, m)
}
for _, v := range m {
if expNum := len(tasks[i:]); v != expNum {
t.Fatalf("%d: expected %d tasks, got %d:\n%s", i, expNum, v, m)
}
}
// Close the channel to let the task proceed.
close(c)
expNum := len(tasks[i+1:])
err := util.IsTrueWithin(func() bool { return s.NumTasks() == expNum }, 20*time.Millisecond)
if err != nil {
t.Errorf("%d: stopper should have %d running tasks, got %d", i, expNum, s.NumTasks())
}
}
// The taskmap should've been cleared out.
if m := s.RunningTasks(); len(m) != 0 {
t.Fatalf("task map not empty: %+v", m)
}
s.Stop()
}
// TestStopperRunTaskPanic ensures that tasks are not leaked when they panic.
// RunAsyncTask has a similar bit of logic, but it is not testable because
// we cannot insert a recover() call in the right place.
func TestStopperRunTaskPanic(t *testing.T) {
s := stop.NewStopper()
// If RunTask were not panic-safe, Stop() would deadlock.
defer s.Stop()
func() {
defer func() {
_ = recover()
}()
s.RunTask(func() {
panic("ouch")
})
}()
} | }) {
t.Error("expected RunTask to succeed")
}
select { |
material.module.ts | import { NgModule } from '@angular/core';
import { MatToolbarModule } from '@angular/material/toolbar';
import { MatIconModule } from '@angular/material/icon';
import { MatButtonModule } from '@angular/material/button';
import { MatSidenavModule } from '@angular/material/sidenav';
import { MatListModule } from '@angular/material/list';
import { MatGridListModule } from '@angular/material/grid-list';
import { MatFormFieldModule } from '@angular/material/form-field';
import { MatInputModule } from '@angular/material/input';
import { MatSelectModule } from '@angular/material/select';
import { MatTableModule } from '@angular/material/table';
import { MatSortModule } from '@angular/material/sort';
@NgModule({
exports: [
MatToolbarModule,
MatIconModule,
MatButtonModule,
MatSidenavModule,
MatListModule,
MatGridListModule,
MatFormFieldModule,
MatInputModule,
MatSelectModule,
MatTableModule,
MatSortModule,
],
})
export class | {}
| LoginMaterials |
name.rs | use crate::color::{ColoredString, Colors, Elem};
use crate::icon::Icons;
use crate::meta::filetype::FileType;
use std::cmp::{Ordering, PartialOrd};
use std::path::Path;
#[derive(Clone, Debug, Eq)]
pub struct Name {
pub name: String,
path: String,
extension: Option<String>,
file_type: FileType,
}
impl Name {
pub fn new(path: &Path, file_type: FileType) -> Self {
let name = match path.file_name() {
Some(name) => name.to_string_lossy().to_string(),
None => path.to_string_lossy().to_string(),
};
let mut extension = None;
if let Some(res) = path.extension() {
extension = Some(
res.to_str()
.expect("failed to encode file name")
.to_string(),
);
}
let path_string = path.to_string_lossy().to_string();
Self {
name,
path: path_string,
extension,
file_type,
}
}
pub fn name_string(&self, icons: &Icons) -> String {
let icon = icons.get(self);
let mut content = String::with_capacity(icon.len() + self.name.len() + 3 /* spaces */);
content += icon.as_str();
content += &self.name;
content
}
pub fn render(&self, colors: &Colors, icons: &Icons) -> ColoredString {
let content = self.name_string(&icons);
let elem = match self.file_type {
FileType::CharDevice => Elem::CharDevice,
FileType::Directory { uid } => Elem::Dir { uid },
FileType::SymLink => Elem::SymLink,
FileType::File { uid, exec } => Elem::File { uid, exec },
_ => Elem::File {
exec: false,
uid: false,
},
};
colors.colorize_using_path(content, &self.path, &elem)
}
pub fn name(&self) -> String {
self.name.clone()
}
pub fn extension(&self) -> Option<String> {
self.extension.clone()
}
pub fn file_type(&self) -> FileType {
self.file_type
}
}
impl Ord for Name {
fn cmp(&self, other: &Self) -> Ordering {
self.name.to_lowercase().cmp(&other.name.to_lowercase())
}
}
impl PartialOrd for Name {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.name.to_lowercase().cmp(&other.name.to_lowercase()))
}
}
impl PartialEq for Name {
fn | (&self, other: &Self) -> bool {
self.name.eq_ignore_ascii_case(&other.name)
}
}
#[cfg(test)]
mod test {
use super::Name;
use crate::color::{self, Colors};
use crate::icon::{self, Icons};
use crate::meta::FileType;
use crate::meta::Meta;
#[cfg(unix)]
use crate::meta::Permissions;
use ansi_term::Colour;
use std::cmp::Ordering;
use std::fs::{self, File};
#[cfg(unix)]
use std::os::unix::fs::symlink;
use std::path::Path;
#[cfg(unix)]
use std::process::Command;
use tempfile::tempdir;
#[test]
#[cfg(unix)] // Windows uses different default permissions
fn test_print_file_name() {
let tmp_dir = tempdir().expect("failed to create temp dir");
let icons = Icons::new(icon::Theme::Fancy);
// Create the file;
let file_path = tmp_dir.path().join("file.txt");
File::create(&file_path).expect("failed to create file");
let meta = file_path.metadata().expect("failed to get metas");
let colors = Colors::new(color::Theme::NoLscolors);
let file_type = FileType::new(&meta, &Permissions::from(&meta));
let name = Name::new(&file_path, file_type);
assert_eq!(
Colour::Fixed(184).paint(" file.txt"),
name.render(&colors, &icons)
);
}
#[test]
fn test_print_dir_name() {
let tmp_dir = tempdir().expect("failed to create temp dir");
let icons = Icons::new(icon::Theme::Fancy);
// Chreate the directory
let dir_path = tmp_dir.path().join("directory");
fs::create_dir(&dir_path).expect("failed to create the dir");
let meta = Meta::from_path(&dir_path).unwrap();
let colors = Colors::new(color::Theme::NoLscolors);
assert_eq!(
Colour::Fixed(33).paint(" directory"),
meta.name.render(&colors, &icons)
);
}
#[test]
#[cfg(unix)] // Symlinks are hard on Windows
fn test_print_symlink_name() {
let tmp_dir = tempdir().expect("failed to create temp dir");
let icons = Icons::new(icon::Theme::Fancy);
// Create the file;
let file_path = tmp_dir.path().join("file.tmp");
File::create(&file_path).expect("failed to create file");
// Create the symlink
let symlink_path = tmp_dir.path().join("target.tmp");
symlink(&file_path, &symlink_path).expect("failed to create symlink");
let meta = symlink_path
.symlink_metadata()
.expect("failed to get metas");
let colors = Colors::new(color::Theme::NoLscolors);
let file_type = FileType::new(&meta, &Permissions::from(&meta));
let name = Name::new(&symlink_path, file_type);
assert_eq!(
Colour::Fixed(44).paint(" target.tmp"),
name.render(&colors, &icons)
);
}
#[test]
#[cfg(unix)]
fn test_print_other_type_name() {
let tmp_dir = tempdir().expect("failed to create temp dir");
let icons = Icons::new(icon::Theme::Fancy);
// Create the pipe;
let pipe_path = tmp_dir.path().join("pipe.tmp");
let success = Command::new("mkfifo")
.arg(&pipe_path)
.status()
.expect("failed to exec mkfifo")
.success();
assert_eq!(true, success, "failed to exec mkfifo");
let meta = pipe_path.metadata().expect("failed to get metas");
let colors = Colors::new(color::Theme::NoLscolors);
let file_type = FileType::new(&meta, &Permissions::from(&meta));
let name = Name::new(&pipe_path, file_type);
assert_eq!(
Colour::Fixed(184).paint(" pipe.tmp"),
name.render(&colors, &icons)
);
}
#[test]
fn test_print_without_icon_or_color() {
let tmp_dir = tempdir().expect("failed to create temp dir");
let icons = Icons::new(icon::Theme::NoIcon);
// Create the file;
let file_path = tmp_dir.path().join("file.txt");
File::create(&file_path).expect("failed to create file");
let meta = Meta::from_path(&file_path).unwrap();
let colors = Colors::new(color::Theme::NoColor);
assert_eq!(
"file.txt",
meta.name.render(&colors, &icons).to_string().as_str()
);
}
#[test]
fn test_extensions_with_valid_file() {
let path = Path::new("some-file.txt");
let name = Name::new(
&path,
FileType::File {
uid: false,
exec: false,
},
);
assert_eq!(Some(String::from("txt")), name.extension());
}
#[test]
fn test_extensions_with_file_without_extension() {
let path = Path::new(".gitignore");
let name = Name::new(
&path,
FileType::File {
uid: false,
exec: false,
},
);
assert_eq!(None, name.extension());
}
#[test]
fn test_order_impl_is_case_insensitive() {
let path_1 = Path::new("AAAA");
let name_1 = Name::new(
&path_1,
FileType::File {
uid: false,
exec: false,
},
);
let path_2 = Path::new("aaaa");
let name_2 = Name::new(
&path_2,
FileType::File {
uid: false,
exec: false,
},
);
assert_eq!(Ordering::Equal, name_1.cmp(&name_2));
}
#[test]
fn test_partial_order_impl() {
let path_a = Path::new("aaaa");
let name_a = Name::new(
&path_a,
FileType::File {
uid: false,
exec: false,
},
);
let path_z = Path::new("zzzz");
let name_z = Name::new(
&path_z,
FileType::File {
uid: false,
exec: false,
},
);
assert_eq!(true, name_a < name_z);
}
#[test]
fn test_partial_order_impl_is_case_insensitive() {
let path_a = Path::new("aaaa");
let name_a = Name::new(
&path_a,
FileType::File {
uid: false,
exec: false,
},
);
let path_z = Path::new("ZZZZ");
let name_z = Name::new(
&path_z,
FileType::File {
uid: false,
exec: false,
},
);
assert_eq!(true, name_a < name_z);
}
#[test]
fn test_partial_eq_impl() {
let path_1 = Path::new("aaaa");
let name_1 = Name::new(
&path_1,
FileType::File {
uid: false,
exec: false,
},
);
let path_2 = Path::new("aaaa");
let name_2 = Name::new(
&path_2,
FileType::File {
uid: false,
exec: false,
},
);
assert_eq!(true, name_1 == name_2);
}
#[test]
fn test_partial_eq_impl_is_case_insensitive() {
let path_1 = Path::new("AAAA");
let name_1 = Name::new(
&path_1,
FileType::File {
uid: false,
exec: false,
},
);
let path_2 = Path::new("aaaa");
let name_2 = Name::new(
&path_2,
FileType::File {
uid: false,
exec: false,
},
);
assert_eq!(true, name_1 == name_2);
}
}
| eq |
f71435965233b3f42528cb19260b66df48dda165dispatch.go | package frontend
import (
"errors"
"fmt"
"github.com/juju/loggo"
"github.com/zeronetscript/universal_p2p/backend"
"io"
"net/http"
"strings"
)
var dispatchLog = loggo.GetLogger("Dispatch")
func parseHttpRequest(URL string) (commonRequest backend.CommonRequest, pathArray []string, err error) |
func HttpAndLogError(str string, l *loggo.Logger, w http.ResponseWriter) {
l.Errorf(str)
http.Error(w, str, 404)
}
func Dispatch(w http.ResponseWriter, request *http.Request) {
if request.Method != "GET" && request.Method != "POST" {
HttpAndLogError(fmt.Sprintf("unsupported method %s", request.Method), &dispatchLog, w)
return
}
commonRequest, pathArray, err := parseHttpRequest(request.URL.Path)
if err != nil {
frontLog.Errorf(err.Error())
http.Error(w, err.Error(), 404)
return
}
frontend, exist := AllFrontEnd[commonRequest.RootProtocol]
if !exist {
HttpAndLogError(fmt.Sprintf("not support protocol", commonRequest.RootProtocol), &dispatchLog, w)
return
}
var parsedRequest interface{}
var rd io.ReadCloser
if request.Method == "GET" {
parsedRequest = &backend.AccessRequest{
CommonRequest: commonRequest,
SubPath: pathArray,
}
} else {
const MAX_POST_DATA = 2 * 1024 * 1024
er := request.ParseMultipartForm(MAX_POST_DATA)
if er != nil {
HttpAndLogError(fmt.Sprintf("error parsing form %s", er), &dispatchLog, w)
return
}
const UPLOAD_KEY = "UPLOAD"
list, ok := request.MultipartForm.File[UPLOAD_KEY]
for k := range request.MultipartForm.File {
dispatchLog.Debugf("key %s", k)
}
if !ok {
HttpAndLogError(fmt.Sprintf("no such key %s", UPLOAD_KEY), &dispatchLog, w)
return
}
if len(list) <= 0 {
HttpAndLogError("file list 0", &dispatchLog, w)
return
}
f, err := list[0].Open()
if err != nil {
HttpAndLogError(fmt.Sprintf("error open multi part:%s", err), &dispatchLog, w)
return
}
rd = f
parsedRequest = &backend.UploadDataRequest{
CommonRequest: commonRequest,
UploadReader: f,
}
}
//predefined command
if rd != nil {
defer rd.Close()
}
_, exist = backend.AllBackend[commonRequest.RootProtocol]
if !exist {
errStr := fmt.Sprintf("protocol %s not supported", commonRequest.RootProtocol)
dispatchLog.Warningf(errStr)
http.Error(w, errStr, http.StatusServiceUnavailable)
return
}
frontend.HandleRequest(w, request, parsedRequest)
}
| {
dispatchLog.Tracef("accessing %s", URL)
trimmed := strings.TrimRight(strings.TrimLeft(URL, backend.SLASH), backend.SLASH)
allPathArray := strings.Split(trimmed, backend.SLASH)
if len(allPathArray) < 3 {
errStr := fmt.Sprintf("url access path is '%s', less than needed (at least 3)", trimmed)
return backend.CommonRequest{}, nil, errors.New(errStr)
}
commonRequest = backend.CommonRequest{
RootProtocol: allPathArray[0],
SubVersion: allPathArray[1],
RootCommand: allPathArray[2],
}
pathArray = allPathArray[3:]
dispatchLog.Debugf("RootProtocol:%s,SubVersion:%s,RootCommand:%s",
commonRequest.RootProtocol, commonRequest.SubVersion, commonRequest.RootCommand)
err = nil
return
} |
messages.py | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70914 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# PCH Uniqueness
def get_uniqueness(self, prevout):
r = b""
r += struct.pack("<I", prevout.n)
r += ser_uint256(prevout.hash)
return r
def solve_stake(self, prevouts):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for prevout in prevouts:
nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
target = int(target0 * nvalue / 100) % 2**256
data = b""
data += ser_uint64(stakeModifier)
data += struct.pack("<I", txBlockTime)
# prevout for zPoS is serial hashes hex strings
if isinstance(prevout, COutPoint):
data += self.get_uniqueness(prevout)
else:
data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
loop = False
break
if loop:
self.nTime += 1
return True
    def __repr__(self):
        # Human-readable summary; nAccumulatorCheckpoint is intentionally omitted.
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
    """A full block: header fields (inherited) plus the transaction list vtx."""
    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []
    def deserialize(self, f):
        """Read header then transaction vector from stream f."""
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)
    def serialize(self, with_witness=False):
        """Serialize header + txs; appends vchBlockSig if the block was signed."""
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx, "serialize_without_witness")
        if hasattr(self, 'vchBlockSig'):
            r += ser_string(self.vchBlockSig)
        return r
    # Calculate the merkle root given a vector of transaction hashes
    @classmethod
    def get_merkle_root(cls, hashes):
        """Fold a list of serialized hashes pairwise up to the merkle root (int).

        Odd levels duplicate the last entry (i2 clamps to the final index).
        Requires a non-empty list: hashes[0] on [] would raise IndexError.
        """
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])
    def calc_merkle_root(self):
        """Merkle root over the txids (no witness data) of self.vtx."""
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)
    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))
        return self.get_merkle_root(hashes)
    def is_valid(self):
        """Cheap validity check: PoW target, per-tx validity, merkle root match."""
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True
    def solve(self):
        """Grind nNonce until the header hash meets the compact target (PoW)."""
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()
    def sign_block(self, key, low_s=True):
        """Sign the double-SHA256 of the unsigned header and store vchBlockSig."""
        data = b""
        data += struct.pack("<i", self.nVersion)
        data += ser_uint256(self.hashPrevBlock)
        data += ser_uint256(self.hashMerkleRoot)
        data += struct.pack("<I", self.nTime)
        data += struct.pack("<I", self.nBits)
        data += struct.pack("<I", self.nNonce)
        data += ser_uint256(self.nAccumulatorCheckpoint)
        sha256NoSig = hash256(data)
        self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
    """A transaction carried inside a compact block, keyed by its index."""
    def __init__(self, index=0, tx=None):
        self.index = index
        self.tx = tx
    def deserialize(self, f):
        """Read <compact-size index><transaction> from stream f."""
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)
    def serialize(self, with_witness=True):
        """Write <compact-size index><transaction>, with or without witness data."""
        tx_bytes = (self.tx.serialize_with_witness() if with_witness
                    else self.tx.serialize_without_witness())
        return ser_compact_size(self.index) + tx_bytes
    def serialize_without_witness(self):
        return self.serialize(with_witness=False)
    def serialize_with_witness(self):
        return self.serialize(with_witness=True)
    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
    """Wire form of a BIP 152 compact block: header, nonce, 6-byte shortids
    and differentially-indexed prefilled transactions, with explicit lengths."""
    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []
    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        for i in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)
    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn, "serialize_without_witness")
        return r
    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """Like P2PHeaderAndShortIDs, but prefilled txs always include witnesses."""
    def serialize(self):
        return super().serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
    """Return the low 48 bits of siphash256(k0, k1, tx_hash), per BIP 152."""
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
    """In-memory form of a BIP 152 compact block using absolute prefilled
    indices; converts to/from the differential wire form (P2PHeaderAndShortIDs)."""
    def __init__(self, p2pheaders_and_shortids=None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False
        if p2pheaders_and_shortids is not None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            last_index = -1
            # Undo the differential encoding into absolute indices.
            for x in p2pheaders_and_shortids.prefilled_txn:
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index
    def to_p2p(self):
        """Convert back to the wire representation (differential indices)."""
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret
    def get_siphash_keys(self):
        """Derive the two siphash keys from sha256(serialized header || nonce)."""
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]
    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
        """Populate this object from a full block.

        prefill_list defaults to [0] (prefill only the coinbase). The original
        signature used a mutable default argument ([0]) which is created once
        and shared by every call; a None sentinel avoids that pitfall while
        remaining call-compatible.
        """
        if prefill_list is None:
            prefill_list = [0]
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))
    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
    """BIP 152 getblocktxn payload: a block hash plus differentially encoded
    transaction indexes."""
    def __init__(self, blockhash=0, indexes=None):
        self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        indexes_length = deser_compact_size(f)
        # Replace (not extend) self.indexes so that deserializing into a reused
        # object doesn't accumulate stale entries (the original appended).
        self.indexes = [deser_compact_size(f) for _ in range(indexes_length)]
    def serialize(self):
        r = b""
        r += ser_uint256(self.blockhash)
        r += ser_compact_size(len(self.indexes))
        for x in self.indexes:
            r += ser_compact_size(x)
        return r
    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        self.indexes = []
        last_index = -1
        for x in absolute_indexes:
            self.indexes.append(x-last_index-1)
            last_index = x
    def to_absolute(self):
        """Decode self.indexes back into absolute transaction indexes."""
        absolute_indexes = []
        last_index = -1
        for x in self.indexes:
            absolute_indexes.append(x+last_index+1)
            last_index = absolute_indexes[-1]
        return absolute_indexes
    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
    """BIP 152 blocktxn payload: a block hash plus the requested transactions."""
    def __init__(self, blockhash=0, transactions=None):
        # `is not None` instead of `!= None` (PEP 8; also avoids __eq__ surprises).
        self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)
    def serialize(self, with_witness=True):
        """Serialize hash + transactions, with or without witness data."""
        r = b""
        r += ser_uint256(self.blockhash)
        if with_witness:
            r += ser_vector(self.transactions, "serialize_with_witness")
        else:
            r += ser_vector(self.transactions, "serialize_without_witness")
        return r
    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
    """Partial merkle tree (merkleblock): proves inclusion of selected txs.

    NOTE(review): the source lines for __init__/deserialize were garbled
    (fragments interleaved elsewhere in the file); reconstructed here to the
    standard test-framework layout — confirm against upstream messages.py.
    """
    def __init__(self):
        self.nTransactions = 0
        self.vHash = []
        self.vBits = []
        self.fBad = False
    def deserialize(self, f):
        self.nTransactions = struct.unpack("<i", f.read(4))[0]
        self.vHash = deser_uint256_vector(f)
        vBytes = deser_string(f)
        # Unpack the bit field: little-endian bit order within each byte.
        self.vBits = []
        for i in range(len(vBytes) * 8):
            self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nTransactions)
        r += ser_uint256_vector(self.vHash)
        # Pack vBits back into bytes, LSB first, zero-padded to a whole byte.
        vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
        for i in range(len(self.vBits)):
            vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
        r += ser_string(bytes(vBytesArray))
        return r
    def __repr__(self):
        return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
    """merkleblock payload: a block header plus a partial merkle tree."""
    def __init__(self):
        self.header = CBlockHeader()
        self.txn = CPartialMerkleTree()
    def deserialize(self, f):
        self.header.deserialize(f)
        self.txn.deserialize(f)
    def serialize(self):
        return self.header.serialize() + self.txn.serialize()
    def __repr__(self):
        return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
    """version message: handshake opener advertising our protocol capabilities."""
    command = b"version"
    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = NODE_NETWORK
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY
    def deserialize(self, f):
        """Parse a version message, tolerating the reduced field sets sent by
        older protocol versions (< 106, < 209, < 70001)."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        if self.nVersion == 10300:
            # Historical quirk inherited from bitcoind: 10300 is treated as 300.
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f)
        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
        else:
            # Pre-106 peers don't send these fields at all.
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None
        if self.nVersion >= 209:
            self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        else:
            self.nStartingHeight = None
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except:
                # Field absent at end of stream: default to not relaying.
                self.nRelay = 0
        else:
            self.nRelay = 0
    def serialize(self):
        # Always emits the full modern field set (assumes nVersion >= 70001 fields).
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r
    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
    """verack message: empty acknowledgement of a version message."""
    command = b"verack"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_verack()"
class msg_addr():
    """addr message: relays a list of known peer addresses."""
    command = b"addr"
    def __init__(self):
        self.addrs = []
    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)
    def serialize(self):
        return ser_vector(self.addrs)
    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
    """inv message: announces a vector of inventory items."""
    command = b"inv"
    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
    """getdata message: requests the objects named in an inventory vector."""
    command = b"getdata"
    def __init__(self, inv=None):
        # `is not None` instead of `!= None` (PEP 8; avoids __eq__ surprises).
        self.inv = inv if inv is not None else []
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
    """getblocks message: asks for block inventory from a locator up to hashstop."""
    command = b"getblocks"
    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)
    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx():
    """tx message: relays a single transaction (without witness data)."""
    command = b"tx"
    def __init__(self, tx=None):
        # Build a fresh CTransaction per instance. The original default
        # argument `tx=CTransaction()` was evaluated once at def time, so
        # every default-constructed msg_tx shared (and mutated) one object.
        self.tx = tx if tx is not None else CTransaction()
    def deserialize(self, f):
        self.tx.deserialize(f)
    def serialize(self):
        return self.tx.serialize_without_witness()
    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
    # Identical to msg_tx, except the serialization includes witness data.
    def serialize(self):
        return self.tx.serialize_with_witness()
class msg_block():
    """block message: relays a full block (serialized without witness data)."""
    command = b"block"
    def __init__(self, block=None):
        self.block = CBlock() if block is None else block
    def deserialize(self, f):
        self.block.deserialize(f)
    def serialize(self):
        return self.block.serialize(with_witness=False)
    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
    """Escape hatch: send an arbitrary command with caller-supplied raw bytes."""
    def __init__(self, command, data=None):
        self.command = command
        self.data = data
    def serialize(self):
        return self.data
    def __repr__(self):
        return "msg_generic()"
class msg_witness_block(msg_block):
    """Identical to msg_block, but serializes the block with witness data."""
    def serialize(self):
        return self.block.serialize(with_witness=True)
class msg_getaddr():
    """getaddr message: empty request for the peer's known addresses."""
    command = b"getaddr"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_getaddr()"
class msg_ping():
    """ping message: keep-alive carrying a nonce echoed back in the pong."""
    command = b"ping"
    def __init__(self, nonce=0):
        self.nonce = nonce
    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]
    def serialize(self):
        return struct.pack("<Q", self.nonce)
    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
    """pong message: reply to a ping, echoing its nonce."""
    command = b"pong"
    def __init__(self, nonce=0):
        self.nonce = nonce
    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]
    def serialize(self):
        return struct.pack("<Q", self.nonce)
    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
    """mempool message: empty request for the peer's mempool inventory."""
    command = b"mempool"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_mempool()"
class msg_sendheaders():
    """sendheaders message: empty signal asking for headers-first announcements."""
    command = b"sendheaders"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
    """getheaders message: request headers from a locator up to hashstop."""
    command = b"getheaders"
    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r
    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
    """headers message: a vector of block headers."""
    command = b"headers"
    def __init__(self, headers=None):
        self.headers = headers if headers is not None else []
    def deserialize(self, f):
        # comment in bitcoind indicates these should be deserialized as blocks
        blocks = deser_vector(f, CBlock)
        # Replace (not extend) the list so repeated deserialize() calls on one
        # object don't accumulate stale headers (the original appended).
        self.headers = [CBlockHeader(x) for x in blocks]
    def serialize(self):
        blocks = [CBlock(x) for x in self.headers]
        return ser_vector(blocks)
    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
    """reject message: the peer refused one of our messages; for rejected
    blocks/txs (and non-malformed codes) it also carries the object hash."""
    command = b"reject"
    REJECT_MALFORMED = 1
    def __init__(self):
        self.message = b""
        self.code = 0
        self.reason = b""
        self.data = 0
    def deserialize(self, f):
        self.message = deser_string(f)
        self.code = struct.unpack("<B", f.read(1))[0]
        self.reason = deser_string(f)
        if self.code != self.REJECT_MALFORMED and self.message in (b"block", b"tx"):
            self.data = deser_uint256(f)
    def serialize(self):
        r = ser_string(self.message)
        r += struct.pack("<B", self.code)
        r += ser_string(self.reason)
        if self.code != self.REJECT_MALFORMED and self.message in (b"block", b"tx"):
            r += ser_uint256(self.data)
        return r
    def __repr__(self):
        return "msg_reject: %s %d %s [%064x]" \
            % (self.message, self.code, self.reason, self.data)
class msg_feefilter():
    """feefilter message (BIP 133): minimum feerate we want announcements for."""
    command = b"feefilter"
    def __init__(self, feerate=0):
        self.feerate = feerate
    def deserialize(self, f):
        self.feerate = struct.unpack("<Q", f.read(8))[0]
    def serialize(self):
        return struct.pack("<Q", self.feerate)
    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
    """sendcmpct message (BIP 152): negotiate compact block announcements."""
    command = b"sendcmpct"
    def __init__(self):
        self.announce = False
        self.version = 1
    def deserialize(self, f):
        self.announce = struct.unpack("<?", f.read(1))[0]
        self.version = struct.unpack("<Q", f.read(8))[0]
    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)
    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
    """cmpctblock message: wraps a P2PHeaderAndShortIDs payload."""
    command = b"cmpctblock"
    def __init__(self, header_and_shortids=None):
        self.header_and_shortids = header_and_shortids
    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)
    def serialize(self):
        return self.header_and_shortids.serialize()
    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
    """getblocktxn message: wraps a BlockTransactionsRequest payload."""
    command = b"getblocktxn"
    def __init__(self):
        self.block_txn_request = None
    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)
    def serialize(self):
        return self.block_txn_request.serialize()
    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
    """blocktxn message: wraps a BlockTransactions payload (txid serialization)."""
    command = b"blocktxn"
    def __init__(self):
        self.block_transactions = BlockTransactions()
    def deserialize(self, f):
        self.block_transactions.deserialize(f)
    def serialize(self):
        return self.block_transactions.serialize(with_witness=False)
    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
    """Same as msg_blocktxn but serializes transactions with witness data."""
    def serialize(self):
        # NOTE(review): the original source lines here were garbled with a
        # displaced fragment of CPartialMerkleTree (fBad/deserialize); that
        # fragment belongs to CPartialMerkleTree, not this class.
        r = b""
        r += self.block_transactions.serialize(with_witness=True)
        return r
config.py | # -*- coding: utf-8 -*-
import os
import re
import json
import os.path
import unittest
reg_cmnt = re.compile(r"/\*.*?\*/", re.DOTALL)
class Config:
    """Access to the application configuration: a main JSON5-ish file merged
    with a user override file (JSON with /* ... */ comments stripped)."""
    def __init__(self, main_path=None, user_path=None):
        # Fall back to the default file names when paths are not supplied.
        self._main_path = "config.json5" if main_path is None else main_path
        self._user_path = "config_user.json5" if user_path is None else user_path
        self._cfg_dict = {}
    def __getitem__(self, key):
        return self._cfg_dict[key]
    def __len__(self):
        return len(self._cfg_dict)
    def _load_json(self, path):
        """Load JSON from path, tolerating /* */ comments; {} if file is absent."""
        data = {}
        if os.path.exists(path):
            # 'with' closes the handle promptly (the original leaked it).
            with open(path) as f:
                txt = f.read()
            txt = reg_cmnt.sub("", txt)  # remove comments
            data = json.loads(txt)
        return data
    def _set_default(self, cfg):
        """Fill in (and normalize the types of) any missing settings in cfg."""
        cfg["path_to_dict"] = cfg.get("path_to_dict", "dict.json")
        cfg["path_to_stat"] = cfg.get("path_to_stat", "statistic.json")
        cfg["words_per_lesson"] = int(cfg.get("words_per_lesson", 5))
        cfg["CntStudyWords"] = int(cfg.get("CntStudyWords", 50))
        cfg["MinPercent"] = float(cfg.get("MinPercent", 97.0))
        cfg["MinSuccessCnt"] = int(cfg.get("MinSuccessCnt", 10))
        cfg["retry_time"] = int(cfg.get("retry_time", 1800))
        cfg["hide_transcription"] = cfg.get("hide_transcription", "no")
        cfg["start_time_delay"] = int(cfg.get("start_time_delay", 1))
        cfg["stat_count_row"] = int(cfg.get("stat_count_row", 200))
        cfg["right_answer_percent"] = float(cfg.get("right_answer_percent", 10.0))
        cfg["wrong_answer_percent"] = float(cfg.get("wrong_answer_percent", 40.0))
        cfg["empty_answer_is_error"] = cfg.get("empty_answer_is_error", "no")
        cfg["internet_dictionary_url"] = cfg.get("internet_dictionary_url",
            {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
             "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"})
    def create_default_user_config(self):
        """Create an empty, commented user config file if it doesn't exist yet."""
        if not os.path.isfile(self._user_path):
            txt = "{\n /*\n User config\n */\n\n}"
            with open(self._user_path, "wt") as f:
                f.write(txt)
    def reload(self):
        """Re-read main + user config (user overrides main), apply defaults."""
        self._cfg_dict = {}
        self._cfg_dict.update(self._load_json(self._main_path))
        self._cfg_dict.update(self._load_json(self._user_path))
        self._set_default(self._cfg_dict)
        return self._cfg_dict
    def get_dict(self):
        return self._cfg_dict
class ConfigTestCase(unittest.TestCase):
    "Test suite for the Config class"
    def setUp(self):
        # Make sure a stale user-config fixture never leaks into a test.
        if os.path.isfile("test_config_user.json"):
            os.remove("test_config_user.json")
    def tearDown(self):
        if os.path.isfile("test_config_user.json"):
            os.remove("test_config_user.json")
    def equal_cfg(self, cfg, test_dict):
        # Helper: cfg must contain exactly the expected 14 settings.
        for key, val in test_dict.items():
            self.assertEqual(cfg[key], val)
        self.assertEqual(len(cfg), 14)
    def test_main(self):
        "Test loading the main configuration file"
        test_dict = {
            "path_to_dict": "dict.json",
            "path_to_stat": "statistic.json",
            "words_per_lesson": 5,
            "CntStudyWords": 50,
            "MinPercent": 97.0,
            "MinSuccessCnt": 10,
            "retry_time": 1800,
            "hide_transcription": "no",
            "start_time_delay": 1,
            "stat_count_row": 200,
            "right_answer_percent": 10.0,
            "wrong_answer_percent": 40.0,
            "empty_answer_is_error": "no",
            "internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
                                        "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}
        # The user path intentionally doesn't exist, so only defaults apply.
        cfg = Config("config.json5", "fake_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)
    def test_user(self):
        "Test loading the user configuration file"
        test_dict = {
            "path_to_dict": "dict1.json",
            "path_to_stat": "statistic1.json",
            "words_per_lesson": 6,
            "CntStudyWords": 60,
            "MinPercent": 98.0,
            "MinSuccessCnt": 11,
            "retry_time": 1801,
            "hide_transcription": "yes",
            "start_time_delay": 2,
            "stat_count_row": 300,
            "right_answer_percent": 20.0,
            "wrong_answer_percent": 50.0,
            "empty_answer_is_error": "yes",
            "internet_dictionary_url": {"EN_RU": "http1://slovari.yandex.ru/{word}/en-ru/#lingvo/",
                                        "RU_EN": "http1://slovari.yandex.ru/{word}/en/#lingvo/"}}
        # A full user config must override every main-config value.
        json.dump(test_dict, open("test_config_user.json", "w"))
        cfg = Config("config.json5", "test_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)
    def test_user_part(self):
        "Test loading a user configuration file that overrides only part of the settings"
        test_dict = {
            "path_to_dict": "dict1.json",
            "path_to_stat": "statistic1.json",
            "words_per_lesson": 6,
            "CntStudyWords": 60,
            "MinPercent": 98.0,
            "MinSuccessCnt": 11}
        json.dump(test_dict, open("test_config_user.json", "w"))
        # The remaining keys should come through with their default values.
        test_dict.update({
            "retry_time": 1800,
            "hide_transcription": "no",
            "start_time_delay": 1,
            "stat_count_row": 200,
            "right_answer_percent": 10.0,
            "wrong_answer_percent": 40.0,
            "empty_answer_is_error": "no"})
        cfg = Config("config.json5", "test_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)
    def test_not_exists(self):
        "Test that default settings are applied"
        test_dict = {
            "path_to_dict": "dict.json",
            "path_to_stat": "statistic.json",
            "words_per_lesson": 5,
            "CntStudyWords": 50,
            "MinPercent": 97.0,
            "MinSuccessCnt": 10,
            "retry_time": 1800,
            "hide_transcription": "no",
            "start_time_delay": 1,
            "stat_count_row": 200,
            "right_answer_percent": 10.0,
            "wrong_answer_percent": 40.0,
            "empty_answer_is_error": "no",
            "internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
                                        "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}
        cfg = Config("config.json5", "fake_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)
        # Both files missing: reload must still succeed using pure defaults.
        cfg = Config("fake_config.json", "fake_config_user.json")
        cfg.reload()
if __name__ == "__main__":
    # Run from the project root (two levels up) so relative config paths resolve.
    os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    suite = unittest.TestLoader().loadTestsFromTestCase(ConfigTestCase)
    unittest.TextTestRunner(verbosity=2).run(suite)
config.py | import configparser
import functools
from os import path
from pathlib import Path
class Config():
    """Config wrapper that reads global config and user config."""
    PROJECT_ROOT = path.join(path.dirname(path.realpath(__file__)), '..')
    CONFIG_INI = path.join(PROJECT_ROOT, 'config.ini')
    HOME_DIR = Path.home()
    CONN_INI = path.join(HOME_DIR, '.snowflake-cicd.ini')
    def __init__(self):
        pass
    def __lazy_init(func):
        """Reads and parses global config file and user's conn file."""
        @functools.wraps(func)
        def wrap(self, *args, **kwargs):
            if not hasattr(self, '_config'):
                assert path.exists(Config.CONFIG_INI), f"Missing config file at path {Config.CONFIG_INI}"
                self._config = configparser.ConfigParser()
                self._config.read(Config.CONFIG_INI)
            if not hasattr(self, '_conn'):
                assert path.exists(Config.CONN_INI), f"Missing connection settings file at path {Config.CONN_INI}"
                self._conn = configparser.ConfigParser()
                self._conn.read(Config.CONN_INI)
            return func(self, *args, **kwargs)
        return wrap
    @__lazy_init
    def read_config(self, key, section='default', default=None) -> str:
        """Reads [section] key from user's conn file or use global file
        if the key is missing."""
        return self._conn[section].get(key,
                                       self._config[section].get(key, default))
    @__lazy_init
    def read_user_config(self, key, section='default', default=None) -> str:
        """Reads [section] key from the user's .snowflake-cicd.ini file only.

        NOTE(review): the method name was garbled in the source (`def |`); the
        displaced fragment `read_user_config` elsewhere in the file identifies
        the intended name.
        """
        return self._conn[section].get(key, default)
    @__lazy_init
    def sql(self, query_id) -> str:
        """Returns value from config section 'queries'."""
        return self._config['queries'].get(query_id)
# Module-level singleton used by the rest of the package. (A stray chunk
# artifact token that followed this line in the source has been removed.)
config = Config()
controlsd.py | #!/usr/bin/env python3
import os
import math
from cereal import car, log
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
from selfdrive.car.hyundai.scc_smoother import SccSmoother
from selfdrive.ntune import ntune_get, ntune_isEnabled
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS  # no lane-departure warnings below this speed
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL  # one second's worth of control frames
STEER_ANGLE_SATURATION_THRESHOLD = 2.5  # Degrees
# Environment switches used when running in simulation / without sensors.
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
# Processes whose not-running state should not raise a processNotRunning alert.
IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"])
# Shorthand aliases for cereal/capnp enum types used throughout this file.
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
  def __init__(self, sm=None, pm=None, can_sock=None):
    """Set up messaging, fingerprint the car, read params, and reset all state.

    sm/pm/can_sock may be injected (e.g. by process replay tests); when None,
    real pub/sub sockets are created. Blocks until one pandaState message and
    one CAN packet have been received.
    """
    config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
    # Setup sockets
    self.pm = pm
    if self.pm is None:
      self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
                                     'carControl', 'carEvents', 'carParams'])
    self.camera_packets = ["roadCameraState", "driverCameraState"]
    if TICI:
      self.camera_packets.append("wideRoadCameraState")
    self.sm = sm
    if self.sm is None:
      ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
      self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration',
                                     'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
                                     'managerState', 'liveParameters', 'radarState'] + self.camera_packets,
                                    ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
    self.can_sock = can_sock
    if can_sock is None:
      can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
      self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
    if TICI:
      self.log_sock = messaging.sub_sock('androidLog')
    # wait for one pandaState and one CAN packet
    hw_type = messaging.recv_one(self.sm.sock['pandaState']).pandaState.pandaType
    has_relay = hw_type in [PandaType.blackPanda, PandaType.uno, PandaType.dos]
    print("Waiting for CAN messages...")
    get_one_can(self.can_sock)
    self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay)
    # read params
    params = Params()
    self.is_metric = params.get_bool("IsMetric")
    self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
    self.enable_lte_onroad = params.get_bool("EnableLteOnroad")
    community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
    openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
    passive = params.get_bool("Passive") or not openpilot_enabled_toggle
    # detect sound card presence and ensure successful init
    sounds_available = HARDWARE.get_sound_card_online()
    car_recognized = self.CP.carName != 'mock'
    fuzzy_fingerprint = self.CP.fuzzyFingerprint
    # If stock camera is disconnected, we loaded car controls and it's not dashcam mode
    controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
    community_feature = self.CP.communityFeature or fuzzy_fingerprint
    community_feature_disallowed = community_feature and (not community_feature_toggle)
    # read_only: openpilot observes but never sends actuation commands.
    self.read_only = not car_recognized or not controller_available or \
                     self.CP.dashcamOnly or community_feature_disallowed
    if self.read_only:
      self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
    # Write CarParams for radard
    cp_bytes = self.CP.to_bytes()
    params.put("CarParams", cp_bytes)
    put_nonblocking("CarParamsCache", cp_bytes)
    # Controller objects: alerts, longitudinal and lateral control.
    self.CC = car.CarControl.new_message()
    self.AM = AlertManager()
    self.events = Events()
    self.LoC = LongControl(self.CP, self.CI.compute_gb)
    self.VM = VehicleModel(self.CP)
    # Lateral controller type is chosen by the car's tuning config.
    if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
      self.LaC = LatControlAngle(self.CP)
    elif self.CP.lateralTuning.which() == 'pid':
      self.LaC = LatControlPID(self.CP)
    elif self.CP.lateralTuning.which() == 'indi':
      self.LaC = LatControlINDI(self.CP)
    elif self.CP.lateralTuning.which() == 'lqr':
      self.LaC = LatControlLQR(self.CP)
    # State-machine and counter initialization.
    self.initialized = False
    self.state = State.disabled
    self.enabled = False
    self.active = False
    self.can_rcv_error = False
    self.soft_disable_timer = 0
    self.v_cruise_kph = 255
    self.v_cruise_kph_last = 0
    self.mismatch_counter = 0
    self.can_error_counter = 0
    self.last_blinker_frame = 0
    self.saturated_count = 0
    self.distance_traveled = 0
    self.last_functional_fan_frame = 0
    self.events_prev = []
    self.current_alert_types = [ET.PERMANENT]
    self.logged_comm_issue = False
    # scc smoother
    self.is_cruise_enabled = False
    self.cruiseVirtualMaxSpeed = 0
    self.clu_speed_ms = 0.
    self.apply_accel = 0.
    self.fused_accel = 0.
    self.lead_drel = 0.
    self.aReqValue = 0.
    self.aReqValueMin = 0.
    self.aReqValueMax = 0.
    self.angle_steers_des = 0.
    # TODO: no longer necessary, aside from process replay
    self.sm['liveParameters'].valid = True
    self.startup_event = get_startup_event(car_recognized, controller_available, fuzzy_fingerprint)
    if not sounds_available:
      self.events.add(EventName.soundsUnavailable, static=True)
    if community_feature_disallowed:
      self.events.add(EventName.communityFeatureDisallowed, static=True)
    if not car_recognized:
      self.events.add(EventName.carUnrecognized, static=True)
    elif self.read_only:
      self.events.add(EventName.dashcamMode, static=True)
    # controlsd is driven by can recv, expected at 100Hz
    self.rk = Ratekeeper(100, print_delay_threshold=None)
    self.prof = Profiler(False)  # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
# Create events for battery, temperature, disk space, and memory
if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
if self.sm['deviceState'].memoryUsagePercent > 90:
self.events.add(EventName.lowMemory)
# Alert if fan isn't spinning for 5 seconds
if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \ | self.events.add(EventName.laneChangeBlocked)
elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0:
self.events.add(EventName.autoLaneChange)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if self.can_rcv_error or not CS.canValid:
self.events.add(EventName.canError)
safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel or self.sm['pandaState'].safetyParam != self.CP.safetyParam
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaState"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid():
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.turningIndicatorOn in self.events.names):
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['longitudinalPlan'].fcw or (self.enabled and self.sm['modelV2'].meta.hardBrakePredicted):
self.events.add(EventName.fcw)
if TICI and self.enable_lte_onroad:
logs = messaging.drain_sock(self.log_sock, wait_for_one=False)
messages = []
for m in logs:
try:
messages.append(m.androidLog.message)
except UnicodeDecodeError:
pass
for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]:
for m in messages:
if err not in m:
continue
csid = m.split("CSID:")[-1].split(" ")[0]
evt = {"0": EventName.wideRoadCameraError, "1": EventName.roadCameraError,
"2": EventName.driverCameraError}.get(csid, None)
if evt is not None:
self.events.add(evt)
# TODO: fix simulator
if not SIMULATION:
#if not NOSENSOR:
# if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and \
# (not TICI or self.enable_lte_onroad):
# # Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
# self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
# Check if all manager processes are running
not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
#if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
#and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
#self.events.add(EventName.noTarget)
  def data_sample(self):
    """Receive data from sockets and update carState.

    Blocks until at least one CAN message arrives, feeds it to the car
    interface, refreshes all SubMaster sockets, and maintains the
    initialization / CAN-error / panda-mismatch bookkeeping. Returns the
    fresh carState.
    """
    # Update carState from CAN
    can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
    CS = self.CI.update(self.CC, can_strs)
    self.sm.update(0)
    # Become "initialized" once everything is valid, or unconditionally after 2s
    all_valid = CS.canValid and self.sm.all_alive_and_valid()
    if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 2.0):
      self.initialized = True
      Params().put_bool("ControlsReady", True)
    # Check for CAN timeout
    if not can_strs:
      self.can_error_counter += 1
      self.can_rcv_error = True
    else:
      self.can_rcv_error = False
    # When the panda and controlsd do not agree on controls_allowed
    # we want to disengage openpilot. However the status from the panda goes through
    # another socket other than the CAN messages and one can arrive earlier than the other.
    # Therefore we allow a mismatch for two samples, then we trigger the disengagement.
    if not self.enabled:
      self.mismatch_counter = 0
    if not self.sm['pandaState'].controlsAllowed and self.enabled:
      self.mismatch_counter += 1
    # odometer used elsewhere for distance-based gating (e.g. GPS event)
    self.distance_traveled += CS.vEgo * DT_CTRL
    return CS
  def state_transition(self, CS):
    """Compute conditional state transitions and execute actions on state transitions.

    Drives the disabled -> (preEnabled) -> enabled -> softDisabling -> disabled
    state machine from the current event set, and derives self.active /
    self.enabled from the resulting state. Also accumulates the alert types
    that publish_logs will turn into alerts this frame.
    """
    self.v_cruise_kph_last = self.v_cruise_kph
    # if stock cruise is completely disabled, then we can use our own set speed logic
    self.CP.enableCruise = self.CI.CP.enableCruise
    #if not self.CP.enableCruise:
    #  self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled, self.is_metric)
    #elif self.CP.enableCruise and CS.cruiseState.enabled:
    #  self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
    # fork-specific: cruise buttons handled by the SCC smoother instead
    SccSmoother.update_cruise_buttons(self, CS, self.CP.openpilotLongitudinalControl)
    # decrease the soft disable timer at every step, as it's reset on
    # entrance in SOFT_DISABLING state
    self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
    self.current_alert_types = [ET.PERMANENT]
    # ENABLED, PRE ENABLING, SOFT DISABLING
    if self.state != State.disabled:
      # user and immediate disable always have priority in a non-disabled state
      if self.events.any(ET.USER_DISABLE):
        self.state = State.disabled
        self.current_alert_types.append(ET.USER_DISABLE)
      elif self.events.any(ET.IMMEDIATE_DISABLE):
        self.state = State.disabled
        self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
      else:
        # ENABLED
        if self.state == State.enabled:
          if self.events.any(ET.SOFT_DISABLE):
            self.state = State.softDisabling
            self.soft_disable_timer = 50  # 0.5s
            self.current_alert_types.append(ET.SOFT_DISABLE)
        # SOFT DISABLING
        elif self.state == State.softDisabling:
          if not self.events.any(ET.SOFT_DISABLE):
            # no more soft disabling condition, so go back to ENABLED
            self.state = State.enabled
          elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
            self.current_alert_types.append(ET.SOFT_DISABLE)
          elif self.soft_disable_timer <= 0:
            # timer expired without the condition clearing: fully disable
            self.state = State.disabled
        # PRE ENABLING
        elif self.state == State.preEnabled:
          if not self.events.any(ET.PRE_ENABLE):
            self.state = State.enabled
          else:
            self.current_alert_types.append(ET.PRE_ENABLE)
    # DISABLED
    elif self.state == State.disabled:
      if self.events.any(ET.ENABLE):
        if self.events.any(ET.NO_ENTRY):
          # engagement requested but blocked by a no-entry condition
          self.current_alert_types.append(ET.NO_ENTRY)
        else:
          if self.events.any(ET.PRE_ENABLE):
            self.state = State.preEnabled
          else:
            self.state = State.enabled
          self.current_alert_types.append(ET.ENABLE)
          self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
    # Check if actuators are enabled
    self.active = self.state == State.enabled or self.state == State.softDisabling
    if self.active:
      self.current_alert_types.append(ET.WARNING)
    # Check if openpilot is engaged
    self.enabled = self.active or self.state == State.preEnabled
  def state_control(self, CS):
    """Given the state, this function returns an actuators packet.

    Runs the longitudinal PID loop and the lateral controller, interpolating
    the longitudinal plan forward by its age. Returns
    (actuators, v_acc_sol, a_acc_sol, lac_log).
    """
    # Update VehicleModel
    params = self.sm['liveParameters']
    x = max(params.stiffnessFactor, 0.1)
    #sr = max(params.steerRatio, 0.1)
    # fork-specific: steer ratio comes from ntune unless live ratio is enabled
    if ntune_isEnabled('useLiveSteerRatio'):
      sr = max(params.steerRatio, 0.1)
    else:
      sr = max(ntune_get('steerRatio'), 0.1)
    self.VM.update_params(x, sr)
    lat_plan = self.sm['lateralPlan']
    long_plan = self.sm['longitudinalPlan']
    actuators = car.CarControl.Actuators.new_message()
    if CS.leftBlinker or CS.rightBlinker:
      self.last_blinker_frame = self.sm.frame
    # State specific actions
    if not self.active:
      # keep controllers reset while disengaged so they start clean on enable
      self.LaC.reset()
      self.LoC.reset(v_pid=CS.vEgo)
    long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
    # no greater than dt mpc + dt, to prevent too high extraps
    dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
    # linearly extrapolate the plan's accel/speed to "now"
    a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
    v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0
    # Gas/Brake PID loop
    #actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)
    # scc smoother
    actuators.gas, actuators.brake = self.LoC.update(self.active and CS.cruiseState.speed > 1.,
                                                     CS,
                                                     v_acc_sol,
                                                     long_plan.vTargetFuture,
                                                     a_acc_sol,
                                                     self.CP,
                                                     self.sm['radarState'])
    # Steering PID loop and lateral MPC
    actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params, lat_plan)
    # Check for difference between desired angle and angle for angle based control
    angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
      abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
    if angle_control_saturated and not CS.steeringPressed and self.active:
      self.saturated_count += 1
    else:
      self.saturated_count = 0
    # Send a "steering required alert" if saturation count has reached the limit
    if (lac_log.saturated and not CS.steeringPressed) or \
       (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
      if len(lat_plan.dPathPoints):
        # Check if we deviated from the path
        left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
        right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
        # NOTE(review): deviations are computed but the steerSaturated event is disabled below
        # if left_deviation or right_deviation:
        #   self.events.add(EventName.steerSaturated)
    return actuators, v_acc_sol, a_acc_sol, lac_log
  def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
    """Send actuators and hud commands to the car, send controlsstate and MPC logging.

    Builds the CarControl message (actuators + HUD + cruise overrides),
    applies it through the car interface when not read-only, then publishes
    controlsState, carState, carEvents, carParams and carControl.
    """
    CC = car.CarControl.new_message()
    CC.enabled = self.enabled
    CC.actuators = actuators
    CC.cruiseControl.override = True
    # cancel stock cruise when it is on but openpilot is not engaged
    CC.cruiseControl.cancel = self.CP.enableCruise and not self.enabled and CS.cruiseState.enabled
    # Some override values for Honda
    # brake discount removes a sharp nonlinearity
    brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
    speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
    CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
    CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)
    # HUD fields shown by the car's native display / UI
    CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
    CC.hudControl.speedVisible = self.enabled
    CC.hudControl.lanesVisible = self.enabled
    CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
    right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
    left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
    CC.hudControl.rightLaneVisible = bool(right_lane_visible)
    CC.hudControl.leftLaneVisible = bool(left_lane_visible)
    # Lane departure warning: only below-engagement, above LDW_MIN_SPEED,
    # calibrated, and not right after a blinker was used
    recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0  # 5s blinker cooldown
    ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
                  and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
    meta = self.sm['modelV2'].meta
    if len(meta.desirePrediction) and ldw_allowed:
      l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
      r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
      cameraOffset = ntune_get("cameraOffset")
      # a lane line within ~1.08m (camera-offset corrected) counts as "close"
      l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + cameraOffset))
      r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - cameraOffset))
      CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
      CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
    if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
      self.events.add(EventName.ldw)
    # only clear WARNING alerts when no warning is being raised this frame
    clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
    alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
    self.AM.add_many(self.sm.frame, alerts, self.enabled)
    self.AM.process_alerts(self.sm.frame, clear_event)
    CC.hudControl.visualAlert = self.AM.visual_alert
    if not self.read_only and self.initialized:
      # send car controls over can
      can_sends = self.CI.apply(CC, self)
      self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
    force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
                  (self.state == State.softDisabling)
    # Curvature & Steering angle
    params = self.sm['liveParameters']
    lat_plan = self.sm['lateralPlan']
    steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg)
    curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo)
    self.angle_steers_des = math.degrees(self.VM.get_steer_from_curvature(-lat_plan.curvature, CS.vEgo))
    self.angle_steers_des += params.angleOffsetDeg
    # controlsState
    dat = messaging.new_message('controlsState')
    dat.valid = CS.canValid
    controlsState = dat.controlsState
    controlsState.alertText1 = self.AM.alert_text_1
    controlsState.alertText2 = self.AM.alert_text_2
    controlsState.alertSize = self.AM.alert_size
    controlsState.alertStatus = self.AM.alert_status
    controlsState.alertBlinkingRate = self.AM.alert_rate
    controlsState.alertType = self.AM.alert_type
    controlsState.alertSound = self.AM.audible_alert
    controlsState.canMonoTimes = list(CS.canMonoTimes)
    controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
    controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
    controlsState.enabled = self.enabled
    controlsState.active = self.active
    controlsState.curvature = curvature
    controlsState.steeringAngleDesiredDeg = self.angle_steers_des
    controlsState.state = self.state
    controlsState.engageable = not self.events.any(ET.NO_ENTRY)
    controlsState.longControlState = self.LoC.long_control_state
    controlsState.vPid = float(self.LoC.v_pid)
    controlsState.vCruise = float(self.cruiseVirtualMaxSpeed if self.CP.openpilotLongitudinalControl else self.v_cruise_kph)
    controlsState.upAccelCmd = float(self.LoC.pid.p)
    controlsState.uiAccelCmd = float(self.LoC.pid.i)
    controlsState.ufAccelCmd = float(self.LoC.pid.f)
    controlsState.vTargetLead = float(v_acc)
    controlsState.aTarget = float(a_acc)
    controlsState.cumLagMs = -self.rk.remaining * 1000.
    controlsState.startMonoTime = int(start_time * 1e9)
    controlsState.forceDecel = bool(force_decel)
    controlsState.canErrorCounter = self.can_error_counter
    controlsState.angleSteers = steer_angle_without_offset * CV.RAD_TO_DEG
    # fork-specific scc-smoother telemetry
    controlsState.cluSpeedMs = self.clu_speed_ms
    controlsState.applyAccel = self.apply_accel
    controlsState.fusedAccel = self.fused_accel
    controlsState.leadDist = self.lead_drel
    controlsState.aReqValue = self.aReqValue
    controlsState.aReqValueMin = self.aReqValueMin
    controlsState.aReqValueMax = self.aReqValueMax
    controlsState.steerRatio = self.VM.sR
    controlsState.steerRateCost = ntune_get('steerRateCost')
    controlsState.steerActuatorDelay = ntune_get('steerActuatorDelay')
    # attach the lateral controller's log under the matching union field
    if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
      controlsState.lateralControlState.angleState = lac_log
    elif self.CP.lateralTuning.which() == 'pid':
      controlsState.lateralControlState.pidState = lac_log
    elif self.CP.lateralTuning.which() == 'lqr':
      controlsState.lateralControlState.lqrState = lac_log
    elif self.CP.lateralTuning.which() == 'indi':
      controlsState.lateralControlState.indiState = lac_log
    self.pm.send('controlsState', dat)
    # carState
    car_events = self.events.to_msg()
    cs_send = messaging.new_message('carState')
    cs_send.valid = CS.canValid
    cs_send.carState = CS
    cs_send.carState.events = car_events
    self.pm.send('carState', cs_send)
    # carEvents - logged every second or on change
    if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
      ce_send = messaging.new_message('carEvents', len(self.events))
      ce_send.carEvents = car_events
      self.pm.send('carEvents', ce_send)
      self.events_prev = self.events.names.copy()
    # carParams - logged every 50 seconds (> 1 per segment)
    if (self.sm.frame % int(50. / DT_CTRL) == 0):
      cp_send = messaging.new_message('carParams')
      cp_send.carParams = self.CP
      self.pm.send('carParams', cp_send)
    # carControl
    cc_send = messaging.new_message('carControl')
    cc_send.valid = CS.canValid
    cc_send.carControl = CC
    self.pm.send('carControl', cc_send)
    # copy CarControl to pass to CarInterface on the next iteration
    self.CC = CC
  def step(self):
    """Run one control iteration: sample -> events -> state machine -> control -> publish.

    Driven at 100Hz by the Ratekeeper created in __init__. Profiler
    checkpoints bracket each phase.
    """
    start_time = sec_since_boot()
    self.prof.checkpoint("Ratekeeper", ignore=True)
    # Sample data from sockets and get a carState
    CS = self.data_sample()
    self.prof.checkpoint("Sample")
    self.update_events(CS)
    if not self.read_only and self.initialized:
      # Update control state
      self.state_transition(CS)
      self.prof.checkpoint("State transition")
    # Compute actuators (runs PID loops and lateral MPC)
    actuators, v_acc, a_acc, lac_log = self.state_control(CS)
    self.prof.checkpoint("State Control")
    # Publish data
    self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
    self.prof.checkpoint("Sent")
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
  """Entry point: build a Controls instance and run its loop (never returns)."""
  Controls(sm, pm, logcan).controlsd_thread()
if __name__ == "__main__":
  # Fix: a merge artifact had fused an unrelated expression fragment onto the
  # main() call; the guard simply runs the entry point.
  main()
game.py | #!/usr/bin/env python3
# coding: utf-8
"""The game logic.
This should be independent of media used to interact with player."""
from typing import Tuple, List, Set, Dict
from const import PLAYER_SHIFT, LAST_ON_PATH, END_PROGRESS
from piece import Piece
from player import Player
from util import progress_to_position
from action import roll_dice
def set_board(num_players: int, num_pieces: int):
    """Build the starting board: num_pieces pieces per player, all at home."""
    return [
        Piece(player_num, piece_num)
        for player_num in range(num_players)
        for piece_num in range(num_pieces)
    ]
def do_move(
    status: List[Piece],
    player: Player,
    piece_to_move: int,
    dice: int,
    player_shift: int = PLAYER_SHIFT,
    last_on_path: int = LAST_ON_PATH,
) -> bool:
    """Check if the move is valid. If it is, perform it. Returns whether it is valid.

    On success the chosen piece is advanced in place, and any opposing pieces
    on the landing path cell are sent home.
    """
    # only pieces that get_valid_moves reports movable may be moved
    movable_piece_nums = [p.index() for p in get_valid_moves(player, dice, status)]
    if not (piece_to_move in movable_piece_nums):
        return False
    current = [
        p for p in status if p.player() == player.number and p.index() == piece_to_move
    ]
    assert len(current) == 1
    piece = current[0]
    if piece.progress() == 0:
        if dice == 6:
            # leaving home advances onto the first path cell
            piece.move(1)
        else:
            # defensive: should be unreachable, the validity check above
            # already rejects leaving home without a 6
            raise ValueError("Home can only be left with a full dice")
    else:
        piece.move(dice)
    # capture: only while still on the shared path can opposing pieces be hit
    if 0 < piece.progress() <= last_on_path:
        others = others_on_position(
            status, player.number, piece.position(), player_shift, last_on_path
        )
        for other in others:
            other.send_home()
    return True
def choose_first(players: Set[Player]) -> Player:
    """ score 0 means player hasn't drawn, -1 means is already out of drawing

    Every remaining player rolls a dice; if several share the highest roll,
    only those tied players re-roll (the rest are marked -1). Repeats until a
    single winner remains and returns that Player.
    """
    m = 0
    score = [0] * len(players)
    need_more = True
    while need_more:
        for i in range(len(score)):
            # players marked -1 have already lost a tie-break and sit out
            if score[i] != -1:
                # TODO: Decouple this logic from console interaction
                score[i] = roll_dice(player_num=i)
        m = max(score)
        if len([v for v in score if v == m]) > 1:
            # tie for the max: tied players reset to 0 and re-roll, others are out
            for i in range(len(score)):
                score[i] = 0 if score[i] == m else -1
        else:
            need_more = False
    return Player.get(score.index(m))
def check_endgame(status: List[Piece]) -> bool:
    """Check if any of the players has ended the game.
    >>> check_endgame([Piece(0, 0),Piece(0, 1),Piece(0, 2),Piece(0, 3),\
    Piece(1, 0),Piece(1, 1),Piece(1, 2),Piece(1, 3),\
    Piece(2, 0),Piece(2, 1),Piece(2, 2),Piece(2, 3),\
    Piece(3, 0),Piece(3, 1),Piece(3, 2),Piece(3, 3)])
    False
    >>> check_endgame([Piece(0, 0),Piece(0, 1),Piece(0, 2),Piece(0, 3),\
    Piece(1, 0),Piece(1, 1),Piece(1, 2),Piece(1, 3),\
    Piece(2, 0),Piece(2, 1),Piece(2, 2),Piece(2, 3),\
    Piece(3, 0, 62),Piece(3, 1, 62),Piece(3, 2, 62),Piece(3, 3, 62)])
    True
    >>> check_endgame([Piece(0, 0),Piece(0, 1),Piece(0, 2),Piece(0, 3),\
    Piece(1, 0, 62),Piece(1, 1, 62),Piece(1, 2, 62),Piece(1, 3, 61),\
    Piece(2, 0, 60),Piece(2, 1, 60),Piece(2, 2, 60),Piece(2, 3, 60),\
    Piece(3, 0, 10),Piece(3, 1, 20),Piece(3, 2, 30),Piece(3, 3, 40)])
    False
    A real game we played that had a bug:
    >>> check_endgame([Piece(0,0,62),Piece(0,1,57),Piece(0,2,62),Piece(0,3,21),\
    Piece(1,0,28),Piece(1,1,62),Piece(1,2,62),Piece(1,3,62),\
    Piece(2,0,62),Piece(2,1,20),Piece(2,2,58),Piece(2,3,62),\
    Piece(3,0,62),Piece(3,1,62),Piece(3,2,0),Piece(3,3,62)])
    False
    """
    # A player has won when every one of their pieces is finished.
    player_finished: Dict[int, bool] = {}
    for piece in status:
        owner = piece.player()
        player_finished[owner] = player_finished.get(owner, True) and piece.is_finished()
    return any(player_finished.values())
def __coord_in_home(piece: Piece) -> Tuple[int, int]:
    """Draw in home positions: each piece has its location. Progress is always same, thus irrelevant
    >>> __coord_in_home(Piece(0, 0))
    (5, 2)
    >>> __coord_in_home(Piece(1, 1))
    (2, 13)
    >>> __coord_in_home(Piece(2, 2))
    (13, 15)
    >>> __coord_in_home(Piece(3, 3))
    (16, 6)
    """
    assert piece.progress() == 0
    # base corner of the player's home zone, offset by the piece's slot
    zone_row, zone_col = [(5, 2), (2, 12), (12, 15), (15, 5)][piece.player()]
    d_row, d_col = [(0, 0), (0, 1), (1, 0), (1, 1)][piece.index()]
    return (zone_row + d_row, zone_col + d_col)
def __coord_on_path(piece: Piece) -> Tuple[int, int]:
    """Draws on path: if two or more pieces on same cell, instead of number,
    draws a placeholder, which does not need to show piece number
    Logic split this in 4 different cases, determined by player offset.
    Parameter piece does't influence logic.
    Player Progress to Position conversion:
    P0 1..56: (pos)
    P1 1..42: (p_num * shift + pos)
    43..56: (p_num * shift + pos) % end_progress
    P2 1..28: (p_num * shift + pos)
    29..56: (p_num * shift + pos) % end_progress
    P3 1..14: (p_num * shift + pos)
    15..56: (p_num * shift + pos) % end_progress
    Test player 1:
    >>> __coord_on_path(Piece(0, 1, 1))
    (8, 2)
    Test player 2:
    >>> __coord_on_path(Piece(1, 1, 1))
    (2, 10)
    Test player 3:
    >>> __coord_on_path(Piece(2, 1, 1))
    (10, 16)
    Test player 4:
    >>> __coord_on_path(Piece(3, 1, 1))
    (16, 8)
    Test path wrap:
    >>> __coord_on_path(Piece(3, 1, 56))
    (16, 9)
    Test overlap:
    >> __coord_on_path(Piece(2, 1, 17))
    (10, 14)
    """
    assert 1 <= piece.progress() <= LAST_ON_PATH and 0 <= piece.player() <= 3
    # Lookup table mapping shared-path position 1..56 to a board (row, col).
    # Index 0 is an unused placeholder so positions index the table directly.
    POSITION_TO_ROWCOL: Tuple[Tuple[int, int], ...] = (
        (0, 0),
        (8, 2),
        (8, 3),
        (8, 4),
        (8, 5),
        (7, 5),
        (6, 5),
        (5, 5),
        (5, 6),
        (5, 7),
        (5, 8),
        (4, 8),
        (3, 8),
        (2, 8),
        (2, 9),
        (2, 10),
        (3, 10),
        (4, 10),
        (5, 10),
        (5, 11),
        (5, 12),
        (5, 13),
        (6, 13),
        (7, 13),
        (8, 13),
        (8, 14),
        (8, 15),
        (8, 16),
        (9, 16),
        (10, 16),
        (10, 15),
        (10, 14),
        (10, 13),
        (11, 13),
        (12, 13),
        (13, 13),
        (13, 12),
        (13, 11),
        (13, 10),
        (14, 10),
        (15, 10),
        (16, 10),
        (16, 9),
        (16, 8),
        (15, 8),
        (14, 8),
        (13, 8),
        (13, 7),
        (13, 6),
        (13, 5),
        (12, 5),
        (11, 5),
        (10, 5),
        (10, 4),
        (10, 3),
        (10, 2),
        (9, 2),
    )
    # piece.position() converts player-relative progress to the shared-path index
    return POSITION_TO_ROWCOL[piece.position()]
def __coord_on_finish(piece: Piece) -> Tuple[int, int]:
    """Piece number is irrelevant
    >>> __coord_on_finish(Piece(0, 1, 57))
    (9, 3)
    >>> __coord_on_finish(Piece(0, 1, 61))
    (9, 7)
    >>> __coord_on_finish(Piece(1, 1, 57))
    (3, 9)
    >>> __coord_on_finish(Piece(2, 1, 58))
    (9, 14)
    >>> __coord_on_finish(Piece(3, 1, 59))
    (13, 9)
    >>> __coord_on_finish(Piece(3, 1, 61))
    (11, 9)
    """
    pos = piece.progress() - LAST_ON_PATH
    assert 0 < pos < 6
    player = piece.player()
    if player not in (0, 1, 2, 3):
        raise NotImplementedError()
    # players 0/1 count outward from the edge, players 2/3 inward from the center
    along = pos + 2 if player in (0, 1) else 16 - pos
    # players 0/2 run along row 9, players 1/3 along column 9
    return (9, along) if player in (0, 2) else (along, 9)
def __coord_in_target(piece: Piece) -> Tuple[int, int]:
    """Draw in target positions: each piece has its location.
    Progress is always same, thus irrelevant
    >>> __coord_in_target(Piece(0, 0, 62))
    (7, 6)
    >>> __coord_in_target(Piece(1, 1, 62))
    (6, 11)
    >>> __coord_in_target(Piece(2, 2, 62))
    (11, 11)
    >>> __coord_in_target(Piece(3, 3, 62))
    (12, 8)
    """
    assert piece.progress() == 62
    # base corner of the player's target zone, offset by the piece's slot
    zone_row, zone_col = [(7, 6), (6, 10), (10, 11), (11, 7)][piece.player()]
    d_row, d_col = [(0, 0), (0, 1), (1, 0), (1, 1)][piece.index()]
    return (zone_row + d_row, zone_col + d_col)
def put_piece_on_board(piece: Piece) -> Tuple[int, int]:
    """Map a piece's progress to its board (row, col).
    Currently player is in [1..4], piece is in [0..3]. Do we need to change this?
    TODO: Refactor to implement startegy pattern
    """
    progress = piece.progress()
    # dispatch on which stage of the journey the piece is in
    if progress == 0:
        return __coord_in_home(piece)
    if 0 < progress <= LAST_ON_PATH:
        return __coord_on_path(piece)
    if LAST_ON_PATH < progress < END_PROGRESS:
        return __coord_on_finish(piece)
    if progress == END_PROGRESS:
        return __coord_in_target(piece)
    raise NotImplementedError()
def is_valid_move(
    piece: Piece,
    dice: int,
    status: List[Piece],
    player_shift: int = PLAYER_SHIFT,
    last_on_path: int = LAST_ON_PATH,
    end_progress: int = END_PROGRESS,
) -> bool:
    """Decide whether `piece` may advance by `dice` given the board `status`.
    >>> p = Piece(1, 1); is_valid_move(p, 6, [p])
    True
    >>> p = Piece(1, 1); is_valid_move(p, 1, [p])
    False
    >>> p = Piece(1, 1, 1); is_valid_move(p, 1, [p])
    True
    >>> p = Piece(1, 1, 1); is_valid_move(p, 6, [p])
    True
    >> p = Piece(1, 1); is_valid_move(p, 6, [p, Piece(0, 0, 15)])
    True
    >>> p = Piece(1, 1); is_valid_move(p, 6, [p, Piece(0, 0, 15), Piece(0, 1, 15)])
    False
    >>> piece = Piece(0, 0, 58); is_valid_move(piece, 6, [piece])
    False
    >>> piece = Piece(1, 0, 0); is_valid_move(piece, 5, [piece])
    False
    >>> piece = Piece(2, 0, 28); is_valid_move(piece, 1, [piece, Piece(0, 0, 1), Piece(0, 1, 1)])
    False
    >>> p = Piece(0,0,0); is_valid_move(p, 6, [p, Piece(0,1,0), Piece(1,0,29), Piece(1,1,29)],28,56)
    False
    >>> p = Piece(0,1,0); is_valid_move(p, 6, [Piece(0,0,0), p, Piece(1,0,29), Piece(1,1,29)],28,56)
    False
    """
    if dice < 1 or dice > 6:
        raise ValueError("Invalid dice: {}".format(dice))
    # can exit from home?
    pos = piece.progress()
    if pos == 0:
        if dice != 6:
            return False
        # Do other players block exit from home
        # (a cell with two or more opposing pieces on it is blocked)
        expected = progress_to_position(piece.player(), 1, player_shift, last_on_path)
        return 2 > len(
            others_on_position(
                status, piece.player(), expected, player_shift, last_on_path
            )
        )
    if 0 < pos <= last_on_path:
        # moving off the shared path onto the finish lane cannot be blocked
        if pos + dice > last_on_path:
            return True
        expected = progress_to_position(
            piece.player(), pos + dice, player_shift, last_on_path
        )
        return 2 > len(
            others_on_position(
                status, piece.player(), expected, player_shift, last_on_path
            )
        )
    if last_on_path < pos < end_progress:
        # on the finish lane: may not overshoot the final cell
        return pos + dice <= end_progress
    # a finished piece can no longer move
    assert pos == end_progress
    return False
def get_valid_moves(player: Player, dice: int, status: List[Piece]) -> List[Piece]:
    """Return this player's pieces that may legally move by `dice`.
    >>> p = Player.create(); p2 = Player.create(); p = Player.get(0)
    >>> get_valid_moves(p, 6, [Piece(0, 0), Piece(0, 1), Piece(1, 0), Piece(1, 1)])
    [0, 1]
    >>> get_valid_moves(p, 1, [Piece(0, 0), Piece(0, 1), Piece(0, 2), Piece(1, 0)])
    []
    >>> get_valid_moves(p, 1, [Piece(0, 0, 1), Piece(0, 1), Piece(0, 2), Piece(1, 0)])
    [0]
    >>> get_valid_moves(p, 1, [Piece(0, 0, 1), Piece(0, 1, 57), Piece(0, 2), Piece(1, 0)])
    [0, 1]
    >>> get_valid_moves(p, 6, [Piece(0, 0, 1), Piece(0, 1, 60), Piece(0, 2), Piece(0, 3, 0)])
    [0, 2, 3]
    """
    return [
        candidate
        for candidate in status
        if candidate.player() == player.number and is_valid_move(candidate, dice, status)
    ]
def __pieces_on_path_position(
    pieces: List[Piece],
    path_pos: int,
    player_shift: int = PLAYER_SHIFT,
    last_on_path: int = LAST_ON_PATH,
) -> List[Piece]:
    """Collect every piece standing on the given shared-path position.
    >>> __pieces_on_path_position([Piece(1, 0, 1)], 15)
    [0]
    >>> __pieces_on_path_position([Piece(2, 0, 1)], 29)
    [0]
    >>> __pieces_on_path_position([Piece(0, 0, 15), Piece(0, 1, 15)], 15)
    [0, 1]
    """
    found: List[Piece] = []
    for candidate in pieces:
        if candidate.position(player_shift, last_on_path) == path_pos:
            found.append(candidate)
    return found
def __other_player_pieces(pieces: List[Piece], player_num: int) -> List[Piece]:
    """Return the subset of `pieces` not owned by `player_num`.

    Fix: the function name had been lost in a merge artifact; restored from
    the call site in others_on_position.
    """
    return [p for p in pieces if p.player() != player_num]
def others_on_position(
    pieces: List[Piece],
    player: int,
    pos: int,
    player_shift: int = PLAYER_SHIFT,
    last_on_path: int = LAST_ON_PATH,
) -> List[Piece]:
    """Do other players block the position by having more than one piece on it.
    Position argument is board position, not piece progress.
    >>> others_on_position([Piece(1,0,29)], 0, 1, 28, 56)
    [0]
    >>> others_on_position([Piece(1,0,29), Piece(1,1,29)], 0, 1, 28, 56)
    [0, 1]
    """
    assert 0 < pos <= last_on_path
    # everything standing on the cell, minus the moving player's own pieces
    occupants = __pieces_on_path_position(pieces, pos, player_shift, last_on_path)
    return __other_player_pieces(occupants, player)
| __other_player_pieces |
osr_ct.py | #!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test coordinate transformations.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at spatialys.com>
# Copyright (c) 2014, Google
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import math
import sys
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
import gdaltest
import pytest
###############################################################################
# Verify that we have PROJ.4 available.
def test_osr_ct_1():
    """Verify that PROJ.4 is available by creating a simple LL->UTM transform.

    Skips the whole scenario when the PROJ.4 library cannot be loaded;
    fails on any other creation error.
    """
    utm_srs = osr.SpatialReference()
    utm_srs.SetUTM(11)
    utm_srs.SetWellKnownGeogCS('WGS84')
    ll_srs = osr.SpatialReference()
    ll_srs.SetWellKnownGeogCS('WGS84')
    try:
        gdal.PushErrorHandler('CPLQuietErrorHandler')
        ct = osr.CoordinateTransformation(ll_srs, utm_srs)
        gdal.PopErrorHandler()
        if 'Unable to load PROJ.4' in gdal.GetLastErrorMsg():
            pytest.skip('PROJ.4 missing, transforms not available.')
    except ValueError:
        gdal.PopErrorHandler()
        if 'Unable to load PROJ.4' in gdal.GetLastErrorMsg():
            pytest.skip('PROJ.4 missing, transforms not available.')
        pytest.fail(gdal.GetLastErrorMsg())
    # Failure message typo fixed ("CoordinateTransformat" -> "CoordinateTransformation").
    assert not (ct is None or ct.this is None), \
        'Unable to create simple CoordinateTransformation.'
###############################################################################
# Actually perform a simple LL to UTM conversion.
def test_osr_ct_2():
    """Perform a simple LL -> UTM point conversion and check the result."""
    utm = osr.SpatialReference()
    utm.SetUTM(11)
    utm.SetWellKnownGeogCS('WGS84')
    ll = osr.SpatialReference()
    ll.SetWellKnownGeogCS('WGS84')
    transform = osr.CoordinateTransformation(ll, utm)
    x, y, z = transform.TransformPoint(32.0, -117.5, 0.0)
    assert (x == pytest.approx(452772.06, abs=0.01)
            and y == pytest.approx(3540544.89, abs=0.01)
            and z == pytest.approx(0.0, abs=0.01)), \
        'Wrong LL to UTM result'
###############################################################################
# Transform an OGR geometry ... this is mostly aimed at ensuring that
# the OGRCoordinateTransformation target SRS isn't deleted till the output
# geometry which also uses it is deleted.
def test_osr_ct_3():
    """Transform an OGR geometry and verify its SRS survives.

    Mostly aimed at ensuring the OGRCoordinateTransformation target SRS is
    not deleted until the output geometry that also uses it is deleted.
    """
    utm = osr.SpatialReference()
    utm.SetUTM(11)
    utm.SetWellKnownGeogCS('WGS84')
    ll = osr.SpatialReference()
    ll.SetWellKnownGeogCS('WGS84')
    ll.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform = osr.CoordinateTransformation(ll, utm)
    pnt = ogr.CreateGeometryFromWkt('POINT(-117.5 32.0)', ll)
    assert pnt.Transform(transform) == 0
    # Drop every reference except the geometry itself.
    ll = None
    transform = None
    utm = None
    wkt = pnt.GetSpatialReference().ExportToPrettyWkt()
    assert wkt[0:6] == 'PROJCS', 'output srs corrupt, ref counting issue?'
    pnt = None
###############################################################################
# Actually perform a simple LL to UTM conversion.
# Works for both OG and NG bindings
def test_osr_ct_4():
    """TransformPoints() on a list mixing 2- and 3-tuples (LL -> UTM).

    Works for both OG and NG bindings.
    """
    utm = osr.SpatialReference()
    utm.SetUTM(11)
    utm.SetWellKnownGeogCS('WGS84')
    ll = osr.SpatialReference()
    ll.SetWellKnownGeogCS('WGS84')
    ll.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform = osr.CoordinateTransformation(ll, utm)
    points = transform.TransformPoints([(-117.5, 32.0, 0.0), (-117.5, 32.0)])
    assert len(points) == 2
    assert len(points[0]) == 3
    for pt in points:
        assert (pt[0] == pytest.approx(452772.06, abs=0.01)
                and pt[1] == pytest.approx(3540544.89, abs=0.01)
                and pt[2] == pytest.approx(0.0, abs=0.01)), \
            'Wrong LL to UTM result'
###############################################################################
# Same test, but with any sequence of tuples instead of a tuple of tuple
# New in NG bindings (#3020)
def test_osr_ct_5():
    """TransformPoints() with a tuple of tuples instead of a list.

    Any sequence is accepted by the NG bindings (#3020).
    """
    utm = osr.SpatialReference()
    utm.SetUTM(11)
    utm.SetWellKnownGeogCS('WGS84')
    ll = osr.SpatialReference()
    ll.SetWellKnownGeogCS('WGS84')
    ll.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform = osr.CoordinateTransformation(ll, utm)
    points = transform.TransformPoints(((-117.5, 32.0, 0.0), (-117.5, 32.0)))
    for pt in points:
        assert (pt[0] == pytest.approx(452772.06, abs=0.01)
                and pt[1] == pytest.approx(3540544.89, abs=0.01)
                and pt[2] == pytest.approx(0.0, abs=0.01)), \
            'Wrong LL to UTM result'
###############################################################################
# Test osr.CreateCoordinateTransformation() method
def test_osr_ct_6():
    """osr.CreateCoordinateTransformation(): None inputs yield None, valid
    inputs yield a working transform."""
    with gdaltest.error_handler():
        assert osr.CreateCoordinateTransformation(None, None) is None
    utm = osr.SpatialReference()
    utm.SetUTM(11)
    utm.SetWellKnownGeogCS('WGS84')
    ll = osr.SpatialReference()
    ll.SetWellKnownGeogCS('WGS84')
    ll.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform = osr.CreateCoordinateTransformation(ll, utm)
    assert transform is not None
    points = transform.TransformPoints(((-117.5, 32.0, 0.0), (-117.5, 32.0)))
    for pt in points:
        assert (pt[0] == pytest.approx(452772.06, abs=0.01)
                and pt[1] == pytest.approx(3540544.89, abs=0.01)
                and pt[2] == pytest.approx(0.0, abs=0.01)), \
            'Wrong LL to UTM result'
###############################################################################
# Actually perform a simple Pseudo Mercator to LL conversion.
def test_osr_ct_7():
    """Pseudo Mercator -> LL conversion, for a raw point and a geometry."""
    pm = osr.SpatialReference()
    pm.ImportFromEPSG(3857)
    ll = osr.SpatialReference()
    ll.SetWellKnownGeogCS('WGS84')
    ll.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform = osr.CoordinateTransformation(pm, ll)
    got = transform.TransformPoint(7000000, 7000000, 0)
    expected = (62.8820698884, 53.0918187696, 0.0)
    if any(e != pytest.approx(g, abs=0.00001) for e, g in zip(expected, got)):
        print('Got: (%f, %f, %f)' % got)
        print('Expected: (%f, %f, %f)' % expected)
        pytest.fail('Wrong LL for Pseudo Mercator result')
    pnt = ogr.CreateGeometryFromWkt('POINT(%g %g)' % (7000000, 7000000),
                                    pm)
    expected_pnt = ogr.CreateGeometryFromWkt('POINT(%.10f %.10f)' % (expected[0], expected[1]),
                                             ll)
    assert pnt.Transform(transform) == 0
    got_xyz = (pnt.GetX(), pnt.GetY(), pnt.GetZ())
    exp_xyz = (expected_pnt.GetX(), expected_pnt.GetY(), expected_pnt.GetZ())
    if any(e != pytest.approx(g, abs=0.00001) for e, g in zip(exp_xyz, got_xyz)):
        print('Got: %s' % pnt.ExportToWkt())
        print('Expected: %s' % expected_pnt.ExportToWkt())
        pytest.fail('Failed to transform from Pseudo Mercator to LL')
###############################################################################
# Test WebMercator -> WGS84 optimized transform
def test_osr_ct_8():
    """WebMercator -> WGS84 optimized transform path."""
    src = osr.SpatialReference()
    src.ImportFromEPSG(3857)
    dst = osr.SpatialReference()
    dst.SetWellKnownGeogCS('WGS84')
    dst.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform = osr.CoordinateTransformation(src, dst)

    def check(pnts, expected_result):
        # Compare every coordinate of every point to 1e-10.
        result = transform.TransformPoints(pnts)
        for got_pt, exp_pt in zip(result, expected_result):
            for got, exp in zip(got_pt, exp_pt):
                if got != pytest.approx(exp, abs=1e-10):
                    print('Got: %s' % str(result))
                    print('Expected: %s' % str(expected_result))
                    pytest.fail('Failed to transform from Pseudo Mercator to LL')

    check([(0, 6274861.39400658), (1, 6274861.39400658)],
          [(0.0, 49.000000000000007, 0.0),
           (8.9831528411952125e-06, 49.000000000000007, 0.0)])
    check([(0, 6274861.39400658), (1 + 0, 1 + 6274861.39400658)],
          [(0.0, 49.000000000000007, 0.0),
           (8.9831528411952125e-06, 49.000005893478189, 0.0)])
###############################################################################
# Test coordinate transformation where only one CRS has a towgs84 clause (#1156)
def test_osr_ct_towgs84_only_one_side():
    """Coordinate transformation where only one CRS has a towgs84 clause (#1156).

    When only one side carries a datum shift, the shift must be ignored in
    both directions: transforming (0, 0, 0) must be a no-op.
    """
    srs_towgs84 = osr.SpatialReference()
    srs_towgs84.SetFromUserInput("+proj=longlat +ellps=GRS80 +towgs84=100,200,300")
    srs_just_ellps = osr.SpatialReference()
    srs_just_ellps.SetFromUserInput('+proj=longlat +ellps=GRS80')
    ct = osr.CoordinateTransformation(srs_towgs84, srs_just_ellps)
    (x, y, z) = ct.TransformPoint(0, 0, 0)
    assert x == 0
    assert y == 0
    assert z == 0
    ct = osr.CoordinateTransformation(srs_just_ellps, srs_towgs84)
    (x, y, z) = ct.TransformPoint(0, 0, 0)
    # Fixed: this line was garbled into "assert x == 0 | assert y == 0"
    # (a syntax error); restored as two separate assertions.
    assert x == 0
    assert y == 0
    assert z == 0
###############################################################################
# Test coordinate transformation where both side have towgs84/datum clause (#1156)
def test_osr_ct_towgs84_both_side():
    """Transformations where both sides carry a towgs84/datum clause (#1156)."""
    def check_shifted(src, dst):
        # A genuine datum shift moves the horizontal position but must keep
        # the ellipsoidal height untouched.
        x, y, z = osr.CoordinateTransformation(src, dst).TransformPoint(0, 0, 20)
        assert x != 0
        assert y != 0
        assert z == 20

    srs_towgs84 = osr.SpatialReference()
    srs_towgs84.SetFromUserInput("+proj=longlat +ellps=GRS80 +towgs84=100,200,300")
    srs_other_towgs84 = osr.SpatialReference()
    srs_other_towgs84.SetFromUserInput("+proj=longlat +ellps=GRS80 +towgs84=0,0,0")
    check_shifted(srs_towgs84, srs_other_towgs84)
    srs_datum_wgs84 = osr.SpatialReference()
    srs_datum_wgs84.SetFromUserInput("+proj=longlat +datum=WGS84")
    check_shifted(srs_towgs84, srs_datum_wgs84)
    check_shifted(srs_datum_wgs84, srs_towgs84)
###############################################################################
# Test coordinate transformation with custom operation
def test_osr_ct_options_operation():
    """A custom operation supplied via CoordinateTransformationOptions."""
    opts = osr.CoordinateTransformationOptions()
    assert opts.SetOperation('+proj=affine +s11=-1')
    transform = osr.CoordinateTransformation(None, None, opts)
    assert transform
    # s11=-1 negates the first axis and leaves the other two alone.
    x, y, z = transform.TransformPoint(1, 2, 3)
    assert (x, y, z) == (-1, 2, 3)
###############################################################################
# Test coordinate transformation with area of interest
def test_osr_ct_options_area_of_interest():
    """SetAreaOfInterest(): rejects invalid bounds and steers NAD27->WGS84."""
    nad27 = osr.SpatialReference()
    nad27.SetFromUserInput("NAD27")
    wgs84 = osr.SpatialReference()
    wgs84.SetFromUserInput("WGS84")
    opts = osr.CoordinateTransformationOptions()
    # Corners outside the valid lon/lat ranges must be refused.
    for bounds in ((-200, 40, -99, 41),
                   (-100, -100, -99, 41),
                   (-100, 40, 200, 41),
                   (-100, 40, -99, 100)):
        assert not opts.SetAreaOfInterest(*bounds)
    assert opts.SetAreaOfInterest(-100, 40, -99, 41)
    transform = osr.CoordinateTransformation(nad27, wgs84, opts)
    assert transform
    x, y, z = transform.TransformPoint(40.5, -99.5, 0)
    assert x != 40.5
    assert x == pytest.approx(40.5, abs=1e-3)
    x, y, z = transform.TransformPoint(0, 0, 0)
    if sys.platform == 'darwin':
        print("ct.TransformPoint(0,0,0) doesn't return expected result on MacOSX. Not sure why.")
    else:
        assert x == float('inf')
###############################################################################
# Test 4D transformations
def test_osr_ct_4D():
    """4D (x, y, z, t) transforms through a time-dependent Helmert pipeline."""
    opts = osr.CoordinateTransformationOptions()
    assert opts.SetOperation('+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=cart +step +proj=helmert +convention=position_vector +x=0.0127 +dx=-0.0029 +rx=-0.00039 +drx=-0.00011 +y=0.0065 +dy=-0.0002 +ry=0.00080 +dry=-0.00019 +z=-0.0209 +dz=-0.0006 +rz=-0.00114 +drz=0.00007 +s=0.00195 +ds=0.00001 +t_epoch=1988.0 +step +proj=cart +inv +step +proj=unitconvert +xy_in=rad +xy_out=deg')
    transform = osr.CoordinateTransformation(None, None, opts)
    assert transform

    def check(got, expected):
        # z gets a looser tolerance (1e-8) than x/y/t (1e-10).
        x, y, z, t = got
        ex, ey, ez, et = expected
        assert x == pytest.approx(ex, abs=1e-10), x
        assert y == pytest.approx(ey, abs=1e-10), y
        assert z == pytest.approx(ez, abs=1e-8), z
        assert t == pytest.approx(et, abs=1e-10), t

    check(transform.TransformPoint(2, 49, 0, 2000),
          (2.0000005420366, 49.0000003766711, -0.0222802283242345, 2000))
    ret = transform.TransformPoints([[2, 49, 0, 2000], [2, 49, 0, 1988]])
    assert len(ret) == 2, ret
    assert len(ret[0]) == 4, ret
    check(ret[0],
          (2.0000005420366, 49.0000003766711, -0.0222802283242345, 2000))
    assert len(ret[1]) == 4, ret
    check(ret[1],
          (1.9999998809056305, 48.9999995630005, 0.005032399669289589, 1988))
###############################################################################
# Test geocentric transformations
def test_osr_ct_geocentric():
    """Geocentric (cartesian) transform between IGNF:RGR92 and IGNF:REUN47."""
    src = osr.SpatialReference()
    src.SetFromUserInput("IGNF:RGR92")
    dst = osr.SpatialReference()
    dst.SetFromUserInput("IGNF:REUN47")
    transform = osr.CoordinateTransformation(src, dst)
    assert transform
    got = transform.TransformPoint(3356123.5400, 1303218.3090, 5247430.6050)
    for value, expected in zip(got, (3353420.949, 1304075.021, 5248935.144)):
        assert value == pytest.approx(expected, abs=1e-1)
###############################################################################
# Test with +lon_wrap=180
def test_osr_ct_lon_wrap():
    """+lon_wrap=180 on the target CRS wraps output longitudes by +360."""
    proj_version = (osr.GetPROJVersionMajor() * 10000
                    + osr.GetPROJVersionMinor() * 100
                    + osr.GetPROJVersionMicro())
    if proj_version < 70001:
        # Issue before PROJ 7.0.1
        pytest.skip()
    src = osr.SpatialReference()
    src.SetFromUserInput("+proj=longlat +ellps=GRS80")
    dst = osr.SpatialReference()
    dst.SetFromUserInput("+proj=longlat +ellps=GRS80 +lon_wrap=180")
    transform = osr.CoordinateTransformation(src, dst)
    assert transform
    x, y, _ = transform.TransformPoint(-25, 60, 0)
    assert x == pytest.approx(-25 + 360, abs=1e-12)
    assert y == pytest.approx(60, abs=1e-12)
###############################################################################
# Test ct.TransformPointWithErrorCode
def test_osr_ct_transformpointwitherrorcode():
    """TransformPointWithErrorCode() returns coordinates plus a PROJ error code."""
    if osr.GetPROJVersionMajor() < 8:
        # Issue before PROJ 8
        pytest.skip()
    src = osr.SpatialReference()
    src.SetFromUserInput("+proj=longlat +ellps=GRS80")
    dst = osr.SpatialReference()
    dst.SetFromUserInput("+proj=tmerc +ellps=GRS80")
    transform = osr.CoordinateTransformation(src, dst)
    assert transform
    # In-domain point: valid result, error code 0, z/t passed through.
    x, y, z, t, error_code = transform.TransformPointWithErrorCode(1, 2, 3, 4)
    assert x == pytest.approx(111257.80439304397, rel=1e-10)
    assert y == pytest.approx(221183.3401672801, rel=1e-10)
    assert z == 3
    assert t == 4
    assert error_code == 0
    # Out-of-domain point: infinite coordinate and a dedicated error code.
    x, y, z, t, error_code = transform.TransformPointWithErrorCode(90, 0, 0, 0)
    assert math.isinf(x)
    assert error_code == osr.PROJ_ERR_COORD_TRANSFM_OUTSIDE_PROJECTION_DOMAIN
###############################################################################
# Test CoordinateTransformationOptions.SetDesiredAccuracy
def test_osr_ct_options_accuracy():
    """CoordinateTransformationOptions.SetDesiredAccuracy(): an unreachable
    accuracy must make the transform unusable.

    Bug fix: the original ``try: ...; assert False; except: pass`` pattern
    swallowed its own AssertionError in the bare ``except``, so the test
    could never fail. ``pytest.raises`` expresses the intent correctly.
    """
    s = osr.SpatialReference()
    s.SetFromUserInput("EPSG:4326")
    t = osr.SpatialReference()
    t.SetFromUserInput("EPSG:4258")  # ETRS89
    options = osr.CoordinateTransformationOptions()
    options.SetDesiredAccuracy(0.05)
    with gdaltest.error_handler():
        ct = osr.CoordinateTransformation(s, t, options)
    # The 0.05 m accuracy cannot be met, so using the transform must raise.
    with pytest.raises(Exception):
        ct.TransformPoint(49, 2, 0)
###############################################################################
# Test CoordinateTransformationOptions.SetBallparkAllowed
def test_osr_ct_options_ballpark_disallowed():
    """CoordinateTransformationOptions.SetBallparkAllowed(False): with only a
    ballpark NAD27->ETRS89 path available, the transform must be unusable.

    Bug fix: the original ``try: ...; assert False; except: pass`` pattern
    swallowed its own AssertionError in the bare ``except``, so the test
    could never fail. ``pytest.raises`` expresses the intent correctly.
    """
    s = osr.SpatialReference()
    s.SetFromUserInput("EPSG:4267")  # NAD27
    t = osr.SpatialReference()
    t.SetFromUserInput("EPSG:4258")  # ETRS89
    options = osr.CoordinateTransformationOptions()
    options.SetBallparkAllowed(False)
    with gdaltest.error_handler():
        ct = osr.CoordinateTransformation(s, t, options)
    # Without a ballpark fallback, using the transform must raise.
    with pytest.raises(Exception):
        ct.TransformPoint(49, 2, 0)
###############################################################################
# Test that we pass a neutral time when not explicitly specified
def test_osr_ct_non_specified_time_with_time_dependent_transformation():
    """Without an explicit epoch, a time-dependent Helmert must use a neutral
    time: the point round-trips unchanged."""
    opts = osr.CoordinateTransformationOptions()
    opts.SetOperation('+proj=pipeline +step +proj=axisswap +order=2,1 +step +proj=unitconvert +xy_in=deg +z_in=m +xy_out=rad +z_out=m +step +proj=cart +ellps=GRS80 +step +inv +proj=helmert +dx=0.0008 +dy=-0.0006 +dz=-0.0014 +drx=6.67e-05 +dry=-0.0007574 +drz=-5.13e-05 +ds=-7e-05 +t_epoch=2010 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=unitconvert +xy_in=rad +z_in=m +xy_out=deg +z_out=m +step +proj=axisswap +order=2,1')
    transform = osr.CoordinateTransformation(None, None, opts)
    assert transform
    x, y, _ = transform.TransformPoint(50, -40, 0)
    assert x == pytest.approx(50, abs=1e-10)
    assert y == pytest.approx(-40, abs=1e-10)
###############################################################################
# Test using OGRSpatialReference::CoordinateEpoch()
def test_osr_ct_take_into_account_srs_coordinate_epoch():
    """Transformations must honour OGRSpatialReference::SetCoordinateEpoch().

    Fix: the last line was garbled with a trailing ``|`` (syntax error);
    restored as a plain assertion.
    """
    if osr.GetPROJVersionMajor() * 100 + osr.GetPROJVersionMinor() < 702:
        pytest.skip('requires PROJ 7.2 or later')
    s = osr.SpatialReference()
    s.SetFromUserInput("EPSG:7844")  # GDA2020
    t_2020 = osr.SpatialReference()
    t_2020.SetFromUserInput("EPSG:9000")  # ITRF2014
    t_2020.SetCoordinateEpoch(2020)
    # 2020 is the central epoch of the transformation, so no coordinate
    # change is expected
    ct = osr.CoordinateTransformation(s, t_2020)
    x, y, _ = ct.TransformPoint(-30, 150, 0)
    assert x == pytest.approx(-30, abs=1e-10)
    assert y == pytest.approx(150, abs=1e-10)
    t_2030 = osr.SpatialReference()
    t_2030.SetFromUserInput("EPSG:9000")  # ITRF2014
    t_2030.SetCoordinateEpoch(2030)
    # Ten years past the central epoch: plate motion shifts the coordinates.
    ct = osr.CoordinateTransformation(s, t_2030)
    x, y, _ = ct.TransformPoint(-30, 150, 0)
    assert x == pytest.approx(-29.9999950478, abs=1e-10)
    assert y == pytest.approx(150.0000022212, abs=1e-10)
    # Inverse direction round-trips back to the original point.
    ct = osr.CoordinateTransformation(t_2030, s)
    x, y, _ = ct.TransformPoint(-29.9999950478, 150.0000022212, 0)
    assert x == pytest.approx(-30, abs=1e-10)
    assert y == pytest.approx(150, abs=1e-10)
    # Not properly supported currently
    gdal.ErrorReset()
    with gdaltest.error_handler():
        ct = osr.CoordinateTransformation(t_2020, t_2030)
        assert gdal.GetLastErrorMsg() != ''
conf.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.17.3
// source: app/app/service/internal/conf/conf.proto
package conf
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
sync "sync"
)
// Compile-time guards: the build fails here if this generated code and the
// linked protoimpl runtime have incompatible versions.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Bootstrap is the root configuration message, aggregating the server and
// data sections. Generated by protoc-gen-go — do not hand-edit the logic.
type Bootstrap struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Server        *Server `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"`
	Data          *Data   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
}

// Reset restores x to an empty Bootstrap.
func (x *Bootstrap) Reset() {
	*x = Bootstrap{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Bootstrap) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Bootstrap) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage, binding the message
// info lazily on first use.
func (x *Bootstrap) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Bootstrap.ProtoReflect.Descriptor instead.
func (*Bootstrap) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{0}
}

// GetServer returns the server section; safe to call on a nil receiver.
func (x *Bootstrap) GetServer() *Server {
	if x != nil {
		return x.Server
	}
	return nil
}

// GetData returns the data section; safe to call on a nil receiver.
func (x *Bootstrap) GetData() *Data {
	if x != nil {
		return x.Data
	}
	return nil
}
// Server groups the HTTP and gRPC endpoint configurations.
// Generated by protoc-gen-go — do not hand-edit the logic.
type Server struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Http          *Server_HTTP `protobuf:"bytes,1,opt,name=http,proto3" json:"http,omitempty"`
	Grpc          *Server_GRPC `protobuf:"bytes,2,opt,name=grpc,proto3" json:"grpc,omitempty"`
}

// Reset restores x to an empty Server.
func (x *Server) Reset() {
	*x = Server{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Server) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Server) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Server) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Server.ProtoReflect.Descriptor instead.
func (*Server) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{1}
}

// GetHttp returns the HTTP endpoint settings; safe on a nil receiver.
func (x *Server) GetHttp() *Server_HTTP {
	if x != nil {
		return x.Http
	}
	return nil
}

// GetGrpc returns the gRPC endpoint settings; safe on a nil receiver.
func (x *Server) GetGrpc() *Server_GRPC {
	if x != nil {
		return x.Grpc
	}
	return nil
}
// Data groups the persistence-layer settings: database and Redis.
// Generated by protoc-gen-go — do not hand-edit the logic.
type Data struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Database      *Data_Database `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
	Redis         *Data_Redis    `protobuf:"bytes,2,opt,name=redis,proto3" json:"redis,omitempty"`
}

// Reset restores x to an empty Data.
func (x *Data) Reset() {
	*x = Data{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Data) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Data) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Data) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Data.ProtoReflect.Descriptor instead.
func (*Data) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{2}
}

// GetDatabase returns the database settings; safe on a nil receiver.
func (x *Data) GetDatabase() *Data_Database {
	if x != nil {
		return x.Database
	}
	return nil
}

// GetRedis returns the Redis settings; safe on a nil receiver.
func (x *Data) GetRedis() *Data_Redis {
	if x != nil {
		return x.Redis
	}
	return nil
}
// Registry holds service-registry settings (currently only Consul).
// Generated by protoc-gen-go — do not hand-edit the logic.
type Registry struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Consul        *Registry_Consul `protobuf:"bytes,1,opt,name=consul,proto3" json:"consul,omitempty"`
}

// Reset restores x to an empty Registry.
func (x *Registry) Reset() {
	*x = Registry{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Registry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Registry) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Registry) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Registry.ProtoReflect.Descriptor instead.
func (*Registry) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{3}
}

// GetConsul returns the Consul settings; safe on a nil receiver.
func (x *Registry) GetConsul() *Registry_Consul {
	if x != nil {
		return x.Consul
	}
	return nil
}
// Server_HTTP configures the HTTP listener: network, address and timeout.
// Generated by protoc-gen-go — do not hand-edit the logic.
type Server_HTTP struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Network       string               `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
	Addr          string               `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
	Timeout       *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
}

// Reset restores x to an empty Server_HTTP.
func (x *Server_HTTP) Reset() {
	*x = Server_HTTP{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Server_HTTP) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Server_HTTP) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Server_HTTP) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Server_HTTP.ProtoReflect.Descriptor instead.
func (*Server_HTTP) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{1, 0}
}

// GetNetwork returns the listen network; safe on a nil receiver.
func (x *Server_HTTP) GetNetwork() string {
	if x != nil {
		return x.Network
	}
	return ""
}

// GetAddr returns the listen address; safe on a nil receiver.
func (x *Server_HTTP) GetAddr() string {
	if x != nil {
		return x.Addr
	}
	return ""
}

// GetTimeout returns the request timeout; safe on a nil receiver.
func (x *Server_HTTP) GetTimeout() *durationpb.Duration {
	if x != nil {
		return x.Timeout
	}
	return nil
}
// Server_GRPC configures the gRPC listener: network, address and timeout.
// Generated by protoc-gen-go — do not hand-edit the logic.
type Server_GRPC struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Network       string               `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
	Addr          string               `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
	Timeout       *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
}

// Reset restores x to an empty Server_GRPC.
func (x *Server_GRPC) Reset() {
	*x = Server_GRPC{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Server_GRPC) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Server_GRPC) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Server_GRPC) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Server_GRPC.ProtoReflect.Descriptor instead.
func (*Server_GRPC) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{1, 1}
}

// GetNetwork returns the listen network; safe on a nil receiver.
func (x *Server_GRPC) GetNetwork() string {
	if x != nil {
		return x.Network
	}
	return ""
}

// GetAddr returns the listen address; safe on a nil receiver.
func (x *Server_GRPC) GetAddr() string {
	if x != nil {
		return x.Addr
	}
	return ""
}

// GetTimeout returns the request timeout; safe on a nil receiver.
func (x *Server_GRPC) GetTimeout() *durationpb.Duration {
	if x != nil {
		return x.Timeout
	}
	return nil
}
// Data_Database holds the SQL connection settings: driver name and DSN.
// Generated by protoc-gen-go — do not hand-edit the logic.
type Data_Database struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Driver        string `protobuf:"bytes,1,opt,name=driver,proto3" json:"driver,omitempty"`
	Source        string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
}

// Reset restores x to an empty Data_Database.
func (x *Data_Database) Reset() {
	*x = Data_Database{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Data_Database) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Data_Database) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Data_Database) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Data_Database.ProtoReflect.Descriptor instead.
func (*Data_Database) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{2, 0}
}

// GetDriver returns the database driver name; safe on a nil receiver.
func (x *Data_Database) GetDriver() string {
	if x != nil {
		return x.Driver
	}
	return ""
}

// GetSource returns the connection source (DSN); safe on a nil receiver.
func (x *Data_Database) GetSource() string {
	if x != nil {
		return x.Source
	}
	return ""
}
// Data_Redis holds the Redis connection settings, including separate read
// and write timeouts. Generated by protoc-gen-go — do not hand-edit.
type Data_Redis struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Network       string               `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
	Addr          string               `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
	ReadTimeout   *durationpb.Duration `protobuf:"bytes,3,opt,name=read_timeout,json=readTimeout,proto3" json:"read_timeout,omitempty"`
	WriteTimeout  *durationpb.Duration `protobuf:"bytes,4,opt,name=write_timeout,json=writeTimeout,proto3" json:"write_timeout,omitempty"`
}

// Reset restores x to an empty Data_Redis.
func (x *Data_Redis) Reset() {
	*x = Data_Redis{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Data_Redis) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Data_Redis) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Data_Redis) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Data_Redis.ProtoReflect.Descriptor instead.
func (*Data_Redis) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{2, 1}
}

// GetNetwork returns the Redis network; safe on a nil receiver.
func (x *Data_Redis) GetNetwork() string {
	if x != nil {
		return x.Network
	}
	return ""
}

// GetAddr returns the Redis address; safe on a nil receiver.
func (x *Data_Redis) GetAddr() string {
	if x != nil {
		return x.Addr
	}
	return ""
}

// GetReadTimeout returns the read timeout; safe on a nil receiver.
func (x *Data_Redis) GetReadTimeout() *durationpb.Duration {
	if x != nil {
		return x.ReadTimeout
	}
	return nil
}

// GetWriteTimeout returns the write timeout; safe on a nil receiver.
func (x *Data_Redis) GetWriteTimeout() *durationpb.Duration {
	if x != nil {
		return x.WriteTimeout
	}
	return nil
}
// Registry_Consul holds the Consul agent endpoint: address and URL scheme.
// Generated by protoc-gen-go — do not hand-edit the logic.
type Registry_Consul struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Address       string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
	Scheme        string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"`
}

// Reset restores x to an empty Registry_Consul.
func (x *Registry_Consul) Reset() {
	*x = Registry_Consul{}
	if protoimpl.UnsafeEnabled {
		mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Registry_Consul) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Registry_Consul) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage.
func (x *Registry_Consul) ProtoReflect() protoreflect.Message {
	mi := &file_app_app_service_internal_conf_conf_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Registry_Consul.ProtoReflect.Descriptor instead.
func (*Registry_Consul) Descriptor() ([]byte, []int) {
	return file_app_app_service_internal_conf_conf_proto_rawDescGZIP(), []int{3, 0}
}

// GetAddress returns the Consul agent address; safe on a nil receiver.
func (x *Registry_Consul) GetAddress() string {
	if x != nil {
		return x.Address
	}
	return ""
}

// GetScheme returns the URI scheme used to reach Consul; safe on a nil receiver.
func (x *Registry_Consul) GetScheme() string {
	if x != nil {
		return x.Scheme
	}
	return ""
}
var File_app_app_service_internal_conf_conf_proto protoreflect.FileDescriptor
var file_app_app_service_internal_conf_conf_proto_rawDesc = []byte{
0x0a, 0x28, 0x61, 0x70, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x2f,
0x63, 0x6f, 0x6e, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x61, 0x70, 0x70, 0x2e,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7b, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
0x61, 0x70, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x70, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x2e, 0x53,
0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x33, 0x0a,
0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70,
0x70, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61,
0x74, 0x61, 0x22, 0xd6, 0x02, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x3a, 0x0a,
0x04, 0x68, 0x74, 0x74, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70,
0x70, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x48,
0x54, 0x54, 0x50, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x3a, 0x0a, 0x04, 0x67, 0x72, 0x70,
0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x70, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63,
0x6f, 0x6e, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x52, 0x50, 0x43, 0x52,
0x04, 0x67, 0x72, 0x70, 0x63, 0x1a, 0x69, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x18, 0x0a,
0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x12, 0x33, 0x0a, 0x07, 0x74,
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
0x1a, 0x69, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77,
0x6f, 0x72, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f,
0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xfb, 0x02, 0x0a, 0x04,
0x44, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x70, 0x2e, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f,
0x6e, 0x66, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x72, 0x65,
0x64, 0x69, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x70, 0x2e,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73,
0x52, 0x05, 0x72, 0x65, 0x64, 0x69, 0x73, 0x1a, 0x3a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62,
0x61, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x1a, 0xb3, 0x01, 0x0a, 0x05, 0x52, 0x65, 0x64, 0x69, 0x73, 0x12, 0x18, 0x0a,
0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x12, 0x3c, 0x0a, 0x0c, 0x72,
0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x72, 0x65,
0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x72, 0x69,
0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x72, 0x69,
0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x8a, 0x01, 0x0a, 0x08, 0x52, 0x65,
0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x70, 0x70, 0x2e, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f,
0x6e, 0x66, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
0x75, 0x6c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x1a, 0x3a, 0x0a, 0x06, 0x43, 0x6f,
0x6e, 0x73, 0x75, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16,
0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x42, 0x2c, 0x5a, 0x2a, 0x6d, 0x61, 0x6c, 0x6c, 0x2d, 0x67,
0x6f, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x3b,
0x63, 0x6f, 0x6e, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
	file_app_app_service_internal_conf_conf_proto_rawDescOnce sync.Once
	file_app_app_service_internal_conf_conf_proto_rawDescData = file_app_app_service_internal_conf_conf_proto_rawDesc
)

// file_app_app_service_internal_conf_conf_proto_rawDescGZIP returns the raw
// file descriptor compressed with gzip. Compression runs once (guarded by
// rawDescOnce); the compressed bytes replace the raw ones in rawDescData.
func file_app_app_service_internal_conf_conf_proto_rawDescGZIP() []byte {
	file_app_app_service_internal_conf_conf_proto_rawDescOnce.Do(func() {
		file_app_app_service_internal_conf_conf_proto_rawDescData = protoimpl.X.CompressGZIP(file_app_app_service_internal_conf_conf_proto_rawDescData)
	})
	return file_app_app_service_internal_conf_conf_proto_rawDescData
}
// file_app_app_service_internal_conf_conf_proto_msgTypes holds runtime type
// information for the 9 messages declared in conf.proto.
var file_app_app_service_internal_conf_conf_proto_msgTypes = make([]protoimpl.MessageInfo, 9)

// file_app_app_service_internal_conf_conf_proto_goTypes maps descriptor type
// indexes to the corresponding Go types.
var file_app_app_service_internal_conf_conf_proto_goTypes = []interface{}{
	(*Bootstrap)(nil),           // 0: app.service.internal.conf.Bootstrap
	(*Server)(nil),              // 1: app.service.internal.conf.Server
	(*Data)(nil),                // 2: app.service.internal.conf.Data
	(*Registry)(nil),            // 3: app.service.internal.conf.Registry
	(*Server_HTTP)(nil),         // 4: app.service.internal.conf.Server.HTTP
	(*Server_GRPC)(nil),         // 5: app.service.internal.conf.Server.GRPC
	(*Data_Database)(nil),       // 6: app.service.internal.conf.Data.Database
	(*Data_Redis)(nil),          // 7: app.service.internal.conf.Data.Redis
	(*Registry_Consul)(nil),     // 8: app.service.internal.conf.Registry.Consul
	(*durationpb.Duration)(nil), // 9: google.protobuf.Duration
}

// file_app_app_service_internal_conf_conf_proto_depIdxs links each message
// field that refers to another type to its index in goTypes above; the
// trailing entries delimit the sub-lists used for methods and extensions
// (all empty here except field type_names, [0:11]).
var file_app_app_service_internal_conf_conf_proto_depIdxs = []int32{
	1,  // 0: app.service.internal.conf.Bootstrap.server:type_name -> app.service.internal.conf.Server
	2,  // 1: app.service.internal.conf.Bootstrap.data:type_name -> app.service.internal.conf.Data
	4,  // 2: app.service.internal.conf.Server.http:type_name -> app.service.internal.conf.Server.HTTP
	5,  // 3: app.service.internal.conf.Server.grpc:type_name -> app.service.internal.conf.Server.GRPC
	6,  // 4: app.service.internal.conf.Data.database:type_name -> app.service.internal.conf.Data.Database
	7,  // 5: app.service.internal.conf.Data.redis:type_name -> app.service.internal.conf.Data.Redis
	8,  // 6: app.service.internal.conf.Registry.consul:type_name -> app.service.internal.conf.Registry.Consul
	9,  // 7: app.service.internal.conf.Server.HTTP.timeout:type_name -> google.protobuf.Duration
	9,  // 8: app.service.internal.conf.Server.GRPC.timeout:type_name -> google.protobuf.Duration
	9,  // 9: app.service.internal.conf.Data.Redis.read_timeout:type_name -> google.protobuf.Duration
	9,  // 10: app.service.internal.conf.Data.Redis.write_timeout:type_name -> google.protobuf.Duration
	11, // [11:11] is the sub-list for method output_type
	11, // [11:11] is the sub-list for method input_type
	11, // [11:11] is the sub-list for extension type_name
	11, // [11:11] is the sub-list for extension extendee
	0,  // [0:11] is the sub-list for field type_name
}
func init() |
// file_app_app_service_internal_conf_conf_proto_init builds this file's type
// descriptors and publishes them as File_app_app_service_internal_conf_conf_proto.
// It is idempotent: once the file descriptor is set, further calls return
// immediately.
func file_app_app_service_internal_conf_conf_proto_init() {
	if File_app_app_service_internal_conf_conf_proto != nil {
		return
	}
	// Without the unsafe fast path, protoimpl needs exporter functions to
	// reach the unexported bookkeeping fields (state, sizeCache,
	// unknownFields) of each message type; register one per message.
	if !protoimpl.UnsafeEnabled {
		file_app_app_service_internal_conf_conf_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Bootstrap); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Server); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Data); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Registry); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Server_HTTP); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Server_GRPC); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Data_Database); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Data_Redis); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_app_app_service_internal_conf_conf_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Registry_Consul); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// Local marker type: its PkgPath identifies this package to the builder.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_app_app_service_internal_conf_conf_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   9,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_app_app_service_internal_conf_conf_proto_goTypes,
		DependencyIndexes: file_app_app_service_internal_conf_conf_proto_depIdxs,
		MessageInfos:      file_app_app_service_internal_conf_conf_proto_msgTypes,
	}.Build()
	File_app_app_service_internal_conf_conf_proto = out.File
	// Drop build-time inputs so they can be garbage collected.
	file_app_app_service_internal_conf_conf_proto_rawDesc = nil
	file_app_app_service_internal_conf_conf_proto_goTypes = nil
	file_app_app_service_internal_conf_conf_proto_depIdxs = nil
}
| { file_app_app_service_internal_conf_conf_proto_init() } |
tl_secure_required_type_gen.go | // Code generated by gotdgen, DO NOT EDIT.
package tg
import (
"context"
"fmt"
"strings"
"github.com/gotd/td/bin"
)
// No-op references so the imports above are always considered used,
// regardless of which generated declarations remain in this file.
var _ = bin.Buffer{}
var _ = context.Background()
var _ = fmt.Stringer(nil)
var _ = strings.Builder{}
// SecureRequiredType represents TL type `secureRequiredType#829d99da`.
// Required type
//
// See https://core.telegram.org/constructor/secureRequiredType for reference.
type SecureRequiredType struct {
	// Flags, see TL conditional fields¹
	//
	// Links:
	//  1) https://core.telegram.org/mtproto/TL-combinators#conditional-fields
	Flags bin.Fields
	// Native names (conditional: flag bit 0).
	NativeNames bool
	// Is a selfie required (conditional: flag bit 1).
	SelfieRequired bool
	// Is a translation required (conditional: flag bit 2).
	TranslationRequired bool
	// Secure value type; must be non-nil when encoding.
	Type SecureValueTypeClass
}
// SecureRequiredTypeTypeID is TL type id of SecureRequiredType.
const SecureRequiredTypeTypeID = 0x829d99da
// String implements fmt.Stringer.
func (s *SecureRequiredType) String() string {
if s == nil {
return "SecureRequiredType(nil)"
}
var sb strings.Builder
sb.WriteString("SecureRequiredType")
sb.WriteString("{\n")
sb.WriteString("\tFlags: ")
sb.WriteString(s.Flags.String())
sb.WriteString(",\n")
sb.WriteString("\tType: ")
sb.WriteString(s.Type.String())
sb.WriteString(",\n")
sb.WriteString("}")
return sb.String()
}
// Encode implements bin.Encoder.
func (s *SecureRequiredType) Encode(b *bin.Buffer) error {
if s == nil {
return fmt.Errorf("can't encode secureRequiredType#829d99da as nil")
}
b.PutID(SecureRequiredTypeTypeID)
if err := s.Flags.Encode(b); err != nil {
return fmt.Errorf("unable to encode secureRequiredType#829d99da: field flags: %w", err)
}
if s.Type == nil {
return fmt.Errorf("unable to encode secureRequiredType#829d99da: field type is nil")
}
if err := s.Type.Encode(b); err != nil {
return fmt.Errorf("unable to encode secureRequiredType#829d99da: field type: %w", err)
}
return nil
}
// SetNativeNames sets value of NativeNames conditional field.
func (s *SecureRequiredType) SetNativeNames(value bool) {
if value {
s.Flags.Set(0)
s.NativeNames = true
} else {
s.Flags.Unset(0)
s.NativeNames = false
}
}
// SetSelfieRequired sets value of SelfieRequired conditional field.
func (s *SecureRequiredType) SetSelfieRequired(value bool) {
if value {
s.Flags.Set(1)
s.SelfieRequired = true
} else {
s.Flags.Unset(1)
s.SelfieRequired = false
}
}
// SetTranslationRequired sets value of TranslationRequired conditional field.
func (s *SecureRequiredType) SetTranslationRequired(value bool) {
if value {
s.Flags.Set(2)
s.TranslationRequired = true
} else {
s.Flags.Unset(2)
s.TranslationRequired = false
}
}
// Decode implements bin.Decoder.
func (s *SecureRequiredType) Decode(b *bin.Buffer) error {
if s == nil {
return fmt.Errorf("can't decode secureRequiredType#829d99da to nil")
}
if err := b.ConsumeID(SecureRequiredTypeTypeID); err != nil {
return fmt.Errorf("unable to decode secureRequiredType#829d99da: %w", err)
}
{
if err := s.Flags.Decode(b); err != nil {
return fmt.Errorf("unable to decode secureRequiredType#829d99da: field flags: %w", err)
}
}
s.NativeNames = s.Flags.Has(0)
s.SelfieRequired = s.Flags.Has(1)
s.TranslationRequired = s.Flags.Has(2)
{
value, err := DecodeSecureValueType(b)
if err != nil {
return fmt.Errorf("unable to decode secureRequiredType#829d99da: field type: %w", err)
}
s.Type = value
}
return nil
}
// construct implements constructor of SecureRequiredTypeClass.
func (s SecureRequiredType) construct() SecureRequiredTypeClass { return &s }
// Ensuring interfaces in compile-time for SecureRequiredType.
var (
_ bin.Encoder = &SecureRequiredType{}
_ bin.Decoder = &SecureRequiredType{}
_ SecureRequiredTypeClass = &SecureRequiredType{}
)
// SecureRequiredTypeOneOf represents TL type `secureRequiredTypeOneOf#27477b4`.
// One of
//
// See https://core.telegram.org/constructor/secureRequiredTypeOneOf for reference.
type SecureRequiredTypeOneOf struct {
// Secure required value types
Types []SecureRequiredTypeClass
}
// SecureRequiredTypeOneOfTypeID is TL type id of SecureRequiredTypeOneOf.
const SecureRequiredTypeOneOfTypeID = 0x27477b4
// String implements fmt.Stringer.
func (s *SecureRequiredTypeOneOf) String() string {
if s == nil {
return "SecureRequiredTypeOneOf(nil)"
}
var sb strings.Builder
sb.WriteString("SecureRequiredTypeOneOf")
sb.WriteString("{\n")
sb.WriteByte('[')
for _, v := range s.Types {
sb.WriteString(fmt.Sprint(v))
}
sb.WriteByte(']')
sb.WriteString("}")
return sb.String()
}
// Encode implements bin.Encoder.
func (s *SecureRequiredTypeOneOf) Encode(b *bin.Buffer) error {
if s == nil {
return fmt.Errorf("can't encode secureRequiredTypeOneOf#27477b4 as nil")
}
b.PutID(SecureRequiredTypeOneOfTypeID)
b.PutVectorHeader(len(s.Types))
for idx, v := range s.Types {
if v == nil {
return fmt.Errorf("unable to encode secureRequiredTypeOneOf#27477b4: field types element with index %d is nil", idx)
}
if err := v.Encode(b); err != nil {
return fmt.Errorf("unable to encode secureRequiredTypeOneOf#27477b4: field types element with index %d: %w", idx, err)
}
}
return nil
}
// Decode implements bin.Decoder.
func (s *SecureRequiredTypeOneOf) Decode(b *bin.Buffer) error {
if s == nil {
return fmt.Errorf("can't decode secureRequiredTypeOneOf#27477b4 to nil")
}
if err := b.ConsumeID(SecureRequiredTypeOneOfTypeID); err != nil {
return fmt.Errorf("unable to decode secureRequiredTypeOneOf#27477b4: %w", err)
}
{
headerLen, err := b.VectorHeader()
if err != nil {
return fmt.Errorf("unable to decode secureRequiredTypeOneOf#27477b4: field types: %w", err)
}
for idx := 0; idx < headerLen; idx++ {
value, err := DecodeSecureRequiredType(b)
if err != nil {
return fmt.Errorf("unable to decode secureRequiredTypeOneOf#27477b4: field types: %w", err)
}
s.Types = append(s.Types, value)
}
}
return nil
}
// construct implements constructor of SecureRequiredTypeClass.
func (s SecureRequiredTypeOneOf) construct() SecureRequiredTypeClass { return &s }
// Ensuring interfaces in compile-time for SecureRequiredTypeOneOf.
var (
_ bin.Encoder = &SecureRequiredTypeOneOf{}
_ bin.Decoder = &SecureRequiredTypeOneOf{}
_ SecureRequiredTypeClass = &SecureRequiredTypeOneOf{}
)
// SecureRequiredTypeClass represents SecureRequiredType generic type.
//
// See https://core.telegram.org/type/SecureRequiredType for reference.
//
// Example:
// g, err := DecodeSecureRequiredType(buf)
// if err != nil {
// panic(err)
// }
// switch v := g.(type) {
// case *SecureRequiredType: // secureRequiredType#829d99da
// case *SecureRequiredTypeOneOf: // secureRequiredTypeOneOf#27477b4
// default: panic(v)
// }
type SecureRequiredTypeClass interface {
	bin.Encoder
	bin.Decoder
	// construct is unexported so only types in this package can implement
	// the class.
	construct() SecureRequiredTypeClass
	fmt.Stringer
}
// DecodeSecureRequiredType implements binary de-serialization for SecureRequiredTypeClass.
func D | buf *bin.Buffer) (SecureRequiredTypeClass, error) {
id, err := buf.PeekID()
if err != nil {
return nil, err
}
switch id {
case SecureRequiredTypeTypeID:
// Decoding secureRequiredType#829d99da.
v := SecureRequiredType{}
if err := v.Decode(buf); err != nil {
return nil, fmt.Errorf("unable to decode SecureRequiredTypeClass: %w", err)
}
return &v, nil
case SecureRequiredTypeOneOfTypeID:
// Decoding secureRequiredTypeOneOf#27477b4.
v := SecureRequiredTypeOneOf{}
if err := v.Decode(buf); err != nil {
return nil, fmt.Errorf("unable to decode SecureRequiredTypeClass: %w", err)
}
return &v, nil
default:
return nil, fmt.Errorf("unable to decode SecureRequiredTypeClass: %w", bin.NewUnexpectedID(id))
}
}
// SecureRequiredType boxes the SecureRequiredTypeClass providing a helper.
// SecureRequiredTypeBox boxes a SecureRequiredTypeClass value, providing a
// concrete type that can be encoded and decoded directly.
type SecureRequiredTypeBox struct {
	SecureRequiredType SecureRequiredTypeClass
}
// Decode implements bin.Decoder for SecureRequiredTypeBox.
func (b *SecureRequiredTypeBox) Decode(buf *bin.Buffer) error {
if b == nil {
return fmt.Errorf("unable to decode SecureRequiredTypeBox to nil")
}
v, err := DecodeSecureRequiredType(buf)
if err != nil {
return fmt.Errorf("unable to decode boxed value: %w", err)
}
b.SecureRequiredType = v
return nil
}
// Encode implements bin.Encode for SecureRequiredTypeBox.
func (b *SecureRequiredTypeBox) Encode(buf *bin.Buffer) error {
if b == nil || b.SecureRequiredType == nil {
return fmt.Errorf("unable to encode SecureRequiredTypeClass as nil")
}
return b.SecureRequiredType.Encode(buf)
}
| ecodeSecureRequiredType( |
utils.ts | export function | (date: Date): string {
return new Date(date).toLocaleString('default', {
year: 'numeric',
day: '2-digit',
month: 'short'
});
}
| format_date |
inline_response2001.py | """
Time Series API For Digital Portals
Time series data, end-of-day or intraday, tick-by-tick or subsampled. Additional vendor-specific endpoints provide a modified interface for seamless integration with the ChartIQ chart library. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.TimeSeriesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.TimeSeriesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
    """Import the referenced model classes on demand and publish them in
    this module's globals, deferring the imports until first use."""
    from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2001_data import InlineResponse2001Data
    from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2001_meta import InlineResponse2001Meta
    globals()['InlineResponse2001Data'] = InlineResponse2001Data
    globals()['InlineResponse2001Meta'] = InlineResponse2001Meta
class InlineResponse2001(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
    # No enum constraints are defined for this model.
    allowed_values = {
    }

    # No length/range/regex validations are defined for this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    # This schema does not accept None as a value.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
            and the value is attribute type.
        """
        lazy_import()
        return {
            'data': (InlineResponse2001Data,),  # noqa: E501
            'meta': (InlineResponse2001Meta,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This model is not polymorphic, so no discriminator is used.
        return None

    # Maps python attribute names to the JSON keys of the OpenAPI document.
    attribute_map = {
        'data': 'data',  # noqa: E501
        'meta': 'meta',  # noqa: E501
    }

    # No attributes are declared read-only in the spec.
    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """InlineResponse2001 - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            data (InlineResponse2001Data): [optional]  # noqa: E501
            meta (InlineResponse2001Meta): [optional]  # noqa: E501
        """
        # Pop framework-control keywords so only model fields remain in kwargs.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Create the instance without running __init__.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Assign remaining keyword args as model attributes, optionally
        # discarding keys unknown to the schema when configured to do so.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse2001 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (InlineResponse2001Data): [optional] # noqa: E501
meta (InlineResponse2001Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__, | path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | ), |
option_future.rs | pub trait Future {}
#[rustc_with_negative_coherence]
impl<E> !Future for Option<E> where E: Sized {} | #![crate_type = "lib"]
#![feature(negative_impls)]
#![feature(rustc_attrs)]
|
|
tableRows.go | // Copyright © 2018 NAME HERE <EMAIL ADDRESS>
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"encoding/json"
"fmt"
"os"
"github.com/eoscanada/eos-go"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var tableRowsCmd = &cobra.Command{
Use: "rows [contract] [scope] [table]",
Short: "List the producers",
Long: `List the producers`,
Args: cobra.ExactArgs(3),
Run: func(cmd *cobra.Command, args []string) {
api := api()
response, err := api.GetTableRows(
eos.GetTableRowsRequest{
Code: args[0],
Scope: args[1], | Limit: uint32(viper.GetInt("tableCmd.limit")),
},
)
if err != nil {
fmt.Printf("Get table rows , %s\n", err.Error())
os.Exit(1)
}
data, err := json.MarshalIndent(response, "", " ")
if err != nil {
fmt.Printf("Error: json conversion , %s\n", err.Error())
os.Exit(1)
}
fmt.Println(string(data))
},
}
// tableCmd is the parent command grouping table-related subcommands
// (its registration is currently commented out in init below).
var tableCmd = &cobra.Command{
	Use:   "table",
	Short: "table related commands",
}
func init() {
// RootCmd.AddCommand(tableCmd)
// tableCmd.AddCommand(tableRowsCmd)
tableRowsCmd.Flags().IntP("limit", "", 50, "maximum producers that will be return")
for _, flag := range []string{"limit"} {
if err := viper.BindPFlag("tableCmd."+flag, tableRowsCmd.Flags().Lookup(flag)); err != nil {
panic(err)
}
}
} | Table: args[2],
JSON: true, |
mq.go | package backend
import (
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/spf13/viper"
"sync"
"time"
log "github.com/Sirupsen/logrus"
)
var client mqtt.Client
var onceMQ sync.Once
func defaultHandler(_ mqtt.Client, msg mqtt.Message) {
log := log.WithFields(log.Fields{
"prefix": "mq",
"message": msg,
})
log.Error("default handler called")
}
func | (_ mqtt.Client) {
log := log.WithField("prefix", "mq")
log.Info("connected")
}
func onConnectionLostHandler(_ mqtt.Client, err error) {
log := log.WithField("prefix", "mq")
log.Error("connection lost: ", err)
}
// MQClient returns an initialized and connected, singleton MQTT client instance.
func MQClient() mqtt.Client {
log := log.WithField("prefix", "mq")
onceMQ.Do(func() {
opts := mqtt.NewClientOptions()
opts.AddBroker(viper.GetString("mqtt.url"))
opts.SetClientID(viper.GetString("mqtt.clientid"))
opts.SetAutoReconnect(true)
opts.SetKeepAlive(time.Second * 10)
opts.SetDefaultPublishHandler(defaultHandler)
opts.SetOnConnectHandler(onConnectHandler)
opts.SetConnectionLostHandler(onConnectionLostHandler)
opts.SetProtocolVersion(3)
//fs := mqtt.NewFileStore(viper.GetString("mqtt.store"))
//fs.Open()
//opts.SetStore(fs)
opts.SetStore(mqtt.NewFileStore(viper.GetString("mqtt.store")))
log.Info("connecting: ", viper.GetString("mqtt.url"))
client = mqtt.NewClient(opts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
log.Panic(token.Error())
}
})
return client
}
func MQCleanup() {
log := log.WithField("prefix", "mq")
MQClient().Disconnect(0)
log.Info("disconnected")
} | onConnectHandler |
0004_auto_20180518_1257.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-05-18 07:27
from __future__ import unicode_literals |
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20180518_1221'),
]
operations = [
migrations.CreateModel(
name='crimes_against_women',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Area_Name', models.CharField(max_length=140)),
('Year', models.IntegerField()),
('Subgroup', models.CharField(max_length=140)),
('Rape_Cases_Reported', models.IntegerField()),
('Victims_Above_50_Yrs', models.IntegerField()),
('Victims_Between_10to14_Yrs', models.IntegerField()),
('Victims_Between_14to18_Yrs', models.IntegerField()),
('Victims_Between_18to30_Yrs', models.IntegerField()),
('Victims_Between_30to50_Yrs', models.IntegerField()),
('Victims_of_Rape_Total', models.IntegerField()),
('Victims_Upto_10_Yrs', models.IntegerField()),
],
),
migrations.DeleteModel(
name='Pod',
),
] |
from django.db import migrations, models |
caching_bucket_factory.go | // Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
package storecache
import (
"regexp"
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/yaml.v2"
"github.com/thanos-io/thanos/pkg/block/metadata"
cache "github.com/thanos-io/thanos/pkg/cache"
"github.com/thanos-io/thanos/pkg/cacheutil"
"github.com/thanos-io/thanos/pkg/model"
"github.com/thanos-io/thanos/pkg/objstore"
)
// BucketCacheProvider is a type used to evaluate all bucket cache providers.
type BucketCacheProvider string
const (
InMemoryBucketCacheProvider BucketCacheProvider = "IN-MEMORY" // In-memory cache-provider for caching bucket.
MemcachedBucketCacheProvider BucketCacheProvider = "MEMCACHED" // Memcached cache-provider for caching bucket.
RedisBucketCacheProvider BucketCacheProvider = "REDIS" // Redis cache-provider for caching bucket.
)
// CachingWithBackendConfig is a configuration of caching bucket used by Store component.
type CachingWithBackendConfig struct {
Type BucketCacheProvider `yaml:"type"`
BackendConfig interface{} `yaml:"config"`
// Basic unit used to cache chunks.
ChunkSubrangeSize int64 `yaml:"chunk_subrange_size"`
// Maximum number of GetRange requests issued by this bucket for single GetRange call. Zero or negative value = unlimited.
MaxChunksGetRangeRequests int `yaml:"max_chunks_get_range_requests"`
// TTLs for various cache items.
ChunkObjectAttrsTTL time.Duration `yaml:"chunk_object_attrs_ttl"`
ChunkSubrangeTTL time.Duration `yaml:"chunk_subrange_ttl"`
// How long to cache result of Iter call in root directory.
BlocksIterTTL time.Duration `yaml:"blocks_iter_ttl"`
// Config for Exists and Get operations for metadata files.
MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"`
MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"`
MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"`
MetafileMaxSize model.Bytes `yaml:"metafile_max_size"`
}
func (cfg *CachingWithBackendConfig) Defaults() {
cfg.ChunkSubrangeSize = 16000 // Equal to max chunk size.
cfg.ChunkObjectAttrsTTL = 24 * time.Hour
cfg.ChunkSubrangeTTL = 24 * time.Hour
cfg.MaxChunksGetRangeRequests = 3
cfg.BlocksIterTTL = 5 * time.Minute
cfg.MetafileExistsTTL = 2 * time.Hour
cfg.MetafileDoesntExistTTL = 15 * time.Minute
cfg.MetafileContentTTL = 24 * time.Hour
cfg.MetafileMaxSize = 1024 * 1024 // Equal to default MaxItemSize in memcached client.
}
// NewCachingBucketFromYaml uses YAML configuration to create new caching bucket.
func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger log.Logger, reg prometheus.Registerer) (objstore.InstrumentedBucket, error) {
level.Info(logger).Log("msg", "loading caching bucket configuration")
config := &CachingWithBackendConfig{}
config.Defaults()
if err := yaml.UnmarshalStrict(yamlContent, config); err != nil |
backendConfig, err := yaml.Marshal(config.BackendConfig)
if err != nil {
return nil, errors.Wrap(err, "marshal content of cache backend configuration")
}
var c cache.Cache
switch strings.ToUpper(string(config.Type)) {
case string(MemcachedBucketCacheProvider):
var memcached cacheutil.RemoteCacheClient
memcached, err := cacheutil.NewMemcachedClient(logger, "caching-bucket", backendConfig, reg)
if err != nil {
return nil, errors.Wrapf(err, "failed to create memcached client")
}
c = cache.NewMemcachedCache("caching-bucket", logger, memcached, reg)
case string(InMemoryBucketCacheProvider):
c, err = cache.NewInMemoryCache("caching-bucket", logger, reg, backendConfig)
if err != nil {
return nil, errors.Wrapf(err, "failed to create inmemory cache")
}
case string(RedisBucketCacheProvider):
redisCache, err := cacheutil.NewRedisClient(logger, "caching-bucket", backendConfig, reg)
if err != nil {
return nil, errors.Wrapf(err, "failed to create redis client")
}
c = cache.NewRedisCache("caching-bucket", logger, redisCache, reg)
default:
return nil, errors.Errorf("unsupported cache type: %s", config.Type)
}
// Include interactions with cache in the traces.
c = cache.NewTracingCache(c)
cfg := NewCachingBucketConfig()
// Configure cache.
cfg.CacheGetRange("chunks", c, isTSDBChunkFile, config.ChunkSubrangeSize, config.ChunkObjectAttrsTTL, config.ChunkSubrangeTTL, config.MaxChunksGetRangeRequests)
cfg.CacheExists("meta.jsons", c, isMetaFile, config.MetafileExistsTTL, config.MetafileDoesntExistTTL)
cfg.CacheGet("meta.jsons", c, isMetaFile, int(config.MetafileMaxSize), config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL)
// Cache Iter requests for root.
cfg.CacheIter("blocks-iter", c, isBlocksRootDir, config.BlocksIterTTL, JSONIterCodec{})
cb, err := NewCachingBucket(bucket, cfg, logger, reg)
if err != nil {
return nil, err
}
return cb, nil
}
var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`)
func isTSDBChunkFile(name string) bool { return chunksMatcher.MatchString(name) }
func isMetaFile(name string) bool {
return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename)
}
func isBlocksRootDir(name string) bool {
return name == ""
}
| {
return nil, errors.Wrap(err, "parsing config YAML file")
} |
makedev.rs | use crate::imp;
use imp::fs::Dev;
/// `makedev(maj, min)`
///
/// # References
/// - [Linux]
///
/// [Linux]: https://man7.org/linux/man-pages/man3/makedev.3.html
#[inline]
pub fn | (maj: u32, min: u32) -> Dev {
imp::fs::makedev(maj, min)
}
/// `minor(dev)`
///
/// # References
/// - [Linux]
///
/// [Linux]: https://man7.org/linux/man-pages/man3/minor.3.html
#[inline]
pub fn minor(dev: Dev) -> u32 {
imp::fs::minor(dev)
}
/// `major(dev)`
///
/// # References
/// - [Linux]
///
/// [Linux]: https://man7.org/linux/man-pages/man3/major.3.html
#[inline]
pub fn major(dev: Dev) -> u32 {
imp::fs::major(dev)
}
| makedev |
index.js | import { createElement } from "./domMethods";
// Setting up dummy topics data
let topicData = [{
id: 1,
name: "Politics"
},
{
id: 2,
name: "Environment"
},
{
id: 3,
name: "Sports"
},
{
id: 4,
name: "Entertainment"
}
];
let lastId = 4;
// Empty topic container, render topics
function renderTopics() {
const topicContainer = document.querySelector(".topic-container");
const topics = createTopics(topicData);
while (topicContainer.firstChild) {
topicContainer.removeChild(topicContainer.firstChild);
}
| function createTopics(topicData) {
const fragment = document.createDocumentFragment();
topicData.forEach(data => {
const topic = createTopic(data);
fragment.appendChild(topic);
});
return fragment;
}
// Return markup for a topic object
function createTopic({ name, id }) {
return createElement(
"div", { class: "topic" },
createElement(
"button", { "aria-label": "Close", "data-id": id, onClick: handleTopicDelete },
"×"
),
createElement("a", { href: `topic.html?query=${name}` }, name)
);
}
// Deletes a topic on click
function handleTopicDelete(event) {
const id = Number(event.target.getAttribute("data-id"));
topicData = topicData.filter(topic => topic.id !== id);
renderTopics();
}
function handleTopicAdd(event) {
event.preventDefault();
const input = document.querySelector("#add-topic");
const value = input.value.trim();
if (!value) {
return;
}
topicData = [
...topicData,
{ id: ++lastId, name: value }
];
input.value = "";
renderTopics();
}
// // Helper function for creating elements
// function createElement(type, attributes, ...children) {
// const element = document.createElement(type);
// if (typeof attributes === "object") {
// for (const key in attributes) {
// if (key.startsWith("on")) {
// const event = key.substring(2).toLowerCase();
// const handler = attributes[key];
// element.addEventListener(event, handler);
// } else {
// element.setAttribute(key, attributes[key]);
// }
// }
// }
// children.forEach(child => {
// if (typeof child === "boolean" || child === null || child === undefined) {
// return;
// }
// let node;
// if (child instanceof HTMLElement) {
// node = child;
// } else {
// node = document.createTextNode(child);
// }
// element.appendChild(node);
// });
// return element;
// }
// Renders topics on page load
renderTopics();
// Handle new topic submissions
document
.querySelector("#submit-topic")
.addEventListener("click", handleTopicAdd); | topicContainer.appendChild(topics);
}
// Return HTML for each topic provided |
hoop_detection_angle.py | from __future__ import print_function
import time
import math
import thread
# Dk imports
from pymavlink import mavutil
from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative
# Mux and TOF imports
import I2CMultiplexer
import VL53L1X
# CV imports
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from fractions import Fraction
from PIL import Image
import random
from sympy import Point, Polygon, pi
#cap = cv2.VideoCapture(0)
camera = PiCamera()
camera.resolution = (426, 240)
camera.framerate = 24
camera.exposure_mode = 'auto'
camera.exposure_compensation = -3
camera.drc_strength = 'off'
camera.still_stats = False
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(167, 103), Fraction(27,16))
rawCapture = PiRGBArray(camera, size=(426, 240))
out = cv2.VideoWriter(str(time.time()) + ".avi",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240))
# allow the camera to warmup
time.sleep(0.1)
# Connect to Vehicle
connection_string = '/dev/ttyUSB0'
sitl = None
# Start SITL if no connection string specified
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
# Connect to the Vehicle
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, wait_ready=True, baud=57600)
# Global variables for distance:
distance_in_mm_N = 0 # North Sensor
distance_in_mm_S = 0 # South Sensor
distance_in_mm_E = 0 # East Sensor
distance_in_mm_W = 0 # West Sensor
distance_in_mm_45 = 0 # 45 degree south east sensor
dX = 0
dY = 0
#Create an I2C Multiplexer object, the address of I2C Multiplexer is 0X70
I2CMulti = I2CMultiplexer.I2CMultiplexer(0x70)
# Init TOF obj
tof = VL53L1X.VL53L1X()
# STarts the TOFs on their respective ports
try:
# for i in [0,2,4,6]:
for i in [0,1,2,7,3]:
I2CMulti.selectPort(i)
tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=0x29)
tof.open() # Initialise the i2c bus and configure the sensor
tof.start_ranging(3) # Start ranging, 1 = Short Range, 2 = Medium Range, 3 = Long Range
except:
print("port init failed")
def detect_circle():
global dX
global dY
for img in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
for i in range(5): # Clears the 5 frame buffer
frame = img.array
height, width = frame.shape[:2]
centre = (int(width/2), int(height/2))
b_channel = np.array(frame[:,:,0]).astype('float')
g_channel = np.array(frame[:,:,1]).astype('float')
r_channel = np.array(frame[:,:,2]).astype('float')
bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)
img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2))
#img_rec_red2 = np.divide(r_channel, 255)
img_rec_red2 = np.divide(img_rec_red2,255)
#img_rec_red2 = np.square(img_rec_red2)
img_rec_red2[img_rec_red2 < 0.3] = 0
#dX, dY = 0,0
trials = 1
try:
# Get the array of indices of detected pixels
thresholded_array = np.argwhere(img_rec_red2 >= 0.3)
thresholded_list = thresholded_array.tolist()
#print(thresholded_list)
if len(thresholded_list) > trials*3:
# sets the number of trials before averaging to get the centre
total_centres_X = 0
total_centres_Y = 0
hoop_centre = (0,0)
arr_len_3rd = int(len(thresholded_list) / 3)
for i in range(trials):
r1 = random.randrange(0, int(arr_len_3rd/2))
#r2 = random.randrange(0, arr_len_3rd)
# rerolls if the same number was rolled
#while r2 == r1:
r2 = random.randrange(arr_len_3rd, 2*arr_len_3rd)
r3 = random.randrange(int(2.5*arr_len_3rd), len(thresholded_list))
#while r3 == r1 or r3 == r2:
#r3 = random.randrange(0, len(thresholded_list))
print(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3])
current_centre = Polygon(thresholded_list[r1],thresholded_list[r2],thresholded_list[r3]).circumcenter
#print(current_centre)
total_centres_X += int(current_centre.y)
total_centres_Y += int(current_centre.x)
cv2.circle(frame, (thresholded_list[r1][1], thresholded_list[r1][0]), 5, (0, 0, 255), -1)
cv2.circle(frame, (thresholded_list[r2][1], thresholded_list[r2][0]), 5, (0, 0, 255), -1)
cv2.circle(frame, (thresholded_list[r3][1], thresholded_list[r3][0]), 5, (0, 0, 255), -1)
cX = int(total_centres_X / trials)
cY = int(total_centres_Y / trials)
#print(cX,cY)
except:
print("no hoop detected")
# put text and highlight the center
try:
cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)
cv2.line(frame, centre, (cX, cY), (255,0,0), 2)
#cv2.putText(frame, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
dX = cX - centre[0]
dY = centre[1] - cY
cv2.putText(frame, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
#print('Velocities: ' + str(dX) + "," + str(dY))
except:
#print("No centre detected")
#dX = 0
#dY = 0
dX = None
dY = None
out.write(frame)
k = cv2.waitKey(1)
rawCapture.truncate(0)
# Arm and rakeoff to specific altitude
def arm_and_takeoff(aTargetAltitude):
"""
Arms vehicle and fly to aTargetAltitude.
"""
print("Basic pre-arm checks")
#Don't try to arm until autopilot is ready
# while not vehicle.is_armable:
# print(" Waiting for vehicle to initialise...")
# time.sleep(1)
print("Arming motors")
# Copter should arm in GUIDED mode
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
# while not vehicle.armed == True:
# print("Not Armed")
# time.sleep(0.4)
# while not vehicle.armed == True:
# vehicle.armed = True
# print("Not Armed 2")
# time.sleep(0.4)
#Confirm vehicle armed before attempting to take off
while not vehicle.armed:
print(" Waiting for arming...")
time.sleep(1)
print("Taking off!")
vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
# Wait until the vehicle reaches a safe height before processing the goto
# (otherwise the command after Vehicle.simple_takeoff will execute
# immediately).
while True:
print(" Altitude: ", vehicle.rangefinder.distance)
current_alt = vehicle.rangefinder.distance
if current_alt > 20:
current_alt = 0
print(" Arm state: ", vehicle.armed)
# Break and return from function just below target altitude.
if current_alt >= aTargetAltitude * 0.95:
print("Reached target altitude")
break
time.sleep(1)
def goto_position_target_local_ned(north, east, down):
"""
Send SET_POSITION_TARGET_LOCAL_NED command to request the vehicle fly to a specified
location in the North, East, Down frame.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111111000, # type_mask (only positions enabled)
north, east, down,
0, 0, 0, # x, y, z velocity in m/s (not used)
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle
vehicle.send_mavlink(msg)
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two LocationGlobal objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5
def get_location_metres(original_location, dNorth, dEast):
"""
Returns a LocationGlobal object containing the latitude/longitude `dNorth` and `dEast` metres from the
specified `original_location`. The returned LocationGlobal has the same `alt` value
as `original_location`.
The function is useful when you want to move the vehicle around specifying locations relative to
the current vehicle position.
The algorithm is relatively accurate over small distances (10m within 1km) except close to the poles.
For more information see:
http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
"""
earth_radius = 6378137.0 #Radius of "spherical" earth
#Coordinate offsets in radians
dLat = dNorth/earth_radius
dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))
#New position in decimal degrees
newlat = original_location.lat + (dLat * 180/math.pi)
newlon = original_location.lon + (dLon * 180/math.pi)
if type(original_location) is LocationGlobal:
targetlocation=LocationGlobal(newlat, newlon,original_location.alt)
elif type(original_location) is LocationGlobalRelative:
targetlocation=LocationGlobalRelative(newlat, newlon,original_location.alt)
else:
raise Exception("Invalid Location object passed")
return targetlocation
def goto(dNorth, dEast, gotoFunction=vehicle.simple_goto):
"""
Moves the vehicle to a position dNorth metres North and dEast metres East of the current position.
The method takes a function pointer argument with a single `dronekit.lib.LocationGlobal` parameter for
the target position. This allows it to be called with different position-setting commands.
By default it uses the standard method: dronekit.lib.Vehicle.simple_goto().
The method reports the distance to target every two seconds.
"""
currentLocation = vehicle.location.global_relative_frame
targetLocation = get_location_metres(currentLocation, dNorth, dEast)
targetDistance = get_distance_metres(currentLocation, targetLocation)
gotoFunction(targetLocation)
#print "DEBUG: targetLocation: %s" % targetLocation
#print "DEBUG: targetLocation: %s" % targetDistance
print("Initiating GOTO")
while vehicle.mode.name=="GUIDED": #Stop action if we are no longer in guided mode.
#print "DEBUG: mode: %s" % vehicle.mode.name
remainingDistance=get_distance_metres(vehicle.location.global_relative_frame, targetLocation)
print("Distance to target: " + str(remainingDistance))
if remainingDistance < 0.11: #Just below target, in case of undershoot.
print("Reached target")
break;
time.sleep(2)
# Sends a velocity to the drone at a rate of 2 Hx
def send_global_velocity(velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_global_int_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, # lat_int - X Position in WGS84 frame in 1e7 * meters
0, # lon_int - Y Position in WGS84 frame in 1e7 * meters
0, # alt - Altitude in meters in AMSL altitude(not WGS84 if absolute or relative)
# altitude above terrain if GLOBAL_TERRAIN_ALT_INT
velocity_x, # X velocity in NED frame in m/s
velocity_y, # Y velocity in NED frame in m/s
velocity_z, # Z velocity in NED frame in m/s
0, 0, 0, # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(0.5)
# Sets the Yaw - vehicle will yaw according to the yaw slew rate set in params
# give the vehicle more time (give a 0 velocity vector for x amount of seconds - enough for
# the drone to complete the yaw)
def condition_yaw(heading, relative=False):
|
# The following 2 methods allow for the drone attitude to be directly controlled
# the movement is not OF corrected - avoid usage where possible
def set_attitude(roll_angle = 0.0, pitch_angle = 0.0, yaw_rate = 0.0, thrust = 0.5, duration = 0):
"""
Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
velocity persists until it is canceled. The code below should work on either version
(sending the message multiple times does not cause problems).
"""
"""
The roll and pitch rate cannot be controllbed with rate in radian in AC3.4.4 or earlier,
so you must use quaternion to control the pitch and roll for those vehicles.
"""
# Thrust > 0.5: Ascend
# Thrust == 0.5: Hold the altitude
# Thrust < 0.5: Descend
msg = vehicle.message_factory.set_attitude_target_encode(
0, # time_boot_ms
1, # Target system
1, # Target component
0b00000000, # Type mask: bit 1 is LSB
to_quaternion(roll_angle, pitch_angle), # Quaternion
0, # Body roll rate in radian
0, # Body pitch rate in radian
math.radians(yaw_rate), # Body yaw rate in radian
thrust # Thrust
)
vehicle.send_mavlink(msg)
start = time.time()
while time.time() - start < duration:
vehicle.send_mavlink(msg)
#time.sleep(0.1)
def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):
"""
Convert degrees to quaternions
"""
t0 = math.cos(math.radians(yaw * 0.5))
t1 = math.sin(math.radians(yaw * 0.5))
t2 = math.cos(math.radians(roll * 0.5))
t3 = math.sin(math.radians(roll * 0.5))
t4 = math.cos(math.radians(pitch * 0.5))
t5 = math.sin(math.radians(pitch * 0.5))
w = t0 * t2 * t4 + t1 * t3 * t5
x = t0 * t3 * t4 - t1 * t2 * t5
y = t0 * t2 * t5 + t1 * t3 * t4
z = t1 * t2 * t4 - t0 * t3 * t5
# Gets the readings from the TOF sensors and updates the distance vars
def get_I2C_readings():
global distance_in_mm_N
global distance_in_mm_S
global distance_in_mm_E
global distance_in_mm_W
global distance_in_mm_45
while(True):
I2CMulti.selectPort(0)
distance_in_mm_N = tof.get_distance() # Grab the range in mm
I2CMulti.selectPort(3)
distance_in_mm_S = tof.get_distance() # Grab the range in mm
I2CMulti.selectPort(7)
distance_in_mm_E = tof.get_distance() # Grab the range in mm
I2CMulti.selectPort(2)
distance_in_mm_W = tof.get_distance() # Grab the range in mm
I2CMulti.selectPort(1)
distance_in_mm_45 = tof.get_distance() # Grab the range in mm
#print("Sensor N distance: " + str(distance_in_mm_N) + " \nSensor S distance: " + str(distance_in_mm_S) + "\nSensor E distance: " + str(distance_in_mm_E) + "\nSensor W distance: " + str(distance_in_mm_W))
time.sleep(0.05)
def calculate_velocity(ground_heading, angle):
rads = math.radian(angle)
rads += math.radians(ground_heading)
if rads > math.radians(360):
rads -= math.radians(360)
elif rads < -math.radians(360):
rads += math.radians(360)
vel_x = (np.cos(heading_rad) / 5)
vel_y = (np.sin(heading_rad) / 5)
return vel_x, vel_y
# Starts TOF readings before takeoff
#thread.start_new_thread(get_I2C_readings, ())
# Starts CV code
thread.start_new_thread(detect_circle, ())
# Gets vehcle heading on thr ground (this is assumed to be the forward heading)
ground_heading = vehicle.heading
# Takeoff to 1.5m
arm_and_takeoff(1.5)
# Corridor Variables
INCREMENT_DISTANCE = 0.1
CORRIDOR_WIDTH_HALVED = 1300 # in mm
THRESHOLD_DISTANCE = 100
lower_bound = CORRIDOR_WIDTH_HALVED - THRESHOLD_DISTANCE
upper_bound = CORRIDOR_WIDTH_HALVED + THRESHOLD_DISTANCE
#print(str(right_X) + str(right_Y))
VEL_SCALE_Y = 0.005 # velocity scaling factor from openCV
VEL_SCALE_X = 0.001
px_threshold = 10 # sets the threshold before any velocity is taken
print(dX, dY)
# Hoop alignment code
x_aligned = False
y_aligned = False
### SINGLE AXIS ALIGNMENT CODE
# while True:
# if dX < -px_threshold or dX > px_threshold:
# # remember, negative means up
# up_vel = -dX*VEL_SCALE
# if up_vel > 0.05:
# up_vel = 0.05
# elif up_vel < 0.05:
# up_vel = -0.05
# send_global_velocity(0,0,(up_vel), 2)
# send_global_velocity(0,0,0,1) # reset the global vels
# else:
# break
# print("x aligned")
# while True:
# if dY < -px_threshold or dY > px_threshold:
# right_vel_X = -right_X*dY*VEL_SCALE
# right_vel_Y = -right_Y*dY*VEL_SCALE
# if right_vel_X > 0.05:
# right_vel_X = 0.05
# elif right_vel_X < -0.05:
# right_vel_X = -0.05
# if right_vel_Y > 0.05:
# right_vel_Y = 0.05
# elif right_vel_Y < -0.05:
# right_vel_Y = -0.05
# send_global_velocity(right_vel_X,right_vel_Y,0,2)
# send_global_velocity(0,0,0,1) # reset the global vels
# else :
# break
### DOUBLE AXIS ALIGNMENT
up_vel, right_vel_X, right_vel_Y = 0,0,0
forward_scale = 0.1
stab_seconds_X = 0
stab_seconds_Y = 0
stab_threshold = 1
while (not x_aligned) or (not y_aligned):
if dX == None:
print("hoop not detected")
break
line_d = (dX**2 + dY**2)**0.5
if line_d == 0:
fwd_x, fwd_y = calculate_velocity(ground_heading, 0)
send_global_velocity(fwd_X,fwd_Y,0,2)
send_global_velocity(0,0,0,1)
total_scale = forward_scale/line_d
print(dX, dY)
if dX < -px_threshold or dX > px_threshold:
x_aligned = False
up_vel = round((-dX*VEL_SCALE_X), 3)
if up_vel > 0.1:
up_vel = 0.1
elif up_vel < -0.1:
up_vel = -0.1
stab_seconds_X = 0
else:
if stab_seconds_X == stab_threshold:
x_aligned = True
else:
x_aligned = False
stab_seconds_X += 1
up_vel = 0
if dY < -px_threshold or dY > px_threshold:
y_aligned = False
angle = math.degrees(np.arctan2(total_scale / line_d))
right_vel_X, right_vel_Y = calculate_velocity(ground_heading, angle)
stab_seconds_Y = 0
else:
if stab_seconds_Y == stab_threshold:
y_aligned = True
else:
y_aligned = False
stab_seconds_Y += 1
right_vel_X = 0
right_vel_Y = 0
print("alignment x: " + str(x_aligned))
print("alignment y: " + str(y_aligned))
print("velocity: " + str(right_vel_X) + " : " + str(right_vel_Y) + " : " + str(up_vel))
send_global_velocity(right_vel_X,right_vel_Y,up_vel,2)
send_global_velocity(0,0,0,1) # reset the global vels
print("Fully Aligned")
send_global_velocity(0,0,0,10) # reset the global vels
# condition_yaw(90, True)
# condition_yaw(-90, True)
print("Landing")
vehicle.mode = VehicleMode("LAND")
# Close vehicle object before exiting script
print("Close vehicle object")
vehicle.close()
# Shut down simulator if it was started.
if sitl:
sitl.stop()
I2CMulti.i2c.write_byte(0x70,0) # how it closes?
tof.stop_ranging() # Stop ranging
out.release()
| """
Send MAV_CMD_CONDITION_YAW message to point vehicle at a specified heading (in degrees).
This method sets an absolute heading by default, but you can set the `relative` parameter
to `True` to set yaw relative to the current yaw heading.
By default the yaw of the vehicle will follow the direction of travel. After setting
the yaw using this function there is no way to return to the default yaw "follow direction
of travel" behaviour (https://github.com/diydrones/ardupilot/issues/2427)
For more information see:
http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw
"""
if relative:
is_relative = 1 #yaw relative to direction of travel
else:
is_relative = 0 #yaw is an absolute angle
# create the CONDITION_YAW command using command_long_encode()
msg = vehicle.message_factory.command_long_encode(
0, 0, # target system, target component
mavutil.mavlink.MAV_CMD_CONDITION_YAW, #command
0, #confirmation
heading, # param 1, yaw in degrees
0, # param 2, yaw speed deg/s
1, # param 3, direction -1 ccw, 1 cw
is_relative, # param 4, relative offset 1, absolute angle 0
0, 0, 0) # param 5 ~ 7 not used
# send command to vehicle
vehicle.send_mavlink(msg) |
first_missing_positive.go | package hard
// Given an unsorted integer array, find the smallest missing positive integer.
//
// Example 1:
// Input: [1,2,0]
// Output: 3
//
// Example 2:
// Input: [3,4,-1,1]
// Output: 2
//
// Example 3:
// Input: [7,8,9,11,12]
// Output: 1
//
// Note:
// Your algorithm should run in O(n) time and uses constant extra space.
func firstMissingPositive(nums []int) int {
for i := 0; i < len(nums); i++ {
if nums[i] <= 0 || nums[i] == i+1 || nums[i] > len(nums) {
continue
}
for nums[i] > 0 && nums[i] != i+1 && nums[i] <= len(nums) {
if nums[i] == nums[nums[i]-1] {
break
}
nums[i], nums[nums[i]-1] = nums[nums[i]-1], nums[i]
}
}
for i := 0; i < len(nums); i++ {
if nums[i] != i+1 |
}
return len(nums) + 1
}
| {
return i + 1
} |
api_image_image_background_removal.go | /*
* FastAPI
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* API version: 0.1.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
import (
"context"
"io/ioutil"
"net/http"
"net/url"
"strings"
"os"
"github.com/antihax/optional"
)
// Linger please
var (
_ context.Context
)
type ImageImageBackgroundRemovalApiService service
/*
ImageImageBackgroundRemovalApiService Apply model for the background-removal task for a given models
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param image
* @param optional nil or *ApplyImageImageBackgroundRemovalPostOpts - Optional Parameters:
* @param "Model" (optional.String) -
@return map[string]interface{}
*/
type ApplyImageImageBackgroundRemovalPostOpts struct {
Model optional.String
}
// ApplyImageImageBackgroundRemovalPost uploads an image as multipart/form-data
// via POST /image/image/background-removal/ and decodes the JSON response into
// a generic map. The optional model name from localVarOptionals is forwarded
// as the "model" query parameter. On a non-2xx status (or a decode failure) a
// GenericOpenAPIError carrying the raw body is returned.
func (a *ImageImageBackgroundRemovalApiService) ApplyImageImageBackgroundRemovalPost(ctx context.Context, image *os.File, localVarOptionals *ApplyImageImageBackgroundRemovalPostOpts) (map[string]interface{}, *http.Response, error) {
	var (
		localVarHttpMethod   = strings.ToUpper("Post")
		localVarPostBody     interface{}
		localVarFormFileName string
		localVarFileName     string
		localVarFileBytes    []byte
		localVarReturnValue  map[string]interface{}
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/image/image/background-removal/"
	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}
	if localVarOptionals != nil && localVarOptionals.Model.IsSet() {
		localVarQueryParams.Add("model", parameterToString(localVarOptionals.Model.Value(), ""))
	}
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{"multipart/form-data"}
	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{"application/json"}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	// Read the whole upload into memory so prepareRequest can build the
	// multipart body from the raw bytes; the file handle is closed here.
	// The ReadAll error is deliberately ignored by the generator.
	localVarFormFileName = "image"
	localVarFile := image
	if localVarFile != nil {
		fbs, _ := ioutil.ReadAll(localVarFile)
		localVarFileBytes = fbs
		localVarFileName = localVarFile.Name()
		localVarFile.Close()
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
	if err != nil {
		return localVarReturnValue, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return localVarReturnValue, localVarHttpResponse, err
	}
	localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
	localVarHttpResponse.Body.Close()
	if err != nil {
		return localVarReturnValue, localVarHttpResponse, err
	}
	if localVarHttpResponse.StatusCode >= 300 {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: localVarHttpResponse.Status,
		}
		// NOTE(review): a 200 status can never satisfy >= 300, so this branch
		// is dead; preserved as emitted by the OpenAPI generator.
		if localVarHttpResponse.StatusCode == 200 {
			var v map[string]interface{}
			err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHttpResponse, newErr
			}
			newErr.model = v
			return localVarReturnValue, localVarHttpResponse, newErr
		}
		// 422: FastAPI validation error payload.
		if localVarHttpResponse.StatusCode == 422 {
			var v HttpValidationError
			err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHttpResponse, newErr
			}
			newErr.model = v
			return localVarReturnValue, localVarHttpResponse, newErr
		}
		return localVarReturnValue, localVarHttpResponse, newErr
	}
	err = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
	if err != nil {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: err.Error(),
		}
		return localVarReturnValue, localVarHttpResponse, newErr
	}
	return localVarReturnValue, localVarHttpResponse, nil
}
/*
ImageImageBackgroundRemovalApiService Get list of models available for background-removal
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
@return map[string]interface{}
*/
func (a *ImageImageBackgroundRemovalApiService) GetVersionsImageImageBackgroundRemovalGet(ctx context.Context) (map[string]interface{}, *http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Get")
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
localVarReturnValue map[string]interface{}
)
// create path and map variables
localVarPath := a.client.cfg.BasePath + "/image/image/background-removal/"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
// to determine the Content-Type header
localVarHttpContentTypes := []string{}
// set Content-Type header
localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
if localVarHttpContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHttpContentType
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return localVarReturnValue, nil, err
}
localVarHttpResponse, err := a.client.callAPI(r)
if err != nil || localVarHttpResponse == nil |
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarReturnValue, localVarHttpResponse, err
}
if localVarHttpResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHttpResponse.Status,
}
if localVarHttpResponse.StatusCode == 200 {
var v map[string]interface{}
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = err.Error()
return localVarReturnValue, localVarHttpResponse, newErr
}
newErr.model = v
return localVarReturnValue, localVarHttpResponse, newErr
}
return localVarReturnValue, localVarHttpResponse, newErr
}
err = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
newErr := GenericOpenAPIError{
body: localVarBody,
error: err.Error(),
}
return localVarReturnValue, localVarHttpResponse, newErr
}
return localVarReturnValue, localVarHttpResponse, nil
}
| {
return localVarReturnValue, localVarHttpResponse, err
} |
profile_tags.py | from django import template
register = template.Library()
@register.filter
def package_usage(user):
    """Template filter returning every package associated with ``user``.

    Uses the reverse foreign-key manager ``user.package_set`` — assumes the
    Package model has a ForeignKey to the user model (TODO confirm).

    NOTE(review): body reassembled from the fill-in-the-middle fragment in the
    corrupted source.
    """
    return user.package_set.all()
|
simple-web-test.js | "use strict";
const https = require("https");
const Router = require("koa-router");
const superagent = require("superagent");
const { assertThat, equalTo, is } = require("hamjest");
const { hasHeader, hasStatusCode } = require("superjest");
const SimpleWeb = require("../src/simple-web");
const PORT = process.env.PORT || 8080;
const TLS_PORT = process.env.TLS_PORT || 43434;
const MAX_HEADERS_COUNT = 1111;
const HEADERS_TIMEOUT = 2222;
const TIMEOUT = 3333;
const KEEP_ALIVE_TIMEOUT = 4444;
const config = {
port: PORT,
maxHeadersCount: MAX_HEADERS_COUNT,
headersTimeout: HEADERS_TIMEOUT,
timeout: TIMEOUT,
keepAliveTimeout: KEEP_ALIVE_TIMEOUT
};
describe("simple web", function() {
let web;
beforeEach(async function() {
web = new SimpleWeb(config);
});
afterEach(async function() {
await web.stop();
});
describe("lifecycle", function() {
function lifecycleTests(getServer) {
it("should not throw error stopping unstarted component", async function() {
await web.stop();
await web.start();
assertThat(getServer().listening, is(true));
});
it("should only be able to be started once", async function() {
await web.start();
return web.start().then(
() => { throw new Error("Expected error to be thrown") },
// catch the "thrown" error.
() => {}
)
});
it("should handle being stopped multiple times", async function() {
await web.stop();
await web.stop();
});
it("should handle being restarted again", async function() {
await web.start();
await web.stop();
assertThat(getServer().listening, is(false));
await web.start();
assertThat(getServer().listening, is(true));
})
}
describe("with no server provided", function() {
lifecycleTests(() => web._server);
});
describe("with server provided", function() {
let server;
beforeEach(function() {
const alteredConfig = Object.assign({}, config);
alteredConfig.port = TLS_PORT;
server = https.createServer();
web = new SimpleWeb(alteredConfig, server);
});
lifecycleTests(() => server);
});
});
describe("routes", function() {
beforeEach(async function() {
web.route(givenRootRoute());
await web.start();
});
it("should mount routes", function(done) {
superagent.get(`http://localhost:${PORT}`)
.end((error, response) => {
assertThat(response, hasStatusCode(200));
assertThat(response.text, is("OK"));
done();
});
});
it("should only allow defined methods", function(done) {
superagent.post(`http://localhost:${PORT}`)
.end((error, response) => {
assertThat(response, hasStatusCode(405));
/*
* See koa-router for more information about the contents of the 405 response.
*
* We're not testing koa-router we just want to make sure we've wired the routes together
* properly.
*/
done();
});
});
}); |
describe("middleware", function() {
beforeEach(async function() {
web.use(async (ctx, next) => {
ctx.response.set("x-foo", "bar");
return await next();
});
web.route(givenRootRoute());
await web.start();
});
it("should allow arbitrary middleware", function(done) {
superagent.get(`http://localhost:${PORT}`)
.end((error, response) => {
assertThat(response, hasStatusCode(200));
assertThat(response, hasHeader("x-foo", equalTo("bar")));
done();
});
});
});
describe("context", function() {
beforeEach(async function() {
const router = new Router();
web.route(router);
router.get("/name", async (ctx) => {
ctx.status = 200;
ctx.body = ctx.name();
});
await web.start();
});
it("should add to context", function(done) {
const name = "Bruce Wayne";
web.addContext("name", function() {
return name;
});
superagent.get(`http://localhost:${PORT}/name`)
.end((error, response) => {
assertThat(response, hasStatusCode(200));
assertThat(response.text, is(name));
done();
});
});
});
});
/*
 * Builds a koa-router Router with a single GET / route that responds
 * 200 "OK". Used as a minimal fixture by the tests above.
 * (Review: stray fill-in-the-middle separator residue after the closing
 * brace has been removed.)
 */
function givenRootRoute() {
  const router = new Router();

  router.get("/", async (ctx) => {
    ctx.status = 200;
    ctx.body = "OK";
  });

  return router;
}
class_registration.py | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import networkx as nx
import os
from enum import Enum
from mo.graph.graph import Graph
from mo.middle.passes.eliminate import shape_inference
from mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively
from mo.utils.error import Error, InternalError, FrameworkError
from mo.utils.logger import progress_bar
from mo.utils.utils import refer_to_faq_msg
_registered_classes_dict = {}
def _check_unique_ids():
    """Verify that the 'id' attribute is unique across all registered replacers.

    Walks every class set in the module-level registry and raises Error on the
    first duplicated id encountered; replacers without an 'id' are ignored.
    """
    seen_ids = set()
    for _, classes_set in _registered_classes_dict.items():
        for cls in classes_set:
            without_op = [c for c in cls.registered_cls if not hasattr(c, 'op')]
            with_op = [c for _, c in cls.registered_ops.items() if c]
            for replacer_cls in without_op + with_op:
                if not hasattr(replacer_cls, 'id'):
                    continue
                replacer_id = getattr(replacer_cls, 'id')
                if replacer_id in seen_ids:
                    raise Error('Found replacer {} with not unique id!'.format(replacer_cls))
                seen_ids.add(replacer_id)
    log.debug("All replacers has unique idxs.")
def get_enabled_and_disabled_transforms():
    """
    :return: tuple of lists with force enabled and disabled id of transformations.
    """
    # os.environ.get replaces the explicit membership test; '' remains the
    # default so downstream behaviour is unchanged.
    disabled_transforms = os.environ.get('MO_DISABLED_TRANSFORMS', '')
    enabled_transforms = os.environ.get('MO_ENABLED_TRANSFORMS', '')
    assert isinstance(enabled_transforms, str)
    assert isinstance(disabled_transforms, str)
    # Note: ''.split(',') yields [''], matching the original behaviour when a
    # variable is unset.
    return enabled_transforms.split(','), disabled_transforms.split(',')
class ClassType(Enum):
    """Kinds of registrable classes in the conversion pipeline; the value
    identifies which registry bucket a class belongs to."""
    EXTRACTOR = 0
    OP = 1
    FRONT_REPLACER = 2
    MIDDLE_REPLACER = 3
    BACK_REPLACER = 4
    IR_READER_EXTENDER = 5
    LOADER = 6
def _update(cls, registered_list: list, registered_dict: dict, key: str, enabled_transforms: list,
            disabled_transforms: list):
    """Register all direct subclasses of ``cls``.

    Appends new subclasses to ``registered_list`` and, for subclasses exposing
    the attribute named by ``key``, records them in ``registered_dict`` under
    that (case-insensitively unique) custom name. Subclasses whose 'id' is in
    ``enabled_transforms``/``disabled_transforms`` get their 'enabled' flag
    forced accordingly. Mutates the subclasses and both containers in place.
    """
    new_keys = {}  # maps a custom name to class
    new_keys_lower = {}  # translates lowered custom name to its original form
    # print('Registering new subclasses for', cls)
    for c in cls.__subclasses__():
        # Force enabling operations
        if hasattr(c, 'id') and c.id in enabled_transforms:
            setattr(c, 'enabled', True)
        # Force disabling operations
        if hasattr(c, 'id') and c.id in disabled_transforms:
            setattr(c, 'enabled', False)
        if c not in registered_list:
            # Classes explicitly excluded by the base class are skipped.
            if hasattr(cls, 'excluded_classes') and c in cls.excluded_classes:
                continue
            registered_list.append(c)
            log.info('New subclass: {}'.format(c))
            if hasattr(c, key) and getattr(c, key) is not None:
                k = getattr(c, key)
                # Custom names must be unique ignoring case.
                if k.lower() in new_keys_lower:
                    raise Error(
                        'Attempt to register of custom name {} for the second time as class {}. ' \
                        'Note that custom names are case-insensitive. ' +
                        refer_to_faq_msg(55), k, c)
                else:
                    new_keys_lower[k.lower()] = k
                    new_keys[k] = c
                    log.info('Registered a new subclass with key: {}'.format(k))
        else:
            log.warning('Skipped {} registration because it was already registered or it was disabled. '.format(c))
    registered_dict.update(new_keys)
def update_registration(classes: list, enabled_transforms: list, disabled_transforms: list):
    """Register subclasses of every base class in ``classes`` and record each
    base class in the module-level registry keyed by its ClassType.

    NOTE(review): the body was truncated by a fill-in-the-middle split in the
    corrupted source; it has been reassembled from the completion fragment.
    """
    for cls in classes:
        _update(cls, cls.registered_cls, cls.registered_ops, 'op', enabled_transforms, disabled_transforms)
        _registered_classes_dict.setdefault(cls.class_type(), set()).add(cls)
class DependencyGraph(Graph):
    """Directed graph whose nodes are transformation classes; used to derive a
    deterministic, dependency-respecting execution order."""

    def __init__(self, data=None, **attr):
        super().__init__(data, **attr)

    def dump_graph_for_graphviz(self, node_attrs: list = [], edge_attrs: list = [], nodes_to_dump: list = None,
                                save_to_svg=False, highlight_nodes: list = None):
        """Serialise the graph to DOT syntax, log it, optionally render an SVG
        file, and return the DOT string. Disabled transforms are gray."""
        log.debug("---- GRAPHVIZ OUTPUT STARTS ----")
        if nodes_to_dump is None:
            nodes_to_dump = self.nodes()
        string = '\ndigraph {\n'
        string += 'node [color=lightblue2, style=filled];\n'
        for node in nodes_to_dump:
            attrs = ""
            if hasattr(node, 'enabled') and not node.enabled:
                attrs += "color=gray70,"
            string += '"{}" [{}];\n'.format(node, attrs)
        visited_nodes = set()
        for src_node_name, dst_node_name, attrs in self.edges(data=True):
            visited_nodes.add(src_node_name)
            visited_nodes.add(dst_node_name)
            if src_node_name not in nodes_to_dump or dst_node_name not in nodes_to_dump:
                continue
            src_node = self.node[src_node_name]
            dst_node = self.node[dst_node_name]
            # Node labels carry the requested node_attrs; edge labels carry
            # the requested edge_attrs.
            src_node_string = str(src_node_name) + '\\n'.join(
                [str(key) + '=' + str(src_node.get(key, 'None')) for key in node_attrs if key in src_node])
            dst_node_string = str(dst_node_name) + '\\n'.join(
                [str(key) + '=' + str(dst_node.get(key, 'None')) for key in node_attrs if key in dst_node])
            edge_string = ' '.join([str(key) + '=' + str(attrs.get(key, 'None')) for key in edge_attrs if key in attrs])
            string += '"{}" -> "{}" [label = "{}"];\n'.format(src_node_string, dst_node_string, edge_string)
        # Emit isolated nodes not touched by any edge.
        for node in nodes_to_dump:
            if node not in visited_nodes:
                string += '"{}";\n'.format(node)
                visited_nodes.add(node)
        string += '}'
        log.debug(string)
        log.debug("---- GRAPHVIZ OUTPUT ENDS ----")
        if save_to_svg:
            try:
                import graphviz
                import os
                # Pick a dump file name that does not collide with earlier dumps.
                file_name = "{}_{}.txt".format(self.name.replace('/', '_'), 0)
                id = 1
                while os.path.exists(file_name):
                    file_name = "{}_{}.txt".format(self.name.replace('/', '_'), id)
                    id += 1
                with open(file_name, "w") as f:
                    f.write(string)
                graphviz.render('dot', 'svg', file_name)
                print('Graph was saved to {}.{}'.format(file_name, 'svg'))
            except ImportError:
                raise ImportError('Can\'t import graphviz')
            except Exception as e:
                raise Error('Can\'t save graph to svg') from e
        return string

    def cycle_check(self):
        """Raise Error naming one cycle if the graph is not a DAG."""
        try:
            list(nx.topological_sort(self))
        except nx.NetworkXUnfeasible as exception:
            cycles = nx.simple_cycles(self)
            raise Error(
                'There is(are) cyclic dependency(ies) between replacers. One of the cycles is the following: {}',
                ' -> '.join([str(node) for node in list(cycles)[0]])) from exception

    def repeated_cls_names_check(self):
        """Assert that no two transform classes share the same class name."""
        name_to_class_map = {}
        for transform_class in self.node:
            transform_name = transform_class.__name__
            assert transform_name not in name_to_class_map, \
                'Transform name `{}` is not unique: at least {} and {} exist' \
                ''.format(transform_name, transform_class, name_to_class_map[transform_name])
            name_to_class_map[transform_name] = transform_class

    def sort_util(self, v, visited, stack):
        """Depth-first helper for determined_sort: children are visited in
        alphabetical (class-name) order and ``v`` is pushed onto the front of
        ``stack`` after its subtree."""
        visited.append(v)
        for i in sorted([child for _, child in self.out_edges(v)], key=lambda x: x.__name__):
            if i not in visited:
                self.sort_util(i, visited, stack)
        stack.insert(0, v)

    def determined_sort(self):
        """Return a deterministic topological order of all transforms.

        Ties are broken by class name; the result is cross-checked against
        networkx's topological sort of a linearised copy of the graph.
        """
        self.cycle_check()
        self.repeated_cls_names_check()
        transforms = sorted([cls for cls in self.nodes() if len(self.in_edges(cls)) == 0], key=lambda x: x.__name__)
        order, visited = [], []
        for transform in transforms:
            self.sort_util(transform, visited, order)
        # Sanity check: chaining the computed order must itself be a valid
        # topological sort of the graph.
        graph_copy = self.copy()
        for i in range(len(order) - 1):
            graph_copy.add_edge(order[i], order[i + 1])
        try:
            nx_order = list(nx.topological_sort(graph_copy))
        except Exception as e:
            raise InternalError(
                "Internal DependencyGraph determined_sort function behaves unexpectedly: cycle found") from e
        assert nx_order == order, \
            "Internal DependencyGraph determined_sort function behaves unexpectedly: nx_order != order"
        return order
def get_replacers_order(transform_types: list):
    """
    Gets all transforms that do not have 'op'.
    If two or more classes replaces the same op (both have op class attribute and values match), such
    pattern is not applied (while registration it will warn user that we have a conflict).
    """
    # Build a dependency graph whose nodes are replacer classes and whose
    # edges encode run_before/run_after constraints, then sort deterministically.
    dependency_graph = DependencyGraph(name="UnifiedPipeline" if len(transform_types) != 1 else transform_types[0].name)
    replacers = []
    for class_type, classes_set in _registered_classes_dict.items():
        if class_type in transform_types:
            for cls in classes_set:
                # Replacers without an 'op' attribute plus registered op replacers.
                cur_cls_replacers = [c for c in cls.registered_cls if not hasattr(c, 'op')] + \
                                    [c for op, c in cls.registered_ops.items() if c]
                replacers.extend(
                    [replacer for replacer in cur_cls_replacers if replacer not in cls.excluded_replacers])
    for replacer_cls in replacers:
        dependency_graph.add_node(replacer_cls)
    for i, replacer_cls in enumerate(replacers):
        # run_before: edge replacer -> cls_after; run_after: edge cls_before -> replacer.
        for cls_after in replacer_cls().run_before():
            dependency_graph.add_edge(replacer_cls, cls_after)
        for cls_before in replacer_cls().run_after():
            dependency_graph.add_edge(cls_before, replacer_cls)
    replacers_order = dependency_graph.determined_sort()
    # Log the resolved order together with each transform's enabled flag.
    debug_msg_list = ['| id | enabled | class ']
    for i, replacer_cls in enumerate(replacers_order):
        debug_msg_list.append('|{:5} |{:^9}| {}'.format(i, str(getattr(replacer_cls, 'enabled', None)), replacer_cls))
    log.debug('Replacers execution order: \n{}'.format('\n'.join(debug_msg_list)))
    return replacers_order
@progress_bar
def apply_transform(graph: Graph, replacer_cls, **kwargs):
    """
    Safely executes transform if it should be and validates graph after transform execution
    """
    replacer = replacer_cls()
    replacement_id = 'REPLACEMENT_ID'
    if hasattr(replacer, 'replacement_id'):
        replacement_id = replacer.replacement_id
    # Skip transforms that are disabled or whose graph preconditions fail.
    if hasattr(replacer, 'enabled') and not replacer.enabled:
        log.info("Skip replacer {} (enabled = False)".format(replacer_cls))
        return
    if hasattr(replacer, 'graph_condition') and \
            not all([condition(graph) for condition in replacer.graph_condition]):
        log.info("Skip replacer {} (graph_condition not satisfied)".format(replacer_cls))
        return
    log.debug("Run replacer {}".format(replacer_cls))
    try:
        # run_not_recursively restricts the transform to the top-level graph;
        # otherwise it is applied to every nested sub-graph as well.
        if hasattr(replacer, 'run_not_recursively'):
            replacer.find_and_replace_pattern(graph)
        else:
            for_graph_and_each_sub_graph_recursively(graph, replacer.find_and_replace_pattern)
        # Optional post-passes requested by the transform.
        if hasattr(replacer, 'force_clean_up') and replacer.force_clean_up:
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())
        if hasattr(replacer, 'force_shape_inference') and replacer.force_shape_inference:
            shape_inference(graph)
        # Validate the graph after the transform ran.
        for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_empty_graph(replacer_cls))
        for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_shapes_consistency())
    except Error as err:
        raise Error('Exception occurred during running replacer "{}" ({}): {}'.format(
            replacement_id,
            replacer_cls,
            str(err).replace('[REPLACEMENT_ID]', replacement_id),
        )) from err
    except FrameworkError as err:
        # Framework errors are re-raised without the replacer context.
        raise FrameworkError('{}'.format(str(err))) from err
    except Exception as err:
        raise Exception('Exception occurred during running replacer "{} ({})": {}'.format(
            replacement_id,
            replacer_cls,
            str(err).replace('[REPLACEMENT_ID]', replacement_id),
        )) from err
def apply_replacements_list(graph: Graph, replacers_order: list):
    """Run every transformation in ``replacers_order`` on ``graph``, in order.

    The position and total count are forwarded so the progress bar decorating
    apply_transform can report completion.
    """
    total = len(replacers_order)
    for index, transform in enumerate(replacers_order):
        apply_transform(graph=graph,
                        replacer_cls=transform,
                        curr_transform_num=index,
                        num_transforms=total)
def apply_replacements(graph: Graph, replacements_type: list):
    """Resolve the execution order for the requested transform types, then
    apply every transform to ``graph``.

    Patterns without an 'op' attribute are gathered first; conflicting op
    replacements are not applied (a warning is emitted at registration time).
    """
    ordered = get_replacers_order(replacements_type)
    apply_replacements_list(graph, ordered)
| for cls in classes:
_update(cls, cls.registered_cls, cls.registered_ops, 'op', enabled_transforms, disabled_transforms)
_registered_classes_dict.setdefault(cls.class_type(), set()).add(cls) |
SRP_DKL_forward.py | ###############################################################################
# Code by Christoph Aurnhammer, based on #
# https://github.com/pytorch/examples/tree/master/word_language_model #
# Citation: Aurnhammer & Frank (2019), Neuropsychologia. #
# LIG_2: #
# This code averages all instances of a model snapshot and #
# then computes surprisal and the Kullbach-Leibler Divergence #
# on the experimental stimuli from Frank (2013) #
# (Christoph Aurnhammer, 05.04.2019 #
# for Aurnhammer, Frank (upcoming) #
###############################################################################
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.functional import softmax
import argparse
import pandas
import glob
import re
import numpy as np
from math import log, exp, isnan
# script data.py
import data
def parse_args():
    """Parse the command-line options for the ENCOW language-model run."""
    arg_parser = argparse.ArgumentParser(description='PyTorch ENCOW Language Model')
    arg_parser.add_argument('--data', type=str, default='./corpus/',
                            help='location of the data corpus')
    arg_parser.add_argument('--bptt', type=int, default=42,
                            help='sequence length')
    arg_parser.add_argument('--cuda', action='store_true',
                            help='use CUDA')
    return arg_parser.parse_args()
# Get snapshots absolute paths from dir
def get_paths(directory, names_snapshot):
    """Collect existing checkpoint paths for every snapshot name and each of
    the repetitions 0..5, appending each glob's matches in numerically sorted
    order."""
    collected = []
    for snapshot_name in names_snapshot:
        for repetition in range(0, 6):
            matches = glob.glob(directory + snapshot_name + str(repetition))
            # Empty glob results are simply skipped.
            if matches:
                collected.extend(sorted(matches, key=numerical_sort))
    return collected
def numerical_sort(value):
    """Sort key that splits ``value`` into alternating text and integer chunks
    so embedded numbers compare numerically (e.g. 'f2' sorts before 'f10')."""
    chunks = re.compile(r'(\d+)').split(value)
    # Captured digit runs land at the odd indices; convert them to ints.
    chunks[1::2] = map(int, chunks[1::2])
    return chunks
def prepare_outfile(out_path):
    """Build the stimulus dataframe (sentence number, 1-based word position,
    word, item number, ENCOW log frequency), write it tab-separated to
    ``out_path`` and return it."""
    # Load items from the experimental stimuli
    with open('./corpus/test.txt') as inputfile:
        inputfile = inputfile.read()
        # Re-attach punctuation and clitics so tokens line up with the
        # stimulus lists from Frank (2013).
        inputfile = inputfile.replace(' ,', ',')
        inputfile = inputfile.replace(' n\'t', 'n\'t')
        inputfile = inputfile.replace(' \'', '\'')
        inputfile = inputfile.split('\n')
        # Drop the trailing empty line produced by the final newline.
        del inputfile[-1]
        inputfile = [sentence.split(' ') for sentence in inputfile]
    sent_nr = []
    word_pos = []
    words = []
    # Flatten sentences into parallel 1-based index columns.
    for sent_ind, sentence in enumerate(inputfile):
        for word_ind, word in enumerate(sentence):
            sent_nr.append(sent_ind + 1)
            word_pos.append(word_ind + 1)
            words.append(word)
    # Prepare output file
    dataframe = pandas.DataFrame()
    dataframe['sent_nr'] = sent_nr
    dataframe['word_pos'] = word_pos
    dataframe['word'] = words
    dataframe['item'] = pandas.read_csv('./input/itemnumbers_frank2013.csv', delimiter='\t', header=None)
    dataframe['ENCOW_log_freq'] = pandas.read_csv('./input/ENCOWfreqs_frank2013.csv', delimiter='\t')
    dataframe.to_csv(out_path, sep='\t', index=False)
    return dataframe
def batchify(dt, bsz, arguments):
    """Reshape a 1-D token tensor into ``bsz`` parallel columns.

    Trailing tokens that do not fill a whole batch are dropped; the result has
    shape (nbatch, bsz) and is moved to the GPU when --cuda is set.
    """
    nbatch = dt.size(0) // bsz
    trimmed = dt.narrow(0, 0, nbatch * bsz)
    batched = trimmed.view(bsz, -1).t().contiguous()
    if arguments.cuda:
        batched = batched.cuda()
    return batched
def repackage_hidden(h):
    """Wraps hidden states in new Variables, to detach them from their history."""
    # NOTE(review): relies on the deprecated torch.autograd.Variable API; the
    # exact type() equality check (not isinstance) drives the recursion into
    # tuples of hidden states — confirm behaviour on the torch version in use.
    if type(h) == Variable:
        return Variable(h.data)
    else:
        return tuple(repackage_hidden(v) for v in h)
def get_batch(source, i):
    """Slice a training batch starting at ``i`` plus its shifted targets.

    The slice length is capped by the module-level ``args.bptt`` and by the
    remaining data; targets are the inputs shifted one step ahead, flattened.
    """
    seq_len = min(args.bptt, len(source) - 1 - i)
    inputs = Variable(source[i:i + seq_len])
    targets = Variable(source[i + 1:i + 1 + seq_len].view(-1))
    return inputs, targets
def get_eos(arguments):
    """Return a one-element LongTensor holding the <eos> vocabulary index,
    allocated on the GPU when --cuda is set."""
    eos_index = corpus.dictionary.word2idx['<eos>']
    if arguments.cuda:
        return torch.cuda.LongTensor([eos_index])
    return torch.LongTensor([eos_index])
def forward_model(rnn, sequence, types, args):
    """For one sentence, return two lists of per-word distributions:
    P(w_t | w_1..t-1) and P(w_t+1 | w_1..t), the latter obtained by
    marginalising over every possible w_t (``types`` = vocabulary size).
    """
    # Initialise hidden state of PLM to zeros for new sequence
    hidden_true = rnn.init_hidden(1)
    # List of probability distributions over w_t and w_t1
    w_t_list = []
    w_t1_list = []
    # For each word in the sentence (starting from <sos>)
    for item in sequence:
        # Reformat item (technicality)
        if args.cuda:
            item = torch.cuda.LongTensor([[int(item)]])
        else:
            item = torch.LongTensor([[int(item)]])
        # Model current item.
        # This is returns the "true" output / hidden states corresponding
        # to the actually occuring items in the stimuli
        output_true, hidden_true = rnn(item, hidden_true)
        # Collect current P(w_t|w_1..._t-1) probability distribution
        p_wt_dist = softmax(output_true, dim=-1).data[0][0]
        # For P(w_t+1|w_1...t):
        # Allocate array with vocabulary size as rows and columns
        if args.cuda:
            probs = torch.cuda.FloatTensor(np.empty([types, types]))
        else:
            probs = torch.FloatTensor(np.empty([types, types]))
        # For each possible possible w_t
        for wt in range(0, types):
            # Select probability of current w_t
            p_wt = p_wt_dist[wt]
            # Run using current wt and rnn hidden state produced after last true item
            if args.cuda:
                output_wt1, hidden_wt1 = rnn(torch.cuda.LongTensor([[wt]]), hidden_true)
            else:
                output_wt1, hidden_wt1 = rnn(torch.LongTensor([[wt]]), hidden_true)
            # Collect current P(w_t+1|w_1...t) distribution
            p_wt1_dist = softmax(output_wt1, dim=-1).data[0][0]
            # Enter as column into matrix
            # Each cell is the probability of the j-th w_t1
            # multiplied by the prob of the current w_t
            probs[:, wt] = p_wt1_dist * p_wt
        # Compute sum per row, leaving a single vector with
        # one probability per possible w_t1
        p_wt1_dist = probs.sum(dim=1)
        # Append to output lists
        w_t_list.append(p_wt_dist)
        w_t1_list.append(p_wt1_dist)
    return w_t_list, w_t1_list
def average_sent(sentence_output):
    """Average per-word distributions across models.

    ``sentence_output`` is one entry per model, each an equal-length list of
    per-word distributions; returns the element-wise mean for every word.
    """
    n_models = len(sentence_output)
    averaged = []
    for per_model in zip(*sentence_output):
        averaged.append(sum(per_model) / n_models)
    return averaged
def compute_surprisal(dists, seq_targets):
    """For each (target, distribution) pair, record the target word together
    with its surprisal, -log P(target), rounded to 4 decimals."""
    sent_surprisal = []
    for target, prob in zip(seq_targets.data, dists):
        surprisal = round(log(float(prob[target])), 4) * -1
        sent_surprisal.append([corpus.dictionary.idx2word[target], surprisal])
    return sent_surprisal
def compute_kld(dist_nextword, dist_plusone, seq_targets):
    # accept two lists of probability distributions and compute the Kullback-Leibler Divergence for each pair
    # NOTE(review): mutates both input lists (del) so the distributions align
    # one step apart; callers must not reuse them afterwards.
    del dist_nextword[0]
    del dist_plusone[-1]
    # We can't compute the KLD for the first words of sentence
    sent_kld = [[corpus.dictionary.idx2word[seq_targets[0]], None]]
    seq_targets = seq_targets[1:]
    for dist_nw, dist_po, target in zip(dist_nextword, dist_plusone, seq_targets):
        # KLD(nw || po) = cross-entropy(nw, po) - entropy(nw).
        cross_entropy = -sum(dist_nw * dist_po.log())
        plusone_entropy = -sum(dist_nw * dist_nw.log())
        kld = cross_entropy - plusone_entropy
        sent_kld.append([corpus.dictionary.idx2word[target], round(kld.item(), 4)])
    return sent_kld
def add_to_df(input_lol, dataframe, snap, metric_name):
    """Add one metric column named ``<metric_name>_<snap>`` to ``dataframe``.

    ``input_lol`` is a list of sentences, each a list of [word, value] pairs;
    rows are matched by (sent_nr, word_pos, word) and mismatches get None.
    Returns the mutated ``dataframe``.
    """
    # Clean up sentences: remove commas and words with clitics (ensures equal lengths of sentences)
    # NOTE(review): deleting while iterating skips the element following each
    # removal; kept as-is to preserve the established alignment behaviour.
    for i_index, sentence in enumerate(input_lol):
        for j_index, word in enumerate(sentence):
            if word[0] == ',':
                del (input_lol[i_index][j_index])
            elif '\'' in word[0]:
                del (input_lol[i_index][j_index])
    # Add metrics to new column; each row is (sent_nr, word_pos, word, ...),
    # both indices 1-based.
    new_col = []
    for row in dataframe.iterrows():
        word_value = input_lol[row[1][0] - 1][row[1][1] - 1]
        word = row[1][2].lower()
        word = word.strip(',')
        if word_value[0] == word and word_value[1] is not None:
            new_col.append(float(word_value[1]))
        else:
            new_col.append(None)
    # Bug fix: validate against the dataframe argument rather than the
    # module-level global ``df`` the original referenced.
    assert len(dataframe) == len(new_col)
    dataframe[metric_name + '_' + snap] = new_col
    return dataframe
def evaluate(surprisal_values):
    """Compute perplexity exp(mean surprisal) over non-NaN entries, print it,
    and return it. Raises ZeroDivisionError if every entry is NaN."""
    count = 0
    log_prob_sum = 0
    for surprisal in surprisal_values:
        if not isnan(surprisal):
            count += 1
            log_prob_sum += -surprisal
    perplexity = exp(-1 / count * log_prob_sum)
    print("Evaluated: Perplexity {}".format(perplexity))
    return perplexity
def store_eval(wt_perf, wt1_perf):
    """Persist per-snapshot perplexities for w_t and w_t+1 to
    srp_entr/PLM_ppl.csv and echo the resulting table."""
    output = pandas.DataFrame()
    # Snapshot labels correspond to the training-set sizes of the models.
    output['snapshots'] = ['1K', '3K', '10K', '30K', '100K', '300K', '1M', '3M', '6.47M']
    output['wt'] = wt_perf
    output['wt1'] = wt1_perf
    output.to_csv('srp_entr/PLM_ppl.csv')
    print(output)
if __name__ == "__main__":
# Parse command line arguments
args = parse_args()
# Notify user if a cuda decive could be used
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
#####################################
# Define all project specific input #
#####################################
# Define the names of the snapshots and the directories where they are found
snapshot_names = ['_1000_', '_3000_', '_10000_', '_30000_', '_100000_', '_300000_', '_1000000_',
'_3000000_', '_6470000_']
# We are only select LSTM models (in case other matching models are in dir)
snapshot_names = ['LSTM' + name for name in snapshot_names]
# Shortened names used for output
snapshot_output_names = ['1k', '3k', '10k', '30k', '100k', '300k', '1M', '3M', '6-47M']
# Directories where the models are stored (normal LM = nextword; two steps ahead = plusone)
nextword_dir = './output/'
# Get full paths
nextword_paths = get_paths(nextword_dir, snapshot_names)
# Prepare output file
outfile_path = './srp_entr/SRP_DKL_snapshots.txt'
df = prepare_outfile(outfile_path)
# Load corpus into memory (requires data.py)
print('Loading corpus from {}'.format(args.data))
corpus = data.Corpus(args.data, args.bptt)
ntypes = len(corpus.dictionary)
eos = get_eos(args)
test_data = batchify(corpus.test, bsz=1, arguments=args)
# Evaluation output
wt_ppl = []
wt1_ppl = []
######################################
# Loop through snapshots, sentences #
# Compute surprisal, KLD #
# Add data to file for each snapshot #
######################################
for snap_name, snap_out_name in zip(snapshot_names, snapshot_output_names):
snap_paths_nextword = [path for path in nextword_paths if snap_name in path]
sent_counter = 0
wt_surprisal_list = []
KLD_list = []
wt1_surprisal_list = []
for x in range(0, test_data.size(0) - 1, args.bptt):
data, targets = get_batch(test_data, x)
# chr: cut off data at end of sentence
for j in range(len(data)):
if (int(data[j].data) == int(eos)) is True:
sent_counter += 1
data = data[:j, :1]
# normal targets
wt_targets = targets[:j]
# targets for wt1
wt1_targets = targets[1:j]
break
# Wt list
wt_list = []
wt1_list = []
# Forward modeling of two steps ahead probability
for model_path in snap_paths_nextword:
with open(model_path, 'rb') as f:
rnn_model = torch.load(f)
rnn_model.eval()
wt_seq, wt1_seq = forward_model(rnn_model, data, ntypes, args) # return two lists of dists
wt_list.append(wt_seq)
wt1_list.append(wt1_seq)
wt_avg = average_sent(wt_list)
wt1_avg = average_sent(wt1_list)
# For each word in the current sentence get surprisal and KLD
# Compute surprisal for each word wt
wt_surprisal_list.append(compute_surprisal(wt_avg, wt_targets))
# Compute KLD for each word (targets used to make word step identifiable)
KLD_list.append(compute_kld(wt_avg, wt1_avg, wt_targets))
# Compute surprisal for each word wt1 (only for evaluation purposes)
wt1_surprisal_list.append(compute_surprisal(wt1_avg, wt1_targets))
print("Classified {} sentences at snapshot {}".format(sent_counter, snap_out_name), end='\r') | # Add surprisal to output
df = add_to_df(wt_surprisal_list, df, snap_out_name, metric_name='srp')
df = add_to_df(KLD_list, df, snap_out_name, metric_name='KLD')
df.to_csv(outfile_path, sep='\t', index=False)
# List surprisal for evaluation
wt_ppl.append(evaluate([y for x in wt_surprisal_list for y in x]))
wt1_ppl.append(evaluate([y for x in wt1_surprisal_list for y in x]))
# Store evaluation output
store_eval(wt_ppl, wt1_ppl) | |
CustomerBalanceTransactions.d.ts | // File generated from our OpenAPI spec
declare module 'stripe' {
namespace Stripe {
/**
* The CustomerBalanceTransaction object.
*/
interface CustomerBalanceTransaction {
/**
* Unique identifier for the object.
*/
id: string;
/**
* String representing the object's type. Objects of the same type share the same value.
*/
object: 'customer_balance_transaction';
/**
* The amount of the transaction. A negative value is a credit for the customer's balance, and a positive value is a debit to the customer's `balance`.
*/
amount: number;
/**
* Time at which the object was created. Measured in seconds since the Unix epoch.
*/
created: number;
/**
* The ID of the credit note (if any) related to the transaction.
*/
credit_note: string | Stripe.CreditNote | null;
/**
* Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. Must be a [supported currency](https://stripe.com/docs/currencies).
*/
currency: string;
/**
* The ID of the customer the transaction belongs to.
*/
customer: string | Stripe.Customer;
/**
* An arbitrary string attached to the object. Often useful for displaying to users.
*/
description: string | null;
/**
* The customer's `balance` after the transaction was applied. A negative value decreases the amount due on the customer's next invoice. A positive value increases the amount due on the customer's next invoice.
*/
ending_balance: number; | */
invoice: string | Stripe.Invoice | null;
/**
* Has the value `true` if the object exists in live mode or the value `false` if the object exists in test mode.
*/
livemode: boolean;
/**
* Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object. This can be useful for storing additional information about the object in a structured format.
*/
metadata: Metadata | null;
/**
* Transaction type: `adjustment`, `applied_to_invoice`, `credit_note`, `initial`, `invoice_too_large`, `invoice_too_small`, `unspent_receiver_credit`, or `unapplied_from_invoice`. See the [Customer Balance page](https://stripe.com/docs/billing/customer/balance#types) to learn more about transaction types.
*/
type: CustomerBalanceTransaction.Type;
}
namespace CustomerBalanceTransaction {
type Type =
| 'adjustment'
| 'applied_to_invoice'
| 'credit_note'
| 'initial'
| 'invoice_too_large'
| 'invoice_too_small'
| 'migration'
| 'unapplied_from_invoice'
| 'unspent_receiver_credit';
}
interface CustomerBalanceTransactionCreateParams {
/**
* The integer amount in **%s** to apply to the customer's balance. Pass a negative amount to credit the customer's balance, and pass in a positive amount to debit the customer's balance.
*/
amount: number;
/**
* Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. Must be a [supported currency](https://stripe.com/docs/currencies). If the customer's [`currency`](https://stripe.com/docs/api/customers/object#customer_object-currency) is set, this value must match it. If the customer's `currency` is not set, it will be updated to this value.
*/
currency: string;
/**
* An arbitrary string attached to the object. Often useful for displaying to users.
*/
description?: string;
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
/**
* Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object. This can be useful for storing additional information about the object in a structured format. Individual keys can be unset by posting an empty value to them. All keys can be unset by posting an empty value to `metadata`.
*/
metadata?: MetadataParam | null;
}
interface CustomerBalanceTransactionRetrieveParams {
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
}
interface CustomerBalanceTransactionUpdateParams {
/**
* An arbitrary string attached to the object. Often useful for displaying to users.
*/
description?: string;
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
/**
* Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object. This can be useful for storing additional information about the object in a structured format. Individual keys can be unset by posting an empty value to them. All keys can be unset by posting an empty value to `metadata`.
*/
metadata?: MetadataParam | null;
}
interface CustomerBalanceTransactionListParams extends PaginationParams {
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
}
}
} |
/**
* The ID of the invoice (if any) related to the transaction. |
jsonFsJson.js | /*
artifact generator: C:\My\wizzi\wizzi\node_modules\wizzi-js\lib\artifacts\js\module\gen\main.js
primary source IttfDocument: C:\My\wizzi\wizzi\packages\wizzi-repo\.wizzi\ittf\examples\jsonFsJson.js.ittf
*/
'use strict';
/**
EXAMPLE: wizzi-repo.examples.jsonFsJson
*/
var myname = 'wizzi-repo.examples.jsonFsJson';
var util = require('util');
var path = require('path');
var stringify = require('json-stringify-safe');
var vfile = require('wizzi-utils').vfile;
// defaults to filesystem
var fsfile = vfile();
var verify = require('wizzi-utils').verify;
var json = require('../lib/json/index');
// Demo flow for the FsJson store: insert an item, rename it, write its
// document, read it back, then delete it — dumping the in-memory state
// after each step. Each operation is node-style async, hence the nesting.
heading1('start');
var fsJson = new json.FsJson();
dump();
fsJson.insertItem({
    basename: 'alpha.js.ittf', 
    dirname: 'w:/zero', 
    kind: 1
 }, function(err, result) {
    if (err) {
        console.log('err', err);
        throw new Error(err.message);
    }
    console.log('insert.alpha.js.ittf.result', result);
    // Kept for the final deleteItem call at the innermost level.
    var insertedId = result.insertedId;
    dump();
    // Rename the inserted item and persist the change.
    result.item.basename = 'beta.js.ittf';
    fsJson.updateItem(result.item, function(err, result) {
        if (err) {
            console.log('err', err);
            throw new Error(err.message);
        }
        console.log('update.beta.js.ittf.result', result);
        dump();
        // Attach document content to the item, keyed by its _id.
        fsJson.writeDocument(result.item._id, 'My content', function(err, result) {
            if (err) {
                console.log('err', err);
                throw new Error(err.message);
            }
            console.log('write.beta.js.ittf.result', result);
            dump();
            fsJson.readDocument(result.item._id, function(err, result) {
                if (err) {
                    console.log('err', err);
                    throw new Error(err.message);
                }
                console.log('read.beta.js.ittf.result', result);
                fsJson.deleteItem(insertedId, function(err, result) {
                    if (err) {
                        console.log('err', err);
                        throw new Error(err.message);
                    }
                    console.log('delete.beta.js.ittf.result', result);
                    dump();
                });
            });
        });
    });
});
// Prints the current in-memory state of the module-level FsJson store
// (its items and documents collections) via printValue.
function dump() {
    printValue('fsJson.items', fsJson.items);
    printValue('fsJson.documents', fsJson.documents);
}
// Prints a prominent asterisk-framed banner to mark a major section of
// the example's console output.
function heading1(text) {
    console.log('');
    console.log('********************************************************************************');
    console.log('** Example wizzi-repo.examples.jsonFsJson - ' + text);
    console.log('********************************************************************************');
    console.log('');
}
// Prints a lighter, dash-framed banner for sub-sections of the example's
// console output.
function heading2(text) {
    console.log('');
    console.log('--------------------------------------------------------------------------------');
    console.log('-- Example wizzi-repo.examples.jsonFsJson - ' + text);
    console.log('--------------------------------------------------------------------------------');
    console.log('');
}
// Prints every element of `arr`: a "{ <index>" header per element, then
// each field via printValue. When `fields` is given, only those keys are
// printed; otherwise all own keys of the element.
// NOTE(review): no closing "}" is printed per element — appears to be the
// example's intentionally loose dump format.
function printArray(name, arr, fields) {
    console.log('* array ' + name + ' : ');
    var i, i_items=arr, i_len=arr.length, item;
    for (i=0; i<i_len; i++) {
        item = arr[i];
        console.log('  {', i);
        var keys = fields || Object.keys(item);
        var j, j_items=keys, j_len=keys.length, k;
        for (j=0; j<j_len; j++) {
            k = keys[j];
            printValue(k, item[k]);
        }
    }
}
function printValue(k, v) {
if (verify.isNotEmpty(v)) {
var lines = verify.splitLines(v, {
numbered: true
});
if (lines.length === 1) {
console.log(' ', k, ':', lines[0].text);
}
else {
for (var i=0; i<lines.length; i++) {
if (i === 0) {
console.log(' ', k, ':', lines[0].numFmt, lines[0].text);
}
else {
console.log(' ', spaces(k.length+1), ' ', lines[i].numFmt, lines[i].text);
}
}
}
}
else {
console.log(' ', k, ':', v); | function spaces(len) {
return new Array(len).join(' ');
} | }
} |
server.js | 'use strict';
const EventEmitter = require('events');
const http = require('http');
const Client = require('./client');
// Thin wrapper around Node's http.IncomingMessage for incoming SNS calls.
// (The class name was lost in extraction; restored from its usage as
// `new Request(req)` in Server.start.)
class Request {
  /**
   * @param {http.IncomingMessage} req - the raw Node HTTP request
   */
  constructor(req) {
    this.req = req;
  }

  // Header lookup. Node lower-cases incoming header names, so fall back
  // to the lower-cased key when the caller passes mixed case.
  get(key) {
    return this.req.headers[key] || this.req.headers[key.toLowerCase()];
  }

  // Buffers the request body as UTF-8 text and parses it as JSON into
  // `this.body`. Calls back with an Error (and no parsed body) when the
  // payload is not valid JSON; otherwise calls back with no arguments.
  parseJsonBody(callback) {
    let postBody = '';
    this.req.setEncoding('utf8');
    this.req.on('data', data => {
      postBody += data;
    });
    this.req.on('end', () => {
      try {
        this.body = JSON.parse(postBody);
      } catch (err) {
        return callback(new Error(`AWS SNS request body JSON parsing error: ${err.message}`));
      }
      callback();
    });
  }
}
// Thin wrapper around Node's http.ServerResponse.
class Response {
  constructor(res) {
    this.res = res;
  }

  // Sets the HTTP status code and returns `this` so calls can be chained.
  // BUG FIX: the original returned undefined, so the chained
  // `res.status(200).end()` calls in Server.handleNotification /
  // handleSubscriptionConfirmation threw a TypeError on every request.
  status(code) {
    this.res.statusCode = code;
    return this;
  }

  // Finishes the underlying response.
  end() {
    this.res.end();
  }
}
/**
 * Minimal HTTP server for AWS SNS webhook deliveries.
 * Emits:
 *   - 'message' with the parsed JSON body for each SNS Notification
 *   - 'log'     with an error record whenever request handling fails
 */
class Server extends EventEmitter {
  // Both options.port and options.hostname are mandatory. Validation runs
  // before super(); that is legal here because `this` is not touched first.
  constructor(options) {
    if (!options.port) {
      throw new Error('Server requires options.port');
    }
    if (!options.hostname) {
      throw new Error('Server requires options.hostname');
    }
    super();
    this.port = options.port;
    this.hostname = options.hostname;
    this.server = null;
  }
  // Creates and starts the HTTP listener. `callback` is invoked with
  // (null, this) once listening, or with the server 'error' event payload.
  start(callback) {
    this.server = http.createServer((req, res) => {
      const request = new Request(req);
      const response = new Response(res);
      request.parseJsonBody(err => {
        if (err) {
          return this.handleError(err, req, res);
        }
        this.handleRequest(request, response, err => {
          if (err) {
            return this.handleError(err, req, res);
          }
          // Handlers are expected to finish the response themselves;
          // calling next() without an error is treated as a bug.
          this.handleError(
            new Error(`next() called unexpectedly without an error`),
            req,
            res
          );
        });
      });
    });
    this.server.on('error', callback);
    this.server.listen(this.port, this.hostname, () => {
      callback(null, this);
    });
  }
  // Dispatches on the SNS message-type header; unknown types become errors.
  handleRequest(req, res, next) {
    const type = req.get('x-amz-sns-message-type');
    switch (type) {
      case 'SubscriptionConfirmation':
        return this.handleSubscriptionConfirmation(req, res, next);
      case 'Notification':
        return this.handleNotification(req, res, next);
      default:
        return next(new Error(`Unknown SNS message type: "${type}"`));
    }
  }
  // Re-emits the notification body; any throw (from a listener or from the
  // res chain) is routed to next(err) and ends up as a 500.
  // NOTE(review): `res.status(200).end()` relies on Response.status()
  // returning the wrapper for chaining — verify against Response.
  handleNotification(req, res, next) {
    try {
      this.emit('message', req.body);
      res.status(200).end();
    } catch (err) {
      next(err);
    }
  }
  // Confirms the pending SNS subscription by calling back the SubscribeURL
  // supplied in the request body; failures propagate via next.
  handleSubscriptionConfirmation(req, res, next) {
    const subscribeUrl = req.body.SubscribeURL;
    return Client.sendSubscriptionConfirmation(subscribeUrl).then(() => {
      res.status(200).end();
      return null;
    }).catch(next);
  }
  // Terminal error path: always answers 500 plain text and emits a 'log'
  // event carrying the original error.
  handleError(err, req, res) {
    const body = 'Server error';
    res.writeHead(500, {
      'Content-Type': 'text/plain',
      'Content-Length': Buffer.byteLength(body)
    });
    res.end(body);
    this.emit('log', {level: 'error', message: 'SNS subscription server error', context: {error: err}});
  }
}
module.exports = Server;
| Request |
fee-proxy-contract.ts | import { erc20FeeProxyArtifact } from '@requestnetwork/smart-contracts';
import {
AdvancedLogicTypes,
ExtensionTypes,
PaymentTypes,
RequestLogicTypes,
} from '@requestnetwork/types';
import { CurrencyDefinition, ICurrencyManager } from '@requestnetwork/currency';
import ProxyInfoRetriever from './proxy-info-retriever';
import { networkSupportsTheGraph } from '../thegraph';
import TheGraphInfoRetriever from './thegraph-info-retriever';
import { loadCurrencyFromContract } from './currency';
import { FeeReferenceBasedDetector } from '../fee-reference-based-detector';
import { makeGetDeploymentInformation } from '../utils';
// Maps each supported payment-network extension version to the version of
// the deployed ERC20 fee-proxy contract artifact (consumed by
// makeGetDeploymentInformation below).
const PROXY_CONTRACT_ADDRESS_MAP = {
  ['0.1.0']: '0.1.0',
  ['0.2.0']: '0.2.0',
};
/**
* Handle payment networks with ERC20 fee proxy contract extension, or derived
*/
export abstract class ERC20FeeProxyPaymentDetectorBase<
  TExtension extends ExtensionTypes.PnFeeReferenceBased.IFeeReferenceBased,
  TPaymentEventParameters extends PaymentTypes.IERC20FeePaymentEventParameters,
> extends FeeReferenceBasedDetector<TExtension, TPaymentEventParameters> {
  /**
   * @param paymentNetworkId The payment network identifier
   * @param extension The advanced logic payment network extensions
   * @param _currencyManager Used to resolve a request's currency definition
   */
  public constructor(
    paymentNetworkId: PaymentTypes.PAYMENT_NETWORK_ID,
    extension: TExtension,
    protected _currencyManager: ICurrencyManager,
  ) {
    super(paymentNetworkId, extension);
  }

  /**
   * Resolves the currency definition for a request's storage currency.
   * Tries the currency manager first; for an unknown ERC20 token, falls
   * back to reading the definition from the token contract itself.
   * @throws when the currency is neither known nor a resolvable ERC20
   */
  protected async getCurrency(
    storageCurrency: RequestLogicTypes.ICurrency,
  ): Promise<CurrencyDefinition> {
    const currency = this._currencyManager.fromStorageCurrency(storageCurrency);
    if (currency) {
      return currency;
    }
    if (storageCurrency.type !== RequestLogicTypes.CURRENCY.ERC20) {
      throw new Error(`Currency ${storageCurrency.value} not known`);
    }
    const contractCurrency = await loadCurrencyFromContract(storageCurrency);
    if (!contractCurrency) {
      // Fixed typo in the user-facing message: "contrat" -> "contract".
      throw new Error(
        `Cannot retrieve currency for contract ${storageCurrency.value} (${storageCurrency.network})`,
      );
    }
    return contractCurrency;
  }
}
/**
* Handle payment networks with ERC20 fee proxy contract extension
*/
/**
 * Payment detector for the ERC20 fee-proxy contract payment network.
 * Networks indexed by TheGraph use TheGraphInfoRetriever; other networks
 * fall back to on-chain log scanning via ProxyInfoRetriever.
 *
 * NOTE(review): the body of `extractEvents` below contains an extraction
 * artifact — the lines around the '|' markers are out of their original
 * order. Restore the statement order from the upstream source before
 * compiling; the code is kept byte-identical here.
 */
export class ERC20FeeProxyPaymentDetector extends ERC20FeeProxyPaymentDetectorBase<
  ExtensionTypes.PnFeeReferenceBased.IFeeReferenceBased,
  PaymentTypes.IERC20FeePaymentEventParameters
> {
  // Wires the fee-proxy ERC20 extension from the advanced logic into the
  // base detector.
  constructor({
    advancedLogic,
    currencyManager,
  }: {
    advancedLogic: AdvancedLogicTypes.IAdvancedLogic;
    currencyManager: ICurrencyManager;
  }) {
    super(
      PaymentTypes.PAYMENT_NETWORK_ID.ERC20_FEE_PROXY_CONTRACT,
      advancedLogic.extensions.feeProxyContractErc20,
      currencyManager,
    );
  }
  /**
   * Extracts the payment events of a request
   */
  protected async extractEvents(
    eventName: PaymentTypes.EVENTS_NAMES,
    address: string | undefined,
    paymentReference: string,
    requestCurrency: RequestLogicTypes.ICurrency,
    paymentChain: string,
    paymentNetwork: ExtensionTypes.IState<ExtensionTypes.PnFeeReferenceBased.ICreationParameters>,
  ): Promise<PaymentTypes.AllNetworkEvents<PaymentTypes.IERC20FeePaymentEventParameters>> {
    if (!address) {
      return Promise.resolve({
        paymentEvents: [],
      });
    }
    const { address: proxyContractAddress, creationBlockNumber: proxyCreationBlockNumber } =
      ERC20FeeProxyPaymentDetector.getDeploymentInformation(paymentChain, paymentNetwork.version);
    if (networkSupportsTheGraph(paymentChain)) {
      const graphInfoRetriever = new TheGraphInfoRetriever(
        paymentReference,
        proxyContractAddress,
        requestCurrency.value,
        address,
        eventName,
        paymentChain,
      ); | proxyContractAddress,
        proxyCreationBlockNumber,
        requestCurrency.value,
        address,
        eventName,
        paymentChain,
      );
      const paymentEvents = await proxyInfoRetriever.getTransferEvents();
      return {
        paymentEvents,
      };
    }
  }
  /*
   * Returns deployment information for the underlying smart contract for a given payment network version
   */
  public static getDeploymentInformation = makeGetDeploymentInformation(
    erc20FeeProxyArtifact,
    PROXY_CONTRACT_ADDRESS_MAP,
  );
} | return graphInfoRetriever.getTransferEvents();
  } else {
    const proxyInfoRetriever = new ProxyInfoRetriever(
      paymentReference,
OsAbstract.py | """
OS abstraction
"""
import os, shutil, os.path, re, traceback
import wx
from . import SystemInfo
from .StringOps import mbcsEnc, urlQuote, pathnameFromUrl, pathEnc
# import WindowsHacks
try:
import WindowsHacks
except:
if SystemInfo.isWindows():
traceback.print_exc()
WindowsHacks = None
try:
import GtkHacks
except:
import ExceptionLogger
ExceptionLogger.logOptionalComponentException(
"Initialize GTK hacks in OsAbstract.py")
GtkHacks = None
# Define startFile: open a file or URL with an external application.
# Windows NT unicode builds delegate to the native WindowsHacks version;
# other Windows builds fall back to os.startfile; elsewhere a
# user-configured launcher (or the default browser) is used.
if SystemInfo.isWindows():
    if SystemInfo.isWinNT() and SystemInfo.isUnicode() and WindowsHacks:
        startFile = WindowsHacks.startFile
    else:
        def startFile(mainControl, link):
            # mbcsEnc returns a tuple; element 0 is the encoded path
            # expected by os.startfile (presumably (bytes, length) — TODO
            # confirm against StringOps).
            os.startfile(mbcsEnc(link, "replace")[0])
else:
    def startFile(mainControl, link):
        # We need mainControl only for this version of startFile()
        startPath = mainControl.getConfig().get("main", "fileLauncher_path", u"")
        if startPath == u"":
            # No launcher configured: let wx pick the default browser/app.
            wx.LaunchDefaultBrowser(link)
            return
        if link.startswith("file:"):
            link = pathnameFromUrl(link)
        # Fire-and-forget: do not wait for the launcher to terminate.
        os.spawnlp(os.P_NOWAIT, startPath, startPath, link)
# Define copyFile/moveFile/deleteFile. On NT with the native helpers
# available the WindowsHacks implementations are used; otherwise the
# portable shutil/os fallbacks below.
if SystemInfo.isWinNT() and WindowsHacks:
    copyFile = WindowsHacks.copyFile
    moveFile = WindowsHacks.moveFile
    deleteFile = WindowsHacks.deleteFile
else:
    # TODO Mac version
    def copyFile(srcPath, dstPath):
        """
        Copy file from srcPath to dstPath. dstPath may be overwritten if
        existing already. dstPath must point to a file, not a directory.
        If some directories in dstPath do not exist, they are created.
        This currently just calls shutil.copy2() TODO!
        """
        # Create the destination directory chain first if it is missing.
        dstDir = os.path.dirname(dstPath)
        if not os.path.exists(pathEnc(dstDir)):
            os.makedirs(dstDir)
        shutil.copy2(srcPath, dstPath)
    def moveFile(srcPath, dstPath):
        """
        Move file from srcPath to dstPath. dstPath may be overwritten if
        existing already. dstPath must point to a file, not a directory.
        If some directories in dstPath do not exist, they are created.
        """
        dstDir = os.path.dirname(dstPath)
        if not os.path.exists(pathEnc(dstDir)):
            os.makedirs(dstDir)
        shutil.move(srcPath, dstPath)
    def deleteFile(path):
        """
        Delete file or directory path.
        """
        # TODO: Check for directories
        #   os.rmdir(path) ?
        if os.path.isfile(path) or os.path.islink(path):
            os.unlink(path)
        elif os.path.isdir(path):
            # os.rmdir only removes EMPTY directories; a non-empty one
            # raises OSError (see TODO above).
            os.rmdir(path)
# Define samefile: test whether two paths refer to the same file.
# POSIX has os.path.samefile; Windows needs a best-effort comparison of
# canonicalized (long, lower-cased) path names instead.
if SystemInfo.isWindows():
    if WindowsHacks:
        def samefile(path1, path2):
            # Not fully reliable. Does anybody know something better?
            # First compare the paths as given, then their absolute forms.
            if WindowsHacks.getLongPath(path1).lower() == \
                    WindowsHacks.getLongPath(path2).lower():
                return True
            return WindowsHacks.getLongPath(os.path.abspath(path1)).lower() == \
                    WindowsHacks.getLongPath(os.path.abspath(path2)).lower()
    else:
        def samefile(path1, path2):
            # Crude fallback: textual comparison of absolute paths only.
            return os.path.abspath(path1) == os.path.abspath(path2)
else:
    samefile = os.path.samefile
# normalizePath: return a canonical form of a path suitable for equality
# comparison on the current platform.
if WindowsHacks:
    def normalizePath(path):
        # Long-path expansion plus lower-casing canonicalizes on Windows.
        return WindowsHacks.getLongPath(os.path.abspath(path)).lower()
else:
    def normalizePath(path):
        # normcase lower-cases only on case-insensitive platforms.
        return os.path.normcase(os.path.abspath(path))
# Define checkForOtherInstances
# If defined properly it returns a list of process identifiers of other
# WikidPad processes. This list should be empty if option "Single process
# per user" is selected. If it is not, there is an error.
if WindowsHacks:
    checkForOtherInstances = WindowsHacks.checkForOtherInstances
else:
    # Non-Windows fallback: detection unsupported, report no other
    # instances.
    def checkForOtherInstances():
        return []
# Define createInterceptCollection, createClipboardInterceptor (may return None)
# Define supportsClipboardInterceptor
# Fallback definitions first; they are overridden below when a platform
# implementation (WindowsHacks or GtkHacks) is available.
def supportsClipboardInterceptor():
    return False
def createInterceptCollection(interceptors=None):
    return None
def createClipboardInterceptor(callingWindow):
    return None
if SystemInfo.isWindows():
    if WindowsHacks:
        def supportsClipboardInterceptor():
            return True
        def createInterceptCollection(interceptors=None):
            return WindowsHacks.WinProcInterceptCollection(interceptors)
        def createClipboardInterceptor(callingWindow):
            return WindowsHacks.ClipboardCatchIceptor(callingWindow)
else:
    if GtkHacks:
        def supportsClipboardInterceptor():
            return True
        def createInterceptCollection(interceptors=None):
            return GtkHacks.FakeInterceptCollection(interceptors)
        def createClipboardInterceptor(callingWindow):
            return GtkHacks.ClipboardCatchFakeIceptor(callingWindow)
if WindowsHacks:
|
else:
def translateAcceleratorByKbLayout(accStr):
return accStr
| translateAcceleratorByKbLayout = WindowsHacks.translateAcceleratorByKbLayout |
expand.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{Block, Crate, DeclLocal, PatMac};
use ast::{Local, Ident, Mac_, Name};
use ast::{ItemMac, MacStmtWithSemicolon, Mrk, Stmt, StmtDecl, StmtMac};
use ast::{StmtExpr, StmtSemi};
use ast::TokenTree;
use ast;
use ext::mtwt;
use ext::build::AstBuilder;
use attr;
use attr::{AttrMetaMethods, WithAttrs};
use codemap;
use codemap::{Span, Spanned, ExpnInfo, NameAndSpan, MacroBang, MacroAttribute};
use ext::base::*;
use feature_gate::{self, Features};
use fold;
use fold::*;
use util::move_map::MoveMap;
use parse;
use parse::token::{fresh_mark, fresh_name, intern};
use ptr::P;
use util::small_vector::SmallVector;
use visit;
use visit::Visitor;
use std_inject;
use std::collections::HashSet;
/// Expands macros inside an expression, then keeps folding the result
/// outside-in. NOTE: this fork deliberately leaves UNKNOWN macros in
/// place (the original expression is returned unchanged) instead of
/// reporting them — see the `None` arm and the commented-out error code
/// in expand_mac_invoc.
pub fn expand_expr(e: P<ast::Expr>, fld: &mut MacroExpander) -> P<ast::Expr> {
    let expr_span = e.span;
    // `e` is cloned so the untouched expression can be handed back when
    // the macro invocation cannot be resolved.
    return e.clone().and_then(|ast::Expr {id, node, span, attrs}| match node {
        // expr_mac should really be expr_ext or something; it's the
        // entry-point for all syntax extensions.
        ast::ExprMac(mac) => {
            // Assert that we drop any macro attributes on the floor here
            drop(attrs);
            let expanded_expr = match expand_mac_invoc(mac, span,
                                                       |r| r.make_expr(),
                                                       mark_expr, fld) {
                Some(expr) => expr,
                None => {
                    // Ignore unknown macros.
                    // return DummyResult::raw_expr(span);
                    return e;
                }
            };
            // Keep going, outside-in.
            let fully_expanded = fld.fold_expr(expanded_expr);
            let span = fld.new_span(span);
            fld.cx.bt_pop();
            fully_expanded.map(|e| ast::Expr {
                id: ast::DUMMY_NODE_ID,
                node: e.node,
                span: span,
                attrs: e.attrs,
            })
        }
        ast::ExprInPlace(placer, value_expr) => {
            // Ensure feature-gate is enabled
            feature_gate::check_for_placement_in(
                fld.cx.ecfg.features,
                &fld.cx.parse_sess.span_diagnostic,
                expr_span);
            let placer = fld.fold_expr(placer);
            let value_expr = fld.fold_expr(value_expr);
            fld.cx.expr(span, ast::ExprInPlace(placer, value_expr))
                .with_attrs(fold_thin_attrs(attrs, fld))
        }
        ast::ExprWhile(cond, body, opt_ident) => {
            let cond = fld.fold_expr(cond);
            // Loop labels are renamed hygienically via expand_loop_block.
            let (body, opt_ident) = expand_loop_block(body, opt_ident, fld);
            fld.cx.expr(span, ast::ExprWhile(cond, body, opt_ident))
                .with_attrs(fold_thin_attrs(attrs, fld))
        }
        ast::ExprWhileLet(pat, expr, body, opt_ident) => {
            let pat = fld.fold_pat(pat);
            let expr = fld.fold_expr(expr);
            // Hygienic renaming of the body.
            let ((body, opt_ident), mut rewritten_pats) =
                rename_in_scope(vec![pat],
                                fld,
                                (body, opt_ident),
                                |rename_fld, fld, (body, opt_ident)| {
                expand_loop_block(rename_fld.fold_block(body), opt_ident, fld)
            });
            assert!(rewritten_pats.len() == 1);
            fld.cx.expr(span, ast::ExprWhileLet(rewritten_pats.remove(0), expr, body, opt_ident))
                .with_attrs(fold_thin_attrs(attrs, fld))
        }
        ast::ExprLoop(loop_block, opt_ident) => {
            let (loop_block, opt_ident) = expand_loop_block(loop_block, opt_ident, fld);
            fld.cx.expr(span, ast::ExprLoop(loop_block, opt_ident))
                .with_attrs(fold_thin_attrs(attrs, fld))
        }
        ast::ExprForLoop(pat, head, body, opt_ident) => {
            let pat = fld.fold_pat(pat);
            // Hygienic renaming of the for loop body (for loop binds its pattern).
            let ((body, opt_ident), mut rewritten_pats) =
                rename_in_scope(vec![pat],
                                fld,
                                (body, opt_ident),
                                |rename_fld, fld, (body, opt_ident)| {
                expand_loop_block(rename_fld.fold_block(body), opt_ident, fld)
            });
            assert!(rewritten_pats.len() == 1);
            let head = fld.fold_expr(head);
            fld.cx.expr(span, ast::ExprForLoop(rewritten_pats.remove(0), head, body, opt_ident))
                .with_attrs(fold_thin_attrs(attrs, fld))
        }
        ast::ExprIfLet(pat, sub_expr, body, else_opt) => {
            let pat = fld.fold_pat(pat);
            // Hygienic renaming of the body.
            let (body, mut rewritten_pats) =
                rename_in_scope(vec![pat],
                                fld,
                                body,
                                |rename_fld, fld, body| {
                fld.fold_block(rename_fld.fold_block(body))
            });
            assert!(rewritten_pats.len() == 1);
            let else_opt = else_opt.map(|else_opt| fld.fold_expr(else_opt));
            let sub_expr = fld.fold_expr(sub_expr);
            fld.cx.expr(span, ast::ExprIfLet(rewritten_pats.remove(0), sub_expr, body, else_opt))
                .with_attrs(fold_thin_attrs(attrs, fld))
        }
        ast::ExprClosure(capture_clause, fn_decl, block) => {
            // Closure arguments bind patterns, so decl and body are
            // expanded and renamed together.
            let (rewritten_fn_decl, rewritten_block)
                = expand_and_rename_fn_decl_and_block(fn_decl, block, fld);
            let new_node = ast::ExprClosure(capture_clause,
                                            rewritten_fn_decl,
                                            rewritten_block);
            P(ast::Expr{id:id, node: new_node, span: fld.new_span(span),
                        attrs: fold_thin_attrs(attrs, fld)})
        }
        _ => {
            // Everything else: default recursive fold.
            P(noop_fold_expr(ast::Expr {
                id: id,
                node: node,
                span: span,
                attrs: attrs
            }, fld))
        }
    });
}
/// Expand a (not-ident-style) macro invocation. Returns the result
/// of expansion and the mark which must be applied to the result.
/// Our current interface doesn't allow us to apply the mark to the
/// result until after calling make_expr, make_items, etc.
/// Returns `None` both for unknown macros (silently ignored in this
/// fork) and for failures that have already been reported via span_err.
fn expand_mac_invoc<T, F, G>(mac: ast::Mac,
                             span: codemap::Span,
                             parse_thunk: F,
                             mark_thunk: G,
                             fld: &mut MacroExpander)
                             -> Option<T> where
    F: for<'a> FnOnce(Box<MacResult+'a>) -> Option<T>,
    G: FnOnce(T, Mrk) -> T,
{
    // it would almost certainly be cleaner to pass the whole
    // macro invocation in, rather than pulling it apart and
    // marking the tts and the ctxt separately. This also goes
    // for the other three macro invocation chunks of code
    // in this file.
    let Mac_ { path: pth, tts, .. } = mac.node;
    if pth.segments.len() > 1 {
        fld.cx.span_err(pth.span,
                        "expected macro name without module \
                        separators");
        // let compilation continue
        return None;
    }
    let extname = pth.segments[0].identifier.name;
    match fld.cx.syntax_env.find(extname) {
        None => {
            // Ignore unknown macros.
            /*
            let mut err = fld.cx.struct_span_err(
                pth.span,
                &format!("macro undefined: '{}!'",
                        &extname));
            fld.cx.suggest_macro_name(&extname.as_str(), pth.span, &mut err);
            err.emit();
            */
            // let compilation continue
            None
        }
        Some(rc) => match *rc {
            NormalTT(ref expandfun, exp_span, allow_internal_unstable) => {
                // Push expansion info so later diagnostics can show the
                // macro backtrace; popped by the caller via bt_pop.
                fld.cx.bt_push(ExpnInfo {
                    call_site: span,
                    callee: NameAndSpan {
                        format: MacroBang(extname),
                        span: exp_span,
                        allow_internal_unstable: allow_internal_unstable,
                    },
                });
                let fm = fresh_mark();
                let marked_before = mark_tts(&tts[..], fm);
                // The span that we pass to the expanders we want to
                // be the root of the call stack. That's the most
                // relevant span and it's the actual invocation of
                // the macro.
                let mac_span = fld.cx.original_span();
                let opt_parsed = {
                    let expanded = expandfun.expand(fld.cx,
                                                    mac_span,
                                                    &marked_before[..]);
                    parse_thunk(expanded)
                };
                let parsed = match opt_parsed {
                    Some(e) => e,
                    None => {
                        fld.cx.span_err(
                            pth.span,
                            &format!("non-expression macro in expression position: {}",
                                     extname
                                     ));
                        return None;
                    }
                };
                Some(mark_thunk(parsed,fm))
            }
            _ => {
                fld.cx.span_err(
                    pth.span,
                    &format!("'{}' is not a tt-style macro",
                             extname));
                None
            }
        }
    }
}
/// Rename loop label and expand its loop body
///
/// The renaming procedure for loop is different in the sense that the loop
/// body is in a block enclosed by loop head so the renaming of loop label
/// must be propagated to the enclosed context.
/// Returns the expanded body plus the (possibly renamed) label.
fn expand_loop_block(loop_block: P<Block>,
                     opt_ident: Option<Ident>,
                     fld: &mut MacroExpander) -> (P<Block>, Option<Ident>) {
    match opt_ident {
        Some(label) => {
            let new_label = fresh_name(label);
            let rename = (label, new_label);
            // The rename *must not* be added to the pending list of current
            // syntax context otherwise an unrelated `break` or `continue` in
            // the same context will pick that up in the deferred renaming pass
            // and be renamed incorrectly.
            let mut rename_list = vec!(rename);
            let mut rename_fld = IdentRenamer{renames: &mut rename_list};
            let renamed_ident = rename_fld.fold_ident(label);
            // The rename *must* be added to the enclosed syntax context for
            // `break` or `continue` to pick up because by definition they are
            // in a block enclosed by loop head.
            fld.cx.syntax_env.push_frame();
            fld.cx.syntax_env.info().pending_renames.push(rename);
            let expanded_block = expand_block_elts(loop_block, fld);
            fld.cx.syntax_env.pop_frame();
            (expanded_block, Some(renamed_ident))
        }
        // Unlabeled loop: nothing to rename, just fold the body.
        None => (fld.fold_block(loop_block), opt_ident)
    }
}
// eval $e with a new exts frame.
// must be a macro so that $e isn't evaluated too early.
// Pushes a syntax-env frame, sets its macros_escape flag, evaluates $e,
// then pops the frame and yields $e's value.
macro_rules! with_exts_frame {
    ($extsboxexpr:expr,$macros_escape:expr,$e:expr) =>
    ({$extsboxexpr.push_frame();
      $extsboxexpr.info().macros_escape = $macros_escape;
      let result = $e;
      $extsboxexpr.pop_frame();
      result
     })
}
// When we enter a module, record it, for the sake of `module!`
/// Expands an item: first applies any multi-modifier attributes, then
/// expands the resulting annotatable and flattens it back to items.
pub fn expand_item(it: P<ast::Item>, fld: &mut MacroExpander)
                   -> SmallVector<P<ast::Item>> {
    let it = expand_item_multi_modifier(Annotatable::Item(it), fld);
    expand_annotatable(it, fld)
        .into_iter().map(|i| i.expect_item()).collect()
}
/// Expand item_underscore: the interior (`Item_`) of an item.
/// Functions get their declaration and body expanded with hygienic
/// renaming of the bound arguments; every other kind uses the default
/// fold.
fn expand_item_underscore(item: ast::Item_, fld: &mut MacroExpander) -> ast::Item_ {
    match item {
        ast::ItemFn(decl, unsafety, constness, abi, generics, body) => {
            let (rewritten_fn_decl, rewritten_body)
                = expand_and_rename_fn_decl_and_block(decl, body, fld);
            let expanded_generics = fold::noop_fold_generics(generics,fld);
            ast::ItemFn(rewritten_fn_decl, unsafety, constness, abi,
                        expanded_generics, rewritten_body)
        }
        _ => noop_fold_item_underscore(item, fld)
    }
}
// does this attribute list contain "macro_use" ?
// Also accepts the deprecated synonym "macro_escape" (with a warning) and
// rejects any arguments given to the attribute.
fn contains_macro_use(fld: &mut MacroExpander, attrs: &[ast::Attribute]) -> bool {
    for attr in attrs {
        let mut is_use = attr.check_name("macro_use");
        if attr.check_name("macro_escape") {
            let mut err =
                fld.cx.struct_span_warn(attr.span,
                                        "macro_escape is a deprecated synonym for macro_use");
            is_use = true;
            // Inner-style `#![macro_escape]` gets an extra hint about the
            // outer-attribute replacement.
            if let ast::AttrStyle::Inner = attr.node.style {
                err.fileline_help(attr.span, "consider an outer attribute, \
                                              #[macro_use] mod ...").emit();
            } else {
                err.emit();
            }
        };
        if is_use {
            match attr.node.value.node {
                ast::MetaWord(..) => (),
                _ => fld.cx.span_err(attr.span, "arguments to macro_use are not allowed here"),
            }
            return true;
        }
    }
    false
}
// Support for item-position macro invocations, exactly the same
// logic as for expression-position macro invocations.
// Handles NormalTT (`foo! { .. }`), IdentTT (`foo! bar { .. }`) and
// `macro_rules!` definitions; unknown macros are left in place unchanged
// (this fork's ignore-unknown behavior).
pub fn expand_item_mac(it: P<ast::Item>,
                       fld: &mut MacroExpander) -> SmallVector<P<ast::Item>> {
    let (extname, path_span, tts, span, attrs, ident) = it.clone().and_then(|it| match it.node {
        ItemMac(codemap::Spanned { node: Mac_ { path, tts, .. }, .. }) =>
            (path.segments[0].identifier.name, path.span, tts, it.span, it.attrs, it.ident),
        _ => fld.cx.span_bug(it.span, "invalid item macro invocation")
    });
    let fm = fresh_mark();
    let items = {
        let expanded = match fld.cx.syntax_env.find(extname) {
            None => {
                // Ignore unknown macros.
                /*
                fld.cx.span_err(path_span,
                                &format!("macro undefined: '{}!'",
                                         extname));
                */
                // let compilation continue
                return SmallVector::one(it);
            }
            Some(rc) => match *rc {
                NormalTT(ref expander, tt_span, allow_internal_unstable) => {
                    // NormalTT macros take no ident (`foo!(..)`, not
                    // `foo! bar(..)`).
                    if ident.name != parse::token::special_idents::invalid.name {
                        fld.cx
                           .span_err(path_span,
                                     &format!("macro {}! expects no ident argument, given '{}'",
                                              extname,
                                              ident));
                        return SmallVector::zero();
                    }
                    fld.cx.bt_push(ExpnInfo {
                        call_site: span,
                        callee: NameAndSpan {
                            format: MacroBang(extname),
                            span: tt_span,
                            allow_internal_unstable: allow_internal_unstable,
                        }
                    });
                    // mark before expansion:
                    let marked_before = mark_tts(&tts[..], fm);
                    expander.expand(fld.cx, span, &marked_before[..])
                }
                IdentTT(ref expander, tt_span, allow_internal_unstable) => {
                    // IdentTT macros require an ident argument.
                    if ident.name == parse::token::special_idents::invalid.name {
                        fld.cx.span_err(path_span,
                                        &format!("macro {}! expects an ident argument",
                                                 extname));
                        return SmallVector::zero();
                    }
                    fld.cx.bt_push(ExpnInfo {
                        call_site: span,
                        callee: NameAndSpan {
                            format: MacroBang(extname),
                            span: tt_span,
                            allow_internal_unstable: allow_internal_unstable,
                        }
                    });
                    // mark before expansion:
                    let marked_tts = mark_tts(&tts[..], fm);
                    expander.expand(fld.cx, span, ident, marked_tts)
                }
                MacroRulesTT => {
                    if ident.name == parse::token::special_idents::invalid.name {
                        fld.cx.span_err(path_span, "macro_rules! expects an ident argument");
                        return SmallVector::zero();
                    }
                    fld.cx.bt_push(ExpnInfo {
                        call_site: span,
                        callee: NameAndSpan {
                            format: MacroBang(extname),
                            span: None,
                            // `macro_rules!` doesn't directly allow
                            // unstable (this is orthogonal to whether
                            // the macro it creates allows it)
                            allow_internal_unstable: false,
                        }
                    });
                    // DON'T mark before expansion.
                    let allow_internal_unstable = attr::contains_name(&attrs,
                                                                      "allow_internal_unstable");
                    // ensure any #[allow_internal_unstable]s are
                    // detected (including nested macro definitions
                    // etc.)
                    if allow_internal_unstable && !fld.cx.ecfg.enable_allow_internal_unstable() {
                        feature_gate::emit_feature_err(
                            &fld.cx.parse_sess.span_diagnostic,
                            "allow_internal_unstable",
                            span,
                            feature_gate::GateIssue::Language,
                            feature_gate::EXPLAIN_ALLOW_INTERNAL_UNSTABLE)
                    }
                    let export = attr::contains_name(&attrs, "macro_export");
                    let def = ast::MacroDef {
                        ident: ident,
                        attrs: attrs,
                        id: ast::DUMMY_NODE_ID,
                        span: span,
                        imported_from: None,
                        export: export,
                        use_locally: true,
                        allow_internal_unstable: allow_internal_unstable,
                        body: tts,
                    };
                    fld.cx.insert_macro(def);
                    // macro_rules! has a side effect but expands to nothing.
                    fld.cx.bt_pop();
                    return SmallVector::zero();
                }
                _ => {
                    fld.cx.span_err(span,
                                    &format!("{}! is not legal in item position",
                                             extname));
                    return SmallVector::zero();
                }
            }
        };
        expanded.make_items()
    };
    let items = match items {
        Some(items) => {
            // Apply the mark, then keep expanding the results outside-in.
            items.into_iter()
                 .map(|i| mark_item(i, fm))
                 .flat_map(|i| fld.fold_item(i).into_iter())
                 .collect()
        }
        None => {
            fld.cx.span_err(path_span,
                            &format!("non-item macro in item position: {}",
                                     extname));
            return SmallVector::zero();
        }
    };
    fld.cx.bt_pop();
    items
}
/// Expand a stmt.
///
/// Macro statements (`StmtMac`) are expanded via `expand_mac_invoc`; any
/// other statement kind falls through to `expand_non_macro_stmt`. On
/// successful expansion the results are folded again (outside-in) and the
/// backtrace frame pushed by `expand_mac_invoc` is popped here.
fn expand_stmt(stmt: P<Stmt>, fld: &mut MacroExpander) -> SmallVector<P<Stmt>> {
let stmt = stmt.and_then(|stmt| stmt);
let (mac, style, attrs) = match stmt.clone().node {
StmtMac(mac, style, attrs) => (mac, style, attrs),
_ => return expand_non_macro_stmt(stmt, fld)
};
// Assert that we drop any macro attributes on the floor here
drop(attrs);
let maybe_new_items =
expand_mac_invoc(mac.and_then(|m| m), stmt.span,
|r| r.make_stmts(),
|stmts, mark| stmts.move_map(|m| mark_stmt(m, mark)),
fld);
let mut fully_expanded = match maybe_new_items {
Some(stmts) => {
// Keep going, outside-in.
let new_items = stmts.into_iter().flat_map(|s| {
fld.fold_stmt(s).into_iter()
}).collect();
// Pop the ExpnInfo frame pushed by expand_mac_invoc.
fld.cx.bt_pop();
new_items
}
None => {
// Ignore unknown macros and keep the original statement so
// compilation can continue.
// SmallVector::zero()
SmallVector::one(P(stmt))
}
};
// If this is a macro invocation with a semicolon, then apply that
// semicolon to the final statement produced by expansion.
if style == MacStmtWithSemicolon {
if let Some(stmt) = fully_expanded.pop() {
let new_stmt = stmt.map(|Spanned {node, span}| {
Spanned {
node: match node {
StmtExpr(e, stmt_id) => StmtSemi(e, stmt_id),
_ => node /* might already have a semi */
},
span: span
}
});
fully_expanded.push(new_stmt);
}
}
fully_expanded
}
// expand a non-macro stmt. this is essentially the fallthrough for
// expand_stmt, above.
//
// For `let` declarations the pattern bindings are renamed for hygiene:
// fresh names are generated for every PatIdent, the pattern is rewritten
// with those names, and the renames are queued on the enclosing block's
// pending_renames so that later statements see the renamed identifiers.
fn expand_non_macro_stmt(Spanned {node, span: stmt_span}: Stmt, fld: &mut MacroExpander)
-> SmallVector<P<Stmt>> {
// is it a let?
match node {
StmtDecl(decl, node_id) => decl.and_then(|Spanned {node: decl, span}| match decl {
DeclLocal(local) => {
// take it apart:
let rewritten_local = local.map(|Local {id, pat, ty, init, span, attrs}| {
// expand the ty since TyFixedLengthVec contains an Expr
// and thus may have a macro use
let expanded_ty = ty.map(|t| fld.fold_ty(t));
// expand the pat (it might contain macro uses):
let expanded_pat = fld.fold_pat(pat);
// find the PatIdents in the pattern:
// oh dear heaven... this is going to include the enum
// names, as well... but that should be okay, as long as
// the new names are gensyms for the old ones.
// generate fresh names, push them to a new pending list
let idents = pattern_bindings(&expanded_pat);
let mut new_pending_renames =
idents.iter().map(|ident| (*ident, fresh_name(*ident))).collect();
// rewrite the pattern using the new names (the old
// ones have already been applied):
let rewritten_pat = {
// nested binding to allow borrow to expire:
let mut rename_fld = IdentRenamer{renames: &mut new_pending_renames};
rename_fld.fold_pat(expanded_pat)
};
// add them to the existing pending renames:
fld.cx.syntax_env.info().pending_renames
.extend(new_pending_renames);
Local {
id: id,
ty: expanded_ty,
pat: rewritten_pat,
// also, don't forget to expand the init:
init: init.map(|e| fld.fold_expr(e)),
span: span,
attrs: fold::fold_thin_attrs(attrs, fld),
}
});
SmallVector::one(P(Spanned {
node: StmtDecl(P(Spanned {
node: DeclLocal(rewritten_local),
span: span
}),
node_id),
span: stmt_span
}))
}
// non-Local declarations (items) get the default fold.
_ => {
noop_fold_stmt(Spanned {
node: StmtDecl(P(Spanned {
node: decl,
span: span
}),
node_id),
span: stmt_span
}, fld)
}
}),
// expressions / semi statements: default traversal.
_ => {
noop_fold_stmt(Spanned {
node: node,
span: stmt_span
}, fld)
}
}
}
// expand the arm of a 'match', renaming for macro hygiene
//
// Pats are expanded first (they may contain macro uses), then the guard
// and body are rewritten under the fresh names introduced for the arm's
// pattern bindings via rename_in_scope.
fn expand_arm(arm: ast::Arm, fld: &mut MacroExpander) -> ast::Arm {
// expand pats... they might contain macro uses:
let expanded_pats = arm.pats.move_map(|pat| fld.fold_pat(pat));
if expanded_pats.is_empty() {
panic!("encountered match arm with 0 patterns");
}
// apply renaming and then expansion to the guard and the body:
let ((rewritten_guard, rewritten_body), rewritten_pats) =
rename_in_scope(expanded_pats,
fld,
(arm.guard, arm.body),
|rename_fld, fld, (ag, ab)|{
let rewritten_guard = ag.map(|g| fld.fold_expr(rename_fld.fold_expr(g)));
let rewritten_body = fld.fold_expr(rename_fld.fold_expr(ab));
(rewritten_guard, rewritten_body)
});
ast::Arm {
attrs: fold::fold_attrs(arm.attrs, fld),
pats: rewritten_pats,
guard: rewritten_guard,
body: rewritten_body,
}
}
/// Generate fresh names for the bindings of `pats`, rewrite the patterns
/// with them, and run `f` with an IdentRenamer carrying the same renames
/// so that `x` (e.g. a guard/body pair) is rewritten consistently.
///
/// All of the pats are assumed to bind the same set of identifiers, so
/// only the first pattern is used to extract them.
fn rename_in_scope<X, F>(pats: Vec<P<ast::Pat>>,
fld: &mut MacroExpander,
x: X,
f: F)
-> (X, Vec<P<ast::Pat>>)
where F: Fn(&mut IdentRenamer, &mut MacroExpander, X) -> X
{
// all of the pats must have the same set of bindings, so use the
// first one to extract them and generate new names:
let idents = pattern_bindings(&pats[0]);
let new_renames = idents.into_iter().map(|id| (id, fresh_name(id))).collect();
// apply the renaming, but only to the PatIdents:
let mut rename_pats_fld = PatIdentRenamer{renames:&new_renames};
let rewritten_pats = pats.move_map(|pat| rename_pats_fld.fold_pat(pat));
// the caller's payload is renamed with the broader IdentRenamer:
let mut rename_fld = IdentRenamer{ renames:&new_renames };
(f(&mut rename_fld, fld, x), rewritten_pats)
}
/// A visitor that extracts the PatIdent (binding) paths
/// from a given thingy and puts them in a mutable
/// array
#[derive(Clone)]
struct PatIdentFinder {
// accumulates every identifier bound by a PatIdent node
ident_accumulator: Vec<ast::Ident>
}
impl<'v> Visitor<'v> for PatIdentFinder {
fn visit_pat(&mut self, pattern: &ast::Pat) {
match *pattern {
ast::Pat { id: _, node: ast::PatIdent(_, ref path1, ref inner), span: _ } => {
self.ident_accumulator.push(path1.node);
// visit optional subpattern of PatIdent:
if let Some(ref subpat) = *inner {
self.visit_pat(subpat)
}
}
// use the default traversal for non-PatIdents
_ => visit::walk_pat(self, pattern)
}
}
}
/// Collect the identifiers bound by `PatIdent` nodes within `pat`.
fn pattern_bindings(pat: &ast::Pat) -> Vec<ast::Ident> {
let mut finder = PatIdentFinder { ident_accumulator: Vec::new() };
finder.visit_pat(pat);
finder.ident_accumulator
}
/// find the PatIdent binding identifiers in the argument patterns of a
/// FnDecl (used to drive hygienic renaming of fn arguments).
fn fn_decl_arg_bindings(fn_decl: &ast::FnDecl) -> Vec<ast::Ident> {
let mut pat_idents = PatIdentFinder{ident_accumulator:Vec::new()};
for arg in &fn_decl.inputs {
pat_idents.visit_pat(&arg.pat);
}
pat_idents.ident_accumulator
}
// expand a block. pushes a new exts_frame, then calls expand_block_elts
// (the `false` means macros defined inside this block do NOT escape it).
pub fn expand_block(blk: P<Block>, fld: &mut MacroExpander) -> P<Block> {
// see note below about treatment of exts table
with_exts_frame!(fld.cx.syntax_env,false,
expand_block_elts(blk, fld))
}
// expand the elements of a block.
//
// Each statement (and the trailing expression, if any) first has the
// block's pending hygiene renames applied, then is macro-expanded.
pub fn expand_block_elts(b: P<Block>, fld: &mut MacroExpander) -> P<Block> {
b.map(|Block {id, stmts, expr, rules, span}| {
let new_stmts = stmts.into_iter().flat_map(|x| {
// perform all pending renames
let renamed_stmt = {
let pending_renames = &mut fld.cx.syntax_env.info().pending_renames;
let mut rename_fld = IdentRenamer{renames:pending_renames};
rename_fld.fold_stmt(x).expect_one("rename_fold didn't return one value")
};
// expand macros in the statement
fld.fold_stmt(renamed_stmt).into_iter()
}).collect();
// same two-phase treatment for the optional tail expression:
let new_expr = expr.map(|x| {
let expr = {
let pending_renames = &mut fld.cx.syntax_env.info().pending_renames;
let mut rename_fld = IdentRenamer{renames:pending_renames};
rename_fld.fold_expr(x)
};
fld.fold_expr(expr)
});
Block {
id: fld.new_id(id),
stmts: new_stmts,
expr: new_expr,
rules: rules,
span: span
}
})
}
/// Expand a pattern. Non-macro patterns get the default fold; PatMac
/// nodes are looked up in the syntax environment and expanded with a
/// fresh hygiene mark applied before and after expansion.
fn expand_pat(p: P<ast::Pat>, fld: &mut MacroExpander) -> P<ast::Pat> {
match p.node {
PatMac(_) => {}
_ => return noop_fold_pat(p, fld)
}
// clone so the original `p` can still be returned unchanged for
// unknown macros below.
p.clone().map(|ast::Pat {node, span, ..}| {
let (pth, tts) = match node {
PatMac(mac) => (mac.node.path, mac.node.tts),
_ => unreachable!()
};
if pth.segments.len() > 1 {
fld.cx.span_err(pth.span, "expected macro name without module separators");
return DummyResult::raw_pat(span);
}
let extname = pth.segments[0].identifier.name;
let marked_after = match fld.cx.syntax_env.find(extname) {
None => {
// Ignore unknown macros.
/*
fld.cx.span_err(pth.span,
&format!("macro undefined: '{}!'",
extname));
*/
// let compilation continue
return p.and_then(|p| p);
}
Some(rc) => match *rc {
NormalTT(ref expander, tt_span, allow_internal_unstable) => {
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
format: MacroBang(extname),
span: tt_span,
allow_internal_unstable: allow_internal_unstable,
}
});
// hygiene: mark token trees before handing them to the
// expander...
let fm = fresh_mark();
let marked_before = mark_tts(&tts[..], fm);
let mac_span = fld.cx.original_span();
let pat = expander.expand(fld.cx,
mac_span,
&marked_before[..]).make_pat();
let expanded = match pat {
Some(e) => e,
None => {
fld.cx.span_err(
pth.span,
&format!(
"non-pattern macro in pattern position: {}",
extname
)
);
return DummyResult::raw_pat(span);
}
};
// mark after:
mark_pat(expanded,fm)
}
_ => {
fld.cx.span_err(span,
&format!("{}! is not legal in pattern position",
extname));
return DummyResult::raw_pat(span);
}
}
};
// keep going, outside-in; the bt_pop matches the bt_push above.
let fully_expanded =
fld.fold_pat(marked_after).node.clone();
fld.cx.bt_pop();
ast::Pat {
id: ast::DUMMY_NODE_ID,
node: fully_expanded,
span: span
}
})
}
/// A tree-folder that applies every rename in its (mutable) list
/// to every identifier, including both bindings and varrefs
/// (and lots of things that will turn out to be neither)
pub struct IdentRenamer<'a> {
renames: &'a mtwt::RenameList,
}
impl<'a> Folder for IdentRenamer<'a> {
fn fold_ident(&mut self, id: Ident) -> Ident {
// renames are recorded in the ident's syntax context, not its name
Ident::new(id.name, mtwt::apply_renames(self.renames, id.ctxt))
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
// descend into macro invocations rather than treating them opaquely
fold::noop_fold_mac(mac, self)
}
}
/// A tree-folder that applies every rename in its list to
/// the idents that are in PatIdent patterns. This is more narrowly
/// focused than IdentRenamer, and is needed for FnDecl,
/// where we want to rename the args but not the fn name or the generics etc.
pub struct PatIdentRenamer<'a> {
renames: &'a mtwt::RenameList,
}
impl<'a> Folder for PatIdentRenamer<'a> {
fn fold_pat(&mut self, pat: P<ast::Pat>) -> P<ast::Pat> {
// only PatIdent nodes are rewritten; everything else gets the
// default traversal.
match pat.node {
ast::PatIdent(..) => {},
_ => return noop_fold_pat(pat, self)
}
pat.map(|ast::Pat {id, node, span}| match node {
ast::PatIdent(binding_mode, Spanned{span: sp, node: ident}, sub) => {
let new_ident = Ident::new(ident.name,
mtwt::apply_renames(self.renames, ident.ctxt));
let new_node =
ast::PatIdent(binding_mode,
Spanned{span: self.new_span(sp), node: new_ident},
sub.map(|p| self.fold_pat(p)));
ast::Pat {
id: id,
node: new_node,
span: self.new_span(span)
}
},
_ => unreachable!()
})
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
fold::noop_fold_mac(mac, self)
}
}
/// Expand an Annotatable (item, trait item, or impl item): first apply
/// multi-modifier attributes, then run decorator attributes (collecting
/// any items they generate), then expand the annotated node itself.
fn expand_annotatable(a: Annotatable,
fld: &mut MacroExpander)
-> SmallVector<Annotatable> {
let a = expand_item_multi_modifier(a, fld);
let mut decorator_items = SmallVector::zero();
let mut new_attrs = Vec::new();
expand_decorators(a.clone(), fld, &mut decorator_items, &mut new_attrs);
let mut new_items: SmallVector<Annotatable> = match a {
Annotatable::Item(it) => match it.node {
ast::ItemMac(..) => {
expand_item_mac(it, fld).into_iter().map(|i| Annotatable::Item(i)).collect()
}
ast::ItemMod(_) | ast::ItemForeignMod(_) => {
let valid_ident =
it.ident.name != parse::token::special_idents::invalid.name;
if valid_ident {
fld.cx.mod_push(it.ident);
}
// #[macro_use] lets macros defined in this module escape it
let macro_use = contains_macro_use(fld, &new_attrs[..]);
let result = with_exts_frame!(fld.cx.syntax_env,
macro_use,
noop_fold_item(it, fld));
if valid_ident {
fld.cx.mod_pop();
}
result.into_iter().map(|i| Annotatable::Item(i)).collect()
},
_ => {
// replace attrs with the ones surviving decorator expansion
let it = P(ast::Item {
attrs: new_attrs,
..(*it).clone()
});
noop_fold_item(it, fld).into_iter().map(|i| Annotatable::Item(i)).collect()
}
},
Annotatable::TraitItem(it) => match it.node {
ast::MethodTraitItem(_, Some(_)) => SmallVector::one(it.map(|ti| ast::TraitItem {
id: ti.id,
ident: ti.ident,
attrs: ti.attrs,
node: match ti.node {
ast::MethodTraitItem(sig, Some(body)) => {
let (sig, body) = expand_and_rename_method(sig, body, fld);
ast::MethodTraitItem(sig, Some(body))
}
_ => unreachable!()
},
span: fld.new_span(ti.span)
})),
_ => fold::noop_fold_trait_item(it, fld)
}.into_iter().map(Annotatable::TraitItem).collect(),
Annotatable::ImplItem(ii) => {
expand_impl_item(ii, fld).into_iter().map(Annotatable::ImplItem).collect()
}
};
// decorator-generated items follow the item they decorate
new_items.extend(decorator_items.into_iter());
new_items
}
// Partition a set of attributes into one kind of attribute, and other kinds.
//
// Generates a function `$fn_name` that returns (attrs resolving to the
// given SyntaxExtension `$variant`, all remaining attrs).
macro_rules! partition {
($fn_name: ident, $variant: ident) => {
fn $fn_name(attrs: &[ast::Attribute],
fld: &MacroExpander)
-> (Vec<ast::Attribute>, Vec<ast::Attribute>) {
attrs.iter().cloned().partition(|attr| {
match fld.cx.syntax_env.find(intern(&attr.name())) {
Some(rc) => match *rc {
$variant(..) => true,
_ => false
},
_ => false
}
})
}
}
}
// `multi_modifiers(attrs, fld)` splits out MultiModifier attributes.
partition!(multi_modifiers, MultiModifier);
/// Run every MultiDecorator attribute on `a`, appending the (recursively
/// expanded) items each decorator produces to `decorator_items`.
/// Attributes that are not decorators are collected into `new_attrs`.
fn expand_decorators(a: Annotatable,
fld: &mut MacroExpander,
decorator_items: &mut SmallVector<Annotatable>,
new_attrs: &mut Vec<ast::Attribute>)
{
for attr in a.attrs() {
let mname = intern(&attr.name());
match fld.cx.syntax_env.find(mname) {
Some(rc) => match *rc {
MultiDecorator(ref dec) => {
attr::mark_used(&attr);
fld.cx.bt_push(ExpnInfo {
call_site: attr.span,
callee: NameAndSpan {
format: MacroAttribute(mname),
span: Some(attr.span),
// attributes can do whatever they like,
// for now.
allow_internal_unstable: true,
}
});
// we'd ideally decorator_items.push_all(expand_annotatable(ann, fld)),
// but that double-mut-borrows fld
let mut items: SmallVector<Annotatable> = SmallVector::zero();
dec.expand(fld.cx,
attr.span,
&attr.node.value,
&a,
&mut |ann| items.push(ann));
decorator_items.extend(items.into_iter()
.flat_map(|ann| expand_annotatable(ann, fld).into_iter()));
fld.cx.bt_pop();
}
_ => new_attrs.push((*attr).clone()),
},
_ => new_attrs.push((*attr).clone()),
}
}
}
/// Apply every MultiModifier attribute on `it`, stripping those attrs
/// from the node first. Recurses afterwards because a modifier's output
/// may itself carry further modifier attributes.
fn expand_item_multi_modifier(mut it: Annotatable,
fld: &mut MacroExpander)
-> Annotatable {
let (modifiers, other_attrs) = multi_modifiers(it.attrs(), fld);
// Update the attrs, leave everything else alone. Is this mutation really a good idea?
it = it.fold_attrs(other_attrs);
if modifiers.is_empty() {
return it
}
for attr in &modifiers {
let mname = intern(&attr.name());
match fld.cx.syntax_env.find(mname) {
Some(rc) => match *rc {
MultiModifier(ref mac) => {
attr::mark_used(attr);
fld.cx.bt_push(ExpnInfo {
call_site: attr.span,
callee: NameAndSpan {
format: MacroAttribute(mname),
span: Some(attr.span),
// attributes can do whatever they like,
// for now
allow_internal_unstable: true,
}
});
it = mac.expand(fld.cx, attr.span, &*attr.node.value, it);
fld.cx.bt_pop();
}
// `modifiers` only contains attrs matched by partition!,
// so anything else here is a logic error.
_ => unreachable!()
},
_ => unreachable!()
}
}
// Expansion may have added new ItemModifiers.
expand_item_multi_modifier(it, fld)
}
/// Expand an impl item: methods get their signature/body expanded and
/// hygienically renamed; macro impl items are invoked and their output
/// expanded again; everything else gets the default fold.
fn expand_impl_item(ii: P<ast::ImplItem>, fld: &mut MacroExpander)
-> SmallVector<P<ast::ImplItem>> {
match ii.node {
ast::ImplItemKind::Method(..) => SmallVector::one(ii.map(|ii| ast::ImplItem {
id: ii.id,
ident: ii.ident,
attrs: ii.attrs,
vis: ii.vis,
node: match ii.node {
ast::ImplItemKind::Method(sig, body) => {
let (sig, body) = expand_and_rename_method(sig, body, fld);
ast::ImplItemKind::Method(sig, body)
}
_ => unreachable!()
},
span: fld.new_span(ii.span)
})),
ast::ImplItemKind::Macro(_) => {
let (span, mac) = ii.clone().and_then(|ii| match ii.node {
ast::ImplItemKind::Macro(mac) => (ii.span, mac),
_ => unreachable!()
});
let maybe_new_items =
expand_mac_invoc(mac, span,
|r| r.make_impl_items(),
|meths, mark| meths.move_map(|m| mark_impl_item(m, mark)),
fld);
match maybe_new_items {
Some(impl_items) => {
// expand again if necessary
let new_items = impl_items.into_iter().flat_map(|ii| {
expand_impl_item(ii, fld).into_iter()
}).collect();
// matches the bt_push done inside expand_mac_invoc
fld.cx.bt_pop();
new_items
}
None => {
// Ignore unknown macros.
// SmallVector::zero()
SmallVector::one(ii)
}
}
}
_ => fold::noop_fold_impl_item(ii, fld)
}
}
/// Given a fn_decl and a block and a MacroExpander, expand the fn_decl, then use the
/// PatIdents in its arguments to perform renaming in the FnDecl and
/// the block, returning both the new FnDecl and the new Block.
fn expand_and_rename_fn_decl_and_block(fn_decl: P<ast::FnDecl>, block: P<ast::Block>,
fld: &mut MacroExpander)
-> (P<ast::FnDecl>, P<ast::Block>) {
let expanded_decl = fld.fold_fn_decl(fn_decl);
let idents = fn_decl_arg_bindings(&expanded_decl);
// fresh names for every argument binding:
let renames =
idents.iter().map(|id| (*id,fresh_name(*id))).collect();
// first, a renamer for the PatIdents, for the fn_decl:
let mut rename_pat_fld = PatIdentRenamer{renames: &renames};
let rewritten_fn_decl = rename_pat_fld.fold_fn_decl(expanded_decl);
// now, a renamer for *all* idents, for the body:
let mut rename_fld = IdentRenamer{renames: &renames};
let rewritten_body = fld.fold_block(rename_fld.fold_block(block));
(rewritten_fn_decl,rewritten_body)
}
/// Method-level wrapper around expand_and_rename_fn_decl_and_block:
/// expands/renames the decl and body, and folds the rest of the
/// signature (generics, explicit self).
fn expand_and_rename_method(sig: ast::MethodSig, body: P<ast::Block>,
fld: &mut MacroExpander)
-> (ast::MethodSig, P<ast::Block>) {
let (rewritten_fn_decl, rewritten_body)
= expand_and_rename_fn_decl_and_block(sig.decl, body, fld);
(ast::MethodSig {
generics: fld.fold_generics(sig.generics),
abi: sig.abi,
explicit_self: fld.fold_explicit_self(sig.explicit_self),
unsafety: sig.unsafety,
constness: sig.constness,
decl: rewritten_fn_decl
}, rewritten_body)
}
/// Expand a type. `TyMac` nodes are only expanded when the `type_macros`
/// feature is enabled; otherwise a feature-gate error is emitted and a
/// dummy type is substituted so compilation can continue.
pub fn expand_type(t: P<ast::Ty>, fld: &mut MacroExpander) -> P<ast::Ty> {
let t = match t.node.clone() {
ast::Ty_::TyMac(mac) => {
if fld.cx.ecfg.features.unwrap().type_macros {
let expanded_ty = match expand_mac_invoc(mac, t.span,
|r| r.make_ty(),
mark_ty,
fld) {
Some(ty) => ty,
None => {
// unknown macro: substitute a dummy type
return DummyResult::raw_ty(t.span);
}
};
// Keep going, outside-in.
let fully_expanded = fld.fold_ty(expanded_ty);
// matches the bt_push done inside expand_mac_invoc
fld.cx.bt_pop();
fully_expanded.map(|t| ast::Ty {
id: ast::DUMMY_NODE_ID,
node: t.node,
span: t.span,
})
} else {
feature_gate::emit_feature_err(
&fld.cx.parse_sess.span_diagnostic,
"type_macros",
t.span,
feature_gate::GateIssue::Language,
"type macros are experimental");
DummyResult::raw_ty(t.span)
}
}
_ => t
};
fold::noop_fold_ty(t, fld)
}
/// A tree-folder that performs macro expansion
pub struct MacroExpander<'a, 'b:'a> {
// the extension context carrying the syntax environment, diagnostics
// and expansion backtrace used throughout expansion
pub cx: &'a mut ExtCtxt<'b>,
}
impl<'a, 'b> MacroExpander<'a, 'b> {
/// Create a MacroExpander wrapping the given extension context.
pub fn new(cx: &'a mut ExtCtxt<'b>) -> MacroExpander<'a, 'b> {
MacroExpander { cx: cx }
}
}
impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
expand_expr(expr, self)
}
fn | (&mut self, pat: P<ast::Pat>) -> P<ast::Pat> {
expand_pat(pat, self)
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
expand_item(item, self)
}
fn fold_item_underscore(&mut self, item: ast::Item_) -> ast::Item_ {
expand_item_underscore(item, self)
}
fn fold_stmt(&mut self, stmt: P<ast::Stmt>) -> SmallVector<P<ast::Stmt>> {
expand_stmt(stmt, self)
}
fn fold_block(&mut self, block: P<Block>) -> P<Block> {
expand_block(block, self)
}
fn fold_arm(&mut self, arm: ast::Arm) -> ast::Arm {
expand_arm(arm, self)
}
fn fold_trait_item(&mut self, i: P<ast::TraitItem>) -> SmallVector<P<ast::TraitItem>> {
expand_annotatable(Annotatable::TraitItem(i), self)
.into_iter().map(|i| i.expect_trait_item()).collect()
}
fn fold_impl_item(&mut self, i: P<ast::ImplItem>) -> SmallVector<P<ast::ImplItem>> {
expand_annotatable(Annotatable::ImplItem(i), self)
.into_iter().map(|i| i.expect_impl_item()).collect()
}
fn fold_ty(&mut self, ty: P<ast::Ty>) -> P<ast::Ty> {
expand_type(ty, self)
}
fn new_span(&mut self, span: Span) -> Span {
new_span(self.cx, span)
}
}
/// Rewrite `sp` so its expansion id points at the current backtrace.
/* this discards information in the case of macro-defining macros */
fn new_span(cx: &ExtCtxt, sp: Span) -> Span {
Span {
expn_id: cx.backtrace(),
lo: sp.lo,
hi: sp.hi,
}
}
/// Configuration knobs consulted during macro expansion.
pub struct ExpansionConfig<'feat> {
// name of the crate being expanded
pub crate_name: String,
// enabled feature gates, if known (None = no feature info)
pub features: Option<&'feat Features>,
// maximum macro expansion depth before giving up
pub recursion_limit: usize,
// whether trace_macros-style tracing is on
pub trace_mac: bool,
}
// Generates boolean accessors on ExpansionConfig that report whether a
// given field of the (optional) Features struct is enabled; a missing
// Features means "disabled".
macro_rules! feature_tests {
($( fn $getter:ident = $field:ident, )*) => {
$(
pub fn $getter(&self) -> bool {
match self.features {
Some(&Features { $field: true, .. }) => true,
_ => false,
}
}
)*
}
}
impl<'feat> ExpansionConfig<'feat> {
/// Default configuration: no feature info, recursion limit 64,
/// macro tracing off.
pub fn default(crate_name: String) -> ExpansionConfig<'static> {
ExpansionConfig {
crate_name: crate_name,
features: None,
recursion_limit: 64,
trace_mac: false,
}
}
// feature-gate accessors generated by the feature_tests! macro above
feature_tests! {
fn enable_quotes = allow_quote,
fn enable_asm = allow_asm,
fn enable_log_syntax = allow_log_syntax,
fn enable_concat_idents = allow_concat_idents,
fn enable_trace_macros = allow_trace_macros,
fn enable_allow_internal_unstable = allow_internal_unstable,
fn enable_custom_derive = allow_custom_derive,
fn enable_pushpop_unsafe = allow_pushpop_unsafe,
}
}
/// Expand a whole crate: set up the crate root (std/core/none), install
/// imported macros and user-provided extensions, fold the crate, and
/// abort on any accumulated errors. Returns the expanded crate along
/// with the set of macro names seen.
pub fn expand_crate(mut cx: ExtCtxt,
// these are the macros being imported to this crate:
imported_macros: Vec<ast::MacroDef>,
user_exts: Vec<NamedSyntaxExtension>,
c: Crate) -> (Crate, HashSet<Name>) {
if std_inject::no_core(&c) {
cx.crate_root = None;
} else if std_inject::no_std(&c) {
cx.crate_root = Some("core");
} else {
cx.crate_root = Some("std");
}
let ret = {
let mut expander = MacroExpander::new(&mut cx);
for def in imported_macros {
expander.cx.insert_macro(def);
}
for (name, extension) in user_exts {
expander.cx.syntax_env.insert(name, extension);
}
let mut ret = expander.fold_crate(c);
ret.exported_macros = expander.cx.exported_macros.clone();
// fail fast if expansion produced errors
cx.parse_sess.span_diagnostic.abort_if_errors();
ret
};
return (ret, cx.syntax_env.names);
}
// HYGIENIC CONTEXT EXTENSION:
// all of these functions are for walking over
// ASTs and making some change to the context of every
// element that has one. a CtxtFn is a trait-ified
// version of a closure in (SyntaxContext -> SyntaxContext).
// the ones defined here include:
// Marker - add a mark to a context
// A Marker adds the given mark to the syntax context
// of every identifier (and Mac node) it folds over.
struct Marker { mark: Mrk }
impl Folder for Marker {
fn fold_ident(&mut self, id: Ident) -> Ident {
ast::Ident::new(id.name, mtwt::apply_mark(self.mark, id.ctxt))
}
fn fold_mac(&mut self, Spanned {node, span}: ast::Mac) -> ast::Mac {
// marks apply to the macro's path, token trees, and its own ctxt
Spanned {
node: Mac_ {
path: self.fold_path(node.path),
tts: self.fold_tts(&node.tts),
ctxt: mtwt::apply_mark(self.mark, node.ctxt),
},
span: span,
}
}
}
// apply a given mark to the given token trees. Used prior to expansion of a macro.
fn mark_tts(tts: &[TokenTree], m: Mrk) -> Vec<TokenTree> {
noop_fold_tts(tts, &mut Marker{mark:m})
}
// apply a given mark to the given expr. Used following the expansion of a macro.
fn mark_expr(expr: P<ast::Expr>, m: Mrk) -> P<ast::Expr> {
Marker{mark:m}.fold_expr(expr)
}
// apply a given mark to the given pattern. Used following the expansion of a macro.
fn mark_pat(pat: P<ast::Pat>, m: Mrk) -> P<ast::Pat> {
Marker{mark:m}.fold_pat(pat)
}
// apply a given mark to the given stmt. Used following the expansion of a macro.
// Folding a stmt can in general yield several stmts; marking must not.
fn mark_stmt(stmt: P<ast::Stmt>, m: Mrk) -> P<ast::Stmt> {
Marker{mark:m}.fold_stmt(stmt)
.expect_one("marking a stmt didn't return exactly one stmt")
}
// apply a given mark to the given item. Used following the expansion of a macro.
fn mark_item(expr: P<ast::Item>, m: Mrk) -> P<ast::Item> {
Marker{mark:m}.fold_item(expr)
.expect_one("marking an item didn't return exactly one item")
}
// apply a given mark to the given item. Used following the expansion of a macro.
fn mark_impl_item(ii: P<ast::ImplItem>, m: Mrk) -> P<ast::ImplItem> {
Marker{mark:m}.fold_impl_item(ii)
.expect_one("marking an impl item didn't return exactly one impl item")
}
// apply a given mark to the given type. Used following the expansion of a macro.
fn mark_ty(ty: P<ast::Ty>, m: Mrk) -> P<ast::Ty> {
Marker { mark: m }.fold_ty(ty)
}
/// Check that there are no macro invocations left in the AST:
pub fn check_for_macros(sess: &parse::ParseSess, krate: &ast::Crate) {
visit::walk_crate(&mut MacroExterminator{sess:sess}, krate);
}
/// A visitor that ensures that no macro invocations remain in an AST.
/// Any Mac node encountered after expansion is an internal compiler bug.
struct MacroExterminator<'a>{
sess: &'a parse::ParseSess
}
impl<'a, 'v> Visitor<'v> for MacroExterminator<'a> {
fn visit_mac(&mut self, mac: &ast::Mac) {
self.sess.span_diagnostic.span_bug(mac.span,
"macro exterminator: expected AST \
with no macro invocations");
}
}
#[cfg(test)]
mod tests {
use super::{pattern_bindings, expand_crate};
use super::{PatIdentFinder, IdentRenamer, PatIdentRenamer, ExpansionConfig};
use ast;
use ast::Name;
use codemap;
use ext::base::ExtCtxt;
use ext::mtwt;
use fold::Folder;
use parse;
use parse::token;
use util::parser_testing::{string_to_parser};
use util::parser_testing::{string_to_pat, string_to_crate, strs_to_idents};
use visit;
use visit::Visitor;
// a visitor that extracts the paths
// from a given thingy and puts them in a mutable
// array (passed in to the traversal)
#[derive(Clone)]
struct PathExprFinderContext {
path_accumulator: Vec<ast::Path> ,
}
impl<'v> Visitor<'v> for PathExprFinderContext {
fn visit_expr(&mut self, expr: &ast::Expr) {
if let ast::ExprPath(None, ref p) = expr.node {
self.path_accumulator.push(p.clone());
}
visit::walk_expr(self, expr);
}
}
// find the variable references in a crate
fn crate_varrefs(the_crate : &ast::Crate) -> Vec<ast::Path> {
let mut path_finder = PathExprFinderContext{path_accumulator:Vec::new()};
visit::walk_crate(&mut path_finder, the_crate);
path_finder.path_accumulator
}
/// A Visitor that extracts the identifiers from a thingy.
// as a side note, I'm starting to want to abstract over these....
struct IdentFinder {
ident_accumulator: Vec<ast::Ident>
}
impl<'v> Visitor<'v> for IdentFinder {
fn visit_ident(&mut self, _: codemap::Span, id: ast::Ident){
self.ident_accumulator.push(id);
}
}
/// Find the idents in a crate
fn crate_idents(the_crate: &ast::Crate) -> Vec<ast::Ident> {
let mut ident_finder = IdentFinder{ident_accumulator: Vec::new()};
visit::walk_crate(&mut ident_finder, the_crate);
ident_finder.ident_accumulator
}
// these following tests are quite fragile, in that they don't test what
// *kind* of failure occurs.
fn test_ecfg() -> ExpansionConfig<'static> {
ExpansionConfig::default("test".to_string())
}
// make sure that macros can't escape fns
#[should_panic]
#[test] fn macros_cant_escape_fns_test () {
let src = "fn bogus() {macro_rules! z (() => (3+4));}\
fn inty() -> i32 { z!() }".to_string();
let sess = parse::ParseSess::new();
let crate_ast = parse::parse_crate_from_source_str(
"<test>".to_string(),
src,
Vec::new(), &sess);
// should fail:
let mut gated_cfgs = vec![];
let ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut gated_cfgs);
expand_crate(ecx, vec![], vec![], crate_ast);
}
// make sure that macros can't escape modules
#[should_panic]
#[test] fn macros_cant_escape_mods_test () {
let src = "mod foo {macro_rules! z (() => (3+4));}\
fn inty() -> i32 { z!() }".to_string();
let sess = parse::ParseSess::new();
let crate_ast = parse::parse_crate_from_source_str(
"<test>".to_string(),
src,
Vec::new(), &sess);
let mut gated_cfgs = vec![];
let ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut gated_cfgs);
expand_crate(ecx, vec![], vec![], crate_ast);
}
// macro_use modules should allow macros to escape
#[test] fn macros_can_escape_flattened_mods_test () {
let src = "#[macro_use] mod foo {macro_rules! z (() => (3+4));}\
fn inty() -> i32 { z!() }".to_string();
let sess = parse::ParseSess::new();
let crate_ast = parse::parse_crate_from_source_str(
"<test>".to_string(),
src,
Vec::new(), &sess);
let mut gated_cfgs = vec![];
let ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut gated_cfgs);
expand_crate(ecx, vec![], vec![], crate_ast);
}
fn expand_crate_str(crate_str: String) -> ast::Crate {
let ps = parse::ParseSess::new();
let crate_ast = panictry!(string_to_parser(&ps, crate_str).parse_crate_mod());
// the cfg argument actually does matter, here...
let mut gated_cfgs = vec![];
let ecx = ExtCtxt::new(&ps, vec![], test_ecfg(), &mut gated_cfgs);
expand_crate(ecx, vec![], vec![], crate_ast).0
}
// find the pat_ident paths in a crate
fn crate_bindings(the_crate : &ast::Crate) -> Vec<ast::Ident> {
let mut name_finder = PatIdentFinder{ident_accumulator:Vec::new()};
visit::walk_crate(&mut name_finder, the_crate);
name_finder.ident_accumulator
}
#[test] fn macro_tokens_should_match(){
expand_crate_str(
"macro_rules! m((a)=>(13)) ;fn main(){m!(a);}".to_string());
}
// should be able to use a bound identifier as a literal in a macro definition:
#[test] fn self_macro_parsing(){
expand_crate_str(
"macro_rules! foo ((zz) => (287;));
fn f(zz: i32) {foo!(zz);}".to_string()
);
}
// renaming tests expand a crate and then check that the bindings match
// the right varrefs. The specification of the test case includes the
// text of the crate, and also an array of arrays. Each element in the
// outer array corresponds to a binding in the traversal of the AST
// induced by visit. Each of these arrays contains a list of indexes,
// interpreted as the varrefs in the varref traversal that this binding
// should match. So, for instance, in a program with two bindings and
// three varrefs, the array [[1, 2], [0]] would indicate that the first
// binding should match the second two varrefs, and the second binding
// should match the first varref.
//
// Put differently; this is a sparse representation of a boolean matrix
// indicating which bindings capture which identifiers.
//
// Note also that this matrix is dependent on the implicit ordering of
// the bindings and the varrefs discovered by the name-finder and the path-finder.
//
// The comparisons are done post-mtwt-resolve, so we're comparing renamed
// names; differences in marks don't matter any more.
//
// oog... I also want tests that check "bound-identifier-=?". That is,
// not just "do these have the same name", but "do they have the same
// name *and* the same marks"? Understanding this is really pretty painful.
// in principle, you might want to control this boolean on a per-varref basis,
// but that would make things even harder to understand, and might not be
// necessary for thorough testing.
type RenamingTest = (&'static str, Vec<Vec<usize>>, bool);
#[test]
fn automatic_renaming () {
let tests: Vec<RenamingTest> =
vec!(// b & c should get new names throughout, in the expr too:
("fn a() -> i32 { let b = 13; let c = b; b+c }",
vec!(vec!(0,1),vec!(2)), false),
// both x's should be renamed (how is this causing a bug?)
("fn main () {let x: i32 = 13;x;}",
vec!(vec!(0)), false),
// the use of b after the + should be renamed, the other one not:
("macro_rules! f (($x:ident) => (b + $x)); fn a() -> i32 { let b = 13; f!(b)}",
vec!(vec!(1)), false),
// the b before the plus should not be renamed (requires marks)
("macro_rules! f (($x:ident) => ({let b=9; ($x + b)})); fn a() -> i32 { f!(b)}",
vec!(vec!(1)), false),
// the marks going in and out of letty should cancel, allowing that $x to
// capture the one following the semicolon.
// this was an awesome test case, and caught a *lot* of bugs.
("macro_rules! letty(($x:ident) => (let $x = 15;));
macro_rules! user(($x:ident) => ({letty!($x); $x}));
fn main() -> i32 {user!(z)}",
vec!(vec!(0)), false)
);
for (idx,s) in tests.iter().enumerate() {
run_renaming_test(s,idx);
}
}
// no longer a fixme #8062: this test exposes a *potential* bug; our system does
// not behave exactly like MTWT, but a conversation with Matthew Flatt
// suggests that this can only occur in the presence of local-expand, which
// we have no plans to support. ... unless it's needed for item hygiene....
#[ignore]
#[test]
fn issue_8062(){
run_renaming_test(
&("fn main() {let hrcoo = 19; macro_rules! getx(()=>(hrcoo)); getx!();}",
vec!(vec!(0)), true), 0)
}
// FIXME #6994:
// the z flows into and out of two macros (g & f) along one path, and one
// (just g) along the other, so the result of the whole thing should
// be "let z_123 = 3; z_123"
#[ignore]
#[test]
fn issue_6994(){
run_renaming_test(
&("macro_rules! g (($x:ident) =>
({macro_rules! f(($y:ident)=>({let $y=3;$x}));f!($x)}));
fn a(){g!(z)}",
vec!(vec!(0)),false),
0)
}
// match variable hygiene. Should expand into
// fn z() {match 8 {x_1 => {match 9 {x_2 | x_2 if x_2 == x_1 => x_2 + x_1}}}}
#[test]
fn issue_9384(){
run_renaming_test(
&("macro_rules! bad_macro (($ex:expr) => ({match 9 {x | x if x == $ex => x + $ex}}));
fn z() {match 8 {x => bad_macro!(x)}}",
// NB: the third "binding" is the repeat of the second one.
vec!(vec!(1,3),vec!(0,2),vec!(0,2)),
true),
0)
}
// interpolated nodes weren't getting labeled.
// should expand into
// fn main(){let g1_1 = 13; g1_1}}
#[test]
fn pat_expand_issue_15221(){
run_renaming_test(
&("macro_rules! inner ( ($e:pat ) => ($e));
macro_rules! outer ( ($e:pat ) => (inner!($e)));
fn main() { let outer!(g) = 13; g;}",
vec!(vec!(0)),
true),
0)
}
// create a really evil test case where a $x appears inside a binding of $x
// but *shouldn't* bind because it was inserted by a different macro....
// can't write this test case until we have macro-generating macros.
// method arg hygiene
// method expands to fn get_x(&self_0, x_1: i32) {self_0 + self_2 + x_3 + x_1}
#[test]
fn method_arg_hygiene(){
run_renaming_test(
&("macro_rules! inject_x (()=>(x));
macro_rules! inject_self (()=>(self));
struct A;
impl A{fn get_x(&self, x: i32) {self + inject_self!() + inject_x!() + x;} }",
vec!(vec!(0),vec!(3)),
true),
0)
}
// ooh, got another bite?
// expands to struct A; impl A {fn thingy(&self_1) {self_1;}}
#[test]
fn method_arg_hygiene_2(){
run_renaming_test(
&("struct A;
macro_rules! add_method (($T:ty) =>
(impl $T { fn thingy(&self) {self;} }));
add_method!(A);",
vec!(vec!(0)),
true),
0)
}
// item fn hygiene
// expands to fn q(x_1: i32){fn g(x_2: i32){x_2 + x_1};}
#[test]
fn issue_9383(){
run_renaming_test(
&("macro_rules! bad_macro (($ex:expr) => (fn g(x: i32){ x + $ex }));
fn q(x: i32) { bad_macro!(x); }",
vec!(vec!(1),vec!(0)),true),
0)
}
// closure arg hygiene (ExprClosure)
// expands to fn f(){(|x_1 : i32| {(x_2 + x_1)})(3);}
#[test]
fn closure_arg_hygiene(){
run_renaming_test(
&("macro_rules! inject_x (()=>(x));
fn f(){(|x : i32| {(inject_x!() + x)})(3);}",
vec!(vec!(1)),
true),
0)
}
// macro_rules in method position. Sadly, unimplemented.
#[test]
fn macro_in_method_posn(){
expand_crate_str(
"macro_rules! my_method (() => (fn thirteen(&self) -> i32 {13}));
struct A;
impl A{ my_method!(); }
fn f(){A.thirteen;}".to_string());
}
// another nested macro
// expands to impl Entries {fn size_hint(&self_1) {self_1;}
#[test]
fn item_macro_workaround(){
run_renaming_test(
&("macro_rules! item { ($i:item) => {$i}}
struct Entries;
macro_rules! iterator_impl {
() => { item!( impl Entries { fn size_hint(&self) { self;}});}}
iterator_impl! { }",
vec!(vec!(0)), true),
0)
}
// run one of the renaming tests
fn run_renaming_test(t: &RenamingTest, test_idx: usize) {
let invalid_name = token::special_idents::invalid.name;
let (teststr, bound_connections, bound_ident_check) = match *t {
(ref str,ref conns, bic) => (str.to_string(), conns.clone(), bic)
};
let cr = expand_crate_str(teststr.to_string());
let bindings = crate_bindings(&cr);
let varrefs = crate_varrefs(&cr);
// must be one check clause for each binding:
assert_eq!(bindings.len(),bound_connections.len());
for (binding_idx,shouldmatch) in bound_connections.iter().enumerate() {
let binding_name = mtwt::resolve(bindings[binding_idx]);
let binding_marks = mtwt::marksof(bindings[binding_idx].ctxt, invalid_name);
// shouldmatch can't name varrefs that don't exist:
assert!((shouldmatch.is_empty()) ||
(varrefs.len() > *shouldmatch.iter().max().unwrap()));
for (idx,varref) in varrefs.iter().enumerate() {
let print_hygiene_debug_info = || {
// good lord, you can't make a path with 0 segments, can you?
let final_varref_ident = match varref.segments.last() {
Some(pathsegment) => pathsegment.identifier,
None => panic!("varref with 0 path segments?")
};
let varref_name = mtwt::resolve(final_varref_ident);
let varref_idents : Vec<ast::Ident>
= varref.segments.iter().map(|s| s.identifier)
.collect();
println!("varref #{}: {:?}, resolves to {}",idx, varref_idents, varref_name);
println!("varref's first segment's string: \"{}\"", final_varref_ident);
println!("binding #{}: {}, resolves to {}",
binding_idx, bindings[binding_idx], binding_name);
mtwt::with_sctable(|x| mtwt::display_sctable(x));
};
if shouldmatch.contains(&idx) {
// it should be a path of length 1, and it should
// be free-identifier=? or bound-identifier=? to the given binding
assert_eq!(varref.segments.len(),1);
let varref_name = mtwt::resolve(varref.segments[0].identifier);
let varref_marks = mtwt::marksof(varref.segments[0]
.identifier
.ctxt,
invalid_name);
if !(varref_name==binding_name) {
println!("uh oh, should match but doesn't:");
print_hygiene_debug_info();
}
assert_eq!(varref_name,binding_name);
if bound_ident_check {
// we're checking bound-identifier=?, and the marks
// should be the same, too:
assert_eq!(varref_marks,binding_marks.clone());
}
} else {
let varref_name = mtwt::resolve(varref.segments[0].identifier);
let fail = (varref.segments.len() == 1)
&& (varref_name == binding_name);
// temp debugging:
if fail {
println!("failure on test {}",test_idx);
println!("text of test case: \"{}\"", teststr);
println!("");
println!("uh oh, matches but shouldn't:");
print_hygiene_debug_info();
}
assert!(!fail);
}
}
}
}
#[test]
fn fmt_in_macro_used_inside_module_macro() {
let crate_str = "macro_rules! fmt_wrap(($b:expr)=>($b.to_string()));
macro_rules! foo_module (() => (mod generated { fn a() { let xx = 147; fmt_wrap!(xx);}}));
foo_module!();
".to_string();
let cr = expand_crate_str(crate_str);
// find the xx binding
let bindings = crate_bindings(&cr);
let cxbinds: Vec<&ast::Ident> =
bindings.iter().filter(|b| b.name.as_str() == "xx").collect();
let cxbinds: &[&ast::Ident] = &cxbinds[..];
let cxbind = match (cxbinds.len(), cxbinds.get(0)) {
(1, Some(b)) => *b,
_ => panic!("expected just one binding for ext_cx")
};
let resolved_binding = mtwt::resolve(*cxbind);
let varrefs = crate_varrefs(&cr);
// the xx binding should bind all of the xx varrefs:
for (idx,v) in varrefs.iter().filter(|p| {
p.segments.len() == 1
&& p.segments[0].identifier.name.as_str() == "xx"
}).enumerate() {
if mtwt::resolve(v.segments[0].identifier) != resolved_binding {
println!("uh oh, xx binding didn't match xx varref:");
println!("this is xx varref \\# {}", idx);
println!("binding: {}", cxbind);
println!("resolves to: {}", resolved_binding);
println!("varref: {}", v.segments[0].identifier);
println!("resolves to: {}",
mtwt::resolve(v.segments[0].identifier));
mtwt::with_sctable(|x| mtwt::display_sctable(x));
}
assert_eq!(mtwt::resolve(v.segments[0].identifier),
resolved_binding);
};
}
#[test]
fn pat_idents(){
let pat = string_to_pat(
"(a,Foo{x:c @ (b,9),y:Bar(4,d)})".to_string());
let idents = pattern_bindings(&pat);
assert_eq!(idents, strs_to_idents(vec!("a","c","b","d")));
}
// test the list of identifier patterns gathered by the visitor. Note that
// 'None' is listed as an identifier pattern because we don't yet know that
// it's the name of a 0-ary variant, and that 'i' appears twice in succession.
#[test]
fn crate_bindings_test(){
let the_crate = string_to_crate("fn main (a: i32) -> i32 {|b| {
match 34 {None => 3, Some(i) | i => j, Foo{k:z,l:y} => \"banana\"}} }".to_string());
let idents = crate_bindings(&the_crate);
assert_eq!(idents, strs_to_idents(vec!("a","b","None","i","i","z","y")));
}
// test the IdentRenamer directly
#[test]
fn ident_renamer_test () {
let the_crate = string_to_crate("fn f(x: i32){let x = x; x}".to_string());
let f_ident = token::str_to_ident("f");
let x_ident = token::str_to_ident("x");
let int_ident = token::str_to_ident("i32");
let renames = vec!((x_ident,Name(16)));
let mut renamer = IdentRenamer{renames: &renames};
let renamed_crate = renamer.fold_crate(the_crate);
let idents = crate_idents(&renamed_crate);
let resolved : Vec<ast::Name> = idents.iter().map(|id| mtwt::resolve(*id)).collect();
assert_eq!(resolved, [f_ident.name,Name(16),int_ident.name,Name(16),Name(16),Name(16)]);
}
// test the PatIdentRenamer; only PatIdents get renamed
#[test]
fn pat_ident_renamer_test () {
let the_crate = string_to_crate("fn f(x: i32){let x = x; x}".to_string());
let f_ident = token::str_to_ident("f");
let x_ident = token::str_to_ident("x");
let int_ident = token::str_to_ident("i32");
let renames = vec!((x_ident,Name(16)));
let mut renamer = PatIdentRenamer{renames: &renames};
let renamed_crate = renamer.fold_crate(the_crate);
let idents = crate_idents(&renamed_crate);
let resolved : Vec<ast::Name> = idents.iter().map(|id| mtwt::resolve(*id)).collect();
let x_name = x_ident.name;
assert_eq!(resolved, [f_ident.name,Name(16),int_ident.name,Name(16),x_name,x_name]);
}
}
| fold_pat |
xastore.go | package stores
import "jvmgo/ch10/instructions/base"
import "jvmgo/ch10/rtda"
import "jvmgo/ch10/rtda/heap"
// Store into reference array
type AASTORE struct{ base.NoOperandsInstruction }
func (self *AASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
ref := stack.PopRef()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef)
refs := arrRef.Refs()
checkIndex(len(refs), index)
refs[index] = ref
}
// Store into byte or boolean array
type BASTORE struct{ base.NoOperandsInstruction }
func (self *BASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
val := stack.PopInt()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef)
bytes := arrRef.Bytes()
checkIndex(len(bytes), index)
bytes[index] = int8(val)
}
// Store into char array
type CASTORE struct{ base.NoOperandsInstruction }
func (self *CASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
val := stack.PopInt()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef)
chars := arrRef.Chars()
checkIndex(len(chars), index)
chars[index] = uint16(val)
}
// Store into double array
type DASTORE struct{ base.NoOperandsInstruction }
func (self *DASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
val := stack.PopDouble()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef)
doubles := arrRef.Doubles()
checkIndex(len(doubles), index)
doubles[index] = float64(val)
}
// Store into float array
type FASTORE struct{ base.NoOperandsInstruction }
func (self *FASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
val := stack.PopFloat()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef)
floats := arrRef.Floats()
checkIndex(len(floats), index)
floats[index] = float32(val)
}
// Store into int array
type IASTORE struct{ base.NoOperandsInstruction }
func (self *IASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
val := stack.PopInt()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef) | ints[index] = int32(val)
}
// Store into long array
type LASTORE struct{ base.NoOperandsInstruction }
func (self *LASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
val := stack.PopLong()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef)
longs := arrRef.Longs()
checkIndex(len(longs), index)
longs[index] = int64(val)
}
// Store into short array
type SASTORE struct{ base.NoOperandsInstruction }
func (self *SASTORE) Execute(frame *rtda.Frame) {
stack := frame.OperandStack()
val := stack.PopInt()
index := stack.PopInt()
arrRef := stack.PopRef()
checkNotNil(arrRef)
shorts := arrRef.Shorts()
checkIndex(len(shorts), index)
shorts[index] = int16(val)
}
func checkNotNil(ref *heap.Object) {
if ref == nil {
panic("java.lang.NullPointerException")
}
}
func checkIndex(arrLen int, index int32) {
if index < 0 || index >= int32(arrLen) {
panic("ArrayIndexOutOfBoundsException")
}
} | ints := arrRef.Ints()
checkIndex(len(ints), index) |
general.py | import time
import numpy as np
import torch
def to_tensor(tensor):
if isinstance(tensor, np.ndarray):
tensor = torch.from_numpy(tensor)
if torch.cuda.is_available():
return torch.autograd.Variable(tensor).cuda()
return torch.autograd.Variable(tensor)
def set_default_device_cuda():
"""Sets the default device (cpu or cuda) used for all tensors."""
if torch.cuda.is_available() == False:
|
else: # device_name == "cuda":
tensor = torch.cuda.FloatTensor # pylint: disable=E1101
torch.set_default_tensor_type(tensor)
return True
def estimate_run_time(start_time, n_steps, step):
time_elapsed = int(time.time() - start_time)
time_left = (time_elapsed * ((n_steps - step) / (step + 1)))
summary = {"elapsed": time_elapsed, "left": time_left}
return summary | tensor = torch.FloatTensor
torch.set_default_tensor_type(tensor)
return False |
ussd.route.js | import { Router } from 'express';
import { ussd } from '../controllers/v1/ussd.controller';
const ussdRoutes = Router();
ussdRoutes
.post(
'/send',
ussd
).get( | );
export default ussdRoutes; | '/send',
ussd |
move_semantics3.rs | // move_semantics3.rs
// Make me compile without adding new lines-- just changing existing lines!
// (no lines with multiple semicolons necessary!)
// Execute `rustlings hint move_semantics3` for hints :)
fn main() {
let mut vec1 = Vec::new();
fill_vec(&mut vec1);
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
vec1.push(88);
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
}
fn fill_vec(vec: &mut Vec<i32>) | {
vec.push(22);
vec.push(44);
vec.push(66);
} |
|
stream_scene_camera_video.py | import cv2
from pupil_labs.realtime_api.simple import discover_one_device
def main():
# Look for devices. Returns as soon as it has found the first device.
|
def draw_time(frame, time):
frame_txt_font_name = cv2.FONT_HERSHEY_SIMPLEX
frame_txt_font_scale = 1.0
frame_txt_thickness = 1
# first line: frame index
frame_txt = str(time)
cv2.putText(
frame,
frame_txt,
(20, 50),
frame_txt_font_name,
frame_txt_font_scale,
(255, 255, 255),
thickness=frame_txt_thickness,
lineType=cv2.LINE_8,
)
if __name__ == "__main__":
main()
| print("Looking for the next best device...")
device = discover_one_device(max_search_duration_seconds=10)
if device is None:
print("No device found.")
raise SystemExit(-1)
print(f"Connecting to {device}...")
try:
while True:
bgr_pixels, frame_datetime = device.receive_scene_video_frame()
draw_time(bgr_pixels, frame_datetime)
cv2.imshow("Scene Camera - Press ESC to quit", bgr_pixels)
if cv2.waitKey(1) & 0xFF == 27:
break
except KeyboardInterrupt:
pass
finally:
print("Stopping...")
device.close() # explicitly stop auto-update |
Cert_9_2_12_Announce.py | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER1 = 1
ROUTER1 = 2
LEADER2 = 3
ROUTER2 = 4
MED = 5
DATASET1_TIMESTAMP = 20
DATASET1_CHANNEL = 11
DATASET1_PANID = 0xface
DATASET2_TIMESTAMP = 10
DATASET2_CHANNEL = 12
DATASET2_PANID = 0xafce
class Cert_9_2_12_Announce(unittest.TestCase):
def setUp(self):
|
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER1].start()
self.nodes[LEADER1].set_state('leader')
self.assertEqual(self.nodes[LEADER1].get_state(), 'leader')
self.nodes[LEADER1].commissioner_start()
time.sleep(3)
self.nodes[ROUTER1].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[LEADER2].start()
self.nodes[LEADER2].set_state('leader')
self.assertEqual(self.nodes[LEADER2].get_state(), 'leader')
self.nodes[ROUTER2].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[MED].start()
time.sleep(5)
self.assertEqual(self.nodes[MED].get_state(), 'child')
ipaddrs = self.nodes[ROUTER1].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.nodes[LEADER1].announce_begin(0x1000, 1, 1000, ipaddr)
time.sleep(30)
self.assertEqual(self.nodes[LEADER2].get_state(), 'router')
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.assertEqual(self.nodes[MED].get_state(), 'child')
ipaddrs = self.nodes[MED].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
self.assertTrue(self.nodes[LEADER1].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| self.nodes = {}
for i in range(1,6):
self.nodes[i] = node.Node(i)
self.nodes[LEADER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID)
self.nodes[LEADER1].set_mode('rsdn')
self.nodes[LEADER1].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER1].enable_whitelist()
self.nodes[ROUTER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER1].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER2].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[LEADER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
self.nodes[LEADER2].set_mode('rsdn')
self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[LEADER2].enable_whitelist()
self.nodes[LEADER2].set_router_selection_jitter(1)
self.nodes[ROUTER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
self.nodes[ROUTER2].set_mode('rsdn')
self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER2].get_addr64())
self.nodes[ROUTER2].add_whitelist(self.nodes[MED].get_addr64())
self.nodes[ROUTER2].enable_whitelist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
self.nodes[MED].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
self.nodes[MED].set_mode('rsn')
self.nodes[MED].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[MED].enable_whitelist() |
spi.rs | #![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]
#[path = "../example_common.rs"]
mod example_common;
use embassy::executor::Spawner;
use embassy_stm32::gpio::{Level, Output, Speed};
use embedded_hal::digital::v2::OutputPin;
use example_common::*;
use embassy_stm32::dma::NoDma;
use embassy_stm32::spi::{Config, Spi};
use embassy_stm32::time::Hertz;
use embassy_stm32::Peripherals;
use embedded_hal::blocking::spi::Transfer;
#[embassy::main]
async fn | (_spawner: Spawner, p: Peripherals) {
info!("Hello World, folks!");
let mut spi = Spi::new(
p.SPI1,
p.PB3,
p.PA7,
p.PA6,
NoDma,
NoDma,
Hertz(1_000_000),
Config::default(),
);
let mut cs = Output::new(p.PA15, Level::High, Speed::VeryHigh);
loop {
let mut buf = [0x0Au8; 4];
unwrap!(cs.set_low());
unwrap!(spi.transfer(&mut buf));
unwrap!(cs.set_high());
info!("xfer {=[u8]:x}", buf);
}
}
| main |
test_tokenizer.py | # coding: utf8
from __future__ import unicode_literals
import pytest
NB_TOKEN_EXCEPTION_TESTS = [
(
"Smørsausen brukes bl.a. til fisk",
["Smørsausen", "brukes", "bl.a.", "til", "fisk"],
),
(
"Jeg kommer først kl. 13 pga. diverse forsinkelser",
["Jeg", "kommer", "først", "kl.", "13", "pga.", "diverse", "forsinkelser"],
),
]
@pytest.mark.parametrize("text,expected_tokens", NB_TOKEN_EXCEPTION_TESTS)
def test_nb_tokenizer_handles_exception_cases(nb_tokenizer, text, expected_tokens):
toke | ns = nb_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
|
|
client.go | // Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kvstore
import (
"fmt"
)
var (
// defaultClient is the default client initialized by initClient
defaultClient BackendOperations
// defaultClientSet is a channel that is closed whenever the defaultClient
// is set.
defaultClientSet = make(chan struct{})
)
func initClient(module backendModule, opts *ExtraOptions) error {
c, errChan := module.newClient(opts)
if c == nil {
err := <-errChan
log.WithError(err).Fatalf("Unable to create etcd client")
}
defaultClient = c
select {
case <-defaultClientSet:
// avoid closing channel already closed.
default:
close(defaultClientSet)
}
go func() {
err, isErr := <-errChan
if isErr && err != nil {
log.WithError(err).Fatalf("Unable to connect to kvstore")
}
deleteLegacyPrefixes()
}()
return nil
}
// Client returns the global kvstore client or nil if the client is not configured yet
func Client() BackendOperations |
// NewClient returns a new kvstore client based on the configuration
func NewClient(selectedBackend string, opts map[string]string, options *ExtraOptions) (BackendOperations, chan error) {
// Channel used to report immediate errors, module.newClient will
// create and return a different channel, caller doesn't need to know
errChan := make(chan error, 1)
defer close(errChan)
module := getBackend(selectedBackend)
if module == nil {
errChan <- fmt.Errorf("unknown key-value store type %q. See cilium.link/err-kvstore for details", selectedBackend)
return nil, errChan
}
if err := module.setConfig(opts); err != nil {
errChan <- err
return nil, errChan
}
if err := module.setExtraConfig(options); err != nil {
errChan <- err
return nil, errChan
}
return module.newClient(options)
}
| {
<-defaultClientSet
return defaultClient
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.